Diffstat (limited to 'lib')
-rw-r--r--  lib/CMakeLists.txt | 79
-rw-r--r--  lib/Makefile.mk | 13
-rw-r--r--  lib/asan/CMakeLists.txt | 24
-rw-r--r--  lib/asan/asan_activation.cc | 6
-rw-r--r--  lib/asan/asan_activation_flags.inc | 1
-rw-r--r--  lib/asan/asan_allocator.cc | 62
-rw-r--r--  lib/asan/asan_allocator.h | 30
-rw-r--r--  lib/asan/asan_debugging.cc | 125
-rw-r--r--  lib/asan/asan_descriptions.cc | 486
-rw-r--r--  lib/asan/asan_descriptions.h | 253
-rw-r--r--  lib/asan/asan_errors.cc | 503
-rw-r--r--  lib/asan/asan_errors.h | 390
-rw-r--r--  lib/asan/asan_fake_stack.cc | 2
-rw-r--r--  lib/asan/asan_fake_stack.h | 6
-rw-r--r--  lib/asan/asan_flags.cc | 12
-rw-r--r--  lib/asan/asan_flags.inc | 11
-rw-r--r--  lib/asan/asan_globals.cc | 79
-rw-r--r--  lib/asan/asan_globals_win.cc | 62
-rw-r--r--  lib/asan/asan_globals_win.h | 34
-rw-r--r--  lib/asan/asan_interceptors.cc | 151
-rw-r--r--  lib/asan/asan_interface_internal.h | 19
-rw-r--r--  lib/asan/asan_internal.h | 18
-rw-r--r--  lib/asan/asan_mac.cc | 9
-rw-r--r--  lib/asan/asan_malloc_linux.cc | 8
-rw-r--r--  lib/asan/asan_malloc_win.cc | 6
-rw-r--r--  lib/asan/asan_mapping.h | 39
-rw-r--r--  lib/asan/asan_memory_profile.cc | 2
-rw-r--r--  lib/asan/asan_new_delete.cc | 62
-rw-r--r--  lib/asan/asan_poisoning.cc | 47
-rw-r--r--  lib/asan/asan_poisoning.h | 4
-rw-r--r--  lib/asan/asan_posix.cc | 19
-rw-r--r--  lib/asan/asan_report.cc | 983
-rw-r--r--  lib/asan/asan_report.h | 26
-rw-r--r--  lib/asan/asan_rtl.cc | 157
-rw-r--r--  lib/asan/asan_scariness_score.h | 21
-rw-r--r--  lib/asan/asan_thread.cc | 18
-rw-r--r--  lib/asan/asan_thread.h | 3
-rw-r--r--  lib/asan/asan_win.cc | 148
-rw-r--r--  lib/asan/asan_win_dll_thunk.cc | 72
-rw-r--r--  lib/asan/asan_win_dynamic_runtime_thunk.cc | 43
-rwxr-xr-x  lib/asan/scripts/asan_device_setup | 31
-rw-r--r--  lib/asan/tests/CMakeLists.txt | 33
-rw-r--r--  lib/asan/tests/asan_asm_test.cc | 26
-rw-r--r--  lib/asan/tests/asan_interface_test.cc | 4
-rw-r--r--  lib/asan/tests/asan_internal_interface_test.cc | 36
-rw-r--r--  lib/asan/tests/asan_noinst_test.cc | 9
-rw-r--r--  lib/asan/tests/asan_str_test.cc | 12
-rw-r--r--  lib/asan/tests/asan_test.cc | 2
-rw-r--r--  lib/asan/tests/asan_test_main.cc | 11
-rw-r--r--  lib/asan/tests/asan_test_utils.h | 4
-rw-r--r--  lib/builtins/CMakeLists.txt | 167
-rw-r--r--  lib/builtins/Makefile.mk | 25
-rw-r--r--  lib/builtins/arm/Makefile.mk | 20
-rw-r--r--  lib/builtins/arm/aeabi_idivmod.S | 18
-rw-r--r--  lib/builtins/arm/aeabi_ldivmod.S | 12
-rw-r--r--  lib/builtins/arm/aeabi_uidivmod.S | 24
-rw-r--r--  lib/builtins/arm/aeabi_uldivmod.S | 12
-rw-r--r--  lib/builtins/arm/comparesf2.S | 3
-rw-r--r--  lib/builtins/arm/divsi3.S | 20
-rw-r--r--  lib/builtins/arm/udivsi3.S | 133
-rw-r--r--  lib/builtins/arm64/Makefile.mk | 20
-rw-r--r--  lib/builtins/armv6m/Makefile.mk | 20
-rw-r--r--  lib/builtins/assembly.h | 3
-rw-r--r--  lib/builtins/atomic.c | 11
-rw-r--r--  lib/builtins/clear_cache.c | 4
-rw-r--r--  lib/builtins/i386/Makefile.mk | 20
-rw-r--r--  lib/builtins/int_lib.h | 6
-rw-r--r--  lib/builtins/mingw_fixfloat.c | 36
-rw-r--r--  lib/builtins/ppc/Makefile.mk | 20
-rw-r--r--  lib/builtins/x86_64/Makefile.mk | 20
-rw-r--r--  lib/cfi/CMakeLists.txt | 4
-rw-r--r--  lib/cfi/cfi.cc | 2
-rw-r--r--  lib/dfsan/CMakeLists.txt | 5
-rw-r--r--  lib/dfsan/dfsan.cc | 25
-rw-r--r--  lib/dfsan/dfsan.h | 3
-rw-r--r--  lib/dfsan/dfsan_interceptors.cc | 2
-rw-r--r--  lib/dfsan/dfsan_platform.h | 17
-rw-r--r--  lib/dfsan/done_abilist.txt | 8
-rw-r--r--  lib/esan/CMakeLists.txt | 5
-rw-r--r--  lib/esan/cache_frag.cpp | 4
-rw-r--r--  lib/esan/esan.cpp | 8
-rw-r--r--  lib/esan/esan.h | 1
-rw-r--r--  lib/esan/esan_flags.cpp | 2
-rw-r--r--  lib/esan/esan_hashtable.h | 381
-rw-r--r--  lib/esan/esan_interceptors.cpp | 42
-rw-r--r--  lib/esan/esan_interface_internal.h | 3
-rw-r--r--  lib/esan/esan_linux.cpp | 2
-rw-r--r--  lib/esan/esan_shadow.h | 131
-rw-r--r--  lib/interception/interception.h | 6
-rw-r--r--  lib/interception/interception_win.cc | 138
-rw-r--r--  lib/interception/tests/CMakeLists.txt | 1
-rw-r--r--  lib/interception/tests/interception_linux_test.cc | 7
-rw-r--r--  lib/interception/tests/interception_win_test.cc | 39
-rw-r--r--  lib/lsan/CMakeLists.txt | 7
-rw-r--r--  lib/lsan/lsan_allocator.cc | 19
-rw-r--r--  lib/lsan/lsan_common.cc | 3
-rw-r--r--  lib/lsan/lsan_common_linux.cc | 2
-rw-r--r--  lib/lsan/lsan_thread.cc | 2
-rw-r--r--  lib/msan/CMakeLists.txt | 4
-rw-r--r--  lib/msan/msan.h | 97
-rw-r--r--  lib/msan/msan_allocator.cc | 48
-rw-r--r--  lib/msan/msan_interceptors.cc | 86
-rw-r--r--  lib/msan/msan_interface_internal.h | 10
-rw-r--r--  lib/msan/msan_linux.cc | 3
-rw-r--r--  lib/profile/CMakeLists.txt | 5
-rw-r--r--  lib/profile/GCDAProfiling.c | 4
-rw-r--r--  lib/profile/InstrProfData.inc | 11
-rw-r--r--  lib/profile/InstrProfiling.c | 16
-rw-r--r--  lib/profile/InstrProfiling.h | 63
-rw-r--r--  lib/profile/InstrProfilingFile.c | 162
-rw-r--r--  lib/profile/InstrProfilingInternal.h | 18
-rw-r--r--  lib/profile/InstrProfilingPort.h | 6
-rw-r--r--  lib/profile/InstrProfilingRuntime.cc | 3
-rw-r--r--  lib/profile/InstrProfilingUtil.c | 39
-rw-r--r--  lib/profile/InstrProfilingUtil.h | 21
-rw-r--r--  lib/profile/InstrProfilingValue.c | 2
-rw-r--r--  lib/profile/WindowsMMap.c | 3
-rw-r--r--  lib/safestack/CMakeLists.txt | 4
-rw-r--r--  lib/safestack/safestack.cc | 2
-rw-r--r--  lib/sanitizer_common/.clang-tidy | 4
-rw-r--r--  lib/sanitizer_common/CMakeLists.txt | 21
-rw-r--r--  lib/sanitizer_common/sanitizer_addrhashmap.h | 12
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator.cc | 34
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator.h | 1452
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator_bytemap.h | 103
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator_combined.h | 233
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator_interface.h | 4
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator_internal.h | 4
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator_local_cache.h | 249
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator_primary32.h | 310
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator_primary64.h | 522
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator_secondary.h | 282
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator_size_class_map.h | 217
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator_stats.h | 107
-rw-r--r--  lib/sanitizer_common/sanitizer_atomic.h | 5
-rw-r--r--  lib/sanitizer_common/sanitizer_common.cc | 25
-rw-r--r--  lib/sanitizer_common/sanitizer_common.h | 96
-rw-r--r--  lib/sanitizer_common/sanitizer_common_interceptors.inc | 330
-rw-r--r--  lib/sanitizer_common/sanitizer_common_interceptors_format.inc | 7
-rwxr-xr-x  lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc | 3
-rw-r--r--  lib/sanitizer_common/sanitizer_common_libcdep.cc | 22
-rw-r--r--  lib/sanitizer_common/sanitizer_common_nolibc.cc | 8
-rw-r--r--  lib/sanitizer_common/sanitizer_coverage_libcdep.cc | 45
-rw-r--r--  lib/sanitizer_common/sanitizer_coverage_libcdep_new.cc | 165
-rw-r--r--  lib/sanitizer_common/sanitizer_dbghelp.h | 42
-rw-r--r--  lib/sanitizer_common/sanitizer_flags.cc | 5
-rw-r--r--  lib/sanitizer_common/sanitizer_flags.inc | 21
-rw-r--r--  lib/sanitizer_common/sanitizer_interface_internal.h | 11
-rw-r--r--  lib/sanitizer_common/sanitizer_internal_defs.h | 23
-rw-r--r--  lib/sanitizer_common/sanitizer_libc.h | 10
-rw-r--r--  lib/sanitizer_common/sanitizer_libignore.cc | 31
-rw-r--r--  lib/sanitizer_common/sanitizer_linux.cc | 24
-rw-r--r--  lib/sanitizer_common/sanitizer_linux.h | 2
-rw-r--r--  lib/sanitizer_common/sanitizer_linux_libcdep.cc | 9
-rw-r--r--  lib/sanitizer_common/sanitizer_linux_mips64.S | 23
-rw-r--r--  lib/sanitizer_common/sanitizer_mac.cc | 126
-rw-r--r--  lib/sanitizer_common/sanitizer_mac.h | 6
-rw-r--r--  lib/sanitizer_common/sanitizer_malloc_mac.inc | 36
-rw-r--r--  lib/sanitizer_common/sanitizer_platform.h | 8
-rw-r--r--  lib/sanitizer_common/sanitizer_platform_interceptors.h | 12
-rw-r--r--  lib/sanitizer_common/sanitizer_platform_limits_linux.cc | 6
-rw-r--r--  lib/sanitizer_common/sanitizer_platform_limits_posix.cc | 13
-rw-r--r--  lib/sanitizer_common/sanitizer_platform_limits_posix.h | 93
-rw-r--r--  lib/sanitizer_common/sanitizer_posix_libcdep.cc | 25
-rw-r--r--  lib/sanitizer_common/sanitizer_printf.cc | 2
-rw-r--r--  lib/sanitizer_common/sanitizer_procmaps.h | 13
-rw-r--r--  lib/sanitizer_common/sanitizer_procmaps_freebsd.cc | 4
-rw-r--r--  lib/sanitizer_common/sanitizer_procmaps_linux.cc | 4
-rw-r--r--  lib/sanitizer_common/sanitizer_procmaps_mac.cc | 91
-rw-r--r--  lib/sanitizer_common/sanitizer_quarantine.h | 1
-rw-r--r--  lib/sanitizer_common/sanitizer_stackdepot.cc | 6
-rw-r--r--  lib/sanitizer_common/sanitizer_stacktrace_libcdep.cc | 57
-rw-r--r--  lib/sanitizer_common/sanitizer_stacktrace_printer.cc | 29
-rw-r--r--  lib/sanitizer_common/sanitizer_stacktrace_printer.h | 7
-rw-r--r--  lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc | 8
-rw-r--r--  lib/sanitizer_common/sanitizer_symbolizer_libcdep.cc | 32
-rw-r--r--  lib/sanitizer_common/sanitizer_symbolizer_mac.cc | 7
-rw-r--r--  lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc | 113
-rw-r--r--  lib/sanitizer_common/sanitizer_symbolizer_win.cc | 40
-rw-r--r--  lib/sanitizer_common/sanitizer_thread_registry.cc | 2
-rw-r--r--  lib/sanitizer_common/sanitizer_win.cc | 92
-rwxr-xr-x  lib/sanitizer_common/scripts/gen_dynamic_list.py | 8
-rw-r--r--  lib/sanitizer_common/symbolizer/sanitizer_symbolize.cc | 72
-rw-r--r--  lib/sanitizer_common/symbolizer/sanitizer_wrappers.cc | 175
-rwxr-xr-x  lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh | 187
-rw-r--r--  lib/sanitizer_common/symbolizer/scripts/global_symbols.txt | 137
-rw-r--r--  lib/sanitizer_common/tests/CMakeLists.txt | 5
-rw-r--r--  lib/sanitizer_common/tests/malloc_stress_transfer_test.cc | 37
-rw-r--r--  lib/sanitizer_common/tests/sanitizer_allocator_test.cc | 267
-rw-r--r--  lib/sanitizer_common/tests/sanitizer_allocator_testlib.cc | 37
-rw-r--r--  lib/sanitizer_common/tests/sanitizer_common_test.cc | 53
-rw-r--r--  lib/sanitizer_common/tests/sanitizer_format_interceptor_test.cc | 4
-rw-r--r--  lib/sanitizer_common/tests/sanitizer_libc_test.cc | 2
-rw-r--r--  lib/sanitizer_common/tests/sanitizer_nolibc_test_main.cc | 2
-rw-r--r--  lib/sanitizer_common/tests/sanitizer_procmaps_test.cc | 21
-rw-r--r--  lib/sanitizer_common/tests/sanitizer_test_main.cc | 2
-rw-r--r--  lib/scudo/CMakeLists.txt | 8
-rw-r--r--  lib/scudo/scudo_allocator.cpp | 407
-rw-r--r--  lib/scudo/scudo_allocator.h | 49
-rw-r--r--  lib/scudo/scudo_allocator_secondary.h | 188
-rw-r--r--  lib/scudo/scudo_flags.cpp | 26
-rw-r--r--  lib/scudo/scudo_flags.h | 2
-rw-r--r--  lib/scudo/scudo_interceptors.cpp | 2
-rw-r--r--  lib/scudo/scudo_new_delete.cpp | 2
-rw-r--r--  lib/scudo/scudo_termination.cpp | 19
-rw-r--r--  lib/scudo/scudo_utils.cpp | 158
-rw-r--r--  lib/scudo/scudo_utils.h | 26
-rw-r--r--  lib/stats/stats_client.cc | 1
-rw-r--r--  lib/tsan/CMakeLists.txt | 12
-rw-r--r--  lib/tsan/go/build.bat | 2
-rwxr-xr-x  lib/tsan/go/buildgo.sh | 2
-rw-r--r--  lib/tsan/go/tsan_go.cc | 5
-rw-r--r--  lib/tsan/rtl/tsan_clock.cc | 2
-rw-r--r--  lib/tsan/rtl/tsan_debugging.cc | 77
-rw-r--r--  lib/tsan/rtl/tsan_defs.h | 14
-rw-r--r--  lib/tsan/rtl/tsan_flags.cc | 2
-rw-r--r--  lib/tsan/rtl/tsan_flags.inc | 3
-rw-r--r--  lib/tsan/rtl/tsan_interceptors.cc | 43
-rw-r--r--  lib/tsan/rtl/tsan_interceptors_mac.cc | 68
-rw-r--r--  lib/tsan/rtl/tsan_interface.h | 19
-rw-r--r--  lib/tsan/rtl/tsan_interface_atomic.cc | 20
-rw-r--r--  lib/tsan/rtl/tsan_interface_inl.h | 8
-rw-r--r--  lib/tsan/rtl/tsan_interface_java.cc | 17
-rw-r--r--  lib/tsan/rtl/tsan_interface_java.h | 4
-rw-r--r--  lib/tsan/rtl/tsan_libdispatch_mac.cc | 75
-rw-r--r--  lib/tsan/rtl/tsan_mman.cc | 29
-rw-r--r--  lib/tsan/rtl/tsan_mutexset.h | 4
-rw-r--r--  lib/tsan/rtl/tsan_platform.h | 223
-rw-r--r--  lib/tsan/rtl/tsan_platform_linux.cc | 31
-rw-r--r--  lib/tsan/rtl/tsan_platform_mac.cc | 82
-rw-r--r--  lib/tsan/rtl/tsan_platform_posix.cc | 2
-rw-r--r--  lib/tsan/rtl/tsan_platform_windows.cc | 4
-rw-r--r--  lib/tsan/rtl/tsan_report.cc | 4
-rw-r--r--  lib/tsan/rtl/tsan_rtl.cc | 93
-rw-r--r--  lib/tsan/rtl/tsan_rtl.h | 37
-rw-r--r--  lib/tsan/rtl/tsan_rtl_aarch64.S | 76
-rw-r--r--  lib/tsan/rtl/tsan_rtl_mips64.S | 214
-rw-r--r--  lib/tsan/rtl/tsan_rtl_mutex.cc | 8
-rw-r--r--  lib/tsan/rtl/tsan_rtl_proc.cc | 4
-rw-r--r--  lib/tsan/rtl/tsan_rtl_report.cc | 18
-rw-r--r--  lib/tsan/rtl/tsan_rtl_thread.cc | 25
-rw-r--r--  lib/tsan/rtl/tsan_suppressions.cc | 4
-rw-r--r--  lib/tsan/rtl/tsan_sync.cc | 4
-rw-r--r--  lib/tsan/rtl/tsan_sync.h | 8
-rw-r--r--  lib/tsan/rtl/tsan_trace.h | 4
-rw-r--r--  lib/tsan/tests/CMakeLists.txt | 6
-rw-r--r--  lib/tsan/tests/rtl/tsan_posix.cc | 69
-rw-r--r--  lib/tsan/tests/rtl/tsan_posix_util.h | 77
-rw-r--r--  lib/tsan/tests/rtl/tsan_test_util_posix.cc | 53
-rw-r--r--  lib/tsan/tests/unit/tsan_mman_test.cc | 8
-rw-r--r--  lib/ubsan/CMakeLists.txt | 22
-rw-r--r--  lib/ubsan/ubsan_init.cc | 1
-rw-r--r--  lib/ubsan/ubsan_type_hash_itanium.cc | 14
-rw-r--r--  lib/xray/CMakeLists.txt | 79
-rw-r--r--  lib/xray/tests/CMakeLists.txt | 59
-rw-r--r--  lib/xray/tests/unit/CMakeLists.txt | 2
-rw-r--r--  lib/xray/tests/unit/buffer_queue_test.cc | 81
-rw-r--r--  lib/xray/tests/unit/xray_unit_test_main.cc | 18
-rw-r--r--  lib/xray/xray_AArch64.cc | 119
-rw-r--r--  lib/xray/xray_arm.cc | 156
-rw-r--r--  lib/xray/xray_buffer_queue.cc | 65
-rw-r--r--  lib/xray/xray_buffer_queue.h | 86
-rw-r--r--  lib/xray/xray_defs.h | 22
-rw-r--r--  lib/xray/xray_emulate_tsc.h | 40
-rw-r--r--  lib/xray/xray_flags.cc | 62
-rw-r--r--  lib/xray/xray_flags.h | 37
-rw-r--r--  lib/xray/xray_flags.inc | 22
-rw-r--r--  lib/xray/xray_init.cc | 70
-rw-r--r--  lib/xray/xray_inmemory_log.cc | 185
-rw-r--r--  lib/xray/xray_interface.cc | 207
-rw-r--r--  lib/xray/xray_interface_internal.h | 69
-rw-r--r--  lib/xray/xray_trampoline_AArch64.S | 89
-rw-r--r--  lib/xray/xray_trampoline_arm.S | 65
-rw-r--r--  lib/xray/xray_trampoline_x86_64.S | 147
-rw-r--r--  lib/xray/xray_x86_64.cc | 202
-rw-r--r--  lib/xray/xray_x86_64.h | 32
276 files changed, 12510 insertions, 4363 deletions
diff --git a/lib/CMakeLists.txt b/lib/CMakeLists.txt
index a2b55c4e35c5..4ab1e933af3a 100644
--- a/lib/CMakeLists.txt
+++ b/lib/CMakeLists.txt
@@ -4,56 +4,59 @@
include(AddCompilerRT)
include(SanitizerUtils)
+# Hoist the building of sanitizer_common up here and gate it on whether we're
+# building either the sanitizers or xray (or both).
+#
+# TODO: Refactor sanitizer_common into smaller pieces (e.g. flag parsing, utils).
+if (COMPILER_RT_HAS_SANITIZER_COMMON AND
+ (COMPILER_RT_BUILD_SANITIZERS OR COMPILER_RT_BUILD_XRAY))
+ add_subdirectory(sanitizer_common)
+endif()
+
if(COMPILER_RT_BUILD_BUILTINS)
add_subdirectory(builtins)
endif()
-if(COMPILER_RT_BUILD_SANITIZERS)
- if(COMPILER_RT_HAS_INTERCEPTION)
- add_subdirectory(interception)
+function(compiler_rt_build_runtime runtime)
+ string(TOUPPER ${runtime} runtime_uppercase)
+ if(COMPILER_RT_HAS_${runtime_uppercase})
+ add_subdirectory(${runtime})
+ foreach(directory ${ARGN})
+ add_subdirectory(${directory})
+ endforeach()
endif()
+endfunction()
+
+function(compiler_rt_build_sanitizer sanitizer)
+ string(TOUPPER ${sanitizer} sanitizer_uppercase)
+ string(TOLOWER ${sanitizer} sanitizer_lowercase)
+ list(FIND COMPILER_RT_SANITIZERS_TO_BUILD ${sanitizer_lowercase} result)
+ if(NOT ${result} EQUAL -1)
+ compiler_rt_build_runtime(${sanitizer} ${ARGN})
+ endif()
+endfunction()
+
+if(COMPILER_RT_BUILD_SANITIZERS)
+ compiler_rt_build_runtime(interception)
if(COMPILER_RT_HAS_SANITIZER_COMMON)
- add_subdirectory(sanitizer_common)
add_subdirectory(stats)
add_subdirectory(lsan)
add_subdirectory(ubsan)
endif()
- if(COMPILER_RT_HAS_ASAN)
- add_subdirectory(asan)
- endif()
-
- if(COMPILER_RT_HAS_DFSAN)
- add_subdirectory(dfsan)
- endif()
-
- if(COMPILER_RT_HAS_MSAN)
- add_subdirectory(msan)
- endif()
+ compiler_rt_build_sanitizer(asan)
+ compiler_rt_build_sanitizer(dfsan)
+ compiler_rt_build_sanitizer(msan)
+ compiler_rt_build_sanitizer(tsan tsan/dd)
+ compiler_rt_build_sanitizer(safestack)
+ compiler_rt_build_sanitizer(cfi)
+ compiler_rt_build_sanitizer(esan)
+ compiler_rt_build_sanitizer(scudo)
- if(COMPILER_RT_HAS_PROFILE)
- add_subdirectory(profile)
- endif()
-
- if(COMPILER_RT_HAS_TSAN)
- add_subdirectory(tsan)
- add_subdirectory(tsan/dd)
- endif()
-
- if(COMPILER_RT_HAS_SAFESTACK)
- add_subdirectory(safestack)
- endif()
-
- if(COMPILER_RT_HAS_CFI)
- add_subdirectory(cfi)
- endif()
-
- if(COMPILER_RT_HAS_ESAN)
- add_subdirectory(esan)
- endif()
+ compiler_rt_build_runtime(profile)
+endif()
- if(COMPILER_RT_HAS_SCUDO)
- add_subdirectory(scudo)
- endif()
+if(COMPILER_RT_BUILD_XRAY)
+ compiler_rt_build_runtime(xray)
endif()
diff --git a/lib/Makefile.mk b/lib/Makefile.mk
deleted file mode 100644
index b1540bdce705..000000000000
--- a/lib/Makefile.mk
+++ /dev/null
@@ -1,13 +0,0 @@
-#===- lib/Makefile.mk --------------------------------------*- Makefile -*--===#
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-#===------------------------------------------------------------------------===#
-
-SubDirs :=
-
-# Add submodules.
-SubDirs += builtins
diff --git a/lib/asan/CMakeLists.txt b/lib/asan/CMakeLists.txt
index b7e41fc7021c..1258ef6165b4 100644
--- a/lib/asan/CMakeLists.txt
+++ b/lib/asan/CMakeLists.txt
@@ -4,9 +4,12 @@ set(ASAN_SOURCES
asan_allocator.cc
asan_activation.cc
asan_debugging.cc
+ asan_descriptions.cc
+ asan_errors.cc
asan_fake_stack.cc
asan_flags.cc
asan_globals.cc
+ asan_globals_win.cc
asan_interceptors.cc
asan_linux.cc
asan_mac.cc
@@ -35,14 +38,9 @@ include_directories(..)
set(ASAN_CFLAGS ${SANITIZER_COMMON_CFLAGS})
append_rtti_flag(OFF ASAN_CFLAGS)
-set(ASAN_COMMON_DEFINITIONS
- ASAN_HAS_EXCEPTIONS=1)
-
set(ASAN_DYNAMIC_LINK_FLAGS)
if(ANDROID)
- list(APPEND ASAN_COMMON_DEFINITIONS
- ASAN_LOW_MEMORY=1)
# On Android, -z global does not do what it is documented to do.
# On Android, -z global moves the library ahead in the lookup order,
# placing it right after the LD_PRELOADs. This is used to compensate for the fact
@@ -105,8 +103,7 @@ if(NOT APPLE)
endif()
# Build ASan runtimes shipped with Clang.
-add_custom_target(asan)
-set_target_properties(asan PROPERTIES FOLDER "Compiler-RT Misc")
+add_compiler_rt_component(asan)
if(APPLE)
add_compiler_rt_runtime(clang_rt.asan
@@ -207,15 +204,25 @@ else()
STATIC
ARCHS ${arch}
SOURCES asan_win_dll_thunk.cc
+ asan_globals_win.cc
$<TARGET_OBJECTS:RTInterception.${arch}>
CFLAGS ${ASAN_CFLAGS} -DASAN_DLL_THUNK
DEFS ${ASAN_COMMON_DEFINITIONS}
PARENT_TARGET asan)
+
+ set(DYNAMIC_RUNTIME_THUNK_CFLAGS "-DASAN_DYNAMIC_RUNTIME_THUNK")
+ if(MSVC)
+ list(APPEND DYNAMIC_RUNTIME_THUNK_CFLAGS "-Zl")
+ elseif(CMAKE_C_COMPILER_ID MATCHES Clang)
+ list(APPEND DYNAMIC_RUNTIME_THUNK_CFLAGS "-nodefaultlibs")
+ endif()
+
add_compiler_rt_runtime(clang_rt.asan_dynamic_runtime_thunk
STATIC
ARCHS ${arch}
SOURCES asan_win_dynamic_runtime_thunk.cc
- CFLAGS ${ASAN_CFLAGS} -DASAN_DYNAMIC_RUNTIME_THUNK -Zl
+ asan_globals_win.cc
+ CFLAGS ${ASAN_CFLAGS} ${DYNAMIC_RUNTIME_THUNK_CFLAGS}
DEFS ${ASAN_COMMON_DEFINITIONS}
PARENT_TARGET asan)
endif()
@@ -223,7 +230,6 @@ else()
endif()
add_compiler_rt_resource_file(asan_blacklist asan_blacklist.txt asan)
-add_dependencies(compiler-rt asan)
add_subdirectory(scripts)
diff --git a/lib/asan/asan_activation.cc b/lib/asan/asan_activation.cc
index a5ace85038a6..bb41a0eb559e 100644
--- a/lib/asan/asan_activation.cc
+++ b/lib/asan/asan_activation.cc
@@ -79,11 +79,13 @@ static struct AsanDeactivatedFlags {
Report(
"quarantine_size_mb %d, max_redzone %d, poison_heap %d, "
"malloc_context_size %d, alloc_dealloc_mismatch %d, "
- "allocator_may_return_null %d, coverage %d, coverage_dir %s\n",
+ "allocator_may_return_null %d, coverage %d, coverage_dir %s, "
+ "allocator_release_to_os_interval_ms %d\n",
allocator_options.quarantine_size_mb, allocator_options.max_redzone,
poison_heap, malloc_context_size,
allocator_options.alloc_dealloc_mismatch,
- allocator_options.may_return_null, coverage, coverage_dir);
+ allocator_options.may_return_null, coverage, coverage_dir,
+ allocator_options.release_to_os_interval_ms);
}
} asan_deactivated_flags;
diff --git a/lib/asan/asan_activation_flags.inc b/lib/asan/asan_activation_flags.inc
index d4c089ec6538..67440e6fde03 100644
--- a/lib/asan/asan_activation_flags.inc
+++ b/lib/asan/asan_activation_flags.inc
@@ -33,3 +33,4 @@ COMMON_ACTIVATION_FLAG(bool, coverage)
COMMON_ACTIVATION_FLAG(const char *, coverage_dir)
COMMON_ACTIVATION_FLAG(int, verbosity)
COMMON_ACTIVATION_FLAG(bool, help)
+COMMON_ACTIVATION_FLAG(s32, allocator_release_to_os_interval_ms)
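The new allocator_release_to_os_interval_ms activation flag is registered as a
common flag, so it can be supplied through ASAN_OPTIONS or through the
documented __asan_default_options() hook. A minimal sketch of the latter,
assuming an ASan-instrumented binary; the 5000 ms interval is an arbitrary
example value, not a recommendation:

    // Sketch: provide a process-wide default for the new flag.
    // __asan_default_options() is ASan's documented user override hook.
    extern "C" const char *__asan_default_options() {
      return "allocator_release_to_os_interval_ms=5000";
    }

    int main() { return 0; }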
diff --git a/lib/asan/asan_allocator.cc b/lib/asan/asan_allocator.cc
index 6a5d227ca54c..36bd04689dbf 100644
--- a/lib/asan/asan_allocator.cc
+++ b/lib/asan/asan_allocator.cc
@@ -207,25 +207,27 @@ QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
quarantine_size_mb = f->quarantine_size_mb;
+ thread_local_quarantine_size_kb = f->thread_local_quarantine_size_kb;
min_redzone = f->redzone;
max_redzone = f->max_redzone;
may_return_null = cf->allocator_may_return_null;
alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
+ release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms;
}
void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
f->quarantine_size_mb = quarantine_size_mb;
+ f->thread_local_quarantine_size_kb = thread_local_quarantine_size_kb;
f->redzone = min_redzone;
f->max_redzone = max_redzone;
cf->allocator_may_return_null = may_return_null;
f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
+ cf->allocator_release_to_os_interval_ms = release_to_os_interval_ms;
}
struct Allocator {
static const uptr kMaxAllowedMallocSize =
FIRST_32_SECOND_64(3UL << 30, 1ULL << 40);
- static const uptr kMaxThreadLocalQuarantine =
- FIRST_32_SECOND_64(1 << 18, 1 << 20);
AsanAllocator allocator;
AsanQuarantine quarantine;
@@ -254,7 +256,7 @@ struct Allocator {
void SharedInitCode(const AllocatorOptions &options) {
CheckOptions(options);
quarantine.Init((uptr)options.quarantine_size_mb << 20,
- kMaxThreadLocalQuarantine);
+ (uptr)options.thread_local_quarantine_size_kb << 10);
atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch,
memory_order_release);
atomic_store(&min_redzone, options.min_redzone, memory_order_release);
@@ -262,22 +264,59 @@ struct Allocator {
}
void Initialize(const AllocatorOptions &options) {
- allocator.Init(options.may_return_null);
+ allocator.Init(options.may_return_null, options.release_to_os_interval_ms);
SharedInitCode(options);
}
+ void RePoisonChunk(uptr chunk) {
+ // This could be a user-facing chunk (with redzones), or some internal
+ // housekeeping chunk, like TransferBatch. Start by assuming the former.
+ AsanChunk *ac = GetAsanChunk((void *)chunk);
+ uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)ac);
+ uptr beg = ac->Beg();
+ uptr end = ac->Beg() + ac->UsedSize(true);
+ uptr chunk_end = chunk + allocated_size;
+ if (chunk < beg && beg < end && end <= chunk_end) {
+ // Looks like a valid AsanChunk. Or maybe not. Be conservative and only
+ // poison the redzones.
+ PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
+ uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY);
+ FastPoisonShadowPartialRightRedzone(
+ end_aligned_down, end - end_aligned_down,
+ chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
+ } else {
+ // This can not be an AsanChunk. Poison everything. It may be reused as
+ // AsanChunk later.
+ PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
+ }
+ }
+
void ReInitialize(const AllocatorOptions &options) {
allocator.SetMayReturnNull(options.may_return_null);
+ allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
SharedInitCode(options);
+
+ // Poison all existing allocations' redzones.
+ if (CanPoisonMemory()) {
+ allocator.ForceLock();
+ allocator.ForEachChunk(
+ [](uptr chunk, void *alloc) {
+ ((Allocator *)alloc)->RePoisonChunk(chunk);
+ },
+ this);
+ allocator.ForceUnlock();
+ }
}
void GetOptions(AllocatorOptions *options) const {
options->quarantine_size_mb = quarantine.GetSize() >> 20;
+ options->thread_local_quarantine_size_kb = quarantine.GetCacheSize() >> 10;
options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
options->may_return_null = allocator.MayReturnNull();
options->alloc_dealloc_mismatch =
atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
+ options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs();
}
// -------------------- Helper methods. -------------------------
@@ -356,7 +395,7 @@ struct Allocator {
if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
(void*)size);
- return allocator.ReturnNullOrDie();
+ return allocator.ReturnNullOrDieOnBadRequest();
}
AsanThread *t = GetCurrentThread();
@@ -373,8 +412,7 @@ struct Allocator {
allocator.Allocate(cache, needed_size, 8, false, check_rss_limit);
}
- if (!allocated)
- return allocator.ReturnNullOrDie();
+ if (!allocated) return allocator.ReturnNullOrDieOnOOM();
if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
// Heap poisoning is enabled, but the allocator provides an unpoisoned
@@ -530,7 +568,7 @@ struct Allocator {
if (delete_size && flags()->new_delete_type_mismatch &&
delete_size != m->UsedSize()) {
- ReportNewDeleteSizeMismatch(p, m->UsedSize(), delete_size, stack);
+ ReportNewDeleteSizeMismatch(p, delete_size, stack);
}
QuarantineChunk(m, ptr, stack, alloc_type);
@@ -563,7 +601,7 @@ struct Allocator {
void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
if (CallocShouldReturnNullDueToOverflow(size, nmemb))
- return allocator.ReturnNullOrDie();
+ return allocator.ReturnNullOrDieOnBadRequest();
void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
// If the memory comes from the secondary allocator no need to clear it
// as it comes directly from mmap.
@@ -673,6 +711,9 @@ uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }
+AllocType AsanChunkView::GetAllocType() {
+ return (AllocType)chunk_->alloc_type;
+}
static StackTrace GetStackTraceFromId(u32 id) {
CHECK(id);
@@ -707,6 +748,9 @@ void GetAllocatorOptions(AllocatorOptions *options) {
AsanChunkView FindHeapChunkByAddress(uptr addr) {
return instance.FindHeapChunkByAddress(addr);
}
+AsanChunkView FindHeapChunkByAllocBeg(uptr addr) {
+ return AsanChunkView(instance.GetAsanChunk(reinterpret_cast<void*>(addr)));
+}
void AsanThreadLocalMallocStorage::CommitBack() {
instance.CommitBack(this);
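ReInitialize() above re-poisons every live chunk by handing
allocator.ForEachChunk a capture-less lambda plus the Allocator itself as a
void* cookie; a lambda without captures converts to a plain function pointer,
which is what the callback slot expects. A self-contained sketch of that idiom,
with FakeAllocator and Repoisoner invented for illustration (only the callback
shape mirrors the patch):

    #include <cstdio>
    #include <vector>

    // Invented stand-in for the allocator: a C-style callback plus a
    // user cookie, the same shape as ForEachChunk(callback, void *arg).
    struct FakeAllocator {
      std::vector<unsigned long> chunks{0x1000ul, 0x2000ul};
      void ForEachChunk(void (*cb)(unsigned long chunk, void *arg), void *arg) {
        for (unsigned long c : chunks) cb(c, arg);
      }
    };

    struct Repoisoner {
      int repoisoned = 0;
      void RePoisonChunk(unsigned long chunk) {
        std::printf("re-poisoning chunk at 0x%lx\n", chunk);
        ++repoisoned;
      }
    };

    int main() {
      FakeAllocator allocator;
      Repoisoner r;
      // The capture-less lambda decays to a function pointer; the object
      // travels through the void* cookie, as in ReInitialize().
      allocator.ForEachChunk(
          [](unsigned long chunk, void *arg) {
            static_cast<Repoisoner *>(arg)->RePoisonChunk(chunk);
          },
          &r);
      return 0;
    }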
diff --git a/lib/asan/asan_allocator.h b/lib/asan/asan_allocator.h
index 2f9f7aaf8316..51de67858721 100644
--- a/lib/asan/asan_allocator.h
+++ b/lib/asan/asan_allocator.h
@@ -33,10 +33,12 @@ struct AsanChunk;
struct AllocatorOptions {
u32 quarantine_size_mb;
+ u32 thread_local_quarantine_size_kb;
u16 min_redzone;
u16 max_redzone;
u8 may_return_null;
u8 alloc_dealloc_mismatch;
+ s32 release_to_os_interval_ms;
void SetFrom(const Flags *f, const CommonFlags *cf);
void CopyTo(Flags *f, CommonFlags *cf);
@@ -62,6 +64,7 @@ class AsanChunkView {
u32 GetFreeStackId();
StackTrace GetAllocStack();
StackTrace GetFreeStack();
+ AllocType GetAllocType();
bool AddrIsInside(uptr addr, uptr access_size, sptr *offset) {
if (addr >= Beg() && (addr + access_size) <= End()) {
*offset = addr - Beg();
@@ -90,6 +93,7 @@ class AsanChunkView {
};
AsanChunkView FindHeapChunkByAddress(uptr address);
+AsanChunkView FindHeapChunkByAllocBeg(uptr address);
// List of AsanChunks with total size.
class AsanChunkFifoList: public IntrusiveList<AsanChunk> {
@@ -117,18 +121,36 @@ struct AsanMapUnmapCallback {
# if defined(__powerpc64__)
const uptr kAllocatorSpace = 0xa0000000000ULL;
const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
+typedef DefaultSizeClassMap SizeClassMap;
+# elif defined(__aarch64__) && SANITIZER_ANDROID
+const uptr kAllocatorSpace = 0x3000000000ULL;
+const uptr kAllocatorSize = 0x2000000000ULL; // 128G.
+typedef VeryCompactSizeClassMap SizeClassMap;
# elif defined(__aarch64__)
-// AArch64/SANITIZIER_CAN_USER_ALLOCATOR64 is only for 42-bit VMA
+// AArch64/SANITIZER_CAN_USE_ALLOCATOR64 is only for 42-bit VMA
// so there is no need to use different values for different VMAs.
const uptr kAllocatorSpace = 0x10000000000ULL;
const uptr kAllocatorSize = 0x10000000000ULL; // 3T.
+typedef DefaultSizeClassMap SizeClassMap;
+# elif SANITIZER_WINDOWS
+const uptr kAllocatorSpace = ~(uptr)0;
+const uptr kAllocatorSize = 0x8000000000ULL; // 512G
+typedef DefaultSizeClassMap SizeClassMap;
# else
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
-# endif
typedef DefaultSizeClassMap SizeClassMap;
-typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
- SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
+# endif
+struct AP64 { // Allocator64 parameters. Deliberately using a short name.
+ static const uptr kSpaceBeg = kAllocatorSpace;
+ static const uptr kSpaceSize = kAllocatorSize;
+ static const uptr kMetadataSize = 0;
+ typedef __asan::SizeClassMap SizeClassMap;
+ typedef AsanMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+};
+
+typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#else // Fallback to SizeClassAllocator32.
static const uptr kRegionSizeLog = 20;
static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
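The AP64 struct replaces SizeClassAllocator64's growing template parameter list
with a single policy struct, so adding a new knob (such as kFlags here) no
longer forces every instantiation site to change. A reduced sketch of the
pattern; DemoParams and DemoAllocator are invented names, not the real
allocator interface:

    #include <cstdio>

    // Policy struct bundling what used to be separate template parameters.
    struct DemoParams {
      static constexpr unsigned long long kSpaceBeg = 0x600000000000ULL;
      static constexpr unsigned long long kSpaceSize = 0x40000000000ULL;
      static constexpr unsigned long long kMetadataSize = 0;
    };

    // One template parameter instead of five; new fields in the params
    // struct do not break existing DemoAllocator<...> spellings.
    template <class Params>
    struct DemoAllocator {
      static void PrintLayout() {
        std::printf("space: [0x%llx, 0x%llx), metadata: %llu bytes\n",
                    Params::kSpaceBeg, Params::kSpaceBeg + Params::kSpaceSize,
                    Params::kMetadataSize);
      }
    };

    int main() {
      DemoAllocator<DemoParams>::PrintLayout();
      return 0;
    }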
diff --git a/lib/asan/asan_debugging.cc b/lib/asan/asan_debugging.cc
index 7c3a8a73bd4e..37c5922dc0fc 100644
--- a/lib/asan/asan_debugging.cc
+++ b/lib/asan/asan_debugging.cc
@@ -14,74 +14,39 @@
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
+#include "asan_descriptions.h"
#include "asan_flags.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_report.h"
#include "asan_thread.h"
-namespace __asan {
-
-void GetInfoForStackVar(uptr addr, AddressDescription *descr, AsanThread *t) {
- descr->name[0] = 0;
- descr->region_address = 0;
- descr->region_size = 0;
- descr->region_kind = "stack";
+namespace {
+using namespace __asan;
- AsanThread::StackFrameAccess access;
- if (!t->GetStackFrameAccessByAddr(addr, &access))
- return;
+static void FindInfoForStackVar(uptr addr, const char *frame_descr, uptr offset,
+ char *name, uptr name_size,
+ uptr &region_address, uptr &region_size) {
InternalMmapVector<StackVarDescr> vars(16);
- if (!ParseFrameDescription(access.frame_descr, &vars)) {
+ if (!ParseFrameDescription(frame_descr, &vars)) {
return;
}
for (uptr i = 0; i < vars.size(); i++) {
- if (access.offset <= vars[i].beg + vars[i].size) {
- internal_strncat(descr->name, vars[i].name_pos,
- Min(descr->name_size, vars[i].name_len));
- descr->region_address = addr - (access.offset - vars[i].beg);
- descr->region_size = vars[i].size;
+ if (offset <= vars[i].beg + vars[i].size) {
+ // We use name_len + 1 because strlcpy will guarantee a \0 at the end, so
+ // if we're limiting the copy due to name_len, we add 1 to ensure we copy
+ // the whole name and then terminate with '\0'.
+ internal_strlcpy(name, vars[i].name_pos,
+ Min(name_size, vars[i].name_len + 1));
+ region_address = addr - (offset - vars[i].beg);
+ region_size = vars[i].size;
return;
}
}
}
-void GetInfoForHeapAddress(uptr addr, AddressDescription *descr) {
- AsanChunkView chunk = FindHeapChunkByAddress(addr);
-
- descr->name[0] = 0;
- descr->region_address = 0;
- descr->region_size = 0;
-
- if (!chunk.IsValid()) {
- descr->region_kind = "heap-invalid";
- return;
- }
-
- descr->region_address = chunk.Beg();
- descr->region_size = chunk.UsedSize();
- descr->region_kind = "heap";
-}
-
-void AsanLocateAddress(uptr addr, AddressDescription *descr) {
- if (DescribeAddressIfShadow(addr, descr, /* print */ false)) {
- return;
- }
- if (GetInfoForAddressIfGlobal(addr, descr)) {
- return;
- }
- asanThreadRegistry().Lock();
- AsanThread *thread = FindThreadByStackAddress(addr);
- asanThreadRegistry().Unlock();
- if (thread) {
- GetInfoForStackVar(addr, descr, thread);
- return;
- }
- GetInfoForHeapAddress(addr, descr);
-}
-
-static uptr AsanGetStack(uptr addr, uptr *trace, u32 size, u32 *thread_id,
+uptr AsanGetStack(uptr addr, uptr *trace, u32 size, u32 *thread_id,
bool alloc_stack) {
AsanChunkView chunk = FindHeapChunkByAddress(addr);
if (!chunk.IsValid()) return 0;
@@ -108,18 +73,58 @@ static uptr AsanGetStack(uptr addr, uptr *trace, u32 size, u32 *thread_id,
return 0;
}
-} // namespace __asan
-
-using namespace __asan;
+} // namespace
SANITIZER_INTERFACE_ATTRIBUTE
const char *__asan_locate_address(uptr addr, char *name, uptr name_size,
- uptr *region_address, uptr *region_size) {
- AddressDescription descr = { name, name_size, 0, 0, nullptr };
- AsanLocateAddress(addr, &descr);
- if (region_address) *region_address = descr.region_address;
- if (region_size) *region_size = descr.region_size;
- return descr.region_kind;
+ uptr *region_address_ptr,
+ uptr *region_size_ptr) {
+ AddressDescription descr(addr);
+ uptr region_address = 0;
+ uptr region_size = 0;
+ const char *region_kind = nullptr;
+ if (name && name_size > 0) name[0] = 0;
+
+ if (auto shadow = descr.AsShadow()) {
+ // region_{address,size} are already 0
+ switch (shadow->kind) {
+ case kShadowKindLow:
+ region_kind = "low shadow";
+ break;
+ case kShadowKindGap:
+ region_kind = "shadow gap";
+ break;
+ case kShadowKindHigh:
+ region_kind = "high shadow";
+ break;
+ }
+ } else if (auto heap = descr.AsHeap()) {
+ region_kind = "heap";
+ region_address = heap->chunk_access.chunk_begin;
+ region_size = heap->chunk_access.chunk_size;
+ } else if (auto stack = descr.AsStack()) {
+ region_kind = "stack";
+ if (!stack->frame_descr) {
+ // region_{address,size} are already 0
+ } else {
+ FindInfoForStackVar(addr, stack->frame_descr, stack->offset, name,
+ name_size, region_address, region_size);
+ }
+ } else if (auto global = descr.AsGlobal()) {
+ region_kind = "global";
+ auto &g = global->globals[0];
+ internal_strlcpy(name, g.name, name_size);
+ region_address = g.beg;
+ region_size = g.size;
+ } else {
+ // region_{address,size} are already 0
+ region_kind = "heap-invalid";
+ }
+
+ CHECK(region_kind);
+ if (region_address_ptr) *region_address_ptr = region_address;
+ if (region_size_ptr) *region_size_ptr = region_size;
+ return region_kind;
}
SANITIZER_INTERFACE_ATTRIBUTE
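The rewritten __asan_locate_address keeps the public signature declared in
<sanitizer/asan_interface.h>, so existing callers only see the improved
classification. A hedged usage sketch; compile with -fsanitize=address, and
note that the exact region kinds and sizes come from the runtime:

    #include <sanitizer/asan_interface.h>
    #include <cstdio>
    #include <cstdlib>

    int main() {
      char *p = static_cast<char *>(malloc(32));
      char name[64];
      void *region_address = nullptr;
      size_t region_size = 0;
      // For an interior heap pointer this is expected to report "heap"
      // along with the enclosing 32-byte chunk.
      const char *kind = __asan_locate_address(p + 8, name, sizeof(name),
                                               &region_address, &region_size);
      std::printf("kind=%s region=%p size=%zu\n", kind, region_address,
                  region_size);
      free(p);
      return 0;
    }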
diff --git a/lib/asan/asan_descriptions.cc b/lib/asan/asan_descriptions.cc
new file mode 100644
index 000000000000..0ecbe091c8b1
--- /dev/null
+++ b/lib/asan/asan_descriptions.cc
@@ -0,0 +1,486 @@
+//===-- asan_descriptions.cc ------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan functions for getting information about an address and/or printing it.
+//===----------------------------------------------------------------------===//
+
+#include "asan_descriptions.h"
+#include "asan_mapping.h"
+#include "asan_report.h"
+#include "asan_stack.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+
+namespace __asan {
+
+// Return " (thread_name) " or an empty string if the name is empty.
+const char *ThreadNameWithParenthesis(AsanThreadContext *t, char buff[],
+ uptr buff_len) {
+ const char *name = t->name;
+ if (name[0] == '\0') return "";
+ buff[0] = 0;
+ internal_strncat(buff, " (", 3);
+ internal_strncat(buff, name, buff_len - 4);
+ internal_strncat(buff, ")", 2);
+ return buff;
+}
+
+const char *ThreadNameWithParenthesis(u32 tid, char buff[], uptr buff_len) {
+ if (tid == kInvalidTid) return "";
+ asanThreadRegistry().CheckLocked();
+ AsanThreadContext *t = GetThreadContextByTidLocked(tid);
+ return ThreadNameWithParenthesis(t, buff, buff_len);
+}
+
+void DescribeThread(AsanThreadContext *context) {
+ CHECK(context);
+ asanThreadRegistry().CheckLocked();
+ // No need to announce the main thread.
+ if (context->tid == 0 || context->announced) {
+ return;
+ }
+ context->announced = true;
+ char tname[128];
+ InternalScopedString str(1024);
+ str.append("Thread T%d%s", context->tid,
+ ThreadNameWithParenthesis(context->tid, tname, sizeof(tname)));
+ if (context->parent_tid == kInvalidTid) {
+ str.append(" created by unknown thread\n");
+ Printf("%s", str.data());
+ return;
+ }
+ str.append(
+ " created by T%d%s here:\n", context->parent_tid,
+ ThreadNameWithParenthesis(context->parent_tid, tname, sizeof(tname)));
+ Printf("%s", str.data());
+ StackDepotGet(context->stack_id).Print();
+ // Recursively describe the parent thread if needed.
+ if (flags()->print_full_thread_history) {
+ AsanThreadContext *parent_context =
+ GetThreadContextByTidLocked(context->parent_tid);
+ DescribeThread(parent_context);
+ }
+}
+
+// Shadow descriptions
+static bool GetShadowKind(uptr addr, ShadowKind *shadow_kind) {
+ CHECK(!AddrIsInMem(addr));
+ if (AddrIsInShadowGap(addr)) {
+ *shadow_kind = kShadowKindGap;
+ } else if (AddrIsInHighShadow(addr)) {
+ *shadow_kind = kShadowKindHigh;
+ } else if (AddrIsInLowShadow(addr)) {
+ *shadow_kind = kShadowKindLow;
+ } else {
+ CHECK(0 && "Address is not in memory and not in shadow?");
+ return false;
+ }
+ return true;
+}
+
+bool DescribeAddressIfShadow(uptr addr) {
+ ShadowAddressDescription descr;
+ if (!GetShadowAddressInformation(addr, &descr)) return false;
+ descr.Print();
+ return true;
+}
+
+bool GetShadowAddressInformation(uptr addr, ShadowAddressDescription *descr) {
+ if (AddrIsInMem(addr)) return false;
+ ShadowKind shadow_kind;
+ if (!GetShadowKind(addr, &shadow_kind)) return false;
+ if (shadow_kind != kShadowKindGap) descr->shadow_byte = *(u8 *)addr;
+ descr->addr = addr;
+ descr->kind = shadow_kind;
+ return true;
+}
+
+// Heap descriptions
+static void GetAccessToHeapChunkInformation(ChunkAccess *descr,
+ AsanChunkView chunk, uptr addr,
+ uptr access_size) {
+ descr->bad_addr = addr;
+ if (chunk.AddrIsAtLeft(addr, access_size, &descr->offset)) {
+ descr->access_type = kAccessTypeLeft;
+ } else if (chunk.AddrIsAtRight(addr, access_size, &descr->offset)) {
+ descr->access_type = kAccessTypeRight;
+ if (descr->offset < 0) {
+ descr->bad_addr -= descr->offset;
+ descr->offset = 0;
+ }
+ } else if (chunk.AddrIsInside(addr, access_size, &descr->offset)) {
+ descr->access_type = kAccessTypeInside;
+ } else {
+ descr->access_type = kAccessTypeUnknown;
+ }
+ descr->chunk_begin = chunk.Beg();
+ descr->chunk_size = chunk.UsedSize();
+ descr->alloc_type = chunk.GetAllocType();
+}
+
+static void PrintHeapChunkAccess(uptr addr, const ChunkAccess &descr) {
+ Decorator d;
+ InternalScopedString str(4096);
+ str.append("%s", d.Location());
+ switch (descr.access_type) {
+ case kAccessTypeLeft:
+ str.append("%p is located %zd bytes to the left of",
+ (void *)descr.bad_addr, descr.offset);
+ break;
+ case kAccessTypeRight:
+ str.append("%p is located %zd bytes to the right of",
+ (void *)descr.bad_addr, descr.offset);
+ break;
+ case kAccessTypeInside:
+ str.append("%p is located %zd bytes inside of", (void *)descr.bad_addr,
+ descr.offset);
+ break;
+ case kAccessTypeUnknown:
+ str.append(
+ "%p is located somewhere around (this is AddressSanitizer bug!)",
+ (void *)descr.bad_addr);
+ }
+ str.append(" %zu-byte region [%p,%p)\n", descr.chunk_size,
+ (void *)descr.chunk_begin,
+ (void *)(descr.chunk_begin + descr.chunk_size));
+ str.append("%s", d.EndLocation());
+ Printf("%s", str.data());
+}
+
+bool GetHeapAddressInformation(uptr addr, uptr access_size,
+ HeapAddressDescription *descr) {
+ AsanChunkView chunk = FindHeapChunkByAddress(addr);
+ if (!chunk.IsValid()) {
+ return false;
+ }
+ descr->addr = addr;
+ GetAccessToHeapChunkInformation(&descr->chunk_access, chunk, addr,
+ access_size);
+ CHECK_NE(chunk.AllocTid(), kInvalidTid);
+ descr->alloc_tid = chunk.AllocTid();
+ descr->alloc_stack_id = chunk.GetAllocStackId();
+ descr->free_tid = chunk.FreeTid();
+ if (descr->free_tid != kInvalidTid)
+ descr->free_stack_id = chunk.GetFreeStackId();
+ return true;
+}
+
+static StackTrace GetStackTraceFromId(u32 id) {
+ CHECK(id);
+ StackTrace res = StackDepotGet(id);
+ CHECK(res.trace);
+ return res;
+}
+
+bool DescribeAddressIfHeap(uptr addr, uptr access_size) {
+ HeapAddressDescription descr;
+ if (!GetHeapAddressInformation(addr, access_size, &descr)) {
+ Printf(
+ "AddressSanitizer can not describe address in more detail "
+ "(wild memory access suspected).\n");
+ return false;
+ }
+ descr.Print();
+ return true;
+}
+
+// Stack descriptions
+bool GetStackAddressInformation(uptr addr, uptr access_size,
+ StackAddressDescription *descr) {
+ AsanThread *t = FindThreadByStackAddress(addr);
+ if (!t) return false;
+
+ descr->addr = addr;
+ descr->tid = t->tid();
+ // Try to fetch precise stack frame for this access.
+ AsanThread::StackFrameAccess access;
+ if (!t->GetStackFrameAccessByAddr(addr, &access)) {
+ descr->frame_descr = nullptr;
+ return true;
+ }
+
+ descr->offset = access.offset;
+ descr->access_size = access_size;
+ descr->frame_pc = access.frame_pc;
+ descr->frame_descr = access.frame_descr;
+
+#if SANITIZER_PPC64V1
+ // On PowerPC64 ELFv1, the address of a function actually points to a
+ // three-doubleword data structure with the first field containing
+ // the address of the function's code.
+ descr->frame_pc = *reinterpret_cast<uptr *>(descr->frame_pc);
+#endif
+ descr->frame_pc += 16;
+
+ return true;
+}
+
+static void PrintAccessAndVarIntersection(const StackVarDescr &var, uptr addr,
+ uptr access_size, uptr prev_var_end,
+ uptr next_var_beg) {
+ uptr var_end = var.beg + var.size;
+ uptr addr_end = addr + access_size;
+ const char *pos_descr = nullptr;
+ // If the variable [var.beg, var_end) is the nearest variable to the
+ // current memory access, indicate it in the log.
+ if (addr >= var.beg) {
+ if (addr_end <= var_end)
+ pos_descr = "is inside"; // May happen if this is a use-after-return.
+ else if (addr < var_end)
+ pos_descr = "partially overflows";
+ else if (addr_end <= next_var_beg &&
+ next_var_beg - addr_end >= addr - var_end)
+ pos_descr = "overflows";
+ } else {
+ if (addr_end > var.beg)
+ pos_descr = "partially underflows";
+ else if (addr >= prev_var_end && addr - prev_var_end >= var.beg - addr_end)
+ pos_descr = "underflows";
+ }
+ InternalScopedString str(1024);
+ str.append(" [%zd, %zd)", var.beg, var_end);
+ // Render variable name.
+ str.append(" '");
+ for (uptr i = 0; i < var.name_len; ++i) {
+ str.append("%c", var.name_pos[i]);
+ }
+ str.append("'");
+ if (pos_descr) {
+ Decorator d;
+ // FIXME: we may want to also print the size of the access here,
+ // but in case of accesses generated by memset it may be confusing.
+ str.append("%s <== Memory access at offset %zd %s this variable%s\n",
+ d.Location(), addr, pos_descr, d.EndLocation());
+ } else {
+ str.append("\n");
+ }
+ Printf("%s", str.data());
+}
+
+bool DescribeAddressIfStack(uptr addr, uptr access_size) {
+ StackAddressDescription descr;
+ if (!GetStackAddressInformation(addr, access_size, &descr)) return false;
+ descr.Print();
+ return true;
+}
+
+// Global descriptions
+static void DescribeAddressRelativeToGlobal(uptr addr, uptr access_size,
+ const __asan_global &g) {
+ InternalScopedString str(4096);
+ Decorator d;
+ str.append("%s", d.Location());
+ if (addr < g.beg) {
+ str.append("%p is located %zd bytes to the left", (void *)addr,
+ g.beg - addr);
+ } else if (addr + access_size > g.beg + g.size) {
+ if (addr < g.beg + g.size) addr = g.beg + g.size;
+ str.append("%p is located %zd bytes to the right", (void *)addr,
+ addr - (g.beg + g.size));
+ } else {
+ // Can it happen?
+ str.append("%p is located %zd bytes inside", (void *)addr, addr - g.beg);
+ }
+ str.append(" of global variable '%s' defined in '",
+ MaybeDemangleGlobalName(g.name));
+ PrintGlobalLocation(&str, g);
+ str.append("' (0x%zx) of size %zu\n", g.beg, g.size);
+ str.append("%s", d.EndLocation());
+ PrintGlobalNameIfASCII(&str, g);
+ Printf("%s", str.data());
+}
+
+bool GetGlobalAddressInformation(uptr addr, uptr access_size,
+ GlobalAddressDescription *descr) {
+ descr->addr = addr;
+ int globals_num = GetGlobalsForAddress(addr, descr->globals, descr->reg_sites,
+ ARRAY_SIZE(descr->globals));
+ descr->size = globals_num;
+ descr->access_size = access_size;
+ return globals_num != 0;
+}
+
+bool DescribeAddressIfGlobal(uptr addr, uptr access_size,
+ const char *bug_type) {
+ GlobalAddressDescription descr;
+ if (!GetGlobalAddressInformation(addr, access_size, &descr)) return false;
+
+ descr.Print(bug_type);
+ return true;
+}
+
+void ShadowAddressDescription::Print() const {
+ Printf("Address %p is located in the %s area.\n", addr, ShadowNames[kind]);
+}
+
+void GlobalAddressDescription::Print(const char *bug_type) const {
+ for (int i = 0; i < size; i++) {
+ DescribeAddressRelativeToGlobal(addr, access_size, globals[i]);
+ if (bug_type &&
+ 0 == internal_strcmp(bug_type, "initialization-order-fiasco") &&
+ reg_sites[i]) {
+ Printf(" registered at:\n");
+ StackDepotGet(reg_sites[i]).Print();
+ }
+ }
+}
+
+void StackAddressDescription::Print() const {
+ Decorator d;
+ char tname[128];
+ Printf("%s", d.Location());
+ Printf("Address %p is located in stack of thread T%d%s", addr, tid,
+ ThreadNameWithParenthesis(tid, tname, sizeof(tname)));
+
+ if (!frame_descr) {
+ Printf("%s\n", d.EndLocation());
+ return;
+ }
+ Printf(" at offset %zu in frame%s\n", offset, d.EndLocation());
+
+ // Now we print the frame where the alloca has happened.
+ // We print this frame as a stack trace with one element.
+ // The symbolizer may print more than one frame if inlining was involved.
+ // The frame numbers may be different than those in the stack trace printed
+ // previously. That's unfortunate, but I have no better solution,
+ // especially given that the alloca may be from entirely different place
+ // (e.g. use-after-scope, or different thread's stack).
+ Printf("%s", d.EndLocation());
+ StackTrace alloca_stack(&frame_pc, 1);
+ alloca_stack.Print();
+
+ InternalMmapVector<StackVarDescr> vars(16);
+ if (!ParseFrameDescription(frame_descr, &vars)) {
+ Printf(
+ "AddressSanitizer can't parse the stack frame "
+ "descriptor: |%s|\n",
+ frame_descr);
+ // 'addr' is a stack address, so return true even if we can't parse frame
+ return;
+ }
+ uptr n_objects = vars.size();
+ // Report the number of stack objects.
+ Printf(" This frame has %zu object(s):\n", n_objects);
+
+ // Report all objects in this frame.
+ for (uptr i = 0; i < n_objects; i++) {
+ uptr prev_var_end = i ? vars[i - 1].beg + vars[i - 1].size : 0;
+ uptr next_var_beg = i + 1 < n_objects ? vars[i + 1].beg : ~(0UL);
+ PrintAccessAndVarIntersection(vars[i], offset, access_size, prev_var_end,
+ next_var_beg);
+ }
+ Printf(
+ "HINT: this may be a false positive if your program uses "
+ "some custom stack unwind mechanism or swapcontext\n");
+ if (SANITIZER_WINDOWS)
+ Printf(" (longjmp, SEH and C++ exceptions *are* supported)\n");
+ else
+ Printf(" (longjmp and C++ exceptions *are* supported)\n");
+
+ DescribeThread(GetThreadContextByTidLocked(tid));
+}
+
+void HeapAddressDescription::Print() const {
+ PrintHeapChunkAccess(addr, chunk_access);
+
+ asanThreadRegistry().CheckLocked();
+ AsanThreadContext *alloc_thread = GetThreadContextByTidLocked(alloc_tid);
+ StackTrace alloc_stack = GetStackTraceFromId(alloc_stack_id);
+
+ char tname[128];
+ Decorator d;
+ AsanThreadContext *free_thread = nullptr;
+ if (free_tid != kInvalidTid) {
+ free_thread = GetThreadContextByTidLocked(free_tid);
+ Printf("%sfreed by thread T%d%s here:%s\n", d.Allocation(),
+ free_thread->tid,
+ ThreadNameWithParenthesis(free_thread, tname, sizeof(tname)),
+ d.EndAllocation());
+ StackTrace free_stack = GetStackTraceFromId(free_stack_id);
+ free_stack.Print();
+ Printf("%spreviously allocated by thread T%d%s here:%s\n", d.Allocation(),
+ alloc_thread->tid,
+ ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)),
+ d.EndAllocation());
+ } else {
+ Printf("%sallocated by thread T%d%s here:%s\n", d.Allocation(),
+ alloc_thread->tid,
+ ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)),
+ d.EndAllocation());
+ }
+ alloc_stack.Print();
+ DescribeThread(GetCurrentThread());
+ if (free_thread) DescribeThread(free_thread);
+ DescribeThread(alloc_thread);
+}
+
+AddressDescription::AddressDescription(uptr addr, uptr access_size,
+ bool shouldLockThreadRegistry) {
+ if (GetShadowAddressInformation(addr, &data.shadow)) {
+ data.kind = kAddressKindShadow;
+ return;
+ }
+ if (GetHeapAddressInformation(addr, access_size, &data.heap)) {
+ data.kind = kAddressKindHeap;
+ return;
+ }
+
+ bool isStackMemory = false;
+ if (shouldLockThreadRegistry) {
+ ThreadRegistryLock l(&asanThreadRegistry());
+ isStackMemory = GetStackAddressInformation(addr, access_size, &data.stack);
+ } else {
+ isStackMemory = GetStackAddressInformation(addr, access_size, &data.stack);
+ }
+ if (isStackMemory) {
+ data.kind = kAddressKindStack;
+ return;
+ }
+
+ if (GetGlobalAddressInformation(addr, access_size, &data.global)) {
+ data.kind = kAddressKindGlobal;
+ return;
+ }
+ data.kind = kAddressKindWild;
+ // Keep the raw address so Address() and Print() work for wild pointers.
+ data.addr = addr;
+}
+
+void PrintAddressDescription(uptr addr, uptr access_size,
+ const char *bug_type) {
+ ShadowAddressDescription shadow_descr;
+ if (GetShadowAddressInformation(addr, &shadow_descr)) {
+ shadow_descr.Print();
+ return;
+ }
+
+ GlobalAddressDescription global_descr;
+ if (GetGlobalAddressInformation(addr, access_size, &global_descr)) {
+ global_descr.Print(bug_type);
+ return;
+ }
+
+ StackAddressDescription stack_descr;
+ if (GetStackAddressInformation(addr, access_size, &stack_descr)) {
+ stack_descr.Print();
+ return;
+ }
+
+ HeapAddressDescription heap_descr;
+ if (GetHeapAddressInformation(addr, access_size, &heap_descr)) {
+ heap_descr.Print();
+ return;
+ }
+
+ // We exhausted our possibilities. Bail out.
+ Printf(
+ "AddressSanitizer can not describe address in more detail "
+ "(wild memory access suspected).\n");
+}
+} // namespace __asan
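Both PrintAddressDescription() and the AddressDescription constructor classify
an address by probing each region kind in a fixed order and stopping at the
first match, with a wild-pointer fallback. A stripped-down sketch of that
first-match chain; the address ranges are invented for illustration:

    #include <cstdio>

    enum Kind { kWild, kShadow, kHeap, kStack, kGlobal };

    // Invented ranges standing in for the real shadow/heap/stack/global
    // lookups; each predicate plays the role of a Get*AddressInformation().
    static bool InShadow(unsigned long a) { return a < 0x100; }
    static bool InHeap(unsigned long a) { return a >= 0x100 && a < 0x200; }
    static bool InStack(unsigned long a) { return a >= 0x200 && a < 0x300; }
    static bool InGlobal(unsigned long a) { return a >= 0x300 && a < 0x400; }

    static Kind Classify(unsigned long addr) {
      // First match wins; anything unclaimed is reported as wild.
      if (InShadow(addr)) return kShadow;
      if (InHeap(addr)) return kHeap;
      if (InStack(addr)) return kStack;
      if (InGlobal(addr)) return kGlobal;
      return kWild;
    }

    int main() {
      const char *names[] = {"wild", "shadow", "heap", "stack", "global"};
      const unsigned long probes[] = {0x10, 0x150, 0x250, 0x350, 0x999};
      for (unsigned long a : probes)
        std::printf("0x%lx -> %s\n", a, names[Classify(a)]);
      return 0;
    }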
diff --git a/lib/asan/asan_descriptions.h b/lib/asan/asan_descriptions.h
new file mode 100644
index 000000000000..0ee677eb7d0e
--- /dev/null
+++ b/lib/asan/asan_descriptions.h
@@ -0,0 +1,253 @@
+//===-- asan_descriptions.h -------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan-private header for asan_descriptions.cc.
+// TODO(filcab): Most struct definitions should move to the interface headers.
+//===----------------------------------------------------------------------===//
+#ifndef ASAN_DESCRIPTIONS_H
+#define ASAN_DESCRIPTIONS_H
+
+#include "asan_allocator.h"
+#include "asan_thread.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_report_decorator.h"
+
+namespace __asan {
+
+void DescribeThread(AsanThreadContext *context);
+static inline void DescribeThread(AsanThread *t) {
+ if (t) DescribeThread(t->context());
+}
+const char *ThreadNameWithParenthesis(AsanThreadContext *t, char buff[],
+ uptr buff_len);
+const char *ThreadNameWithParenthesis(u32 tid, char buff[], uptr buff_len);
+
+class Decorator : public __sanitizer::SanitizerCommonDecorator {
+ public:
+ Decorator() : SanitizerCommonDecorator() {}
+ const char *Access() { return Blue(); }
+ const char *EndAccess() { return Default(); }
+ const char *Location() { return Green(); }
+ const char *EndLocation() { return Default(); }
+ const char *Allocation() { return Magenta(); }
+ const char *EndAllocation() { return Default(); }
+
+ const char *ShadowByte(u8 byte) {
+ switch (byte) {
+ case kAsanHeapLeftRedzoneMagic:
+ case kAsanArrayCookieMagic:
+ return Red();
+ case kAsanHeapFreeMagic:
+ return Magenta();
+ case kAsanStackLeftRedzoneMagic:
+ case kAsanStackMidRedzoneMagic:
+ case kAsanStackRightRedzoneMagic:
+ return Red();
+ case kAsanStackAfterReturnMagic:
+ return Magenta();
+ case kAsanInitializationOrderMagic:
+ return Cyan();
+ case kAsanUserPoisonedMemoryMagic:
+ case kAsanContiguousContainerOOBMagic:
+ case kAsanAllocaLeftMagic:
+ case kAsanAllocaRightMagic:
+ return Blue();
+ case kAsanStackUseAfterScopeMagic:
+ return Magenta();
+ case kAsanGlobalRedzoneMagic:
+ return Red();
+ case kAsanInternalHeapMagic:
+ return Yellow();
+ case kAsanIntraObjectRedzone:
+ return Yellow();
+ default:
+ return Default();
+ }
+ }
+ const char *EndShadowByte() { return Default(); }
+ const char *MemoryByte() { return Magenta(); }
+ const char *EndMemoryByte() { return Default(); }
+};
+
+enum ShadowKind : u8 {
+ kShadowKindLow,
+ kShadowKindGap,
+ kShadowKindHigh,
+};
+static const char *const ShadowNames[] = {"low shadow", "shadow gap",
+ "high shadow"};
+
+struct ShadowAddressDescription {
+ uptr addr;
+ ShadowKind kind;
+ u8 shadow_byte;
+
+ void Print() const;
+};
+
+bool GetShadowAddressInformation(uptr addr, ShadowAddressDescription *descr);
+bool DescribeAddressIfShadow(uptr addr);
+
+enum AccessType {
+ kAccessTypeLeft,
+ kAccessTypeRight,
+ kAccessTypeInside,
+ kAccessTypeUnknown, // This means we have an AddressSanitizer bug!
+};
+
+struct ChunkAccess {
+ uptr bad_addr;
+ sptr offset;
+ uptr chunk_begin;
+ uptr chunk_size;
+ u32 access_type : 2;
+ u32 alloc_type : 2;
+};
+
+struct HeapAddressDescription {
+ uptr addr;
+ uptr alloc_tid;
+ uptr free_tid;
+ u32 alloc_stack_id;
+ u32 free_stack_id;
+ ChunkAccess chunk_access;
+
+ void Print() const;
+};
+
+bool GetHeapAddressInformation(uptr addr, uptr access_size,
+ HeapAddressDescription *descr);
+bool DescribeAddressIfHeap(uptr addr, uptr access_size = 1);
+
+struct StackAddressDescription {
+ uptr addr;
+ uptr tid;
+ uptr offset;
+ uptr frame_pc;
+ uptr access_size;
+ const char *frame_descr;
+
+ void Print() const;
+};
+
+bool GetStackAddressInformation(uptr addr, uptr access_size,
+ StackAddressDescription *descr);
+
+struct GlobalAddressDescription {
+ uptr addr;
+ // Assume address is close to at most four globals.
+ static const int kMaxGlobals = 4;
+ __asan_global globals[kMaxGlobals];
+ u32 reg_sites[kMaxGlobals];
+ uptr access_size;
+ u8 size;
+
+ void Print(const char *bug_type = "") const;
+};
+
+bool GetGlobalAddressInformation(uptr addr, uptr access_size,
+ GlobalAddressDescription *descr);
+bool DescribeAddressIfGlobal(uptr addr, uptr access_size, const char *bug_type);
+
+// General function to describe an address. Will try to describe the address as
+// a shadow, global (variable), stack, or heap address.
+// bug_type is optional and is used for checking if we're reporting an
+// initialization-order-fiasco
+// The proper access_size should be passed for stack, global, and heap
+// addresses. Defaults to 1.
+// Each of the *AddressDescription functions has its own Print() member, which
+// may take access_size and bug_type parameters if needed.
+void PrintAddressDescription(uptr addr, uptr access_size = 1,
+ const char *bug_type = "");
+
+enum AddressKind {
+ kAddressKindWild,
+ kAddressKindShadow,
+ kAddressKindHeap,
+ kAddressKindStack,
+ kAddressKindGlobal,
+};
+
+class AddressDescription {
+ struct AddressDescriptionData {
+ AddressKind kind;
+ union {
+ ShadowAddressDescription shadow;
+ HeapAddressDescription heap;
+ StackAddressDescription stack;
+ GlobalAddressDescription global;
+ uptr addr;
+ };
+ };
+
+ AddressDescriptionData data;
+
+ public:
+ AddressDescription() = default;
+ // shouldLockThreadRegistry allows us to skip locking if we're sure we already
+ // have done it.
+ AddressDescription(uptr addr, bool shouldLockThreadRegistry = true)
+ : AddressDescription(addr, 1, shouldLockThreadRegistry) {}
+ AddressDescription(uptr addr, uptr access_size,
+ bool shouldLockThreadRegistry = true);
+
+ uptr Address() const {
+ switch (data.kind) {
+ case kAddressKindWild:
+ return data.addr;
+ case kAddressKindShadow:
+ return data.shadow.addr;
+ case kAddressKindHeap:
+ return data.heap.addr;
+ case kAddressKindStack:
+ return data.stack.addr;
+ case kAddressKindGlobal:
+ return data.global.addr;
+ }
+ UNREACHABLE("AddressInformation kind is invalid");
+ }
+ void Print(const char *bug_descr = nullptr) const {
+ switch (data.kind) {
+ case kAddressKindWild:
+ Printf("Address %p is a wild pointer.\n", data.addr);
+ return;
+ case kAddressKindShadow:
+ return data.shadow.Print();
+ case kAddressKindHeap:
+ return data.heap.Print();
+ case kAddressKindStack:
+ return data.stack.Print();
+ case kAddressKindGlobal:
+ // initialization-order-fiasco has a special Print()
+ return data.global.Print(bug_descr);
+ }
+ UNREACHABLE("AddressInformation kind is invalid");
+ }
+
+ void StoreTo(AddressDescriptionData *dst) const { *dst = data; }
+
+ const ShadowAddressDescription *AsShadow() const {
+ return data.kind == kAddressKindShadow ? &data.shadow : nullptr;
+ }
+ const HeapAddressDescription *AsHeap() const {
+ return data.kind == kAddressKindHeap ? &data.heap : nullptr;
+ }
+ const StackAddressDescription *AsStack() const {
+ return data.kind == kAddressKindStack ? &data.stack : nullptr;
+ }
+ const GlobalAddressDescription *AsGlobal() const {
+ return data.kind == kAddressKindGlobal ? &data.global : nullptr;
+ }
+};
+
+} // namespace __asan
+
+#endif // ASAN_DESCRIPTIONS_H
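AddressDescription stores the four description structs in a kind-tagged union,
and the AsShadow()/AsHeap()/AsStack()/AsGlobal() accessors return nullptr on a
tag mismatch so callers can chain if/else-if, as asan_debugging.cc does above.
A minimal sketch of the same pattern with invented payload types:

    #include <cstdio>

    struct HeapInfo { unsigned long chunk_begin; };
    struct StackInfo { int tid; };

    class Description {
      enum Kind { kHeap, kStack } kind;
      union {  // Only the member selected by `kind` is valid.
        HeapInfo heap;
        StackInfo stack;
      };

     public:
      explicit Description(HeapInfo h) : kind(kHeap), heap(h) {}
      explicit Description(StackInfo s) : kind(kStack), stack(s) {}
      // Typed accessors: nullptr unless the tag matches.
      const HeapInfo *AsHeap() const { return kind == kHeap ? &heap : nullptr; }
      const StackInfo *AsStack() const {
        return kind == kStack ? &stack : nullptr;
      }
    };

    int main() {
      Description d(HeapInfo{0x1000});
      if (const HeapInfo *h = d.AsHeap())
        std::printf("heap chunk at 0x%lx\n", h->chunk_begin);
      else if (const StackInfo *s = d.AsStack())
        std::printf("stack of thread T%d\n", s->tid);
      return 0;
    }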
diff --git a/lib/asan/asan_errors.cc b/lib/asan/asan_errors.cc
new file mode 100644
index 000000000000..c287ba1b4be6
--- /dev/null
+++ b/lib/asan/asan_errors.cc
@@ -0,0 +1,503 @@
+//===-- asan_errors.cc ------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan implementation for error structures.
+//===----------------------------------------------------------------------===//
+
+#include "asan_errors.h"
+#include <signal.h>
+#include "asan_descriptions.h"
+#include "asan_mapping.h"
+#include "asan_report.h"
+#include "asan_stack.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+
+namespace __asan {
+
+void ErrorStackOverflow::Print() {
+ Decorator d;
+ Printf("%s", d.Warning());
+ Report(
+ "ERROR: AddressSanitizer: %s on address %p"
+ " (pc %p bp %p sp %p T%d)\n", scariness.GetDescription(),
+ (void *)addr, (void *)pc, (void *)bp, (void *)sp, tid);
+ Printf("%s", d.EndWarning());
+ scariness.Print();
+ BufferedStackTrace stack;
+ GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, context,
+ common_flags()->fast_unwind_on_fatal);
+ stack.Print();
+ ReportErrorSummary(scariness.GetDescription(), &stack);
+}
+
+static void MaybeDumpInstructionBytes(uptr pc) {
+ if (!flags()->dump_instruction_bytes || (pc < GetPageSizeCached())) return;
+ InternalScopedString str(1024);
+ str.append("First 16 instruction bytes at pc: ");
+ if (IsAccessibleMemoryRange(pc, 16)) {
+ for (int i = 0; i < 16; ++i) {
+ PrintMemoryByte(&str, "", ((u8 *)pc)[i], /*in_shadow*/ false, " ");
+ }
+ str.append("\n");
+ } else {
+ str.append("unaccessible\n");
+ }
+ Report("%s", str.data());
+}
+
+static void MaybeDumpRegisters(void *context) {
+ if (!flags()->dump_registers) return;
+ SignalContext::DumpAllRegisters(context);
+}
+
+void ErrorDeadlySignal::Print() {
+ Decorator d;
+ Printf("%s", d.Warning());
+ const char *description = DescribeSignalOrException(signo);
+ Report(
+ "ERROR: AddressSanitizer: %s on unknown address %p (pc %p bp %p sp %p "
+ "T%d)\n",
+ description, (void *)addr, (void *)pc, (void *)bp, (void *)sp, tid);
+ Printf("%s", d.EndWarning());
+ if (pc < GetPageSizeCached()) Report("Hint: pc points to the zero page.\n");
+ if (is_memory_access) {
+ const char *access_type =
+ write_flag == SignalContext::WRITE
+ ? "WRITE"
+ : (write_flag == SignalContext::READ ? "READ" : "UNKNOWN");
+ Report("The signal is caused by a %s memory access.\n", access_type);
+ if (addr < GetPageSizeCached())
+ Report("Hint: address points to the zero page.\n");
+ }
+ scariness.Print();
+ BufferedStackTrace stack;
+ GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, context,
+ common_flags()->fast_unwind_on_fatal);
+ stack.Print();
+ MaybeDumpInstructionBytes(pc);
+ MaybeDumpRegisters(context);
+ Printf("AddressSanitizer can not provide additional info.\n");
+ ReportErrorSummary(description, &stack);
+}
+
+void ErrorDoubleFree::Print() {
+ Decorator d;
+ Printf("%s", d.Warning());
+ char tname[128];
+ Report(
+ "ERROR: AddressSanitizer: attempting %s on %p in "
+ "thread T%d%s:\n",
+ scariness.GetDescription(), addr_description.addr, tid,
+ ThreadNameWithParenthesis(tid, tname, sizeof(tname)));
+ Printf("%s", d.EndWarning());
+ scariness.Print();
+ GET_STACK_TRACE_FATAL(second_free_stack->trace[0],
+ second_free_stack->top_frame_bp);
+ stack.Print();
+ addr_description.Print();
+ ReportErrorSummary(scariness.GetDescription(), &stack);
+}
+
+void ErrorNewDeleteSizeMismatch::Print() {
+ Decorator d;
+ Printf("%s", d.Warning());
+ char tname[128];
+ Report(
+ "ERROR: AddressSanitizer: %s on %p in thread "
+ "T%d%s:\n",
+ scariness.GetDescription(), addr_description.addr, tid,
+ ThreadNameWithParenthesis(tid, tname, sizeof(tname)));
+ Printf("%s object passed to delete has wrong type:\n", d.EndWarning());
+ Printf(
+ " size of the allocated type: %zd bytes;\n"
+ " size of the deallocated type: %zd bytes.\n",
+ addr_description.chunk_access.chunk_size, delete_size);
+ CHECK_GT(free_stack->size, 0);
+ scariness.Print();
+ GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
+ stack.Print();
+ addr_description.Print();
+ ReportErrorSummary(scariness.GetDescription(), &stack);
+ Report(
+ "HINT: if you don't care about these errors you may set "
+ "ASAN_OPTIONS=new_delete_type_mismatch=0\n");
+}
+
+void ErrorFreeNotMalloced::Print() {
+ Decorator d;
+ Printf("%s", d.Warning());
+ char tname[128];
+ Report(
+ "ERROR: AddressSanitizer: attempting free on address "
+ "which was not malloc()-ed: %p in thread T%d%s\n",
+ addr_description.Address(), tid,
+ ThreadNameWithParenthesis(tid, tname, sizeof(tname)));
+ Printf("%s", d.EndWarning());
+ CHECK_GT(free_stack->size, 0);
+ scariness.Print();
+ GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
+ stack.Print();
+ addr_description.Print();
+ ReportErrorSummary(scariness.GetDescription(), &stack);
+}
+
+void ErrorAllocTypeMismatch::Print() {
+ static const char *alloc_names[] = {"INVALID", "malloc", "operator new",
+ "operator new []"};
+ static const char *dealloc_names[] = {"INVALID", "free", "operator delete",
+ "operator delete []"};
+ CHECK_NE(alloc_type, dealloc_type);
+ Decorator d;
+ Printf("%s", d.Warning());
+ Report("ERROR: AddressSanitizer: %s (%s vs %s) on %p\n",
+ scariness.GetDescription(),
+ alloc_names[alloc_type], dealloc_names[dealloc_type],
+ addr_description.addr);
+ Printf("%s", d.EndWarning());
+ CHECK_GT(dealloc_stack->size, 0);
+ scariness.Print();
+ GET_STACK_TRACE_FATAL(dealloc_stack->trace[0], dealloc_stack->top_frame_bp);
+ stack.Print();
+ addr_description.Print();
+ ReportErrorSummary(scariness.GetDescription(), &stack);
+ Report(
+ "HINT: if you don't care about these errors you may set "
+ "ASAN_OPTIONS=alloc_dealloc_mismatch=0\n");
+}
+
+void ErrorMallocUsableSizeNotOwned::Print() {
+ Decorator d;
+ Printf("%s", d.Warning());
+ Report(
+ "ERROR: AddressSanitizer: attempting to call malloc_usable_size() for "
+ "pointer which is not owned: %p\n",
+ addr_description.Address());
+ Printf("%s", d.EndWarning());
+ stack->Print();
+ addr_description.Print();
+ ReportErrorSummary(scariness.GetDescription(), stack);
+}
+
+void ErrorSanitizerGetAllocatedSizeNotOwned::Print() {
+ Decorator d;
+ Printf("%s", d.Warning());
+ Report(
+ "ERROR: AddressSanitizer: attempting to call "
+ "__sanitizer_get_allocated_size() for pointer which is not owned: %p\n",
+ addr_description.Address());
+ Printf("%s", d.EndWarning());
+ stack->Print();
+ addr_description.Print();
+ ReportErrorSummary(scariness.GetDescription(), stack);
+}
+
+void ErrorStringFunctionMemoryRangesOverlap::Print() {
+ Decorator d;
+ char bug_type[100];
+ internal_snprintf(bug_type, sizeof(bug_type), "%s-param-overlap", function);
+ Printf("%s", d.Warning());
+ Report(
+ "ERROR: AddressSanitizer: %s: memory ranges [%p,%p) and [%p, %p) "
+ "overlap\n",
+ bug_type, addr1_description.Address(),
+ addr1_description.Address() + length1, addr2_description.Address(),
+ addr2_description.Address() + length2);
+ Printf("%s", d.EndWarning());
+ scariness.Print();
+ stack->Print();
+ addr1_description.Print();
+ addr2_description.Print();
+ ReportErrorSummary(bug_type, stack);
+}
+
+void ErrorStringFunctionSizeOverflow::Print() {
+ Decorator d;
+ Printf("%s", d.Warning());
+ Report("ERROR: AddressSanitizer: %s: (size=%zd)\n",
+ scariness.GetDescription(), size);
+ Printf("%s", d.EndWarning());
+ scariness.Print();
+ stack->Print();
+ addr_description.Print();
+ ReportErrorSummary(scariness.GetDescription(), stack);
+}
+
+void ErrorBadParamsToAnnotateContiguousContainer::Print() {
+ Report(
+ "ERROR: AddressSanitizer: bad parameters to "
+ "__sanitizer_annotate_contiguous_container:\n"
+ " beg : %p\n"
+ " end : %p\n"
+ " old_mid : %p\n"
+ " new_mid : %p\n",
+ beg, end, old_mid, new_mid);
+ uptr granularity = SHADOW_GRANULARITY;
+ if (!IsAligned(beg, granularity))
+ Report("ERROR: beg is not aligned by %d\n", granularity);
+ stack->Print();
+ ReportErrorSummary(scariness.GetDescription(), stack);
+}
+
+void ErrorODRViolation::Print() {
+ Decorator d;
+ Printf("%s", d.Warning());
+ Report("ERROR: AddressSanitizer: %s (%p):\n", scariness.GetDescription(),
+ global1.beg);
+ Printf("%s", d.EndWarning());
+ InternalScopedString g1_loc(256), g2_loc(256);
+ PrintGlobalLocation(&g1_loc, global1);
+ PrintGlobalLocation(&g2_loc, global2);
+ Printf(" [1] size=%zd '%s' %s\n", global1.size,
+ MaybeDemangleGlobalName(global1.name), g1_loc.data());
+ Printf(" [2] size=%zd '%s' %s\n", global2.size,
+ MaybeDemangleGlobalName(global2.name), g2_loc.data());
+ if (stack_id1 && stack_id2) {
+ Printf("These globals were registered at these points:\n");
+ Printf(" [1]:\n");
+ StackDepotGet(stack_id1).Print();
+ Printf(" [2]:\n");
+ StackDepotGet(stack_id2).Print();
+ }
+ Report(
+ "HINT: if you don't care about these errors you may set "
+ "ASAN_OPTIONS=detect_odr_violation=0\n");
+ InternalScopedString error_msg(256);
+ error_msg.append("%s: global '%s' at %s", scariness.GetDescription(),
+ MaybeDemangleGlobalName(global1.name), g1_loc.data());
+ ReportErrorSummary(error_msg.data());
+}
+
+void ErrorInvalidPointerPair::Print() {
+ Decorator d;
+ Printf("%s", d.Warning());
+ Report("ERROR: AddressSanitizer: %s: %p %p\n", scariness.GetDescription(),
+ addr1_description.Address(), addr2_description.Address());
+ Printf("%s", d.EndWarning());
+ GET_STACK_TRACE_FATAL(pc, bp);
+ stack.Print();
+ addr1_description.Print();
+ addr2_description.Print();
+ ReportErrorSummary(scariness.GetDescription(), &stack);
+}
+
+static bool AdjacentShadowValuesAreFullyPoisoned(u8 *s) {
+ return s[-1] > 127 && s[1] > 127;
+}
+
+ErrorGeneric::ErrorGeneric(u32 tid, uptr pc_, uptr bp_, uptr sp_, uptr addr,
+ bool is_write_, uptr access_size_)
+ : ErrorBase(tid),
+ addr_description(addr, access_size_, /*shouldLockThreadRegistry=*/false),
+ pc(pc_),
+ bp(bp_),
+ sp(sp_),
+ access_size(access_size_),
+ is_write(is_write_),
+ shadow_val(0) {
+ scariness.Clear();
+ if (access_size) {
+ if (access_size <= 9) {
+ char desr[] = "?-byte";
+ desr[0] = '0' + access_size;
+ scariness.Scare(access_size + access_size / 2, desr);
+ } else if (access_size >= 10) {
+ scariness.Scare(15, "multi-byte");
+ }
+ is_write ? scariness.Scare(20, "write") : scariness.Scare(1, "read");
+
+ // Determine the error type.
+ bug_descr = "unknown-crash";
+ if (AddrIsInMem(addr)) {
+ u8 *shadow_addr = (u8 *)MemToShadow(addr);
+ // If we are accessing 16 bytes, look at the second shadow byte.
+ if (*shadow_addr == 0 && access_size > SHADOW_GRANULARITY) shadow_addr++;
+ // If we are in the partial right redzone, look at the next shadow byte.
+ if (*shadow_addr > 0 && *shadow_addr < 128) shadow_addr++;
+ bool far_from_bounds = false;
+ shadow_val = *shadow_addr;
+ int bug_type_score = 0;
+ // For use-after-frees reads are almost as bad as writes.
+ int read_after_free_bonus = 0;
+ switch (shadow_val) {
+ case kAsanHeapLeftRedzoneMagic:
+ case kAsanArrayCookieMagic:
+ bug_descr = "heap-buffer-overflow";
+ bug_type_score = 10;
+ far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
+ break;
+ case kAsanHeapFreeMagic:
+ bug_descr = "heap-use-after-free";
+ bug_type_score = 20;
+ if (!is_write) read_after_free_bonus = 18;
+ break;
+ case kAsanStackLeftRedzoneMagic:
+ bug_descr = "stack-buffer-underflow";
+ bug_type_score = 25;
+ far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
+ break;
+ case kAsanInitializationOrderMagic:
+ bug_descr = "initialization-order-fiasco";
+ bug_type_score = 1;
+ break;
+ case kAsanStackMidRedzoneMagic:
+ case kAsanStackRightRedzoneMagic:
+ bug_descr = "stack-buffer-overflow";
+ bug_type_score = 25;
+ far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
+ break;
+ case kAsanStackAfterReturnMagic:
+ bug_descr = "stack-use-after-return";
+ bug_type_score = 30;
+ if (!is_write) read_after_free_bonus = 18;
+ break;
+ case kAsanUserPoisonedMemoryMagic:
+ bug_descr = "use-after-poison";
+ bug_type_score = 20;
+ break;
+ case kAsanContiguousContainerOOBMagic:
+ bug_descr = "container-overflow";
+ bug_type_score = 10;
+ break;
+ case kAsanStackUseAfterScopeMagic:
+ bug_descr = "stack-use-after-scope";
+ bug_type_score = 10;
+ break;
+ case kAsanGlobalRedzoneMagic:
+ bug_descr = "global-buffer-overflow";
+ bug_type_score = 10;
+ far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
+ break;
+ case kAsanIntraObjectRedzone:
+ bug_descr = "intra-object-overflow";
+ bug_type_score = 10;
+ break;
+ case kAsanAllocaLeftMagic:
+ case kAsanAllocaRightMagic:
+ bug_descr = "dynamic-stack-buffer-overflow";
+ bug_type_score = 25;
+ far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
+ break;
+ }
+ scariness.Scare(bug_type_score + read_after_free_bonus, bug_descr);
+ if (far_from_bounds) scariness.Scare(10, "far-from-bounds");
+ }
+ }
+}
+
+static void PrintContainerOverflowHint() {
+ Printf("HINT: if you don't care about these errors you may set "
+ "ASAN_OPTIONS=detect_container_overflow=0.\n"
+ "If you suspect a false positive see also: "
+ "https://github.com/google/sanitizers/wiki/"
+ "AddressSanitizerContainerOverflow.\n");
+}
+
+static void PrintShadowByte(InternalScopedString *str, const char *before,
+ u8 byte, const char *after = "\n") {
+ PrintMemoryByte(str, before, byte, /*in_shadow*/true, after);
+}
+
+static void PrintLegend(InternalScopedString *str) {
+ str->append(
+ "Shadow byte legend (one shadow byte represents %d "
+ "application bytes):\n",
+ (int)SHADOW_GRANULARITY);
+ PrintShadowByte(str, " Addressable: ", 0);
+ str->append(" Partially addressable: ");
+ for (u8 i = 1; i < SHADOW_GRANULARITY; i++) PrintShadowByte(str, "", i, " ");
+ str->append("\n");
+ PrintShadowByte(str, " Heap left redzone: ",
+ kAsanHeapLeftRedzoneMagic);
+ PrintShadowByte(str, " Freed heap region: ", kAsanHeapFreeMagic);
+ PrintShadowByte(str, " Stack left redzone: ",
+ kAsanStackLeftRedzoneMagic);
+ PrintShadowByte(str, " Stack mid redzone: ",
+ kAsanStackMidRedzoneMagic);
+ PrintShadowByte(str, " Stack right redzone: ",
+ kAsanStackRightRedzoneMagic);
+ PrintShadowByte(str, " Stack after return: ",
+ kAsanStackAfterReturnMagic);
+ PrintShadowByte(str, " Stack use after scope: ",
+ kAsanStackUseAfterScopeMagic);
+ PrintShadowByte(str, " Global redzone: ", kAsanGlobalRedzoneMagic);
+ PrintShadowByte(str, " Global init order: ",
+ kAsanInitializationOrderMagic);
+ PrintShadowByte(str, " Poisoned by user: ",
+ kAsanUserPoisonedMemoryMagic);
+ PrintShadowByte(str, " Container overflow: ",
+ kAsanContiguousContainerOOBMagic);
+ PrintShadowByte(str, " Array cookie: ",
+ kAsanArrayCookieMagic);
+ PrintShadowByte(str, " Intra object redzone: ",
+ kAsanIntraObjectRedzone);
+ PrintShadowByte(str, " ASan internal: ", kAsanInternalHeapMagic);
+ PrintShadowByte(str, " Left alloca redzone: ", kAsanAllocaLeftMagic);
+ PrintShadowByte(str, " Right alloca redzone: ", kAsanAllocaRightMagic);
+}
+
+static void PrintShadowBytes(InternalScopedString *str, const char *before,
+ u8 *bytes, u8 *guilty, uptr n) {
+ Decorator d;
+ if (before) str->append("%s%p:", before, bytes);
+ for (uptr i = 0; i < n; i++) {
+ u8 *p = bytes + i;
+ const char *before =
+ p == guilty ? "[" : (p - 1 == guilty && i != 0) ? "" : " ";
+ const char *after = p == guilty ? "]" : "";
+ PrintShadowByte(str, before, *p, after);
+ }
+ str->append("\n");
+}
+
+static void PrintShadowMemoryForAddress(uptr addr) {
+ if (!AddrIsInMem(addr)) return;
+ uptr shadow_addr = MemToShadow(addr);
+ const uptr n_bytes_per_row = 16;
+ uptr aligned_shadow = shadow_addr & ~(n_bytes_per_row - 1);
+ InternalScopedString str(4096 * 8);
+ str.append("Shadow bytes around the buggy address:\n");
+ for (int i = -5; i <= 5; i++) {
+ const char *prefix = (i == 0) ? "=>" : " ";
+ PrintShadowBytes(&str, prefix, (u8 *)(aligned_shadow + i * n_bytes_per_row),
+ (u8 *)shadow_addr, n_bytes_per_row);
+ }
+ if (flags()->print_legend) PrintLegend(&str);
+ Printf("%s", str.data());
+}
+
+void ErrorGeneric::Print() {
+ Decorator d;
+ Printf("%s", d.Warning());
+ uptr addr = addr_description.Address();
+ Report("ERROR: AddressSanitizer: %s on address %p at pc %p bp %p sp %p\n",
+ bug_descr, (void *)addr, pc, bp, sp);
+ Printf("%s", d.EndWarning());
+
+ char tname[128];
+ Printf("%s%s of size %zu at %p thread T%d%s%s\n", d.Access(),
+ access_size ? (is_write ? "WRITE" : "READ") : "ACCESS", access_size,
+ (void *)addr, tid,
+ ThreadNameWithParenthesis(tid, tname, sizeof(tname)), d.EndAccess());
+
+ scariness.Print();
+ GET_STACK_TRACE_FATAL(pc, bp);
+ stack.Print();
+
+ // Pass bug_descr because we have a special case for
+ // initialization-order-fiasco
+ addr_description.Print(bug_descr);
+ if (shadow_val == kAsanContiguousContainerOOBMagic)
+ PrintContainerOverflowHint();
+ ReportErrorSummary(bug_descr, &stack);
+ PrintShadowMemoryForAddress(addr);
+}
+
+} // namespace __asan
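
To make ErrorGeneric's scoring concrete: an 8-byte write hitting freed heap memory accumulates 12 for the access width (8 + 8/2), 20 for being a write, and 20 for the heap-use-after-free shadow value, 52 in total. A 4-byte read of the same memory scores 6 + 1 + 20 + 18 = 45, the last term being the read-after-free bonus. The extra 10 for far-from-bounds applies only to the overflow and underflow kinds, and only when both neighboring shadow bytes are fully poisoned.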
diff --git a/lib/asan/asan_errors.h b/lib/asan/asan_errors.h
new file mode 100644
index 000000000000..9a124924470e
--- /dev/null
+++ b/lib/asan/asan_errors.h
@@ -0,0 +1,390 @@
+//===-- asan_errors.h -------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan-private header for error structures.
+//===----------------------------------------------------------------------===//
+#ifndef ASAN_ERRORS_H
+#define ASAN_ERRORS_H
+
+#include "asan_descriptions.h"
+#include "asan_scariness_score.h"
+#include "sanitizer_common/sanitizer_common.h"
+
+namespace __asan {
+
+struct ErrorBase {
+ ErrorBase() = default;
+ explicit ErrorBase(u32 tid_) : tid(tid_) {}
+ ScarinessScoreBase scariness;
+ u32 tid;
+};
+
+struct ErrorStackOverflow : ErrorBase {
+ uptr addr, pc, bp, sp;
+ // ErrorStackOverflow never owns the context.
+ void *context;
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+ // constructor
+ ErrorStackOverflow() = default;
+ ErrorStackOverflow(u32 tid, const SignalContext &sig)
+ : ErrorBase(tid),
+ addr(sig.addr),
+ pc(sig.pc),
+ bp(sig.bp),
+ sp(sig.sp),
+ context(sig.context) {
+ scariness.Clear();
+ scariness.Scare(10, "stack-overflow");
+ }
+ void Print();
+};
+
+struct ErrorDeadlySignal : ErrorBase {
+ uptr addr, pc, bp, sp;
+ // ErrorDeadlySignal never owns the context.
+ void *context;
+ int signo;
+ SignalContext::WriteFlag write_flag;
+ bool is_memory_access;
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+ // constructor
+ ErrorDeadlySignal() = default;
+ ErrorDeadlySignal(u32 tid, const SignalContext &sig, int signo_)
+ : ErrorBase(tid),
+ addr(sig.addr),
+ pc(sig.pc),
+ bp(sig.bp),
+ sp(sig.sp),
+ context(sig.context),
+ signo(signo_),
+ write_flag(sig.write_flag),
+ is_memory_access(sig.is_memory_access) {
+ scariness.Clear();
+ if (is_memory_access) {
+ if (addr < GetPageSizeCached()) {
+ scariness.Scare(10, "null-deref");
+ } else if (addr == pc) {
+ scariness.Scare(60, "wild-jump");
+ } else if (write_flag == SignalContext::WRITE) {
+ scariness.Scare(30, "wild-addr-write");
+ } else if (write_flag == SignalContext::READ) {
+ scariness.Scare(20, "wild-addr-read");
+ } else {
+ scariness.Scare(25, "wild-addr");
+ }
+ } else {
+ scariness.Scare(10, "signal");
+ }
+ }
+ void Print();
+};
+
+struct ErrorDoubleFree : ErrorBase {
+ // ErrorDoubleFree doesn't own the stack trace.
+ const BufferedStackTrace *second_free_stack;
+ HeapAddressDescription addr_description;
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+ // constructor
+ ErrorDoubleFree() = default;
+ ErrorDoubleFree(u32 tid, BufferedStackTrace *stack, uptr addr)
+ : ErrorBase(tid), second_free_stack(stack) {
+ CHECK_GT(second_free_stack->size, 0);
+ GetHeapAddressInformation(addr, 1, &addr_description);
+ scariness.Clear();
+ scariness.Scare(42, "double-free");
+ }
+ void Print();
+};
+
+struct ErrorNewDeleteSizeMismatch : ErrorBase {
+ // ErrorNewDeleteSizeMismatch doesn't own the stack trace.
+ const BufferedStackTrace *free_stack;
+ HeapAddressDescription addr_description;
+ uptr delete_size;
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+ // constructor
+ ErrorNewDeleteSizeMismatch() = default;
+ ErrorNewDeleteSizeMismatch(u32 tid, BufferedStackTrace *stack, uptr addr,
+ uptr delete_size_)
+ : ErrorBase(tid), free_stack(stack), delete_size(delete_size_) {
+ GetHeapAddressInformation(addr, 1, &addr_description);
+ scariness.Clear();
+ scariness.Scare(10, "new-delete-type-mismatch");
+ }
+ void Print();
+};
+
+struct ErrorFreeNotMalloced : ErrorBase {
+ // ErrorFreeNotMalloced doesn't own the stack trace.
+ const BufferedStackTrace *free_stack;
+ AddressDescription addr_description;
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+ // constructor
+ ErrorFreeNotMalloced() = default;
+ ErrorFreeNotMalloced(u32 tid, BufferedStackTrace *stack, uptr addr)
+ : ErrorBase(tid),
+ free_stack(stack),
+ addr_description(addr, /*shouldLockThreadRegistry=*/false) {
+ scariness.Clear();
+ scariness.Scare(40, "bad-free");
+ }
+ void Print();
+};
+
+struct ErrorAllocTypeMismatch : ErrorBase {
+ // ErrorAllocTypeMismatch doesn't own the stack trace.
+ const BufferedStackTrace *dealloc_stack;
+ HeapAddressDescription addr_description;
+ AllocType alloc_type, dealloc_type;
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+ // constructor
+ ErrorAllocTypeMismatch() = default;
+ ErrorAllocTypeMismatch(u32 tid, BufferedStackTrace *stack, uptr addr,
+ AllocType alloc_type_, AllocType dealloc_type_)
+ : ErrorBase(tid),
+ dealloc_stack(stack),
+ alloc_type(alloc_type_),
+ dealloc_type(dealloc_type_) {
+ GetHeapAddressInformation(addr, 1, &addr_description);
+ scariness.Clear();
+ scariness.Scare(10, "alloc-dealloc-mismatch");
+ };
+ void Print();
+};
+
+struct ErrorMallocUsableSizeNotOwned : ErrorBase {
+ // ErrorMallocUsableSizeNotOwned doesn't own the stack trace.
+ const BufferedStackTrace *stack;
+ AddressDescription addr_description;
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+ // constructor
+ ErrorMallocUsableSizeNotOwned() = default;
+ ErrorMallocUsableSizeNotOwned(u32 tid, BufferedStackTrace *stack_, uptr addr)
+ : ErrorBase(tid),
+ stack(stack_),
+ addr_description(addr, /*shouldLockThreadRegistry=*/false) {
+ scariness.Clear();
+ scariness.Scare(10, "bad-malloc_usable_size");
+ }
+ void Print();
+};
+
+struct ErrorSanitizerGetAllocatedSizeNotOwned : ErrorBase {
+ // ErrorSanitizerGetAllocatedSizeNotOwned doesn't own the stack trace.
+ const BufferedStackTrace *stack;
+ AddressDescription addr_description;
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+ // constructor
+ ErrorSanitizerGetAllocatedSizeNotOwned() = default;
+ ErrorSanitizerGetAllocatedSizeNotOwned(u32 tid, BufferedStackTrace *stack_,
+ uptr addr)
+ : ErrorBase(tid),
+ stack(stack_),
+ addr_description(addr, /*shouldLockThreadRegistry=*/false) {
+ scariness.Clear();
+ scariness.Scare(10, "bad-__sanitizer_get_allocated_size");
+ }
+ void Print();
+};
+
+struct ErrorStringFunctionMemoryRangesOverlap : ErrorBase {
+ // ErrorStringFunctionMemoryRangesOverlap doesn't own the stack trace.
+ const BufferedStackTrace *stack;
+ uptr length1, length2;
+ AddressDescription addr1_description;
+ AddressDescription addr2_description;
+ const char *function;
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+ // constructor
+ ErrorStringFunctionMemoryRangesOverlap() = default;
+ ErrorStringFunctionMemoryRangesOverlap(u32 tid, BufferedStackTrace *stack_,
+ uptr addr1, uptr length1_, uptr addr2,
+ uptr length2_, const char *function_)
+ : ErrorBase(tid),
+ stack(stack_),
+ length1(length1_),
+ length2(length2_),
+ addr1_description(addr1, length1, /*shouldLockThreadRegistry=*/false),
+ addr2_description(addr2, length2, /*shouldLockThreadRegistry=*/false),
+ function(function_) {
+ char bug_type[100];
+ internal_snprintf(bug_type, sizeof(bug_type), "%s-param-overlap", function);
+ scariness.Clear();
+ scariness.Scare(10, bug_type);
+ }
+ void Print();
+};
+
+struct ErrorStringFunctionSizeOverflow : ErrorBase {
+ // ErrorStringFunctionSizeOverflow doesn't own the stack trace.
+ const BufferedStackTrace *stack;
+ AddressDescription addr_description;
+ uptr size;
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+ // constructor
+ ErrorStringFunctionSizeOverflow() = default;
+ ErrorStringFunctionSizeOverflow(u32 tid, BufferedStackTrace *stack_,
+ uptr addr, uptr size_)
+ : ErrorBase(tid),
+ stack(stack_),
+ addr_description(addr, /*shouldLockThreadRegistry=*/false),
+ size(size_) {
+ scariness.Clear();
+ scariness.Scare(10, "negative-size-param");
+ }
+ void Print();
+};
+
+struct ErrorBadParamsToAnnotateContiguousContainer : ErrorBase {
+ // ErrorBadParamsToAnnotateContiguousContainer doesn't own the stack trace.
+ const BufferedStackTrace *stack;
+ uptr beg, end, old_mid, new_mid;
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+ // constructor
+ ErrorBadParamsToAnnotateContiguousContainer() = default;
+ // PS4: Do we want an AddressDescription for beg?
+ ErrorBadParamsToAnnotateContiguousContainer(u32 tid,
+ BufferedStackTrace *stack_,
+ uptr beg_, uptr end_,
+ uptr old_mid_, uptr new_mid_)
+ : ErrorBase(tid),
+ stack(stack_),
+ beg(beg_),
+ end(end_),
+ old_mid(old_mid_),
+ new_mid(new_mid_) {
+ scariness.Clear();
+ scariness.Scare(10, "bad-__sanitizer_annotate_contiguous_container");
+ }
+ void Print();
+};
+
+struct ErrorODRViolation : ErrorBase {
+ __asan_global global1, global2;
+ u32 stack_id1, stack_id2;
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+ // constructor
+ ErrorODRViolation() = default;
+ ErrorODRViolation(u32 tid, const __asan_global *g1, u32 stack_id1_,
+ const __asan_global *g2, u32 stack_id2_)
+ : ErrorBase(tid),
+ global1(*g1),
+ global2(*g2),
+ stack_id1(stack_id1_),
+ stack_id2(stack_id2_) {
+ scariness.Clear();
+ scariness.Scare(10, "odr-violation");
+ }
+ void Print();
+};
+
+struct ErrorInvalidPointerPair : ErrorBase {
+ uptr pc, bp, sp;
+ AddressDescription addr1_description;
+ AddressDescription addr2_description;
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+ // constructor
+ ErrorInvalidPointerPair() = default;
+ ErrorInvalidPointerPair(u32 tid, uptr pc_, uptr bp_, uptr sp_, uptr p1,
+ uptr p2)
+ : ErrorBase(tid),
+ pc(pc_),
+ bp(bp_),
+ sp(sp_),
+ addr1_description(p1, 1, /*shouldLockThreadRegistry=*/false),
+ addr2_description(p2, 1, /*shouldLockThreadRegistry=*/false) {
+ scariness.Clear();
+ scariness.Scare(10, "invalid-pointer-pair");
+ }
+ void Print();
+};
+
+struct ErrorGeneric : ErrorBase {
+ AddressDescription addr_description;
+ uptr pc, bp, sp;
+ uptr access_size;
+ const char *bug_descr;
+ bool is_write;
+ u8 shadow_val;
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+ // constructor
+ ErrorGeneric() = default;
+ ErrorGeneric(u32 tid, uptr addr, uptr pc_, uptr bp_, uptr sp_, bool is_write_,
+ uptr access_size_);
+ void Print();
+};
+
+// clang-format off
+#define ASAN_FOR_EACH_ERROR_KIND(macro) \
+ macro(StackOverflow) \
+ macro(DeadlySignal) \
+ macro(DoubleFree) \
+ macro(NewDeleteSizeMismatch) \
+ macro(FreeNotMalloced) \
+ macro(AllocTypeMismatch) \
+ macro(MallocUsableSizeNotOwned) \
+ macro(SanitizerGetAllocatedSizeNotOwned) \
+ macro(StringFunctionMemoryRangesOverlap) \
+ macro(StringFunctionSizeOverflow) \
+ macro(BadParamsToAnnotateContiguousContainer) \
+ macro(ODRViolation) \
+ macro(InvalidPointerPair) \
+ macro(Generic)
+// clang-format on
+
+#define ASAN_DEFINE_ERROR_KIND(name) kErrorKind##name,
+#define ASAN_ERROR_DESCRIPTION_MEMBER(name) Error##name name;
+#define ASAN_ERROR_DESCRIPTION_CONSTRUCTOR(name) \
+ ErrorDescription(Error##name const &e) : kind(kErrorKind##name), name(e) {}
+#define ASAN_ERROR_DESCRIPTION_PRINT(name) \
+ case kErrorKind##name: \
+ return name.Print();
+
+enum ErrorKind {
+ kErrorKindInvalid = 0,
+ ASAN_FOR_EACH_ERROR_KIND(ASAN_DEFINE_ERROR_KIND)
+};
+
+struct ErrorDescription {
+ ErrorKind kind;
+ // We're using a tagged union because it allows us to have a trivially
+ // copyable type and use the same structures as the public interface.
+ //
+ // We can add a wrapper around it to make it "more c++-like", but that would
+ // add a lot of code and the benefit wouldn't be that big.
+ union {
+ ErrorBase Base;
+ ASAN_FOR_EACH_ERROR_KIND(ASAN_ERROR_DESCRIPTION_MEMBER)
+ };
+
+ ErrorDescription() { internal_memset(this, 0, sizeof(*this)); }
+ ASAN_FOR_EACH_ERROR_KIND(ASAN_ERROR_DESCRIPTION_CONSTRUCTOR)
+
+ bool IsValid() { return kind != kErrorKindInvalid; }
+ void Print() {
+ switch (kind) {
+ ASAN_FOR_EACH_ERROR_KIND(ASAN_ERROR_DESCRIPTION_PRINT)
+ case kErrorKindInvalid:
+ CHECK(0);
+ }
+ CHECK(0);
+ }
+};
+
+#undef ASAN_FOR_EACH_ERROR_KIND
+#undef ASAN_DEFINE_ERROR_KIND
+#undef ASAN_ERROR_DESCRIPTION_MEMBER
+#undef ASAN_ERROR_DESCRIPTION_CONSTRUCTOR
+#undef ASAN_ERROR_DESCRIPTION_PRINT
+
+} // namespace __asan
+
+#endif // ASAN_ERRORS_H
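
ASAN_FOR_EACH_ERROR_KIND is a textbook X-macro: a single list stamps out the enum, the union members, the converting constructors, and the Print() dispatch, so adding an error kind is a one-line change. A compilable miniature of the technique with two made-up kinds:

#include <cstdio>

#define FOR_EACH_KIND(macro) \
  macro(Foo)                 \
  macro(Bar)

struct ErrorFoo { int code; void Print() { std::printf("Foo %d\n", code); } };
struct ErrorBar { int code; void Print() { std::printf("Bar %d\n", code); } };

#define DEFINE_KIND(name) kKind##name,
enum Kind { kKindInvalid = 0, FOR_EACH_KIND(DEFINE_KIND) };

#define MEMBER(name) Error##name name;
#define CTOR(name) \
  Description(Error##name const &e) : kind(kKind##name), name(e) {}
#define PRINT_CASE(name) \
  case kKind##name:      \
    return name.Print();

struct Description {
  Kind kind;
  union { FOR_EACH_KIND(MEMBER) };  // one trivially copyable member per kind
  Description() : kind(kKindInvalid) {}
  FOR_EACH_KIND(CTOR)
  void Print() {
    switch (kind) {
      FOR_EACH_KIND(PRINT_CASE)
      case kKindInvalid:
        break;
    }
  }
};

int main() {
  Description d(ErrorFoo{7});
  d.Print();  // prints "Foo 7"
}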
diff --git a/lib/asan/asan_fake_stack.cc b/lib/asan/asan_fake_stack.cc
index 16feccd0d54a..017b7d2af129 100644
--- a/lib/asan/asan_fake_stack.cc
+++ b/lib/asan/asan_fake_stack.cc
@@ -100,7 +100,7 @@ FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id,
// if the signal arrives between checking and setting flags[pos], the
// signal handler's fake stack will start from a different hint_position
// and so will not touch this particular byte. So, it is safe to do this
- // with regular non-atimic load and store (at least I was not able to make
+ // with regular non-atomic load and store (at least I was not able to make
// this code crash).
if (flags[pos]) continue;
flags[pos] = 1;
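
The comment in this hunk argues that racy non-atomic flag updates are tolerable because each caller scans from its own hint position. A compilable sketch of that claim loop; the names and the fixed frame count are illustrative (the real FakeStack::Allocate derives them from stack_size_log and class_id):

static const unsigned kNumFrames = 64;  // illustrative; real count varies
static unsigned char flags[kNumFrames];

// Scan from a per-caller hint; a signal handler racing with this loop
// normally starts from a different hint and probes different bytes, so
// plain non-atomic loads and stores are tolerable in practice.
int ClaimFrame(unsigned hint) {
  for (unsigned i = 0; i < kNumFrames; i++) {
    unsigned pos = (hint + i) % kNumFrames;
    if (flags[pos]) continue;  // already taken
    flags[pos] = 1;            // the benign racy store discussed above
    return (int)pos;
  }
  return -1;  // size class exhausted
}

int main() { return ClaimFrame(/*hint=*/17) == 17 ? 0 : 1; }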
diff --git a/lib/asan/asan_fake_stack.h b/lib/asan/asan_fake_stack.h
index 74ca02df9056..da9a91c23025 100644
--- a/lib/asan/asan_fake_stack.h
+++ b/lib/asan/asan_fake_stack.h
@@ -52,7 +52,7 @@ struct FakeFrame {
// Allocate() flips the appropriate allocation flag atomically, thus achieving
// async-signal safety.
// This allocator does not have quarantine per se, but it tries to allocate the
-// frames in round robin fasion to maximize the delay between a deallocation
+// frames in round robin fashion to maximize the delay between a deallocation
// and the next allocation.
class FakeStack {
static const uptr kMinStackFrameSizeLog = 6; // Min frame is 64B.
@@ -99,12 +99,12 @@ class FakeStack {
return ((uptr)1) << (stack_size_log - kMinStackFrameSizeLog - class_id);
}
- // Divide n by the numbe of frames in size class.
+ // Divide n by the number of frames in size class.
static uptr ModuloNumberOfFrames(uptr stack_size_log, uptr class_id, uptr n) {
return n & (NumberOfFrames(stack_size_log, class_id) - 1);
}
- // The the pointer to the flags of the given class_id.
+ // The pointer to the flags of the given class_id.
u8 *GetFlags(uptr stack_size_log, uptr class_id) {
return reinterpret_cast<u8 *>(this) + kFlagsOffset +
FlagsOffset(stack_size_log, class_id);
diff --git a/lib/asan/asan_flags.cc b/lib/asan/asan_flags.cc
index 345a35ce3bb3..4db407d4512b 100644
--- a/lib/asan/asan_flags.cc
+++ b/lib/asan/asan_flags.cc
@@ -156,9 +156,19 @@ void InitializeFlags() {
f->quarantine_size_mb = f->quarantine_size >> 20;
if (f->quarantine_size_mb < 0) {
const int kDefaultQuarantineSizeMb =
- (ASAN_LOW_MEMORY) ? 1UL << 6 : 1UL << 8;
+ (ASAN_LOW_MEMORY) ? 1UL << 4 : 1UL << 8;
f->quarantine_size_mb = kDefaultQuarantineSizeMb;
}
+ if (f->thread_local_quarantine_size_kb < 0) {
+ const u32 kDefaultThreadLocalQuarantineSizeKb =
+ // It is not advised to go lower than 64Kb: otherwise quarantine batches
+ // pushed from the thread-local quarantine to the global one will create
+ // too much overhead. One quarantine batch is 8Kb and holds up to
+ // 1021 chunks, which amounts to 1/8 memory overhead per batch when the
+ // thread-local quarantine is set to 64Kb.
+ (ASAN_LOW_MEMORY) ? 1 << 6 : FIRST_32_SECOND_64(1 << 8, 1 << 10);
+ f->thread_local_quarantine_size_kb = kDefaultThreadLocalQuarantineSizeKb;
+ }
if (!f->replace_str && common_flags()->intercept_strlen) {
Report("WARNING: strlen interceptor is enabled even though replace_str=0. "
"Use intercept_strlen=0 to disable it.");
diff --git a/lib/asan/asan_flags.inc b/lib/asan/asan_flags.inc
index 9496a47490c7..4712efb86224 100644
--- a/lib/asan/asan_flags.inc
+++ b/lib/asan/asan_flags.inc
@@ -23,6 +23,12 @@ ASAN_FLAG(int, quarantine_size_mb, -1,
"Size (in Mb) of quarantine used to detect use-after-free "
"errors. Lower value may reduce memory usage but increase the "
"chance of false negatives.")
+ASAN_FLAG(int, thread_local_quarantine_size_kb, -1,
+ "Size (in Kb) of thread local quarantine used to detect "
+ "use-after-free errors. Lower value may reduce memory usage but "
+ "increase the chance of false negatives. It is not advised to go "
+ "lower than 64Kb, otherwise frequent transfers to global quarantine "
+ "might affect performance.")
ASAN_FLAG(int, redzone, 16,
"Minimal size (in bytes) of redzones around heap objects. "
"Requirement: redzone >= 16, is a power of two.")
@@ -102,7 +108,7 @@ ASAN_FLAG(bool, poison_array_cookie, true,
// https://github.com/google/sanitizers/issues/309
// TODO(glider,timurrrr): Fix known issues and enable this back.
ASAN_FLAG(bool, alloc_dealloc_mismatch,
- (SANITIZER_MAC == 0) && (SANITIZER_WINDOWS == 0),
+ !SANITIZER_MAC && !SANITIZER_WINDOWS && !SANITIZER_ANDROID,
"Report errors on malloc/delete, new/free, new/delete[], etc.")
ASAN_FLAG(bool, new_delete_type_mismatch, true,
@@ -133,6 +139,9 @@ ASAN_FLAG(int, detect_odr_violation, 2,
"have different sizes")
ASAN_FLAG(bool, dump_instruction_bytes, false,
"If true, dump 16 bytes starting at the instruction that caused SEGV")
+ASAN_FLAG(bool, dump_registers, true,
+ "If true, dump values of CPU registers when SEGV happens. Only "
+ "available on OS X for now.")
ASAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
ASAN_FLAG(bool, halt_on_error, true,
"Crash the program after printing the first error report "
diff --git a/lib/asan/asan_globals.cc b/lib/asan/asan_globals.cc
index f185761809c7..b7233067358c 100644
--- a/lib/asan/asan_globals.cc
+++ b/lib/asan/asan_globals.cc
@@ -25,6 +25,7 @@
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_symbolizer.h"
namespace __asan {
@@ -123,18 +124,6 @@ int GetGlobalsForAddress(uptr addr, Global *globals, u32 *reg_sites,
return res;
}
-bool GetInfoForAddressIfGlobal(uptr addr, AddressDescription *descr) {
- Global g = {};
- if (GetGlobalsForAddress(addr, &g, nullptr, 1)) {
- internal_strncpy(descr->name, g.name, descr->name_size);
- descr->region_address = g.beg;
- descr->region_size = g.size;
- descr->region_kind = "global";
- return true;
- }
- return false;
-}
-
enum GlobalSymbolState {
UNREGISTERED = 0,
REGISTERED = 1
@@ -279,6 +268,46 @@ void StopInitOrderChecking() {
}
}
+static bool IsASCII(unsigned char c) { return /*0x00 <= c &&*/ c <= 0x7F; }
+
+const char *MaybeDemangleGlobalName(const char *name) {
+ // We can spoil names of globals with C linkage, so use a heuristic
+ // approach to check if the name should be demangled.
+ bool should_demangle = false;
+ if (name[0] == '_' && name[1] == 'Z')
+ should_demangle = true;
+ else if (SANITIZER_WINDOWS && name[0] == '\01' && name[1] == '?')
+ should_demangle = true;
+
+ return should_demangle ? Symbolizer::GetOrInit()->Demangle(name) : name;
+}
+
+// Check if the global is a zero-terminated ASCII string. If so, print it.
+void PrintGlobalNameIfASCII(InternalScopedString *str, const __asan_global &g) {
+ for (uptr p = g.beg; p < g.beg + g.size - 1; p++) {
+ unsigned char c = *(unsigned char *)p;
+ if (c == '\0' || !IsASCII(c)) return;
+ }
+ if (*(char *)(g.beg + g.size - 1) != '\0') return;
+ str->append(" '%s' is ascii string '%s'\n", MaybeDemangleGlobalName(g.name),
+ (char *)g.beg);
+}
+
+static const char *GlobalFilename(const __asan_global &g) {
+ const char *res = g.module_name;
+ // Prefer the filename from the source location, if it is available.
+ if (g.location) res = g.location->filename;
+ CHECK(res);
+ return res;
+}
+
+void PrintGlobalLocation(InternalScopedString *str, const __asan_global &g) {
+ str->append("%s", GlobalFilename(g));
+ if (!g.location) return;
+ if (g.location->line_no) str->append(":%d", g.location->line_no);
+ if (g.location->column_no) str->append(":%d", g.location->column_no);
+}
+
} // namespace __asan
// ---------------------- Interface ---------------- {{{1
@@ -319,6 +348,20 @@ void __asan_register_globals(__asan_global *globals, uptr n) {
Printf("=== ID %d; %p %p\n", stack_id, &globals[0], &globals[n - 1]);
}
for (uptr i = 0; i < n; i++) {
+ if (SANITIZER_WINDOWS && globals[i].beg == 0) {
+ // The MSVC incremental linker may pad globals out to 256 bytes. As long
+ // as __asan_global is less than 256 bytes large and its size is a power
+ // of two, we can skip over the padding.
+ static_assert(
+ sizeof(__asan_global) < 256 &&
+ (sizeof(__asan_global) & (sizeof(__asan_global) - 1)) == 0,
+ "sizeof(__asan_global) incompatible with incremental linker padding");
+ // If these are padding bytes, the rest of the global should be zero.
+ CHECK(globals[i].size == 0 && globals[i].size_with_redzone == 0 &&
+ globals[i].name == nullptr && globals[i].module_name == nullptr &&
+ globals[i].odr_indicator == 0);
+ continue;
+ }
RegisterGlobal(&globals[i]);
}
}
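
The static_assert in this hunk leans on a standard bit trick: a positive x is a power of two exactly when x & (x - 1) == 0, and any power of two no larger than 256 divides 256, so the linker's padding always decomposes into whole zero-filled __asan_global records (hence the beg == 0 test). A standalone illustration, not ASan code:

#include <cstdio>

static bool IsPowerOfTwo(unsigned x) {
  // Clearing the lowest set bit of a power of two leaves zero.
  return x != 0 && (x & (x - 1)) == 0;
}

int main() {
  const unsigned sizes[] = {1, 2, 48, 64, 96, 128};
  for (unsigned x : sizes)
    std::printf("%3u: %s, divides 256: %s\n", x,
                IsPowerOfTwo(x) ? "pow2" : "not pow2",
                256 % x == 0 ? "yes" : "no");
}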
@@ -329,6 +372,11 @@ void __asan_unregister_globals(__asan_global *globals, uptr n) {
if (!flags()->report_globals) return;
BlockingMutexLock lock(&mu_for_globals);
for (uptr i = 0; i < n; i++) {
+ if (SANITIZER_WINDOWS && globals[i].beg == 0) {
+ // Skip globals that look like padding from the MSVC incremental linker.
+ // See comment in __asan_register_globals.
+ continue;
+ }
UnregisterGlobal(&globals[i]);
}
}
@@ -339,10 +387,10 @@ void __asan_unregister_globals(__asan_global *globals, uptr n) {
// initializer can only touch global variables in the same TU.
void __asan_before_dynamic_init(const char *module_name) {
if (!flags()->check_initialization_order ||
- !CanPoisonMemory())
+ !CanPoisonMemory() ||
+ !dynamic_init_globals)
return;
bool strict_init_order = flags()->strict_init_order;
- CHECK(dynamic_init_globals);
CHECK(module_name);
CHECK(asan_inited);
BlockingMutexLock lock(&mu_for_globals);
@@ -365,7 +413,8 @@ void __asan_before_dynamic_init(const char *module_name) {
// TU are poisoned. It simply unpoisons all dynamically initialized globals.
void __asan_after_dynamic_init() {
if (!flags()->check_initialization_order ||
- !CanPoisonMemory())
+ !CanPoisonMemory() ||
+ !dynamic_init_globals)
return;
CHECK(asan_inited);
BlockingMutexLock lock(&mu_for_globals);
diff --git a/lib/asan/asan_globals_win.cc b/lib/asan/asan_globals_win.cc
new file mode 100644
index 000000000000..56c0d1a532f9
--- /dev/null
+++ b/lib/asan/asan_globals_win.cc
@@ -0,0 +1,62 @@
+//===-- asan_globals_win.cc -----------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Global registration code that is linked into every Windows DLL and EXE.
+//
+//===----------------------------------------------------------------------===//
+
+#include "asan_interface_internal.h"
+#if SANITIZER_WINDOWS
+
+namespace __asan {
+
+#pragma section(".ASAN$GA", read, write) // NOLINT
+#pragma section(".ASAN$GZ", read, write) // NOLINT
+extern "C" __declspec(allocate(".ASAN$GA"))
+__asan_global __asan_globals_start = {};
+extern "C" __declspec(allocate(".ASAN$GZ"))
+__asan_global __asan_globals_end = {};
+#pragma comment(linker, "/merge:.ASAN=.data")
+
+static void call_on_globals(void (*hook)(__asan_global *, uptr)) {
+ __asan_global *start = &__asan_globals_start + 1;
+ __asan_global *end = &__asan_globals_end;
+ uptr bytediff = (uptr)end - (uptr)start;
+ if (bytediff % sizeof(__asan_global) != 0) {
+#ifdef ASAN_DLL_THUNK
+ __debugbreak();
+#else
+ CHECK("corrupt asan global array");
+#endif
+ }
+ // We know end >= start because the linker sorts the portion after the dollar
+ // sign alphabetically.
+ uptr n = end - start;
+ hook(start, n);
+}
+
+static void register_dso_globals() {
+ call_on_globals(&__asan_register_globals);
+}
+
+static void unregister_dso_globals() {
+ call_on_globals(&__asan_unregister_globals);
+}
+
+// Register globals
+#pragma section(".CRT$XCU", long, read) // NOLINT
+#pragma section(".CRT$XTX", long, read) // NOLINT
+extern "C" __declspec(allocate(".CRT$XCU"))
+void (*const __asan_dso_reg_hook)() = &register_dso_globals;
+extern "C" __declspec(allocate(".CRT$XTX"))
+void (*const __asan_dso_unreg_hook)() = &unregister_dso_globals;
+
+} // namespace __asan
+
+#endif // SANITIZER_WINDOWS
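
This file uses the classic MSVC section-bracketing idiom: the linker sorts section fragments alphabetically by the suffix after the $, so the objects placed in .ASAN$GA and .ASAN$GZ bracket every record emitted into the .ASAN$G* sections between them, and the function pointers planted in .CRT$XCU and .CRT$XTX run at CRT startup and teardown. A generic, MSVC-only sketch of the same idiom with made-up section names (a real setup would also force inclusion via a /include linker pragma, as asan_globals_win.h does below):

#include <stdio.h>

struct MyRecord { const char *name; };

#pragma section(".myrec$a", read)
#pragma section(".myrec$m", read)
#pragma section(".myrec$z", read)
__declspec(allocate(".myrec$a")) static const MyRecord rec_start = {};
__declspec(allocate(".myrec$z")) static const MyRecord rec_end = {};

// Each translation unit contributes records to the middle section.
__declspec(allocate(".myrec$m")) static const MyRecord r1 = {"first"};
__declspec(allocate(".myrec$m")) static const MyRecord r2 = {"second"};

int main() {
  for (const MyRecord *p = &rec_start + 1; p < &rec_end; ++p)
    if (p->name) printf("%s\n", p->name);  // skip zero-filled padding
}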
diff --git a/lib/asan/asan_globals_win.h b/lib/asan/asan_globals_win.h
new file mode 100644
index 000000000000..d4ed9c1f38e1
--- /dev/null
+++ b/lib/asan/asan_globals_win.h
@@ -0,0 +1,34 @@
+//===-- asan_globals_win.h --------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Interface to the Windows-specific global management code. Separated into a
+// standalone header to allow inclusion from asan_win_dynamic_runtime_thunk,
+// which defines symbols that clash with other sanitizer headers.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef ASAN_GLOBALS_WIN_H
+#define ASAN_GLOBALS_WIN_H
+
+#if !defined(_MSC_VER)
+#error "this file is Windows-only, and uses MSVC pragmas"
+#endif
+
+#if defined(_WIN64)
+#define SANITIZER_SYM_PREFIX
+#else
+#define SANITIZER_SYM_PREFIX "_"
+#endif
+
+// Use this macro to force linking asan_globals_win.cc into the DSO.
+#define ASAN_LINK_GLOBALS_WIN() \
+ __pragma( \
+ comment(linker, "/include:" SANITIZER_SYM_PREFIX "__asan_dso_reg_hook"))
+
+#endif // ASAN_GLOBALS_WIN_H
diff --git a/lib/asan/asan_interceptors.cc b/lib/asan/asan_interceptors.cc
index 518ceebf62a5..606016d4f4d3 100644
--- a/lib/asan/asan_interceptors.cc
+++ b/lib/asan/asan_interceptors.cc
@@ -81,6 +81,51 @@ struct AsanInterceptorContext {
} \
} while (0)
+// memcpy is called during __asan_init() from the internals of printf(...).
+// We do not treat memcpy with to==from as a bug.
+// See http://llvm.org/bugs/show_bug.cgi?id=11763.
+#define ASAN_MEMCPY_IMPL(ctx, to, from, size) \
+ do { \
+ if (UNLIKELY(!asan_inited)) return internal_memcpy(to, from, size); \
+ if (asan_init_is_running) { \
+ return REAL(memcpy)(to, from, size); \
+ } \
+ ENSURE_ASAN_INITED(); \
+ if (flags()->replace_intrin) { \
+ if (to != from) { \
+ CHECK_RANGES_OVERLAP("memcpy", to, size, from, size); \
+ } \
+ ASAN_READ_RANGE(ctx, from, size); \
+ ASAN_WRITE_RANGE(ctx, to, size); \
+ } \
+ return REAL(memcpy)(to, from, size); \
+ } while (0)
+
+// memset is called inside Printf.
+#define ASAN_MEMSET_IMPL(ctx, block, c, size) \
+ do { \
+ if (UNLIKELY(!asan_inited)) return internal_memset(block, c, size); \
+ if (asan_init_is_running) { \
+ return REAL(memset)(block, c, size); \
+ } \
+ ENSURE_ASAN_INITED(); \
+ if (flags()->replace_intrin) { \
+ ASAN_WRITE_RANGE(ctx, block, size); \
+ } \
+ return REAL(memset)(block, c, size); \
+ } while (0)
+
+#define ASAN_MEMMOVE_IMPL(ctx, to, from, size) \
+ do { \
+ if (UNLIKELY(!asan_inited)) return internal_memmove(to, from, size); \
+ ENSURE_ASAN_INITED(); \
+ if (flags()->replace_intrin) { \
+ ASAN_READ_RANGE(ctx, from, size); \
+ ASAN_WRITE_RANGE(ctx, to, size); \
+ } \
+ return internal_memmove(to, from, size); \
+ } while (0)
+
#define ASAN_READ_RANGE(ctx, offset, size) \
ACCESS_MEMORY_RANGE(ctx, offset, size, false)
#define ASAN_WRITE_RANGE(ctx, offset, size) \
@@ -198,10 +243,25 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
} else { \
*begin = *end = 0; \
}
-// Asan needs custom handling of these:
-#undef SANITIZER_INTERCEPT_MEMSET
-#undef SANITIZER_INTERCEPT_MEMMOVE
-#undef SANITIZER_INTERCEPT_MEMCPY
+
+#define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size) \
+ do { \
+ ASAN_INTERCEPTOR_ENTER(ctx, memmove); \
+ ASAN_MEMMOVE_IMPL(ctx, to, from, size); \
+ } while (false)
+
+#define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size) \
+ do { \
+ ASAN_INTERCEPTOR_ENTER(ctx, memcpy); \
+ ASAN_MEMCPY_IMPL(ctx, to, from, size); \
+ } while (false)
+
+#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size) \
+ do { \
+ ASAN_INTERCEPTOR_ENTER(ctx, memset); \
+ ASAN_MEMSET_IMPL(ctx, block, c, size); \
+ } while (false)
+
#include "sanitizer_common/sanitizer_common_interceptors.inc"
// Syscall interceptors don't have contexts, we don't support suppressions
@@ -389,90 +449,18 @@ INTERCEPTOR(void, __cxa_throw, void *a, void *b, void *c) {
}
#endif
-// memcpy is called during __asan_init() from the internals of printf(...).
-// We do not treat memcpy with to==from as a bug.
-// See http://llvm.org/bugs/show_bug.cgi?id=11763.
-#define ASAN_MEMCPY_IMPL(ctx, to, from, size) do { \
- if (UNLIKELY(!asan_inited)) return internal_memcpy(to, from, size); \
- if (asan_init_is_running) { \
- return REAL(memcpy)(to, from, size); \
- } \
- ENSURE_ASAN_INITED(); \
- if (flags()->replace_intrin) { \
- if (to != from) { \
- CHECK_RANGES_OVERLAP("memcpy", to, size, from, size); \
- } \
- ASAN_READ_RANGE(ctx, from, size); \
- ASAN_WRITE_RANGE(ctx, to, size); \
- } \
- return REAL(memcpy)(to, from, size); \
- } while (0)
-
-
void *__asan_memcpy(void *to, const void *from, uptr size) {
ASAN_MEMCPY_IMPL(nullptr, to, from, size);
}
-// memset is called inside Printf.
-#define ASAN_MEMSET_IMPL(ctx, block, c, size) do { \
- if (UNLIKELY(!asan_inited)) return internal_memset(block, c, size); \
- if (asan_init_is_running) { \
- return REAL(memset)(block, c, size); \
- } \
- ENSURE_ASAN_INITED(); \
- if (flags()->replace_intrin) { \
- ASAN_WRITE_RANGE(ctx, block, size); \
- } \
- return REAL(memset)(block, c, size); \
- } while (0)
-
void *__asan_memset(void *block, int c, uptr size) {
ASAN_MEMSET_IMPL(nullptr, block, c, size);
}
-#define ASAN_MEMMOVE_IMPL(ctx, to, from, size) do { \
- if (UNLIKELY(!asan_inited)) \
- return internal_memmove(to, from, size); \
- ENSURE_ASAN_INITED(); \
- if (flags()->replace_intrin) { \
- ASAN_READ_RANGE(ctx, from, size); \
- ASAN_WRITE_RANGE(ctx, to, size); \
- } \
- return internal_memmove(to, from, size); \
- } while (0)
-
void *__asan_memmove(void *to, const void *from, uptr size) {
ASAN_MEMMOVE_IMPL(nullptr, to, from, size);
}
-INTERCEPTOR(void*, memmove, void *to, const void *from, uptr size) {
- void *ctx;
- ASAN_INTERCEPTOR_ENTER(ctx, memmove);
- ASAN_MEMMOVE_IMPL(ctx, to, from, size);
-}
-
-INTERCEPTOR(void*, memcpy, void *to, const void *from, uptr size) {
- void *ctx;
- ASAN_INTERCEPTOR_ENTER(ctx, memcpy);
-#if !SANITIZER_MAC
- ASAN_MEMCPY_IMPL(ctx, to, from, size);
-#else
- // At least on 10.7 and 10.8 both memcpy() and memmove() are being replaced
- // with WRAP(memcpy). As a result, false positives are reported for memmove()
- // calls. If we just disable error reporting with
- // ASAN_OPTIONS=replace_intrin=0, memmove() is still replaced with
- // internal_memcpy(), which may lead to crashes, see
- // http://llvm.org/bugs/show_bug.cgi?id=16362.
- ASAN_MEMMOVE_IMPL(ctx, to, from, size);
-#endif // !SANITIZER_MAC
-}
-
-INTERCEPTOR(void*, memset, void *block, int c, uptr size) {
- void *ctx;
- ASAN_INTERCEPTOR_ENTER(ctx, memset);
- ASAN_MEMSET_IMPL(ctx, block, c, size);
-}
-
#if ASAN_INTERCEPT_INDEX
# if ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX
INTERCEPTOR(char*, index, const char *string, int c)
@@ -720,19 +708,10 @@ INTERCEPTOR(int, fork, void) {
namespace __asan {
void InitializeAsanInterceptors() {
static bool was_called_once;
- CHECK(was_called_once == false);
+ CHECK(!was_called_once);
was_called_once = true;
InitializeCommonInterceptors();
- // Intercept mem* functions.
- ASAN_INTERCEPT_FUNC(memcpy);
- ASAN_INTERCEPT_FUNC(memset);
- if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) {
- // In asan, REAL(memmove) is not used, but it is used in msan.
- ASAN_INTERCEPT_FUNC(memmove);
- }
- CHECK(REAL(memcpy));
-
// Intercept str* functions.
ASAN_INTERCEPT_FUNC(strcat); // NOLINT
ASAN_INTERCEPT_FUNC(strcpy); // NOLINT
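
With memcpy now routed through the common interceptors, an overlapping copy is reported as memcpy-param-overlap via the CHECK_RANGES_OVERLAP macro above. A small program that should trip the check when built with -fsanitize=address (replace_intrin is on by default):

#include <string.h>

int main() {
  char buf[16] = "hello world";
  // Destination [3, 11) overlaps source [0, 8): ASan aborts with a
  // memcpy-param-overlap report before the copy is performed.
  memcpy(buf + 3, buf, 8);
  return buf[0];
}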
diff --git a/lib/asan/asan_interface_internal.h b/lib/asan/asan_interface_internal.h
index 3cf3413dc126..8cd424cc03e8 100644
--- a/lib/asan/asan_interface_internal.h
+++ b/lib/asan/asan_interface_internal.h
@@ -23,6 +23,8 @@
#include "asan_init_version.h"
using __sanitizer::uptr;
+using __sanitizer::u64;
+using __sanitizer::u32;
extern "C" {
// This function should be called at the very beginning of the process,
@@ -79,6 +81,20 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_after_dynamic_init();
+ // Sets the bytes of the given range of shadow memory to a specific value.
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_set_shadow_00(uptr addr, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_set_shadow_f1(uptr addr, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_set_shadow_f2(uptr addr, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_set_shadow_f3(uptr addr, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_set_shadow_f5(uptr addr, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_set_shadow_f8(uptr addr, uptr size);
+
// These two functions are used by instrumented code in the
// use-after-scope mode. They mark memory for local variables as
// unaddressable when they leave scope and addressable before the
@@ -156,6 +172,9 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
/* OPTIONAL */ const char* __asan_default_options();
+ SANITIZER_INTERFACE_ATTRIBUTE
+ extern uptr __asan_shadow_memory_dynamic_address;
+
// Global flag, copy of ASAN_OPTIONS=detect_stack_use_after_return
SANITIZER_INTERFACE_ATTRIBUTE
extern int __asan_option_detect_stack_use_after_return;
diff --git a/lib/asan/asan_internal.h b/lib/asan/asan_internal.h
index 20142372e2e3..1dc678c0c357 100644
--- a/lib/asan/asan_internal.h
+++ b/lib/asan/asan_internal.h
@@ -36,7 +36,7 @@
// If set, values like allocator chunk size, as well as defaults for some flags
// will be changed towards less memory overhead.
#ifndef ASAN_LOW_MEMORY
-# if SANITIZER_IOS || (SANITIZER_WORDSIZE == 32)
+# if SANITIZER_IOS || SANITIZER_ANDROID
# define ASAN_LOW_MEMORY 1
# else
# define ASAN_LOW_MEMORY 0
@@ -65,6 +65,9 @@ void AsanInitFromRtl();
// asan_win.cc
void InitializePlatformExceptionHandlers();
+// asan_win.cc / asan_posix.cc
+const char *DescribeSignalOrException(int signo);
+
// asan_rtl.cc
void NORETURN ShowStatsAndAbort();
@@ -100,17 +103,6 @@ void *AsanDlSymNext(const char *sym);
void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name);
-// Platform-specific options.
-#if SANITIZER_MAC
-bool PlatformHasDifferentMemcpyAndMemmove();
-# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE \
- (PlatformHasDifferentMemcpyAndMemmove())
-#elif SANITIZER_WINDOWS64
-# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE false
-#else
-# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE true
-#endif // SANITIZER_MAC
-
// Add convenient macro for interface functions that may be represented as
// weak hooks.
#define ASAN_MALLOC_HOOK(ptr, size) \
@@ -132,12 +124,10 @@ extern bool asan_init_is_running;
extern void (*death_callback)(void);
// These magic values are written to shadow for better error reporting.
const int kAsanHeapLeftRedzoneMagic = 0xfa;
-const int kAsanHeapRightRedzoneMagic = 0xfb;
const int kAsanHeapFreeMagic = 0xfd;
const int kAsanStackLeftRedzoneMagic = 0xf1;
const int kAsanStackMidRedzoneMagic = 0xf2;
const int kAsanStackRightRedzoneMagic = 0xf3;
-const int kAsanStackPartialRedzoneMagic = 0xf4;
const int kAsanStackAfterReturnMagic = 0xf5;
const int kAsanInitializationOrderMagic = 0xf6;
const int kAsanUserPoisonedMemoryMagic = 0xf7;
diff --git a/lib/asan/asan_mac.cc b/lib/asan/asan_mac.cc
index 525864f72d32..baf533ac96ac 100644
--- a/lib/asan/asan_mac.cc
+++ b/lib/asan/asan_mac.cc
@@ -49,15 +49,6 @@ namespace __asan {
void InitializePlatformInterceptors() {}
void InitializePlatformExceptionHandlers() {}
-bool PlatformHasDifferentMemcpyAndMemmove() {
- // On OS X 10.7 memcpy() and memmove() are both resolved
- // into memmove$VARIANT$sse42.
- // See also https://github.com/google/sanitizers/issues/34.
- // TODO(glider): need to check dynamically that memcpy() and memmove() are
- // actually the same function.
- return GetMacosVersion() == MACOS_VERSION_SNOW_LEOPARD;
-}
-
// No-op. Mac does not support static linkage anyway.
void *AsanDoesNotSupportStaticLinkage() {
return 0;
diff --git a/lib/asan/asan_malloc_linux.cc b/lib/asan/asan_malloc_linux.cc
index 162abd29f191..a78767c19f0f 100644
--- a/lib/asan/asan_malloc_linux.cc
+++ b/lib/asan/asan_malloc_linux.cc
@@ -78,7 +78,13 @@ INTERCEPTOR(void*, realloc, void *ptr, uptr size) {
if (UNLIKELY(IsInDlsymAllocPool(ptr))) {
uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
uptr copy_size = Min(size, kDlsymAllocPoolSize - offset);
- void *new_ptr = asan_malloc(size, &stack);
+ void *new_ptr;
+ if (UNLIKELY(!asan_inited)) {
+ new_ptr = AllocateFromLocalPool(size);
+ } else {
+ copy_size = size;
+ new_ptr = asan_malloc(copy_size, &stack);
+ }
internal_memcpy(new_ptr, ptr, copy_size);
return new_ptr;
}
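
The fix handles realloc() reaching this path before __asan_init has finished: the replacement block must then come from the static dlsym pool too, since the real allocator is not ready. AllocateFromLocalPool is not shown in this hunk; a plausible shape for it, hedged as a guess rather than the actual implementation, is a bump allocator over the static pool:

#include <stddef.h>

// Hypothetical sketch; the real pool in asan_malloc_linux.cc may differ.
static const size_t kDlsymAllocPoolSize = 1024;
static unsigned long long alloc_memory_for_dlsym[kDlsymAllocPoolSize];
static size_t allocated_for_dlsym;

static void *AllocateFromLocalPool(size_t size_in_bytes) {
  size_t size_in_words = (size_in_bytes + 7) / 8;  // round up to u64 words
  void *mem = (void *)&alloc_memory_for_dlsym[allocated_for_dlsym];
  allocated_for_dlsym += size_in_words;
  // A real implementation must verify allocated_for_dlsym stays in bounds.
  return mem;
}

int main() {
  char *p = (char *)AllocateFromLocalPool(13);
  p[0] = 'x';  // the pool is plain static storage, safe to touch
  return p[0] == 'x' ? 0 : 1;
}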
diff --git a/lib/asan/asan_malloc_win.cc b/lib/asan/asan_malloc_win.cc
index 4a233dfe968c..05148d51e6b8 100644
--- a/lib/asan/asan_malloc_win.cc
+++ b/lib/asan/asan_malloc_win.cc
@@ -125,6 +125,11 @@ void *_recalloc(void *p, size_t n, size_t elem_size) {
}
ALLOCATION_FUNCTION_ATTRIBUTE
+void *_recalloc_base(void *p, size_t n, size_t elem_size) {
+ return _recalloc(p, n, elem_size);
+}
+
+ALLOCATION_FUNCTION_ATTRIBUTE
size_t _msize(const void *ptr) {
GET_CURRENT_PC_BP_SP;
(void)sp;
@@ -223,6 +228,7 @@ void ReplaceSystemMalloc() {
TryToOverrideFunction("_realloc_base", (uptr)realloc);
TryToOverrideFunction("_realloc_crt", (uptr)realloc);
TryToOverrideFunction("_recalloc", (uptr)_recalloc);
+ TryToOverrideFunction("_recalloc_base", (uptr)_recalloc);
TryToOverrideFunction("_recalloc_crt", (uptr)_recalloc);
TryToOverrideFunction("_msize", (uptr)_msize);
TryToOverrideFunction("_expand", (uptr)_expand);
diff --git a/lib/asan/asan_mapping.h b/lib/asan/asan_mapping.h
index 52c4f67a5d04..d8e60a4b34a9 100644
--- a/lib/asan/asan_mapping.h
+++ b/lib/asan/asan_mapping.h
@@ -125,6 +125,7 @@
// || `[0x00000000, 0x2fffffff]` || LowMem ||
static const u64 kDefaultShadowScale = 3;
+static const u64 kDefaultShadowSentinel = ~(uptr)0;
static const u64 kDefaultShadowOffset32 = 1ULL << 29; // 0x20000000
static const u64 kDefaultShadowOffset64 = 1ULL << 44;
static const u64 kDefaultShort64bitShadowOffset = 0x7FFF8000; // < 2G.
@@ -140,7 +141,6 @@ static const u64 kSystemZ_ShadowOffset64 = 1ULL << 52;
static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000
static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000
static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000
-static const u64 kWindowsShadowOffset64 = 1ULL << 45; // 32TB
#define SHADOW_SCALE kDefaultShadowScale
@@ -168,7 +168,7 @@ static const u64 kWindowsShadowOffset64 = 1ULL << 45; // 32TB
# if SANITIZER_IOSSIM
# define SHADOW_OFFSET kIosSimShadowOffset64
# else
-# define SHADOW_OFFSET kIosShadowOffset64
+# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
# endif
# elif defined(__aarch64__)
# define SHADOW_OFFSET kAArch64_ShadowOffset64
@@ -183,7 +183,7 @@ static const u64 kWindowsShadowOffset64 = 1ULL << 45; // 32TB
# elif defined(__mips64)
# define SHADOW_OFFSET kMIPS64_ShadowOffset64
# elif SANITIZER_WINDOWS64
-# define SHADOW_OFFSET kWindowsShadowOffset64
+# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
# else
# define SHADOW_OFFSET kDefaultShort64bitShadowOffset
# endif
@@ -269,9 +269,25 @@ static inline bool AddrIsInMidMem(uptr a) {
return kMidMemBeg && a >= kMidMemBeg && a <= kMidMemEnd;
}
+static inline bool AddrIsInShadowGap(uptr a) {
+ PROFILE_ASAN_MAPPING();
+ if (kMidMemBeg) {
+ if (a <= kShadowGapEnd)
+ return SHADOW_OFFSET == 0 || a >= kShadowGapBeg;
+ return (a >= kShadowGap2Beg && a <= kShadowGap2End) ||
+ (a >= kShadowGap3Beg && a <= kShadowGap3End);
+ }
+  // In zero-based shadow mode we treat addresses near zero as addresses
+  // in the shadow gap as well.
+ if (SHADOW_OFFSET == 0)
+ return a <= kShadowGapEnd;
+ return a >= kShadowGapBeg && a <= kShadowGapEnd;
+}
+
static inline bool AddrIsInMem(uptr a) {
PROFILE_ASAN_MAPPING();
- return AddrIsInLowMem(a) || AddrIsInMidMem(a) || AddrIsInHighMem(a);
+ return AddrIsInLowMem(a) || AddrIsInMidMem(a) || AddrIsInHighMem(a) ||
+ (flags()->protect_shadow_gap == 0 && AddrIsInShadowGap(a));
}
static inline uptr MemToShadow(uptr p) {
@@ -295,21 +311,6 @@ static inline bool AddrIsInShadow(uptr a) {
return AddrIsInLowShadow(a) || AddrIsInMidShadow(a) || AddrIsInHighShadow(a);
}
-static inline bool AddrIsInShadowGap(uptr a) {
- PROFILE_ASAN_MAPPING();
- if (kMidMemBeg) {
- if (a <= kShadowGapEnd)
- return SHADOW_OFFSET == 0 || a >= kShadowGapBeg;
- return (a >= kShadowGap2Beg && a <= kShadowGap2End) ||
- (a >= kShadowGap3Beg && a <= kShadowGap3End);
- }
- // In zero-based shadow mode we treat addresses near zero as addresses
- // in shadow gap as well.
- if (SHADOW_OFFSET == 0)
- return a <= kShadowGapEnd;
- return a >= kShadowGapBeg && a <= kShadowGapEnd;
-}
-
static inline bool AddrIsAlignedByGranularity(uptr a) {
PROFILE_ASAN_MAPPING();
return (a & (SHADOW_GRANULARITY - 1)) == 0;
diff --git a/lib/asan/asan_memory_profile.cc b/lib/asan/asan_memory_profile.cc
index ba0051634b92..c55264ef5d57 100644
--- a/lib/asan/asan_memory_profile.cc
+++ b/lib/asan/asan_memory_profile.cc
@@ -74,7 +74,7 @@ class HeapProfile {
static void ChunkCallback(uptr chunk, void *arg) {
HeapProfile *hp = reinterpret_cast<HeapProfile*>(arg);
- AsanChunkView cv = FindHeapChunkByAddress(chunk);
+ AsanChunkView cv = FindHeapChunkByAllocBeg(chunk);
if (!cv.IsAllocated()) return;
u32 id = cv.GetAllocStackId();
if (!id) return;
diff --git a/lib/asan/asan_new_delete.cc b/lib/asan/asan_new_delete.cc
index fef66041f606..3283fb3942cf 100644
--- a/lib/asan/asan_new_delete.cc
+++ b/lib/asan/asan_new_delete.cc
@@ -45,17 +45,30 @@
using namespace __asan; // NOLINT
+// FreeBSD prior to v9.2 has a wrong definition of 'size_t'.
+// http://svnweb.freebsd.org/base?view=revision&revision=232261
+#if SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 32
+#include <sys/param.h>
+#if __FreeBSD_version <= 902001 // v9.2
+#define size_t unsigned
+#endif // __FreeBSD_version
+#endif // SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 32
+
// This code has issues on OSX.
// See https://github.com/google/sanitizers/issues/131.
-// Fake std::nothrow_t to avoid including <new>.
+// Fake std::nothrow_t and std::align_val_t to avoid including <new>.
namespace std {
struct nothrow_t {};
+enum class align_val_t: size_t {};
} // namespace std
#define OPERATOR_NEW_BODY(type) \
GET_STACK_TRACE_MALLOC;\
return asan_memalign(0, size, &stack, type);
+#define OPERATOR_NEW_BODY_ALIGN(type) \
+ GET_STACK_TRACE_MALLOC;\
+ return asan_memalign((uptr)align, size, &stack, type);
// On OS X it's not enough to just provide our own 'operator new' and
// 'operator delete' implementations, because they're going to be in the
@@ -65,15 +78,6 @@ struct nothrow_t {};
// To make sure that C++ allocation/deallocation operators are overridden on
// OS X we need to intercept them using their mangled names.
#if !SANITIZER_MAC
-// FreeBSD prior v9.2 have wrong definition of 'size_t'.
-// http://svnweb.freebsd.org/base?view=revision&revision=232261
-#if SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 32
-#include <sys/param.h>
-#if __FreeBSD_version <= 902001 // v9.2
-#define size_t unsigned
-#endif // __FreeBSD_version
-#endif // SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 32
-
CXX_OPERATOR_ATTRIBUTE
void *operator new(size_t size) { OPERATOR_NEW_BODY(FROM_NEW); }
CXX_OPERATOR_ATTRIBUTE
@@ -84,6 +88,18 @@ void *operator new(size_t size, std::nothrow_t const&)
CXX_OPERATOR_ATTRIBUTE
void *operator new[](size_t size, std::nothrow_t const&)
{ OPERATOR_NEW_BODY(FROM_NEW_BR); }
+CXX_OPERATOR_ATTRIBUTE
+void *operator new(size_t size, std::align_val_t align)
+{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW); }
+CXX_OPERATOR_ATTRIBUTE
+void *operator new[](size_t size, std::align_val_t align)
+{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW_BR); }
+CXX_OPERATOR_ATTRIBUTE
+void *operator new(size_t size, std::align_val_t align, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW); }
+CXX_OPERATOR_ATTRIBUTE
+void *operator new[](size_t size, std::align_val_t align, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW_BR); }
#else // SANITIZER_MAC
INTERCEPTOR(void *, _Znwm, size_t size) {
@@ -131,6 +147,32 @@ void operator delete[](void *ptr, size_t size) NOEXCEPT {
GET_STACK_TRACE_FREE;
asan_sized_free(ptr, size, &stack, FROM_NEW_BR);
}
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr, std::align_val_t) NOEXCEPT {
+ OPERATOR_DELETE_BODY(FROM_NEW);
+}
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr, std::align_val_t) NOEXCEPT {
+ OPERATOR_DELETE_BODY(FROM_NEW_BR);
+}
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr, std::align_val_t, std::nothrow_t const&) {
+ OPERATOR_DELETE_BODY(FROM_NEW);
+}
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr, std::align_val_t, std::nothrow_t const&) {
+ OPERATOR_DELETE_BODY(FROM_NEW_BR);
+}
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr, size_t size, std::align_val_t) NOEXCEPT {
+ GET_STACK_TRACE_FREE;
+ asan_sized_free(ptr, size, &stack, FROM_NEW);
+}
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr, size_t size, std::align_val_t) NOEXCEPT {
+ GET_STACK_TRACE_FREE;
+ asan_sized_free(ptr, size, &stack, FROM_NEW_BR);
+}
#else // SANITIZER_MAC
INTERCEPTOR(void, _ZdlPv, void *ptr) {
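
The user-side view of what the new overloads intercept is C++17 aligned allocation. An illustrative program (compile with -std=c++17 -fsanitize=address):

    #include <new>

    int main() {
      void *p = ::operator new(64, std::align_val_t(32));  // aligned new
      ::operator delete(p, std::align_val_t(32));          // matching delete
      return 0;
    }
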
diff --git a/lib/asan/asan_poisoning.cc b/lib/asan/asan_poisoning.cc
index 50877ae59115..abb75ab3bf92 100644
--- a/lib/asan/asan_poisoning.cc
+++ b/lib/asan/asan_poisoning.cc
@@ -64,12 +64,9 @@ struct ShadowSegmentEndpoint {
};
void FlushUnneededASanShadowMemory(uptr p, uptr size) {
- // Since asan's mapping is compacting, the shadow chunk may be
- // not page-aligned, so we only flush the page-aligned portion.
- uptr page_size = GetPageSizeCached();
- uptr shadow_beg = RoundUpTo(MemToShadow(p), page_size);
- uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size);
- FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
+  // Since asan's mapping is compacting, the shadow chunk may not be
+  // page-aligned, so we only flush the page-aligned portion.
+ ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
}
void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) {
@@ -117,9 +114,9 @@ void __asan_poison_memory_region(void const volatile *addr, uptr size) {
ShadowSegmentEndpoint beg(beg_addr);
ShadowSegmentEndpoint end(end_addr);
if (beg.chunk == end.chunk) {
- CHECK(beg.offset < end.offset);
+ CHECK_LT(beg.offset, end.offset);
s8 value = beg.value;
- CHECK(value == end.value);
+ CHECK_EQ(value, end.value);
// We can only poison memory if the byte in end.offset is unaddressable.
// No need to re-poison memory if it is poisoned already.
if (value > 0 && value <= end.offset) {
@@ -131,7 +128,7 @@ void __asan_poison_memory_region(void const volatile *addr, uptr size) {
}
return;
}
- CHECK(beg.chunk < end.chunk);
+ CHECK_LT(beg.chunk, end.chunk);
if (beg.offset > 0) {
// Mark bytes from beg.offset as unaddressable.
if (beg.value == 0) {
@@ -157,9 +154,9 @@ void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
ShadowSegmentEndpoint beg(beg_addr);
ShadowSegmentEndpoint end(end_addr);
if (beg.chunk == end.chunk) {
- CHECK(beg.offset < end.offset);
+ CHECK_LT(beg.offset, end.offset);
s8 value = beg.value;
- CHECK(value == end.value);
+ CHECK_EQ(value, end.value);
    // We unpoison memory bytes up to end.offset if it is not
// unpoisoned already.
if (value != 0) {
@@ -167,7 +164,7 @@ void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
}
return;
}
- CHECK(beg.chunk < end.chunk);
+ CHECK_LT(beg.chunk, end.chunk);
if (beg.offset > 0) {
*beg.chunk = 0;
beg.chunk++;
@@ -314,6 +311,30 @@ static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
}
}
+void __asan_set_shadow_00(uptr addr, uptr size) {
+ REAL(memset)((void *)addr, 0, size);
+}
+
+void __asan_set_shadow_f1(uptr addr, uptr size) {
+ REAL(memset)((void *)addr, 0xf1, size);
+}
+
+void __asan_set_shadow_f2(uptr addr, uptr size) {
+ REAL(memset)((void *)addr, 0xf2, size);
+}
+
+void __asan_set_shadow_f3(uptr addr, uptr size) {
+ REAL(memset)((void *)addr, 0xf3, size);
+}
+
+void __asan_set_shadow_f5(uptr addr, uptr size) {
+ REAL(memset)((void *)addr, 0xf5, size);
+}
+
+void __asan_set_shadow_f8(uptr addr, uptr size) {
+ REAL(memset)((void *)addr, 0xf8, size);
+}
+
void __asan_poison_stack_memory(uptr addr, uptr size) {
VReport(1, "poisoning: %p %zx\n", (void *)addr, size);
PoisonAlignedStackMemory(addr, size, true);
@@ -388,7 +409,7 @@ const void *__sanitizer_contiguous_container_find_bad_address(
// ending with end.
uptr kMaxRangeToCheck = 32;
uptr r1_beg = beg;
- uptr r1_end = Min(end + kMaxRangeToCheck, mid);
+ uptr r1_end = Min(beg + kMaxRangeToCheck, mid);
uptr r2_beg = Max(beg, mid - kMaxRangeToCheck);
uptr r2_end = Min(end, mid + kMaxRangeToCheck);
uptr r3_beg = Max(end - kMaxRangeToCheck, mid);
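
The public face of this file is the manual poisoning interface: user code can poison and unpoison its own regions through the macros in <sanitizer/asan_interface.h>, which wrap the functions defined here. A minimal usage sketch, assuming a -fsanitize=address build:

    #include <sanitizer/asan_interface.h>

    void quarantine(char *buf, unsigned long n) {
      ASAN_POISON_MEMORY_REGION(buf, n);    // accesses now report use-after-poison
      // ... buf must not be touched in this window ...
      ASAN_UNPOISON_MEMORY_REGION(buf, n);  // make the region usable again
    }
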
diff --git a/lib/asan/asan_poisoning.h b/lib/asan/asan_poisoning.h
index 6344225f0f64..cc3281e08a71 100644
--- a/lib/asan/asan_poisoning.h
+++ b/lib/asan/asan_poisoning.h
@@ -86,8 +86,8 @@ ALWAYS_INLINE void FastPoisonShadowPartialRightRedzone(
}
}
-// Calls __sanitizer::FlushUnneededShadowMemory() on
-// [MemToShadow(p), MemToShadow(p+size)] with proper rounding.
+// Calls __sanitizer::ReleaseMemoryPagesToOS() on
+// [MemToShadow(p), MemToShadow(p+size)].
void FlushUnneededASanShadowMemory(uptr p, uptr size);
} // namespace __asan
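
On POSIX, ReleaseMemoryPagesToOS amounts to madvise(MADV_DONTNEED) over the page-aligned subrange; the rounding the old caller did by hand now lives behind that call. A hedged sketch of the idea:

    #include <stdint.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static void release_pages(uintptr_t beg, uintptr_t end) {
      uintptr_t page = (uintptr_t)sysconf(_SC_PAGESIZE);
      uintptr_t b = (beg + page - 1) & ~(page - 1);   // round up to a page
      uintptr_t e = end & ~(page - 1);                // round down to a page
      if (b < e)
        madvise((void *)b, e - b, MADV_DONTNEED);     // hand pages back to the OS
    }
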
diff --git a/lib/asan/asan_posix.cc b/lib/asan/asan_posix.cc
index 84a29ec6a9d9..8e5676309ae0 100644
--- a/lib/asan/asan_posix.cc
+++ b/lib/asan/asan_posix.cc
@@ -33,6 +33,19 @@
namespace __asan {
+const char *DescribeSignalOrException(int signo) {
+ switch (signo) {
+ case SIGFPE:
+ return "FPE";
+ case SIGILL:
+ return "ILL";
+ case SIGABRT:
+ return "ABRT";
+ default:
+ return "SEGV";
+ }
+}
+
void AsanOnDeadlySignal(int signo, void *siginfo, void *context) {
ScopedDeadlySignal signal_scope(GetCurrentThread());
int code = (int)((siginfo_t*)siginfo)->si_code;
@@ -84,12 +97,8 @@ void AsanOnDeadlySignal(int signo, void *siginfo, void *context) {
// unaligned memory access.
if (IsStackAccess && (code == si_SEGV_MAPERR || code == si_SEGV_ACCERR))
ReportStackOverflow(sig);
- else if (signo == SIGFPE)
- ReportDeadlySignal("FPE", sig);
- else if (signo == SIGILL)
- ReportDeadlySignal("ILL", sig);
else
- ReportDeadlySignal("SEGV", sig);
+ ReportDeadlySignal(signo, sig);
}
// ---------------------- TSD ---------------- {{{1
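
With the name lookup centralized, handlers forward the raw signal number and the reporting layer resolves the label (SIGSEGV, SIGBUS, and anything else unlisted all fall through to "SEGV"). A standalone demo of the same shape, separate from the runtime:

    #include <csignal>
    #include <cstdio>

    static const char *describe_signal(int signo) {
      switch (signo) {
        case SIGFPE:  return "FPE";
        case SIGILL:  return "ILL";
        case SIGABRT: return "ABRT";
        default:      return "SEGV";   // catch-all, as in the hunk above
      }
    }

    int main() {
      std::printf("label for SIGFPE: %s\n", describe_signal(SIGFPE));
      return 0;
    }
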
diff --git a/lib/asan/asan_report.cc b/lib/asan/asan_report.cc
index 9f2f12d51eaa..937ba4077d43 100644
--- a/lib/asan/asan_report.cc
+++ b/lib/asan/asan_report.cc
@@ -12,7 +12,9 @@
// This file contains error reporting code.
//===----------------------------------------------------------------------===//
+#include "asan_errors.h"
#include "asan_flags.h"
+#include "asan_descriptions.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_report.h"
@@ -35,19 +37,6 @@ static BlockingMutex error_message_buf_mutex(LINKER_INITIALIZED);
static const unsigned kAsanBuggyPcPoolSize = 25;
static __sanitizer::atomic_uintptr_t AsanBuggyPcPool[kAsanBuggyPcPoolSize];
-struct ReportData {
- uptr pc;
- uptr sp;
- uptr bp;
- uptr addr;
- bool is_write;
- uptr access_size;
- const char *description;
-};
-
-static bool report_happened = false;
-static ReportData report_data = {};
-
void AppendToErrorMessageBuffer(const char *buffer) {
BlockingMutexLock l(&error_message_buf_mutex);
if (!error_message_buffer) {
@@ -65,60 +54,10 @@ void AppendToErrorMessageBuffer(const char *buffer) {
error_message_buffer_pos += Min(remaining, length);
}
-// ---------------------- Decorator ------------------------------ {{{1
-class Decorator: public __sanitizer::SanitizerCommonDecorator {
- public:
- Decorator() : SanitizerCommonDecorator() { }
- const char *Access() { return Blue(); }
- const char *EndAccess() { return Default(); }
- const char *Location() { return Green(); }
- const char *EndLocation() { return Default(); }
- const char *Allocation() { return Magenta(); }
- const char *EndAllocation() { return Default(); }
-
- const char *ShadowByte(u8 byte) {
- switch (byte) {
- case kAsanHeapLeftRedzoneMagic:
- case kAsanHeapRightRedzoneMagic:
- case kAsanArrayCookieMagic:
- return Red();
- case kAsanHeapFreeMagic:
- return Magenta();
- case kAsanStackLeftRedzoneMagic:
- case kAsanStackMidRedzoneMagic:
- case kAsanStackRightRedzoneMagic:
- case kAsanStackPartialRedzoneMagic:
- return Red();
- case kAsanStackAfterReturnMagic:
- return Magenta();
- case kAsanInitializationOrderMagic:
- return Cyan();
- case kAsanUserPoisonedMemoryMagic:
- case kAsanContiguousContainerOOBMagic:
- case kAsanAllocaLeftMagic:
- case kAsanAllocaRightMagic:
- return Blue();
- case kAsanStackUseAfterScopeMagic:
- return Magenta();
- case kAsanGlobalRedzoneMagic:
- return Red();
- case kAsanInternalHeapMagic:
- return Yellow();
- case kAsanIntraObjectRedzone:
- return Yellow();
- default:
- return Default();
- }
- }
- const char *EndShadowByte() { return Default(); }
- const char *MemoryByte() { return Magenta(); }
- const char *EndMemoryByte() { return Default(); }
-};
-
// ---------------------- Helper functions ----------------------- {{{1
-static void PrintMemoryByte(InternalScopedString *str, const char *before,
- u8 byte, bool in_shadow, const char *after = "\n") {
+void PrintMemoryByte(InternalScopedString *str, const char *before, u8 byte,
+ bool in_shadow, const char *after) {
Decorator d;
str->append("%s%s%x%x%s%s", before,
in_shadow ? d.ShadowByte(byte) : d.MemoryByte(),
@@ -126,99 +65,6 @@ static void PrintMemoryByte(InternalScopedString *str, const char *before,
in_shadow ? d.EndShadowByte() : d.EndMemoryByte(), after);
}
-static void PrintShadowByte(InternalScopedString *str, const char *before,
- u8 byte, const char *after = "\n") {
- PrintMemoryByte(str, before, byte, /*in_shadow*/true, after);
-}
-
-static void PrintShadowBytes(InternalScopedString *str, const char *before,
- u8 *bytes, u8 *guilty, uptr n) {
- Decorator d;
- if (before) str->append("%s%p:", before, bytes);
- for (uptr i = 0; i < n; i++) {
- u8 *p = bytes + i;
- const char *before =
- p == guilty ? "[" : (p - 1 == guilty && i != 0) ? "" : " ";
- const char *after = p == guilty ? "]" : "";
- PrintShadowByte(str, before, *p, after);
- }
- str->append("\n");
-}
-
-static void PrintLegend(InternalScopedString *str) {
- str->append(
- "Shadow byte legend (one shadow byte represents %d "
- "application bytes):\n",
- (int)SHADOW_GRANULARITY);
- PrintShadowByte(str, " Addressable: ", 0);
- str->append(" Partially addressable: ");
- for (u8 i = 1; i < SHADOW_GRANULARITY; i++) PrintShadowByte(str, "", i, " ");
- str->append("\n");
- PrintShadowByte(str, " Heap left redzone: ",
- kAsanHeapLeftRedzoneMagic);
- PrintShadowByte(str, " Heap right redzone: ",
- kAsanHeapRightRedzoneMagic);
- PrintShadowByte(str, " Freed heap region: ", kAsanHeapFreeMagic);
- PrintShadowByte(str, " Stack left redzone: ",
- kAsanStackLeftRedzoneMagic);
- PrintShadowByte(str, " Stack mid redzone: ",
- kAsanStackMidRedzoneMagic);
- PrintShadowByte(str, " Stack right redzone: ",
- kAsanStackRightRedzoneMagic);
- PrintShadowByte(str, " Stack partial redzone: ",
- kAsanStackPartialRedzoneMagic);
- PrintShadowByte(str, " Stack after return: ",
- kAsanStackAfterReturnMagic);
- PrintShadowByte(str, " Stack use after scope: ",
- kAsanStackUseAfterScopeMagic);
- PrintShadowByte(str, " Global redzone: ", kAsanGlobalRedzoneMagic);
- PrintShadowByte(str, " Global init order: ",
- kAsanInitializationOrderMagic);
- PrintShadowByte(str, " Poisoned by user: ",
- kAsanUserPoisonedMemoryMagic);
- PrintShadowByte(str, " Container overflow: ",
- kAsanContiguousContainerOOBMagic);
- PrintShadowByte(str, " Array cookie: ",
- kAsanArrayCookieMagic);
- PrintShadowByte(str, " Intra object redzone: ",
- kAsanIntraObjectRedzone);
- PrintShadowByte(str, " ASan internal: ", kAsanInternalHeapMagic);
- PrintShadowByte(str, " Left alloca redzone: ", kAsanAllocaLeftMagic);
- PrintShadowByte(str, " Right alloca redzone: ", kAsanAllocaRightMagic);
-}
-
-void MaybeDumpInstructionBytes(uptr pc) {
- if (!flags()->dump_instruction_bytes || (pc < GetPageSizeCached()))
- return;
- InternalScopedString str(1024);
- str.append("First 16 instruction bytes at pc: ");
- if (IsAccessibleMemoryRange(pc, 16)) {
- for (int i = 0; i < 16; ++i) {
- PrintMemoryByte(&str, "", ((u8 *)pc)[i], /*in_shadow*/false, " ");
- }
- str.append("\n");
- } else {
- str.append("unaccessible\n");
- }
- Report("%s", str.data());
-}
-
-static void PrintShadowMemoryForAddress(uptr addr) {
- if (!AddrIsInMem(addr)) return;
- uptr shadow_addr = MemToShadow(addr);
- const uptr n_bytes_per_row = 16;
- uptr aligned_shadow = shadow_addr & ~(n_bytes_per_row - 1);
- InternalScopedString str(4096 * 8);
- str.append("Shadow bytes around the buggy address:\n");
- for (int i = -5; i <= 5; i++) {
- const char *prefix = (i == 0) ? "=>" : " ";
- PrintShadowBytes(&str, prefix, (u8 *)(aligned_shadow + i * n_bytes_per_row),
- (u8 *)shadow_addr, n_bytes_per_row);
- }
- if (flags()->print_legend) PrintLegend(&str);
- Printf("%s", str.data());
-}
-
static void PrintZoneForPointer(uptr ptr, uptr zone_ptr,
const char *zone_name) {
if (zone_ptr) {
@@ -234,191 +80,8 @@ static void PrintZoneForPointer(uptr ptr, uptr zone_ptr,
}
}
-static void DescribeThread(AsanThread *t) {
- if (t)
- DescribeThread(t->context());
-}
-
// ---------------------- Address Descriptions ------------------- {{{1
-static bool IsASCII(unsigned char c) {
- return /*0x00 <= c &&*/ c <= 0x7F;
-}
-
-static const char *MaybeDemangleGlobalName(const char *name) {
- // We can spoil names of globals with C linkage, so use an heuristic
- // approach to check if the name should be demangled.
- bool should_demangle = false;
- if (name[0] == '_' && name[1] == 'Z')
- should_demangle = true;
- else if (SANITIZER_WINDOWS && name[0] == '\01' && name[1] == '?')
- should_demangle = true;
-
- return should_demangle ? Symbolizer::GetOrInit()->Demangle(name) : name;
-}
-
-// Check if the global is a zero-terminated ASCII string. If so, print it.
-static void PrintGlobalNameIfASCII(InternalScopedString *str,
- const __asan_global &g) {
- for (uptr p = g.beg; p < g.beg + g.size - 1; p++) {
- unsigned char c = *(unsigned char*)p;
- if (c == '\0' || !IsASCII(c)) return;
- }
- if (*(char*)(g.beg + g.size - 1) != '\0') return;
- str->append(" '%s' is ascii string '%s'\n", MaybeDemangleGlobalName(g.name),
- (char *)g.beg);
-}
-
-static const char *GlobalFilename(const __asan_global &g) {
- const char *res = g.module_name;
- // Prefer the filename from source location, if is available.
- if (g.location)
- res = g.location->filename;
- CHECK(res);
- return res;
-}
-
-static void PrintGlobalLocation(InternalScopedString *str,
- const __asan_global &g) {
- str->append("%s", GlobalFilename(g));
- if (!g.location)
- return;
- if (g.location->line_no)
- str->append(":%d", g.location->line_no);
- if (g.location->column_no)
- str->append(":%d", g.location->column_no);
-}
-
-static void DescribeAddressRelativeToGlobal(uptr addr, uptr size,
- const __asan_global &g) {
- InternalScopedString str(4096);
- Decorator d;
- str.append("%s", d.Location());
- if (addr < g.beg) {
- str.append("%p is located %zd bytes to the left", (void *)addr,
- g.beg - addr);
- } else if (addr + size > g.beg + g.size) {
- if (addr < g.beg + g.size)
- addr = g.beg + g.size;
- str.append("%p is located %zd bytes to the right", (void *)addr,
- addr - (g.beg + g.size));
- } else {
- // Can it happen?
- str.append("%p is located %zd bytes inside", (void *)addr, addr - g.beg);
- }
- str.append(" of global variable '%s' defined in '",
- MaybeDemangleGlobalName(g.name));
- PrintGlobalLocation(&str, g);
- str.append("' (0x%zx) of size %zu\n", g.beg, g.size);
- str.append("%s", d.EndLocation());
- PrintGlobalNameIfASCII(&str, g);
- Printf("%s", str.data());
-}
-
-static bool DescribeAddressIfGlobal(uptr addr, uptr size,
- const char *bug_type) {
- // Assume address is close to at most four globals.
- const int kMaxGlobalsInReport = 4;
- __asan_global globals[kMaxGlobalsInReport];
- u32 reg_sites[kMaxGlobalsInReport];
- int globals_num =
- GetGlobalsForAddress(addr, globals, reg_sites, ARRAY_SIZE(globals));
- if (globals_num == 0)
- return false;
- for (int i = 0; i < globals_num; i++) {
- DescribeAddressRelativeToGlobal(addr, size, globals[i]);
- if (0 == internal_strcmp(bug_type, "initialization-order-fiasco") &&
- reg_sites[i]) {
- Printf(" registered at:\n");
- StackDepotGet(reg_sites[i]).Print();
- }
- }
- return true;
-}
-
-bool DescribeAddressIfShadow(uptr addr, AddressDescription *descr, bool print) {
- if (AddrIsInMem(addr))
- return false;
- const char *area_type = nullptr;
- if (AddrIsInShadowGap(addr)) area_type = "shadow gap";
- else if (AddrIsInHighShadow(addr)) area_type = "high shadow";
- else if (AddrIsInLowShadow(addr)) area_type = "low shadow";
- if (area_type != nullptr) {
- if (print) {
- Printf("Address %p is located in the %s area.\n", addr, area_type);
- } else {
- CHECK(descr);
- descr->region_kind = area_type;
- }
- return true;
- }
- CHECK(0 && "Address is not in memory and not in shadow?");
- return false;
-}
-
-// Return " (thread_name) " or an empty string if the name is empty.
-const char *ThreadNameWithParenthesis(AsanThreadContext *t, char buff[],
- uptr buff_len) {
- const char *name = t->name;
- if (name[0] == '\0') return "";
- buff[0] = 0;
- internal_strncat(buff, " (", 3);
- internal_strncat(buff, name, buff_len - 4);
- internal_strncat(buff, ")", 2);
- return buff;
-}
-
-const char *ThreadNameWithParenthesis(u32 tid, char buff[],
- uptr buff_len) {
- if (tid == kInvalidTid) return "";
- asanThreadRegistry().CheckLocked();
- AsanThreadContext *t = GetThreadContextByTidLocked(tid);
- return ThreadNameWithParenthesis(t, buff, buff_len);
-}
-
-static void PrintAccessAndVarIntersection(const StackVarDescr &var, uptr addr,
- uptr access_size, uptr prev_var_end,
- uptr next_var_beg) {
- uptr var_end = var.beg + var.size;
- uptr addr_end = addr + access_size;
- const char *pos_descr = nullptr;
- // If the variable [var.beg, var_end) is the nearest variable to the
- // current memory access, indicate it in the log.
- if (addr >= var.beg) {
- if (addr_end <= var_end)
- pos_descr = "is inside"; // May happen if this is a use-after-return.
- else if (addr < var_end)
- pos_descr = "partially overflows";
- else if (addr_end <= next_var_beg &&
- next_var_beg - addr_end >= addr - var_end)
- pos_descr = "overflows";
- } else {
- if (addr_end > var.beg)
- pos_descr = "partially underflows";
- else if (addr >= prev_var_end &&
- addr - prev_var_end >= var.beg - addr_end)
- pos_descr = "underflows";
- }
- InternalScopedString str(1024);
- str.append(" [%zd, %zd)", var.beg, var_end);
- // Render variable name.
- str.append(" '");
- for (uptr i = 0; i < var.name_len; ++i) {
- str.append("%c", var.name_pos[i]);
- }
- str.append("'");
- if (pos_descr) {
- Decorator d;
- // FIXME: we may want to also print the size of the access here,
- // but in case of accesses generated by memset it may be confusing.
- str.append("%s <== Memory access at offset %zd %s this variable%s\n",
- d.Location(), addr, pos_descr, d.EndLocation());
- } else {
- str.append("\n");
- }
- Printf("%s", str.data());
-}
-
bool ParseFrameDescription(const char *frame_descr,
InternalMmapVector<StackVarDescr> *vars) {
CHECK(frame_descr);
@@ -446,195 +109,17 @@ bool ParseFrameDescription(const char *frame_descr,
return true;
}
-bool DescribeAddressIfStack(uptr addr, uptr access_size) {
- AsanThread *t = FindThreadByStackAddress(addr);
- if (!t) return false;
-
- Decorator d;
- char tname[128];
- Printf("%s", d.Location());
- Printf("Address %p is located in stack of thread T%d%s", addr, t->tid(),
- ThreadNameWithParenthesis(t->tid(), tname, sizeof(tname)));
-
- // Try to fetch precise stack frame for this access.
- AsanThread::StackFrameAccess access;
- if (!t->GetStackFrameAccessByAddr(addr, &access)) {
- Printf("%s\n", d.EndLocation());
- return true;
- }
- Printf(" at offset %zu in frame%s\n", access.offset, d.EndLocation());
-
- // Now we print the frame where the alloca has happened.
- // We print this frame as a stack trace with one element.
- // The symbolizer may print more than one frame if inlining was involved.
- // The frame numbers may be different than those in the stack trace printed
- // previously. That's unfortunate, but I have no better solution,
- // especially given that the alloca may be from entirely different place
- // (e.g. use-after-scope, or different thread's stack).
-#if SANITIZER_PPC64V1
- // On PowerPC64 ELFv1, the address of a function actually points to a
- // three-doubleword data structure with the first field containing
- // the address of the function's code.
- access.frame_pc = *reinterpret_cast<uptr *>(access.frame_pc);
-#endif
- access.frame_pc += 16;
- Printf("%s", d.EndLocation());
- StackTrace alloca_stack(&access.frame_pc, 1);
- alloca_stack.Print();
-
- InternalMmapVector<StackVarDescr> vars(16);
- if (!ParseFrameDescription(access.frame_descr, &vars)) {
- Printf("AddressSanitizer can't parse the stack frame "
- "descriptor: |%s|\n", access.frame_descr);
- // 'addr' is a stack address, so return true even if we can't parse frame
- return true;
- }
- uptr n_objects = vars.size();
- // Report the number of stack objects.
- Printf(" This frame has %zu object(s):\n", n_objects);
-
- // Report all objects in this frame.
- for (uptr i = 0; i < n_objects; i++) {
- uptr prev_var_end = i ? vars[i - 1].beg + vars[i - 1].size : 0;
- uptr next_var_beg = i + 1 < n_objects ? vars[i + 1].beg : ~(0UL);
- PrintAccessAndVarIntersection(vars[i], access.offset, access_size,
- prev_var_end, next_var_beg);
- }
- Printf("HINT: this may be a false positive if your program uses "
- "some custom stack unwind mechanism or swapcontext\n");
- if (SANITIZER_WINDOWS)
- Printf(" (longjmp, SEH and C++ exceptions *are* supported)\n");
- else
- Printf(" (longjmp and C++ exceptions *are* supported)\n");
-
- DescribeThread(t);
- return true;
-}
-
-static void DescribeAccessToHeapChunk(AsanChunkView chunk, uptr addr,
- uptr access_size) {
- sptr offset;
- Decorator d;
- InternalScopedString str(4096);
- str.append("%s", d.Location());
- if (chunk.AddrIsAtLeft(addr, access_size, &offset)) {
- str.append("%p is located %zd bytes to the left of", (void *)addr, offset);
- } else if (chunk.AddrIsAtRight(addr, access_size, &offset)) {
- if (offset < 0) {
- addr -= offset;
- offset = 0;
- }
- str.append("%p is located %zd bytes to the right of", (void *)addr, offset);
- } else if (chunk.AddrIsInside(addr, access_size, &offset)) {
- str.append("%p is located %zd bytes inside of", (void*)addr, offset);
- } else {
- str.append("%p is located somewhere around (this is AddressSanitizer bug!)",
- (void *)addr);
- }
- str.append(" %zu-byte region [%p,%p)\n", chunk.UsedSize(),
- (void *)(chunk.Beg()), (void *)(chunk.End()));
- str.append("%s", d.EndLocation());
- Printf("%s", str.data());
-}
-
-void DescribeHeapAddress(uptr addr, uptr access_size) {
- AsanChunkView chunk = FindHeapChunkByAddress(addr);
- if (!chunk.IsValid()) {
- Printf("AddressSanitizer can not describe address in more detail "
- "(wild memory access suspected).\n");
- return;
- }
- DescribeAccessToHeapChunk(chunk, addr, access_size);
- CHECK(chunk.AllocTid() != kInvalidTid);
- asanThreadRegistry().CheckLocked();
- AsanThreadContext *alloc_thread =
- GetThreadContextByTidLocked(chunk.AllocTid());
- StackTrace alloc_stack = chunk.GetAllocStack();
- char tname[128];
- Decorator d;
- AsanThreadContext *free_thread = nullptr;
- if (chunk.FreeTid() != kInvalidTid) {
- free_thread = GetThreadContextByTidLocked(chunk.FreeTid());
- Printf("%sfreed by thread T%d%s here:%s\n", d.Allocation(),
- free_thread->tid,
- ThreadNameWithParenthesis(free_thread, tname, sizeof(tname)),
- d.EndAllocation());
- StackTrace free_stack = chunk.GetFreeStack();
- free_stack.Print();
- Printf("%spreviously allocated by thread T%d%s here:%s\n",
- d.Allocation(), alloc_thread->tid,
- ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)),
- d.EndAllocation());
- } else {
- Printf("%sallocated by thread T%d%s here:%s\n", d.Allocation(),
- alloc_thread->tid,
- ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)),
- d.EndAllocation());
- }
- alloc_stack.Print();
- DescribeThread(GetCurrentThread());
- if (free_thread)
- DescribeThread(free_thread);
- DescribeThread(alloc_thread);
-}
-
-static void DescribeAddress(uptr addr, uptr access_size, const char *bug_type) {
- // Check if this is shadow or shadow gap.
- if (DescribeAddressIfShadow(addr))
- return;
- CHECK(AddrIsInMem(addr));
- if (DescribeAddressIfGlobal(addr, access_size, bug_type))
- return;
- if (DescribeAddressIfStack(addr, access_size))
- return;
- // Assume it is a heap address.
- DescribeHeapAddress(addr, access_size);
-}
-
-// ------------------- Thread description -------------------- {{{1
-
-void DescribeThread(AsanThreadContext *context) {
- CHECK(context);
- asanThreadRegistry().CheckLocked();
- // No need to announce the main thread.
- if (context->tid == 0 || context->announced) {
- return;
- }
- context->announced = true;
- char tname[128];
- InternalScopedString str(1024);
- str.append("Thread T%d%s", context->tid,
- ThreadNameWithParenthesis(context->tid, tname, sizeof(tname)));
- if (context->parent_tid == kInvalidTid) {
- str.append(" created by unknown thread\n");
- Printf("%s", str.data());
- return;
- }
- str.append(
- " created by T%d%s here:\n", context->parent_tid,
- ThreadNameWithParenthesis(context->parent_tid, tname, sizeof(tname)));
- Printf("%s", str.data());
- StackDepotGet(context->stack_id).Print();
- // Recursively described parent thread if needed.
- if (flags()->print_full_thread_history) {
- AsanThreadContext *parent_context =
- GetThreadContextByTidLocked(context->parent_tid);
- DescribeThread(parent_context);
- }
-}
-
// -------------------- Different kinds of reports ----------------- {{{1
// Use ScopedInErrorReport to run common actions just before and
// immediately after printing error report.
class ScopedInErrorReport {
public:
- explicit ScopedInErrorReport(ReportData *report = nullptr,
- bool fatal = false) {
+ explicit ScopedInErrorReport(bool fatal = false) {
halt_on_error_ = fatal || flags()->halt_on_error;
if (lock_.TryLock()) {
- StartReporting(report);
+ StartReporting();
return;
}
@@ -676,10 +161,13 @@ class ScopedInErrorReport {
lock_.Lock();
}
- StartReporting(report);
+ StartReporting();
}
~ScopedInErrorReport() {
+ ASAN_ON_ERROR();
+ if (current_error_.IsValid()) current_error_.Print();
+
// Make sure the current thread is announced.
DescribeThread(GetCurrentThread());
// We may want to grab this lock again when printing stats.
@@ -705,6 +193,12 @@ class ScopedInErrorReport {
if (error_report_callback) {
error_report_callback(buffer_copy.data());
}
+
+ // In halt_on_error = false mode, reset the current error object (before
+ // unlocking).
+ if (!halt_on_error_)
+ internal_memset(&current_error_, 0, sizeof(current_error_));
+
CommonSanitizerReportMutex.Unlock();
reporting_thread_tid_ = kInvalidTid;
lock_.Unlock();
@@ -714,11 +208,18 @@ class ScopedInErrorReport {
}
}
+ void ReportError(const ErrorDescription &description) {
+ // Can only report one error per ScopedInErrorReport.
+ CHECK_EQ(current_error_.kind, kErrorKindInvalid);
+ current_error_ = description;
+ }
+
+ static ErrorDescription &CurrentError() {
+ return current_error_;
+ }
+
private:
- void StartReporting(ReportData *report) {
- if (report) report_data = *report;
- report_happened = true;
- ASAN_ON_ERROR();
+ void StartReporting() {
// Make sure the registry and sanitizer report mutexes are locked while
// we're printing an error report.
// We can lock them only here to avoid self-deadlock in case of
@@ -732,181 +233,69 @@ class ScopedInErrorReport {
static StaticSpinMutex lock_;
static u32 reporting_thread_tid_;
+ // Error currently being reported. This enables the destructor to interact
+ // with the debugger and point it to an error description.
+ static ErrorDescription current_error_;
bool halt_on_error_;
};
StaticSpinMutex ScopedInErrorReport::lock_;
u32 ScopedInErrorReport::reporting_thread_tid_ = kInvalidTid;
+ErrorDescription ScopedInErrorReport::current_error_;
void ReportStackOverflow(const SignalContext &sig) {
- ScopedInErrorReport in_report(/*report*/ nullptr, /*fatal*/ true);
- Decorator d;
- Printf("%s", d.Warning());
- Report(
- "ERROR: AddressSanitizer: stack-overflow on address %p"
- " (pc %p bp %p sp %p T%d)\n",
- (void *)sig.addr, (void *)sig.pc, (void *)sig.bp, (void *)sig.sp,
- GetCurrentTidOrInvalid());
- Printf("%s", d.EndWarning());
- ScarinessScore::PrintSimple(10, "stack-overflow");
- GET_STACK_TRACE_SIGNAL(sig);
- stack.Print();
- ReportErrorSummary("stack-overflow", &stack);
+ ScopedInErrorReport in_report(/*fatal*/ true);
+ ErrorStackOverflow error(GetCurrentTidOrInvalid(), sig);
+ in_report.ReportError(error);
}
-void ReportDeadlySignal(const char *description, const SignalContext &sig) {
- ScopedInErrorReport in_report(/*report*/ nullptr, /*fatal*/ true);
- Decorator d;
- Printf("%s", d.Warning());
- Report(
- "ERROR: AddressSanitizer: %s on unknown address %p"
- " (pc %p bp %p sp %p T%d)\n",
- description, (void *)sig.addr, (void *)sig.pc, (void *)sig.bp,
- (void *)sig.sp, GetCurrentTidOrInvalid());
- Printf("%s", d.EndWarning());
- ScarinessScore SS;
- if (sig.pc < GetPageSizeCached())
- Report("Hint: pc points to the zero page.\n");
- if (sig.is_memory_access) {
- const char *access_type =
- sig.write_flag == SignalContext::WRITE
- ? "WRITE"
- : (sig.write_flag == SignalContext::READ ? "READ" : "UNKNOWN");
- Report("The signal is caused by a %s memory access.\n", access_type);
- if (sig.addr < GetPageSizeCached()) {
- Report("Hint: address points to the zero page.\n");
- SS.Scare(10, "null-deref");
- } else if (sig.addr == sig.pc) {
- SS.Scare(60, "wild-jump");
- } else if (sig.write_flag == SignalContext::WRITE) {
- SS.Scare(30, "wild-addr-write");
- } else if (sig.write_flag == SignalContext::READ) {
- SS.Scare(20, "wild-addr-read");
- } else {
- SS.Scare(25, "wild-addr");
- }
- } else {
- SS.Scare(10, "signal");
- }
- SS.Print();
- GET_STACK_TRACE_SIGNAL(sig);
- stack.Print();
- MaybeDumpInstructionBytes(sig.pc);
- Printf("AddressSanitizer can not provide additional info.\n");
- ReportErrorSummary(description, &stack);
+void ReportDeadlySignal(int signo, const SignalContext &sig) {
+ ScopedInErrorReport in_report(/*fatal*/ true);
+ ErrorDeadlySignal error(GetCurrentTidOrInvalid(), sig, signo);
+ in_report.ReportError(error);
}
void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack) {
ScopedInErrorReport in_report;
- Decorator d;
- Printf("%s", d.Warning());
- char tname[128];
- u32 curr_tid = GetCurrentTidOrInvalid();
- Report("ERROR: AddressSanitizer: attempting double-free on %p in "
- "thread T%d%s:\n",
- addr, curr_tid,
- ThreadNameWithParenthesis(curr_tid, tname, sizeof(tname)));
- Printf("%s", d.EndWarning());
- CHECK_GT(free_stack->size, 0);
- ScarinessScore::PrintSimple(42, "double-free");
- GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
- stack.Print();
- DescribeHeapAddress(addr, 1);
- ReportErrorSummary("double-free", &stack);
+ ErrorDoubleFree error(GetCurrentTidOrInvalid(), free_stack, addr);
+ in_report.ReportError(error);
}
-void ReportNewDeleteSizeMismatch(uptr addr, uptr alloc_size, uptr delete_size,
+void ReportNewDeleteSizeMismatch(uptr addr, uptr delete_size,
BufferedStackTrace *free_stack) {
ScopedInErrorReport in_report;
- Decorator d;
- Printf("%s", d.Warning());
- char tname[128];
- u32 curr_tid = GetCurrentTidOrInvalid();
- Report("ERROR: AddressSanitizer: new-delete-type-mismatch on %p in "
- "thread T%d%s:\n",
- addr, curr_tid,
- ThreadNameWithParenthesis(curr_tid, tname, sizeof(tname)));
- Printf("%s object passed to delete has wrong type:\n", d.EndWarning());
- Printf(" size of the allocated type: %zd bytes;\n"
- " size of the deallocated type: %zd bytes.\n",
- alloc_size, delete_size);
- CHECK_GT(free_stack->size, 0);
- ScarinessScore::PrintSimple(10, "new-delete-type-mismatch");
- GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
- stack.Print();
- DescribeHeapAddress(addr, 1);
- ReportErrorSummary("new-delete-type-mismatch", &stack);
- Report("HINT: if you don't care about these errors you may set "
- "ASAN_OPTIONS=new_delete_type_mismatch=0\n");
+ ErrorNewDeleteSizeMismatch error(GetCurrentTidOrInvalid(), free_stack, addr,
+ delete_size);
+ in_report.ReportError(error);
}
void ReportFreeNotMalloced(uptr addr, BufferedStackTrace *free_stack) {
ScopedInErrorReport in_report;
- Decorator d;
- Printf("%s", d.Warning());
- char tname[128];
- u32 curr_tid = GetCurrentTidOrInvalid();
- Report("ERROR: AddressSanitizer: attempting free on address "
- "which was not malloc()-ed: %p in thread T%d%s\n", addr,
- curr_tid, ThreadNameWithParenthesis(curr_tid, tname, sizeof(tname)));
- Printf("%s", d.EndWarning());
- CHECK_GT(free_stack->size, 0);
- ScarinessScore::PrintSimple(40, "bad-free");
- GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
- stack.Print();
- DescribeHeapAddress(addr, 1);
- ReportErrorSummary("bad-free", &stack);
+ ErrorFreeNotMalloced error(GetCurrentTidOrInvalid(), free_stack, addr);
+ in_report.ReportError(error);
}
void ReportAllocTypeMismatch(uptr addr, BufferedStackTrace *free_stack,
AllocType alloc_type,
AllocType dealloc_type) {
- static const char *alloc_names[] =
- {"INVALID", "malloc", "operator new", "operator new []"};
- static const char *dealloc_names[] =
- {"INVALID", "free", "operator delete", "operator delete []"};
- CHECK_NE(alloc_type, dealloc_type);
ScopedInErrorReport in_report;
- Decorator d;
- Printf("%s", d.Warning());
- Report("ERROR: AddressSanitizer: alloc-dealloc-mismatch (%s vs %s) on %p\n",
- alloc_names[alloc_type], dealloc_names[dealloc_type], addr);
- Printf("%s", d.EndWarning());
- CHECK_GT(free_stack->size, 0);
- ScarinessScore::PrintSimple(10, "alloc-dealloc-mismatch");
- GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
- stack.Print();
- DescribeHeapAddress(addr, 1);
- ReportErrorSummary("alloc-dealloc-mismatch", &stack);
- Report("HINT: if you don't care about these errors you may set "
- "ASAN_OPTIONS=alloc_dealloc_mismatch=0\n");
+ ErrorAllocTypeMismatch error(GetCurrentTidOrInvalid(), free_stack, addr,
+ alloc_type, dealloc_type);
+ in_report.ReportError(error);
}
void ReportMallocUsableSizeNotOwned(uptr addr, BufferedStackTrace *stack) {
ScopedInErrorReport in_report;
- Decorator d;
- Printf("%s", d.Warning());
- Report("ERROR: AddressSanitizer: attempting to call "
- "malloc_usable_size() for pointer which is "
- "not owned: %p\n", addr);
- Printf("%s", d.EndWarning());
- stack->Print();
- DescribeHeapAddress(addr, 1);
- ReportErrorSummary("bad-malloc_usable_size", stack);
+ ErrorMallocUsableSizeNotOwned error(GetCurrentTidOrInvalid(), stack, addr);
+ in_report.ReportError(error);
}
void ReportSanitizerGetAllocatedSizeNotOwned(uptr addr,
BufferedStackTrace *stack) {
ScopedInErrorReport in_report;
- Decorator d;
- Printf("%s", d.Warning());
- Report("ERROR: AddressSanitizer: attempting to call "
- "__sanitizer_get_allocated_size() for pointer which is "
- "not owned: %p\n", addr);
- Printf("%s", d.EndWarning());
- stack->Print();
- DescribeHeapAddress(addr, 1);
- ReportErrorSummary("bad-__sanitizer_get_allocated_size", stack);
+ ErrorSanitizerGetAllocatedSizeNotOwned error(GetCurrentTidOrInvalid(), stack,
+ addr);
+ in_report.ReportError(error);
}
void ReportStringFunctionMemoryRangesOverlap(const char *function,
@@ -914,96 +303,43 @@ void ReportStringFunctionMemoryRangesOverlap(const char *function,
const char *offset2, uptr length2,
BufferedStackTrace *stack) {
ScopedInErrorReport in_report;
- Decorator d;
- char bug_type[100];
- internal_snprintf(bug_type, sizeof(bug_type), "%s-param-overlap", function);
- Printf("%s", d.Warning());
- Report("ERROR: AddressSanitizer: %s: "
- "memory ranges [%p,%p) and [%p, %p) overlap\n", \
- bug_type, offset1, offset1 + length1, offset2, offset2 + length2);
- Printf("%s", d.EndWarning());
- ScarinessScore::PrintSimple(10, bug_type);
- stack->Print();
- DescribeAddress((uptr)offset1, length1, bug_type);
- DescribeAddress((uptr)offset2, length2, bug_type);
- ReportErrorSummary(bug_type, stack);
+ ErrorStringFunctionMemoryRangesOverlap error(
+ GetCurrentTidOrInvalid(), stack, (uptr)offset1, length1, (uptr)offset2,
+ length2, function);
+ in_report.ReportError(error);
}
void ReportStringFunctionSizeOverflow(uptr offset, uptr size,
BufferedStackTrace *stack) {
ScopedInErrorReport in_report;
- Decorator d;
- const char *bug_type = "negative-size-param";
- Printf("%s", d.Warning());
- Report("ERROR: AddressSanitizer: %s: (size=%zd)\n", bug_type, size);
- Printf("%s", d.EndWarning());
- ScarinessScore::PrintSimple(10, bug_type);
- stack->Print();
- DescribeAddress(offset, size, bug_type);
- ReportErrorSummary(bug_type, stack);
+ ErrorStringFunctionSizeOverflow error(GetCurrentTidOrInvalid(), stack, offset,
+ size);
+ in_report.ReportError(error);
}
void ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end,
uptr old_mid, uptr new_mid,
BufferedStackTrace *stack) {
ScopedInErrorReport in_report;
- Report("ERROR: AddressSanitizer: bad parameters to "
- "__sanitizer_annotate_contiguous_container:\n"
- " beg : %p\n"
- " end : %p\n"
- " old_mid : %p\n"
- " new_mid : %p\n",
- beg, end, old_mid, new_mid);
- uptr granularity = SHADOW_GRANULARITY;
- if (!IsAligned(beg, granularity))
- Report("ERROR: beg is not aligned by %d\n", granularity);
- stack->Print();
- ReportErrorSummary("bad-__sanitizer_annotate_contiguous_container", stack);
+ ErrorBadParamsToAnnotateContiguousContainer error(
+ GetCurrentTidOrInvalid(), stack, beg, end, old_mid, new_mid);
+ in_report.ReportError(error);
}
void ReportODRViolation(const __asan_global *g1, u32 stack_id1,
const __asan_global *g2, u32 stack_id2) {
ScopedInErrorReport in_report;
- Decorator d;
- Printf("%s", d.Warning());
- Report("ERROR: AddressSanitizer: odr-violation (%p):\n", g1->beg);
- Printf("%s", d.EndWarning());
- InternalScopedString g1_loc(256), g2_loc(256);
- PrintGlobalLocation(&g1_loc, *g1);
- PrintGlobalLocation(&g2_loc, *g2);
- Printf(" [1] size=%zd '%s' %s\n", g1->size,
- MaybeDemangleGlobalName(g1->name), g1_loc.data());
- Printf(" [2] size=%zd '%s' %s\n", g2->size,
- MaybeDemangleGlobalName(g2->name), g2_loc.data());
- if (stack_id1 && stack_id2) {
- Printf("These globals were registered at these points:\n");
- Printf(" [1]:\n");
- StackDepotGet(stack_id1).Print();
- Printf(" [2]:\n");
- StackDepotGet(stack_id2).Print();
- }
- Report("HINT: if you don't care about these errors you may set "
- "ASAN_OPTIONS=detect_odr_violation=0\n");
- InternalScopedString error_msg(256);
- error_msg.append("odr-violation: global '%s' at %s",
- MaybeDemangleGlobalName(g1->name), g1_loc.data());
- ReportErrorSummary(error_msg.data());
+ ErrorODRViolation error(GetCurrentTidOrInvalid(), g1, stack_id1, g2,
+ stack_id2);
+ in_report.ReportError(error);
}
// ----------------------- CheckForInvalidPointerPair ----------- {{{1
-static NOINLINE void
-ReportInvalidPointerPair(uptr pc, uptr bp, uptr sp, uptr a1, uptr a2) {
+static NOINLINE void ReportInvalidPointerPair(uptr pc, uptr bp, uptr sp,
+ uptr a1, uptr a2) {
ScopedInErrorReport in_report;
- const char *bug_type = "invalid-pointer-pair";
- Decorator d;
- Printf("%s", d.Warning());
- Report("ERROR: AddressSanitizer: invalid-pointer-pair: %p %p\n", a1, a2);
- Printf("%s", d.EndWarning());
- GET_STACK_TRACE_FATAL(pc, bp);
- stack.Print();
- DescribeAddress(a1, 1, bug_type);
- DescribeAddress(a2, 1, bug_type);
- ReportErrorSummary(bug_type, &stack);
+ ErrorInvalidPointerPair error(GetCurrentTidOrInvalid(), pc, bp, sp, a1, a2);
+ in_report.ReportError(error);
}
static INLINE void CheckForInvalidPointerPair(void *p1, void *p2) {
@@ -1029,7 +365,7 @@ void ReportMacMzReallocUnknown(uptr addr, uptr zone_ptr, const char *zone_name,
addr);
PrintZoneForPointer(addr, zone_ptr, zone_name);
stack->Print();
- DescribeHeapAddress(addr, 1);
+ DescribeAddressIfHeap(addr);
}
// -------------- SuppressErrorReport -------------- {{{1
@@ -1046,34 +382,10 @@ static bool SuppressErrorReport(uptr pc) {
Die();
}
-static void PrintContainerOverflowHint() {
- Printf("HINT: if you don't care about these errors you may set "
- "ASAN_OPTIONS=detect_container_overflow=0.\n"
- "If you suspect a false positive see also: "
- "https://github.com/google/sanitizers/wiki/"
- "AddressSanitizerContainerOverflow.\n");
-}
-
-static bool AdjacentShadowValuesAreFullyPoisoned(u8 *s) {
- return s[-1] > 127 && s[1] > 127;
-}
-
void ReportGenericError(uptr pc, uptr bp, uptr sp, uptr addr, bool is_write,
uptr access_size, u32 exp, bool fatal) {
if (!fatal && SuppressErrorReport(pc)) return;
ENABLE_FRAME_POINTER;
- ScarinessScore SS;
-
- if (access_size) {
- if (access_size <= 9) {
- char desr[] = "?-byte";
- desr[0] = '0' + access_size;
- SS.Scare(access_size + access_size / 2, desr);
- } else if (access_size >= 10) {
- SS.Scare(15, "multi-byte");
- }
- is_write ? SS.Scare(20, "write") : SS.Scare(1, "read");
- }
// Optimization experiments.
// The experiments can be used to evaluate potential optimizations that remove
@@ -1084,118 +396,10 @@ void ReportGenericError(uptr pc, uptr bp, uptr sp, uptr addr, bool is_write,
// The reaction to a non-zero value of exp is to be defined.
(void)exp;
- // Determine the error type.
- const char *bug_descr = "unknown-crash";
- u8 shadow_val = 0;
- if (AddrIsInMem(addr)) {
- u8 *shadow_addr = (u8*)MemToShadow(addr);
- // If we are accessing 16 bytes, look at the second shadow byte.
- if (*shadow_addr == 0 && access_size > SHADOW_GRANULARITY)
- shadow_addr++;
- // If we are in the partial right redzone, look at the next shadow byte.
- if (*shadow_addr > 0 && *shadow_addr < 128)
- shadow_addr++;
- bool far_from_bounds = false;
- shadow_val = *shadow_addr;
- int bug_type_score = 0;
- // For use-after-frees reads are almost as bad as writes.
- int read_after_free_bonus = 0;
- switch (shadow_val) {
- case kAsanHeapLeftRedzoneMagic:
- case kAsanHeapRightRedzoneMagic:
- case kAsanArrayCookieMagic:
- bug_descr = "heap-buffer-overflow";
- bug_type_score = 10;
- far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
- break;
- case kAsanHeapFreeMagic:
- bug_descr = "heap-use-after-free";
- bug_type_score = 20;
- if (!is_write) read_after_free_bonus = 18;
- break;
- case kAsanStackLeftRedzoneMagic:
- bug_descr = "stack-buffer-underflow";
- bug_type_score = 25;
- far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
- break;
- case kAsanInitializationOrderMagic:
- bug_descr = "initialization-order-fiasco";
- bug_type_score = 1;
- break;
- case kAsanStackMidRedzoneMagic:
- case kAsanStackRightRedzoneMagic:
- case kAsanStackPartialRedzoneMagic:
- bug_descr = "stack-buffer-overflow";
- bug_type_score = 25;
- far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
- break;
- case kAsanStackAfterReturnMagic:
- bug_descr = "stack-use-after-return";
- bug_type_score = 30;
- if (!is_write) read_after_free_bonus = 18;
- break;
- case kAsanUserPoisonedMemoryMagic:
- bug_descr = "use-after-poison";
- bug_type_score = 20;
- break;
- case kAsanContiguousContainerOOBMagic:
- bug_descr = "container-overflow";
- bug_type_score = 10;
- break;
- case kAsanStackUseAfterScopeMagic:
- bug_descr = "stack-use-after-scope";
- bug_type_score = 10;
- break;
- case kAsanGlobalRedzoneMagic:
- bug_descr = "global-buffer-overflow";
- bug_type_score = 10;
- far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
- break;
- case kAsanIntraObjectRedzone:
- bug_descr = "intra-object-overflow";
- bug_type_score = 10;
- break;
- case kAsanAllocaLeftMagic:
- case kAsanAllocaRightMagic:
- bug_descr = "dynamic-stack-buffer-overflow";
- bug_type_score = 25;
- far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
- break;
- }
- SS.Scare(bug_type_score + read_after_free_bonus, bug_descr);
- if (far_from_bounds)
- SS.Scare(10, "far-from-bounds");
- }
-
- ReportData report = { pc, sp, bp, addr, (bool)is_write, access_size,
- bug_descr };
- ScopedInErrorReport in_report(&report, fatal);
-
- Decorator d;
- Printf("%s", d.Warning());
- Report("ERROR: AddressSanitizer: %s on address "
- "%p at pc %p bp %p sp %p\n",
- bug_descr, (void*)addr, pc, bp, sp);
- Printf("%s", d.EndWarning());
-
- u32 curr_tid = GetCurrentTidOrInvalid();
- char tname[128];
- Printf("%s%s of size %zu at %p thread T%d%s%s\n",
- d.Access(),
- access_size ? (is_write ? "WRITE" : "READ") : "ACCESS",
- access_size, (void*)addr, curr_tid,
- ThreadNameWithParenthesis(curr_tid, tname, sizeof(tname)),
- d.EndAccess());
-
- SS.Print();
- GET_STACK_TRACE_FATAL(pc, bp);
- stack.Print();
-
- DescribeAddress(addr, access_size, bug_descr);
- if (shadow_val == kAsanContiguousContainerOOBMagic)
- PrintContainerOverflowHint();
- ReportErrorSummary(bug_descr, &stack);
- PrintShadowMemoryForAddress(addr);
+ ScopedInErrorReport in_report(fatal);
+ ErrorGeneric error(GetCurrentTidOrInvalid(), pc, bp, sp, addr, is_write,
+ access_size);
+ in_report.ReportError(error);
}
} // namespace __asan
@@ -1218,40 +422,57 @@ void NOINLINE __asan_set_error_report_callback(void (*callback)(const char*)) {
void __asan_describe_address(uptr addr) {
// Thread registry must be locked while we're describing an address.
asanThreadRegistry().Lock();
- DescribeAddress(addr, 1, "");
+ PrintAddressDescription(addr, 1, "");
asanThreadRegistry().Unlock();
}
int __asan_report_present() {
- return report_happened ? 1 : 0;
+ return ScopedInErrorReport::CurrentError().kind != kErrorKindInvalid;
}
uptr __asan_get_report_pc() {
- return report_data.pc;
+ if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
+ return ScopedInErrorReport::CurrentError().Generic.pc;
+ return 0;
}
uptr __asan_get_report_bp() {
- return report_data.bp;
+ if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
+ return ScopedInErrorReport::CurrentError().Generic.bp;
+ return 0;
}
uptr __asan_get_report_sp() {
- return report_data.sp;
+ if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
+ return ScopedInErrorReport::CurrentError().Generic.sp;
+ return 0;
}
uptr __asan_get_report_address() {
- return report_data.addr;
+ ErrorDescription &err = ScopedInErrorReport::CurrentError();
+ if (err.kind == kErrorKindGeneric)
+ return err.Generic.addr_description.Address();
+ else if (err.kind == kErrorKindDoubleFree)
+ return err.DoubleFree.addr_description.addr;
+ return 0;
}
int __asan_get_report_access_type() {
- return report_data.is_write ? 1 : 0;
+ if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
+ return ScopedInErrorReport::CurrentError().Generic.is_write;
+ return 0;
}
uptr __asan_get_report_access_size() {
- return report_data.access_size;
+ if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
+ return ScopedInErrorReport::CurrentError().Generic.access_size;
+ return 0;
}
const char *__asan_get_report_description() {
- return report_data.description;
+ if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
+ return ScopedInErrorReport::CurrentError().Generic.bug_descr;
+ return ScopedInErrorReport::CurrentError().Base.scariness.GetDescription();
}
extern "C" {
diff --git a/lib/asan/asan_report.h b/lib/asan/asan_report.h
index 03f096581a1c..5ebfda693d0c 100644
--- a/lib/asan/asan_report.h
+++ b/lib/asan/asan_report.h
@@ -25,35 +25,29 @@ struct StackVarDescr {
uptr name_len;
};
-struct AddressDescription {
- char *name;
- uptr name_size;
- uptr region_address;
- uptr region_size;
- const char *region_kind;
-};
-
// Returns the number of globals close to the provided address and copies
// them to "globals" array.
int GetGlobalsForAddress(uptr addr, __asan_global *globals, u32 *reg_sites,
int max_globals);
-bool GetInfoForAddressIfGlobal(uptr addr, AddressDescription *descr);
+
+const char *MaybeDemangleGlobalName(const char *name);
+void PrintGlobalNameIfASCII(InternalScopedString *str, const __asan_global &g);
+void PrintGlobalLocation(InternalScopedString *str, const __asan_global &g);
+
+void PrintMemoryByte(InternalScopedString *str, const char *before, u8 byte,
+ bool in_shadow, const char *after = "\n");
+
// The following functions print an address description depending
// on the memory type (shadow/heap/stack/global).
-void DescribeHeapAddress(uptr addr, uptr access_size);
-bool DescribeAddressIfShadow(uptr addr, AddressDescription *descr = nullptr,
- bool print = true);
bool ParseFrameDescription(const char *frame_descr,
InternalMmapVector<StackVarDescr> *vars);
-bool DescribeAddressIfStack(uptr addr, uptr access_size);
-void DescribeThread(AsanThreadContext *context);
// Different kinds of error reports.
void ReportGenericError(uptr pc, uptr bp, uptr sp, uptr addr, bool is_write,
uptr access_size, u32 exp, bool fatal);
void ReportStackOverflow(const SignalContext &sig);
-void ReportDeadlySignal(const char *description, const SignalContext &sig);
-void ReportNewDeleteSizeMismatch(uptr addr, uptr alloc_size, uptr delete_size,
+void ReportDeadlySignal(int signo, const SignalContext &sig);
+void ReportNewDeleteSizeMismatch(uptr addr, uptr delete_size,
BufferedStackTrace *free_stack);
void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack);
void ReportFreeNotMalloced(uptr addr, BufferedStackTrace *free_stack);
diff --git a/lib/asan/asan_rtl.cc b/lib/asan/asan_rtl.cc
index 4962b9ee1561..fee7b8a2dbaf 100644
--- a/lib/asan/asan_rtl.cc
+++ b/lib/asan/asan_rtl.cc
@@ -32,6 +32,7 @@
#include "ubsan/ubsan_init.h"
#include "ubsan/ubsan_platform.h"
+uptr __asan_shadow_memory_dynamic_address; // Global interface symbol.
int __asan_option_detect_stack_use_after_return; // Global interface symbol.
uptr *__asan_test_only_reported_buggy_pointer; // Used only for testing asan.
@@ -263,6 +264,7 @@ static NOINLINE void force_interface_symbols() {
volatile int fake_condition = 0; // prevent dead condition elimination.
// __asan_report_* functions are noreturn, so we need a switch to prevent
// the compiler from removing any of them.
+ // clang-format off
switch (fake_condition) {
case 1: __asan_report_load1(0); break;
case 2: __asan_report_load2(0); break;
@@ -302,7 +304,14 @@ static NOINLINE void force_interface_symbols() {
case 37: __asan_unpoison_stack_memory(0, 0); break;
case 38: __asan_region_is_poisoned(0, 0); break;
case 39: __asan_describe_address(0); break;
+ case 40: __asan_set_shadow_00(0, 0); break;
+ case 41: __asan_set_shadow_f1(0, 0); break;
+ case 42: __asan_set_shadow_f2(0, 0); break;
+ case 43: __asan_set_shadow_f3(0, 0); break;
+ case 44: __asan_set_shadow_f5(0, 0); break;
+ case 45: __asan_set_shadow_f8(0, 0); break;
}
+ // clang-format on
}
static void asan_atexit() {
@@ -326,8 +335,21 @@ static void InitializeHighMemEnd() {
}
static void ProtectGap(uptr addr, uptr size) {
- if (!flags()->protect_shadow_gap)
+ if (!flags()->protect_shadow_gap) {
+    // The shadow gap is unprotected, so there is a chance that someone
+    // is actually using this memory, which means it needs a shadow...
+ uptr GapShadowBeg = RoundDownTo(MEM_TO_SHADOW(addr), GetPageSizeCached());
+ uptr GapShadowEnd =
+ RoundUpTo(MEM_TO_SHADOW(addr + size), GetPageSizeCached()) - 1;
+ if (Verbosity())
+ Printf("protect_shadow_gap=0:"
+ " not protecting shadow gap, allocating gap's shadow\n"
+ "|| `[%p, %p]` || ShadowGap's shadow ||\n", GapShadowBeg,
+ GapShadowEnd);
+ ReserveShadowMemoryRange(GapShadowBeg, GapShadowEnd,
+ "unprotected gap shadow");
return;
+ }
void *res = MmapFixedNoAccess(addr, size, "shadow gap");
if (addr == (uptr)res)
return;
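
What MmapFixedNoAccess boils down to on POSIX: pin the exact range with no permissions so any stray access faults immediately. A hedged sketch:

    #include <stddef.h>
    #include <sys/mman.h>

    static bool protect_gap_sketch(void *addr, size_t size) {
      void *res = mmap(addr, size, PROT_NONE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_NORESERVE,
                       -1, 0);
      return res == addr;  // mirrors the addr == res success check above
    }
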
@@ -388,6 +410,8 @@ static void PrintAddressSpaceLayout() {
Printf("redzone=%zu\n", (uptr)flags()->redzone);
Printf("max_redzone=%zu\n", (uptr)flags()->max_redzone);
Printf("quarantine_size_mb=%zuM\n", (uptr)flags()->quarantine_size_mb);
+ Printf("thread_local_quarantine_size_kb=%zuK\n",
+ (uptr)flags()->thread_local_quarantine_size_kb);
Printf("malloc_context_size=%zu\n",
(uptr)common_flags()->malloc_context_size);
@@ -401,6 +425,79 @@ static void PrintAddressSpaceLayout() {
kHighShadowBeg > kMidMemEnd);
}
+static void InitializeShadowMemory() {
+ // Set the shadow memory address to uninitialized.
+ __asan_shadow_memory_dynamic_address = kDefaultShadowSentinel;
+
+ uptr shadow_start = kLowShadowBeg;
+  // Detect whether a dynamic shadow address must be used and find an
+  // available location when necessary. When a dynamic address is used, the
+  // macro |kLowShadowBeg| expands to |__asan_shadow_memory_dynamic_address|,
+  // which is |kDefaultShadowSentinel|.
+ if (shadow_start == kDefaultShadowSentinel) {
+ __asan_shadow_memory_dynamic_address = 0;
+ CHECK_EQ(0, kLowShadowBeg);
+
+ uptr granularity = GetMmapGranularity();
+ uptr alignment = 8 * granularity;
+ uptr left_padding = granularity;
+ uptr space_size = kHighShadowEnd + left_padding;
+
+ shadow_start = FindAvailableMemoryRange(space_size, alignment, granularity);
+ CHECK_NE((uptr)0, shadow_start);
+ CHECK(IsAligned(shadow_start, alignment));
+ }
+ // Update the shadow memory address (potentially) used by instrumentation.
+ __asan_shadow_memory_dynamic_address = shadow_start;
+
+ if (kLowShadowBeg)
+ shadow_start -= GetMmapGranularity();
+ bool full_shadow_is_available =
+ MemoryRangeIsAvailable(shadow_start, kHighShadowEnd);
+
+#if SANITIZER_LINUX && defined(__x86_64__) && defined(_LP64) && \
+ !ASAN_FIXED_MAPPING
+ if (!full_shadow_is_available) {
+ kMidMemBeg = kLowMemEnd < 0x3000000000ULL ? 0x3000000000ULL : 0;
+ kMidMemEnd = kLowMemEnd < 0x3000000000ULL ? 0x4fffffffffULL : 0;
+ }
+#endif
+
+ if (Verbosity()) PrintAddressSpaceLayout();
+
+ if (full_shadow_is_available) {
+ // mmap the low shadow plus at least one page at the left.
+ if (kLowShadowBeg)
+ ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow");
+ // mmap the high shadow.
+ ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow");
+ // protect the gap.
+ ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1);
+ CHECK_EQ(kShadowGapEnd, kHighShadowBeg - 1);
+ } else if (kMidMemBeg &&
+ MemoryRangeIsAvailable(shadow_start, kMidMemBeg - 1) &&
+ MemoryRangeIsAvailable(kMidMemEnd + 1, kHighShadowEnd)) {
+ CHECK(kLowShadowBeg != kLowShadowEnd);
+ // mmap the low shadow plus at least one page at the left.
+ ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow");
+ // mmap the mid shadow.
+ ReserveShadowMemoryRange(kMidShadowBeg, kMidShadowEnd, "mid shadow");
+ // mmap the high shadow.
+ ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow");
+ // protect the gaps.
+ ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1);
+ ProtectGap(kShadowGap2Beg, kShadowGap2End - kShadowGap2Beg + 1);
+ ProtectGap(kShadowGap3Beg, kShadowGap3End - kShadowGap3Beg + 1);
+ } else {
+ Report("Shadow memory range interleaves with an existing memory mapping. "
+ "ASan cannot proceed correctly. ABORTING.\n");
+ Report("ASan shadow was supposed to be located in the [%p-%p] range.\n",
+ shadow_start, kHighShadowEnd);
+ DumpProcessMap();
+ Die();
+ }
+}
+
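The dynamic-base path above reduces to one piece of address arithmetic: reserve room for the whole shadow plus one guard granule on the left, and require the base to be aligned to eight allocation granules. A simplified, hedged sketch of just that arithmetic, assuming a 64-bit target, with FindAvailableMemoryRange stubbed out (the real one scans the process mappings):

    #include <cassert>
    #include <cstdint>

    using uptr = uintptr_t;

    static bool IsAligned(uptr a, uptr alignment) {
      return (a & (alignment - 1)) == 0;
    }

    // Hypothetical stand-in for the real OS-level search over the process
    // mappings; returns some sufficiently aligned free address.
    static uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr padding) {
      (void)size; (void)padding;
      return alignment * 16;  // placeholder result for the sketch
    }

    int main() {
      const uptr granularity = 0x10000;               // assumption: 64 KiB
      const uptr kHighShadowEnd = 0x10007fff7fffULL;  // illustrative 64-bit value
      uptr alignment = 8 * granularity;               // shadow base alignment
      uptr left_padding = granularity;                // guard granule on the left
      uptr space_size = kHighShadowEnd + left_padding;
      uptr shadow_start = FindAvailableMemoryRange(space_size, alignment,
                                                   granularity);
      assert(shadow_start != 0 && IsAligned(shadow_start, alignment));
    }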
static void AsanInitInternal() {
if (LIKELY(asan_inited)) return;
SanitizerToolName = "AddressSanitizer";
@@ -434,7 +531,6 @@ static void AsanInitInternal() {
__sanitizer_set_report_path(common_flags()->log_path);
- // Enable UAR detection, if required.
__asan_option_detect_stack_use_after_return =
flags()->detect_stack_use_after_return;
@@ -453,61 +549,9 @@ static void AsanInitInternal() {
ReplaceSystemMalloc();
- uptr shadow_start = kLowShadowBeg;
- if (kLowShadowBeg)
- shadow_start -= GetMmapGranularity();
- bool full_shadow_is_available =
- MemoryRangeIsAvailable(shadow_start, kHighShadowEnd);
-
-#if SANITIZER_LINUX && defined(__x86_64__) && defined(_LP64) && \
- !ASAN_FIXED_MAPPING
- if (!full_shadow_is_available) {
- kMidMemBeg = kLowMemEnd < 0x3000000000ULL ? 0x3000000000ULL : 0;
- kMidMemEnd = kLowMemEnd < 0x3000000000ULL ? 0x4fffffffffULL : 0;
- }
-#elif SANITIZER_WINDOWS64
- // Disable the "mid mem" shadow layout.
- if (!full_shadow_is_available) {
- kMidMemBeg = 0;
- kMidMemEnd = 0;
- }
-#endif
-
- if (Verbosity()) PrintAddressSpaceLayout();
-
DisableCoreDumperIfNecessary();
- if (full_shadow_is_available) {
- // mmap the low shadow plus at least one page at the left.
- if (kLowShadowBeg)
- ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow");
- // mmap the high shadow.
- ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow");
- // protect the gap.
- ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1);
- CHECK_EQ(kShadowGapEnd, kHighShadowBeg - 1);
- } else if (kMidMemBeg &&
- MemoryRangeIsAvailable(shadow_start, kMidMemBeg - 1) &&
- MemoryRangeIsAvailable(kMidMemEnd + 1, kHighShadowEnd)) {
- CHECK(kLowShadowBeg != kLowShadowEnd);
- // mmap the low shadow plus at least one page at the left.
- ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow");
- // mmap the mid shadow.
- ReserveShadowMemoryRange(kMidShadowBeg, kMidShadowEnd, "mid shadow");
- // mmap the high shadow.
- ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow");
- // protect the gaps.
- ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1);
- ProtectGap(kShadowGap2Beg, kShadowGap2End - kShadowGap2Beg + 1);
- ProtectGap(kShadowGap3Beg, kShadowGap3End - kShadowGap3Beg + 1);
- } else {
- Report("Shadow memory range interleaves with an existing memory mapping. "
- "ASan cannot proceed correctly. ABORTING.\n");
- Report("ASan shadow was supposed to be located in the [%p-%p] range.\n",
- shadow_start, kHighShadowEnd);
- DumpProcessMap();
- Die();
- }
+ InitializeShadowMemory();
AsanTSDInit(PlatformTSDDtor);
InstallDeadlySignalHandlers(AsanOnDeadlySignal);
@@ -599,6 +643,9 @@ static AsanInitializer asan_initializer;
using namespace __asan; // NOLINT
void NOINLINE __asan_handle_no_return() {
+ if (asan_init_is_running)
+ return;
+
int local_stack;
AsanThread *curr_thread = GetCurrentThread();
uptr PageSize = GetPageSizeCached();
diff --git a/lib/asan/asan_scariness_score.h b/lib/asan/asan_scariness_score.h
index 492eb561c6e2..7f1571416fd5 100644
--- a/lib/asan/asan_scariness_score.h
+++ b/lib/asan/asan_scariness_score.h
@@ -34,10 +34,10 @@
#include "sanitizer_common/sanitizer_libc.h"
namespace __asan {
-class ScarinessScore {
- public:
- ScarinessScore() {
+struct ScarinessScoreBase {
+ void Clear() {
descr[0] = 0;
+ score = 0;
}
void Scare(int add_to_score, const char *reason) {
if (descr[0])
@@ -52,16 +52,23 @@ class ScarinessScore {
Printf("SCARINESS: %d (%s)\n", score, descr);
}
static void PrintSimple(int score, const char *descr) {
- ScarinessScore SS;
- SS.Scare(score, descr);
- SS.Print();
+ ScarinessScoreBase SSB;
+ SSB.Clear();
+ SSB.Scare(score, descr);
+ SSB.Print();
}
private:
- int score = 0;
+ int score;
char descr[1024];
};
+struct ScarinessScore : ScarinessScoreBase {
+ ScarinessScore() {
+ Clear();
+ }
+};
+
} // namespace __asan
#endif // ASAN_SCARINESS_SCORE_H
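The point of the split: error descriptions now live in global or union storage and cannot run constructors, so the base type is trivially constructible and initialized through an explicit Clear(), while stack users keep the old constructor behavior through the derived type. A hedged, self-contained sketch of the same shape (fields left public and Printf swapped for printf for brevity):

    #include <cstdio>
    #include <cstring>

    struct ScarinessScoreBase {
      void Clear() { descr[0] = 0; score = 0; }
      void Scare(int add_to_score, const char *reason) {
        if (descr[0]) strcat(descr, "-");  // the real code bounds this append
        strcat(descr, reason);
        score += add_to_score;
      }
      void Print() { printf("SCARINESS: %d (%s)\n", score, descr); }
      int score;
      char descr[1024];
    };

    struct ScarinessScore : ScarinessScoreBase {
      ScarinessScore() { Clear(); }
    };

    ScarinessScoreBase global_score;  // no static constructor required

    int main() {
      global_score.Clear();  // explicit init, e.g. from an error object's ctor
      global_score.Scare(10, "heap-buffer-overflow");
      global_score.Print();
    }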
diff --git a/lib/asan/asan_thread.cc b/lib/asan/asan_thread.cc
index d7e2cca65660..537b53d9e0c3 100644
--- a/lib/asan/asan_thread.cc
+++ b/lib/asan/asan_thread.cc
@@ -141,7 +141,9 @@ void AsanThread::StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom,
current_fake_stack->Destroy(this->tid());
}
-void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save) {
+void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save,
+ uptr *bottom_old,
+ uptr *size_old) {
if (!atomic_load(&stack_switching_, memory_order_relaxed)) {
Report("ERROR: finishing a fiber switch that has not started\n");
Die();
@@ -152,6 +154,10 @@ void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save) {
fake_stack_ = fake_stack_save;
}
+ if (bottom_old)
+ *bottom_old = stack_bottom_;
+ if (size_old)
+ *size_old = stack_top_ - stack_bottom_;
stack_bottom_ = next_stack_bottom_;
stack_top_ = next_stack_top_;
atomic_store(&stack_switching_, 0, memory_order_release);
@@ -345,7 +351,7 @@ AsanThread *GetCurrentThread() {
// limits, so only do this magic on Android, and only if the found thread
// is the main thread.
AsanThreadContext *tctx = GetThreadContextByTidLocked(0);
- if (ThreadStackContainsAddress(tctx, &context)) {
+ if (tctx && ThreadStackContainsAddress(tctx, &context)) {
SetCurrentThread(tctx->thread);
return tctx->thread;
}
@@ -447,12 +453,16 @@ void __sanitizer_start_switch_fiber(void **fakestacksave, const void *bottom,
}
SANITIZER_INTERFACE_ATTRIBUTE
-void __sanitizer_finish_switch_fiber(void* fakestack) {
+void __sanitizer_finish_switch_fiber(void* fakestack,
+ const void **bottom_old,
+ uptr *size_old) {
AsanThread *t = GetCurrentThread();
if (!t) {
    VReport(1, "__sanitizer_finish_switch_fiber called from unknown thread\n");
return;
}
- t->FinishSwitchFiber((FakeStack*)fakestack);
+ t->FinishSwitchFiber((FakeStack*)fakestack,
+ (uptr*)bottom_old,
+ (uptr*)size_old);
}
}
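The new out-parameters exist so a fiber scheduler can learn the bounds of the stack it is leaving and hand them back when switching in the other direction. A hedged ucontext-based sketch of the full protocol; note the public header declares size_t* where this patch's internal plumbing uses uptr*, and the annotation functions here are the real sanitizer interface while everything else is illustrative:

    #include <sanitizer/common_interface_defs.h>
    #include <ucontext.h>
    #include <cstddef>
    #include <cstdio>

    static ucontext_t main_ctx, fiber_ctx;
    static char fiber_stack[64 * 1024];
    static void *fake_stack_save;
    static const void *main_bottom;
    static size_t main_size;

    static void FiberMain() {
      // Arriving on the fiber: finish the switch and capture the old (main)
      // stack bounds through the new out-parameters.
      __sanitizer_finish_switch_fiber(fake_stack_save, &main_bottom, &main_size);
      puts("on fiber");
      // Switching back: announce the destination stack we just captured.
      __sanitizer_start_switch_fiber(&fake_stack_save, main_bottom, main_size);
      swapcontext(&fiber_ctx, &main_ctx);
    }

    int main() {
      getcontext(&fiber_ctx);
      fiber_ctx.uc_stack.ss_sp = fiber_stack;
      fiber_ctx.uc_stack.ss_size = sizeof(fiber_stack);
      fiber_ctx.uc_link = &main_ctx;
      makecontext(&fiber_ctx, FiberMain, 0);

      __sanitizer_start_switch_fiber(&fake_stack_save, fiber_stack,
                                     sizeof(fiber_stack));
      swapcontext(&main_ctx, &fiber_ctx);
      // Back on the main stack: complete the switch.
      __sanitizer_finish_switch_fiber(fake_stack_save, nullptr, nullptr);
      puts("back on main");
    }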
diff --git a/lib/asan/asan_thread.h b/lib/asan/asan_thread.h
index 92a92a2e863e..f53dfb712449 100644
--- a/lib/asan/asan_thread.h
+++ b/lib/asan/asan_thread.h
@@ -94,7 +94,8 @@ class AsanThread {
}
void StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom, uptr size);
- void FinishSwitchFiber(FakeStack *fake_stack_save);
+ void FinishSwitchFiber(FakeStack *fake_stack_save, uptr *bottom_old,
+ uptr *size_old);
bool has_fake_stack() {
return !atomic_load(&stack_switching_, memory_order_relaxed) &&
diff --git a/lib/asan/asan_win.cc b/lib/asan/asan_win.cc
index 2ef78cd15508..78268d83e539 100644
--- a/lib/asan/asan_win.cc
+++ b/lib/asan/asan_win.cc
@@ -19,6 +19,7 @@
#include <stdlib.h>
+#include "asan_globals_win.h"
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_report.h"
@@ -37,7 +38,13 @@ int __asan_should_detect_stack_use_after_return() {
return __asan_option_detect_stack_use_after_return;
}
-// -------------------- A workaround for the abscence of weak symbols ----- {{{
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __asan_get_shadow_memory_dynamic_address() {
+ __asan_init();
+ return __asan_shadow_memory_dynamic_address;
+}
+
+// -------------------- A workaround for the absence of weak symbols ----- {{{
// We don't have a direct equivalent of weak symbols when using MSVC, but we can
// use the /alternatename directive to tell the linker to default a specific
// symbol to a specific value, which works nicely for allocator hooks and
@@ -64,14 +71,22 @@ void __asan_default_on_error() {}
// }}}
} // extern "C"
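For readers unfamiliar with the directive named above: /alternatename can be embedded in an object file through a linker comment, which is how a default definition gets picked up only when no stronger definition exists. A minimal, hedged, MSVC-only sketch with illustrative symbol names (these are not real ASan symbols):

    // Sketch of the MSVC weak-symbol workaround.
    extern "C" void my_hook_default() {}
    extern "C" void my_hook();  // resolves to the default unless overridden

    // x86 decorates C symbols with a leading underscore; x64 does not.
    #if defined(_M_X64)
    #pragma comment(linker, "/alternatename:my_hook=my_hook_default")
    #elif defined(_M_IX86)
    #pragma comment(linker, "/alternatename:_my_hook=_my_hook_default")
    #endif

    int main() { my_hook(); }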
-// ---------------------- Windows-specific inteceptors ---------------- {{{
+// ---------------------- Windows-specific interceptors ---------------- {{{
+INTERCEPTOR_WINAPI(void, RtlRaiseException, EXCEPTION_RECORD *ExceptionRecord) {
+ CHECK(REAL(RtlRaiseException));
+ // This is a noreturn function, unless it's one of the exceptions raised to
+ // communicate with the debugger, such as the one from OutputDebugString.
+ if (ExceptionRecord->ExceptionCode != DBG_PRINTEXCEPTION_C)
+ __asan_handle_no_return();
+ REAL(RtlRaiseException)(ExceptionRecord);
+}
+
INTERCEPTOR_WINAPI(void, RaiseException, void *a, void *b, void *c, void *d) {
CHECK(REAL(RaiseException));
__asan_handle_no_return();
REAL(RaiseException)(a, b, c, d);
}
-
#ifdef _WIN64
INTERCEPTOR_WINAPI(int, __C_specific_handler, void *a, void *b, void *c, void *d) { // NOLINT
@@ -123,44 +138,12 @@ INTERCEPTOR_WINAPI(DWORD, CreateThread,
asan_thread_start, t, thr_flags, tid);
}
-namespace {
-BlockingMutex mu_for_thread_tracking(LINKER_INITIALIZED);
-
-void EnsureWorkerThreadRegistered() {
- // FIXME: GetCurrentThread relies on TSD, which might not play well with
- // system thread pools. We might want to use something like reference
- // counting to zero out GetCurrentThread() underlying storage when the last
- // work item finishes? Or can we disable reclaiming of threads in the pool?
- BlockingMutexLock l(&mu_for_thread_tracking);
- if (__asan::GetCurrentThread())
- return;
-
- AsanThread *t = AsanThread::Create(
- /* start_routine */ nullptr, /* arg */ nullptr,
- /* parent_tid */ -1, /* stack */ nullptr, /* detached */ true);
- t->Init();
- asanThreadRegistry().StartThread(t->tid(), 0, 0);
- SetCurrentThread(t);
-}
-} // namespace
-
-INTERCEPTOR_WINAPI(DWORD, NtWaitForWorkViaWorkerFactory, DWORD a, DWORD b) {
- // NtWaitForWorkViaWorkerFactory is called from system worker pool threads to
- // query work scheduled by BindIoCompletionCallback, QueueUserWorkItem, etc.
- // System worker pool threads are created at arbitraty point in time and
- // without using CreateThread, so we wrap NtWaitForWorkViaWorkerFactory
- // instead and don't register a specific parent_tid/stack.
- EnsureWorkerThreadRegistered();
- return REAL(NtWaitForWorkViaWorkerFactory)(a, b);
-}
-
// }}}
namespace __asan {
void InitializePlatformInterceptors() {
ASAN_INTERCEPT_FUNC(CreateThread);
- ASAN_INTERCEPT_FUNC(RaiseException);
#ifdef _WIN64
ASAN_INTERCEPT_FUNC(__C_specific_handler);
@@ -169,11 +152,15 @@ void InitializePlatformInterceptors() {
ASAN_INTERCEPT_FUNC(_except_handler4);
#endif
- // NtWaitForWorkViaWorkerFactory is always linked dynamically.
- CHECK(::__interception::OverrideFunction(
- "NtWaitForWorkViaWorkerFactory",
- (uptr)WRAP(NtWaitForWorkViaWorkerFactory),
- (uptr *)&REAL(NtWaitForWorkViaWorkerFactory)));
+ // Try to intercept kernel32!RaiseException, and if that fails, intercept
+ // ntdll!RtlRaiseException instead.
+ if (!::__interception::OverrideFunction("RaiseException",
+ (uptr)WRAP(RaiseException),
+ (uptr *)&REAL(RaiseException))) {
+ CHECK(::__interception::OverrideFunction("RtlRaiseException",
+ (uptr)WRAP(RtlRaiseException),
+ (uptr *)&REAL(RtlRaiseException)));
+ }
}
void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
@@ -229,8 +216,7 @@ void AsanOnDeadlySignal(int, void *siginfo, void *context) {
// Exception handler for dealing with shadow memory.
static LONG CALLBACK
ShadowExceptionHandler(PEXCEPTION_POINTERS exception_pointers) {
- static uptr page_size = GetPageSizeCached();
- static uptr alloc_granularity = GetMmapGranularity();
+ uptr page_size = GetPageSizeCached();
// Only handle access violations.
if (exception_pointers->ExceptionRecord->ExceptionCode !=
EXCEPTION_ACCESS_VIOLATION) {
@@ -276,22 +262,57 @@ void InitializePlatformExceptionHandlers() {
static LPTOP_LEVEL_EXCEPTION_FILTER default_seh_handler;
-static long WINAPI SEHHandler(EXCEPTION_POINTERS *info) {
- EXCEPTION_RECORD *exception_record = info->ExceptionRecord;
- CONTEXT *context = info->ContextRecord;
+// Check based on flags if we should report this exception.
+static bool ShouldReportDeadlyException(unsigned code) {
+ switch (code) {
+ case EXCEPTION_ACCESS_VIOLATION:
+ case EXCEPTION_IN_PAGE_ERROR:
+ return common_flags()->handle_segv;
+ case EXCEPTION_BREAKPOINT:
+ case EXCEPTION_ILLEGAL_INSTRUCTION: {
+ return common_flags()->handle_sigill;
+ }
+ }
+ return false;
+}
- if (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
- exception_record->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) {
- const char *description =
- (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION)
- ? "access-violation"
- : "in-page-error";
- SignalContext sig = SignalContext::Create(exception_record, context);
- ReportDeadlySignal(description, sig);
+// Return the textual name for this exception.
+const char *DescribeSignalOrException(int signo) {
+ unsigned code = signo;
+ // Get the string description of the exception if this is a known deadly
+ // exception.
+ switch (code) {
+ case EXCEPTION_ACCESS_VIOLATION:
+ return "access-violation";
+ case EXCEPTION_IN_PAGE_ERROR:
+ return "in-page-error";
+ case EXCEPTION_BREAKPOINT:
+ return "breakpoint";
+ case EXCEPTION_ILLEGAL_INSTRUCTION:
+ return "illegal-instruction";
}
+ return nullptr;
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+long __asan_unhandled_exception_filter(EXCEPTION_POINTERS *info) {
+ EXCEPTION_RECORD *exception_record = info->ExceptionRecord;
+ CONTEXT *context = info->ContextRecord;
+ // Continue the search if the signal wasn't deadly.
+ if (!ShouldReportDeadlyException(exception_record->ExceptionCode))
+ return EXCEPTION_CONTINUE_SEARCH;
// FIXME: Handle EXCEPTION_STACK_OVERFLOW here.
+ SignalContext sig = SignalContext::Create(exception_record, context);
+ ReportDeadlySignal(exception_record->ExceptionCode, sig);
+ UNREACHABLE("returned from reporting deadly signal");
+}
+
+static long WINAPI SEHHandler(EXCEPTION_POINTERS *info) {
+ __asan_unhandled_exception_filter(info);
+
+ // Bubble out to the default exception filter.
return default_seh_handler(info);
}
@@ -331,10 +352,25 @@ int __asan_set_seh_filter() {
// immediately after the CRT runs. This way, our exception filter is called
// first and we can delegate to their filter if appropriate.
#pragma section(".CRT$XCAB", long, read) // NOLINT
-__declspec(allocate(".CRT$XCAB"))
- int (*__intercept_seh)() = __asan_set_seh_filter;
+__declspec(allocate(".CRT$XCAB")) int (*__intercept_seh)() =
+ __asan_set_seh_filter;
+
+// Piggyback on the TLS initialization callback directory to initialize asan as
+// early as possible. Initializers in .CRT$XL* are called directly by ntdll
+// and run before the CRT. Users also add code to .CRT$XLC, so it's important
+// to run our initializers first.
+static void NTAPI asan_thread_init(void *module, DWORD reason, void *reserved) {
+ if (reason == DLL_PROCESS_ATTACH) __asan_init();
+}
+
+#pragma section(".CRT$XLAB", long, read) // NOLINT
+__declspec(allocate(".CRT$XLAB")) void (NTAPI *__asan_tls_init)(void *,
+ unsigned long, void *) = asan_thread_init;
#endif
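The section names above are doing all the work: the CRT walks pointer arrays placed in .CRT$X* sections in lexical order, so a pointer planted in an early subsection runs before everything later. A small hedged illustration of the same registration pattern (MSVC only; names are illustrative):

    #include <cstdio>

    static bool early_ran = false;

    static int early_init() {
      early_ran = true;  // keep it trivial: the CRT is not fully up yet
      return 0;          // nonzero reports an initialization failure
    }

    // Plant a pointer in the C initializer table; .CRT$XIB sorts before the
    // C++ constructor sections (.CRT$XC*), so this runs first.
    #pragma section(".CRT$XIB", long, read)
    __declspec(allocate(".CRT$XIB")) int (*p_early_init)() = early_init;

    int main() { printf("early_ran=%d\n", early_ran); }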
+
+ASAN_LINK_GLOBALS_WIN()
+
// }}}
} // namespace __asan
-#endif // _WIN32
+#endif // SANITIZER_WINDOWS
diff --git a/lib/asan/asan_win_dll_thunk.cc b/lib/asan/asan_win_dll_thunk.cc
index f55588613066..4764fd0a736c 100644
--- a/lib/asan/asan_win_dll_thunk.cc
+++ b/lib/asan/asan_win_dll_thunk.cc
@@ -15,21 +15,30 @@
// See https://github.com/google/sanitizers/issues/209 for the details.
//===----------------------------------------------------------------------===//
-// Only compile this code when buidling asan_dll_thunk.lib
+// Only compile this code when building asan_dll_thunk.lib
// Using #ifdef rather than relying on Makefiles etc.
// simplifies the build procedure.
#ifdef ASAN_DLL_THUNK
#include "asan_init_version.h"
+#include "asan_globals_win.h"
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_platform_interceptors.h"
+#ifdef _M_IX86
+#define WINAPI __stdcall
+#else
+#define WINAPI
+#endif
+
// ---------- Function interception helper functions and macros ----------- {{{1
extern "C" {
-void *__stdcall GetModuleHandleA(const char *module_name);
-void *__stdcall GetProcAddress(void *module, const char *proc_name);
+void *WINAPI GetModuleHandleA(const char *module_name);
+void *WINAPI GetProcAddress(void *module, const char *proc_name);
void abort();
}
+using namespace __sanitizer;
+
static uptr getRealProcAddressOrDie(const char *name) {
uptr ret =
__interception::InternalGetProcAddress((void *)GetModuleHandleA(0), name);
@@ -105,7 +114,7 @@ static void InterceptHooks();
// ---------- Function wrapping helpers ----------------------------------- {{{1
#define WRAP_V_V(name) \
extern "C" void name() { \
- typedef void (*fntype)(); \
+ typedef decltype(name) *fntype; \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
fn(); \
} \
@@ -113,7 +122,7 @@ static void InterceptHooks();
#define WRAP_V_W(name) \
extern "C" void name(void *arg) { \
- typedef void (*fntype)(void *arg); \
+ typedef decltype(name) *fntype; \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
fn(arg); \
} \
@@ -121,7 +130,7 @@ static void InterceptHooks();
#define WRAP_V_WW(name) \
extern "C" void name(void *arg1, void *arg2) { \
- typedef void (*fntype)(void *, void *); \
+ typedef decltype(name) *fntype; \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
fn(arg1, arg2); \
} \
@@ -129,7 +138,7 @@ static void InterceptHooks();
#define WRAP_V_WWW(name) \
extern "C" void name(void *arg1, void *arg2, void *arg3) { \
- typedef void *(*fntype)(void *, void *, void *); \
+ typedef decltype(name) *fntype; \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
fn(arg1, arg2, arg3); \
} \
@@ -137,7 +146,7 @@ static void InterceptHooks();
#define WRAP_W_V(name) \
extern "C" void *name() { \
- typedef void *(*fntype)(); \
+ typedef decltype(name) *fntype; \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
return fn(); \
} \
@@ -145,7 +154,7 @@ static void InterceptHooks();
#define WRAP_W_W(name) \
extern "C" void *name(void *arg) { \
- typedef void *(*fntype)(void *arg); \
+ typedef decltype(name) *fntype; \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
return fn(arg); \
} \
@@ -153,7 +162,7 @@ static void InterceptHooks();
#define WRAP_W_WW(name) \
extern "C" void *name(void *arg1, void *arg2) { \
- typedef void *(*fntype)(void *, void *); \
+ typedef decltype(name) *fntype; \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
return fn(arg1, arg2); \
} \
@@ -161,7 +170,7 @@ static void InterceptHooks();
#define WRAP_W_WWW(name) \
extern "C" void *name(void *arg1, void *arg2, void *arg3) { \
- typedef void *(*fntype)(void *, void *, void *); \
+ typedef decltype(name) *fntype; \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
return fn(arg1, arg2, arg3); \
} \
@@ -169,7 +178,7 @@ static void InterceptHooks();
#define WRAP_W_WWWW(name) \
extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4) { \
- typedef void *(*fntype)(void *, void *, void *, void *); \
+ typedef decltype(name) *fntype; \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
return fn(arg1, arg2, arg3, arg4); \
} \
@@ -178,7 +187,7 @@ static void InterceptHooks();
#define WRAP_W_WWWWW(name) \
extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4, \
void *arg5) { \
- typedef void *(*fntype)(void *, void *, void *, void *, void *); \
+ typedef decltype(name) *fntype; \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
return fn(arg1, arg2, arg3, arg4, arg5); \
} \
@@ -187,7 +196,7 @@ static void InterceptHooks();
#define WRAP_W_WWWWWW(name) \
extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4, \
void *arg5, void *arg6) { \
- typedef void *(*fntype)(void *, void *, void *, void *, void *, void *); \
+ typedef decltype(name) *fntype; \
static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
return fn(arg1, arg2, arg3, arg4, arg5, arg6); \
} \
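Switching the typedefs to decltype(name) makes each wrapper inherit the exact prototype of the function it forwards instead of an all-void* fiction, so a WRAP_* macro with the wrong arity now fails to compile. A hedged sketch of what one expansion amounts to, with the GetProcAddress-based lookup stubbed out:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    using uptr = uintptr_t;

    // Hypothetical "real" function living in the ASan DLL.
    static void *real_memcpy_impl(void *dst, const void *src, uptr size) {
      return memcpy(dst, src, size);
    }

    // Hypothetical stand-in for the GetProcAddress-based lookup in the thunk.
    static uptr getRealProcAddressOrDie(const char *) {
      return (uptr)&real_memcpy_impl;
    }

    // What WRAP_W_WWW(__asan_memcpy) now expands to, in spirit: decltype gives
    // the wrapper the exact prototype of the wrapped function rather than
    // void *(void *, void *, void *), so argument mismatches fail to compile.
    extern "C" void *__asan_memcpy(void *dst, const void *src, uptr size) {
      typedef decltype(__asan_memcpy) *fntype;
      static fntype fn = (fntype)getRealProcAddressOrDie("__asan_memcpy");
      return fn(dst, src, size);
    }

    int main() {
      char buf[6] = {};
      __asan_memcpy(buf, "hello", 6);
      puts(buf);
    }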
@@ -198,9 +207,11 @@ static void InterceptHooks();
// Don't use the INTERFACE_FUNCTION machinery for this function as we actually
// want to call it in the __asan_init interceptor.
WRAP_W_V(__asan_should_detect_stack_use_after_return)
+WRAP_W_V(__asan_get_shadow_memory_dynamic_address)
extern "C" {
int __asan_option_detect_stack_use_after_return;
+ uptr __asan_shadow_memory_dynamic_address;
// Manually wrap __asan_init as we need to initialize
// __asan_option_detect_stack_use_after_return afterwards.
@@ -214,7 +225,8 @@ extern "C" {
fn();
__asan_option_detect_stack_use_after_return =
(__asan_should_detect_stack_use_after_return() != 0);
-
+ __asan_shadow_memory_dynamic_address =
+ (uptr)__asan_get_shadow_memory_dynamic_address();
InterceptHooks();
}
}
@@ -224,6 +236,7 @@ extern "C" void __asan_version_mismatch_check() {
}
INTERFACE_FUNCTION(__asan_handle_no_return)
+INTERFACE_FUNCTION(__asan_unhandled_exception_filter)
INTERFACE_FUNCTION(__asan_report_store1)
INTERFACE_FUNCTION(__asan_report_store2)
@@ -257,6 +270,13 @@ INTERFACE_FUNCTION(__asan_memcpy);
INTERFACE_FUNCTION(__asan_memset);
INTERFACE_FUNCTION(__asan_memmove);
+INTERFACE_FUNCTION(__asan_set_shadow_00);
+INTERFACE_FUNCTION(__asan_set_shadow_f1);
+INTERFACE_FUNCTION(__asan_set_shadow_f2);
+INTERFACE_FUNCTION(__asan_set_shadow_f3);
+INTERFACE_FUNCTION(__asan_set_shadow_f5);
+INTERFACE_FUNCTION(__asan_set_shadow_f8);
+
INTERFACE_FUNCTION(__asan_alloca_poison);
INTERFACE_FUNCTION(__asan_allocas_unpoison);
@@ -306,17 +326,18 @@ INTERFACE_FUNCTION(__sanitizer_annotate_contiguous_container)
INTERFACE_FUNCTION(__sanitizer_contiguous_container_find_bad_address)
INTERFACE_FUNCTION(__sanitizer_cov)
INTERFACE_FUNCTION(__sanitizer_cov_dump)
+INTERFACE_FUNCTION(__sanitizer_dump_coverage)
+INTERFACE_FUNCTION(__sanitizer_dump_trace_pc_guard_coverage)
INTERFACE_FUNCTION(__sanitizer_cov_indir_call16)
INTERFACE_FUNCTION(__sanitizer_cov_init)
INTERFACE_FUNCTION(__sanitizer_cov_module_init)
INTERFACE_FUNCTION(__sanitizer_cov_trace_basic_block)
INTERFACE_FUNCTION(__sanitizer_cov_trace_func_enter)
-INTERFACE_FUNCTION(__sanitizer_cov_trace_cmp)
-INTERFACE_FUNCTION(__sanitizer_cov_trace_switch)
+INTERFACE_FUNCTION(__sanitizer_cov_trace_pc_guard)
+INTERFACE_FUNCTION(__sanitizer_cov_trace_pc_guard_init)
INTERFACE_FUNCTION(__sanitizer_cov_with_check)
INTERFACE_FUNCTION(__sanitizer_get_allocated_size)
INTERFACE_FUNCTION(__sanitizer_get_coverage_guards)
-INTERFACE_FUNCTION(__sanitizer_get_coverage_pc_buffer)
INTERFACE_FUNCTION(__sanitizer_get_current_allocated_bytes)
INTERFACE_FUNCTION(__sanitizer_get_estimated_allocated_size)
INTERFACE_FUNCTION(__sanitizer_get_free_bytes)
@@ -327,6 +348,8 @@ INTERFACE_FUNCTION(__sanitizer_get_total_unique_coverage)
INTERFACE_FUNCTION(__sanitizer_get_unmapped_bytes)
INTERFACE_FUNCTION(__sanitizer_maybe_open_cov_file)
INTERFACE_FUNCTION(__sanitizer_print_stack_trace)
+INTERFACE_FUNCTION(__sanitizer_symbolize_pc)
+INTERFACE_FUNCTION(__sanitizer_symbolize_global)
INTERFACE_FUNCTION(__sanitizer_ptr_cmp)
INTERFACE_FUNCTION(__sanitizer_ptr_sub)
INTERFACE_FUNCTION(__sanitizer_report_error_summary)
@@ -347,6 +370,7 @@ INTERFACE_FUNCTION(__sanitizer_verify_contiguous_container)
INTERFACE_FUNCTION(__sanitizer_install_malloc_and_free_hooks)
INTERFACE_FUNCTION(__sanitizer_start_switch_fiber)
INTERFACE_FUNCTION(__sanitizer_finish_switch_fiber)
+INTERFACE_FUNCTION(__sanitizer_get_module_and_offset_for_pc)
// TODO(timurrrr): Add more interface functions on the as-needed basis.
@@ -368,6 +392,7 @@ WRAP_W_WW(realloc)
WRAP_W_WW(_realloc_base)
WRAP_W_WWW(_realloc_dbg)
WRAP_W_WWW(_recalloc)
+WRAP_W_WWW(_recalloc_base)
WRAP_W_W(_msize)
WRAP_W_W(_expand)
@@ -444,4 +469,15 @@ static int call_asan_init() {
#pragma section(".CRT$XIB", long, read) // NOLINT
__declspec(allocate(".CRT$XIB")) int (*__asan_preinit)() = call_asan_init;
+static void WINAPI asan_thread_init(void *mod, unsigned long reason,
+ void *reserved) {
+ if (reason == /*DLL_PROCESS_ATTACH=*/1) __asan_init();
+}
+
+#pragma section(".CRT$XLAB", long, read) // NOLINT
+__declspec(allocate(".CRT$XLAB")) void (WINAPI *__asan_tls_init)(void *,
+ unsigned long, void *) = asan_thread_init;
+
+ASAN_LINK_GLOBALS_WIN()
+
#endif // ASAN_DLL_THUNK
diff --git a/lib/asan/asan_win_dynamic_runtime_thunk.cc b/lib/asan/asan_win_dynamic_runtime_thunk.cc
index 1175522e7441..8e42f03c1a0d 100644
--- a/lib/asan/asan_win_dynamic_runtime_thunk.cc
+++ b/lib/asan/asan_win_dynamic_runtime_thunk.cc
@@ -1,4 +1,4 @@
-//===-- asan_win_uar_thunk.cc ---------------------------------------------===//
+//===-- asan_win_dynamic_runtime_thunk.cc ---------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -16,22 +16,25 @@
// This includes:
// - forwarding the detect_stack_use_after_return runtime option
// - working around deficiencies of the MD runtime
-// - installing a custom SEH handlerx
+// - installing a custom SEH handler
//
//===----------------------------------------------------------------------===//
-// Only compile this code when buidling asan_dynamic_runtime_thunk.lib
+// Only compile this code when building asan_dynamic_runtime_thunk.lib
// Using #ifdef rather than relying on Makefiles etc.
// simplifies the build procedure.
#ifdef ASAN_DYNAMIC_RUNTIME_THUNK
+#include "asan_globals_win.h"
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
// First, declare CRT sections we'll be using in this file
+#pragma section(".CRT$XIB", long, read) // NOLINT
#pragma section(".CRT$XID", long, read) // NOLINT
#pragma section(".CRT$XCAB", long, read) // NOLINT
#pragma section(".CRT$XTW", long, read) // NOLINT
#pragma section(".CRT$XTY", long, read) // NOLINT
+#pragma section(".CRT$XLAB", long, read) // NOLINT
////////////////////////////////////////////////////////////////////////////////
// Define a copy of __asan_option_detect_stack_use_after_return that should be
@@ -42,14 +45,37 @@
// attribute adds __imp_ prefix to the symbol name of a variable.
// Since in general we don't know if a given TU is going to be used
// with a MT or MD runtime and we don't want to use ugly __imp_ names on Windows
-// just to work around this issue, let's clone the a variable that is
-// constant after initialization anyways.
+// just to work around this issue, let's clone the variable that is constant
+// after initialization anyways.
extern "C" {
__declspec(dllimport) int __asan_should_detect_stack_use_after_return();
-int __asan_option_detect_stack_use_after_return =
+int __asan_option_detect_stack_use_after_return;
+
+__declspec(dllimport) void* __asan_get_shadow_memory_dynamic_address();
+void* __asan_shadow_memory_dynamic_address;
+}
+
+static int InitializeClonedVariables() {
+ __asan_option_detect_stack_use_after_return =
__asan_should_detect_stack_use_after_return();
+ __asan_shadow_memory_dynamic_address =
+ __asan_get_shadow_memory_dynamic_address();
+ return 0;
+}
+
+static void NTAPI asan_thread_init(void *mod, unsigned long reason,
+ void *reserved) {
+ if (reason == DLL_PROCESS_ATTACH) InitializeClonedVariables();
}
+// Our cloned variables must be initialized before C/C++ constructors. If TLS
+// is used, our .CRT$XLAB initializer will run first. If not, our .CRT$XIB
+// initializer is needed as a backup.
+__declspec(allocate(".CRT$XIB")) int (*__asan_initialize_cloned_variables)() =
+ InitializeClonedVariables;
+__declspec(allocate(".CRT$XLAB")) void (NTAPI *__asan_tls_init)(void *,
+ unsigned long, void *) = asan_thread_init;
+
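The cloning pattern above works because a dllimport variable is reached through an __imp_ pointer while a plain global is addressed directly; copying the runtime's value once at startup gives each module a direct copy that stays constant afterwards. A portable hedged sketch of the idea, with the getter standing in for the dllimport function:

    // Names are illustrative; on Windows the getter would be dllimport.
    extern "C" int get_runtime_option() { return 1; }  // stands in for the DLL

    static int cloned_option;  // module-local copy, no import indirection

    static int InitializeClone() {
      cloned_option = get_runtime_option();  // copied once, constant afterwards
      return 0;
    }

    // The real thunk plants a pointer to this in .CRT$XIB/.CRT$XLAB; calling
    // it from main keeps the sketch portable.
    int main() {
      InitializeClone();
      return cloned_option == 1 ? 0 : 1;
    }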
////////////////////////////////////////////////////////////////////////////////
// For some reason, the MD CRT doesn't call the C/C++ terminators on DLL
// unload or on exit. ASan relies on LLVM global_dtors to call
@@ -73,6 +99,7 @@ void UnregisterGlobals() {
int ScheduleUnregisterGlobals() {
return atexit(UnregisterGlobals);
}
+} // namespace
// We need to call 'atexit(UnregisterGlobals);' as early as possible, but after
// atexit() is initialized (.CRT$XIC). As this is executed before C++
@@ -81,8 +108,6 @@ int ScheduleUnregisterGlobals() {
__declspec(allocate(".CRT$XID"))
int (*__asan_schedule_unregister_globals)() = ScheduleUnregisterGlobals;
-} // namespace
-
////////////////////////////////////////////////////////////////////////////////
// ASan SEH handling.
// We need to set the ASan-specific SEH handler at the end of CRT initialization
@@ -97,4 +122,6 @@ __declspec(allocate(".CRT$XCAB")) int (*__asan_seh_interceptor)() =
SetSEHFilter;
}
+ASAN_LINK_GLOBALS_WIN()
+
#endif // ASAN_DYNAMIC_RUNTIME_THUNK
diff --git a/lib/asan/scripts/asan_device_setup b/lib/asan/scripts/asan_device_setup
index 52794b1a441b..fdfc46f6e5b8 100755
--- a/lib/asan/scripts/asan_device_setup
+++ b/lib/asan/scripts/asan_device_setup
@@ -300,20 +300,22 @@ if [[ -n "$ASAN_RT64" ]]; then
cp "$ASAN_RT_PATH/$ASAN_RT64" "$TMPDIR/"
fi
-# FIXME: alloc_dealloc_mismatch=0 prevents a failure in libdvm startup,
-# which may or may not be a real bug (probably not).
-ASAN_OPTIONS=start_deactivated=1,alloc_dealloc_mismatch=0,malloc_context_size=0
+ASAN_OPTIONS=start_deactivated=1,malloc_context_size=0
-function generate_zygote_wrapper { # from, to, asan_rt
+# The name of a symlink to libclang_rt.asan-$ARCH-android.so used in LD_PRELOAD.
+# The idea is to have the same name in lib and lib64 to keep it from falling
+# apart when a 64-bit process spawns a 32-bit one, inheriting the environment.
+ASAN_RT_SYMLINK=symlink-to-libclang_rt.asan
+
+function generate_zygote_wrapper { # from, to
local _from=$1
local _to=$2
- local _asan_rt=$3
if [[ PRE_L -eq 0 ]]; then
# LD_PRELOAD parsing is broken in N if it starts with ":". Luckily, it is
# unset in the system environment since L.
- local _ld_preload=$_asan_rt
+ local _ld_preload=$ASAN_RT_SYMLINK
else
- local _ld_preload=\$LD_PRELOAD:$_asan_rt
+ local _ld_preload=\$LD_PRELOAD:$ASAN_RT_SYMLINK
fi
cat <<EOF >"$TMPDIR/$_from"
#!/system/bin/sh-from-zygote
@@ -342,18 +344,18 @@ if [[ -f "$TMPDIR/app_process64" ]]; then
mv "$TMPDIR/app_process32" "$TMPDIR/app_process32.real"
mv "$TMPDIR/app_process64" "$TMPDIR/app_process64.real"
fi
- generate_zygote_wrapper "app_process32" "/system/bin/app_process32.real" "$ASAN_RT"
- generate_zygote_wrapper "app_process64" "/system/bin/app_process64.real" "$ASAN_RT64"
+ generate_zygote_wrapper "app_process32" "/system/bin/app_process32.real"
+ generate_zygote_wrapper "app_process64" "/system/bin/app_process64.real"
else
# A 32-bit device.
- generate_zygote_wrapper "app_process.wrap" "/system/bin/app_process32" "$ASAN_RT"
+ generate_zygote_wrapper "app_process.wrap" "/system/bin/app_process32"
fi
# General command-line tool wrapper (use for anything that's not started as
# zygote).
cat <<EOF >"$TMPDIR/asanwrapper"
#!/system/bin/sh
-LD_PRELOAD=$ASAN_RT \\
+LD_PRELOAD=$ASAN_RT_SYMLINK \\
exec \$@
EOF
@@ -361,7 +363,7 @@ EOF
if [[ -n "$ASAN_RT64" ]]; then
cat <<EOF >"$TMPDIR/asanwrapper64"
#!/system/bin/sh
-LD_PRELOAD=$ASAN_RT64 \\
+LD_PRELOAD=$ASAN_RT_SYMLINK \\
exec \$@
EOF
@@ -412,12 +414,17 @@ if ! ( cd "$TMPDIRBASE" && diff -qr old/ new/ ) ; then
install "$TMPDIR/app_process64.real" /system/bin 755 $CTX
install "$TMPDIR/asanwrapper" /system/bin 755
install "$TMPDIR/asanwrapper64" /system/bin 755
+
+ adb_shell ln -s $ASAN_RT /system/lib/$ASAN_RT_SYMLINK
+ adb_shell ln -s $ASAN_RT64 /system/lib64/$ASAN_RT_SYMLINK
else
install "$TMPDIR/$ASAN_RT" /system/lib 644
install "$TMPDIR/app_process32" /system/bin 755 $CTX
install "$TMPDIR/app_process.wrap" /system/bin 755 $CTX
install "$TMPDIR/asanwrapper" /system/bin 755 $CTX
+ adb_shell ln -s $ASAN_RT /system/lib/$ASAN_RT_SYMLINK
+
adb_shell rm /system/bin/app_process
adb_shell ln -s /system/bin/app_process.wrap /system/bin/app_process
fi
diff --git a/lib/asan/tests/CMakeLists.txt b/lib/asan/tests/CMakeLists.txt
index e67d0fb0646f..3e56763a8041 100644
--- a/lib/asan/tests/CMakeLists.txt
+++ b/lib/asan/tests/CMakeLists.txt
@@ -106,12 +106,13 @@ set(ASAN_UNITTEST_INSTRUMENTED_LIBS)
append_list_if(ANDROID atomic ASAN_UNITTEST_INSTRUMENTED_LIBS)
set(ASAN_UNITTEST_NOINST_LINKFLAGS ${ASAN_UNITTEST_COMMON_LINKFLAGS})
-append_list_if(COMPILER_RT_HAS_LIBM -lm ASAN_UNITTEST_NOINST_LINKFLAGS)
-append_list_if(COMPILER_RT_HAS_LIBDL -ldl ASAN_UNITTEST_NOINST_LINKFLAGS)
-append_list_if(COMPILER_RT_HAS_LIBRT -lrt ASAN_UNITTEST_NOINST_LINKFLAGS)
-append_list_if(COMPILER_RT_HAS_LIBPTHREAD -pthread ASAN_UNITTEST_NOINST_LINKFLAGS)
-append_list_if(COMPILER_RT_HAS_LIBPTHREAD -pthread
- ASAN_DYNAMIC_UNITTEST_INSTRUMENTED_LINKFLAGS)
+if(NOT APPLE)
+ append_list_if(COMPILER_RT_HAS_LIBM -lm ASAN_UNITTEST_NOINST_LINKFLAGS)
+ append_list_if(COMPILER_RT_HAS_LIBDL -ldl ASAN_UNITTEST_NOINST_LINKFLAGS)
+ append_list_if(COMPILER_RT_HAS_LIBRT -lrt ASAN_UNITTEST_NOINST_LINKFLAGS)
+ append_list_if(COMPILER_RT_HAS_LIBPTHREAD -pthread ASAN_UNITTEST_NOINST_LINKFLAGS)
+ append_list_if(COMPILER_RT_HAS_LIBPTHREAD -pthread ASAN_DYNAMIC_UNITTEST_INSTRUMENTED_LINKFLAGS)
+endif()
# TODO(eugenis): move all -l flags above to _LIBS?
set(ASAN_UNITTEST_NOINST_LIBS)
@@ -193,6 +194,7 @@ set(ASAN_INST_TEST_SOURCES
asan_asm_test.cc
asan_globals_test.cc
asan_interface_test.cc
+ asan_internal_interface_test.cc
asan_test.cc
asan_oob_test.cc
asan_mem_test.cc
@@ -220,6 +222,23 @@ macro(add_asan_tests_for_arch_and_kind arch kind)
${ASAN_UNITTEST_INSTRUMENTED_CFLAGS} -ObjC ${ARGN})
endif()
+ if (MSVC)
+ # With the MSVC CRT, the choice between static and dynamic CRT is made at
+ # compile time with a macro. Simulate the effect of passing /MD to clang-cl.
+ set(ASAN_INST_DYNAMIC_TEST_OBJECTS)
+ foreach(src ${ASAN_INST_TEST_SOURCES})
+ asan_compile(ASAN_INST_DYNAMIC_TEST_OBJECTS ${src} ${arch} ${kind}
+ ${ASAN_UNITTEST_INSTRUMENTED_CFLAGS} -D_MT -D_DLL ${ARGN})
+ endforeach()
+ # Clang links the static CRT by default. Override that to use the dynamic
+ # CRT.
+ set(ASAN_DYNAMIC_UNITTEST_INSTRUMENTED_LINKFLAGS
+ ${ASAN_DYNAMIC_UNITTEST_INSTRUMENTED_LINKFLAGS}
+ -Wl,-nodefaultlib:libcmt,-defaultlib:msvcrt,-defaultlib:oldnames)
+ else()
+ set(ASAN_INST_DYNAMIC_TEST_OBJECTS ${ASAN_INST_TEST_OBJECTS})
+ endif()
+
# Create the 'default' folder where ASAN tests are produced.
if(CMAKE_CONFIGURATION_TYPES)
foreach(build_mode ${CMAKE_CONFIGURATION_TYPES})
@@ -245,7 +264,7 @@ macro(add_asan_tests_for_arch_and_kind arch kind)
add_asan_test(AsanDynamicUnitTests "Asan-${arch}${kind}-Dynamic-Test"
${arch} ${kind} SUBDIR "dynamic"
- OBJECTS ${ASAN_INST_TEST_OBJECTS}
+ OBJECTS ${ASAN_INST_DYNAMIC_TEST_OBJECTS}
LINKFLAGS ${ASAN_DYNAMIC_UNITTEST_INSTRUMENTED_LINKFLAGS})
endif()
diff --git a/lib/asan/tests/asan_asm_test.cc b/lib/asan/tests/asan_asm_test.cc
index 09af5c386079..2bb37946bb4a 100644
--- a/lib/asan/tests/asan_asm_test.cc
+++ b/lib/asan/tests/asan_asm_test.cc
@@ -57,12 +57,13 @@ template<> Type asm_read<Type>(Type *ptr) { \
return res; \
}
-#define DECLARE_ASM_REP_MOVS(Type, Movs) \
- template <> void asm_rep_movs<Type>(Type * dst, Type * src, size_t size) { \
- __asm__("rep " Movs " \n\t" \
- : \
- : "D"(dst), "S"(src), "c"(size) \
- : "rsi", "rdi", "rcx", "memory"); \
+#define DECLARE_ASM_REP_MOVS(Type, Movs) \
+ template <> \
+ void asm_rep_movs<Type>(Type * dst, Type * src, size_t size) { \
+ __asm__("rep " Movs " \n\t" \
+ : "+D"(dst), "+S"(src), "+c"(size) \
+ : \
+ : "memory"); \
}
DECLARE_ASM_WRITE(U8, "8", "movq", "r");
@@ -99,12 +100,13 @@ template<> Type asm_read<Type>(Type *ptr) { \
return res; \
}
-#define DECLARE_ASM_REP_MOVS(Type, Movs) \
- template <> void asm_rep_movs<Type>(Type * dst, Type * src, size_t size) { \
- __asm__("rep " Movs " \n\t" \
- : \
- : "D"(dst), "S"(src), "c"(size) \
- : "esi", "edi", "ecx", "memory"); \
+#define DECLARE_ASM_REP_MOVS(Type, Movs) \
+ template <> \
+ void asm_rep_movs<Type>(Type * dst, Type * src, size_t size) { \
+ __asm__("rep " Movs " \n\t" \
+ : "+D"(dst), "+S"(src), "+c"(size) \
+ : \
+ : "memory"); \
}
} // End of anonymous namespace
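The constraint change is not cosmetic: rep movs advances rdi/rsi and decrements rcx, and listing registers both as inputs and clobbers is invalid inline asm, since an input may not live in a clobbered register. Tying the pointers and the count to those registers as read-write outputs ("+D", "+S", "+c") is the conventional encoding. A standalone hedged sketch for x86:

    #include <cstdio>
    #include <cstring>

    // Copy n bytes with rep movsb; dst/src/n are consumed by the instruction,
    // so they are tied to rdi/rsi/rcx as read-write operands, not inputs.
    static void rep_movsb(void *dst, const void *src, size_t n) {
    #if defined(__x86_64__) || defined(__i386__)
      __asm__ volatile("rep movsb"
                       : "+D"(dst), "+S"(src), "+c"(n)
                       :
                       : "memory");
    #else
      memcpy(dst, src, n);  // fallback for the sketch on other ISAs
    #endif
    }

    int main() {
      char buf[6] = {};
      rep_movsb(buf, "hello", 6);
      puts(buf);
    }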
diff --git a/lib/asan/tests/asan_interface_test.cc b/lib/asan/tests/asan_interface_test.cc
index f5bfb8046b0a..fd43f17716b6 100644
--- a/lib/asan/tests/asan_interface_test.cc
+++ b/lib/asan/tests/asan_interface_test.cc
@@ -100,6 +100,9 @@ TEST(AddressSanitizerInterface, GetHeapSizeTest) {
}
}
+#ifndef __powerpc64__
+// FIXME: This has not reliably worked on powerpc since r279664. Re-enable
+// this once the problem is tracked down and fixed.
static const size_t kManyThreadsMallocSizes[] = {5, 1UL<<10, 1UL<<14, 357};
static const size_t kManyThreadsIterations = 250;
static const size_t kManyThreadsNumThreads =
@@ -133,6 +136,7 @@ TEST(AddressSanitizerInterface, ManyThreadsWithStatsStressTest) {
// so we can't check for equality here.
EXPECT_LT(after_test, before_test + (1UL<<20));
}
+#endif
static void DoDoubleFree() {
int *x = Ident(new int);
diff --git a/lib/asan/tests/asan_internal_interface_test.cc b/lib/asan/tests/asan_internal_interface_test.cc
new file mode 100644
index 000000000000..ae4759478170
--- /dev/null
+++ b/lib/asan/tests/asan_internal_interface_test.cc
@@ -0,0 +1,36 @@
+//===-- asan_internal_interface_test.cc -----------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+//===----------------------------------------------------------------------===//
+#include "asan_interface_internal.h"
+#include "asan_test_utils.h"
+
+TEST(AddressSanitizerInternalInterface, SetShadow) {
+ std::vector<char> buffer(17, 0xff);
+
+ __asan_set_shadow_00((uptr)buffer.data(), buffer.size());
+ EXPECT_EQ(std::vector<char>(buffer.size(), 0x00), buffer);
+
+ __asan_set_shadow_f1((uptr)buffer.data(), buffer.size());
+ EXPECT_EQ(std::vector<char>(buffer.size(), 0xf1), buffer);
+
+ __asan_set_shadow_f2((uptr)buffer.data(), buffer.size());
+ EXPECT_EQ(std::vector<char>(buffer.size(), 0xf2), buffer);
+
+ __asan_set_shadow_f3((uptr)buffer.data(), buffer.size());
+ EXPECT_EQ(std::vector<char>(buffer.size(), 0xf3), buffer);
+
+ __asan_set_shadow_f5((uptr)buffer.data(), buffer.size());
+ EXPECT_EQ(std::vector<char>(buffer.size(), 0xf5), buffer);
+
+ __asan_set_shadow_f8((uptr)buffer.data(), buffer.size());
+ EXPECT_EQ(std::vector<char>(buffer.size(), 0xf8), buffer);
+}
diff --git a/lib/asan/tests/asan_noinst_test.cc b/lib/asan/tests/asan_noinst_test.cc
index 3872dd7a7190..65acb2839ba1 100644
--- a/lib/asan/tests/asan_noinst_test.cc
+++ b/lib/asan/tests/asan_noinst_test.cc
@@ -26,6 +26,8 @@
#include <vector>
#include <limits>
+using namespace __sanitizer;
+
// ATTENTION!
// Please don't call intercepted functions (including malloc() and friends)
// in this test. The static runtime library is linked explicitly (without
@@ -168,6 +170,12 @@ void *ThreadedQuarantineTestWorker(void *unused) {
// Check that the thread local allocators are flushed when threads are
// destroyed.
TEST(AddressSanitizer, ThreadedQuarantineTest) {
+ // Run the routine once to warm up ASAN internal structures to get more
+ // predictable incremental memory changes.
+ pthread_t t;
+ PTHREAD_CREATE(&t, NULL, ThreadedQuarantineTestWorker, 0);
+ PTHREAD_JOIN(t, 0);
+
const int n_threads = 3000;
size_t mmaped1 = __sanitizer_get_heap_size();
for (int i = 0; i < n_threads; i++) {
@@ -175,6 +183,7 @@ TEST(AddressSanitizer, ThreadedQuarantineTest) {
PTHREAD_CREATE(&t, NULL, ThreadedQuarantineTestWorker, 0);
PTHREAD_JOIN(t, 0);
size_t mmaped2 = __sanitizer_get_heap_size();
+    // FIXME: Figure out why this much memory is required.
EXPECT_LT(mmaped2 - mmaped1, 320U * (1 << 20));
}
}
diff --git a/lib/asan/tests/asan_str_test.cc b/lib/asan/tests/asan_str_test.cc
index dd755875e740..c790088f8f9e 100644
--- a/lib/asan/tests/asan_str_test.cc
+++ b/lib/asan/tests/asan_str_test.cc
@@ -127,7 +127,15 @@ TEST(AddressSanitizer, StrNLenOOBTest) {
}
#endif // SANITIZER_TEST_HAS_STRNLEN
-TEST(AddressSanitizer, StrDupOOBTest) {
+// This test fails with the WinASan dynamic runtime because we fail to intercept
+// strdup.
+#if defined(_MSC_VER) && defined(_DLL)
+#define MAYBE_StrDupOOBTest DISABLED_StrDupOOBTest
+#else
+#define MAYBE_StrDupOOBTest StrDupOOBTest
+#endif
+
+TEST(AddressSanitizer, MAYBE_StrDupOOBTest) {
size_t size = Ident(42);
char *str = MallocAndMemsetString(size);
char *new_str;
@@ -457,12 +465,14 @@ TEST(AddressSanitizer, StrArgsOverlapTest) {
#if !defined(__APPLE__) || !defined(MAC_OS_X_VERSION_10_7) || \
(MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_7)
// Check "memcpy". Use Ident() to avoid inlining.
+#if PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE
memset(str, 'z', size);
Ident(memcpy)(str + 1, str + 11, 10);
Ident(memcpy)(str, str, 0);
EXPECT_DEATH(Ident(memcpy)(str, str + 14, 15), OverlapErrorMessage("memcpy"));
EXPECT_DEATH(Ident(memcpy)(str + 14, str, 15), OverlapErrorMessage("memcpy"));
#endif
+#endif
// We do not treat memcpy with to==from as a bug.
// See http://llvm.org/bugs/show_bug.cgi?id=11763.
diff --git a/lib/asan/tests/asan_test.cc b/lib/asan/tests/asan_test.cc
index 6a95c3fe1049..424a79e00a4a 100644
--- a/lib/asan/tests/asan_test.cc
+++ b/lib/asan/tests/asan_test.cc
@@ -692,7 +692,7 @@ TEST(AddressSanitizer, ThreadStackReuseTest) {
PTHREAD_JOIN(t, 0);
}
-#if defined(__i686__) || defined(__x86_64__)
+#if defined(__SSE2__)
#include <emmintrin.h>
TEST(AddressSanitizer, Store128Test) {
char *a = Ident((char*)malloc(Ident(12)));
diff --git a/lib/asan/tests/asan_test_main.cc b/lib/asan/tests/asan_test_main.cc
index d4d6de77b91b..1071d4474674 100644
--- a/lib/asan/tests/asan_test_main.cc
+++ b/lib/asan/tests/asan_test_main.cc
@@ -28,9 +28,18 @@ extern "C" const char* __asan_default_options() {
namespace __sanitizer {
bool ReexecDisabled() {
+#if __has_feature(address_sanitizer) && SANITIZER_MAC
+ // Allow re-exec in instrumented unit tests on Darwin. Technically, we only
+ // need this for 10.10 and below, where re-exec is required for the
+ // interceptors to work, but to avoid duplicating the version detection logic,
+ // let's just allow re-exec for all Darwin versions. On newer OS versions,
+ // returning 'false' doesn't do anything anyway, because we don't re-exec.
+ return false;
+#else
return true;
+#endif
}
-}
+} // namespace __sanitizer
int main(int argc, char **argv) {
testing::GTEST_FLAG(death_test_style) = "threadsafe";
diff --git a/lib/asan/tests/asan_test_utils.h b/lib/asan/tests/asan_test_utils.h
index 03d17cfb26a7..f16d939c94aa 100644
--- a/lib/asan/tests/asan_test_utils.h
+++ b/lib/asan/tests/asan_test_utils.h
@@ -62,7 +62,9 @@ typedef uint64_t U8;
static const int kPageSize = 4096;
-const size_t kLargeMalloc = 1 << 24;
+// Big enough to be handled by the secondary allocator and small enough to fit
+// into the quarantine for all configurations.
+const size_t kLargeMalloc = 1 << 22;
extern void free_aaa(void *p);
extern void *malloc_aaa(size_t size);
diff --git a/lib/builtins/CMakeLists.txt b/lib/builtins/CMakeLists.txt
index 44a660f5d49d..b33786a858e8 100644
--- a/lib/builtins/CMakeLists.txt
+++ b/lib/builtins/CMakeLists.txt
@@ -13,6 +13,10 @@ if (CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR)
"${CMAKE_SOURCE_DIR}/../../cmake/Modules")
include(base-config-ix)
include(CompilerRTUtils)
+
+ load_llvm_config()
+ construct_compiler_rt_default_triple()
+
if(APPLE)
include(CompilerRTDarwinUtils)
endif()
@@ -160,7 +164,11 @@ set(GENERIC_SOURCES
umodsi3.c
umodti3.c)
-if(COMPILER_RT_SUPPORTS_ATOMIC_KEYWORD)
+option(COMPILER_RT_EXCLUDE_ATOMIC_BUILTIN
+ "Skip the atomic builtin (this may be needed if system headers are unavailable)"
+ Off)
+
+if(COMPILER_RT_HAS_ATOMIC_KEYWORD AND NOT COMPILER_RT_EXCLUDE_ATOMIC_BUILTIN)
set(GENERIC_SOURCES
${GENERIC_SOURCES}
atomic.c)
@@ -261,8 +269,40 @@ else () # MSVC
endif () # if (NOT MSVC)
set(arm_SOURCES
- arm/adddf3vfp.S
- arm/addsf3vfp.S
+ arm/bswapdi2.S
+ arm/bswapsi2.S
+ arm/clzdi2.S
+ arm/clzsi2.S
+ arm/comparesf2.S
+ arm/divmodsi4.S
+ arm/divsi3.S
+ arm/modsi3.S
+ arm/sync_fetch_and_add_4.S
+ arm/sync_fetch_and_add_8.S
+ arm/sync_fetch_and_and_4.S
+ arm/sync_fetch_and_and_8.S
+ arm/sync_fetch_and_max_4.S
+ arm/sync_fetch_and_max_8.S
+ arm/sync_fetch_and_min_4.S
+ arm/sync_fetch_and_min_8.S
+ arm/sync_fetch_and_nand_4.S
+ arm/sync_fetch_and_nand_8.S
+ arm/sync_fetch_and_or_4.S
+ arm/sync_fetch_and_or_8.S
+ arm/sync_fetch_and_sub_4.S
+ arm/sync_fetch_and_sub_8.S
+ arm/sync_fetch_and_umax_4.S
+ arm/sync_fetch_and_umax_8.S
+ arm/sync_fetch_and_umin_4.S
+ arm/sync_fetch_and_umin_8.S
+ arm/sync_fetch_and_xor_4.S
+ arm/sync_fetch_and_xor_8.S
+ arm/udivmodsi4.S
+ arm/udivsi3.S
+ arm/umodsi3.S
+ ${GENERIC_SOURCES})
+
+set(arm_EABI_SOURCES
arm/aeabi_cdcmp.S
arm/aeabi_cdcmpeq_check_nan.c
arm/aeabi_cfcmp.S
@@ -279,16 +319,20 @@ set(arm_SOURCES
arm/aeabi_memmove.S
arm/aeabi_memset.S
arm/aeabi_uidivmod.S
- arm/aeabi_uldivmod.S
- arm/bswapdi2.S
- arm/bswapsi2.S
- arm/clzdi2.S
- arm/clzsi2.S
- arm/comparesf2.S
+ arm/aeabi_uldivmod.S)
+set(arm_Thumb1_JT_SOURCES
+ arm/switch16.S
+ arm/switch32.S
+ arm/switch8.S
+ arm/switchu8.S)
+set(arm_Thumb1_SjLj_EH_SOURCES
+ arm/restore_vfp_d8_d15_regs.S
+ arm/save_vfp_d8_d15_regs.S)
+set(arm_Thumb1_VFPv2_SOURCES
+ arm/adddf3vfp.S
+ arm/addsf3vfp.S
arm/divdf3vfp.S
- arm/divmodsi4.S
arm/divsf3vfp.S
- arm/divsi3.S
arm/eqdf2vfp.S
arm/eqsf2vfp.S
arm/extendsfdf2vfp.S
@@ -308,49 +352,56 @@ set(arm_SOURCES
arm/lesf2vfp.S
arm/ltdf2vfp.S
arm/ltsf2vfp.S
- arm/modsi3.S
arm/muldf3vfp.S
arm/mulsf3vfp.S
arm/nedf2vfp.S
arm/negdf2vfp.S
arm/negsf2vfp.S
arm/nesf2vfp.S
- arm/restore_vfp_d8_d15_regs.S
- arm/save_vfp_d8_d15_regs.S
arm/subdf3vfp.S
arm/subsf3vfp.S
- arm/switch16.S
- arm/switch32.S
- arm/switch8.S
- arm/switchu8.S
- arm/sync_fetch_and_add_4.S
- arm/sync_fetch_and_add_8.S
- arm/sync_fetch_and_and_4.S
- arm/sync_fetch_and_and_8.S
- arm/sync_fetch_and_max_4.S
- arm/sync_fetch_and_max_8.S
- arm/sync_fetch_and_min_4.S
- arm/sync_fetch_and_min_8.S
- arm/sync_fetch_and_nand_4.S
- arm/sync_fetch_and_nand_8.S
- arm/sync_fetch_and_or_4.S
- arm/sync_fetch_and_or_8.S
- arm/sync_fetch_and_sub_4.S
- arm/sync_fetch_and_sub_8.S
- arm/sync_fetch_and_umax_4.S
- arm/sync_fetch_and_umax_8.S
- arm/sync_fetch_and_umin_4.S
- arm/sync_fetch_and_umin_8.S
- arm/sync_fetch_and_xor_4.S
- arm/sync_fetch_and_xor_8.S
- arm/sync_synchronize.S
arm/truncdfsf2vfp.S
- arm/udivmodsi4.S
- arm/udivsi3.S
- arm/umodsi3.S
arm/unorddf2vfp.S
- arm/unordsf2vfp.S
- ${GENERIC_SOURCES})
+ arm/unordsf2vfp.S)
+set(arm_Thumb1_icache_SOURCES
+ arm/sync_synchronize.S)
+set(arm_Thumb1_SOURCES
+ ${arm_Thumb1_JT_SOURCES}
+ ${arm_Thumb1_SjLj_EH_SOURCES}
+ ${arm_Thumb1_VFPv2_SOURCES}
+ ${arm_Thumb1_icache_SOURCES})
+
+if(MINGW)
+ set(arm_SOURCES
+ arm/aeabi_idivmod.S
+ arm/aeabi_ldivmod.S
+ arm/aeabi_uidivmod.S
+ arm/aeabi_uldivmod.S
+ divmoddi4.c
+ divmodsi4.c
+ divdi3.c
+ divsi3.c
+ fixdfdi.c
+ fixsfdi.c
+ fixunsdfdi.c
+ fixunssfdi.c
+ floatdidf.c
+ floatdisf.c
+ floatundidf.c
+ floatundisf.c
+ mingw_fixfloat.c
+ moddi3.c
+ udivmoddi4.c
+ udivmodsi4.c
+ udivsi3.c
+ umoddi3.c)
+elseif(NOT WIN32)
+ # TODO the EABI sources should only be added to EABI targets
+ set(arm_SOURCES
+ ${arm_SOURCES}
+ ${arm_EABI_SOURCES}
+ ${arm_Thumb1_SOURCES})
+endif()
set(aarch64_SOURCES
comparetf2.c
@@ -398,7 +449,24 @@ if (APPLE)
add_subdirectory(macho_embedded)
darwin_add_builtin_libraries(${BUILTIN_SUPPORTED_OS})
else ()
- append_string_if(COMPILER_RT_HAS_STD_C99_FLAG -std=gnu99 maybe_stdc99)
+ set(BUILTIN_CFLAGS "")
+
+ append_list_if(COMPILER_RT_HAS_STD_C11_FLAG -std=c11 BUILTIN_CFLAGS)
+
+ # These flags would normally be added to CMAKE_C_FLAGS by the llvm
+ # cmake step. Add them manually if this is a standalone build.
+ if(COMPILER_RT_STANDALONE_BUILD)
+ append_list_if(COMPILER_RT_HAS_FPIC_FLAG -fPIC BUILTIN_CFLAGS)
+ append_list_if(COMPILER_RT_HAS_FNO_BUILTIN_FLAG -fno-builtin BUILTIN_CFLAGS)
+ append_list_if(COMPILER_RT_HAS_VISIBILITY_HIDDEN_FLAG -fvisibility=hidden BUILTIN_CFLAGS)
+ if(NOT COMPILER_RT_DEBUG)
+ append_list_if(COMPILER_RT_HAS_OMIT_FRAME_POINTER_FLAG -fomit-frame-pointer BUILTIN_CFLAGS)
+ endif()
+ endif()
+
+ set(BUILTIN_DEFS "")
+
+ append_list_if(COMPILER_RT_HAS_VISIBILITY_HIDDEN_FLAG VISIBILITY_HIDDEN BUILTIN_DEFS)
foreach (arch ${BUILTIN_SUPPORTED_ARCH})
if (CAN_TARGET_${arch})
@@ -413,11 +481,18 @@ else ()
endif ()
endforeach ()
+  # Needed for clear_cache in debug mode, due to r7's usage in inline asm.
+ # Release mode already sets it via -O2/3, Debug mode doesn't.
+ if (${arch} STREQUAL "armhf")
+ list(APPEND BUILTIN_CFLAGS -fomit-frame-pointer)
+ endif()
+
add_compiler_rt_runtime(clang_rt.builtins
STATIC
ARCHS ${arch}
SOURCES ${${arch}_SOURCES}
- CFLAGS ${maybe_stdc99}
+ DEFS ${BUILTIN_DEFS}
+ CFLAGS ${BUILTIN_CFLAGS}
PARENT_TARGET builtins)
endif ()
endforeach ()
diff --git a/lib/builtins/Makefile.mk b/lib/builtins/Makefile.mk
deleted file mode 100644
index 00e2f53fc401..000000000000
--- a/lib/builtins/Makefile.mk
+++ /dev/null
@@ -1,25 +0,0 @@
-#===- lib/builtins/Makefile.mk -----------------------------*- Makefile -*--===#
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-#===------------------------------------------------------------------------===#
-
-ModuleName := builtins
-SubDirs :=
-
-# Add arch specific optimized implementations.
-SubDirs += i386 ppc x86_64 arm armv6m
-
-# Add ARM64 dir.
-SubDirs += arm64
-
-# Define the variables for this specific directory.
-Sources := $(foreach file,$(wildcard $(Dir)/*.c),$(notdir $(file)))
-ObjNames := $(Sources:%.c=%.o)
-Implementation := Generic
-
-# FIXME: use automatic dependencies?
-Dependencies := $(wildcard $(Dir)/*.h)
diff --git a/lib/builtins/arm/Makefile.mk b/lib/builtins/arm/Makefile.mk
deleted file mode 100644
index ed2e8323e391..000000000000
--- a/lib/builtins/arm/Makefile.mk
+++ /dev/null
@@ -1,20 +0,0 @@
-#===- lib/builtins/arm/Makefile.mk -------------------------*- Makefile -*--===#
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-#===------------------------------------------------------------------------===#
-
-ModuleName := builtins
-SubDirs :=
-OnlyArchs := armv5 armv6 armv7 armv7k armv7m armv7em armv7s
-
-AsmSources := $(foreach file,$(wildcard $(Dir)/*.S),$(notdir $(file)))
-Sources := $(foreach file,$(wildcard $(Dir)/*.c),$(notdir $(file)))
-ObjNames := $(Sources:%.c=%.o) $(AsmSources:%.S=%.o)
-Implementation := Optimized
-
-# FIXME: use automatic dependencies?
-Dependencies := $(wildcard lib/*.h $(Dir)/*.h)
diff --git a/lib/builtins/arm/aeabi_idivmod.S b/lib/builtins/arm/aeabi_idivmod.S
index 2fcad862f73a..b43ea699058d 100644
--- a/lib/builtins/arm/aeabi_idivmod.S
+++ b/lib/builtins/arm/aeabi_idivmod.S
@@ -15,16 +15,34 @@
// return {quot, rem};
// }
+#if defined(__MINGW32__)
+#define __aeabi_idivmod __rt_sdiv
+#endif
+
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_idivmod)
+#if __ARM_ARCH_ISA_THUMB == 1
+ push {r0, r1, lr}
+ bl SYMBOL_NAME(__divsi3)
+ pop {r1, r2, r3} // now r0 = quot, r1 = num, r2 = denom
+ muls r2, r2, r0 // r2 = quot * denom
+ subs r1, r1, r2
+ JMP (r3)
+#else
push { lr }
sub sp, sp, #4
mov r2, sp
+#if defined(__MINGW32__)
+ mov r3, r0
+ mov r0, r1
+ mov r1, r3
+#endif
bl SYMBOL_NAME(__divmodsi4)
ldr r1, [sp]
add sp, sp, #4
pop { pc }
+#endif // __ARM_ARCH_ISA_THUMB == 1
END_COMPILERRT_FUNCTION(__aeabi_idivmod)
NO_EXEC_STACK_DIRECTIVE
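In C terms, the Thumb1 path above computes the quotient with __divsi3 and then recovers the remainder with one multiply and subtract, returning the pair in r0/r1. A hedged reference of that contract:

    #include <cassert>

    // Sketch of the {quot, rem} contract implemented above: quotient via plain
    // division, remainder recovered by one multiply and subtract.
    struct IDivModResult { int quot; int rem; };

    static IDivModResult aeabi_idivmod_ref(int num, int denom) {
      int quot = num / denom;        // the asm calls __divsi3 here
      int rem = num - quot * denom;  // muls + subs in the Thumb1 path
      return {quot, rem};
    }

    int main() {
      IDivModResult r = aeabi_idivmod_ref(-7, 3);
      assert(r.quot == -2 && r.rem == -1);  // C division truncates toward zero
    }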
diff --git a/lib/builtins/arm/aeabi_ldivmod.S b/lib/builtins/arm/aeabi_ldivmod.S
index 9f161f3007f6..3dae14ef07ec 100644
--- a/lib/builtins/arm/aeabi_ldivmod.S
+++ b/lib/builtins/arm/aeabi_ldivmod.S
@@ -16,6 +16,10 @@
// return {quot, rem};
// }
+#if defined(__MINGW32__)
+#define __aeabi_ldivmod __rt_sdiv64
+#endif
+
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_ldivmod)
@@ -23,6 +27,14 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_ldivmod)
sub sp, sp, #16
add r12, sp, #8
str r12, [sp]
+#if defined(__MINGW32__)
+ mov r12, r0
+ mov r0, r2
+ mov r2, r12
+ mov r12, r1
+ mov r1, r3
+ mov r3, r12
+#endif
bl SYMBOL_NAME(__divmoddi4)
ldr r2, [sp, #8]
ldr r3, [sp, #12]
diff --git a/lib/builtins/arm/aeabi_uidivmod.S b/lib/builtins/arm/aeabi_uidivmod.S
index e1e12d97aa00..7098bc6ff92e 100644
--- a/lib/builtins/arm/aeabi_uidivmod.S
+++ b/lib/builtins/arm/aeabi_uidivmod.S
@@ -16,16 +16,40 @@
// return {quot, rem};
// }
+#if defined(__MINGW32__)
+#define __aeabi_uidivmod __rt_udiv
+#endif
+
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_uidivmod)
+#if __ARM_ARCH_ISA_THUMB == 1
+ cmp r0, r1
+ bcc LOCAL_LABEL(case_denom_larger)
+ push {r0, r1, lr}
+ bl SYMBOL_NAME(__aeabi_uidiv)
+ pop {r1, r2, r3}
+ muls r2, r2, r0 // r2 = quot * denom
+ subs r1, r1, r2
+ JMP (r3)
+LOCAL_LABEL(case_denom_larger):
+ movs r1, r0
+ movs r0, #0
+ JMP (lr)
+#else
push { lr }
sub sp, sp, #4
mov r2, sp
+#if defined(__MINGW32__)
+ mov r3, r0
+ mov r0, r1
+ mov r1, r3
+#endif
bl SYMBOL_NAME(__udivmodsi4)
ldr r1, [sp]
add sp, sp, #4
pop { pc }
+#endif
END_COMPILERRT_FUNCTION(__aeabi_uidivmod)
NO_EXEC_STACK_DIRECTIVE
diff --git a/lib/builtins/arm/aeabi_uldivmod.S b/lib/builtins/arm/aeabi_uldivmod.S
index e8aaef282e90..bc26e5674ca0 100644
--- a/lib/builtins/arm/aeabi_uldivmod.S
+++ b/lib/builtins/arm/aeabi_uldivmod.S
@@ -16,6 +16,10 @@
// return {quot, rem};
// }
+#if defined(__MINGW32__)
+#define __aeabi_uldivmod __rt_udiv64
+#endif
+
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_uldivmod)
@@ -23,6 +27,14 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_uldivmod)
sub sp, sp, #16
add r12, sp, #8
str r12, [sp]
+#if defined(__MINGW32__)
+ mov r12, r0
+ mov r0, r2
+ mov r2, r12
+ mov r12, r1
+ mov r1, r3
+ mov r3, r12
+#endif
bl SYMBOL_NAME(__udivmoddi4)
ldr r2, [sp, #8]
ldr r3, [sp, #12]
diff --git a/lib/builtins/arm/comparesf2.S b/lib/builtins/arm/comparesf2.S
index 52597b673f96..6d7019545475 100644
--- a/lib/builtins/arm/comparesf2.S
+++ b/lib/builtins/arm/comparesf2.S
@@ -39,6 +39,9 @@
#include "../assembly.h"
.syntax unified
+#if __ARM_ARCH_ISA_THUMB == 2
+.thumb
+#endif
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__eqsf2)
diff --git a/lib/builtins/arm/divsi3.S b/lib/builtins/arm/divsi3.S
index 7e23ba4fc237..f066f60ad96d 100644
--- a/lib/builtins/arm/divsi3.S
+++ b/lib/builtins/arm/divsi3.S
@@ -49,17 +49,37 @@ LOCAL_LABEL(divzero):
#else
ESTABLISH_FRAME
// Set aside the sign of the quotient.
+# if __ARM_ARCH_ISA_THUMB == 1
+ movs r4, r0
+ eors r4, r1
+# else
eor r4, r0, r1
+# endif
// Take absolute value of a and b via abs(x) = (x^(x >> 31)) - (x >> 31).
+# if __ARM_ARCH_ISA_THUMB == 1
+ asrs r2, r0, #31
+ asrs r3, r1, #31
+ eors r0, r2
+ eors r1, r3
+ subs r0, r0, r2
+ subs r1, r1, r3
+# else
eor r2, r0, r0, asr #31
eor r3, r1, r1, asr #31
sub r0, r2, r0, asr #31
sub r1, r3, r1, asr #31
+# endif
// abs(a) / abs(b)
bl SYMBOL_NAME(__udivsi3)
// Apply sign of quotient to result and return.
+# if __ARM_ARCH_ISA_THUMB == 1
+ asrs r4, #31
+ eors r0, r4
+ subs r0, r0, r4
+# else
eor r0, r0, r4, asr #31
sub r0, r0, r4, asr #31
+# endif
CLEAR_FRAME_AND_RETURN
#endif
END_COMPILERRT_FUNCTION(__divsi3)
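
Editor's note: the Thumb1 block above open-codes the same branch-free identities that the ARM encoding expresses with shifted operands. A small C++ illustration of the two tricks (function names are illustrative; an arithmetic right shift on signed int is assumed, as on ARM targets):

    // abs(x) = (x ^ (x >> 31)) - (x >> 31): the arithmetic shift yields
    // 0 for non-negative x and -1 for negative x, so the xor/sub pair
    // conditionally negates without a branch.
    int abs_branchless(int x) {
      int s = x >> 31;
      return (x ^ s) - s;
    }

    // The quotient's sign is the xor of the operand signs (the value
    // parked in r4), applied the same way after the unsigned divide.
    int apply_sign(int uquot, int sign_word) {
      int s = sign_word >> 31;
      return (uquot ^ s) - s;
    }
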
diff --git a/lib/builtins/arm/udivsi3.S b/lib/builtins/arm/udivsi3.S
index 085f8fb9e2df..fcc472b4f3d9 100644
--- a/lib/builtins/arm/udivsi3.S
+++ b/lib/builtins/arm/udivsi3.S
@@ -40,12 +40,26 @@ DEFINE_COMPILERRT_FUNCTION(__udivsi3)
#else
cmp r1, #1
bcc LOCAL_LABEL(divby0)
+#if __ARM_ARCH_ISA_THUMB == 1
+ bne LOCAL_LABEL(num_neq_denom)
+ JMP(lr)
+LOCAL_LABEL(num_neq_denom):
+#else
IT(eq)
JMPc(lr, eq)
+#endif
cmp r0, r1
+#if __ARM_ARCH_ISA_THUMB == 1
+ bhs LOCAL_LABEL(num_ge_denom)
+ movs r0, #0
+ JMP(lr)
+LOCAL_LABEL(num_ge_denom):
+#else
ITT(cc)
movcc r0, #0
JMPc(lr, cc)
+#endif
+
/*
* Implement division using the binary long division algorithm.
*
@@ -62,7 +76,7 @@ DEFINE_COMPILERRT_FUNCTION(__udivsi3)
* that (r0 << shift) < 2 * r1. The quotient is stored in r3.
*/
-# ifdef __ARM_FEATURE_CLZ
+# if defined(__ARM_FEATURE_CLZ)
clz ip, r0
clz r3, r1
/* r0 >= r1 implies clz(r0) <= clz(r1), so ip <= r3. */
@@ -77,49 +91,128 @@ DEFINE_COMPILERRT_FUNCTION(__udivsi3)
sub ip, ip, r3, lsl #3
mov r3, #0
bx ip
-# else
+# else /* No CLZ Feature */
# if __ARM_ARCH_ISA_THUMB == 2
# error THUMB mode requires CLZ or UDIV
# endif
+# if __ARM_ARCH_ISA_THUMB == 1
+# define BLOCK_SIZE 10
+# else
+# define BLOCK_SIZE 12
+# endif
+
mov r2, r0
+# if __ARM_ARCH_ISA_THUMB == 1
+ mov ip, r0
+ adr r0, LOCAL_LABEL(div0block)
+ adds r0, #1
+# else
adr ip, LOCAL_LABEL(div0block)
-
- lsr r3, r2, #16
+# endif
+ lsrs r3, r2, #16
cmp r3, r1
+# if __ARM_ARCH_ISA_THUMB == 1
+ blo LOCAL_LABEL(skip_16)
+ movs r2, r3
+ subs r0, r0, #(16 * BLOCK_SIZE)
+LOCAL_LABEL(skip_16):
+# else
movhs r2, r3
- subhs ip, ip, #(16 * 12)
+ subhs ip, ip, #(16 * BLOCK_SIZE)
+# endif
- lsr r3, r2, #8
+ lsrs r3, r2, #8
cmp r3, r1
+# if __ARM_ARCH_ISA_THUMB == 1
+ blo LOCAL_LABEL(skip_8)
+ movs r2, r3
+ subs r0, r0, #(8 * BLOCK_SIZE)
+LOCAL_LABEL(skip_8):
+# else
movhs r2, r3
- subhs ip, ip, #(8 * 12)
+ subhs ip, ip, #(8 * BLOCK_SIZE)
+# endif
- lsr r3, r2, #4
+ lsrs r3, r2, #4
cmp r3, r1
+# if __ARM_ARCH_ISA_THUMB == 1
+ blo LOCAL_LABEL(skip_4)
+ movs r2, r3
+ subs r0, r0, #(4 * BLOCK_SIZE)
+LOCAL_LABEL(skip_4):
+# else
movhs r2, r3
- subhs ip, #(4 * 12)
+ subhs ip, #(4 * BLOCK_SIZE)
+# endif
- lsr r3, r2, #2
+ lsrs r3, r2, #2
cmp r3, r1
+# if __ARM_ARCH_ISA_THUMB == 1
+ blo LOCAL_LABEL(skip_2)
+ movs r2, r3
+ subs r0, r0, #(2 * BLOCK_SIZE)
+LOCAL_LABEL(skip_2):
+# else
movhs r2, r3
- subhs ip, ip, #(2 * 12)
+ subhs ip, ip, #(2 * BLOCK_SIZE)
+# endif
/* Last block, no need to update r2 or r3. */
+# if __ARM_ARCH_ISA_THUMB == 1
+ lsrs r3, r2, #1
+ cmp r3, r1
+ blo LOCAL_LABEL(skip_1)
+ subs r0, r0, #(1 * BLOCK_SIZE)
+LOCAL_LABEL(skip_1):
+ movs r2, r0
+ mov r0, ip
+ movs r3, #0
+ JMP (r2)
+
+# else
cmp r1, r2, lsr #1
- subls ip, ip, #(1 * 12)
+ subls ip, ip, #(1 * BLOCK_SIZE)
- mov r3, #0
+ movs r3, #0
JMP(ip)
-# endif
+# endif
+# endif /* __ARM_FEATURE_CLZ */
+
#define IMM #
+ /* Due to the range limit of branches in Thumb1, we have to place the
+ divby0 block closer to the branches that reach it. */
+LOCAL_LABEL(divby0):
+ movs r0, #0
+# if defined(__ARM_EABI__)
+ bl __aeabi_idiv0 // due to the relocation range limit, we can't use b.
+# endif
+ JMP(lr)
+
+#if __ARM_ARCH_ISA_THUMB == 1
+#define block(shift) \
+ lsls r2, r1, IMM shift; \
+ cmp r0, r2; \
+ blo LOCAL_LABEL(block_skip_##shift); \
+ subs r0, r0, r2; \
+ LOCAL_LABEL(block_skip_##shift) :; \
+ adcs r3, r3 /* same as ((r3 << 1) | Carry). Carry is set if r0 >= r2. */
+
+ /* TODO: if the current location counter is not word aligned, we don't
+ need the .p2align and nop */
+ /* Label div0block must be word-aligned. First align block 31 */
+ .p2align 2
+ nop /* Padding to align div0block as 31 blocks = 310 bytes */
+
+#else
#define block(shift) \
cmp r0, r1, lsl IMM shift; \
ITT(hs); \
WIDE(addhs) r3, r3, IMM (1 << shift); \
WIDE(subhs) r0, r0, r1, lsl IMM shift
+#endif
block(31)
block(30)
@@ -159,12 +252,14 @@ LOCAL_LABEL(div0block):
JMP(lr)
#endif /* __ARM_ARCH_EXT_IDIV__ */
+#if __ARM_ARCH_EXT_IDIV__
LOCAL_LABEL(divby0):
- mov r0, #0
-#ifdef __ARM_EABI__
- b __aeabi_idiv0
-#else
- JMP(lr)
+ mov r0, #0
+# ifdef __ARM_EABI__
+ b __aeabi_idiv0
+# else
+ JMP(lr)
+# endif
#endif
END_COMPILERRT_FUNCTION(__udivsi3)
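
Editor's note: for reference, the unrolled block(31)..block(0) chain computes ordinary binary long division; the CLZ path (or, without CLZ, the compare cascade above) only computes where to jump into the chain so that blocks which cannot produce a quotient bit are skipped. A C++ rendition of the underlying loop, without the computed-jump optimization:

    // Plain binary long division: for each shift from high to low, take a
    // quotient bit when (denom << shift) still fits into the remainder.
    // Each unrolled "block(shift)" in the assembly is one loop iteration.
    // Assumes denom != 0 (the assembly branches to divby0 for that case).
    unsigned udivsi3_model(unsigned num, unsigned denom) {
      unsigned quot = 0;
      for (int shift = 31; shift >= 0; --shift) {
        // 64-bit compare guards against overflow of denom << shift.
        if ((unsigned long long)denom << shift <= num) {
          num -= denom << shift;
          quot |= 1u << shift;
        }
      }
      return quot;  // 'num' now holds the remainder
    }
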
diff --git a/lib/builtins/arm64/Makefile.mk b/lib/builtins/arm64/Makefile.mk
deleted file mode 100644
index 7f7e38661303..000000000000
--- a/lib/builtins/arm64/Makefile.mk
+++ /dev/null
@@ -1,20 +0,0 @@
-#===- lib/builtins/arm64/Makefile.mk -----------------------*- Makefile -*--===#
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-#===------------------------------------------------------------------------===#
-
-ModuleName := builtins
-SubDirs :=
-OnlyArchs := arm64
-
-AsmSources := $(foreach file,$(wildcard $(Dir)/*.S),$(notdir $(file)))
-Sources := $(foreach file,$(wildcard $(Dir)/*.c),$(notdir $(file)))
-ObjNames := $(Sources:%.c=%.o) $(AsmSources:%.S=%.o)
-Implementation := Optimized
-
-# FIXME: use automatic dependencies?
-Dependencies := $(wildcard lib/*.h $(Dir)/*.h)
diff --git a/lib/builtins/armv6m/Makefile.mk b/lib/builtins/armv6m/Makefile.mk
deleted file mode 100644
index f3c1807f01b8..000000000000
--- a/lib/builtins/armv6m/Makefile.mk
+++ /dev/null
@@ -1,20 +0,0 @@
-#===- lib/builtins/arm/Makefile.mk -------------------------*- Makefile -*--===#
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-#===------------------------------------------------------------------------===#
-
-ModuleName := builtins
-SubDirs :=
-OnlyArchs := armv6m
-
-AsmSources := $(foreach file,$(wildcard $(Dir)/*.S),$(notdir $(file)))
-Sources := $(foreach file,$(wildcard $(Dir)/*.c),$(notdir $(file)))
-ObjNames := $(Sources:%.c=%.o) $(AsmSources:%.S=%.o)
-Implementation := Optimized
-
-# FIXME: use automatic dependencies?
-Dependencies := $(wildcard lib/*.h $(Dir)/*.h)
diff --git a/lib/builtins/assembly.h b/lib/builtins/assembly.h
index 5fc74f68f603..29d9f8844a6a 100644
--- a/lib/builtins/assembly.h
+++ b/lib/builtins/assembly.h
@@ -70,7 +70,7 @@
#if defined(__ARM_ARCH_4T__) || __ARM_ARCH >= 5
#define ARM_HAS_BX
#endif
-#if !defined(__ARM_FEATURE_CLZ) && \
+#if !defined(__ARM_FEATURE_CLZ) && __ARM_ARCH_ISA_THUMB != 1 && \
(__ARM_ARCH >= 6 || (__ARM_ARCH == 5 && !defined(__ARM_ARCH_5__)))
#define __ARM_FEATURE_CLZ
#endif
@@ -149,6 +149,7 @@
#define DEFINE_COMPILERRT_FUNCTION_ALIAS(name, target) \
.globl SYMBOL_NAME(name) SEPARATOR \
SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
+ DECLARE_SYMBOL_VISIBILITY(SYMBOL_NAME(name)) SEPARATOR \
.set SYMBOL_NAME(name), SYMBOL_NAME(target) SEPARATOR
#if defined(__ARM_EABI__)
diff --git a/lib/builtins/atomic.c b/lib/builtins/atomic.c
index f1ddc3e0c522..ee35e342eda5 100644
--- a/lib/builtins/atomic.c
+++ b/lib/builtins/atomic.c
@@ -229,13 +229,20 @@ void __atomic_exchange_c(int size, void *ptr, void *val, void *old, int model) {
// Where the size is known at compile time, the compiler may emit calls to
// specialised versions of the above functions.
////////////////////////////////////////////////////////////////////////////////
+#ifdef __SIZEOF_INT128__
#define OPTIMISED_CASES\
OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t)\
OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t)\
OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t)\
OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t)\
- /* FIXME: __uint128_t isn't available on 32 bit platforms.
- OPTIMISED_CASE(16, IS_LOCK_FREE_16, __uint128_t)*/\
+ OPTIMISED_CASE(16, IS_LOCK_FREE_16, __uint128_t)
+#else
+#define OPTIMISED_CASES\
+ OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t)\
+ OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t)\
+ OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t)\
+ OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t)
+#endif
#define OPTIMISED_CASE(n, lockfree, type)\
type __atomic_load_##n(type *src, int model) {\
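
Editor's note: the gating above matters because __uint128_t exists only where the compiler defines __SIZEOF_INT128__ (typically 64-bit targets); on 32-bit targets the 16-byte specializations would fail to compile rather than gracefully fall back. A minimal illustration of the same guard:

    // The 16-byte atomic entry points can only be compiled when the
    // compiler provides a 128-bit integer type.
    #ifdef __SIZEOF_INT128__
    __uint128_t load16(__uint128_t *src) {
      return __atomic_load_n(src, __ATOMIC_SEQ_CST);
    }
    #endif
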
diff --git a/lib/builtins/clear_cache.c b/lib/builtins/clear_cache.c
index 55bbdd375891..4c2ac3b1eb05 100644
--- a/lib/builtins/clear_cache.c
+++ b/lib/builtins/clear_cache.c
@@ -110,10 +110,12 @@ void __clear_cache(void *start, void *end) {
#elif defined(__linux__)
register int start_reg __asm("r0") = (int) (intptr_t) start;
const register int end_reg __asm("r1") = (int) (intptr_t) end;
+ const register int flags __asm("r2") = 0;
const register int syscall_nr __asm("r7") = __ARM_NR_cacheflush;
__asm __volatile("svc 0x0"
: "=r"(start_reg)
- : "r"(syscall_nr), "r"(start_reg), "r"(end_reg));
+ : "r"(syscall_nr), "r"(start_reg), "r"(end_reg),
+ "r"(flags));
if (start_reg != 0) {
compilerrt_abort();
}
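
Editor's note: the added r2 binding pins down the third argument of ARM Linux's cacheflush syscall, which takes (start, end, flags) and expects flags == 0; previously r2 held whatever the compiler had left there. A hedged equivalent using the generic syscall wrapper (header availability of __ARM_NR_cacheflush is an assumption):

    #include <unistd.h>
    #include <sys/syscall.h>
    #include <asm/unistd.h>  // __ARM_NR_cacheflush on ARM Linux

    // Sketch only: flush the instruction cache for [start, end) with the
    // mandatory flags argument set to 0, as the inline asm now does via r2.
    static void flush_icache(void *start, void *end) {
      syscall(__ARM_NR_cacheflush, start, end, 0);
    }
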
diff --git a/lib/builtins/i386/Makefile.mk b/lib/builtins/i386/Makefile.mk
deleted file mode 100644
index f3776a02c0de..000000000000
--- a/lib/builtins/i386/Makefile.mk
+++ /dev/null
@@ -1,20 +0,0 @@
-#===- lib/builtins/i386/Makefile.mk ------------------------*- Makefile -*--===#
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-#===------------------------------------------------------------------------===#
-
-ModuleName := builtins
-SubDirs :=
-OnlyArchs := i386
-
-AsmSources := $(foreach file,$(wildcard $(Dir)/*.S),$(notdir $(file)))
-Sources := $(foreach file,$(wildcard $(Dir)/*.c),$(notdir $(file)))
-ObjNames := $(Sources:%.c=%.o) $(AsmSources:%.S=%.o)
-Implementation := Optimized
-
-# FIXME: use automatic dependencies?
-Dependencies := $(wildcard lib/*.h $(Dir)/*.h)
diff --git a/lib/builtins/int_lib.h b/lib/builtins/int_lib.h
index 8dfe5672d131..39eee18d9149 100644
--- a/lib/builtins/int_lib.h
+++ b/lib/builtins/int_lib.h
@@ -91,14 +91,14 @@ COMPILER_RT_ABI tu_int __udivmodti4(tu_int a, tu_int b, tu_int* rem);
#include <intrin.h>
uint32_t __inline __builtin_ctz(uint32_t value) {
- uint32_t trailing_zero = 0;
+ unsigned long trailing_zero = 0;
if (_BitScanForward(&trailing_zero, value))
return trailing_zero;
return 32;
}
uint32_t __inline __builtin_clz(uint32_t value) {
- uint32_t leading_zero = 0;
+ unsigned long leading_zero = 0;
if (_BitScanReverse(&leading_zero, value))
return 31 - leading_zero;
return 32;
@@ -106,7 +106,7 @@ uint32_t __inline __builtin_clz(uint32_t value) {
#if defined(_M_ARM) || defined(_M_X64)
uint32_t __inline __builtin_clzll(uint64_t value) {
- uint32_t leading_zero = 0;
+ unsigned long leading_zero = 0;
if (_BitScanReverse64(&leading_zero, value))
return 63 - leading_zero;
return 64;
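
Editor's note: the uint32_t to unsigned long changes fix a type mismatch rather than a size difference: MSVC declares the _BitScan* index out-parameter as unsigned long*, and although both types are 32 bits on Windows, a uint32_t* does not implicitly convert to unsigned long*. A small usage sketch (MSVC intrinsics assumed):

    #include <intrin.h>

    unsigned ctz32(unsigned value) {
      unsigned long idx;  // must be unsigned long to match _BitScanForward
      if (_BitScanForward(&idx, value))
        return idx;       // e.g. value == 0x10 gives idx == 4
      return 32;          // no bits set
    }
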
diff --git a/lib/builtins/mingw_fixfloat.c b/lib/builtins/mingw_fixfloat.c
new file mode 100644
index 000000000000..c462e0dbf654
--- /dev/null
+++ b/lib/builtins/mingw_fixfloat.c
@@ -0,0 +1,36 @@
+/* ===-- mingw_fixfloat.c - Wrap int/float conversions for arm/windows -----===
+ *
+ * The LLVM Compiler Infrastructure
+ *
+ * This file is dual licensed under the MIT and the University of Illinois Open
+ * Source Licenses. See LICENSE.TXT for details.
+ *
+ * ===----------------------------------------------------------------------===
+ */
+
+#include "int_lib.h"
+
+COMPILER_RT_ABI di_int __fixdfdi(double a);
+COMPILER_RT_ABI di_int __fixsfdi(float a);
+COMPILER_RT_ABI du_int __fixunsdfdi(double a);
+COMPILER_RT_ABI du_int __fixunssfdi(float a);
+COMPILER_RT_ABI double __floatdidf(di_int a);
+COMPILER_RT_ABI float __floatdisf(di_int a);
+COMPILER_RT_ABI double __floatundidf(du_int a);
+COMPILER_RT_ABI float __floatundisf(du_int a);
+
+COMPILER_RT_ABI di_int __dtoi64(double a) { return __fixdfdi(a); }
+
+COMPILER_RT_ABI di_int __stoi64(float a) { return __fixsfdi(a); }
+
+COMPILER_RT_ABI du_int __dtou64(double a) { return __fixunsdfdi(a); }
+
+COMPILER_RT_ABI du_int __stou64(float a) { return __fixunssfdi(a); }
+
+COMPILER_RT_ABI double __i64tod(di_int a) { return __floatdidf(a); }
+
+COMPILER_RT_ABI float __i64tos(di_int a) { return __floatdisf(a); }
+
+COMPILER_RT_ABI double __u64tod(du_int a) { return __floatundidf(a); }
+
+COMPILER_RT_ABI float __u64tos(du_int a) { return __floatundisf(a); }
diff --git a/lib/builtins/ppc/Makefile.mk b/lib/builtins/ppc/Makefile.mk
deleted file mode 100644
index 0adc623aa041..000000000000
--- a/lib/builtins/ppc/Makefile.mk
+++ /dev/null
@@ -1,20 +0,0 @@
-#===- lib/builtins/ppc/Makefile.mk -------------------------*- Makefile -*--===#
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-#===------------------------------------------------------------------------===#
-
-ModuleName := builtins
-SubDirs :=
-OnlyArchs := ppc
-
-AsmSources := $(foreach file,$(wildcard $(Dir)/*.S),$(notdir $(file)))
-Sources := $(foreach file,$(wildcard $(Dir)/*.c),$(notdir $(file)))
-ObjNames := $(Sources:%.c=%.o) $(AsmSources:%.S=%.o)
-Implementation := Optimized
-
-# FIXME: use automatic dependencies?
-Dependencies := $(wildcard lib/*.h $(Dir)/*.h)
diff --git a/lib/builtins/x86_64/Makefile.mk b/lib/builtins/x86_64/Makefile.mk
deleted file mode 100644
index 83848dddd964..000000000000
--- a/lib/builtins/x86_64/Makefile.mk
+++ /dev/null
@@ -1,20 +0,0 @@
-#===- lib/builtins/x86_64/Makefile.mk ----------------------*- Makefile -*--===#
-#
-# The LLVM Compiler Infrastructure
-#
-# This file is distributed under the University of Illinois Open Source
-# License. See LICENSE.TXT for details.
-#
-#===------------------------------------------------------------------------===#
-
-ModuleName := builtins
-SubDirs :=
-OnlyArchs := x86_64 x86_64h
-
-AsmSources := $(foreach file,$(wildcard $(Dir)/*.S),$(notdir $(file)))
-Sources := $(foreach file,$(wildcard $(Dir)/*.c),$(notdir $(file)))
-ObjNames := $(Sources:%.c=%.o) $(AsmSources:%.S=%.o)
-Implementation := Optimized
-
-# FIXME: use automatic dependencies?
-Dependencies := $(wildcard lib/*.h $(Dir)/*.h)
diff --git a/lib/cfi/CMakeLists.txt b/lib/cfi/CMakeLists.txt
index 56ef88204424..206400201466 100644
--- a/lib/cfi/CMakeLists.txt
+++ b/lib/cfi/CMakeLists.txt
@@ -1,5 +1,4 @@
-add_custom_target(cfi)
-set_target_properties(cfi PROPERTIES FOLDER "Compiler-RT Misc")
+add_compiler_rt_component(cfi)
set(CFI_SOURCES cfi.cc)
@@ -36,4 +35,3 @@ foreach(arch ${CFI_SUPPORTED_ARCH})
endforeach()
add_compiler_rt_resource_file(cfi_blacklist cfi_blacklist.txt cfi)
-add_dependencies(compiler-rt cfi)
diff --git a/lib/cfi/cfi.cc b/lib/cfi/cfi.cc
index ca2cf8f30617..d463ca8daf50 100644
--- a/lib/cfi/cfi.cc
+++ b/lib/cfi/cfi.cc
@@ -30,6 +30,8 @@ typedef ElfW(Ehdr) Elf_Ehdr;
#include "ubsan/ubsan_handlers.h"
#endif
+using namespace __sanitizer;
+
namespace __cfi {
#define kCfiShadowLimitsStorageSize 4096 // 1 page
diff --git a/lib/dfsan/CMakeLists.txt b/lib/dfsan/CMakeLists.txt
index eca402dd3b03..2c486bff821b 100644
--- a/lib/dfsan/CMakeLists.txt
+++ b/lib/dfsan/CMakeLists.txt
@@ -11,8 +11,7 @@ append_rtti_flag(OFF DFSAN_COMMON_CFLAGS)
append_list_if(COMPILER_RT_HAS_FFREESTANDING_FLAG -ffreestanding DFSAN_COMMON_CFLAGS)
# Static runtime library.
-add_custom_target(dfsan)
-set_target_properties(dfsan PROPERTIES FOLDER "Compiler-RT Misc")
+add_compiler_rt_component(dfsan)
foreach(arch ${DFSAN_SUPPORTED_ARCH})
set(DFSAN_CFLAGS ${DFSAN_COMMON_CFLAGS})
@@ -46,5 +45,3 @@ add_custom_command(OUTPUT ${dfsan_abilist_filename}
add_dependencies(dfsan dfsan_abilist)
install(FILES ${dfsan_abilist_filename}
DESTINATION ${COMPILER_RT_INSTALL_PATH})
-
-add_dependencies(compiler-rt dfsan)
diff --git a/lib/dfsan/dfsan.cc b/lib/dfsan/dfsan.cc
index 4156000a1cce..3aa99b7f9c27 100644
--- a/lib/dfsan/dfsan.cc
+++ b/lib/dfsan/dfsan.cc
@@ -114,6 +114,26 @@ SANITIZER_INTERFACE_ATTRIBUTE uptr __dfsan_shadow_ptr_mask;
// | reserved by kernel |
// +--------------------+ 0x0000000000
+// On Linux/AArch64 (48-bit VMA), memory is laid out as follows:
+//
+// +--------------------+ 0x1000000000000 (top of memory)
+// | application memory |
+// +--------------------+ 0xffff00008000 (kAppAddr)
+// | unused |
+// +--------------------+ 0xaaaab0000000 (top of PIE address)
+// | application PIE |
+// +--------------------+ 0xaaaaa0000000 (base of PIE address)
+// | |
+// | unused |
+// | |
+// +--------------------+ 0x8200000000 (kUnusedAddr)
+// | union table |
+// +--------------------+ 0x8000000000 (kUnionTableAddr)
+// | shadow memory |
+// +--------------------+ 0x0000010000 (kShadowAddr)
+// | reserved by kernel |
+// +--------------------+ 0x0000000000
+
typedef atomic_dfsan_label dfsan_union_table_t[kNumLabels][kNumLabels];
#ifdef DFSAN_RUNTIME_VMA
@@ -372,11 +392,12 @@ static void InitializePlatformEarly() {
#ifdef DFSAN_RUNTIME_VMA
__dfsan::vmaSize =
(MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
- if (__dfsan::vmaSize == 39 || __dfsan::vmaSize == 42) {
+ if (__dfsan::vmaSize == 39 || __dfsan::vmaSize == 42 ||
+ __dfsan::vmaSize == 48) {
__dfsan_shadow_ptr_mask = ShadowMask();
} else {
Printf("FATAL: DataFlowSanitizer: unsupported VMA range\n");
- Printf("FATAL: Found %d - Supported 39 and 42\n", __dfsan::vmaSize);
+ Printf("FATAL: Found %d - Supported 39, 42, and 48\n", __dfsan::vmaSize);
Die();
}
#endif
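
Editor's note: the VMA width here is derived from the address of the current stack frame, relying on the stack living near the top of the user address space. An illustrative sketch of the arithmetic (MostSignificantSetBitIndex is the sanitizer-common equivalent of the builtin used below):

    // With a 48-bit VMA the initial stack sits near 0x0000ffffffffffff,
    // whose most significant set bit is bit 47, so vmaSize == 48;
    // the 39- and 42-bit cases follow the same pattern.
    int vma_size_from(unsigned long long frame_addr) {
      int msb = 63 - __builtin_clzll(frame_addr);  // frame_addr != 0 assumed
      return msb + 1;
    }
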
diff --git a/lib/dfsan/dfsan.h b/lib/dfsan/dfsan.h
index 81f949e3019e..33145deef12e 100644
--- a/lib/dfsan/dfsan.h
+++ b/lib/dfsan/dfsan.h
@@ -18,6 +18,9 @@
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "dfsan_platform.h"
+using __sanitizer::uptr;
+using __sanitizer::u16;
+
// Copy declarations from public sanitizer/dfsan_interface.h header here.
typedef u16 dfsan_label;
diff --git a/lib/dfsan/dfsan_interceptors.cc b/lib/dfsan/dfsan_interceptors.cc
index 8b7d64e25a39..5ecbb43e7c46 100644
--- a/lib/dfsan/dfsan_interceptors.cc
+++ b/lib/dfsan/dfsan_interceptors.cc
@@ -16,6 +16,8 @@
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_common.h"
+using namespace __sanitizer;
+
INTERCEPTOR(void *, mmap, void *addr, SIZE_T length, int prot, int flags,
int fd, OFF_T offset) {
void *res = REAL(mmap)(addr, length, prot, flags, fd, offset);
diff --git a/lib/dfsan/dfsan_platform.h b/lib/dfsan/dfsan_platform.h
index f1d9f108e908..98284bafd4ff 100644
--- a/lib/dfsan/dfsan_platform.h
+++ b/lib/dfsan/dfsan_platform.h
@@ -46,6 +46,13 @@ struct Mapping42 {
static const uptr kShadowMask = ~0x3c000000000;
};
+struct Mapping48 {
+ static const uptr kShadowAddr = 0x10000;
+ static const uptr kUnionTableAddr = 0x8000000000;
+ static const uptr kAppAddr = 0xffff00008000;
+ static const uptr kShadowMask = ~0xfffff0000000;
+};
+
extern int vmaSize;
# define DFSAN_RUNTIME_VMA 1
#else
@@ -72,11 +79,13 @@ uptr MappingImpl(void) {
template<int Type>
uptr MappingArchImpl(void) {
#ifdef __aarch64__
- if (vmaSize == 39)
- return MappingImpl<Mapping39, Type>();
- else
- return MappingImpl<Mapping42, Type>();
+ switch (vmaSize) {
+ case 39: return MappingImpl<Mapping39, Type>();
+ case 42: return MappingImpl<Mapping42, Type>();
+ case 48: return MappingImpl<Mapping48, Type>();
+ }
DCHECK(0);
+ return 0;
#else
return MappingImpl<Mapping, Type>();
#endif
diff --git a/lib/dfsan/done_abilist.txt b/lib/dfsan/done_abilist.txt
index 7ca8aeba32fe..a00dc5426cd0 100644
--- a/lib/dfsan/done_abilist.txt
+++ b/lib/dfsan/done_abilist.txt
@@ -266,6 +266,14 @@ fun:reflect.makeFuncStub=discard
# Replaces __sanitizer_cov_trace_cmp with __dfsw___sanitizer_cov_trace_cmp
fun:__sanitizer_cov_trace_cmp=custom
fun:__sanitizer_cov_trace_cmp=uninstrumented
+fun:__sanitizer_cov_trace_cmp1=custom
+fun:__sanitizer_cov_trace_cmp1=uninstrumented
+fun:__sanitizer_cov_trace_cmp2=custom
+fun:__sanitizer_cov_trace_cmp2=uninstrumented
+fun:__sanitizer_cov_trace_cmp4=custom
+fun:__sanitizer_cov_trace_cmp4=uninstrumented
+fun:__sanitizer_cov_trace_cmp8=custom
+fun:__sanitizer_cov_trace_cmp8=uninstrumented
# Similar for __sanitizer_cov_trace_switch
fun:__sanitizer_cov_trace_switch=custom
fun:__sanitizer_cov_trace_switch=uninstrumented
diff --git a/lib/esan/CMakeLists.txt b/lib/esan/CMakeLists.txt
index 2a0a71b2e348..2012ab642bf1 100644
--- a/lib/esan/CMakeLists.txt
+++ b/lib/esan/CMakeLists.txt
@@ -1,7 +1,6 @@
# Build for the EfficiencySanitizer runtime support library.
-add_custom_target(esan)
-set_target_properties(esan PROPERTIES FOLDER "Compiler-RT Misc")
+add_compiler_rt_component(esan)
set(ESAN_RTL_CFLAGS ${SANITIZER_COMMON_CFLAGS})
append_rtti_flag(OFF ESAN_RTL_CFLAGS)
@@ -36,8 +35,6 @@ foreach (arch ${ESAN_SUPPORTED_ARCH})
clang_rt.esan-${arch}-symbols)
endforeach()
-add_dependencies(compiler-rt esan)
-
if (COMPILER_RT_INCLUDE_TESTS)
# TODO(bruening): add tests via add_subdirectory(tests)
endif()
diff --git a/lib/esan/cache_frag.cpp b/lib/esan/cache_frag.cpp
index a3e612daceb1..5fa5c7d54244 100644
--- a/lib/esan/cache_frag.cpp
+++ b/lib/esan/cache_frag.cpp
@@ -94,8 +94,8 @@ static void reportStructCounter(StructHashMap::Handle &Handle) {
type = "struct";
start = &Struct->StructName[7];
}
- // Remove the suffixes with '#' during print.
- end = strchr(start, '#');
+ // Remove the suffixes with '$' during print.
+ end = strchr(start, '$');
CHECK(end != nullptr);
Report(" %s %.*s\n", type, end - start, start);
Report(" size = %u, count = %llu, ratio = %llu, array access = %llu\n",
diff --git a/lib/esan/esan.cpp b/lib/esan/esan.cpp
index 2fb77894d4fb..09b530b6645f 100644
--- a/lib/esan/esan.cpp
+++ b/lib/esan/esan.cpp
@@ -141,9 +141,17 @@ static bool verifyShadowScheme() {
}
#endif
+uptr VmaSize;
+
static void initializeShadow() {
verifyAddressSpace();
+ // This is based on the assumption that the initial stack is always
+ // allocated in the topmost segment of the user address space; that
+ // assumption holds on all currently supported platforms.
+ VmaSize =
+ (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
+
DCHECK(verifyShadowScheme());
Mapping.initialize(ShadowScale[__esan_which_tool]);
diff --git a/lib/esan/esan.h b/lib/esan/esan.h
index 5a0dde627888..e73b21e56ff1 100644
--- a/lib/esan/esan.h
+++ b/lib/esan/esan.h
@@ -34,6 +34,7 @@ namespace __esan {
extern bool EsanIsInitialized;
extern bool EsanDuringInit;
+extern uptr VmaSize;
void initializeLibrary(ToolType Tool);
int finalizeLibrary();
diff --git a/lib/esan/esan_flags.cpp b/lib/esan/esan_flags.cpp
index 3b047e28be22..c90bf2493f7b 100644
--- a/lib/esan/esan_flags.cpp
+++ b/lib/esan/esan_flags.cpp
@@ -17,6 +17,8 @@
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_flags.h"
+using namespace __sanitizer;
+
namespace __esan {
static const char EsanOptsEnv[] = "ESAN_OPTIONS";
diff --git a/lib/esan/esan_hashtable.h b/lib/esan/esan_hashtable.h
new file mode 100644
index 000000000000..7bd829740400
--- /dev/null
+++ b/lib/esan/esan_hashtable.h
@@ -0,0 +1,381 @@
+//===-- esan_hashtable.h ----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of EfficiencySanitizer, a family of performance tuners.
+//
+// Generic resizing hashtable.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_allocator_internal.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_mutex.h"
+#include <stddef.h>
+
+namespace __esan {
+
+//===----------------------------------------------------------------------===//
+// Default hash and comparison functions
+//===----------------------------------------------------------------------===//
+
+template <typename T> struct DefaultHash {
+ size_t operator()(const T &Key) const {
+ return (size_t)Key;
+ }
+};
+
+template <typename T> struct DefaultEqual {
+ bool operator()(const T &Key1, const T &Key2) const {
+ return Key1 == Key2;
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// HashTable declaration
+//===----------------------------------------------------------------------===//
+
+// A simple resizing and mutex-locked hashtable.
+//
+// If the default hash functor is used, KeyTy must have an operator size_t().
+// If the default comparison functor is used, KeyTy must have an operator ==.
+//
+// By default all operations are internally-synchronized with a mutex, with no
+// synchronization for payloads once hashtable functions return. If
+// ExternalLock is set to true, the caller should call the lock() and unlock()
+// routines around all hashtable operations and subsequent manipulation of
+// payloads.
+template <typename KeyTy, typename DataTy, bool ExternalLock = false,
+ typename HashFuncTy = DefaultHash<KeyTy>,
+ typename EqualFuncTy = DefaultEqual<KeyTy> >
+class HashTable {
+public:
+ // InitialCapacity must be a power of 2.
+ // ResizeFactor must be between 1 and 99 and indicates the
+ // maximum percentage full that the table should ever be.
+ HashTable(u32 InitialCapacity = 2048, u32 ResizeFactor = 70);
+ ~HashTable();
+ bool lookup(const KeyTy &Key, DataTy &Payload); // Const except for Mutex.
+ bool add(const KeyTy &Key, const DataTy &Payload);
+ bool remove(const KeyTy &Key);
+ u32 size(); // Const except for Mutex.
+ // If the table is internally-synchronized, this lock must not be held
+ // while a hashtable function is called as it will deadlock: the lock
+ // is not recursive. This is meant for use with externally-synchronized
+ // tables or with an iterator.
+ void lock();
+ void unlock();
+
+private:
+ struct HashEntry {
+ KeyTy Key;
+ DataTy Payload;
+ HashEntry *Next;
+ };
+
+public:
+ struct HashPair {
+ HashPair(KeyTy Key, DataTy Data) : Key(Key), Data(Data) {}
+ KeyTy Key;
+ DataTy Data;
+ };
+
+ // This iterator does not perform any synchronization.
+ // It expects the caller to lock the table across the whole iteration.
+ // Calling HashTable functions while using the iterator is not supported.
+ // The iterator returns copies of the keys and data.
+ class iterator {
+ public:
+ iterator(
+ HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy> *Table);
+ iterator(const iterator &Src) = default;
+ iterator &operator=(const iterator &Src) = default;
+ HashPair operator*();
+ iterator &operator++();
+ iterator &operator++(int);
+ bool operator==(const iterator &Cmp) const;
+ bool operator!=(const iterator &Cmp) const;
+
+ private:
+ iterator(
+ HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy> *Table,
+ int Idx);
+ friend HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>;
+ HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy> *Table;
+ int Idx;
+ HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::HashEntry
+ *Entry;
+ };
+
+ // No erase or insert iterator supported
+ iterator begin();
+ iterator end();
+
+private:
+ void resize();
+
+ HashEntry **Table;
+ u32 Capacity;
+ u32 Entries;
+ const u32 ResizeFactor;
+ BlockingMutex Mutex;
+ const HashFuncTy HashFunc;
+ const EqualFuncTy EqualFunc;
+};
+
+//===----------------------------------------------------------------------===//
+// Hashtable implementation
+//===----------------------------------------------------------------------===//
+
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+ typename HashFuncTy, typename EqualFuncTy>
+HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::HashTable(
+ u32 InitialCapacity, u32 ResizeFactor)
+ : Capacity(InitialCapacity), Entries(0), ResizeFactor(ResizeFactor),
+ HashFunc(HashFuncTy()), EqualFunc(EqualFuncTy()) {
+ CHECK(IsPowerOfTwo(Capacity));
+ CHECK(ResizeFactor >= 1 && ResizeFactor <= 99);
+ Table = (HashEntry **)InternalAlloc(Capacity * sizeof(HashEntry *));
+ internal_memset(Table, 0, Capacity * sizeof(HashEntry *));
+}
+
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+ typename HashFuncTy, typename EqualFuncTy>
+HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::~HashTable() {
+ for (u32 i = 0; i < Capacity; ++i) {
+ HashEntry *Entry = Table[i];
+ while (Entry != nullptr) {
+ HashEntry *Next = Entry->Next;
+ Entry->Payload.~DataTy();
+ InternalFree(Entry);
+ Entry = Next;
+ }
+ }
+ InternalFree(Table);
+}
+
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+ typename HashFuncTy, typename EqualFuncTy>
+u32 HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::size() {
+ u32 Res;
+ if (!ExternalLock)
+ Mutex.Lock();
+ Res = Entries;
+ if (!ExternalLock)
+ Mutex.Unlock();
+ return Res;
+}
+
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+ typename HashFuncTy, typename EqualFuncTy>
+bool HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::lookup(
+ const KeyTy &Key, DataTy &Payload) {
+ if (!ExternalLock)
+ Mutex.Lock();
+ bool Found = false;
+ size_t Hash = HashFunc(Key) % Capacity;
+ HashEntry *Entry = Table[Hash];
+ for (; Entry != nullptr; Entry = Entry->Next) {
+ if (EqualFunc(Entry->Key, Key)) {
+ Payload = Entry->Payload;
+ Found = true;
+ break;
+ }
+ }
+ if (!ExternalLock)
+ Mutex.Unlock();
+ return Found;
+}
+
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+ typename HashFuncTy, typename EqualFuncTy>
+void HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::resize() {
+ if (!ExternalLock)
+ Mutex.CheckLocked();
+ size_t OldCapacity = Capacity;
+ HashEntry **OldTable = Table;
+ Capacity *= 2;
+ Table = (HashEntry **)InternalAlloc(Capacity * sizeof(HashEntry *));
+ internal_memset(Table, 0, Capacity * sizeof(HashEntry *));
+ // Re-hash
+ for (u32 i = 0; i < OldCapacity; ++i) {
+ HashEntry *OldEntry = OldTable[i];
+ while (OldEntry != nullptr) {
+ HashEntry *Next = OldEntry->Next;
+ size_t Hash = HashFunc(OldEntry->Key) % Capacity;
+ OldEntry->Next = Table[Hash];
+ Table[Hash] = OldEntry;
+ OldEntry = Next;
+ }
+ }
+}
+
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+ typename HashFuncTy, typename EqualFuncTy>
+bool HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::add(
+ const KeyTy &Key, const DataTy &Payload) {
+ if (!ExternalLock)
+ Mutex.Lock();
+ bool Exists = false;
+ size_t Hash = HashFunc(Key) % Capacity;
+ HashEntry *Entry = Table[Hash];
+ for (; Entry != nullptr; Entry = Entry->Next) {
+ if (EqualFunc(Entry->Key, Key)) {
+ Exists = true;
+ break;
+ }
+ }
+ if (!Exists) {
+ Entries++;
+ if (Entries * 100 >= Capacity * ResizeFactor) {
+ resize();
+ Hash = HashFunc(Key) % Capacity;
+ }
+ HashEntry *Add = (HashEntry *)InternalAlloc(sizeof(*Add));
+ Add->Key = Key;
+ Add->Payload = Payload;
+ Add->Next = Table[Hash];
+ Table[Hash] = Add;
+ }
+ if (!ExternalLock)
+ Mutex.Unlock();
+ return !Exists;
+}
+
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+ typename HashFuncTy, typename EqualFuncTy>
+bool HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::remove(
+ const KeyTy &Key) {
+ if (!ExternalLock)
+ Mutex.Lock();
+ bool Found = false;
+ size_t Hash = HashFunc(Key) % Capacity;
+ HashEntry *Entry = Table[Hash];
+ HashEntry *Prev = nullptr;
+ for (; Entry != nullptr; Prev = Entry, Entry = Entry->Next) {
+ if (EqualFunc(Entry->Key, Key)) {
+ Found = true;
+ Entries--;
+ if (Prev == nullptr)
+ Table[Hash] = Entry->Next;
+ else
+ Prev->Next = Entry->Next;
+ Entry->Payload.~DataTy();
+ InternalFree(Entry);
+ break;
+ }
+ }
+ if (!ExternalLock)
+ Mutex.Unlock();
+ return Found;
+}
+
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+ typename HashFuncTy, typename EqualFuncTy>
+void HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::lock() {
+ Mutex.Lock();
+}
+
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+ typename HashFuncTy, typename EqualFuncTy>
+void HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::unlock() {
+ Mutex.Unlock();
+}
+
+//===----------------------------------------------------------------------===//
+// Iterator implementation
+//===----------------------------------------------------------------------===//
+
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+ typename HashFuncTy, typename EqualFuncTy>
+HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::iterator::
+ iterator(
+ HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy> *Table)
+ : Table(Table), Idx(-1), Entry(nullptr) {
+ operator++();
+}
+
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+ typename HashFuncTy, typename EqualFuncTy>
+HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::iterator::
+ iterator(
+ HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy> *Table,
+ int Idx)
+ : Table(Table), Idx(Idx), Entry(nullptr) {
+ CHECK(Idx >= (int)Table->Capacity); // Only used to create end().
+}
+
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+ typename HashFuncTy, typename EqualFuncTy>
+typename HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy,
+ EqualFuncTy>::HashPair
+ HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::iterator::
+ operator*() {
+ CHECK(Idx >= 0 && Idx < (int)Table->Capacity);
+ CHECK(Entry != nullptr);
+ return HashPair(Entry->Key, Entry->Payload);
+}
+
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+ typename HashFuncTy, typename EqualFuncTy>
+typename HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy,
+ EqualFuncTy>::iterator &
+ HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::iterator::
+ operator++() {
+ if (Entry != nullptr)
+ Entry = Entry->Next;
+ while (Entry == nullptr) {
+ ++Idx;
+ if (Idx >= (int)Table->Capacity)
+ break; // At end().
+ Entry = Table->Table[Idx];
+ }
+ return *this;
+}
+
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+ typename HashFuncTy, typename EqualFuncTy>
+typename HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy,
+ EqualFuncTy>::iterator &
+ HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::iterator::
+ operator++(int) {
+ iterator Temp(*this);
+ operator++();
+ return Temp;
+}
+
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+ typename HashFuncTy, typename EqualFuncTy>
+bool HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::iterator::
+operator==(const iterator &Cmp) const {
+ return Cmp.Table == Table && Cmp.Idx == Idx && Cmp.Entry == Entry;
+}
+
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+ typename HashFuncTy, typename EqualFuncTy>
+bool HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::iterator::
+operator!=(const iterator &Cmp) const {
+ return Cmp.Table != Table || Cmp.Idx != Idx || Cmp.Entry != Entry;
+}
+
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+ typename HashFuncTy, typename EqualFuncTy>
+typename HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy,
+ EqualFuncTy>::iterator
+HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::begin() {
+ return iterator(this);
+}
+
+template <typename KeyTy, typename DataTy, bool ExternalLock,
+ typename HashFuncTy, typename EqualFuncTy>
+typename HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy,
+ EqualFuncTy>::iterator
+HashTable<KeyTy, DataTy, ExternalLock, HashFuncTy, EqualFuncTy>::end() {
+ return iterator(this, Capacity);
+}
+
+} // namespace __esan
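
Editor's note: a hypothetical usage sketch of the table above (key/payload types and values are illustrative). Note that iteration requires holding the table lock for the whole loop, per the iterator's contract:

    void example() {
      __esan::HashTable<unsigned long, int> Table;  // internally synchronized
      Table.add(0x1234, 42);      // returns false if the key already exists
      int Val;
      if (Table.lookup(0x1234, Val)) {
        // Val == 42 here.
      }
      Table.lock();               // required around any iteration
      for (auto It = Table.begin(); It != Table.end(); ++It) {
        auto Pair = *It;          // copies of key and payload
        (void)Pair.Key; (void)Pair.Data;
      }
      Table.unlock();
      Table.remove(0x1234);
    }
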
diff --git a/lib/esan/esan_interceptors.cpp b/lib/esan/esan_interceptors.cpp
index 647f010852b0..9ae5482a3cad 100644
--- a/lib/esan/esan_interceptors.cpp
+++ b/lib/esan/esan_interceptors.cpp
@@ -461,28 +461,35 @@ INTERCEPTOR(int, pthread_sigmask, int how, __sanitizer_sigset_t *set,
// Malloc interceptors
//===----------------------------------------------------------------------===//
-static char early_alloc_buf[128];
-static bool used_early_alloc_buf;
+static const uptr early_alloc_buf_size = 4096;
+static uptr allocated_bytes;
+static char early_alloc_buf[early_alloc_buf_size];
+
+static bool isInEarlyAllocBuf(const void *ptr) {
+ return ((uptr)ptr >= (uptr)early_alloc_buf &&
+ ((uptr)ptr - (uptr)early_alloc_buf) < sizeof(early_alloc_buf));
+}
static void *handleEarlyAlloc(uptr size) {
// If esan is initialized during an interceptor (which happens with some
// tcmalloc implementations that call pthread_mutex_lock), the call from
- // dlsym to calloc will deadlock. There is only one such calloc (dlsym
- // allocates a single pthread key), so we work around it by using a
- // static buffer for the calloc request. The loader currently needs
- // 32 bytes but we size at 128 to allow for future changes.
+ // dlsym to calloc will deadlock.
+ // dlsym may also call malloc before REAL(malloc) is retrieved from dlsym.
+ // We work around it by using a static buffer for the early malloc/calloc
+ // requests.
// This solution will also allow us to deliberately intercept malloc & family
// in the future (to perform tool actions on each allocation, without
// replacing the allocator), as it also solves the problem of intercepting
// calloc when it will itself be called before its REAL pointer is
// initialized.
- CHECK(!used_early_alloc_buf && size < sizeof(early_alloc_buf));
// We do not handle multiple threads here. This only happens at process init
// time, and while it's possible for a shared library to create early threads
// that race here, we consider that to be a corner case extreme enough that
// it's not worth the effort to handle.
- used_early_alloc_buf = true;
- return (void *)early_alloc_buf;
+ void *mem = (void *)&early_alloc_buf[allocated_bytes];
+ allocated_bytes += size;
+ CHECK_LT(allocated_bytes, early_alloc_buf_size);
+ return mem;
}
INTERCEPTOR(void*, calloc, uptr size, uptr n) {
@@ -496,14 +503,20 @@ INTERCEPTOR(void*, calloc, uptr size, uptr n) {
return res;
}
+INTERCEPTOR(void*, malloc, uptr size) {
+ if (EsanDuringInit && REAL(malloc) == nullptr)
+ return handleEarlyAlloc(size);
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, malloc, size);
+ return REAL(malloc)(size);
+}
+
INTERCEPTOR(void, free, void *p) {
void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, free, p);
- if (p == (void *)early_alloc_buf) {
- // We expect just a singleton use but we clear this for cleanliness.
- used_early_alloc_buf = false;
+ // There are only a few early allocation requests, so we simply skip the free.
+ if (isInEarlyAllocBuf(p))
return;
- }
+ COMMON_INTERCEPTOR_ENTER(ctx, free, p);
REAL(free)(p);
}
@@ -534,6 +547,7 @@ void initializeInterceptors() {
ESAN_MAYBE_INTERCEPT_PTHREAD_SIGMASK;
INTERCEPT_FUNCTION(calloc);
+ INTERCEPT_FUNCTION(malloc);
INTERCEPT_FUNCTION(free);
// TODO(bruening): intercept routines that other sanitizers intercept that
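
Editor's note: the replacement turns the single-shot static buffer into a simple bump allocator; a stripped-down C++ model of the scheme (buffer size as in the patch, checks simplified):

    // Bump allocation out of a fixed static buffer: hand out the next
    // 'size' bytes and never reclaim them (frees of early blocks are
    // ignored via the isInEarlyAllocBuf test in the free interceptor).
    static const size_t kEarlyBufSize = 4096;
    static char early_buf[kEarlyBufSize];
    static size_t used_bytes;

    static void *early_alloc(size_t size) {
      void *mem = &early_buf[used_bytes];
      used_bytes += size;
      // The real code CHECKs that used_bytes stays below the buffer size;
      // note it does not round 'size' up for alignment.
      return mem;
    }
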
diff --git a/lib/esan/esan_interface_internal.h b/lib/esan/esan_interface_internal.h
index 3b915d03e07a..df51aa609aff 100644
--- a/lib/esan/esan_interface_internal.h
+++ b/lib/esan/esan_interface_internal.h
@@ -21,6 +21,9 @@
// This header should NOT include any other headers.
// All functions in this header are extern "C" and start with __esan_.
+using __sanitizer::uptr;
+using __sanitizer::u32;
+
extern "C" {
// This should be kept consistent with LLVM's EfficiencySanitizerOptions.
diff --git a/lib/esan/esan_linux.cpp b/lib/esan/esan_linux.cpp
index aa961b66116b..014205ce01eb 100644
--- a/lib/esan/esan_linux.cpp
+++ b/lib/esan/esan_linux.cpp
@@ -25,7 +25,7 @@
namespace __esan {
void verifyAddressSpace() {
-#if SANITIZER_LINUX && defined(__x86_64__)
+#if SANITIZER_LINUX && (defined(__x86_64__) || SANITIZER_MIPS64)
// The kernel determines its mmap base from the stack size limit.
// Our Linux 64-bit shadow mapping assumes the stack limit is less than a
// terabyte, which keeps the mmap region above 0x7e00'.
diff --git a/lib/esan/esan_shadow.h b/lib/esan/esan_shadow.h
index f8f154ef7cca..72a919a190d8 100644
--- a/lib/esan/esan_shadow.h
+++ b/lib/esan/esan_shadow.h
@@ -15,6 +15,7 @@
#ifndef ESAN_SHADOW_H
#define ESAN_SHADOW_H
+#include "esan.h"
#include <sanitizer_common/sanitizer_platform.h>
#if SANITIZER_WORDSIZE != 64
@@ -23,6 +24,12 @@
namespace __esan {
+struct ApplicationRegion {
+ uptr Start;
+ uptr End;
+ bool ShadowMergedWithPrev;
+};
+
#if SANITIZER_LINUX && defined(__x86_64__)
// Linux x86_64
//
@@ -89,12 +96,6 @@ namespace __esan {
// [0x000015ff'ff601000, 0x00001600'00000000]
// [0x000015ff'ff600000, 0x000015ff'ff601000]
-struct ApplicationRegion {
- uptr Start;
- uptr End;
- bool ShadowMergedWithPrev;
-};
-
static const struct ApplicationRegion AppRegions[] = {
{0x0000000000000000ull, 0x0000010000000000u, false},
{0x0000550000000000u, 0x0000570000000000u, false},
@@ -105,6 +106,52 @@ static const struct ApplicationRegion AppRegions[] = {
{0x00007fffff601000u, 0x0000800000000000u, true},
{0xffffffffff600000u, 0xffffffffff601000u, true},
};
+
+#elif SANITIZER_LINUX && SANITIZER_MIPS64
+
+// Application memory falls into these 3 regions
+//
+// [0x00000001'00000000, 0x00000002'00000000) non-PIE + heap
+// [0x000000aa'00000000, 0x000000ab'00000000) PIE
+// [0x000000ff'00000000, 0x000000ff'ffffffff) libraries + stack
+//
+// This formula translates from application memory to shadow memory:
+//
+// shadow(app) = ((app & 0x00000f'ffffffff) + offset) >> scale
+//
+// Where the offset for 1:1 is 0x000013'00000000. For other scales, the
+// offset is shifted left by the scale, except for scales of 1 and 2 where
+// it must be tweaked in order to pass the double-shadow test
+// (see the "shadow(shadow)" comments below):
+// scale == 0: 0x000013'00000000
+// scale == 1: 0x000022'00000000
+// scale == 2: 0x000044'00000000
+// scale >= 3: (0x000013'00000000 << scale)
+//
+// The resulting shadow memory regions for a 0 scaling are:
+//
+// [0x00000014'00000000, 0x00000015'00000000)
+// [0x0000001d'00000000, 0x0000001e'00000000)
+// [0x00000022'00000000, 0x00000022'ffffffff)
+//
+// We also want to ensure that a wild access by the application into the shadow
+// regions will not corrupt our own shadow memory. shadow(shadow) ends up
+// disjoint from shadow(app):
+//
+// [0x00000017'00000000, 0x00000018'00000000)
+// [0x00000020'00000000, 0x00000021'00000000)
+// [0x00000015'00000000, 0x00000015'ffffffff]
+
+static const struct ApplicationRegion AppRegions[] = {
+ {0x0100000000u, 0x0200000000u, false},
+ {0xaa00000000u, 0xab00000000u, false},
+ {0xff00000000u, 0xffffffffffu, false},
+};
+
+#else
+#error Platform not supported
+#endif
+
static const u32 NumAppRegions = sizeof(AppRegions)/sizeof(AppRegions[0]);
// See the comment above: we do not currently support a stack size rlimit
@@ -113,29 +160,59 @@ static const uptr MaxStackSize = (1ULL << 40) - 4096;
class ShadowMapping {
public:
- static const uptr Mask = 0x00000fffffffffffu;
+
// The scale and offset vary by tool.
uptr Scale;
uptr Offset;
+
+ // TODO(sagar.thakur): Try to hardcode the mask as done in the compiler
+ // instrumentation to reduce the runtime cost of appToShadow.
+ struct ShadowMemoryMask40 {
+ static const uptr Mask = 0x0000000fffffffffu;
+ };
+
+ struct ShadowMemoryMask47 {
+ static const uptr Mask = 0x00000fffffffffffu;
+ };
+
void initialize(uptr ShadowScale) {
- static const uptr OffsetArray[3] = {
- 0x0000130000000000u,
- 0x0000220000000000u,
- 0x0000440000000000u,
+
+ const uptr OffsetArray40[3] = {
+ 0x0000001300000000u,
+ 0x0000002200000000u,
+ 0x0000004400000000u,
};
+
+ const uptr OffsetArray47[3] = {
+ 0x0000130000000000u,
+ 0x0000220000000000u,
+ 0x0000440000000000u,
+ };
+
Scale = ShadowScale;
- if (Scale <= 2)
- Offset = OffsetArray[Scale];
- else
- Offset = OffsetArray[0] << Scale;
+ switch (VmaSize) {
+ case 40: {
+ if (Scale <= 2)
+ Offset = OffsetArray40[Scale];
+ else
+ Offset = OffsetArray40[0] << Scale;
+ }
+ break;
+ case 47: {
+ if (Scale <= 2)
+ Offset = OffsetArray47[Scale];
+ else
+ Offset = OffsetArray47[0] << Scale;
+ }
+ break;
+ default: {
+ Printf("ERROR: %d-bit virtual memory address size not supported\n", VmaSize);
+ Die();
+ }
+ }
}
};
extern ShadowMapping Mapping;
-#else
-// We'll want to use templatized functions over the ShadowMapping once
-// we support more platforms.
-#error Platform not supported
-#endif
static inline bool getAppRegion(u32 i, uptr *Start, uptr *End) {
if (i >= NumAppRegions)
@@ -154,9 +231,21 @@ bool isAppMem(uptr Mem) {
return false;
}
+template<typename Params>
+uptr appToShadowImpl(uptr App) {
+ return (((App & Params::Mask) + Mapping.Offset) >> Mapping.Scale);
+}
+
ALWAYS_INLINE
uptr appToShadow(uptr App) {
- return (((App & ShadowMapping::Mask) + Mapping.Offset) >> Mapping.Scale);
+ switch (VmaSize) {
+ case 40: return appToShadowImpl<ShadowMapping::ShadowMemoryMask40>(App);
+ case 47: return appToShadowImpl<ShadowMapping::ShadowMemoryMask47>(App);
+ default: {
+ Printf("ERROR: %d-bit virtual memory address size not supported\n", VmaSize);
+ Die();
+ }
+ }
}
static inline bool getShadowRegion(u32 i, uptr *Start, uptr *End) {
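
Editor's note: a worked instance of the 40-bit MIPS formula above, checking the first shadow region against the first application region (scale 0, offset 0x13'00000000):

    // shadow(app) = ((app & Mask40) + Offset40) >> Scale, with Scale == 0:
    //   app    = 0x01'00000000          (start of non-PIE + heap)
    //   masked = 0x01'00000000 & 0x0f'ffffffff = 0x01'00000000
    //   shadow = 0x01'00000000 + 0x13'00000000 = 0x14'00000000
    // which is the start of the first shadow region listed above.
    unsigned long long shadow40(unsigned long long app) {
      return ((app & 0x0000000fffffffffULL) + 0x0000001300000000ULL) >> 0;
    }
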
diff --git a/lib/interception/interception.h b/lib/interception/interception.h
index 9e9aca215c4d..d79fa67babfa 100644
--- a/lib/interception/interception.h
+++ b/lib/interception/interception.h
@@ -92,8 +92,8 @@ typedef __sanitizer::OFF64_T OFF64_T;
// Just a pair of pointers.
struct interpose_substitution {
- const uptr replacement;
- const uptr original;
+ const __sanitizer::uptr replacement;
+ const __sanitizer::uptr original;
};
// For a function foo() create a global pair of pointers { wrap_foo, foo } in
@@ -158,10 +158,12 @@ const interpose_substitution substitution_##func_name[] \
namespace __interception { \
extern FUNC_TYPE(func) PTR_TO_REAL(func); \
}
+# define ASSIGN_REAL(dst, src) REAL(dst) = REAL(src)
#else // __APPLE__
# define REAL(x) x
# define DECLARE_REAL(ret_type, func, ...) \
extern "C" ret_type func(__VA_ARGS__);
+# define ASSIGN_REAL(x, y)
#endif // __APPLE__
#define DECLARE_REAL_AND_INTERCEPTOR(ret_type, func, ...) \
diff --git a/lib/interception/interception_win.cc b/lib/interception/interception_win.cc
index 8977d59ac4f1..91abecf6de5f 100644
--- a/lib/interception/interception_win.cc
+++ b/lib/interception/interception_win.cc
@@ -148,10 +148,16 @@ static void InterceptionFailed() {
}
static bool DistanceIsWithin2Gig(uptr from, uptr target) {
+#if SANITIZER_WINDOWS64
if (from < target)
return target - from <= (uptr)0x7FFFFFFFU;
else
return from - target <= (uptr)0x80000000U;
+#else
+ // In a 32-bit address space, the address calculation will wrap, so this check
+ // is unnecessary.
+ return true;
+#endif
}
static uptr GetMmapGranularity() {
@@ -167,6 +173,21 @@ static uptr RoundUpTo(uptr size, uptr boundary) {
// FIXME: internal_str* and internal_mem* functions should be moved from the
// ASan sources into interception/.
+static size_t _strlen(const char *str) {
+ const char* p = str;
+ while (*p != '\0') ++p;
+ return p - str;
+}
+
+static char* _strchr(char* str, char c) {
+ while (*str) {
+ if (*str == c)
+ return str;
+ ++str;
+ }
+ return nullptr;
+}
+
static void _memset(void *p, int value, size_t sz) {
for (size_t i = 0; i < sz; ++i)
((char*)p)[i] = (char)value;
@@ -229,10 +250,6 @@ static void WritePadding(uptr from, uptr size) {
_memset((void*)from, 0xCC, (size_t)size);
}
-static void CopyInstructions(uptr from, uptr to, uptr size) {
- _memcpy((void*)from, (void*)to, (size_t)size);
-}
-
static void WriteJumpInstruction(uptr from, uptr target) {
if (!DistanceIsWithin2Gig(from + kJumpInstructionLength, target))
InterceptionFailed();
@@ -294,7 +311,7 @@ struct TrampolineMemoryRegion {
uptr max_size;
};
-static const uptr kTrampolineScanLimitRange = 1 << 30; // 1 gig
+static const uptr kTrampolineScanLimitRange = 1 << 31; // 2 gig
static const int kMaxTrampolineRegion = 1024;
static TrampolineMemoryRegion TrampolineRegions[kMaxTrampolineRegion];
@@ -384,7 +401,7 @@ static uptr AllocateMemoryForTrampoline(uptr image_address, size_t size) {
}
// Returns 0 on error.
-static size_t GetInstructionSize(uptr address) {
+static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
switch (*(u64*)address) {
case 0x90909090909006EB: // stub: jmp over 6 x nop.
return 8;
@@ -410,7 +427,6 @@ static size_t GetInstructionSize(uptr address) {
case 0xb8: // b8 XX XX XX XX : mov eax, XX XX XX XX
case 0xB9: // b9 XX XX XX XX : mov ecx, XX XX XX XX
- case 0xA1: // A1 XX XX XX XX : mov eax, dword ptr ds:[XXXXXXXX]
return 5;
// Cannot overwrite control-instruction. Return 0 to indicate failure.
@@ -452,7 +468,18 @@ static size_t GetInstructionSize(uptr address) {
return 0;
}
+ switch (0x00FFFFFF & *(u32*)address) {
+ case 0x24A48D: // 8D A4 24 XX XX XX XX : lea esp, [esp + XX XX XX XX]
+ return 7;
+ }
+
#if SANITIZER_WINDOWS64
+ switch (*(u8*)address) {
+ case 0xA1: // A1 XX XX XX XX XX XX XX XX :
+ // movabs eax, dword ptr ds:[XXXXXXXX]
+ return 9;
+ }
+
switch (*(u16*)address) {
case 0x5040: // push rax
case 0x5140: // push rcx
@@ -477,17 +504,20 @@ static size_t GetInstructionSize(uptr address) {
case 0xd9f748: // 48 f7 d9 : neg rcx
case 0xd12b48: // 48 2b d1 : sub rdx, rcx
case 0x07c1f6: // f6 c1 07 : test cl, 0x7
+ case 0xc98548: // 48 85 C9 : test rcx, rcx
case 0xc0854d: // 4d 85 c0 : test r8, r8
case 0xc2b60f: // 0f b6 c2 : movzx eax, dl
case 0xc03345: // 45 33 c0 : xor r8d, r8d
+ case 0xdb3345: // 45 33 DB : xor r11d, r11d
case 0xd98b4c: // 4c 8b d9 : mov r11, rcx
case 0xd28b4c: // 4c 8b d2 : mov r10, rdx
+ case 0xc98b4c: // 4C 8B C9 : mov r9, rcx
case 0xd2b60f: // 0f b6 d2 : movzx edx, dl
case 0xca2b48: // 48 2b ca : sub rcx, rdx
case 0x10b70f: // 0f b7 10 : movzx edx, WORD PTR [rax]
case 0xc00b4d: // 4d 0b c0 : or r8, r8
case 0xd18b48: // 48 8b d1 : mov rdx, rcx
- case 0xdc8b4c: // 4c 8b dc : mov r11,rsp
+ case 0xdc8b4c: // 4c 8b dc : mov r11, rsp
case 0xd18b4c: // 4c 8b d1 : mov r10, rcx
return 3;
@@ -496,11 +526,22 @@ static size_t GetInstructionSize(uptr address) {
case 0x588948: // 48 89 58 XX : mov QWORD PTR[rax + XX], rbx
return 4;
+ case 0xec8148: // 48 81 EC XX XX XX XX : sub rsp, XXXXXXXX
+ return 7;
+
case 0x058b48: // 48 8b 05 XX XX XX XX :
// mov rax, QWORD PTR [rip + XXXXXXXX]
case 0x25ff48: // 48 ff 25 XX XX XX XX :
// rex.W jmp QWORD PTR [rip + XXXXXXXX]
+
+ // Instructions having offset relative to 'rip' need offset adjustment.
+ if (rel_offset)
+ *rel_offset = 3;
return 7;
+
+ case 0x2444c7: // C7 44 24 XX YY YY YY YY
+ // mov dword ptr [rsp + XX], YYYYYYYY
+ return 8;
}
switch (*(u32*)(address)) {
@@ -513,6 +554,10 @@ static size_t GetInstructionSize(uptr address) {
#else
+ switch (*(u8*)address) {
+ case 0xA1: // A1 XX XX XX XX : mov eax, dword ptr ds:[XXXXXXXX]
+ return 5;
+ }
switch (*(u16*)address) {
case 0x458B: // 8B 45 XX : mov eax, dword ptr [ebp + XX]
case 0x5D8B: // 8B 5D XX : mov ebx, dword ptr [ebp + XX]
@@ -566,6 +611,28 @@ static size_t RoundUpToInstrBoundary(size_t size, uptr address) {
return cursor;
}
+static bool CopyInstructions(uptr to, uptr from, size_t size) {
+ size_t cursor = 0;
+ while (cursor != size) {
+ size_t rel_offset = 0;
+ size_t instruction_size = GetInstructionSize(from + cursor, &rel_offset);
+ _memcpy((void*)(to + cursor), (void*)(from + cursor),
+ (size_t)instruction_size);
+ if (rel_offset) {
+ uptr delta = to - from;
+ uptr relocated_offset = *(u32*)(to + cursor + rel_offset) - delta;
+#if SANITIZER_WINDOWS64
+ if (relocated_offset + 0x80000000U >= 0xFFFFFFFFU)
+ return false;
+#endif
+ *(u32*)(to + cursor + rel_offset) = relocated_offset;
+ }
+ cursor += instruction_size;
+ }
+ return true;
+}
+
+
#if !SANITIZER_WINDOWS64
bool OverrideFunctionWithDetour(
uptr old_func, uptr new_func, uptr *orig_old_func) {
@@ -656,7 +723,8 @@ bool OverrideFunctionWithHotPatch(
uptr trampoline = AllocateMemoryForTrampoline(old_func, trampoline_length);
if (!trampoline)
return false;
- CopyInstructions(trampoline, old_func, instruction_size);
+ if (!CopyInstructions(trampoline, old_func, instruction_size))
+ return false;
WriteDirectBranch(trampoline + instruction_size,
old_func + instruction_size);
*orig_old_func = trampoline;
@@ -705,7 +773,8 @@ bool OverrideFunctionWithTrampoline(
uptr trampoline = AllocateMemoryForTrampoline(old_func, trampoline_length);
if (!trampoline)
return false;
- CopyInstructions(trampoline, old_func, instructions_length);
+ if (!CopyInstructions(trampoline, old_func, instructions_length))
+ return false;
WriteDirectBranch(trampoline + instructions_length,
old_func + instructions_length);
*orig_old_func = trampoline;
@@ -820,6 +889,32 @@ uptr InternalGetProcAddress(void *module, const char *func_name) {
if (!strcmp(func_name, name)) {
DWORD index = ordinals[i];
RVAPtr<char> func(module, functions[index]);
+
+ // Handle forwarded functions.
+ DWORD offset = functions[index];
+ if (offset >= export_directory->VirtualAddress &&
+ offset < export_directory->VirtualAddress + export_directory->Size) {
+ // An entry for a forwarded function is a string of the form
+ // "<module>.<function_name>" stored in the export directory.
+ char function_name[256];
+ size_t function_name_length = _strlen(func);
+ if (function_name_length >= sizeof(function_name) - 1)
+ InterceptionFailed();
+
+ _memcpy(function_name, func, function_name_length);
+ function_name[function_name_length] = '\0';
+ char* separator = _strchr(function_name, '.');
+ if (!separator)
+ InterceptionFailed();
+ *separator = '\0';
+
+ void* redirected_module = GetModuleHandleA(function_name);
+ if (!redirected_module)
+ InterceptionFailed();
+ return InternalGetProcAddress(redirected_module, separator + 1);
+ }
+
return (uptr)(char *)func;
}
}
@@ -827,19 +922,18 @@ uptr InternalGetProcAddress(void *module, const char *func_name) {
return 0;
}
-static bool GetFunctionAddressInDLLs(const char *func_name, uptr *func_addr) {
- *func_addr = 0;
+bool OverrideFunction(
+ const char *func_name, uptr new_func, uptr *orig_old_func) {
+ bool hooked = false;
void **DLLs = InterestingDLLsAvailable();
- for (size_t i = 0; *func_addr == 0 && DLLs[i]; ++i)
- *func_addr = InternalGetProcAddress(DLLs[i], func_name);
- return (*func_addr != 0);
-}
-
-bool OverrideFunction(const char *name, uptr new_func, uptr *orig_old_func) {
- uptr orig_func;
- if (!GetFunctionAddressInDLLs(name, &orig_func))
- return false;
- return OverrideFunction(orig_func, new_func, orig_old_func);
+ for (size_t i = 0; DLLs[i]; ++i) {
+ uptr func_addr = InternalGetProcAddress(DLLs[i], func_name);
+ if (func_addr &&
+ OverrideFunction(func_addr, new_func, orig_old_func)) {
+ hooked = true;
+ }
+ }
+ return hooked;
}
bool OverrideImportedFunction(const char *module_to_patch,
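
Editor's note: the rel_offset plumbing above exists because a rip-relative instruction encodes target = address_of_next_instruction + offset; once CopyInstructions moves the instruction by delta = to - from, the stored 32-bit offset must shrink by delta to keep pointing at the same target, and the new 64-bit check rejects the copy when the adjusted offset no longer fits a signed 32-bit displacement. A worked C++ sketch of the fixup (helper name is illustrative):

    // Fix up one rip-relative displacement after relocating an instruction.
    // Returns false when the new displacement cannot be encoded in 32 bits.
    bool fixup_rip_offset(unsigned char *to, unsigned char *from,
                          size_t rel_offset) {
      unsigned long long delta = (unsigned long long)(to - from);
      unsigned int old_disp;
      __builtin_memcpy(&old_disp, to + rel_offset, sizeof(old_disp));
      unsigned long long new_disp = (unsigned long long)old_disp - delta;
      // Same range test as the patch: must fit a signed 32-bit value.
      if (new_disp + 0x80000000ULL >= 0xFFFFFFFFULL)
        return false;
      unsigned int disp32 = (unsigned int)new_disp;
      __builtin_memcpy(to + rel_offset, &disp32, sizeof(disp32));
      return true;
    }
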
diff --git a/lib/interception/tests/CMakeLists.txt b/lib/interception/tests/CMakeLists.txt
index bfe41fed2fed..5ea943f9a82a 100644
--- a/lib/interception/tests/CMakeLists.txt
+++ b/lib/interception/tests/CMakeLists.txt
@@ -29,6 +29,7 @@ else()
endif()
if(MSVC)
list(APPEND INTERCEPTION_TEST_CFLAGS_COMMON -gcodeview)
+ list(APPEND INTERCEPTION_TEST_LINK_FLAGS_COMMON -Wl,-largeaddressaware)
endif()
list(APPEND INTERCEPTION_TEST_LINK_FLAGS_COMMON -g)
diff --git a/lib/interception/tests/interception_linux_test.cc b/lib/interception/tests/interception_linux_test.cc
index 4a1ae785d16f..cc09aa09df3a 100644
--- a/lib/interception/tests/interception_linux_test.cc
+++ b/lib/interception/tests/interception_linux_test.cc
@@ -11,6 +11,10 @@
// Tests for interception_linux.h.
//
//===----------------------------------------------------------------------===//
+
+// Do not declare isdigit in ctype.h.
+#define __NO_CTYPE
+
#include "interception/interception.h"
#include "gtest/gtest.h"
@@ -31,10 +35,9 @@ INTERCEPTOR(int, isdigit, int d) {
namespace __interception {
TEST(Interception, GetRealFunctionAddress) {
- uptr expected_malloc_address = (uptr)(void*)&malloc;
uptr malloc_address = 0;
EXPECT_TRUE(GetRealFunctionAddress("malloc", &malloc_address, 0, 0));
- EXPECT_EQ(expected_malloc_address, malloc_address);
+ EXPECT_NE(0U, malloc_address);
uptr dummy_address = 0;
EXPECT_TRUE(
diff --git a/lib/interception/tests/interception_win_test.cc b/lib/interception/tests/interception_win_test.cc
index 611354f03d12..684ee0303559 100644
--- a/lib/interception/tests/interception_win_test.cc
+++ b/lib/interception/tests/interception_win_test.cc
@@ -163,6 +163,13 @@ const u8 kPatchableCode4[] = {
0x90, 0x90, 0x90, 0x90,
};
+const u8 kPatchableCode5[] = {
+ 0x55, // push ebp
+ 0x8b, 0xec, // mov ebp,esp
+ 0x8d, 0xa4, 0x24, 0x30, 0xfd, 0xff, 0xff, // lea esp,[esp-2D0h]
+ 0x54, // push esp
+};
+
const u8 kUnpatchableCode1[] = {
0xC3, // ret
};
@@ -197,7 +204,29 @@ const u8 kUnpatchableCode6[] = {
// A buffer holding the dynamically generated code under test.
u8* ActiveCode;
-size_t ActiveCodeLength = 4096;
+const size_t ActiveCodeLength = 4096;
+
+int InterceptorFunction(int x);
+
+/// Allocate code memory more than 2GB away from Base.
+u8 *AllocateCode2GBAway(u8 *Base) {
+ // Find a 64K aligned location after Base plus 2GB.
+ size_t TwoGB = 0x80000000;
+ size_t AllocGranularity = 0x10000;
+ Base = (u8 *)((((uptr)Base + TwoGB + AllocGranularity)) & ~(AllocGranularity - 1));
+
+ // Check if that location is free, and if not, loop over regions until we find
+ // one that is.
+ MEMORY_BASIC_INFORMATION mbi = {};
+ while (sizeof(mbi) == VirtualQuery(Base, &mbi, sizeof(mbi))) {
+ if (mbi.State & MEM_FREE) break;
+ Base += mbi.RegionSize;
+ }
+
+ // Allocate one RWX page at the free location.
+ return (u8 *)::VirtualAlloc(Base, ActiveCodeLength, MEM_COMMIT | MEM_RESERVE,
+ PAGE_EXECUTE_READWRITE);
+}
template<class T>
static void LoadActiveCode(
@@ -205,11 +234,8 @@ static void LoadActiveCode(
uptr *entry_point,
FunctionPrefixKind prefix_kind = FunctionPrefixNone) {
if (ActiveCode == nullptr) {
- ActiveCode =
- (u8*)::VirtualAlloc(nullptr, ActiveCodeLength,
- MEM_COMMIT | MEM_RESERVE,
- PAGE_EXECUTE_READWRITE);
- ASSERT_NE(ActiveCode, nullptr);
+ ActiveCode = AllocateCode2GBAway((u8*)&InterceptorFunction);
+ ASSERT_NE(ActiveCode, nullptr) << "failed to allocate RWX memory 2GB away";
}
size_t position = 0;
@@ -474,6 +500,7 @@ TEST(Interception, PatchableFunction) {
EXPECT_TRUE(TestFunctionPatching(kPatchableCode3, override));
#endif
EXPECT_TRUE(TestFunctionPatching(kPatchableCode4, override));
+ EXPECT_TRUE(TestFunctionPatching(kPatchableCode5, override));
EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode1, override));
EXPECT_FALSE(TestFunctionPatching(kUnpatchableCode2, override));
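AllocateCode2GBAway exists because x64 patching relies on rel32 operands: a 5-byte jmp/call can only reach targets within a signed 32-bit displacement, so placing the test code more than 2GB from InterceptorFunction forces the out-of-range path. A minimal sketch of the constraint being exercised, using compiler-rt's uptr/sptr types:

    // A rel32 operand encodes a signed 32-bit displacement relative to the
    // end of the 5-byte jmp/call, so a direct jump only reaches +/-2GB:
    bool FitsInRel32(uptr from, uptr to) {
      sptr delta = (sptr)to - (sptr)(from + 5);
      return delta >= -0x80000000LL && delta <= 0x7FFFFFFFLL;
    }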
diff --git a/lib/lsan/CMakeLists.txt b/lib/lsan/CMakeLists.txt
index 9412c7a42c2f..73e475d2fdba 100644
--- a/lib/lsan/CMakeLists.txt
+++ b/lib/lsan/CMakeLists.txt
@@ -16,9 +16,6 @@ set(LSAN_SOURCES
set(LSAN_SRC_DIR ${CMAKE_CURRENT_SOURCE_DIR})
-add_custom_target(lsan)
-set_target_properties(lsan PROPERTIES FOLDER "Compiler-RT Misc")
-
add_compiler_rt_object_libraries(RTLSanCommon
OS ${SANITIZER_COMMON_SUPPORTED_OS}
ARCHS ${LSAN_COMMON_SUPPORTED_ARCH}
@@ -27,6 +24,8 @@ add_compiler_rt_object_libraries(RTLSanCommon
if(COMPILER_RT_HAS_LSAN)
foreach(arch ${LSAN_SUPPORTED_ARCH})
+ add_compiler_rt_component(lsan)
+
add_compiler_rt_runtime(clang_rt.lsan
STATIC
ARCHS ${arch}
@@ -39,5 +38,3 @@ if(COMPILER_RT_HAS_LSAN)
PARENT_TARGET lsan)
endforeach()
endif()
-
-add_dependencies(compiler-rt lsan)
diff --git a/lib/lsan/lsan_allocator.cc b/lib/lsan/lsan_allocator.cc
index a5220f1a34b1..1f6efc0f8d93 100644
--- a/lib/lsan/lsan_allocator.cc
+++ b/lib/lsan/lsan_allocator.cc
@@ -43,10 +43,17 @@ typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE,
PrimaryAllocator;
#else
static const uptr kMaxAllowedMallocSize = 8UL << 30;
-static const uptr kAllocatorSpace = 0x600000000000ULL;
-static const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
-typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
- sizeof(ChunkMetadata), DefaultSizeClassMap> PrimaryAllocator;
+
+struct AP64 { // Allocator64 parameters. Deliberately using a short name.
+ static const uptr kSpaceBeg = 0x600000000000ULL;
+ static const uptr kSpaceSize = 0x40000000000ULL; // 4T.
+ static const uptr kMetadataSize = sizeof(ChunkMetadata);
+ typedef DefaultSizeClassMap SizeClassMap;
+ typedef NoOpMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+};
+
+typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#endif
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator;
@@ -57,7 +64,9 @@ static Allocator allocator;
static THREADLOCAL AllocatorCache cache;
void InitializeAllocator() {
- allocator.InitLinkerInitialized(common_flags()->allocator_may_return_null);
+ allocator.InitLinkerInitialized(
+ common_flags()->allocator_may_return_null,
+ common_flags()->allocator_release_to_os_interval_ms);
}
void AllocatorThreadFinish() {
diff --git a/lib/lsan/lsan_common.cc b/lib/lsan/lsan_common.cc
index 888a25b206c8..b20941ef23f6 100644
--- a/lib/lsan/lsan_common.cc
+++ b/lib/lsan/lsan_common.cc
@@ -32,6 +32,7 @@ namespace __lsan {
// also to protect the global list of root regions.
BlockingMutex global_mutex(LINKER_INITIALIZED);
+__attribute__((tls_model("initial-exec")))
THREADLOCAL int disable_counter;
bool DisabledInThisThread() { return disable_counter > 0; }
void DisableInThisThread() { disable_counter++; }
@@ -449,6 +450,8 @@ static bool CheckForLeaks() {
Report(
"HINT: For debugging, try setting environment variable "
"LSAN_OPTIONS=verbosity=1:log_threads=1\n");
+ Report(
+ "HINT: LeakSanitizer does not work under ptrace (strace, gdb, etc)\n");
Die();
}
param.leak_report.ApplySuppressions();
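The initial-exec TLS model on disable_counter is deliberate: it makes every access a direct, offset-based load instead of a call to __tls_get_addr, which may itself allocate and therefore must not run on the allocator paths that consult the counter. An illustrative sketch (the accessor is hypothetical; only the attribute usage mirrors the patch):

    __attribute__((tls_model("initial-exec"))) static THREADLOCAL int counter;
    int CounterValue() { return counter; }  // a plain %fs-relative load on x86_64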
diff --git a/lib/lsan/lsan_common_linux.cc b/lib/lsan/lsan_common_linux.cc
index 1f5430395b88..f6154d8b97d1 100644
--- a/lib/lsan/lsan_common_linux.cc
+++ b/lib/lsan/lsan_common_linux.cc
@@ -71,7 +71,7 @@ static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
if (begin <= allocator_begin && allocator_begin < end) {
CHECK_LE(allocator_begin, allocator_end);
- CHECK_LT(allocator_end, end);
+ CHECK_LE(allocator_end, end);
if (begin < allocator_begin)
ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
kReachable);
diff --git a/lib/lsan/lsan_thread.cc b/lib/lsan/lsan_thread.cc
index 8bd6d90edc92..5dff4f748106 100644
--- a/lib/lsan/lsan_thread.cc
+++ b/lib/lsan/lsan_thread.cc
@@ -36,7 +36,7 @@ static const uptr kMaxThreads = 1 << 13;
static const uptr kThreadQuarantineSize = 64;
void InitializeThreadRegistry() {
- static char thread_registry_placeholder[sizeof(ThreadRegistry)] ALIGNED(64);
+ static ALIGNED(64) char thread_registry_placeholder[sizeof(ThreadRegistry)];
thread_registry = new(thread_registry_placeholder)
ThreadRegistry(CreateThreadContext, kMaxThreads, kThreadQuarantineSize);
}
diff --git a/lib/msan/CMakeLists.txt b/lib/msan/CMakeLists.txt
index e7f2877d1b2a..598ae54588c1 100644
--- a/lib/msan/CMakeLists.txt
+++ b/lib/msan/CMakeLists.txt
@@ -25,8 +25,7 @@ append_list_if(COMPILER_RT_HAS_FFREESTANDING_FLAG -ffreestanding MSAN_RTL_CFLAGS
set(MSAN_RUNTIME_LIBRARIES)
# Static runtime library.
-add_custom_target(msan)
-set_target_properties(msan PROPERTIES FOLDER "Compiler-RT Misc")
+add_compiler_rt_component(msan)
foreach(arch ${MSAN_SUPPORTED_ARCH})
add_compiler_rt_runtime(clang_rt.msan
@@ -61,7 +60,6 @@ foreach(arch ${MSAN_SUPPORTED_ARCH})
endforeach()
add_compiler_rt_resource_file(msan_blacklist msan_blacklist.txt msan)
-add_dependencies(compiler-rt msan)
if(COMPILER_RT_INCLUDE_TESTS)
add_subdirectory(tests)
diff --git a/lib/msan/msan.h b/lib/msan/msan.h
index 1f2ff59ca686..0709260eebe2 100644
--- a/lib/msan/msan.h
+++ b/lib/msan/msan.h
@@ -42,27 +42,43 @@ struct MappingDesc {
#if SANITIZER_LINUX && defined(__mips64)
-// Everything is above 0x00e000000000.
+// MIPS64 maps:
+// - 0x0000000000-0x0200000000: the program's own segments
+// - 0xa200000000-0xc000000000: PIE program segments
+// - 0xe200000000-0xffffffffff: library segments.
const MappingDesc kMemoryLayout[] = {
- {0x000000000000ULL, 0x00a000000000ULL, MappingDesc::INVALID, "invalid"},
- {0x00a000000000ULL, 0x00c000000000ULL, MappingDesc::SHADOW, "shadow"},
- {0x00c000000000ULL, 0x00e000000000ULL, MappingDesc::ORIGIN, "origin"},
- {0x00e000000000ULL, 0x010000000000ULL, MappingDesc::APP, "app"}};
-
-#define MEM_TO_SHADOW(mem) (((uptr)(mem)) & ~0x4000000000ULL)
-#define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x002000000000)
+ {0x000000000000ULL, 0x000200000000ULL, MappingDesc::APP, "app-1"},
+ {0x000200000000ULL, 0x002200000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x002200000000ULL, 0x004000000000ULL, MappingDesc::SHADOW, "shadow-2"},
+ {0x004000000000ULL, 0x004200000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x004200000000ULL, 0x006000000000ULL, MappingDesc::ORIGIN, "origin-2"},
+ {0x006000000000ULL, 0x006200000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x006200000000ULL, 0x008000000000ULL, MappingDesc::SHADOW, "shadow-3"},
+ {0x008000000000ULL, 0x008200000000ULL, MappingDesc::SHADOW, "shadow-1"},
+ {0x008200000000ULL, 0x00a000000000ULL, MappingDesc::ORIGIN, "origin-3"},
+ {0x00a000000000ULL, 0x00a200000000ULL, MappingDesc::ORIGIN, "origin-1"},
+ {0x00a200000000ULL, 0x00c000000000ULL, MappingDesc::APP, "app-2"},
+ {0x00c000000000ULL, 0x00e200000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x00e200000000ULL, 0x00ffffffffffULL, MappingDesc::APP, "app-3"}};
+
+#define MEM_TO_SHADOW(mem) (((uptr)(mem)) ^ 0x8000000000ULL)
+#define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x2000000000ULL)
#elif SANITIZER_LINUX && defined(__aarch64__)
-// The mapping describes both 39-bits and 42-bits. AArch64 maps:
-// - 0x00000000000-0x00010000000: 39/42-bits program own segments
-// - 0x05500000000-0x05600000000: 39-bits PIE program segments
-// - 0x07f80000000-0x07fffffffff: 39-bits libraries segments
-// - 0x2aa00000000-0x2ab00000000: 42-bits PIE program segments
-// - 0x3ff00000000-0x3ffffffffff: 42-bits libraries segments
+// The mapping describes the 39-bit, 42-bit, and 48-bit VMAs. AArch64
+// maps:
+// - 0x0000000000000-0x0000010000000: 39/42/48-bit program own segments
+// - 0x0005500000000-0x0005600000000: 39-bit PIE program segments
+// - 0x0007f80000000-0x0007fffffffff: 39-bit library segments
+// - 0x002aa00000000-0x002ab00000000: 42-bit PIE program segments
+// - 0x003ff00000000-0x003ffffffffff: 42-bit library segments
+// - 0x0aaaaa0000000-0x0aaab00000000: 48-bit PIE program segments
+// - 0xffff000000000-0x1000000000000: 48-bit library segments
// It is fragmented in multiple segments to increase the memory available
// on 42-bits (12.21% of total VMA available for 42-bits and 13.28% for
-// 39 bits).
+// 39 bits). The 48-bit segments only cover the usual PIE/default segments
+// plus some more segments (262144GB total, 0.39% of the total VMA).
const MappingDesc kMemoryLayout[] = {
{0x00000000000ULL, 0x01000000000ULL, MappingDesc::INVALID, "invalid"},
{0x01000000000ULL, 0x02000000000ULL, MappingDesc::SHADOW, "shadow-2"},
@@ -103,6 +119,42 @@ const MappingDesc kMemoryLayout[] = {
{0x3D000000000ULL, 0x3E000000000ULL, MappingDesc::SHADOW, "shadow-8"},
{0x3E000000000ULL, 0x3F000000000ULL, MappingDesc::ORIGIN, "origin-8"},
{0x3F000000000ULL, 0x40000000000ULL, MappingDesc::APP, "app-9"},
+ // The mappings below are used only for 48-bits VMA.
+  // TODO(unknown): the 48-bit mapping only covers the usual PIE and non-PIE
+  // segments plus some more segments, totaling 262144GB of VMA (which covers
+  // only 0.32% of the 48-bit VMA). Memory availability can be increased by
+  // adding multiple application segments as in the 39- and 42-bit mappings.
+ {0x0040000000000ULL, 0x0041000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x0041000000000ULL, 0x0042000000000ULL, MappingDesc::APP, "app-10"},
+ {0x0042000000000ULL, 0x0047000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x0047000000000ULL, 0x0048000000000ULL, MappingDesc::SHADOW, "shadow-10"},
+ {0x0048000000000ULL, 0x0049000000000ULL, MappingDesc::ORIGIN, "origin-10"},
+ {0x0049000000000ULL, 0x0050000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x0050000000000ULL, 0x0051000000000ULL, MappingDesc::APP, "app-11"},
+ {0x0051000000000ULL, 0x0056000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x0056000000000ULL, 0x0057000000000ULL, MappingDesc::SHADOW, "shadow-11"},
+ {0x0057000000000ULL, 0x0058000000000ULL, MappingDesc::ORIGIN, "origin-11"},
+ {0x0058000000000ULL, 0x0059000000000ULL, MappingDesc::APP, "app-12"},
+ {0x0059000000000ULL, 0x005E000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x005E000000000ULL, 0x005F000000000ULL, MappingDesc::SHADOW, "shadow-12"},
+ {0x005F000000000ULL, 0x0060000000000ULL, MappingDesc::ORIGIN, "origin-12"},
+ {0x0060000000000ULL, 0x0061000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x0061000000000ULL, 0x0062000000000ULL, MappingDesc::APP, "app-13"},
+ {0x0062000000000ULL, 0x0067000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x0067000000000ULL, 0x0068000000000ULL, MappingDesc::SHADOW, "shadow-13"},
+ {0x0068000000000ULL, 0x0069000000000ULL, MappingDesc::ORIGIN, "origin-13"},
+ {0x0069000000000ULL, 0x0AAAAA0000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x0AAAAA0000000ULL, 0x0AAAB00000000ULL, MappingDesc::APP, "app-14"},
+ {0x0AAAB00000000ULL, 0x0AACAA0000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x0AACAA0000000ULL, 0x0AACB00000000ULL, MappingDesc::SHADOW, "shadow-14"},
+ {0x0AACB00000000ULL, 0x0AADAA0000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x0AADAA0000000ULL, 0x0AADB00000000ULL, MappingDesc::ORIGIN, "origin-14"},
+ {0x0AADB00000000ULL, 0x0FF9F00000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x0FF9F00000000ULL, 0x0FFA000000000ULL, MappingDesc::SHADOW, "shadow-15"},
+ {0x0FFA000000000ULL, 0x0FFAF00000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x0FFAF00000000ULL, 0x0FFB000000000ULL, MappingDesc::ORIGIN, "origin-15"},
+ {0x0FFB000000000ULL, 0x0FFFF00000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x0FFFF00000000ULL, 0x1000000000000ULL, MappingDesc::APP, "app-15"},
};
# define MEM_TO_SHADOW(mem) ((uptr)mem ^ 0x6000000000ULL)
# define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x1000000000ULL)
@@ -277,11 +329,20 @@ const int STACK_TRACE_TAG_POISON = StackTrace::TAG_CUSTOM + 1;
StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), \
common_flags()->fast_unwind_on_malloc)
+// On platforms that support only the slow unwinder, we restrict the store
+// context size to 1, i.e. we store only the current pc. We do this because
+// the slow unwinder, based on libunwind, is not async-signal-safe and causes
+// random freezes in forking applications as well as in signal handlers.
#define GET_STORE_STACK_TRACE_PC_BP(pc, bp) \
BufferedStackTrace stack; \
- if (__msan_get_track_origins() > 1 && msan_inited) \
- GetStackTrace(&stack, flags()->store_context_size, pc, bp, \
- common_flags()->fast_unwind_on_malloc)
+ if (__msan_get_track_origins() > 1 && msan_inited) { \
+ if (!SANITIZER_CAN_FAST_UNWIND) \
+ GetStackTrace(&stack, Min(1, flags()->store_context_size), pc, bp, \
+ false); \
+ else \
+ GetStackTrace(&stack, flags()->store_context_size, pc, bp, \
+ common_flags()->fast_unwind_on_malloc); \
+ }
#define GET_FATAL_STACK_TRACE_PC_BP(pc, bp) \
BufferedStackTrace stack; \
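A worked example of the new MIPS64 mapping above, assuming the MEM_TO_SHADOW/SHADOW_TO_ORIGIN definitions just shown: flipping bit 39 sends each app range to its shadow range, and origins sit at a fixed offset from shadow.

    uptr app    = 0xa200001000ULL;              // inside "app-2"
    uptr shadow = app ^ 0x8000000000ULL;        // 0x2200001000, in "shadow-2"
    uptr origin = shadow + 0x2000000000ULL;     // 0x4200001000, in "origin-2"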
diff --git a/lib/msan/msan_allocator.cc b/lib/msan/msan_allocator.cc
index b7d394729bfc..6c389f008cf7 100644
--- a/lib/msan/msan_allocator.cc
+++ b/lib/msan/msan_allocator.cc
@@ -33,9 +33,12 @@ struct MsanMapUnmapCallback {
// We are about to unmap a chunk of user memory.
// Mark the corresponding shadow memory as not needed.
- FlushUnneededShadowMemory(MEM_TO_SHADOW(p), size);
- if (__msan_get_track_origins())
- FlushUnneededShadowMemory(MEM_TO_ORIGIN(p), size);
+ uptr shadow_p = MEM_TO_SHADOW(p);
+ ReleaseMemoryPagesToOS(shadow_p, shadow_p + size);
+ if (__msan_get_track_origins()) {
+ uptr origin_p = MEM_TO_ORIGIN(p);
+ ReleaseMemoryPagesToOS(origin_p, origin_p + size);
+ }
}
};
@@ -56,23 +59,32 @@ struct MsanMapUnmapCallback {
#else
static const uptr kAllocatorSpace = 0x600000000000ULL;
#endif
- static const uptr kAllocatorSize = 0x80000000000; // 8T.
- static const uptr kMetadataSize = sizeof(Metadata);
static const uptr kMaxAllowedMallocSize = 8UL << 30;
- typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, kMetadataSize,
- DefaultSizeClassMap,
- MsanMapUnmapCallback> PrimaryAllocator;
+ struct AP64 { // Allocator64 parameters. Deliberately using a short name.
+ static const uptr kSpaceBeg = kAllocatorSpace;
+ static const uptr kSpaceSize = 0x40000000000; // 4T.
+ static const uptr kMetadataSize = sizeof(Metadata);
+ typedef DefaultSizeClassMap SizeClassMap;
+ typedef MsanMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+ };
+
+ typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#elif defined(__powerpc64__)
- static const uptr kAllocatorSpace = 0x300000000000;
- static const uptr kAllocatorSize = 0x020000000000; // 2T
- static const uptr kMetadataSize = sizeof(Metadata);
static const uptr kMaxAllowedMallocSize = 2UL << 30; // 2G
- typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, kMetadataSize,
- DefaultSizeClassMap,
- MsanMapUnmapCallback> PrimaryAllocator;
+ struct AP64 { // Allocator64 parameters. Deliberately using a short name.
+ static const uptr kSpaceBeg = 0x300000000000;
+ static const uptr kSpaceSize = 0x020000000000; // 2T.
+ static const uptr kMetadataSize = sizeof(Metadata);
+ typedef DefaultSizeClassMap SizeClassMap;
+ typedef MsanMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+ };
+
+ typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#elif defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 2UL << 30; // 2G
static const uptr kRegionSizeLog = 20;
@@ -94,7 +106,9 @@ static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;
void MsanAllocatorInit() {
- allocator.Init(common_flags()->allocator_may_return_null);
+ allocator.Init(
+ common_flags()->allocator_may_return_null,
+ common_flags()->allocator_release_to_os_interval_ms);
}
AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
@@ -112,7 +126,7 @@ static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
if (size > kMaxAllowedMallocSize) {
Report("WARNING: MemorySanitizer failed to allocate %p bytes\n",
(void *)size);
- return allocator.ReturnNullOrDie();
+ return allocator.ReturnNullOrDieOnBadRequest();
}
MsanThread *t = GetCurrentThread();
void *allocated;
@@ -170,7 +184,7 @@ void MsanDeallocate(StackTrace *stack, void *p) {
void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
if (CallocShouldReturnNullDueToOverflow(size, nmemb))
- return allocator.ReturnNullOrDie();
+ return allocator.ReturnNullOrDieOnBadRequest();
return MsanReallocate(stack, nullptr, nmemb * size, sizeof(u64), true);
}
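Both allocators above move from a positional template-parameter list to a single params struct (AP64), so new knobs such as kFlags can be added without touching every instantiation. A sketch of the pattern with a hypothetical parameter struct:

    struct MyAP64 {  // hypothetical; mirrors the AP64 structs above
      static const uptr kSpaceBeg = 0x700000000000ULL;
      static const uptr kSpaceSize = 0x40000000000ULL;  // 4T.
      static const uptr kMetadataSize = 0;
      typedef DefaultSizeClassMap SizeClassMap;
      typedef NoOpMapUnmapCallback MapUnmapCallback;
      static const uptr kFlags = 0;
    };
    typedef SizeClassAllocator64<MyAP64> MyPrimaryAllocator;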
diff --git a/lib/msan/msan_interceptors.cc b/lib/msan/msan_interceptors.cc
index f23d3eeb3eda..6447bb1b270e 100644
--- a/lib/msan/msan_interceptors.cc
+++ b/lib/msan/msan_interceptors.cc
@@ -45,6 +45,8 @@ using __sanitizer::atomic_uintptr_t;
DECLARE_REAL(SIZE_T, strlen, const char *s)
DECLARE_REAL(SIZE_T, strnlen, const char *s, SIZE_T maxlen)
+DECLARE_REAL(void *, memcpy, void *dest, const void *src, uptr n)
+DECLARE_REAL(void *, memset, void *dest, int c, uptr n)
#if SANITIZER_FREEBSD
#define __errno_location __error
@@ -64,6 +66,23 @@ bool IsInInterceptorScope() {
return in_interceptor_scope;
}
+static uptr allocated_for_dlsym;
+static const uptr kDlsymAllocPoolSize = 1024;
+static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize];
+
+static bool IsInDlsymAllocPool(const void *ptr) {
+ uptr off = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
+ return off < sizeof(alloc_memory_for_dlsym);
+}
+
+static void *AllocateFromLocalPool(uptr size_in_bytes) {
+ uptr size_in_words = RoundUpTo(size_in_bytes, kWordSize) / kWordSize;
+ void *mem = (void *)&alloc_memory_for_dlsym[allocated_for_dlsym];
+ allocated_for_dlsym += size_in_words;
+ CHECK_LT(allocated_for_dlsym, kDlsymAllocPoolSize);
+ return mem;
+}
+
#define ENSURE_MSAN_INITED() do { \
CHECK(!msan_init_is_running); \
if (!msan_inited) { \
@@ -135,10 +154,6 @@ INTERCEPTOR(SSIZE_T, readlink, const char *path, char *buf, SIZE_T bufsiz) {
return res;
}
-INTERCEPTOR(void *, memcpy, void *dest, const void *src, SIZE_T n) {
- return __msan_memcpy(dest, src, n);
-}
-
INTERCEPTOR(void *, mempcpy, void *dest, const void *src, SIZE_T n) {
return (char *)__msan_memcpy(dest, src, n) + n;
}
@@ -153,14 +168,6 @@ INTERCEPTOR(void *, memccpy, void *dest, const void *src, int c, SIZE_T n) {
return res;
}
-INTERCEPTOR(void *, memmove, void *dest, const void *src, SIZE_T n) {
- return __msan_memmove(dest, src, n);
-}
-
-INTERCEPTOR(void *, memset, void *s, int c, SIZE_T n) {
- return __msan_memset(s, c, n);
-}
-
INTERCEPTOR(void *, bcopy, const void *src, void *dest, SIZE_T n) {
return __msan_memmove(dest, src, n);
}
@@ -227,14 +234,14 @@ INTERCEPTOR(void *, pvalloc, SIZE_T size) {
INTERCEPTOR(void, free, void *ptr) {
GET_MALLOC_STACK_TRACE;
- if (!ptr) return;
+ if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr))) return;
MsanDeallocate(&stack, ptr);
}
#if !SANITIZER_FREEBSD
INTERCEPTOR(void, cfree, void *ptr) {
GET_MALLOC_STACK_TRACE;
- if (!ptr) return;
+ if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr))) return;
MsanDeallocate(&stack, ptr);
}
#define MSAN_MAYBE_INTERCEPT_CFREE INTERCEPT_FUNCTION(cfree)
@@ -907,27 +914,35 @@ INTERCEPTOR(int, epoll_pwait, int epfd, void *events, int maxevents,
INTERCEPTOR(void *, calloc, SIZE_T nmemb, SIZE_T size) {
GET_MALLOC_STACK_TRACE;
- if (UNLIKELY(!msan_inited)) {
+ if (UNLIKELY(!msan_inited))
// Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
- const SIZE_T kCallocPoolSize = 1024;
- static uptr calloc_memory_for_dlsym[kCallocPoolSize];
- static SIZE_T allocated;
- SIZE_T size_in_words = ((nmemb * size) + kWordSize - 1) / kWordSize;
- void *mem = (void*)&calloc_memory_for_dlsym[allocated];
- allocated += size_in_words;
- CHECK(allocated < kCallocPoolSize);
- return mem;
- }
+ return AllocateFromLocalPool(nmemb * size);
return MsanCalloc(&stack, nmemb, size);
}
INTERCEPTOR(void *, realloc, void *ptr, SIZE_T size) {
GET_MALLOC_STACK_TRACE;
+ if (UNLIKELY(IsInDlsymAllocPool(ptr))) {
+ uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
+ uptr copy_size = Min(size, kDlsymAllocPoolSize - offset);
+ void *new_ptr;
+ if (UNLIKELY(!msan_inited)) {
+ new_ptr = AllocateFromLocalPool(copy_size);
+ } else {
+ copy_size = size;
+ new_ptr = MsanReallocate(&stack, nullptr, copy_size, sizeof(u64), false);
+ }
+ internal_memcpy(new_ptr, ptr, copy_size);
+ return new_ptr;
+ }
return MsanReallocate(&stack, ptr, size, sizeof(u64), false);
}
INTERCEPTOR(void *, malloc, SIZE_T size) {
GET_MALLOC_STACK_TRACE;
+ if (UNLIKELY(!msan_inited))
+ // Hack: dlsym calls malloc before REAL(malloc) is retrieved from dlsym.
+ return AllocateFromLocalPool(size);
return MsanReallocate(&stack, nullptr, size, sizeof(u64), false);
}
@@ -1329,11 +1344,23 @@ int OnExit() {
*begin = *end = 0; \
}
+#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size) \
+ { \
+ (void)ctx; \
+ return __msan_memset(block, c, size); \
+ }
+#define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size) \
+ { \
+ (void)ctx; \
+ return __msan_memmove(to, from, size); \
+ }
+#define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size) \
+ { \
+ (void)ctx; \
+ return __msan_memcpy(to, from, size); \
+ }
+
#include "sanitizer_common/sanitizer_platform_interceptors.h"
-// Msan needs custom handling of these:
-#undef SANITIZER_INTERCEPT_MEMSET
-#undef SANITIZER_INTERCEPT_MEMMOVE
-#undef SANITIZER_INTERCEPT_MEMCPY
#include "sanitizer_common/sanitizer_common_interceptors.inc"
#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) CHECK_UNPOISONED(p, s)
@@ -1489,11 +1516,8 @@ void InitializeInterceptors() {
INTERCEPT_FUNCTION(fread);
MSAN_MAYBE_INTERCEPT_FREAD_UNLOCKED;
INTERCEPT_FUNCTION(readlink);
- INTERCEPT_FUNCTION(memcpy);
INTERCEPT_FUNCTION(memccpy);
INTERCEPT_FUNCTION(mempcpy);
- INTERCEPT_FUNCTION(memset);
- INTERCEPT_FUNCTION(memmove);
INTERCEPT_FUNCTION(bcopy);
INTERCEPT_FUNCTION(wmemset);
INTERCEPT_FUNCTION(wmemcpy);
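The dlsym pool added above solves a bootstrap cycle: dlsym() itself calls calloc/malloc while the interceptors are still resolving REAL(...), so those first allocations are carved from a static array and never truly freed (free/cfree ignore pool pointers via IsInDlsymAllocPool). A compressed sketch of the same idea, with hypothetical names:

    static uptr pool[1024];            // lives for the whole process
    static uptr used_words;
    static void *EarlyAlloc(uptr bytes) {
      uptr words = (bytes + sizeof(uptr) - 1) / sizeof(uptr);
      void *p = &pool[used_words];
      used_words += words;             // no free: early callers leak into the pool
      return p;
    }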
diff --git a/lib/msan/msan_interface_internal.h b/lib/msan/msan_interface_internal.h
index c1e02ce72bf4..c6990db243c1 100644
--- a/lib/msan/msan_interface_internal.h
+++ b/lib/msan/msan_interface_internal.h
@@ -37,6 +37,16 @@ void __msan_warning();
SANITIZER_INTERFACE_ATTRIBUTE __attribute__((noreturn))
void __msan_warning_noreturn();
+using __sanitizer::uptr;
+using __sanitizer::sptr;
+using __sanitizer::uu64;
+using __sanitizer::uu32;
+using __sanitizer::uu16;
+using __sanitizer::u64;
+using __sanitizer::u32;
+using __sanitizer::u16;
+using __sanitizer::u8;
+
SANITIZER_INTERFACE_ATTRIBUTE
void __msan_maybe_warning_1(u8 s, u32 o);
SANITIZER_INTERFACE_ATTRIBUTE
diff --git a/lib/msan/msan_linux.cc b/lib/msan/msan_linux.cc
index d6a95889ad0f..0a687f620c94 100644
--- a/lib/msan/msan_linux.cc
+++ b/lib/msan/msan_linux.cc
@@ -66,7 +66,8 @@ static bool ProtectMemoryRange(uptr beg, uptr size, const char *name) {
}
if ((uptr)addr != beg) {
uptr end = beg + size - 1;
- Printf("FATAL: Cannot protect memory range %p - %p.\n", beg, end);
+ Printf("FATAL: Cannot protect memory range %p - %p (%s).\n", beg, end,
+ name);
return false;
}
}
diff --git a/lib/profile/CMakeLists.txt b/lib/profile/CMakeLists.txt
index ccf79d7e7267..006285b34943 100644
--- a/lib/profile/CMakeLists.txt
+++ b/lib/profile/CMakeLists.txt
@@ -38,8 +38,7 @@ int main() {
" COMPILER_RT_TARGET_HAS_FCNTL_LCK)
-add_custom_target(profile)
-set_target_properties(profile PROPERTIES FOLDER "Compiler-RT Misc")
+add_compiler_rt_component(profile)
set(PROFILE_SOURCES
GCDAProfiling.c
@@ -99,5 +98,3 @@ else()
SOURCES ${PROFILE_SOURCES}
PARENT_TARGET profile)
endif()
-
-add_dependencies(compiler-rt profile)
diff --git a/lib/profile/GCDAProfiling.c b/lib/profile/GCDAProfiling.c
index 2756084f5fd3..138af6ec4033 100644
--- a/lib/profile/GCDAProfiling.c
+++ b/lib/profile/GCDAProfiling.c
@@ -20,7 +20,6 @@
|*
\*===----------------------------------------------------------------------===*/
-#include "InstrProfilingInternal.h"
#include "InstrProfilingPort.h"
#include "InstrProfilingUtil.h"
@@ -35,6 +34,9 @@
#else
#include <sys/mman.h>
#include <sys/file.h>
+#ifndef MAP_FILE
+#define MAP_FILE 0
+#endif
#endif
#if defined(__FreeBSD__) && defined(__i386__)
diff --git a/lib/profile/InstrProfData.inc b/lib/profile/InstrProfData.inc
index 93d14ac4f6f9..f7c22d10763c 100644
--- a/lib/profile/InstrProfData.inc
+++ b/lib/profile/InstrProfData.inc
@@ -72,7 +72,7 @@
#endif
INSTR_PROF_DATA(const uint64_t, llvm::Type::getInt64Ty(Ctx), NameRef, \
ConstantInt::get(llvm::Type::getInt64Ty(Ctx), \
- IndexedInstrProf::ComputeHash(getPGOFuncNameVarInitializer(Inc->getName()))))
+ IndexedInstrProf::ComputeHash(getPGOFuncNameVarInitializer(Inc->getName()))))
INSTR_PROF_DATA(const uint64_t, llvm::Type::getInt64Ty(Ctx), FuncHash, \
ConstantInt::get(llvm::Type::getInt64Ty(Ctx), \
Inc->getHash()->getZExtValue()))
@@ -204,7 +204,7 @@ COVMAP_FUNC_RECORD(const uint32_t, llvm::Type::getInt32Ty(Ctx), NameSize, \
#else
COVMAP_FUNC_RECORD(const int64_t, llvm::Type::getInt64Ty(Ctx), NameRef, \
llvm::ConstantInt::get(llvm::Type::getInt64Ty(Ctx), \
- llvm::IndexedInstrProf::ComputeHash(NameValue)))
+ llvm::IndexedInstrProf::ComputeHash(NameValue)))
#endif
COVMAP_FUNC_RECORD(const uint32_t, llvm::Type::getInt32Ty(Ctx), DataSize, \
llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx),\
@@ -603,7 +603,12 @@ serializeValueProfDataFrom(ValueProfRecordClosure *Closure,
#define VARIANT_MASKS_ALL 0xff00000000000000ULL
#define GET_VERSION(V) ((V) & ~VARIANT_MASKS_ALL)
#define VARIANT_MASK_IR_PROF (0x1ULL << 56)
-#define IR_LEVEL_PROF_VERSION_VAR __llvm_profile_raw_version
+#define INSTR_PROF_RAW_VERSION_VAR __llvm_profile_raw_version
+#define INSTR_PROF_PROFILE_RUNTIME_VAR __llvm_profile_runtime
+
+/* The variable that holds the name of the profile data
+ * specified on the command line. */
+#define INSTR_PROF_PROFILE_NAME_VAR __llvm_profile_filename
/* Runtime section names and name strings. */
#define INSTR_PROF_DATA_SECT_NAME __llvm_prf_data
diff --git a/lib/profile/InstrProfiling.c b/lib/profile/InstrProfiling.c
index c763a44233a0..6828a3d27f34 100644
--- a/lib/profile/InstrProfiling.c
+++ b/lib/profile/InstrProfiling.c
@@ -16,15 +16,26 @@
#define INSTR_PROF_VALUE_PROF_DATA
#include "InstrProfData.inc"
-COMPILER_RT_VISIBILITY char *(*GetEnvHook)(const char *) = 0;
-COMPILER_RT_WEAK uint64_t __llvm_profile_raw_version = INSTR_PROF_RAW_VERSION;
+COMPILER_RT_WEAK uint64_t INSTR_PROF_RAW_VERSION_VAR = INSTR_PROF_RAW_VERSION;
+
+COMPILER_RT_WEAK char INSTR_PROF_PROFILE_NAME_VAR[1] = {0};
COMPILER_RT_VISIBILITY uint64_t __llvm_profile_get_magic(void) {
return sizeof(void *) == sizeof(uint64_t) ? (INSTR_PROF_RAW_MAGIC_64)
: (INSTR_PROF_RAW_MAGIC_32);
}
+static unsigned ProfileDumped = 0;
+
+COMPILER_RT_VISIBILITY unsigned lprofProfileDumped() {
+ return ProfileDumped;
+}
+
+COMPILER_RT_VISIBILITY void lprofSetProfileDumped() {
+ ProfileDumped = 1;
+}
+
/* Return the number of bytes needed to add to SizeInBytes to make it
* the result a multiple of 8.
*/
@@ -66,4 +77,5 @@ COMPILER_RT_VISIBILITY void __llvm_profile_reset_counters(void) {
}
}
}
+ ProfileDumped = 0;
}
diff --git a/lib/profile/InstrProfiling.h b/lib/profile/InstrProfiling.h
index b23bed8ea3a8..945f1c4ac38d 100644
--- a/lib/profile/InstrProfiling.h
+++ b/lib/profile/InstrProfiling.h
@@ -112,35 +112,43 @@ void INSTR_PROF_VALUE_PROF_FUNC(
* Writes to the file with the last name given to \a *
* __llvm_profile_set_filename(),
* or if it hasn't been called, the \c LLVM_PROFILE_FILE environment variable,
- * or if that's not set, the last name given to
- * \a __llvm_profile_override_default_filename(), or if that's not set,
- * \c "default.profraw".
+ * or if that's not set, the last name set to INSTR_PROF_PROFILE_NAME_VAR,
+ * or if that's not set, \c "default.profraw".
*/
int __llvm_profile_write_file(void);
/*!
- * \brief Set the filename for writing instrumentation data.
+ * \brief This is a wrapper interface to \c __llvm_profile_write_file.
+ * After this interface is invoked, an already-dumped flag is set so
+ * that the profile won't be dumped again at program exit. Invoking
+ * \c __llvm_profile_reset_counters clears the flag. This interface is
+ * designed for collecting profile data from user-selected hot regions.
+ * The usage model is
+ * __llvm_profile_reset_counters();
+ * ... hot region 1
+ * __llvm_profile_dump();
+ * .. some other code
+ * __llvm_profile_reset_counters();
+ * ... hot region 2
+ * __llvm_profile_dump();
*
- * Sets the filename to be used for subsequent calls to
- * \a __llvm_profile_write_file().
- *
- * \c Name is not copied, so it must remain valid. Passing NULL resets the
- * filename logic to the default behaviour.
+ * It is expected that online profile merging is enabled, with the \c %m
+ * specifier used in the profile filename. If merging is not turned on, the
+ * user is expected to invoke __llvm_profile_set_filename to set a different
+ * profile name for each region before dumping, to avoid clobbering writes.
*/
-void __llvm_profile_set_filename(const char *Name);
+int __llvm_profile_dump(void);
/*!
- * \brief Set the filename for writing instrumentation data, unless the
- * \c LLVM_PROFILE_FILE environment variable was set.
+ * \brief Set the filename for writing instrumentation data.
*
- * Unless overridden, sets the filename to be used for subsequent calls to
+ * Sets the filename to be used for subsequent calls to
* \a __llvm_profile_write_file().
*
* \c Name is not copied, so it must remain valid. Passing NULL resets the
- * filename logic to the default behaviour (unless the \c LLVM_PROFILE_FILE
- * was set in which case it has no effect).
+ * filename logic to the default behaviour.
*/
-void __llvm_profile_override_default_filename(const char *Name);
+void __llvm_profile_set_filename(const char *Name);
/*! \brief Register to write instrumentation data to file at exit. */
int __llvm_profile_register_write_file_atexit(void);
@@ -148,6 +156,16 @@ int __llvm_profile_register_write_file_atexit(void);
/*! \brief Initialize file handling. */
void __llvm_profile_initialize_file(void);
+/*!
+ * \brief Return the path prefix (excluding the base filename) of the profile
+ * data. This is useful for users of \c -fprofile-generate=./path_prefix who
+ * do not care about the default raw profile name. It is also useful for
+ * collecting more than one profile data file dumped into the same directory
+ * (online merge mode is turned on for instrumented programs with shared
+ * libs). Side effect: this API call allocates memory with malloc.
+ */
+const char *__llvm_profile_get_path_prefix();
+
/*! \brief Get the magic token for the file format. */
uint64_t __llvm_profile_get_magic(void);
@@ -166,8 +184,8 @@ uint64_t __llvm_profile_get_data_size(const __llvm_profile_data *Begin,
* Note that this variable's visibility needs to be hidden so that the
* definition of this variable in an instrumented shared library won't
* affect runtime initialization decision of the main program.
- */
-COMPILER_RT_VISIBILITY extern int __llvm_profile_runtime;
+ * __llvm_profile_runtime. */
+COMPILER_RT_VISIBILITY extern int INSTR_PROF_PROFILE_RUNTIME_VAR;
/*!
* This variable is defined in InstrProfiling.c. Its main purpose is to
@@ -179,6 +197,13 @@ COMPILER_RT_VISIBILITY extern int __llvm_profile_runtime;
* main program are expected to be instrumented in the same way), there is
* no need for this variable to be hidden.
*/
-extern uint64_t __llvm_profile_raw_version;
+extern uint64_t INSTR_PROF_RAW_VERSION_VAR; /* __llvm_profile_raw_version */
+
+/*!
+ * This variable is a weak symbol defined in InstrProfiling.c. It allows
+ * compiler instrumentation to provide overriding definition with value
+ * from compiler command line. This variable has default visibility.
+ */
+extern char INSTR_PROF_PROFILE_NAME_VAR[1]; /* __llvm_profile_filename. */
#endif /* PROFILE_INSTRPROFILING_H_ */
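A hedged usage sketch for the region-profiling API documented above, assuming a build with -fprofile-instr-generate and a filename pattern containing %m so successive dumps merge instead of clobbering (the workload functions are hypothetical):

    __llvm_profile_reset_counters();   /* also clears the already-dumped flag */
    run_hot_region_1();                /* hypothetical workload */
    __llvm_profile_dump();
    __llvm_profile_reset_counters();
    run_hot_region_2();
    __llvm_profile_dump();             /* second dump merges via %m */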
diff --git a/lib/profile/InstrProfilingFile.c b/lib/profile/InstrProfilingFile.c
index 32762d14eef2..f82080c98aac 100644
--- a/lib/profile/InstrProfilingFile.c
+++ b/lib/profile/InstrProfilingFile.c
@@ -59,10 +59,14 @@ static const char *getPNSStr(ProfileNameSpecifier PNS) {
}
#define MAX_PID_SIZE 16
-/* Data structure holding the result of parsed filename pattern. */
+/* Data structure holding the result of a parsed filename pattern. */
typedef struct lprofFilename {
/* File name string possibly with %p or %h specifiers. */
const char *FilenamePat;
+ /* A flag indicating if FilenamePat's memory is allocated
+ * by runtime. */
+ unsigned OwnsFilenamePat;
+ const char *ProfilePathPrefix;
char PidChars[MAX_PID_SIZE];
char Hostname[COMPILER_RT_MAX_HOSTLEN];
unsigned NumPids;
@@ -78,7 +82,8 @@ typedef struct lprofFilename {
ProfileNameSpecifier PNS;
} lprofFilename;
-lprofFilename lprofCurFilename = {0, {0}, {0}, 0, 0, 0, PNS_unknown};
+COMPILER_RT_WEAK lprofFilename lprofCurFilename = {0, 0, 0, {0}, {0},
+ 0, 0, 0, PNS_unknown};
int getpid(void);
static int getCurFilenameLength();
@@ -229,16 +234,17 @@ static void truncateCurrentFile(void) {
return;
/* Create the directory holding the file, if needed. */
- if (strchr(Filename, DIR_SEPARATOR)
-#if defined(DIR_SEPARATOR_2)
- || strchr(Filename, DIR_SEPARATOR_2)
-#endif
- ) {
+ if (lprofFindFirstDirSeparator(Filename)) {
char *Copy = (char *)COMPILER_RT_ALLOCA(Length + 1);
strncpy(Copy, Filename, Length + 1);
__llvm_profile_recursive_mkdir(Copy);
}
+  /* Bypass file truncation to allow online raw profile
+   * merging. */
+ if (lprofCurFilename.MergePoolSize)
+ return;
+
/* Truncate the file. Later we'll reopen and append. */
File = fopen(Filename, "w");
if (!File)
@@ -248,6 +254,9 @@ static void truncateCurrentFile(void) {
static const char *DefaultProfileName = "default.profraw";
static void resetFilenameToDefault(void) {
+ if (lprofCurFilename.FilenamePat && lprofCurFilename.OwnsFilenamePat) {
+ free((void *)lprofCurFilename.FilenamePat);
+ }
memset(&lprofCurFilename, 0, sizeof(lprofCurFilename));
lprofCurFilename.FilenamePat = DefaultProfileName;
lprofCurFilename.PNS = PNS_default;
@@ -263,31 +272,46 @@ static int containsMergeSpecifier(const char *FilenamePat, int I) {
/* Parses the pattern string \p FilenamePat and stores the result to
* lprofcurFilename structure. */
-static int parseFilenamePattern(const char *FilenamePat) {
+static int parseFilenamePattern(const char *FilenamePat,
+ unsigned CopyFilenamePat) {
int NumPids = 0, NumHosts = 0, I;
char *PidChars = &lprofCurFilename.PidChars[0];
char *Hostname = &lprofCurFilename.Hostname[0];
int MergingEnabled = 0;
- lprofCurFilename.FilenamePat = FilenamePat;
+  /* Clean up the cached prefix and any runtime-owned filename pattern
+   * before wiping the struct; freeing after the memset would never fire. */
+  if (lprofCurFilename.ProfilePathPrefix)
+    free((void *)lprofCurFilename.ProfilePathPrefix);
+  if (lprofCurFilename.FilenamePat && lprofCurFilename.OwnsFilenamePat) {
+    free((void *)lprofCurFilename.FilenamePat);
+  }
+  memset(&lprofCurFilename, 0, sizeof(lprofCurFilename));
+
+ if (!CopyFilenamePat)
+ lprofCurFilename.FilenamePat = FilenamePat;
+ else {
+ lprofCurFilename.FilenamePat = strdup(FilenamePat);
+ lprofCurFilename.OwnsFilenamePat = 1;
+ }
/* Check the filename for "%p", which indicates a pid-substitution. */
for (I = 0; FilenamePat[I]; ++I)
if (FilenamePat[I] == '%') {
if (FilenamePat[++I] == 'p') {
if (!NumPids++) {
if (snprintf(PidChars, MAX_PID_SIZE, "%d", getpid()) <= 0) {
- PROF_WARN(
- "Unable to parse filename pattern %s. Using the default name.",
- FilenamePat);
+ PROF_WARN("Unable to get pid for filename pattern %s. Using the "
+ "default name.",
+ FilenamePat);
return -1;
}
}
} else if (FilenamePat[I] == 'h') {
if (!NumHosts++)
if (COMPILER_RT_GETHOSTNAME(Hostname, COMPILER_RT_MAX_HOSTLEN)) {
- PROF_WARN(
- "Unable to parse filename pattern %s. Using the default name.",
- FilenamePat);
+ PROF_WARN("Unable to get hostname for filename pattern %s. Using "
+ "the default name.",
+ FilenamePat);
return -1;
}
} else if (containsMergeSpecifier(FilenamePat, I)) {
@@ -312,7 +336,8 @@ static int parseFilenamePattern(const char *FilenamePat) {
}
static void parseAndSetFilename(const char *FilenamePat,
- ProfileNameSpecifier PNS) {
+ ProfileNameSpecifier PNS,
+ unsigned CopyFilenamePat) {
const char *OldFilenamePat = lprofCurFilename.FilenamePat;
ProfileNameSpecifier OldPNS = lprofCurFilename.PNS;
@@ -323,33 +348,28 @@ static void parseAndSetFilename(const char *FilenamePat,
if (!FilenamePat)
FilenamePat = DefaultProfileName;
- /* When -fprofile-instr-generate=<path> is specified on the
- * command line, each module will be instrumented with runtime
- * init call to __llvm_profile_init function which calls
- * __llvm_profile_override_default_filename. In most of the cases,
- * the path will be identical, so bypass the parsing completely.
- */
if (OldFilenamePat && !strcmp(OldFilenamePat, FilenamePat)) {
lprofCurFilename.PNS = PNS;
return;
}
/* When PNS >= OldPNS, the last one wins. */
- if (!FilenamePat || parseFilenamePattern(FilenamePat))
+ if (!FilenamePat || parseFilenamePattern(FilenamePat, CopyFilenamePat))
resetFilenameToDefault();
lprofCurFilename.PNS = PNS;
if (!OldFilenamePat) {
- PROF_NOTE("Set profile file path to \"%s\" via %s.\n",
- lprofCurFilename.FilenamePat, getPNSStr(PNS));
+ if (getenv("LLVM_PROFILE_VERBOSE"))
+ PROF_NOTE("Set profile file path to \"%s\" via %s.\n",
+ lprofCurFilename.FilenamePat, getPNSStr(PNS));
} else {
- PROF_NOTE("Override old profile path \"%s\" via %s to \"%s\" via %s.\n",
- OldFilenamePat, getPNSStr(OldPNS), lprofCurFilename.FilenamePat,
- getPNSStr(PNS));
+ if (getenv("LLVM_PROFILE_VERBOSE"))
+ PROF_NOTE("Override old profile path \"%s\" via %s to \"%s\" via %s.\n",
+ OldFilenamePat, getPNSStr(OldPNS), lprofCurFilename.FilenamePat,
+ getPNSStr(PNS));
}
- if (!lprofCurFilename.MergePoolSize)
- truncateCurrentFile();
+ truncateCurrentFile();
}
/* Return buffer length that is required to store the current profile
@@ -429,16 +449,61 @@ static const char *getFilenamePatFromEnv(void) {
return Filename;
}
+COMPILER_RT_VISIBILITY
+const char *__llvm_profile_get_path_prefix(void) {
+ int Length;
+ char *FilenameBuf, *Prefix;
+ const char *Filename, *PrefixEnd;
+
+ if (lprofCurFilename.ProfilePathPrefix)
+ return lprofCurFilename.ProfilePathPrefix;
+
+ Length = getCurFilenameLength();
+ FilenameBuf = (char *)COMPILER_RT_ALLOCA(Length + 1);
+ Filename = getCurFilename(FilenameBuf);
+ if (!Filename)
+ return "\0";
+
+ PrefixEnd = lprofFindLastDirSeparator(Filename);
+ if (!PrefixEnd)
+ return "\0";
+
+ Length = PrefixEnd - Filename + 1;
+ Prefix = (char *)malloc(Length + 1);
+ if (!Prefix) {
+ PROF_ERR("Failed to %s\n", "allocate memory.");
+ return "\0";
+ }
+ memcpy(Prefix, Filename, Length);
+ Prefix[Length] = '\0';
+ lprofCurFilename.ProfilePathPrefix = Prefix;
+ return Prefix;
+}
+
/* This method is invoked by the runtime initialization hook
* InstrProfilingRuntime.o if it is linked in. Both user specified
* profile path via -fprofile-instr-generate= and LLVM_PROFILE_FILE
* environment variable can override this default value. */
COMPILER_RT_VISIBILITY
void __llvm_profile_initialize_file(void) {
- const char *FilenamePat;
+ const char *EnvFilenamePat;
+ const char *SelectedPat = NULL;
+ ProfileNameSpecifier PNS = PNS_unknown;
+ int hasCommandLineOverrider = (INSTR_PROF_PROFILE_NAME_VAR[0] != 0);
+
+ EnvFilenamePat = getFilenamePatFromEnv();
+ if (EnvFilenamePat) {
+ SelectedPat = EnvFilenamePat;
+ PNS = PNS_environment;
+ } else if (hasCommandLineOverrider) {
+ SelectedPat = INSTR_PROF_PROFILE_NAME_VAR;
+ PNS = PNS_command_line;
+ } else {
+ SelectedPat = NULL;
+ PNS = PNS_default;
+ }
- FilenamePat = getFilenamePatFromEnv();
- parseAndSetFilename(FilenamePat, FilenamePat ? PNS_environment : PNS_default);
+ parseAndSetFilename(SelectedPat, PNS, 0);
}
/* This API is directly called by the user application code. It has the
@@ -447,18 +512,7 @@ void __llvm_profile_initialize_file(void) {
*/
COMPILER_RT_VISIBILITY
void __llvm_profile_set_filename(const char *FilenamePat) {
- parseAndSetFilename(FilenamePat, PNS_runtime_api);
-}
-
-/*
- * This API is invoked by the global initializers emitted by Clang/LLVM when
- * -fprofile-instr-generate=<..> is specified (vs -fprofile-instr-generate
- * without an argument). This option has lower precedence than the
- * LLVM_PROFILE_FILE environment variable.
- */
-COMPILER_RT_VISIBILITY
-void __llvm_profile_override_default_filename(const char *FilenamePat) {
- parseAndSetFilename(FilenamePat, PNS_command_line);
+ parseAndSetFilename(FilenamePat, PNS_runtime_api, 1);
}
/* The public API for writing profile data into the file with name
@@ -471,6 +525,12 @@ int __llvm_profile_write_file(void) {
const char *Filename;
char *FilenameBuf;
+ if (lprofProfileDumped()) {
+ PROF_NOTE("Profile data not written to file: %s.\n",
+ "already written");
+ return 0;
+ }
+
Length = getCurFilenameLength();
FilenameBuf = (char *)COMPILER_RT_ALLOCA(Length + 1);
Filename = getCurFilename(FilenameBuf);
@@ -497,6 +557,18 @@ int __llvm_profile_write_file(void) {
return rc;
}
+COMPILER_RT_VISIBILITY
+int __llvm_profile_dump(void) {
+ if (!doMerging())
+ PROF_WARN("Later invocation of __llvm_profile_dump can lead to clobbering "
+ " of previously dumped profile data : %s. Either use %%m "
+ "in profile name or change profile name before dumping.\n",
+ "online profile merging is not on");
+ int rc = __llvm_profile_write_file();
+ lprofSetProfileDumped();
+ return rc;
+}
+
static void writeFileWithoutReturn(void) { __llvm_profile_write_file(); }
COMPILER_RT_VISIBILITY
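After this change the profile name is selected with a fixed precedence: the LLVM_PROFILE_FILE environment variable beats the -fprofile-instr-generate=<path> value baked into __llvm_profile_filename, which beats "default.profraw"; a later __llvm_profile_set_filename call (PNS_runtime_api) can still override both, per the "PNS >= OldPNS" rule above. A small sketch of consuming the new prefix API (illustrative values only):

    /* If the current pattern is "/tmp/prof/foo.profraw", this yields
     * "/tmp/prof/". The string is cached (and malloc'ed) by the runtime,
     * so callers must not free it. */
    const char *prefix = __llvm_profile_get_path_prefix();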
diff --git a/lib/profile/InstrProfilingInternal.h b/lib/profile/InstrProfilingInternal.h
index 44f308206ca8..c73b29101302 100644
--- a/lib/profile/InstrProfilingInternal.h
+++ b/lib/profile/InstrProfilingInternal.h
@@ -163,21 +163,13 @@ void lprofSetupValueProfiler();
* to dump merged profile data into its own profile file. */
uint64_t lprofGetLoadModuleSignature();
-/* GCOV_PREFIX and GCOV_PREFIX_STRIP support */
-/* Return the path prefix specified by GCOV_PREFIX environment variable.
- * If GCOV_PREFIX_STRIP is also specified, the strip level (integer value)
- * is returned via \c *PrefixStrip. The prefix length is stored in *PrefixLen.
+/*
+ * Return a nonzero value if the profile data has already been
+ * dumped to the file.
*/
-const char *lprofGetPathPrefix(int *PrefixStrip, size_t *PrefixLen);
-/* Apply the path prefix specified in \c Prefix to path string in \c PathStr,
- * and store the result to buffer pointed to by \c Buffer. If \c PrefixStrip
- * is not zero, path prefixes are stripped from \c PathStr (the level of
- * stripping is specified by \c PrefixStrip) before \c Prefix is added.
- */
-void lprofApplyPathPrefix(char *Dest, const char *PathStr, const char *Prefix,
- size_t PrefixLen, int PrefixStrip);
+unsigned lprofProfileDumped();
+void lprofSetProfileDumped();
-COMPILER_RT_VISIBILITY extern char *(*GetEnvHook)(const char *);
COMPILER_RT_VISIBILITY extern void (*FreeHook)(void *);
COMPILER_RT_VISIBILITY extern uint8_t *DynamicBufferIOBuffer;
COMPILER_RT_VISIBILITY extern uint32_t VPBufferSize;
diff --git a/lib/profile/InstrProfilingPort.h b/lib/profile/InstrProfilingPort.h
index c947153e2517..5789351956b9 100644
--- a/lib/profile/InstrProfilingPort.h
+++ b/lib/profile/InstrProfilingPort.h
@@ -40,14 +40,14 @@
#endif
#define COMPILER_RT_MAX_HOSTLEN 128
-#ifdef _MSC_VER
-#define COMPILER_RT_GETHOSTNAME(Name, Len) gethostname(Name, Len)
-#elif defined(__ORBIS__)
+#ifdef __ORBIS__
#define COMPILER_RT_GETHOSTNAME(Name, Len) ((void)(Name), (void)(Len), (-1))
#else
#define COMPILER_RT_GETHOSTNAME(Name, Len) lprofGetHostName(Name, Len)
+#ifndef _MSC_VER
#define COMPILER_RT_HAS_UNAME 1
#endif
+#endif
#if COMPILER_RT_HAS_ATOMICS == 1
#ifdef _MSC_VER
diff --git a/lib/profile/InstrProfilingRuntime.cc b/lib/profile/InstrProfilingRuntime.cc
index 12ad9f1573f4..eb83074983ba 100644
--- a/lib/profile/InstrProfilingRuntime.cc
+++ b/lib/profile/InstrProfilingRuntime.cc
@@ -11,7 +11,8 @@ extern "C" {
#include "InstrProfiling.h"
-COMPILER_RT_VISIBILITY int __llvm_profile_runtime;
+/* int __llvm_profile_runtime */
+COMPILER_RT_VISIBILITY int INSTR_PROF_PROFILE_RUNTIME_VAR;
}
namespace {
diff --git a/lib/profile/InstrProfilingUtil.c b/lib/profile/InstrProfilingUtil.c
index 5c66933bc1af..321c7192cc60 100644
--- a/lib/profile/InstrProfilingUtil.c
+++ b/lib/profile/InstrProfilingUtil.c
@@ -35,7 +35,7 @@ void __llvm_profile_recursive_mkdir(char *path) {
for (i = 1; path[i] != '\0'; ++i) {
char save = path[i];
- if (!(path[i] == '/' || path[i] == '\\'))
+ if (!IS_DIR_SEPARATOR(path[i]))
continue;
path[i] = '\0';
#ifdef _WIN32
@@ -66,7 +66,19 @@ void *lprofPtrFetchAdd(void **Mem, long ByteIncr) {
#endif
-#ifdef COMPILER_RT_HAS_UNAME
+#ifdef _MSC_VER
+COMPILER_RT_VISIBILITY int lprofGetHostName(char *Name, int Len) {
+ WCHAR Buffer[COMPILER_RT_MAX_HOSTLEN];
+  DWORD BufferSize = COMPILER_RT_MAX_HOSTLEN; /* size in WCHARs, not bytes */
+ BOOL Result =
+ GetComputerNameExW(ComputerNameDnsFullyQualified, Buffer, &BufferSize);
+ if (!Result)
+ return -1;
+ if (WideCharToMultiByte(CP_UTF8, 0, Buffer, -1, Name, Len, NULL, NULL) == 0)
+ return -1;
+ return 0;
+}
+#elif defined(COMPILER_RT_HAS_UNAME)
COMPILER_RT_VISIBILITY int lprofGetHostName(char *Name, int Len) {
struct utsname N;
int R;
@@ -184,3 +196,26 @@ lprofApplyPathPrefix(char *Dest, const char *PathStr, const char *Prefix,
memcpy(Dest + PrefixLen, StrippedPathStr, strlen(StrippedPathStr) + 1);
}
+
+COMPILER_RT_VISIBILITY const char *
+lprofFindFirstDirSeparator(const char *Path) {
+ const char *Sep;
+ Sep = strchr(Path, DIR_SEPARATOR);
+ if (Sep)
+ return Sep;
+#if defined(DIR_SEPARATOR_2)
+ Sep = strchr(Path, DIR_SEPARATOR_2);
+#endif
+ return Sep;
+}
+
+COMPILER_RT_VISIBILITY const char *lprofFindLastDirSeparator(const char *Path) {
+ const char *Sep;
+ Sep = strrchr(Path, DIR_SEPARATOR);
+ if (Sep)
+ return Sep;
+#if defined(DIR_SEPARATOR_2)
+ Sep = strrchr(Path, DIR_SEPARATOR_2);
+#endif
+ return Sep;
+}
diff --git a/lib/profile/InstrProfilingUtil.h b/lib/profile/InstrProfilingUtil.h
index 16d3fbf420f2..a80fde77e16a 100644
--- a/lib/profile/InstrProfilingUtil.h
+++ b/lib/profile/InstrProfilingUtil.h
@@ -25,6 +25,27 @@ FILE *lprofOpenFileEx(const char *Filename);
static inline char *getenv(const char *name) { return NULL; }
#endif /* #if __ORBIS__ */
+/* GCOV_PREFIX and GCOV_PREFIX_STRIP support */
+/* Return the path prefix specified by GCOV_PREFIX environment variable.
+ * If GCOV_PREFIX_STRIP is also specified, the strip level (integer value)
+ * is returned via \c *PrefixStrip. The prefix length is stored in *PrefixLen.
+ */
+const char *lprofGetPathPrefix(int *PrefixStrip, size_t *PrefixLen);
+/* Apply the path prefix specified in \c Prefix to path string in \c PathStr,
+ * and store the result to buffer pointed to by \c Buffer. If \c PrefixStrip
+ * is not zero, path prefixes are stripped from \c PathStr (the level of
+ * stripping is specified by \c PrefixStrip) before \c Prefix is added.
+ */
+void lprofApplyPathPrefix(char *Dest, const char *PathStr, const char *Prefix,
+ size_t PrefixLen, int PrefixStrip);
+
+/* Returns a pointer to the first occurrence of \c DIR_SEPARATOR char in
+ * the string \c Path, or NULL if the char is not found. */
+const char *lprofFindFirstDirSeparator(const char *Path);
+/* Returns a pointer to the last occurrence of \c DIR_SEPARATOR char in
+ * the string \c Path, or NULL if the char is not found. */
+const char *lprofFindLastDirSeparator(const char *Path);
+
int lprofGetHostName(char *Name, int Len);
unsigned lprofBoolCmpXchg(void **Ptr, void *OldV, void *NewV);
diff --git a/lib/profile/InstrProfilingValue.c b/lib/profile/InstrProfilingValue.c
index 93957e323762..6648f8923584 100644
--- a/lib/profile/InstrProfilingValue.c
+++ b/lib/profile/InstrProfilingValue.c
@@ -192,7 +192,7 @@ __llvm_profile_instrument_target(uint64_t TargetValue, void *Data,
* the runtime can wipe out more than one lowest count entries
* to give space for hot targets.
*/
- if (!(--MinCountVNode->Count)) {
+ if (!MinCountVNode->Count || !(--MinCountVNode->Count)) {
CurVNode = MinCountVNode;
CurVNode->Value = TargetValue;
CurVNode->Count++;
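The extra !MinCountVNode->Count test above guards the case where the minimum counter is already zero: without it, the pre-decrement would wrap the unsigned counter to its maximum, and the coldest node would suddenly look like the hottest and never be evicted again. In short:

    uint64_t Count = 0;
    --Count;   /* wraps to 0xFFFFFFFFFFFFFFFF */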
diff --git a/lib/profile/WindowsMMap.c b/lib/profile/WindowsMMap.c
index 1f7342050032..f81d7da5389e 100644
--- a/lib/profile/WindowsMMap.c
+++ b/lib/profile/WindowsMMap.c
@@ -20,6 +20,9 @@
#include "WindowsMMap.h"
#include "InstrProfiling.h"
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+
#ifdef __USE_FILE_OFFSET64
# define DWORD_HI(x) (x >> 32)
# define DWORD_LO(x) ((x) & 0xffffffff)
diff --git a/lib/safestack/CMakeLists.txt b/lib/safestack/CMakeLists.txt
index a3870ab80357..5a1bac2912b7 100644
--- a/lib/safestack/CMakeLists.txt
+++ b/lib/safestack/CMakeLists.txt
@@ -1,6 +1,4 @@
-add_custom_target(safestack)
-set_target_properties(safestack PROPERTIES
- FOLDER "Compiler-RT Misc")
+add_compiler_rt_component(safestack)
set(SAFESTACK_SOURCES safestack.cc)
diff --git a/lib/safestack/safestack.cc b/lib/safestack/safestack.cc
index 92c24b35d6d0..b194b6cfa8f9 100644
--- a/lib/safestack/safestack.cc
+++ b/lib/safestack/safestack.cc
@@ -92,6 +92,8 @@ static __thread void *unsafe_stack_start = nullptr;
static __thread size_t unsafe_stack_size = 0;
static __thread size_t unsafe_stack_guard = 0;
+using namespace __sanitizer;
+
static inline void *unsafe_stack_alloc(size_t size, size_t guard) {
CHECK_GE(size + guard, size);
void *addr = MmapOrDie(size + guard, "unsafe_stack_alloc");
diff --git a/lib/sanitizer_common/.clang-tidy b/lib/sanitizer_common/.clang-tidy
index aa695cc924a4..6c71abff0d38 100644
--- a/lib/sanitizer_common/.clang-tidy
+++ b/lib/sanitizer_common/.clang-tidy
@@ -8,5 +8,9 @@ CheckOptions:
value: CamelCase
- key: readability-identifier-naming.UnionCase
value: CamelCase
+ - key: readability-identifier-naming.GlobalConstantCase
+ value: CamelCase
+ - key: readability-identifier-naming.GlobalConstantPrefix
+ value: "k"
- key: readability-identifier-naming.VariableCase
value: lower_case
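What the two new clang-tidy keys enforce, in one illustrative line each:

    static const int kMaxRegionCount = 8;  // GlobalConstant: CamelCase, "k" prefix
    static int active_region_count;        // Variable: lower_case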
diff --git a/lib/sanitizer_common/CMakeLists.txt b/lib/sanitizer_common/CMakeLists.txt
index 4af0009196e8..0d9a7f067a77 100644
--- a/lib/sanitizer_common/CMakeLists.txt
+++ b/lib/sanitizer_common/CMakeLists.txt
@@ -37,6 +37,8 @@ set(SANITIZER_SOURCES_NOTERMINATION
if(UNIX AND NOT APPLE)
list(APPEND SANITIZER_SOURCES_NOTERMINATION
sanitizer_linux_x86_64.S)
+ list(APPEND SANITIZER_SOURCES_NOTERMINATION
+ sanitizer_linux_mips64.S)
endif()
set(SANITIZER_SOURCES
@@ -51,6 +53,7 @@ set(SANITIZER_NOLIBC_SOURCES
set(SANITIZER_LIBCDEP_SOURCES
sanitizer_common_libcdep.cc
sanitizer_coverage_libcdep.cc
+ sanitizer_coverage_libcdep_new.cc
sanitizer_coverage_mapping_libcdep.cc
sanitizer_linux_libcdep.cc
sanitizer_posix_libcdep.cc
@@ -66,8 +69,16 @@ set(SANITIZER_LIBCDEP_SOURCES
set(SANITIZER_HEADERS
sanitizer_addrhashmap.h
sanitizer_allocator.h
+ sanitizer_allocator_bytemap.h
+ sanitizer_allocator_combined.h
sanitizer_allocator_interface.h
sanitizer_allocator_internal.h
+ sanitizer_allocator_local_cache.h
+ sanitizer_allocator_primary32.h
+ sanitizer_allocator_primary64.h
+ sanitizer_allocator_secondary.h
+ sanitizer_allocator_size_class_map.h
+ sanitizer_allocator_stats.h
sanitizer_atomic.h
sanitizer_atomic_clang.h
sanitizer_atomic_msvc.h
@@ -118,14 +129,6 @@ set(SANITIZER_HEADERS
set(SANITIZER_COMMON_DEFINITIONS)
-if(MSVC)
- list(APPEND SANITIZER_COMMON_DEFINITIONS
- SANITIZER_NEEDS_SEGV=0)
-else()
- list(APPEND SANITIZER_COMMON_DEFINITIONS
- SANITIZER_NEEDS_SEGV=1)
-endif()
-
include(CheckIncludeFile)
append_have_file_definition(rpc/xdr.h HAVE_RPC_XDR_H SANITIZER_COMMON_DEFINITIONS)
append_have_file_definition(tirpc/rpc/xdr.h HAVE_TIRPC_RPC_XDR_H SANITIZER_COMMON_DEFINITIONS)
@@ -147,6 +150,8 @@ if (LLVM_ENABLE_PEDANTIC AND UNIX AND NOT APPLE)
# CMAKE_C*_FLAGS and re-add as a source property to all the non-.S files).
set_source_files_properties(sanitizer_linux_x86_64.S
PROPERTIES COMPILE_FLAGS "-w")
+ set_source_files_properties(sanitizer_linux_mips64.S
+ PROPERTIES COMPILE_FLAGS "-w")
endif ()
if(APPLE)
diff --git a/lib/sanitizer_common/sanitizer_addrhashmap.h b/lib/sanitizer_common/sanitizer_addrhashmap.h
index e55fc4f95a9a..2ca3c405bff3 100644
--- a/lib/sanitizer_common/sanitizer_addrhashmap.h
+++ b/lib/sanitizer_common/sanitizer_addrhashmap.h
@@ -73,6 +73,8 @@ class AddrHashMap {
~Handle();
T *operator->();
+ T &operator*();
+ const T &operator*() const;
bool created() const;
bool exists() const;
@@ -136,6 +138,16 @@ T *AddrHashMap<T, kSize>::Handle::operator->() {
return &cell_->val;
}
+template <typename T, uptr kSize>
+const T &AddrHashMap<T, kSize>::Handle::operator*() const {
+ return cell_->val;
+}
+
+template <typename T, uptr kSize>
+T &AddrHashMap<T, kSize>::Handle::operator*() {
+ return cell_->val;
+}
+
template<typename T, uptr kSize>
bool AddrHashMap<T, kSize>::Handle::created() const {
return created_;
diff --git a/lib/sanitizer_common/sanitizer_allocator.cc b/lib/sanitizer_common/sanitizer_allocator.cc
index df298c62271d..d47b5b41413c 100644
--- a/lib/sanitizer_common/sanitizer_allocator.cc
+++ b/lib/sanitizer_common/sanitizer_allocator.cc
@@ -13,27 +13,33 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_allocator.h"
+
#include "sanitizer_allocator_internal.h"
+#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
namespace __sanitizer {
// ThreadSanitizer for Go uses libc malloc/free.
-#if defined(SANITIZER_GO) || defined(SANITIZER_USE_MALLOC)
+#if SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern "C" void *__libc_malloc(uptr size);
+# if !SANITIZER_GO
extern "C" void *__libc_memalign(uptr alignment, uptr size);
+# endif
extern "C" void *__libc_realloc(void *ptr, uptr size);
extern "C" void __libc_free(void *ptr);
# else
# include <stdlib.h>
# define __libc_malloc malloc
+# if !SANITIZER_GO
static void *__libc_memalign(uptr alignment, uptr size) {
void *p;
uptr error = posix_memalign(&p, alignment, size);
if (error) return nullptr;
return p;
}
+# endif
# define __libc_realloc realloc
# define __libc_free free
# endif
@@ -41,10 +47,20 @@ static void *__libc_memalign(uptr alignment, uptr size) {
static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
uptr alignment) {
(void)cache;
+#if !SANITIZER_GO
if (alignment == 0)
return __libc_malloc(size);
else
return __libc_memalign(alignment, size);
+#else
+  // Windows does not provide __libc_memalign/posix_memalign. It provides
+  // _aligned_malloc, but the allocated blocks can't be passed to free;
+  // they need to be passed to _aligned_free. The InternalAlloc interface
+  // does not account for such a requirement. Alignment does not seem to be
+  // used anywhere in the runtime, so just call __libc_malloc for now.
+ DCHECK_EQ(alignment, 0);
+ return __libc_malloc(size);
+#endif
}
static void *RawInternalRealloc(void *ptr, uptr size,
@@ -62,7 +78,7 @@ InternalAllocator *internal_allocator() {
return 0;
}
-#else // defined(SANITIZER_GO) || defined(SANITIZER_USE_MALLOC)
+#else // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
static atomic_uint8_t internal_allocator_initialized;
@@ -78,7 +94,8 @@ InternalAllocator *internal_allocator() {
SpinMutexLock l(&internal_alloc_init_mu);
if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
0) {
- internal_allocator_instance->Init(/* may_return_null*/ false);
+ internal_allocator_instance->Init(
+ /* may_return_null */ false, kReleaseToOSIntervalNever);
atomic_store(&internal_allocator_initialized, 1, memory_order_release);
}
}
@@ -115,7 +132,7 @@ static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
internal_allocator()->Deallocate(cache, ptr);
}
-#endif // defined(SANITIZER_GO) || defined(SANITIZER_USE_MALLOC)
+#endif // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;
@@ -145,7 +162,7 @@ void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
if (CallocShouldReturnNullDueToOverflow(count, size))
- return internal_allocator()->ReturnNullOrDie();
+ return internal_allocator()->ReturnNullOrDieOnBadRequest();
void *p = InternalAlloc(count * size, cache);
if (p) internal_memset(p, 0, count * size);
return p;
@@ -192,7 +209,12 @@ bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n) {
return (max / size) < n;
}
-void NORETURN ReportAllocatorCannotReturnNull() {
+static atomic_uint8_t reporting_out_of_memory = {0};
+
+bool IsReportingOOM() { return atomic_load_relaxed(&reporting_out_of_memory); }
+
+void NORETURN ReportAllocatorCannotReturnNull(bool out_of_memory) {
+ if (out_of_memory) atomic_store_relaxed(&reporting_out_of_memory, 1);
Report("%s's allocator is terminating the process instead of returning 0\n",
SanitizerToolName);
Report("If you don't like this behavior set allocator_may_return_null=1\n");
diff --git a/lib/sanitizer_common/sanitizer_allocator.h b/lib/sanitizer_common/sanitizer_allocator.h
index f0f002004709..9a37a2f2145f 100644
--- a/lib/sanitizer_common/sanitizer_allocator.h
+++ b/lib/sanitizer_common/sanitizer_allocator.h
@@ -20,271 +20,16 @@
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"
#include "sanitizer_lfstack.h"
+#include "sanitizer_procmaps.h"
namespace __sanitizer {
-// Prints error message and kills the program.
-void NORETURN ReportAllocatorCannotReturnNull();
-
-// SizeClassMap maps allocation sizes into size classes and back.
-// Class 0 corresponds to size 0.
-// Classes 1 - 16 correspond to sizes 16 to 256 (size = class_id * 16).
-// Next 4 classes: 256 + i * 64 (i = 1 to 4).
-// Next 4 classes: 512 + i * 128 (i = 1 to 4).
-// ...
-// Next 4 classes: 2^k + i * 2^(k-2) (i = 1 to 4).
-// Last class corresponds to kMaxSize = 1 << kMaxSizeLog.
-//
-// This structure of the size class map gives us:
-// - Efficient table-free class-to-size and size-to-class functions.
-// - Difference between two consequent size classes is betweed 14% and 25%
-//
-// This class also gives a hint to a thread-caching allocator about the amount
-// of chunks that need to be cached per-thread:
-// - kMaxNumCached is the maximal number of chunks per size class.
-// - (1 << kMaxBytesCachedLog) is the maximal number of bytes per size class.
-//
-// Part of output of SizeClassMap::Print():
-// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
-// c01 => s: 16 diff: +16 00% l 4 cached: 256 4096; id 1
-// c02 => s: 32 diff: +16 100% l 5 cached: 256 8192; id 2
-// c03 => s: 48 diff: +16 50% l 5 cached: 256 12288; id 3
-// c04 => s: 64 diff: +16 33% l 6 cached: 256 16384; id 4
-// c05 => s: 80 diff: +16 25% l 6 cached: 256 20480; id 5
-// c06 => s: 96 diff: +16 20% l 6 cached: 256 24576; id 6
-// c07 => s: 112 diff: +16 16% l 6 cached: 256 28672; id 7
-//
-// c08 => s: 128 diff: +16 14% l 7 cached: 256 32768; id 8
-// c09 => s: 144 diff: +16 12% l 7 cached: 256 36864; id 9
-// c10 => s: 160 diff: +16 11% l 7 cached: 256 40960; id 10
-// c11 => s: 176 diff: +16 10% l 7 cached: 256 45056; id 11
-// c12 => s: 192 diff: +16 09% l 7 cached: 256 49152; id 12
-// c13 => s: 208 diff: +16 08% l 7 cached: 256 53248; id 13
-// c14 => s: 224 diff: +16 07% l 7 cached: 256 57344; id 14
-// c15 => s: 240 diff: +16 07% l 7 cached: 256 61440; id 15
-//
-// c16 => s: 256 diff: +16 06% l 8 cached: 256 65536; id 16
-// c17 => s: 320 diff: +64 25% l 8 cached: 204 65280; id 17
-// c18 => s: 384 diff: +64 20% l 8 cached: 170 65280; id 18
-// c19 => s: 448 diff: +64 16% l 8 cached: 146 65408; id 19
-//
-// c20 => s: 512 diff: +64 14% l 9 cached: 128 65536; id 20
-// c21 => s: 640 diff: +128 25% l 9 cached: 102 65280; id 21
-// c22 => s: 768 diff: +128 20% l 9 cached: 85 65280; id 22
-// c23 => s: 896 diff: +128 16% l 9 cached: 73 65408; id 23
-//
-// c24 => s: 1024 diff: +128 14% l 10 cached: 64 65536; id 24
-// c25 => s: 1280 diff: +256 25% l 10 cached: 51 65280; id 25
-// c26 => s: 1536 diff: +256 20% l 10 cached: 42 64512; id 26
-// c27 => s: 1792 diff: +256 16% l 10 cached: 36 64512; id 27
-//
-// ...
-//
-// c48 => s: 65536 diff: +8192 14% l 16 cached: 1 65536; id 48
-// c49 => s: 81920 diff: +16384 25% l 16 cached: 1 81920; id 49
-// c50 => s: 98304 diff: +16384 20% l 16 cached: 1 98304; id 50
-// c51 => s: 114688 diff: +16384 16% l 16 cached: 1 114688; id 51
-//
-// c52 => s: 131072 diff: +16384 14% l 17 cached: 1 131072; id 52
-
-template <uptr kMaxSizeLog, uptr kMaxNumCachedT, uptr kMaxBytesCachedLog>
-class SizeClassMap {
- static const uptr kMinSizeLog = 4;
- static const uptr kMidSizeLog = kMinSizeLog + 4;
- static const uptr kMinSize = 1 << kMinSizeLog;
- static const uptr kMidSize = 1 << kMidSizeLog;
- static const uptr kMidClass = kMidSize / kMinSize;
- static const uptr S = 2;
- static const uptr M = (1 << S) - 1;
-
- public:
- static const uptr kMaxNumCached = kMaxNumCachedT;
- // We transfer chunks between central and thread-local free lists in batches.
- // For small size classes we allocate batches separately.
- // For large size classes we use one of the chunks to store the batch.
- struct TransferBatch {
- TransferBatch *next;
- uptr count;
- void *batch[kMaxNumCached];
- };
-
- static const uptr kMaxSize = 1UL << kMaxSizeLog;
- static const uptr kNumClasses =
- kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1;
- COMPILER_CHECK(kNumClasses >= 32 && kNumClasses <= 256);
- static const uptr kNumClassesRounded =
- kNumClasses == 32 ? 32 :
- kNumClasses <= 64 ? 64 :
- kNumClasses <= 128 ? 128 : 256;
-
- static uptr Size(uptr class_id) {
- if (class_id <= kMidClass)
- return kMinSize * class_id;
- class_id -= kMidClass;
- uptr t = kMidSize << (class_id >> S);
- return t + (t >> S) * (class_id & M);
- }
-
- static uptr ClassID(uptr size) {
- if (size <= kMidSize)
- return (size + kMinSize - 1) >> kMinSizeLog;
- if (size > kMaxSize) return 0;
- uptr l = MostSignificantSetBitIndex(size);
- uptr hbits = (size >> (l - S)) & M;
- uptr lbits = size & ((1 << (l - S)) - 1);
- uptr l1 = l - kMidSizeLog;
- return kMidClass + (l1 << S) + hbits + (lbits > 0);
- }
-
- static uptr MaxCached(uptr class_id) {
- if (class_id == 0) return 0;
- uptr n = (1UL << kMaxBytesCachedLog) / Size(class_id);
- return Max<uptr>(1, Min(kMaxNumCached, n));
- }
-
- static void Print() {
- uptr prev_s = 0;
- uptr total_cached = 0;
- for (uptr i = 0; i < kNumClasses; i++) {
- uptr s = Size(i);
- if (s >= kMidSize / 2 && (s & (s - 1)) == 0)
- Printf("\n");
- uptr d = s - prev_s;
- uptr p = prev_s ? (d * 100 / prev_s) : 0;
- uptr l = s ? MostSignificantSetBitIndex(s) : 0;
- uptr cached = MaxCached(i) * s;
- Printf("c%02zd => s: %zd diff: +%zd %02zd%% l %zd "
- "cached: %zd %zd; id %zd\n",
- i, Size(i), d, p, l, MaxCached(i), cached, ClassID(s));
- total_cached += cached;
- prev_s = s;
- }
- Printf("Total cached: %zd\n", total_cached);
- }
-
- static bool SizeClassRequiresSeparateTransferBatch(uptr class_id) {
- return Size(class_id) < sizeof(TransferBatch) -
- sizeof(uptr) * (kMaxNumCached - MaxCached(class_id));
- }
-
- static void Validate() {
- for (uptr c = 1; c < kNumClasses; c++) {
- // Printf("Validate: c%zd\n", c);
- uptr s = Size(c);
- CHECK_NE(s, 0U);
- CHECK_EQ(ClassID(s), c);
- if (c != kNumClasses - 1)
- CHECK_EQ(ClassID(s + 1), c + 1);
- CHECK_EQ(ClassID(s - 1), c);
- if (c)
- CHECK_GT(Size(c), Size(c-1));
- }
- CHECK_EQ(ClassID(kMaxSize + 1), 0);
-
- for (uptr s = 1; s <= kMaxSize; s++) {
- uptr c = ClassID(s);
- // Printf("s%zd => c%zd\n", s, c);
- CHECK_LT(c, kNumClasses);
- CHECK_GE(Size(c), s);
- if (c > 0)
- CHECK_LT(Size(c-1), s);
- }
- }
-};
+// Returns true if ReportAllocatorCannotReturnNull(true) was called.
+// Can be use to avoid memory hungry operations.
+bool IsReportingOOM();
-typedef SizeClassMap<17, 128, 16> DefaultSizeClassMap;
-typedef SizeClassMap<17, 64, 14> CompactSizeClassMap;
-template<class SizeClassAllocator> struct SizeClassAllocatorLocalCache;
-
-// Memory allocator statistics
-enum AllocatorStat {
- AllocatorStatAllocated,
- AllocatorStatMapped,
- AllocatorStatCount
-};
-
-typedef uptr AllocatorStatCounters[AllocatorStatCount];
-
-// Per-thread stats, live in per-thread cache.
-class AllocatorStats {
- public:
- void Init() {
- internal_memset(this, 0, sizeof(*this));
- }
- void InitLinkerInitialized() {}
-
- void Add(AllocatorStat i, uptr v) {
- v += atomic_load(&stats_[i], memory_order_relaxed);
- atomic_store(&stats_[i], v, memory_order_relaxed);
- }
-
- void Sub(AllocatorStat i, uptr v) {
- v = atomic_load(&stats_[i], memory_order_relaxed) - v;
- atomic_store(&stats_[i], v, memory_order_relaxed);
- }
-
- void Set(AllocatorStat i, uptr v) {
- atomic_store(&stats_[i], v, memory_order_relaxed);
- }
-
- uptr Get(AllocatorStat i) const {
- return atomic_load(&stats_[i], memory_order_relaxed);
- }
-
- private:
- friend class AllocatorGlobalStats;
- AllocatorStats *next_;
- AllocatorStats *prev_;
- atomic_uintptr_t stats_[AllocatorStatCount];
-};
-
-// Global stats, used for aggregation and querying.
-class AllocatorGlobalStats : public AllocatorStats {
- public:
- void InitLinkerInitialized() {
- next_ = this;
- prev_ = this;
- }
- void Init() {
- internal_memset(this, 0, sizeof(*this));
- InitLinkerInitialized();
- }
-
- void Register(AllocatorStats *s) {
- SpinMutexLock l(&mu_);
- s->next_ = next_;
- s->prev_ = this;
- next_->prev_ = s;
- next_ = s;
- }
-
- void Unregister(AllocatorStats *s) {
- SpinMutexLock l(&mu_);
- s->prev_->next_ = s->next_;
- s->next_->prev_ = s->prev_;
- for (int i = 0; i < AllocatorStatCount; i++)
- Add(AllocatorStat(i), s->Get(AllocatorStat(i)));
- }
-
- void Get(AllocatorStatCounters s) const {
- internal_memset(s, 0, AllocatorStatCount * sizeof(uptr));
- SpinMutexLock l(&mu_);
- const AllocatorStats *stats = this;
- for (;;) {
- for (int i = 0; i < AllocatorStatCount; i++)
- s[i] += stats->Get(AllocatorStat(i));
- stats = stats->next_;
- if (stats == this)
- break;
- }
- // All stats must be non-negative.
- for (int i = 0; i < AllocatorStatCount; i++)
- s[i] = ((sptr)s[i]) >= 0 ? s[i] : 0;
- }
-
- private:
- mutable SpinMutex mu_;
-};
+// Prints error message and kills the program.
+void NORETURN ReportAllocatorCannotReturnNull(bool out_of_memory);
// Allocators call these callbacks on mmap/munmap.
struct NoOpMapUnmapCallback {
@@ -295,1185 +40,18 @@ struct NoOpMapUnmapCallback {
// Callback type for iterating over chunks.
typedef void (*ForEachChunkCallback)(uptr chunk, void *arg);
-// SizeClassAllocator64 -- allocator for 64-bit address space.
-//
-// Space: a portion of address space of kSpaceSize bytes starting at SpaceBeg.
-// If kSpaceBeg is ~0 then SpaceBeg is chosen dynamically my mmap.
-// Otherwise SpaceBeg=kSpaceBeg (fixed address).
-// kSpaceSize is a power of two.
-// At the beginning the entire space is mprotect-ed, then small parts of it
-// are mapped on demand.
-//
-// Region: a part of Space dedicated to a single size class.
-// There are kNumClasses Regions of equal size.
-//
-// UserChunk: a piece of memory returned to user.
-// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
-//
-// A Region looks like this:
-// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1
-template <const uptr kSpaceBeg, const uptr kSpaceSize,
- const uptr kMetadataSize, class SizeClassMap,
- class MapUnmapCallback = NoOpMapUnmapCallback>
-class SizeClassAllocator64 {
- public:
- typedef typename SizeClassMap::TransferBatch Batch;
- typedef SizeClassAllocator64<kSpaceBeg, kSpaceSize, kMetadataSize,
- SizeClassMap, MapUnmapCallback> ThisT;
- typedef SizeClassAllocatorLocalCache<ThisT> AllocatorCache;
-
- void Init() {
- uptr TotalSpaceSize = kSpaceSize + AdditionalSize();
- if (kUsingConstantSpaceBeg) {
- CHECK_EQ(kSpaceBeg, reinterpret_cast<uptr>(
- MmapFixedNoAccess(kSpaceBeg, TotalSpaceSize)));
- } else {
- NonConstSpaceBeg =
- reinterpret_cast<uptr>(MmapNoAccess(TotalSpaceSize));
- CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
- }
- MapWithCallback(SpaceEnd(), AdditionalSize());
- }
-
- void MapWithCallback(uptr beg, uptr size) {
- CHECK_EQ(beg, reinterpret_cast<uptr>(MmapFixedOrDie(beg, size)));
- MapUnmapCallback().OnMap(beg, size);
- }
-
- void UnmapWithCallback(uptr beg, uptr size) {
- MapUnmapCallback().OnUnmap(beg, size);
- UnmapOrDie(reinterpret_cast<void *>(beg), size);
- }
-
- static bool CanAllocate(uptr size, uptr alignment) {
- return size <= SizeClassMap::kMaxSize &&
- alignment <= SizeClassMap::kMaxSize;
- }
-
- NOINLINE Batch* AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
- uptr class_id) {
- CHECK_LT(class_id, kNumClasses);
- RegionInfo *region = GetRegionInfo(class_id);
- Batch *b = region->free_list.Pop();
- if (!b)
- b = PopulateFreeList(stat, c, class_id, region);
- region->n_allocated += b->count;
- return b;
- }
-
- NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id, Batch *b) {
- RegionInfo *region = GetRegionInfo(class_id);
- CHECK_GT(b->count, 0);
- region->free_list.Push(b);
- region->n_freed += b->count;
- }
-
- bool PointerIsMine(const void *p) {
- uptr P = reinterpret_cast<uptr>(p);
- if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
- return P / kSpaceSize == kSpaceBeg / kSpaceSize;
- return P >= SpaceBeg() && P < SpaceEnd();
- }
-
- uptr GetSizeClass(const void *p) {
- if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
- return ((reinterpret_cast<uptr>(p)) / kRegionSize) % kNumClassesRounded;
- return ((reinterpret_cast<uptr>(p) - SpaceBeg()) / kRegionSize) %
- kNumClassesRounded;
- }
-
- void *GetBlockBegin(const void *p) {
- uptr class_id = GetSizeClass(p);
- uptr size = SizeClassMap::Size(class_id);
- if (!size) return nullptr;
- uptr chunk_idx = GetChunkIdx((uptr)p, size);
- uptr reg_beg = (uptr)p & ~(kRegionSize - 1);
- uptr beg = chunk_idx * size;
- uptr next_beg = beg + size;
- if (class_id >= kNumClasses) return nullptr;
- RegionInfo *region = GetRegionInfo(class_id);
- if (region->mapped_user >= next_beg)
- return reinterpret_cast<void*>(reg_beg + beg);
- return nullptr;
- }
-
- uptr GetActuallyAllocatedSize(void *p) {
- CHECK(PointerIsMine(p));
- return SizeClassMap::Size(GetSizeClass(p));
- }
-
- uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
-
- void *GetMetaData(const void *p) {
- uptr class_id = GetSizeClass(p);
- uptr size = SizeClassMap::Size(class_id);
- uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
- return reinterpret_cast<void *>(SpaceBeg() +
- (kRegionSize * (class_id + 1)) -
- (1 + chunk_idx) * kMetadataSize);
- }
-
- uptr TotalMemoryUsed() {
- uptr res = 0;
- for (uptr i = 0; i < kNumClasses; i++)
- res += GetRegionInfo(i)->allocated_user;
- return res;
- }
-
- // Test-only.
- void TestOnlyUnmap() {
- UnmapWithCallback(SpaceBeg(), kSpaceSize + AdditionalSize());
- }
-
- void PrintStats() {
- uptr total_mapped = 0;
- uptr n_allocated = 0;
- uptr n_freed = 0;
- for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
- RegionInfo *region = GetRegionInfo(class_id);
- total_mapped += region->mapped_user;
- n_allocated += region->n_allocated;
- n_freed += region->n_freed;
- }
- Printf("Stats: SizeClassAllocator64: %zdM mapped in %zd allocations; "
- "remains %zd\n",
- total_mapped >> 20, n_allocated, n_allocated - n_freed);
- for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
- RegionInfo *region = GetRegionInfo(class_id);
- if (region->mapped_user == 0) continue;
- Printf(" %02zd (%zd): total: %zd K allocs: %zd remains: %zd\n",
- class_id,
- SizeClassMap::Size(class_id),
- region->mapped_user >> 10,
- region->n_allocated,
- region->n_allocated - region->n_freed);
- }
- }
-
- // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
- // introspection API.
- void ForceLock() {
- for (uptr i = 0; i < kNumClasses; i++) {
- GetRegionInfo(i)->mutex.Lock();
- }
- }
-
- void ForceUnlock() {
- for (int i = (int)kNumClasses - 1; i >= 0; i--) {
- GetRegionInfo(i)->mutex.Unlock();
- }
- }
-
- // Iterate over all existing chunks.
- // The allocator must be locked when calling this function.
- void ForEachChunk(ForEachChunkCallback callback, void *arg) {
- for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
- RegionInfo *region = GetRegionInfo(class_id);
- uptr chunk_size = SizeClassMap::Size(class_id);
- uptr region_beg = SpaceBeg() + class_id * kRegionSize;
- for (uptr chunk = region_beg;
- chunk < region_beg + region->allocated_user;
- chunk += chunk_size) {
- // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
- callback(chunk, arg);
- }
- }
- }
-
- static uptr AdditionalSize() {
- return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
- GetPageSizeCached());
- }
-
- typedef SizeClassMap SizeClassMapT;
- static const uptr kNumClasses = SizeClassMap::kNumClasses;
- static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;
-
- private:
- static const uptr kRegionSize = kSpaceSize / kNumClassesRounded;
-
- static const bool kUsingConstantSpaceBeg = kSpaceBeg != ~(uptr)0;
- uptr NonConstSpaceBeg;
- uptr SpaceBeg() const {
- return kUsingConstantSpaceBeg ? kSpaceBeg : NonConstSpaceBeg;
- }
- uptr SpaceEnd() const { return SpaceBeg() + kSpaceSize; }
- // kRegionSize must be >= 2^32.
- COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
- // Populate the free list with at most this number of bytes at once
- // or with one element if its size is greater.
- static const uptr kPopulateSize = 1 << 14;
- // Call mmap for user memory with at least this size.
- static const uptr kUserMapSize = 1 << 16;
- // Call mmap for metadata memory with at least this size.
- static const uptr kMetaMapSize = 1 << 16;
-
- struct RegionInfo {
- BlockingMutex mutex;
- LFStack<Batch> free_list;
- uptr allocated_user; // Bytes allocated for user memory.
- uptr allocated_meta; // Bytes allocated for metadata.
- uptr mapped_user; // Bytes mapped for user memory.
- uptr mapped_meta; // Bytes mapped for metadata.
- uptr n_allocated, n_freed; // Just stats.
- };
- COMPILER_CHECK(sizeof(RegionInfo) >= kCacheLineSize);
-
- RegionInfo *GetRegionInfo(uptr class_id) {
- CHECK_LT(class_id, kNumClasses);
- RegionInfo *regions =
- reinterpret_cast<RegionInfo *>(SpaceBeg() + kSpaceSize);
- return &regions[class_id];
- }
-
- static uptr GetChunkIdx(uptr chunk, uptr size) {
- uptr offset = chunk % kRegionSize;
- // Here we divide by a non-constant. This is costly.
- // size always fits into 32-bits. If the offset fits too, use 32-bit div.
- if (offset >> (SANITIZER_WORDSIZE / 2))
- return offset / size;
- return (u32)offset / (u32)size;
- }
-
- NOINLINE Batch* PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
- uptr class_id, RegionInfo *region) {
- BlockingMutexLock l(&region->mutex);
- Batch *b = region->free_list.Pop();
- if (b)
- return b;
- uptr size = SizeClassMap::Size(class_id);
- uptr count = size < kPopulateSize ? SizeClassMap::MaxCached(class_id) : 1;
- uptr beg_idx = region->allocated_user;
- uptr end_idx = beg_idx + count * size;
- uptr region_beg = SpaceBeg() + kRegionSize * class_id;
- if (end_idx + size > region->mapped_user) {
- // Do the mmap for the user memory.
- uptr map_size = kUserMapSize;
- while (end_idx + size > region->mapped_user + map_size)
- map_size += kUserMapSize;
- CHECK_GE(region->mapped_user + map_size, end_idx);
- MapWithCallback(region_beg + region->mapped_user, map_size);
- stat->Add(AllocatorStatMapped, map_size);
- region->mapped_user += map_size;
- }
- uptr total_count = (region->mapped_user - beg_idx - size)
- / size / count * count;
- region->allocated_meta += total_count * kMetadataSize;
- if (region->allocated_meta > region->mapped_meta) {
- uptr map_size = kMetaMapSize;
- while (region->allocated_meta > region->mapped_meta + map_size)
- map_size += kMetaMapSize;
- // Do the mmap for the metadata.
- CHECK_GE(region->mapped_meta + map_size, region->allocated_meta);
- MapWithCallback(region_beg + kRegionSize -
- region->mapped_meta - map_size, map_size);
- region->mapped_meta += map_size;
- }
- CHECK_LE(region->allocated_meta, region->mapped_meta);
- if (region->mapped_user + region->mapped_meta > kRegionSize) {
- Printf("%s: Out of memory. Dying. ", SanitizerToolName);
- Printf("The process has exhausted %zuMB for size class %zu.\n",
- kRegionSize / 1024 / 1024, size);
- Die();
- }
- for (;;) {
- if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
- b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch)));
- else
- b = (Batch*)(region_beg + beg_idx);
- b->count = count;
- for (uptr i = 0; i < count; i++)
- b->batch[i] = (void*)(region_beg + beg_idx + i * size);
- region->allocated_user += count * size;
- CHECK_LE(region->allocated_user, region->mapped_user);
- beg_idx += count * size;
- if (beg_idx + count * size + size > region->mapped_user)
- break;
- CHECK_GT(b->count, 0);
- region->free_list.Push(b);
- }
- return b;
- }
-};
-
-// Maps integers in rage [0, kSize) to u8 values.
-template<u64 kSize>
-class FlatByteMap {
- public:
- void TestOnlyInit() {
- internal_memset(map_, 0, sizeof(map_));
- }
-
- void set(uptr idx, u8 val) {
- CHECK_LT(idx, kSize);
- CHECK_EQ(0U, map_[idx]);
- map_[idx] = val;
- }
- u8 operator[] (uptr idx) {
- CHECK_LT(idx, kSize);
- // FIXME: CHECK may be too expensive here.
- return map_[idx];
- }
- private:
- u8 map_[kSize];
-};
-
-// TwoLevelByteMap maps integers in range [0, kSize1*kSize2) to u8 values.
-// It is implemented as a two-dimensional array: array of kSize1 pointers
-// to kSize2-byte arrays. The secondary arrays are mmaped on demand.
-// Each value is initially zero and can be set to something else only once.
-// Setting and getting values from multiple threads is safe w/o extra locking.
-template <u64 kSize1, u64 kSize2, class MapUnmapCallback = NoOpMapUnmapCallback>
-class TwoLevelByteMap {
- public:
- void TestOnlyInit() {
- internal_memset(map1_, 0, sizeof(map1_));
- mu_.Init();
- }
-
- void TestOnlyUnmap() {
- for (uptr i = 0; i < kSize1; i++) {
- u8 *p = Get(i);
- if (!p) continue;
- MapUnmapCallback().OnUnmap(reinterpret_cast<uptr>(p), kSize2);
- UnmapOrDie(p, kSize2);
- }
- }
-
- uptr size() const { return kSize1 * kSize2; }
- uptr size1() const { return kSize1; }
- uptr size2() const { return kSize2; }
-
- void set(uptr idx, u8 val) {
- CHECK_LT(idx, kSize1 * kSize2);
- u8 *map2 = GetOrCreate(idx / kSize2);
- CHECK_EQ(0U, map2[idx % kSize2]);
- map2[idx % kSize2] = val;
- }
-
- u8 operator[] (uptr idx) const {
- CHECK_LT(idx, kSize1 * kSize2);
- u8 *map2 = Get(idx / kSize2);
- if (!map2) return 0;
- return map2[idx % kSize2];
- }
-
- private:
- u8 *Get(uptr idx) const {
- CHECK_LT(idx, kSize1);
- return reinterpret_cast<u8 *>(
- atomic_load(&map1_[idx], memory_order_acquire));
- }
-
- u8 *GetOrCreate(uptr idx) {
- u8 *res = Get(idx);
- if (!res) {
- SpinMutexLock l(&mu_);
- if (!(res = Get(idx))) {
- res = (u8*)MmapOrDie(kSize2, "TwoLevelByteMap");
- MapUnmapCallback().OnMap(reinterpret_cast<uptr>(res), kSize2);
- atomic_store(&map1_[idx], reinterpret_cast<uptr>(res),
- memory_order_release);
- }
- }
- return res;
- }
-
- atomic_uintptr_t map1_[kSize1];
- StaticSpinMutex mu_;
-};
-
-// SizeClassAllocator32 -- allocator for 32-bit address space.
-// This allocator can theoretically be used on 64-bit arch, but there it is less
-// efficient than SizeClassAllocator64.
-//
-// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
-// be returned by MmapOrDie().
-//
-// Region:
-// a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
-// Since the regions are aligned by kRegionSize, there are exactly
-// kNumPossibleRegions possible regions in the address space and so we keep
-// a ByteMap possible_regions to store the size classes of each Region.
-// 0 size class means the region is not used by the allocator.
-//
-// One Region is used to allocate chunks of a single size class.
-// A Region looks like this:
-// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
-//
-// In order to avoid false sharing the objects of this class should be
-// chache-line aligned.
-template <const uptr kSpaceBeg, const u64 kSpaceSize,
- const uptr kMetadataSize, class SizeClassMap,
- const uptr kRegionSizeLog,
- class ByteMap,
- class MapUnmapCallback = NoOpMapUnmapCallback>
-class SizeClassAllocator32 {
- public:
- typedef typename SizeClassMap::TransferBatch Batch;
- typedef SizeClassAllocator32<kSpaceBeg, kSpaceSize, kMetadataSize,
- SizeClassMap, kRegionSizeLog, ByteMap, MapUnmapCallback> ThisT;
- typedef SizeClassAllocatorLocalCache<ThisT> AllocatorCache;
-
- void Init() {
- possible_regions.TestOnlyInit();
- internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
- }
-
- void *MapWithCallback(uptr size) {
- size = RoundUpTo(size, GetPageSizeCached());
- void *res = MmapOrDie(size, "SizeClassAllocator32");
- MapUnmapCallback().OnMap((uptr)res, size);
- return res;
- }
-
- void UnmapWithCallback(uptr beg, uptr size) {
- MapUnmapCallback().OnUnmap(beg, size);
- UnmapOrDie(reinterpret_cast<void *>(beg), size);
- }
-
- static bool CanAllocate(uptr size, uptr alignment) {
- return size <= SizeClassMap::kMaxSize &&
- alignment <= SizeClassMap::kMaxSize;
- }
-
- void *GetMetaData(const void *p) {
- CHECK(PointerIsMine(p));
- uptr mem = reinterpret_cast<uptr>(p);
- uptr beg = ComputeRegionBeg(mem);
- uptr size = SizeClassMap::Size(GetSizeClass(p));
- u32 offset = mem - beg;
- uptr n = offset / (u32)size; // 32-bit division
- uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
- return reinterpret_cast<void*>(meta);
- }
-
- NOINLINE Batch* AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
- uptr class_id) {
- CHECK_LT(class_id, kNumClasses);
- SizeClassInfo *sci = GetSizeClassInfo(class_id);
- SpinMutexLock l(&sci->mutex);
- if (sci->free_list.empty())
- PopulateFreeList(stat, c, sci, class_id);
- CHECK(!sci->free_list.empty());
- Batch *b = sci->free_list.front();
- sci->free_list.pop_front();
- return b;
- }
-
- NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id, Batch *b) {
- CHECK_LT(class_id, kNumClasses);
- SizeClassInfo *sci = GetSizeClassInfo(class_id);
- SpinMutexLock l(&sci->mutex);
- CHECK_GT(b->count, 0);
- sci->free_list.push_front(b);
- }
-
- bool PointerIsMine(const void *p) {
- uptr mem = reinterpret_cast<uptr>(p);
- if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize)
- return false;
- return GetSizeClass(p) != 0;
- }
-
- uptr GetSizeClass(const void *p) {
- return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
- }
-
- void *GetBlockBegin(const void *p) {
- CHECK(PointerIsMine(p));
- uptr mem = reinterpret_cast<uptr>(p);
- uptr beg = ComputeRegionBeg(mem);
- uptr size = SizeClassMap::Size(GetSizeClass(p));
- u32 offset = mem - beg;
- u32 n = offset / (u32)size; // 32-bit division
- uptr res = beg + (n * (u32)size);
- return reinterpret_cast<void*>(res);
- }
-
- uptr GetActuallyAllocatedSize(void *p) {
- CHECK(PointerIsMine(p));
- return SizeClassMap::Size(GetSizeClass(p));
- }
-
- uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
-
- uptr TotalMemoryUsed() {
- // No need to lock here.
- uptr res = 0;
- for (uptr i = 0; i < kNumPossibleRegions; i++)
- if (possible_regions[i])
- res += kRegionSize;
- return res;
- }
-
- void TestOnlyUnmap() {
- for (uptr i = 0; i < kNumPossibleRegions; i++)
- if (possible_regions[i])
- UnmapWithCallback((i * kRegionSize), kRegionSize);
- }
-
- // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
- // introspection API.
- void ForceLock() {
- for (uptr i = 0; i < kNumClasses; i++) {
- GetSizeClassInfo(i)->mutex.Lock();
- }
- }
-
- void ForceUnlock() {
- for (int i = kNumClasses - 1; i >= 0; i--) {
- GetSizeClassInfo(i)->mutex.Unlock();
- }
- }
-
- // Iterate over all existing chunks.
- // The allocator must be locked when calling this function.
- void ForEachChunk(ForEachChunkCallback callback, void *arg) {
- for (uptr region = 0; region < kNumPossibleRegions; region++)
- if (possible_regions[region]) {
- uptr chunk_size = SizeClassMap::Size(possible_regions[region]);
- uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
- uptr region_beg = region * kRegionSize;
- for (uptr chunk = region_beg;
- chunk < region_beg + max_chunks_in_region * chunk_size;
- chunk += chunk_size) {
- // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
- callback(chunk, arg);
- }
- }
- }
-
- void PrintStats() {
- }
-
- static uptr AdditionalSize() {
- return 0;
- }
-
- typedef SizeClassMap SizeClassMapT;
- static const uptr kNumClasses = SizeClassMap::kNumClasses;
-
- private:
- static const uptr kRegionSize = 1 << kRegionSizeLog;
- static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;
-
- struct SizeClassInfo {
- SpinMutex mutex;
- IntrusiveList<Batch> free_list;
- char padding[kCacheLineSize - sizeof(uptr) - sizeof(IntrusiveList<Batch>)];
- };
- COMPILER_CHECK(sizeof(SizeClassInfo) == kCacheLineSize);
-
- uptr ComputeRegionId(uptr mem) {
- uptr res = mem >> kRegionSizeLog;
- CHECK_LT(res, kNumPossibleRegions);
- return res;
- }
-
- uptr ComputeRegionBeg(uptr mem) {
- return mem & ~(kRegionSize - 1);
- }
-
- uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
- CHECK_LT(class_id, kNumClasses);
- uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
- "SizeClassAllocator32"));
- MapUnmapCallback().OnMap(res, kRegionSize);
- stat->Add(AllocatorStatMapped, kRegionSize);
- CHECK_EQ(0U, (res & (kRegionSize - 1)));
- possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
- return res;
- }
-
- SizeClassInfo *GetSizeClassInfo(uptr class_id) {
- CHECK_LT(class_id, kNumClasses);
- return &size_class_info_array[class_id];
- }
-
- void PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
- SizeClassInfo *sci, uptr class_id) {
- uptr size = SizeClassMap::Size(class_id);
- uptr reg = AllocateRegion(stat, class_id);
- uptr n_chunks = kRegionSize / (size + kMetadataSize);
- uptr max_count = SizeClassMap::MaxCached(class_id);
- Batch *b = nullptr;
- for (uptr i = reg; i < reg + n_chunks * size; i += size) {
- if (!b) {
- if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
- b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch)));
- else
- b = (Batch*)i;
- b->count = 0;
- }
- b->batch[b->count++] = (void*)i;
- if (b->count == max_count) {
- CHECK_GT(b->count, 0);
- sci->free_list.push_back(b);
- b = nullptr;
- }
- }
- if (b) {
- CHECK_GT(b->count, 0);
- sci->free_list.push_back(b);
- }
- }
-
- ByteMap possible_regions;
- SizeClassInfo size_class_info_array[kNumClasses];
-};
-
-// Objects of this type should be used as local caches for SizeClassAllocator64
-// or SizeClassAllocator32. Since the typical use of this class is to have one
-// object per thread in TLS, is has to be POD.
-template<class SizeClassAllocator>
-struct SizeClassAllocatorLocalCache {
- typedef SizeClassAllocator Allocator;
- static const uptr kNumClasses = SizeClassAllocator::kNumClasses;
-
- void Init(AllocatorGlobalStats *s) {
- stats_.Init();
- if (s)
- s->Register(&stats_);
- }
-
- void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
- Drain(allocator);
- if (s)
- s->Unregister(&stats_);
- }
-
- void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
- CHECK_NE(class_id, 0UL);
- CHECK_LT(class_id, kNumClasses);
- stats_.Add(AllocatorStatAllocated, SizeClassMap::Size(class_id));
- PerClass *c = &per_class_[class_id];
- if (UNLIKELY(c->count == 0))
- Refill(allocator, class_id);
- void *res = c->batch[--c->count];
- PREFETCH(c->batch[c->count - 1]);
- return res;
- }
-
- void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
- CHECK_NE(class_id, 0UL);
- CHECK_LT(class_id, kNumClasses);
- // If the first allocator call on a new thread is a deallocation, then
- // max_count will be zero, leading to check failure.
- InitCache();
- stats_.Sub(AllocatorStatAllocated, SizeClassMap::Size(class_id));
- PerClass *c = &per_class_[class_id];
- CHECK_NE(c->max_count, 0UL);
- if (UNLIKELY(c->count == c->max_count))
- Drain(allocator, class_id);
- c->batch[c->count++] = p;
- }
-
- void Drain(SizeClassAllocator *allocator) {
- for (uptr class_id = 0; class_id < kNumClasses; class_id++) {
- PerClass *c = &per_class_[class_id];
- while (c->count > 0)
- Drain(allocator, class_id);
- }
- }
-
- // private:
- typedef typename SizeClassAllocator::SizeClassMapT SizeClassMap;
- typedef typename SizeClassMap::TransferBatch Batch;
- struct PerClass {
- uptr count;
- uptr max_count;
- void *batch[2 * SizeClassMap::kMaxNumCached];
- };
- PerClass per_class_[kNumClasses];
- AllocatorStats stats_;
-
- void InitCache() {
- if (per_class_[1].max_count)
- return;
- for (uptr i = 0; i < kNumClasses; i++) {
- PerClass *c = &per_class_[i];
- c->max_count = 2 * SizeClassMap::MaxCached(i);
- }
- }
-
- NOINLINE void Refill(SizeClassAllocator *allocator, uptr class_id) {
- InitCache();
- PerClass *c = &per_class_[class_id];
- Batch *b = allocator->AllocateBatch(&stats_, this, class_id);
- CHECK_GT(b->count, 0);
- for (uptr i = 0; i < b->count; i++)
- c->batch[i] = b->batch[i];
- c->count = b->count;
- if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
- Deallocate(allocator, SizeClassMap::ClassID(sizeof(Batch)), b);
- }
-
- NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) {
- InitCache();
- PerClass *c = &per_class_[class_id];
- Batch *b;
- if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
- b = (Batch*)Allocate(allocator, SizeClassMap::ClassID(sizeof(Batch)));
- else
- b = (Batch*)c->batch[0];
- uptr cnt = Min(c->max_count / 2, c->count);
- for (uptr i = 0; i < cnt; i++) {
- b->batch[i] = c->batch[i];
- c->batch[i] = c->batch[i + c->max_count / 2];
- }
- b->count = cnt;
- c->count -= cnt;
- CHECK_GT(b->count, 0);
- allocator->DeallocateBatch(&stats_, class_id, b);
- }
-};
-
-// This class can (de)allocate only large chunks of memory using mmap/unmap.
-// The main purpose of this allocator is to cover large and rare allocation
-// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
-template <class MapUnmapCallback = NoOpMapUnmapCallback>
-class LargeMmapAllocator {
- public:
- void InitLinkerInitialized(bool may_return_null) {
- page_size_ = GetPageSizeCached();
- atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
- }
-
- void Init(bool may_return_null) {
- internal_memset(this, 0, sizeof(*this));
- InitLinkerInitialized(may_return_null);
- }
-
- void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
- CHECK(IsPowerOfTwo(alignment));
- uptr map_size = RoundUpMapSize(size);
- if (alignment > page_size_)
- map_size += alignment;
- // Overflow.
- if (map_size < size)
- return ReturnNullOrDie();
- uptr map_beg = reinterpret_cast<uptr>(
- MmapOrDie(map_size, "LargeMmapAllocator"));
- CHECK(IsAligned(map_beg, page_size_));
- MapUnmapCallback().OnMap(map_beg, map_size);
- uptr map_end = map_beg + map_size;
- uptr res = map_beg + page_size_;
- if (res & (alignment - 1)) // Align.
- res += alignment - (res & (alignment - 1));
- CHECK(IsAligned(res, alignment));
- CHECK(IsAligned(res, page_size_));
- CHECK_GE(res + size, map_beg);
- CHECK_LE(res + size, map_end);
- Header *h = GetHeader(res);
- h->size = size;
- h->map_beg = map_beg;
- h->map_size = map_size;
- uptr size_log = MostSignificantSetBitIndex(map_size);
- CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
- {
- SpinMutexLock l(&mutex_);
- uptr idx = n_chunks_++;
- chunks_sorted_ = false;
- CHECK_LT(idx, kMaxNumChunks);
- h->chunk_idx = idx;
- chunks_[idx] = h;
- stats.n_allocs++;
- stats.currently_allocated += map_size;
- stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
- stats.by_size_log[size_log]++;
- stat->Add(AllocatorStatAllocated, map_size);
- stat->Add(AllocatorStatMapped, map_size);
- }
- return reinterpret_cast<void*>(res);
- }
-
- void *ReturnNullOrDie() {
- if (atomic_load(&may_return_null_, memory_order_acquire))
- return nullptr;
- ReportAllocatorCannotReturnNull();
- }
-
- void SetMayReturnNull(bool may_return_null) {
- atomic_store(&may_return_null_, may_return_null, memory_order_release);
- }
-
- void Deallocate(AllocatorStats *stat, void *p) {
- Header *h = GetHeader(p);
- {
- SpinMutexLock l(&mutex_);
- uptr idx = h->chunk_idx;
- CHECK_EQ(chunks_[idx], h);
- CHECK_LT(idx, n_chunks_);
- chunks_[idx] = chunks_[n_chunks_ - 1];
- chunks_[idx]->chunk_idx = idx;
- n_chunks_--;
- chunks_sorted_ = false;
- stats.n_frees++;
- stats.currently_allocated -= h->map_size;
- stat->Sub(AllocatorStatAllocated, h->map_size);
- stat->Sub(AllocatorStatMapped, h->map_size);
- }
- MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
- UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
- }
-
- uptr TotalMemoryUsed() {
- SpinMutexLock l(&mutex_);
- uptr res = 0;
- for (uptr i = 0; i < n_chunks_; i++) {
- Header *h = chunks_[i];
- CHECK_EQ(h->chunk_idx, i);
- res += RoundUpMapSize(h->size);
- }
- return res;
- }
-
- bool PointerIsMine(const void *p) {
- return GetBlockBegin(p) != nullptr;
- }
-
- uptr GetActuallyAllocatedSize(void *p) {
- return RoundUpTo(GetHeader(p)->size, page_size_);
- }
-
- // At least page_size_/2 metadata bytes is available.
- void *GetMetaData(const void *p) {
- // Too slow: CHECK_EQ(p, GetBlockBegin(p));
- if (!IsAligned(reinterpret_cast<uptr>(p), page_size_)) {
- Printf("%s: bad pointer %p\n", SanitizerToolName, p);
- CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));
- }
- return GetHeader(p) + 1;
- }
-
- void *GetBlockBegin(const void *ptr) {
- uptr p = reinterpret_cast<uptr>(ptr);
- SpinMutexLock l(&mutex_);
- uptr nearest_chunk = 0;
- // Cache-friendly linear search.
- for (uptr i = 0; i < n_chunks_; i++) {
- uptr ch = reinterpret_cast<uptr>(chunks_[i]);
- if (p < ch) continue; // p is at left to this chunk, skip it.
- if (p - ch < p - nearest_chunk)
- nearest_chunk = ch;
- }
- if (!nearest_chunk)
- return nullptr;
- Header *h = reinterpret_cast<Header *>(nearest_chunk);
- CHECK_GE(nearest_chunk, h->map_beg);
- CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
- CHECK_LE(nearest_chunk, p);
- if (h->map_beg + h->map_size <= p)
- return nullptr;
- return GetUser(h);
- }
-
- // This function does the same as GetBlockBegin, but is much faster.
- // Must be called with the allocator locked.
- void *GetBlockBeginFastLocked(void *ptr) {
- mutex_.CheckLocked();
- uptr p = reinterpret_cast<uptr>(ptr);
- uptr n = n_chunks_;
- if (!n) return nullptr;
- if (!chunks_sorted_) {
- // Do one-time sort. chunks_sorted_ is reset in Allocate/Deallocate.
- SortArray(reinterpret_cast<uptr*>(chunks_), n);
- for (uptr i = 0; i < n; i++)
- chunks_[i]->chunk_idx = i;
- chunks_sorted_ = true;
- min_mmap_ = reinterpret_cast<uptr>(chunks_[0]);
- max_mmap_ = reinterpret_cast<uptr>(chunks_[n - 1]) +
- chunks_[n - 1]->map_size;
- }
- if (p < min_mmap_ || p >= max_mmap_)
- return nullptr;
- uptr beg = 0, end = n - 1;
- // This loop is a log(n) lower_bound. It does not check for the exact match
- // to avoid expensive cache-thrashing loads.
- while (end - beg >= 2) {
- uptr mid = (beg + end) / 2; // Invariant: mid >= beg + 1
- if (p < reinterpret_cast<uptr>(chunks_[mid]))
- end = mid - 1; // We are not interested in chunks_[mid].
- else
- beg = mid; // chunks_[mid] may still be what we want.
- }
-
- if (beg < end) {
- CHECK_EQ(beg + 1, end);
- // There are 2 chunks left, choose one.
- if (p >= reinterpret_cast<uptr>(chunks_[end]))
- beg = end;
- }
-
- Header *h = chunks_[beg];
- if (h->map_beg + h->map_size <= p || p < h->map_beg)
- return nullptr;
- return GetUser(h);
- }
-
- void PrintStats() {
- Printf("Stats: LargeMmapAllocator: allocated %zd times, "
- "remains %zd (%zd K) max %zd M; by size logs: ",
- stats.n_allocs, stats.n_allocs - stats.n_frees,
- stats.currently_allocated >> 10, stats.max_allocated >> 20);
- for (uptr i = 0; i < ARRAY_SIZE(stats.by_size_log); i++) {
- uptr c = stats.by_size_log[i];
- if (!c) continue;
- Printf("%zd:%zd; ", i, c);
- }
- Printf("\n");
- }
-
- // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
- // introspection API.
- void ForceLock() {
- mutex_.Lock();
- }
-
- void ForceUnlock() {
- mutex_.Unlock();
- }
-
- // Iterate over all existing chunks.
- // The allocator must be locked when calling this function.
- void ForEachChunk(ForEachChunkCallback callback, void *arg) {
- for (uptr i = 0; i < n_chunks_; i++)
- callback(reinterpret_cast<uptr>(GetUser(chunks_[i])), arg);
- }
-
- private:
- static const int kMaxNumChunks = 1 << FIRST_32_SECOND_64(15, 18);
- struct Header {
- uptr map_beg;
- uptr map_size;
- uptr size;
- uptr chunk_idx;
- };
-
- Header *GetHeader(uptr p) {
- CHECK(IsAligned(p, page_size_));
- return reinterpret_cast<Header*>(p - page_size_);
- }
- Header *GetHeader(const void *p) {
- return GetHeader(reinterpret_cast<uptr>(p));
- }
-
- void *GetUser(Header *h) {
- CHECK(IsAligned((uptr)h, page_size_));
- return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
- }
-
- uptr RoundUpMapSize(uptr size) {
- return RoundUpTo(size, page_size_) + page_size_;
- }
-
- uptr page_size_;
- Header *chunks_[kMaxNumChunks];
- uptr n_chunks_;
- uptr min_mmap_, max_mmap_;
- bool chunks_sorted_;
- struct Stats {
- uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
- } stats;
- atomic_uint8_t may_return_null_;
- SpinMutex mutex_;
-};
-
-// This class implements a complete memory allocator by using two
-// internal allocators:
-// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
-// When allocating 2^x bytes it should return 2^x aligned chunk.
-// PrimaryAllocator is used via a local AllocatorCache.
-// SecondaryAllocator can allocate anything, but is not efficient.
-template <class PrimaryAllocator, class AllocatorCache,
- class SecondaryAllocator> // NOLINT
-class CombinedAllocator {
- public:
- void InitCommon(bool may_return_null) {
- primary_.Init();
- atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
- }
-
- void InitLinkerInitialized(bool may_return_null) {
- secondary_.InitLinkerInitialized(may_return_null);
- stats_.InitLinkerInitialized();
- InitCommon(may_return_null);
- }
-
- void Init(bool may_return_null) {
- secondary_.Init(may_return_null);
- stats_.Init();
- InitCommon(may_return_null);
- }
-
- void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
- bool cleared = false, bool check_rss_limit = false) {
- // Returning 0 on malloc(0) may break a lot of code.
- if (size == 0)
- size = 1;
- if (size + alignment < size)
- return ReturnNullOrDie();
- if (check_rss_limit && RssLimitIsExceeded())
- return ReturnNullOrDie();
- if (alignment > 8)
- size = RoundUpTo(size, alignment);
- void *res;
- bool from_primary = primary_.CanAllocate(size, alignment);
- if (from_primary)
- res = cache->Allocate(&primary_, primary_.ClassID(size));
- else
- res = secondary_.Allocate(&stats_, size, alignment);
- if (alignment > 8)
- CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
- if (cleared && res && from_primary)
- internal_bzero_aligned16(res, RoundUpTo(size, 16));
- return res;
- }
-
- bool MayReturnNull() const {
- return atomic_load(&may_return_null_, memory_order_acquire);
- }
-
- void *ReturnNullOrDie() {
- if (MayReturnNull())
- return nullptr;
- ReportAllocatorCannotReturnNull();
- }
-
- void SetMayReturnNull(bool may_return_null) {
- secondary_.SetMayReturnNull(may_return_null);
- atomic_store(&may_return_null_, may_return_null, memory_order_release);
- }
-
- bool RssLimitIsExceeded() {
- return atomic_load(&rss_limit_is_exceeded_, memory_order_acquire);
- }
-
- void SetRssLimitIsExceeded(bool rss_limit_is_exceeded) {
- atomic_store(&rss_limit_is_exceeded_, rss_limit_is_exceeded,
- memory_order_release);
- }
-
- void Deallocate(AllocatorCache *cache, void *p) {
- if (!p) return;
- if (primary_.PointerIsMine(p))
- cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
- else
- secondary_.Deallocate(&stats_, p);
- }
-
- void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
- uptr alignment) {
- if (!p)
- return Allocate(cache, new_size, alignment);
- if (!new_size) {
- Deallocate(cache, p);
- return nullptr;
- }
- CHECK(PointerIsMine(p));
- uptr old_size = GetActuallyAllocatedSize(p);
- uptr memcpy_size = Min(new_size, old_size);
- void *new_p = Allocate(cache, new_size, alignment);
- if (new_p)
- internal_memcpy(new_p, p, memcpy_size);
- Deallocate(cache, p);
- return new_p;
- }
-
- bool PointerIsMine(void *p) {
- if (primary_.PointerIsMine(p))
- return true;
- return secondary_.PointerIsMine(p);
- }
-
- bool FromPrimary(void *p) {
- return primary_.PointerIsMine(p);
- }
-
- void *GetMetaData(const void *p) {
- if (primary_.PointerIsMine(p))
- return primary_.GetMetaData(p);
- return secondary_.GetMetaData(p);
- }
-
- void *GetBlockBegin(const void *p) {
- if (primary_.PointerIsMine(p))
- return primary_.GetBlockBegin(p);
- return secondary_.GetBlockBegin(p);
- }
-
- // This function does the same as GetBlockBegin, but is much faster.
- // Must be called with the allocator locked.
- void *GetBlockBeginFastLocked(void *p) {
- if (primary_.PointerIsMine(p))
- return primary_.GetBlockBegin(p);
- return secondary_.GetBlockBeginFastLocked(p);
- }
-
- uptr GetActuallyAllocatedSize(void *p) {
- if (primary_.PointerIsMine(p))
- return primary_.GetActuallyAllocatedSize(p);
- return secondary_.GetActuallyAllocatedSize(p);
- }
-
- uptr TotalMemoryUsed() {
- return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
- }
-
- void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }
-
- void InitCache(AllocatorCache *cache) {
- cache->Init(&stats_);
- }
-
- void DestroyCache(AllocatorCache *cache) {
- cache->Destroy(&primary_, &stats_);
- }
-
- void SwallowCache(AllocatorCache *cache) {
- cache->Drain(&primary_);
- }
-
- void GetStats(AllocatorStatCounters s) const {
- stats_.Get(s);
- }
-
- void PrintStats() {
- primary_.PrintStats();
- secondary_.PrintStats();
- }
-
- // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
- // introspection API.
- void ForceLock() {
- primary_.ForceLock();
- secondary_.ForceLock();
- }
-
- void ForceUnlock() {
- secondary_.ForceUnlock();
- primary_.ForceUnlock();
- }
-
- // Iterate over all existing chunks.
- // The allocator must be locked when calling this function.
- void ForEachChunk(ForEachChunkCallback callback, void *arg) {
- primary_.ForEachChunk(callback, arg);
- secondary_.ForEachChunk(callback, arg);
- }
-
- private:
- PrimaryAllocator primary_;
- SecondaryAllocator secondary_;
- AllocatorGlobalStats stats_;
- atomic_uint8_t may_return_null_;
- atomic_uint8_t rss_limit_is_exceeded_;
-};
-
// Returns true if calloc(size, n) should return 0 due to overflow in size*n.
bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n);
+#include "sanitizer_allocator_size_class_map.h"
+#include "sanitizer_allocator_stats.h"
+#include "sanitizer_allocator_primary64.h"
+#include "sanitizer_allocator_bytemap.h"
+#include "sanitizer_allocator_primary32.h"
+#include "sanitizer_allocator_local_cache.h"
+#include "sanitizer_allocator_secondary.h"
+#include "sanitizer_allocator_combined.h"
+
} // namespace __sanitizer
#endif // SANITIZER_ALLOCATOR_H
diff --git a/lib/sanitizer_common/sanitizer_allocator_bytemap.h b/lib/sanitizer_common/sanitizer_allocator_bytemap.h
new file mode 100644
index 000000000000..92472cdf5150
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_allocator_bytemap.h
@@ -0,0 +1,103 @@
+//===-- sanitizer_allocator_bytemap.h ---------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+// Maps integers in rage [0, kSize) to u8 values.
+template<u64 kSize>
+class FlatByteMap {
+ public:
+ void TestOnlyInit() {
+ internal_memset(map_, 0, sizeof(map_));
+ }
+
+ void set(uptr idx, u8 val) {
+ CHECK_LT(idx, kSize);
+ CHECK_EQ(0U, map_[idx]);
+ map_[idx] = val;
+ }
+ u8 operator[] (uptr idx) {
+ CHECK_LT(idx, kSize);
+ // FIXME: CHECK may be too expensive here.
+ return map_[idx];
+ }
+ private:
+ u8 map_[kSize];
+};
+
+// TwoLevelByteMap maps integers in range [0, kSize1*kSize2) to u8 values.
+// It is implemented as a two-dimensional array: array of kSize1 pointers
+// to kSize2-byte arrays. The secondary arrays are mmaped on demand.
+// Each value is initially zero and can be set to something else only once.
+// Setting and getting values from multiple threads is safe w/o extra locking.
+template <u64 kSize1, u64 kSize2, class MapUnmapCallback = NoOpMapUnmapCallback>
+class TwoLevelByteMap {
+ public:
+ void TestOnlyInit() {
+ internal_memset(map1_, 0, sizeof(map1_));
+ mu_.Init();
+ }
+
+ void TestOnlyUnmap() {
+ for (uptr i = 0; i < kSize1; i++) {
+ u8 *p = Get(i);
+ if (!p) continue;
+ MapUnmapCallback().OnUnmap(reinterpret_cast<uptr>(p), kSize2);
+ UnmapOrDie(p, kSize2);
+ }
+ }
+
+ uptr size() const { return kSize1 * kSize2; }
+ uptr size1() const { return kSize1; }
+ uptr size2() const { return kSize2; }
+
+ void set(uptr idx, u8 val) {
+ CHECK_LT(idx, kSize1 * kSize2);
+ u8 *map2 = GetOrCreate(idx / kSize2);
+ CHECK_EQ(0U, map2[idx % kSize2]);
+ map2[idx % kSize2] = val;
+ }
+
+ u8 operator[] (uptr idx) const {
+ CHECK_LT(idx, kSize1 * kSize2);
+ u8 *map2 = Get(idx / kSize2);
+ if (!map2) return 0;
+ return map2[idx % kSize2];
+ }
+
+ private:
+ u8 *Get(uptr idx) const {
+ CHECK_LT(idx, kSize1);
+ return reinterpret_cast<u8 *>(
+ atomic_load(&map1_[idx], memory_order_acquire));
+ }
+
+ u8 *GetOrCreate(uptr idx) {
+ u8 *res = Get(idx);
+ if (!res) {
+ SpinMutexLock l(&mu_);
+ if (!(res = Get(idx))) {
+ res = (u8*)MmapOrDie(kSize2, "TwoLevelByteMap");
+ MapUnmapCallback().OnMap(reinterpret_cast<uptr>(res), kSize2);
+ atomic_store(&map1_[idx], reinterpret_cast<uptr>(res),
+ memory_order_release);
+ }
+ }
+ return res;
+ }
+
+ atomic_uintptr_t map1_[kSize1];
+ StaticSpinMutex mu_;
+};
+
diff --git a/lib/sanitizer_common/sanitizer_allocator_combined.h b/lib/sanitizer_common/sanitizer_allocator_combined.h
new file mode 100644
index 000000000000..19e1ae9b9f75
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_allocator_combined.h
@@ -0,0 +1,233 @@
+//===-- sanitizer_allocator_combined.h --------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+// This class implements a complete memory allocator by using two
+// internal allocators:
+// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
+// When allocating 2^x bytes it should return 2^x aligned chunk.
+// PrimaryAllocator is used via a local AllocatorCache.
+// SecondaryAllocator can allocate anything, but is not efficient.
+template <class PrimaryAllocator, class AllocatorCache,
+ class SecondaryAllocator> // NOLINT
+class CombinedAllocator {
+ public:
+ void InitCommon(bool may_return_null, s32 release_to_os_interval_ms) {
+ primary_.Init(release_to_os_interval_ms);
+ atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
+ }
+
+ void InitLinkerInitialized(
+ bool may_return_null, s32 release_to_os_interval_ms) {
+ secondary_.InitLinkerInitialized(may_return_null);
+ stats_.InitLinkerInitialized();
+ InitCommon(may_return_null, release_to_os_interval_ms);
+ }
+
+ void Init(bool may_return_null, s32 release_to_os_interval_ms) {
+ secondary_.Init(may_return_null);
+ stats_.Init();
+ InitCommon(may_return_null, release_to_os_interval_ms);
+ }
+
+ void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
+ bool cleared = false, bool check_rss_limit = false) {
+ // Returning 0 on malloc(0) may break a lot of code.
+ if (size == 0)
+ size = 1;
+ if (size + alignment < size) return ReturnNullOrDieOnBadRequest();
+ if (check_rss_limit && RssLimitIsExceeded()) return ReturnNullOrDieOnOOM();
+ uptr original_size = size;
+ // If alignment requirements are to be fulfilled by the frontend allocator
+ // rather than by the primary or secondary, passing an alignment lower than
+ // or equal to 8 will prevent any further rounding up, as well as the later
+ // alignment check.
+ if (alignment > 8)
+ size = RoundUpTo(size, alignment);
+ void *res;
+ bool from_primary = primary_.CanAllocate(size, alignment);
+ // The primary allocator should return a 2^x aligned allocation when
+ // requested 2^x bytes, hence using the rounded up 'size' when being
+ // serviced by the primary (this is no longer true when the primary is
+ // using a non-fixed base address). The secondary takes care of the
+ // alignment without such requirement, and allocating 'size' would use
+ // extraneous memory, so we employ 'original_size'.
+ if (from_primary)
+ res = cache->Allocate(&primary_, primary_.ClassID(size));
+ else
+ res = secondary_.Allocate(&stats_, original_size, alignment);
+ if (alignment > 8)
+ CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
+ // When serviced by the secondary, the chunk comes from a mmap allocation
+ // and will be zero'd out anyway. We only need to clear our the chunk if
+ // it was serviced by the primary, hence using the rounded up 'size'.
+ if (cleared && res && from_primary)
+ internal_bzero_aligned16(res, RoundUpTo(size, 16));
+ return res;
+ }
+
+ bool MayReturnNull() const {
+ return atomic_load(&may_return_null_, memory_order_acquire);
+ }
+
+ void *ReturnNullOrDieOnBadRequest() {
+ if (MayReturnNull())
+ return nullptr;
+ ReportAllocatorCannotReturnNull(false);
+ }
+
+ void *ReturnNullOrDieOnOOM() {
+ if (MayReturnNull()) return nullptr;
+ ReportAllocatorCannotReturnNull(true);
+ }
+
+ void SetMayReturnNull(bool may_return_null) {
+ secondary_.SetMayReturnNull(may_return_null);
+ atomic_store(&may_return_null_, may_return_null, memory_order_release);
+ }
+
+ s32 ReleaseToOSIntervalMs() const {
+ return primary_.ReleaseToOSIntervalMs();
+ }
+
+ void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
+ primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
+ }
+
+ bool RssLimitIsExceeded() {
+ return atomic_load(&rss_limit_is_exceeded_, memory_order_acquire);
+ }
+
+ void SetRssLimitIsExceeded(bool rss_limit_is_exceeded) {
+ atomic_store(&rss_limit_is_exceeded_, rss_limit_is_exceeded,
+ memory_order_release);
+ }
+
+ void Deallocate(AllocatorCache *cache, void *p) {
+ if (!p) return;
+ if (primary_.PointerIsMine(p))
+ cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
+ else
+ secondary_.Deallocate(&stats_, p);
+ }
+
+ void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
+ uptr alignment) {
+ if (!p)
+ return Allocate(cache, new_size, alignment);
+ if (!new_size) {
+ Deallocate(cache, p);
+ return nullptr;
+ }
+ CHECK(PointerIsMine(p));
+ uptr old_size = GetActuallyAllocatedSize(p);
+ uptr memcpy_size = Min(new_size, old_size);
+ void *new_p = Allocate(cache, new_size, alignment);
+ if (new_p)
+ internal_memcpy(new_p, p, memcpy_size);
+ Deallocate(cache, p);
+ return new_p;
+ }
+
+ bool PointerIsMine(void *p) {
+ if (primary_.PointerIsMine(p))
+ return true;
+ return secondary_.PointerIsMine(p);
+ }
+
+ bool FromPrimary(void *p) {
+ return primary_.PointerIsMine(p);
+ }
+
+ void *GetMetaData(const void *p) {
+ if (primary_.PointerIsMine(p))
+ return primary_.GetMetaData(p);
+ return secondary_.GetMetaData(p);
+ }
+
+ void *GetBlockBegin(const void *p) {
+ if (primary_.PointerIsMine(p))
+ return primary_.GetBlockBegin(p);
+ return secondary_.GetBlockBegin(p);
+ }
+
+ // This function does the same as GetBlockBegin, but is much faster.
+ // Must be called with the allocator locked.
+ void *GetBlockBeginFastLocked(void *p) {
+ if (primary_.PointerIsMine(p))
+ return primary_.GetBlockBegin(p);
+ return secondary_.GetBlockBeginFastLocked(p);
+ }
+
+ uptr GetActuallyAllocatedSize(void *p) {
+ if (primary_.PointerIsMine(p))
+ return primary_.GetActuallyAllocatedSize(p);
+ return secondary_.GetActuallyAllocatedSize(p);
+ }
+
+ uptr TotalMemoryUsed() {
+ return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
+ }
+
+ void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }
+
+ void InitCache(AllocatorCache *cache) {
+ cache->Init(&stats_);
+ }
+
+ void DestroyCache(AllocatorCache *cache) {
+ cache->Destroy(&primary_, &stats_);
+ }
+
+ void SwallowCache(AllocatorCache *cache) {
+ cache->Drain(&primary_);
+ }
+
+ void GetStats(AllocatorStatCounters s) const {
+ stats_.Get(s);
+ }
+
+ void PrintStats() {
+ primary_.PrintStats();
+ secondary_.PrintStats();
+ }
+
+ // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
+ // introspection API.
+ void ForceLock() {
+ primary_.ForceLock();
+ secondary_.ForceLock();
+ }
+
+ void ForceUnlock() {
+ secondary_.ForceUnlock();
+ primary_.ForceUnlock();
+ }
+
+ // Iterate over all existing chunks.
+ // The allocator must be locked when calling this function.
+ void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+ primary_.ForEachChunk(callback, arg);
+ secondary_.ForEachChunk(callback, arg);
+ }
+
+ private:
+ PrimaryAllocator primary_;
+ SecondaryAllocator secondary_;
+ AllocatorGlobalStats stats_;
+ atomic_uint8_t may_return_null_;
+ atomic_uint8_t rss_limit_is_exceeded_;
+};
+
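To see how the pieces of this class fit together: a tool instantiates CombinedAllocator with a primary, the primary's per-thread cache type, and a secondary. A minimal wiring sketch, assuming the Params-struct convention of the SizeClassAllocator64 introduced later in this patch (ExampleAP64, the 1T space size, and the call sites are illustrative, not part of the patch):

  struct ExampleAP64 {
    static const uptr kSpaceBeg = ~(uptr)0;          // Base picked dynamically by mmap.
    static const uptr kSpaceSize = 0x10000000000ULL; // 1T; regions of 1T/64 = 16G.
    static const uptr kMetadataSize = 0;
    typedef DefaultSizeClassMap SizeClassMap;
    typedef NoOpMapUnmapCallback MapUnmapCallback;
    static const uptr kFlags = 0;
  };
  typedef SizeClassAllocator64<ExampleAP64> Primary;
  typedef SizeClassAllocator64LocalCache<Primary> Cache;
  typedef LargeMmapAllocator<> Secondary;
  typedef CombinedAllocator<Primary, Cache, Secondary> Allocator;

  static Allocator allocator;      // Global instance; Init() call omitted here.
  static THREADLOCAL Cache cache;  // One cache per thread.

  void Example() {
    allocator.InitCache(&cache);   // Registers this thread's stats.
    void *p = allocator.Allocate(&cache, 100, /*alignment=*/8);
    allocator.Deallocate(&cache, p);
  }
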
diff --git a/lib/sanitizer_common/sanitizer_allocator_interface.h b/lib/sanitizer_common/sanitizer_allocator_interface.h
index 797c38a79885..5ff6edba0a1a 100644
--- a/lib/sanitizer_common/sanitizer_allocator_interface.h
+++ b/lib/sanitizer_common/sanitizer_allocator_interface.h
@@ -37,6 +37,10 @@ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
/* OPTIONAL */ void __sanitizer_malloc_hook(void *ptr, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
/* OPTIONAL */ void __sanitizer_free_hook(void *ptr);
+
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_print_memory_profile(int top_percent);
} // extern "C"
#endif // SANITIZER_ALLOCATOR_INTERFACE_H
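Because the new entry point is weak and optional, client code can probe for it at run time before calling it. A minimal consumer sketch (not part of the patch; the 90% cut-off is arbitrary):

  extern "C" __attribute__((weak))
  void __sanitizer_print_memory_profile(int top_percent);

  void MaybeDumpHeapProfile() {
    if (&__sanitizer_print_memory_profile)   // Null if no tool provides it.
      __sanitizer_print_memory_profile(90);  // Allocations covering 90% of the heap.
  }
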
diff --git a/lib/sanitizer_common/sanitizer_allocator_internal.h b/lib/sanitizer_common/sanitizer_allocator_internal.h
index a7ea454ff17b..e939cbe01c3c 100644
--- a/lib/sanitizer_common/sanitizer_allocator_internal.h
+++ b/lib/sanitizer_common/sanitizer_allocator_internal.h
@@ -61,8 +61,8 @@ enum InternalAllocEnum {
} // namespace __sanitizer
inline void *operator new(__sanitizer::operator_new_size_type size,
- InternalAllocEnum) {
- return InternalAlloc(size);
+ __sanitizer::InternalAllocEnum) {
+ return __sanitizer::InternalAlloc(size);
}
#endif // SANITIZER_ALLOCATOR_INTERNAL_H
diff --git a/lib/sanitizer_common/sanitizer_allocator_local_cache.h b/lib/sanitizer_common/sanitizer_allocator_local_cache.h
new file mode 100644
index 000000000000..e1172e0c2820
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_allocator_local_cache.h
@@ -0,0 +1,249 @@
+//===-- sanitizer_allocator_local_cache.h -----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+// Objects of this type should be used as local caches for SizeClassAllocator64
+// or SizeClassAllocator32. Since the typical use of this class is to have one
+// object per thread in TLS, it has to be POD.
+template<class SizeClassAllocator>
+struct SizeClassAllocatorLocalCache
+ : SizeClassAllocator::AllocatorCache {
+};
+
+// Cache used by SizeClassAllocator64.
+template <class SizeClassAllocator>
+struct SizeClassAllocator64LocalCache {
+ typedef SizeClassAllocator Allocator;
+ static const uptr kNumClasses = SizeClassAllocator::kNumClasses;
+ typedef typename Allocator::SizeClassMapT SizeClassMap;
+ typedef typename Allocator::CompactPtrT CompactPtrT;
+
+ void Init(AllocatorGlobalStats *s) {
+ stats_.Init();
+ if (s)
+ s->Register(&stats_);
+ }
+
+ void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
+ Drain(allocator);
+ if (s)
+ s->Unregister(&stats_);
+ }
+
+ void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
+ CHECK_NE(class_id, 0UL);
+ CHECK_LT(class_id, kNumClasses);
+ stats_.Add(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
+ PerClass *c = &per_class_[class_id];
+ if (UNLIKELY(c->count == 0))
+ Refill(c, allocator, class_id);
+ CHECK_GT(c->count, 0);
+ CompactPtrT chunk = c->chunks[--c->count];
+ void *res = reinterpret_cast<void *>(allocator->CompactPtrToPointer(
+ allocator->GetRegionBeginBySizeClass(class_id), chunk));
+ return res;
+ }
+
+ void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
+ CHECK_NE(class_id, 0UL);
+ CHECK_LT(class_id, kNumClasses);
+ // If the first allocator call on a new thread is a deallocation, then
+    // max_count will be zero, leading to a check failure.
+ InitCache();
+ stats_.Sub(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
+ PerClass *c = &per_class_[class_id];
+ CHECK_NE(c->max_count, 0UL);
+ if (UNLIKELY(c->count == c->max_count))
+ Drain(c, allocator, class_id, c->max_count / 2);
+ CompactPtrT chunk = allocator->PointerToCompactPtr(
+ allocator->GetRegionBeginBySizeClass(class_id),
+ reinterpret_cast<uptr>(p));
+ c->chunks[c->count++] = chunk;
+ }
+
+ void Drain(SizeClassAllocator *allocator) {
+ for (uptr class_id = 0; class_id < kNumClasses; class_id++) {
+ PerClass *c = &per_class_[class_id];
+ while (c->count > 0)
+ Drain(c, allocator, class_id, c->count);
+ }
+ }
+
+ // private:
+ struct PerClass {
+ u32 count;
+ u32 max_count;
+ CompactPtrT chunks[2 * SizeClassMap::kMaxNumCachedHint];
+ };
+ PerClass per_class_[kNumClasses];
+ AllocatorStats stats_;
+
+ void InitCache() {
+ if (per_class_[1].max_count)
+ return;
+ for (uptr i = 0; i < kNumClasses; i++) {
+ PerClass *c = &per_class_[i];
+ c->max_count = 2 * SizeClassMap::MaxCachedHint(i);
+ }
+ }
+
+ NOINLINE void Refill(PerClass *c, SizeClassAllocator *allocator,
+ uptr class_id) {
+ InitCache();
+ uptr num_requested_chunks = SizeClassMap::MaxCachedHint(class_id);
+ allocator->GetFromAllocator(&stats_, class_id, c->chunks,
+ num_requested_chunks);
+ c->count = num_requested_chunks;
+ }
+
+ NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator, uptr class_id,
+ uptr count) {
+ InitCache();
+ CHECK_GE(c->count, count);
+ uptr first_idx_to_drain = c->count - count;
+ c->count -= count;
+ allocator->ReturnToAllocator(&stats_, class_id,
+ &c->chunks[first_idx_to_drain], count);
+ }
+};
+
+// Cache used by SizeClassAllocator32.
+template <class SizeClassAllocator>
+struct SizeClassAllocator32LocalCache {
+ typedef SizeClassAllocator Allocator;
+ typedef typename Allocator::TransferBatch TransferBatch;
+ static const uptr kNumClasses = SizeClassAllocator::kNumClasses;
+
+ void Init(AllocatorGlobalStats *s) {
+ stats_.Init();
+ if (s)
+ s->Register(&stats_);
+ }
+
+ void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
+ Drain(allocator);
+ if (s)
+ s->Unregister(&stats_);
+ }
+
+ void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
+ CHECK_NE(class_id, 0UL);
+ CHECK_LT(class_id, kNumClasses);
+ stats_.Add(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
+ PerClass *c = &per_class_[class_id];
+ if (UNLIKELY(c->count == 0))
+ Refill(allocator, class_id);
+ void *res = c->batch[--c->count];
+ PREFETCH(c->batch[c->count - 1]);
+ return res;
+ }
+
+ void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
+ CHECK_NE(class_id, 0UL);
+ CHECK_LT(class_id, kNumClasses);
+ // If the first allocator call on a new thread is a deallocation, then
+    // max_count will be zero, leading to a check failure.
+ InitCache();
+ stats_.Sub(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
+ PerClass *c = &per_class_[class_id];
+ CHECK_NE(c->max_count, 0UL);
+ if (UNLIKELY(c->count == c->max_count))
+ Drain(allocator, class_id);
+ c->batch[c->count++] = p;
+ }
+
+ void Drain(SizeClassAllocator *allocator) {
+ for (uptr class_id = 0; class_id < kNumClasses; class_id++) {
+ PerClass *c = &per_class_[class_id];
+ while (c->count > 0)
+ Drain(allocator, class_id);
+ }
+ }
+
+ // private:
+ typedef typename SizeClassAllocator::SizeClassMapT SizeClassMap;
+ struct PerClass {
+ uptr count;
+ uptr max_count;
+ void *batch[2 * TransferBatch::kMaxNumCached];
+ };
+ PerClass per_class_[kNumClasses];
+ AllocatorStats stats_;
+
+ void InitCache() {
+ if (per_class_[1].max_count)
+ return;
+ for (uptr i = 0; i < kNumClasses; i++) {
+ PerClass *c = &per_class_[i];
+ c->max_count = 2 * TransferBatch::MaxCached(i);
+ }
+ }
+
+ // TransferBatch class is declared in SizeClassAllocator.
+ // We transfer chunks between central and thread-local free lists in batches.
+ // For small size classes we allocate batches separately.
+ // For large size classes we may use one of the chunks to store the batch.
+ // sizeof(TransferBatch) must be a power of 2 for more efficient allocation.
+ static uptr SizeClassForTransferBatch(uptr class_id) {
+ if (Allocator::ClassIdToSize(class_id) <
+ TransferBatch::AllocationSizeRequiredForNElements(
+ TransferBatch::MaxCached(class_id)))
+ return SizeClassMap::ClassID(sizeof(TransferBatch));
+ return 0;
+ }
+
+ // Returns a TransferBatch suitable for class_id.
+ // For small size classes allocates the batch from the allocator.
+ // For large size classes simply returns b.
+ TransferBatch *CreateBatch(uptr class_id, SizeClassAllocator *allocator,
+ TransferBatch *b) {
+ if (uptr batch_class_id = SizeClassForTransferBatch(class_id))
+ return (TransferBatch*)Allocate(allocator, batch_class_id);
+ return b;
+ }
+
+ // Destroys TransferBatch b.
+ // For small size classes deallocates b to the allocator.
+  // Does nothing for large size classes.
+ void DestroyBatch(uptr class_id, SizeClassAllocator *allocator,
+ TransferBatch *b) {
+ if (uptr batch_class_id = SizeClassForTransferBatch(class_id))
+ Deallocate(allocator, batch_class_id, b);
+ }
+
+ NOINLINE void Refill(SizeClassAllocator *allocator, uptr class_id) {
+ InitCache();
+ PerClass *c = &per_class_[class_id];
+ TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
+ CHECK_GT(b->Count(), 0);
+ b->CopyToArray(c->batch);
+ c->count = b->Count();
+ DestroyBatch(class_id, allocator, b);
+ }
+
+ NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) {
+ InitCache();
+ PerClass *c = &per_class_[class_id];
+ uptr cnt = Min(c->max_count / 2, c->count);
+ uptr first_idx_to_drain = c->count - cnt;
+ TransferBatch *b = CreateBatch(
+ class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
+ b->SetFromArray(allocator->GetRegionBeginBySizeClass(class_id),
+ &c->batch[first_idx_to_drain], cnt);
+ c->count -= cnt;
+ allocator->DeallocateBatch(&stats_, class_id, b);
+ }
+};
+
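Both cache flavors share one lifecycle: Init() registers the thread's stats, Allocate()/Deallocate() work against the per-class arrays with Refill()/Drain() as slow paths, and Destroy() drains everything back to the central allocator. A per-thread sketch, reusing the illustrative Primary/Cache typedefs from the combined-allocator example above:

  void OnThreadStart(Primary *a, AllocatorGlobalStats *stats, Cache *c) {
    c->Init(stats);        // Register this thread's stats in the global list.
  }

  void *Alloc16(Primary *a, Cache *c) {
    // The first allocation per class triggers Refill() from the allocator.
    return c->Allocate(a, a->ClassID(16));
  }

  void OnThreadFinish(Primary *a, AllocatorGlobalStats *stats, Cache *c) {
    c->Destroy(a, stats);  // Drain() everything back, then unregister.
  }
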
diff --git a/lib/sanitizer_common/sanitizer_allocator_primary32.h b/lib/sanitizer_common/sanitizer_allocator_primary32.h
new file mode 100644
index 000000000000..2882afd1fe1d
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_allocator_primary32.h
@@ -0,0 +1,310 @@
+//===-- sanitizer_allocator_primary32.h -------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+template<class SizeClassAllocator> struct SizeClassAllocator32LocalCache;
+
+// SizeClassAllocator32 -- allocator for 32-bit address space.
+// This allocator can theoretically be used on 64-bit arch, but there it is less
+// efficient than SizeClassAllocator64.
+//
+// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
+// be returned by MmapOrDie().
+//
+// Region:
+// a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
+// Since the regions are aligned by kRegionSize, there are exactly
+// kNumPossibleRegions possible regions in the address space and so we keep
+// a ByteMap possible_regions to store the size class of each Region.
+// 0 size class means the region is not used by the allocator.
+//
+// One Region is used to allocate chunks of a single size class.
+// A Region looks like this:
+// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
+//
+// In order to avoid false sharing the objects of this class should be
+// cache-line aligned.
+template <const uptr kSpaceBeg, const u64 kSpaceSize,
+ const uptr kMetadataSize, class SizeClassMap,
+ const uptr kRegionSizeLog,
+ class ByteMap,
+ class MapUnmapCallback = NoOpMapUnmapCallback>
+class SizeClassAllocator32 {
+ public:
+ struct TransferBatch {
+ static const uptr kMaxNumCached = SizeClassMap::kMaxNumCachedHint - 2;
+ void SetFromArray(uptr region_beg_unused, void *batch[], uptr count) {
+ count_ = count;
+ CHECK_LE(count_, kMaxNumCached);
+ for (uptr i = 0; i < count; i++)
+ batch_[i] = batch[i];
+ }
+ uptr Count() const { return count_; }
+ void Clear() { count_ = 0; }
+ void Add(void *ptr) {
+ batch_[count_++] = ptr;
+ CHECK_LE(count_, kMaxNumCached);
+ }
+ void CopyToArray(void *to_batch[]) {
+ for (uptr i = 0, n = Count(); i < n; i++)
+ to_batch[i] = batch_[i];
+ }
+
+    // Returns the amount of memory needed for a batch containing n elements.
+ static uptr AllocationSizeRequiredForNElements(uptr n) {
+ return sizeof(uptr) * 2 + sizeof(void *) * n;
+ }
+ static uptr MaxCached(uptr class_id) {
+ return Min(kMaxNumCached, SizeClassMap::MaxCachedHint(class_id));
+ }
+
+ TransferBatch *next;
+
+ private:
+ uptr count_;
+ void *batch_[kMaxNumCached];
+ };
+
+ static const uptr kBatchSize = sizeof(TransferBatch);
+ COMPILER_CHECK((kBatchSize & (kBatchSize - 1)) == 0);
+ COMPILER_CHECK(sizeof(TransferBatch) ==
+ SizeClassMap::kMaxNumCachedHint * sizeof(uptr));
+
+ static uptr ClassIdToSize(uptr class_id) {
+ return SizeClassMap::Size(class_id);
+ }
+
+ typedef SizeClassAllocator32<kSpaceBeg, kSpaceSize, kMetadataSize,
+ SizeClassMap, kRegionSizeLog, ByteMap, MapUnmapCallback> ThisT;
+ typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache;
+
+ void Init(s32 release_to_os_interval_ms) {
+ possible_regions.TestOnlyInit();
+ internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
+ }
+
+ s32 ReleaseToOSIntervalMs() const {
+ return kReleaseToOSIntervalNever;
+ }
+
+ void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
+ // This is empty here. Currently only implemented in 64-bit allocator.
+ }
+
+ void *MapWithCallback(uptr size) {
+ size = RoundUpTo(size, GetPageSizeCached());
+ void *res = MmapOrDie(size, "SizeClassAllocator32");
+ MapUnmapCallback().OnMap((uptr)res, size);
+ return res;
+ }
+
+ void UnmapWithCallback(uptr beg, uptr size) {
+ MapUnmapCallback().OnUnmap(beg, size);
+ UnmapOrDie(reinterpret_cast<void *>(beg), size);
+ }
+
+ static bool CanAllocate(uptr size, uptr alignment) {
+ return size <= SizeClassMap::kMaxSize &&
+ alignment <= SizeClassMap::kMaxSize;
+ }
+
+ void *GetMetaData(const void *p) {
+ CHECK(PointerIsMine(p));
+ uptr mem = reinterpret_cast<uptr>(p);
+ uptr beg = ComputeRegionBeg(mem);
+ uptr size = ClassIdToSize(GetSizeClass(p));
+ u32 offset = mem - beg;
+ uptr n = offset / (u32)size; // 32-bit division
+ uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
+ return reinterpret_cast<void*>(meta);
+ }
+
+ NOINLINE TransferBatch *AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
+ uptr class_id) {
+ CHECK_LT(class_id, kNumClasses);
+ SizeClassInfo *sci = GetSizeClassInfo(class_id);
+ SpinMutexLock l(&sci->mutex);
+ if (sci->free_list.empty())
+ PopulateFreeList(stat, c, sci, class_id);
+ CHECK(!sci->free_list.empty());
+ TransferBatch *b = sci->free_list.front();
+ sci->free_list.pop_front();
+ return b;
+ }
+
+ NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id,
+ TransferBatch *b) {
+ CHECK_LT(class_id, kNumClasses);
+ SizeClassInfo *sci = GetSizeClassInfo(class_id);
+ SpinMutexLock l(&sci->mutex);
+ CHECK_GT(b->Count(), 0);
+ sci->free_list.push_front(b);
+ }
+
+ uptr GetRegionBeginBySizeClass(uptr class_id) { return 0; }
+
+ bool PointerIsMine(const void *p) {
+ uptr mem = reinterpret_cast<uptr>(p);
+ if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize)
+ return false;
+ return GetSizeClass(p) != 0;
+ }
+
+ uptr GetSizeClass(const void *p) {
+ return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
+ }
+
+ void *GetBlockBegin(const void *p) {
+ CHECK(PointerIsMine(p));
+ uptr mem = reinterpret_cast<uptr>(p);
+ uptr beg = ComputeRegionBeg(mem);
+ uptr size = ClassIdToSize(GetSizeClass(p));
+ u32 offset = mem - beg;
+ u32 n = offset / (u32)size; // 32-bit division
+ uptr res = beg + (n * (u32)size);
+ return reinterpret_cast<void*>(res);
+ }
+
+ uptr GetActuallyAllocatedSize(void *p) {
+ CHECK(PointerIsMine(p));
+ return ClassIdToSize(GetSizeClass(p));
+ }
+
+ uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
+
+ uptr TotalMemoryUsed() {
+ // No need to lock here.
+ uptr res = 0;
+ for (uptr i = 0; i < kNumPossibleRegions; i++)
+ if (possible_regions[i])
+ res += kRegionSize;
+ return res;
+ }
+
+ void TestOnlyUnmap() {
+ for (uptr i = 0; i < kNumPossibleRegions; i++)
+ if (possible_regions[i])
+ UnmapWithCallback((i * kRegionSize), kRegionSize);
+ }
+
+ // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
+ // introspection API.
+ void ForceLock() {
+ for (uptr i = 0; i < kNumClasses; i++) {
+ GetSizeClassInfo(i)->mutex.Lock();
+ }
+ }
+
+ void ForceUnlock() {
+ for (int i = kNumClasses - 1; i >= 0; i--) {
+ GetSizeClassInfo(i)->mutex.Unlock();
+ }
+ }
+
+ // Iterate over all existing chunks.
+ // The allocator must be locked when calling this function.
+ void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+ for (uptr region = 0; region < kNumPossibleRegions; region++)
+ if (possible_regions[region]) {
+ uptr chunk_size = ClassIdToSize(possible_regions[region]);
+ uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
+ uptr region_beg = region * kRegionSize;
+ for (uptr chunk = region_beg;
+ chunk < region_beg + max_chunks_in_region * chunk_size;
+ chunk += chunk_size) {
+ // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
+ callback(chunk, arg);
+ }
+ }
+ }
+
+ void PrintStats() {
+ }
+
+ static uptr AdditionalSize() {
+ return 0;
+ }
+
+ typedef SizeClassMap SizeClassMapT;
+ static const uptr kNumClasses = SizeClassMap::kNumClasses;
+
+ private:
+ static const uptr kRegionSize = 1 << kRegionSizeLog;
+ static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;
+
+ struct SizeClassInfo {
+ SpinMutex mutex;
+ IntrusiveList<TransferBatch> free_list;
+ char padding[kCacheLineSize - sizeof(uptr) -
+ sizeof(IntrusiveList<TransferBatch>)];
+ };
+ COMPILER_CHECK(sizeof(SizeClassInfo) == kCacheLineSize);
+
+ uptr ComputeRegionId(uptr mem) {
+ uptr res = mem >> kRegionSizeLog;
+ CHECK_LT(res, kNumPossibleRegions);
+ return res;
+ }
+
+ uptr ComputeRegionBeg(uptr mem) {
+ return mem & ~(kRegionSize - 1);
+ }
+
+ uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
+ CHECK_LT(class_id, kNumClasses);
+ uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
+ "SizeClassAllocator32"));
+ MapUnmapCallback().OnMap(res, kRegionSize);
+ stat->Add(AllocatorStatMapped, kRegionSize);
+ CHECK_EQ(0U, (res & (kRegionSize - 1)));
+ possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
+ return res;
+ }
+
+ SizeClassInfo *GetSizeClassInfo(uptr class_id) {
+ CHECK_LT(class_id, kNumClasses);
+ return &size_class_info_array[class_id];
+ }
+
+ void PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
+ SizeClassInfo *sci, uptr class_id) {
+ uptr size = ClassIdToSize(class_id);
+ uptr reg = AllocateRegion(stat, class_id);
+ uptr n_chunks = kRegionSize / (size + kMetadataSize);
+ uptr max_count = TransferBatch::MaxCached(class_id);
+ TransferBatch *b = nullptr;
+ for (uptr i = reg; i < reg + n_chunks * size; i += size) {
+ if (!b) {
+ b = c->CreateBatch(class_id, this, (TransferBatch*)i);
+ b->Clear();
+ }
+ b->Add((void*)i);
+ if (b->Count() == max_count) {
+ CHECK_GT(b->Count(), 0);
+ sci->free_list.push_back(b);
+ b = nullptr;
+ }
+ }
+ if (b) {
+ CHECK_GT(b->Count(), 0);
+ sci->free_list.push_back(b);
+ }
+ }
+
+ ByteMap possible_regions;
+ SizeClassInfo size_class_info_array[kNumClasses];
+};
+
+
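A typical instantiation covers the whole 32-bit address space and keeps one byte of class id per region. A minimal sketch, assuming FlatByteMap and SANITIZER_MMAP_RANGE_SIZE from elsewhere in sanitizer_common (the 2^20 region size and the names are illustrative):

  static const uptr kRegionSizeLog32 = 20;  // 1M regions.
  typedef FlatByteMap<(SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog32)> ByteMap32;
  typedef SizeClassAllocator32</*kSpaceBeg=*/0, SANITIZER_MMAP_RANGE_SIZE,
                               /*kMetadataSize=*/16, CompactSizeClassMap,
                               kRegionSizeLog32, ByteMap32> Primary32;
  typedef SizeClassAllocator32LocalCache<Primary32> Cache32;
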
diff --git a/lib/sanitizer_common/sanitizer_allocator_primary64.h b/lib/sanitizer_common/sanitizer_allocator_primary64.h
new file mode 100644
index 000000000000..f2d94a07a523
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_allocator_primary64.h
@@ -0,0 +1,522 @@
+//===-- sanitizer_allocator_primary64.h -------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+template<class SizeClassAllocator> struct SizeClassAllocator64LocalCache;
+
+// SizeClassAllocator64 -- allocator for 64-bit address space.
+// The template parameter Params is a class containing the actual parameters.
+//
+// Space: a portion of address space of kSpaceSize bytes starting at SpaceBeg.
+// If kSpaceBeg is ~0 then SpaceBeg is chosen dynamically by mmap.
+// Otherwise SpaceBeg=kSpaceBeg (fixed address).
+// kSpaceSize is a power of two.
+// At the beginning the entire space is mprotect-ed, then small parts of it
+// are mapped on demand.
+//
+// Region: a part of Space dedicated to a single size class.
+// There are kNumClasses Regions of equal size.
+//
+// UserChunk: a piece of memory returned to user.
+// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
+
+// FreeArray is an array of free-d chunks (stored as 4-byte offsets).
+//
+// A Region looks like this:
+// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1 FreeArray
+
+struct SizeClassAllocator64FlagMasks { // Bit masks.
+ enum {
+ kRandomShuffleChunks = 1,
+ };
+};
+
+template <class Params>
+class SizeClassAllocator64 {
+ public:
+ static const uptr kSpaceBeg = Params::kSpaceBeg;
+ static const uptr kSpaceSize = Params::kSpaceSize;
+ static const uptr kMetadataSize = Params::kMetadataSize;
+ typedef typename Params::SizeClassMap SizeClassMap;
+ typedef typename Params::MapUnmapCallback MapUnmapCallback;
+
+ static const bool kRandomShuffleChunks =
+ Params::kFlags & SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
+
+ typedef SizeClassAllocator64<Params> ThisT;
+ typedef SizeClassAllocator64LocalCache<ThisT> AllocatorCache;
+
+  // When we know the size class (and hence the region base) we can represent a pointer
+ // as a 4-byte integer (offset from the region start shifted right by 4).
+ typedef u32 CompactPtrT;
+ static const uptr kCompactPtrScale = 4;
+ CompactPtrT PointerToCompactPtr(uptr base, uptr ptr) {
+ return static_cast<CompactPtrT>((ptr - base) >> kCompactPtrScale);
+ }
+ uptr CompactPtrToPointer(uptr base, CompactPtrT ptr32) {
+ return base + (static_cast<uptr>(ptr32) << kCompactPtrScale);
+ }
+
+ void Init(s32 release_to_os_interval_ms) {
+ uptr TotalSpaceSize = kSpaceSize + AdditionalSize();
+ if (kUsingConstantSpaceBeg) {
+ CHECK_EQ(kSpaceBeg, reinterpret_cast<uptr>(
+ MmapFixedNoAccess(kSpaceBeg, TotalSpaceSize)));
+ } else {
+ NonConstSpaceBeg =
+ reinterpret_cast<uptr>(MmapNoAccess(TotalSpaceSize));
+ CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
+ }
+ SetReleaseToOSIntervalMs(release_to_os_interval_ms);
+ MapWithCallback(SpaceEnd(), AdditionalSize());
+ }
+
+ s32 ReleaseToOSIntervalMs() const {
+ return atomic_load(&release_to_os_interval_ms_, memory_order_relaxed);
+ }
+
+ void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
+ atomic_store(&release_to_os_interval_ms_, release_to_os_interval_ms,
+ memory_order_relaxed);
+ }
+
+ void MapWithCallback(uptr beg, uptr size) {
+ CHECK_EQ(beg, reinterpret_cast<uptr>(MmapFixedOrDie(beg, size)));
+ MapUnmapCallback().OnMap(beg, size);
+ }
+
+ void UnmapWithCallback(uptr beg, uptr size) {
+ MapUnmapCallback().OnUnmap(beg, size);
+ UnmapOrDie(reinterpret_cast<void *>(beg), size);
+ }
+
+ static bool CanAllocate(uptr size, uptr alignment) {
+ return size <= SizeClassMap::kMaxSize &&
+ alignment <= SizeClassMap::kMaxSize;
+ }
+
+ NOINLINE void ReturnToAllocator(AllocatorStats *stat, uptr class_id,
+ const CompactPtrT *chunks, uptr n_chunks) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ uptr region_beg = GetRegionBeginBySizeClass(class_id);
+ CompactPtrT *free_array = GetFreeArray(region_beg);
+
+ BlockingMutexLock l(&region->mutex);
+ uptr old_num_chunks = region->num_freed_chunks;
+ uptr new_num_freed_chunks = old_num_chunks + n_chunks;
+ EnsureFreeArraySpace(region, region_beg, new_num_freed_chunks);
+ for (uptr i = 0; i < n_chunks; i++)
+ free_array[old_num_chunks + i] = chunks[i];
+ region->num_freed_chunks = new_num_freed_chunks;
+ region->n_freed += n_chunks;
+
+ MaybeReleaseToOS(class_id);
+ }
+
+ NOINLINE void GetFromAllocator(AllocatorStats *stat, uptr class_id,
+ CompactPtrT *chunks, uptr n_chunks) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ uptr region_beg = GetRegionBeginBySizeClass(class_id);
+ CompactPtrT *free_array = GetFreeArray(region_beg);
+
+ BlockingMutexLock l(&region->mutex);
+ if (UNLIKELY(region->num_freed_chunks < n_chunks)) {
+ PopulateFreeArray(stat, class_id, region,
+ n_chunks - region->num_freed_chunks);
+ CHECK_GE(region->num_freed_chunks, n_chunks);
+ }
+ region->num_freed_chunks -= n_chunks;
+ uptr base_idx = region->num_freed_chunks;
+ for (uptr i = 0; i < n_chunks; i++)
+ chunks[i] = free_array[base_idx + i];
+ region->n_allocated += n_chunks;
+ }
+
+
+ bool PointerIsMine(const void *p) {
+ uptr P = reinterpret_cast<uptr>(p);
+ if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
+ return P / kSpaceSize == kSpaceBeg / kSpaceSize;
+ return P >= SpaceBeg() && P < SpaceEnd();
+ }
+
+ uptr GetRegionBegin(const void *p) {
+ if (kUsingConstantSpaceBeg)
+ return reinterpret_cast<uptr>(p) & ~(kRegionSize - 1);
+ uptr space_beg = SpaceBeg();
+ return ((reinterpret_cast<uptr>(p) - space_beg) & ~(kRegionSize - 1)) +
+ space_beg;
+ }
+
+ uptr GetRegionBeginBySizeClass(uptr class_id) {
+ return SpaceBeg() + kRegionSize * class_id;
+ }
+
+ uptr GetSizeClass(const void *p) {
+ if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
+ return ((reinterpret_cast<uptr>(p)) / kRegionSize) % kNumClassesRounded;
+ return ((reinterpret_cast<uptr>(p) - SpaceBeg()) / kRegionSize) %
+ kNumClassesRounded;
+ }
+
+ void *GetBlockBegin(const void *p) {
+ uptr class_id = GetSizeClass(p);
+ uptr size = ClassIdToSize(class_id);
+ if (!size) return nullptr;
+ uptr chunk_idx = GetChunkIdx((uptr)p, size);
+ uptr reg_beg = GetRegionBegin(p);
+ uptr beg = chunk_idx * size;
+ uptr next_beg = beg + size;
+ if (class_id >= kNumClasses) return nullptr;
+ RegionInfo *region = GetRegionInfo(class_id);
+ if (region->mapped_user >= next_beg)
+ return reinterpret_cast<void*>(reg_beg + beg);
+ return nullptr;
+ }
+
+ uptr GetActuallyAllocatedSize(void *p) {
+ CHECK(PointerIsMine(p));
+ return ClassIdToSize(GetSizeClass(p));
+ }
+
+ uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
+
+ void *GetMetaData(const void *p) {
+ uptr class_id = GetSizeClass(p);
+ uptr size = ClassIdToSize(class_id);
+ uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
+ uptr region_beg = GetRegionBeginBySizeClass(class_id);
+ return reinterpret_cast<void *>(GetMetadataEnd(region_beg) -
+ (1 + chunk_idx) * kMetadataSize);
+ }
+
+ uptr TotalMemoryUsed() {
+ uptr res = 0;
+ for (uptr i = 0; i < kNumClasses; i++)
+ res += GetRegionInfo(i)->allocated_user;
+ return res;
+ }
+
+ // Test-only.
+ void TestOnlyUnmap() {
+ UnmapWithCallback(SpaceBeg(), kSpaceSize + AdditionalSize());
+ }
+
+ static void FillMemoryProfile(uptr start, uptr rss, bool file, uptr *stats,
+ uptr stats_size) {
+ for (uptr class_id = 0; class_id < stats_size; class_id++)
+ if (stats[class_id] == start)
+ stats[class_id] = rss;
+ }
+
+ void PrintStats(uptr class_id, uptr rss) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ if (region->mapped_user == 0) return;
+ uptr in_use = region->n_allocated - region->n_freed;
+ uptr avail_chunks = region->allocated_user / ClassIdToSize(class_id);
+ Printf(
+ " %02zd (%zd): mapped: %zdK allocs: %zd frees: %zd inuse: %zd "
+ "num_freed_chunks %zd"
+ " avail: %zd rss: %zdK releases: %zd\n",
+ class_id, ClassIdToSize(class_id), region->mapped_user >> 10,
+ region->n_allocated, region->n_freed, in_use,
+ region->num_freed_chunks, avail_chunks, rss >> 10,
+ region->rtoi.num_releases);
+ }
+
+ void PrintStats() {
+ uptr total_mapped = 0;
+ uptr n_allocated = 0;
+ uptr n_freed = 0;
+ for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ total_mapped += region->mapped_user;
+ n_allocated += region->n_allocated;
+ n_freed += region->n_freed;
+ }
+ Printf("Stats: SizeClassAllocator64: %zdM mapped in %zd allocations; "
+ "remains %zd\n",
+ total_mapped >> 20, n_allocated, n_allocated - n_freed);
+ uptr rss_stats[kNumClasses];
+ for (uptr class_id = 0; class_id < kNumClasses; class_id++)
+ rss_stats[class_id] = SpaceBeg() + kRegionSize * class_id;
+ GetMemoryProfile(FillMemoryProfile, rss_stats, kNumClasses);
+ for (uptr class_id = 1; class_id < kNumClasses; class_id++)
+ PrintStats(class_id, rss_stats[class_id]);
+ }
+
+ // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
+ // introspection API.
+ void ForceLock() {
+ for (uptr i = 0; i < kNumClasses; i++) {
+ GetRegionInfo(i)->mutex.Lock();
+ }
+ }
+
+ void ForceUnlock() {
+ for (int i = (int)kNumClasses - 1; i >= 0; i--) {
+ GetRegionInfo(i)->mutex.Unlock();
+ }
+ }
+
+ // Iterate over all existing chunks.
+ // The allocator must be locked when calling this function.
+ void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+ for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ uptr chunk_size = ClassIdToSize(class_id);
+ uptr region_beg = SpaceBeg() + class_id * kRegionSize;
+ for (uptr chunk = region_beg;
+ chunk < region_beg + region->allocated_user;
+ chunk += chunk_size) {
+ // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
+ callback(chunk, arg);
+ }
+ }
+ }
+
+ static uptr ClassIdToSize(uptr class_id) {
+ return SizeClassMap::Size(class_id);
+ }
+
+ static uptr AdditionalSize() {
+ return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
+ GetPageSizeCached());
+ }
+
+ typedef SizeClassMap SizeClassMapT;
+ static const uptr kNumClasses = SizeClassMap::kNumClasses;
+ static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;
+
+ private:
+ static const uptr kRegionSize = kSpaceSize / kNumClassesRounded;
+ // FreeArray is the array of free-d chunks (stored as 4-byte offsets).
+  // In the worst case it may require kRegionSize/SizeClassMap::kMinSize
+ // elements, but in reality this will not happen. For simplicity we
+ // dedicate 1/8 of the region's virtual space to FreeArray.
+ static const uptr kFreeArraySize = kRegionSize / 8;
+
+ static const bool kUsingConstantSpaceBeg = kSpaceBeg != ~(uptr)0;
+ uptr NonConstSpaceBeg;
+ uptr SpaceBeg() const {
+ return kUsingConstantSpaceBeg ? kSpaceBeg : NonConstSpaceBeg;
+ }
+ uptr SpaceEnd() const { return SpaceBeg() + kSpaceSize; }
+ // kRegionSize must be >= 2^32.
+ COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
+ // kRegionSize must be <= 2^36, see CompactPtrT.
+ COMPILER_CHECK((kRegionSize) <= (1ULL << (SANITIZER_WORDSIZE / 2 + 4)));
+ // Call mmap for user memory with at least this size.
+ static const uptr kUserMapSize = 1 << 16;
+ // Call mmap for metadata memory with at least this size.
+ static const uptr kMetaMapSize = 1 << 16;
+ // Call mmap for free array memory with at least this size.
+ static const uptr kFreeArrayMapSize = 1 << 16;
+
+ atomic_sint32_t release_to_os_interval_ms_;
+
+ struct ReleaseToOsInfo {
+ uptr n_freed_at_last_release;
+ uptr num_releases;
+ u64 last_release_at_ns;
+ };
+
+ struct RegionInfo {
+ BlockingMutex mutex;
+ uptr num_freed_chunks; // Number of elements in the freearray.
+ uptr mapped_free_array; // Bytes mapped for freearray.
+ uptr allocated_user; // Bytes allocated for user memory.
+ uptr allocated_meta; // Bytes allocated for metadata.
+ uptr mapped_user; // Bytes mapped for user memory.
+ uptr mapped_meta; // Bytes mapped for metadata.
+ u32 rand_state; // Seed for random shuffle, used if kRandomShuffleChunks.
+ uptr n_allocated, n_freed; // Just stats.
+ ReleaseToOsInfo rtoi;
+ };
+ COMPILER_CHECK(sizeof(RegionInfo) >= kCacheLineSize);
+
+ u32 Rand(u32 *state) { // ANSI C linear congruential PRNG.
+ return (*state = *state * 1103515245 + 12345) >> 16;
+ }
+
+ u32 RandN(u32 *state, u32 n) { return Rand(state) % n; } // [0, n)
+
+ void RandomShuffle(u32 *a, u32 n, u32 *rand_state) {
+ if (n <= 1) return;
+ for (u32 i = n - 1; i > 0; i--)
+ Swap(a[i], a[RandN(rand_state, i + 1)]);
+ }
+
+ RegionInfo *GetRegionInfo(uptr class_id) {
+ CHECK_LT(class_id, kNumClasses);
+ RegionInfo *regions =
+ reinterpret_cast<RegionInfo *>(SpaceBeg() + kSpaceSize);
+ return &regions[class_id];
+ }
+
+ uptr GetMetadataEnd(uptr region_beg) {
+ return region_beg + kRegionSize - kFreeArraySize;
+ }
+
+ uptr GetChunkIdx(uptr chunk, uptr size) {
+ if (!kUsingConstantSpaceBeg)
+ chunk -= SpaceBeg();
+
+ uptr offset = chunk % kRegionSize;
+ // Here we divide by a non-constant. This is costly.
+    // size always fits into 32 bits. If the offset fits too, use 32-bit div.
+ if (offset >> (SANITIZER_WORDSIZE / 2))
+ return offset / size;
+ return (u32)offset / (u32)size;
+ }
+
+ CompactPtrT *GetFreeArray(uptr region_beg) {
+ return reinterpret_cast<CompactPtrT *>(region_beg + kRegionSize -
+ kFreeArraySize);
+ }
+
+ void EnsureFreeArraySpace(RegionInfo *region, uptr region_beg,
+ uptr num_freed_chunks) {
+ uptr needed_space = num_freed_chunks * sizeof(CompactPtrT);
+ if (region->mapped_free_array < needed_space) {
+ CHECK_LE(needed_space, kFreeArraySize);
+ uptr new_mapped_free_array = RoundUpTo(needed_space, kFreeArrayMapSize);
+ uptr current_map_end = reinterpret_cast<uptr>(GetFreeArray(region_beg)) +
+ region->mapped_free_array;
+ uptr new_map_size = new_mapped_free_array - region->mapped_free_array;
+ MapWithCallback(current_map_end, new_map_size);
+ region->mapped_free_array = new_mapped_free_array;
+ }
+ }
+
+
+ NOINLINE void PopulateFreeArray(AllocatorStats *stat, uptr class_id,
+ RegionInfo *region, uptr requested_count) {
+ // region->mutex is held.
+ uptr size = ClassIdToSize(class_id);
+ uptr beg_idx = region->allocated_user;
+ uptr end_idx = beg_idx + requested_count * size;
+ uptr region_beg = GetRegionBeginBySizeClass(class_id);
+ if (end_idx > region->mapped_user) {
+ if (!kUsingConstantSpaceBeg && region->mapped_user == 0)
+ region->rand_state = static_cast<u32>(region_beg >> 12); // From ASLR.
+ // Do the mmap for the user memory.
+ uptr map_size = kUserMapSize;
+ while (end_idx > region->mapped_user + map_size)
+ map_size += kUserMapSize;
+ CHECK_GE(region->mapped_user + map_size, end_idx);
+ MapWithCallback(region_beg + region->mapped_user, map_size);
+ stat->Add(AllocatorStatMapped, map_size);
+ region->mapped_user += map_size;
+ }
+ CompactPtrT *free_array = GetFreeArray(region_beg);
+ uptr total_count = (region->mapped_user - beg_idx) / size;
+ uptr num_freed_chunks = region->num_freed_chunks;
+ EnsureFreeArraySpace(region, region_beg, num_freed_chunks + total_count);
+ for (uptr i = 0; i < total_count; i++) {
+ uptr chunk = beg_idx + i * size;
+ free_array[num_freed_chunks + total_count - 1 - i] =
+ PointerToCompactPtr(0, chunk);
+ }
+ if (kRandomShuffleChunks)
+ RandomShuffle(&free_array[num_freed_chunks], total_count,
+ &region->rand_state);
+ region->num_freed_chunks += total_count;
+ region->allocated_user += total_count * size;
+ CHECK_LE(region->allocated_user, region->mapped_user);
+
+ region->allocated_meta += total_count * kMetadataSize;
+ if (region->allocated_meta > region->mapped_meta) {
+ uptr map_size = kMetaMapSize;
+ while (region->allocated_meta > region->mapped_meta + map_size)
+ map_size += kMetaMapSize;
+ // Do the mmap for the metadata.
+ CHECK_GE(region->mapped_meta + map_size, region->allocated_meta);
+ MapWithCallback(GetMetadataEnd(region_beg) -
+ region->mapped_meta - map_size, map_size);
+ region->mapped_meta += map_size;
+ }
+ CHECK_LE(region->allocated_meta, region->mapped_meta);
+ if (region->mapped_user + region->mapped_meta >
+ kRegionSize - kFreeArraySize) {
+ Printf("%s: Out of memory. Dying. ", SanitizerToolName);
+ Printf("The process has exhausted %zuMB for size class %zu.\n",
+ kRegionSize / 1024 / 1024, size);
+ Die();
+ }
+ }
+
+ void MaybeReleaseChunkRange(uptr region_beg, uptr chunk_size,
+ CompactPtrT first, CompactPtrT last) {
+ uptr beg_ptr = CompactPtrToPointer(region_beg, first);
+ uptr end_ptr = CompactPtrToPointer(region_beg, last) + chunk_size;
+ ReleaseMemoryPagesToOS(beg_ptr, end_ptr);
+ }
+
+  // Attempts to release some RAM back to the OS. The region is expected to be
+ // locked.
+ // Algorithm:
+ // * Sort the chunks.
+ // * Find ranges fully covered by free-d chunks
+ // * Release them to OS with madvise.
+ void MaybeReleaseToOS(uptr class_id) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ const uptr chunk_size = ClassIdToSize(class_id);
+ const uptr page_size = GetPageSizeCached();
+
+ uptr n = region->num_freed_chunks;
+ if (n * chunk_size < page_size)
+ return; // No chance to release anything.
+ if ((region->n_freed - region->rtoi.n_freed_at_last_release) * chunk_size <
+ page_size) {
+ return; // Nothing new to release.
+ }
+
+ s32 interval_ms = ReleaseToOSIntervalMs();
+ if (interval_ms < 0)
+ return;
+
+ u64 now_ns = NanoTime();
+ if (region->rtoi.last_release_at_ns + interval_ms * 1000000ULL > now_ns)
+ return; // Memory was returned recently.
+ region->rtoi.last_release_at_ns = now_ns;
+
+ uptr region_beg = GetRegionBeginBySizeClass(class_id);
+ CompactPtrT *free_array = GetFreeArray(region_beg);
+ SortArray(free_array, n);
+
+ const uptr scaled_chunk_size = chunk_size >> kCompactPtrScale;
+ const uptr kScaledGranularity = page_size >> kCompactPtrScale;
+
+ uptr range_beg = free_array[0];
+ uptr prev = free_array[0];
+ for (uptr i = 1; i < n; i++) {
+ uptr chunk = free_array[i];
+ CHECK_GT(chunk, prev);
+ if (chunk - prev != scaled_chunk_size) {
+ CHECK_GT(chunk - prev, scaled_chunk_size);
+ if (prev + scaled_chunk_size - range_beg >= kScaledGranularity) {
+ MaybeReleaseChunkRange(region_beg, chunk_size, range_beg, prev);
+ region->rtoi.n_freed_at_last_release = region->n_freed;
+ region->rtoi.num_releases++;
+ }
+ range_beg = chunk;
+ }
+ prev = chunk;
+ }
+ }
+};
+
+
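Two practical notes on this class: the COMPILER_CHECKs above pin kRegionSize between 2^32 and 2^36 because a CompactPtrT is a u32 scaled by 2^kCompactPtrScale = 16 (2^32 offsets of 16-byte granularity cover exactly 2^36 bytes), and MaybeReleaseToOS() is throttled by the stored interval. A small configuration sketch, reusing the illustrative Primary typedef above (the one-second interval is arbitrary):

  void ConfigureReleaseToOS(Primary *a) {
    // Return runs of fully-free pages to the OS at most once per second.
    a->SetReleaseToOSIntervalMs(1000);
    // A negative interval disables MaybeReleaseToOS() entirely:
    // a->SetReleaseToOSIntervalMs(-1);
  }
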
diff --git a/lib/sanitizer_common/sanitizer_allocator_secondary.h b/lib/sanitizer_common/sanitizer_allocator_secondary.h
new file mode 100644
index 000000000000..2e98e591b432
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_allocator_secondary.h
@@ -0,0 +1,282 @@
+//===-- sanitizer_allocator_secondary.h -------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+// This class can (de)allocate only large chunks of memory using mmap/unmap.
+// The main purpose of this allocator is to cover large and rare allocation
+// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
+template <class MapUnmapCallback = NoOpMapUnmapCallback>
+class LargeMmapAllocator {
+ public:
+ void InitLinkerInitialized(bool may_return_null) {
+ page_size_ = GetPageSizeCached();
+ atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
+ }
+
+ void Init(bool may_return_null) {
+ internal_memset(this, 0, sizeof(*this));
+ InitLinkerInitialized(may_return_null);
+ }
+
+ void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
+ CHECK(IsPowerOfTwo(alignment));
+ uptr map_size = RoundUpMapSize(size);
+ if (alignment > page_size_)
+ map_size += alignment;
+ // Overflow.
+ if (map_size < size) return ReturnNullOrDieOnBadRequest();
+ uptr map_beg = reinterpret_cast<uptr>(
+ MmapOrDie(map_size, "LargeMmapAllocator"));
+ CHECK(IsAligned(map_beg, page_size_));
+ MapUnmapCallback().OnMap(map_beg, map_size);
+ uptr map_end = map_beg + map_size;
+ uptr res = map_beg + page_size_;
+ if (res & (alignment - 1)) // Align.
+ res += alignment - (res & (alignment - 1));
+ CHECK(IsAligned(res, alignment));
+ CHECK(IsAligned(res, page_size_));
+ CHECK_GE(res + size, map_beg);
+ CHECK_LE(res + size, map_end);
+ Header *h = GetHeader(res);
+ h->size = size;
+ h->map_beg = map_beg;
+ h->map_size = map_size;
+ uptr size_log = MostSignificantSetBitIndex(map_size);
+ CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
+ {
+ SpinMutexLock l(&mutex_);
+ uptr idx = n_chunks_++;
+ chunks_sorted_ = false;
+ CHECK_LT(idx, kMaxNumChunks);
+ h->chunk_idx = idx;
+ chunks_[idx] = h;
+ stats.n_allocs++;
+ stats.currently_allocated += map_size;
+ stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
+ stats.by_size_log[size_log]++;
+ stat->Add(AllocatorStatAllocated, map_size);
+ stat->Add(AllocatorStatMapped, map_size);
+ }
+ return reinterpret_cast<void*>(res);
+ }
+
+ bool MayReturnNull() const {
+ return atomic_load(&may_return_null_, memory_order_acquire);
+ }
+
+ void *ReturnNullOrDieOnBadRequest() {
+ if (MayReturnNull()) return nullptr;
+ ReportAllocatorCannotReturnNull(false);
+ }
+
+ void *ReturnNullOrDieOnOOM() {
+ if (MayReturnNull()) return nullptr;
+ ReportAllocatorCannotReturnNull(true);
+ }
+
+ void SetMayReturnNull(bool may_return_null) {
+ atomic_store(&may_return_null_, may_return_null, memory_order_release);
+ }
+
+ void Deallocate(AllocatorStats *stat, void *p) {
+ Header *h = GetHeader(p);
+ {
+ SpinMutexLock l(&mutex_);
+ uptr idx = h->chunk_idx;
+ CHECK_EQ(chunks_[idx], h);
+ CHECK_LT(idx, n_chunks_);
+ chunks_[idx] = chunks_[n_chunks_ - 1];
+ chunks_[idx]->chunk_idx = idx;
+ n_chunks_--;
+ chunks_sorted_ = false;
+ stats.n_frees++;
+ stats.currently_allocated -= h->map_size;
+ stat->Sub(AllocatorStatAllocated, h->map_size);
+ stat->Sub(AllocatorStatMapped, h->map_size);
+ }
+ MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
+ UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
+ }
+
+ uptr TotalMemoryUsed() {
+ SpinMutexLock l(&mutex_);
+ uptr res = 0;
+ for (uptr i = 0; i < n_chunks_; i++) {
+ Header *h = chunks_[i];
+ CHECK_EQ(h->chunk_idx, i);
+ res += RoundUpMapSize(h->size);
+ }
+ return res;
+ }
+
+ bool PointerIsMine(const void *p) {
+ return GetBlockBegin(p) != nullptr;
+ }
+
+ uptr GetActuallyAllocatedSize(void *p) {
+ return RoundUpTo(GetHeader(p)->size, page_size_);
+ }
+
+  // At least page_size_/2 metadata bytes are available.
+ void *GetMetaData(const void *p) {
+ // Too slow: CHECK_EQ(p, GetBlockBegin(p));
+ if (!IsAligned(reinterpret_cast<uptr>(p), page_size_)) {
+ Printf("%s: bad pointer %p\n", SanitizerToolName, p);
+ CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));
+ }
+ return GetHeader(p) + 1;
+ }
+
+ void *GetBlockBegin(const void *ptr) {
+ uptr p = reinterpret_cast<uptr>(ptr);
+ SpinMutexLock l(&mutex_);
+ uptr nearest_chunk = 0;
+ // Cache-friendly linear search.
+ for (uptr i = 0; i < n_chunks_; i++) {
+ uptr ch = reinterpret_cast<uptr>(chunks_[i]);
+      if (p < ch) continue;  // p is to the left of this chunk, skip it.
+ if (p - ch < p - nearest_chunk)
+ nearest_chunk = ch;
+ }
+ if (!nearest_chunk)
+ return nullptr;
+ Header *h = reinterpret_cast<Header *>(nearest_chunk);
+ CHECK_GE(nearest_chunk, h->map_beg);
+ CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
+ CHECK_LE(nearest_chunk, p);
+ if (h->map_beg + h->map_size <= p)
+ return nullptr;
+ return GetUser(h);
+ }
+
+ void EnsureSortedChunks() {
+ if (chunks_sorted_) return;
+ SortArray(reinterpret_cast<uptr*>(chunks_), n_chunks_);
+ for (uptr i = 0; i < n_chunks_; i++)
+ chunks_[i]->chunk_idx = i;
+ chunks_sorted_ = true;
+ }
+
+ // This function does the same as GetBlockBegin, but is much faster.
+ // Must be called with the allocator locked.
+ void *GetBlockBeginFastLocked(void *ptr) {
+ mutex_.CheckLocked();
+ uptr p = reinterpret_cast<uptr>(ptr);
+ uptr n = n_chunks_;
+ if (!n) return nullptr;
+ EnsureSortedChunks();
+ auto min_mmap_ = reinterpret_cast<uptr>(chunks_[0]);
+ auto max_mmap_ =
+ reinterpret_cast<uptr>(chunks_[n - 1]) + chunks_[n - 1]->map_size;
+ if (p < min_mmap_ || p >= max_mmap_)
+ return nullptr;
+ uptr beg = 0, end = n - 1;
+ // This loop is a log(n) lower_bound. It does not check for the exact match
+ // to avoid expensive cache-thrashing loads.
+ while (end - beg >= 2) {
+ uptr mid = (beg + end) / 2; // Invariant: mid >= beg + 1
+ if (p < reinterpret_cast<uptr>(chunks_[mid]))
+ end = mid - 1; // We are not interested in chunks_[mid].
+ else
+ beg = mid; // chunks_[mid] may still be what we want.
+ }
+
+ if (beg < end) {
+ CHECK_EQ(beg + 1, end);
+ // There are 2 chunks left, choose one.
+ if (p >= reinterpret_cast<uptr>(chunks_[end]))
+ beg = end;
+ }
+
+ Header *h = chunks_[beg];
+ if (h->map_beg + h->map_size <= p || p < h->map_beg)
+ return nullptr;
+ return GetUser(h);
+ }
+
+ void PrintStats() {
+ Printf("Stats: LargeMmapAllocator: allocated %zd times, "
+ "remains %zd (%zd K) max %zd M; by size logs: ",
+ stats.n_allocs, stats.n_allocs - stats.n_frees,
+ stats.currently_allocated >> 10, stats.max_allocated >> 20);
+ for (uptr i = 0; i < ARRAY_SIZE(stats.by_size_log); i++) {
+ uptr c = stats.by_size_log[i];
+ if (!c) continue;
+ Printf("%zd:%zd; ", i, c);
+ }
+ Printf("\n");
+ }
+
+ // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
+ // introspection API.
+ void ForceLock() {
+ mutex_.Lock();
+ }
+
+ void ForceUnlock() {
+ mutex_.Unlock();
+ }
+
+ // Iterate over all existing chunks.
+ // The allocator must be locked when calling this function.
+ void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+ EnsureSortedChunks(); // Avoid doing the sort while iterating.
+ for (uptr i = 0; i < n_chunks_; i++) {
+ auto t = chunks_[i];
+ callback(reinterpret_cast<uptr>(GetUser(chunks_[i])), arg);
+ // Consistency check: verify that the array did not change.
+ CHECK_EQ(chunks_[i], t);
+ CHECK_EQ(chunks_[i]->chunk_idx, i);
+ }
+ }
+
+ private:
+ static const int kMaxNumChunks = 1 << FIRST_32_SECOND_64(15, 18);
+ struct Header {
+ uptr map_beg;
+ uptr map_size;
+ uptr size;
+ uptr chunk_idx;
+ };
+
+ Header *GetHeader(uptr p) {
+ CHECK(IsAligned(p, page_size_));
+ return reinterpret_cast<Header*>(p - page_size_);
+ }
+ Header *GetHeader(const void *p) {
+ return GetHeader(reinterpret_cast<uptr>(p));
+ }
+
+ void *GetUser(Header *h) {
+ CHECK(IsAligned((uptr)h, page_size_));
+ return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
+ }
+
+ uptr RoundUpMapSize(uptr size) {
+ return RoundUpTo(size, page_size_) + page_size_;
+ }
+
+ uptr page_size_;
+ Header *chunks_[kMaxNumChunks];
+ uptr n_chunks_;
+ bool chunks_sorted_;
+ struct Stats {
+ uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
+ } stats;
+ atomic_uint8_t may_return_null_;
+ SpinMutex mutex_;
+};
+
+
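Every chunk handed out by this class is preceded by a one-page Header, so even the smallest request consumes at least two pages of address space, and GetActuallyAllocatedSize() reports the page-rounded user size. A standalone usage sketch (instances are static because the chunks_ array alone is hundreds of KB; all names are illustrative):

  static LargeMmapAllocator<> secondary;
  static AllocatorStats secondary_stats;

  void SecondaryDemo() {
    secondary.Init(/*may_return_null=*/false);
    secondary_stats.Init();
    void *p = secondary.Allocate(&secondary_stats, /*size=*/1, /*alignment=*/8);
    CHECK(secondary.PointerIsMine(p));
    CHECK_EQ(secondary.GetActuallyAllocatedSize(p), GetPageSizeCached());
    secondary.Deallocate(&secondary_stats, p);
  }
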
diff --git a/lib/sanitizer_common/sanitizer_allocator_size_class_map.h b/lib/sanitizer_common/sanitizer_allocator_size_class_map.h
new file mode 100644
index 000000000000..7151a4636056
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_allocator_size_class_map.h
@@ -0,0 +1,217 @@
+//===-- sanitizer_allocator_size_class_map.h --------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+// SizeClassMap maps allocation sizes into size classes and back.
+// Class 0 always corresponds to size 0.
+// The other sizes are controlled by the template parameters:
+//   kMinSizeLog: defines class 1 as 2^kMinSizeLog.
+// kMaxSizeLog: defines the last class as 2^kMaxSizeLog.
+// kMidSizeLog: the classes starting from 1 increase with step
+// 2^kMinSizeLog until 2^kMidSizeLog.
+// kNumBits: the number of non-zero bits in sizes after 2^kMidSizeLog.
+// E.g. with kNumBits==3 all size classes after 2^kMidSizeLog
+// look like 0b1xx0..0, where x is either 0 or 1.
+//
+// Example: kNumBits=3, kMinSizeLog=4, kMidSizeLog=8, kMaxSizeLog=17:
+//
+// Classes 1 - 16 correspond to sizes 16 to 256 (size = class_id * 16).
+// Next 4 classes: 256 + i * 64 (i = 1 to 4).
+// Next 4 classes: 512 + i * 128 (i = 1 to 4).
+// ...
+// Next 4 classes: 2^k + i * 2^(k-2) (i = 1 to 4).
+// Last class corresponds to kMaxSize = 1 << kMaxSizeLog.
+//
+// This structure of the size class map gives us:
+// - Efficient table-free class-to-size and size-to-class functions.
+//   - Difference between two consecutive size classes is between 14% and 25%.
+//
+// This class also gives a hint to a thread-caching allocator about the amount
+// of chunks that need to be cached per-thread:
+//  - kMaxNumCachedHint is a hint for the maximal number of chunks per size class.
+// The actual number is computed in TransferBatch.
+// - (1 << kMaxBytesCachedLog) is the maximal number of bytes per size class.
+//
+// Part of output of SizeClassMap::Print():
+// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
+// c01 => s: 16 diff: +16 00% l 4 cached: 256 4096; id 1
+// c02 => s: 32 diff: +16 100% l 5 cached: 256 8192; id 2
+// c03 => s: 48 diff: +16 50% l 5 cached: 256 12288; id 3
+// c04 => s: 64 diff: +16 33% l 6 cached: 256 16384; id 4
+// c05 => s: 80 diff: +16 25% l 6 cached: 256 20480; id 5
+// c06 => s: 96 diff: +16 20% l 6 cached: 256 24576; id 6
+// c07 => s: 112 diff: +16 16% l 6 cached: 256 28672; id 7
+//
+// c08 => s: 128 diff: +16 14% l 7 cached: 256 32768; id 8
+// c09 => s: 144 diff: +16 12% l 7 cached: 256 36864; id 9
+// c10 => s: 160 diff: +16 11% l 7 cached: 256 40960; id 10
+// c11 => s: 176 diff: +16 10% l 7 cached: 256 45056; id 11
+// c12 => s: 192 diff: +16 09% l 7 cached: 256 49152; id 12
+// c13 => s: 208 diff: +16 08% l 7 cached: 256 53248; id 13
+// c14 => s: 224 diff: +16 07% l 7 cached: 256 57344; id 14
+// c15 => s: 240 diff: +16 07% l 7 cached: 256 61440; id 15
+//
+// c16 => s: 256 diff: +16 06% l 8 cached: 256 65536; id 16
+// c17 => s: 320 diff: +64 25% l 8 cached: 204 65280; id 17
+// c18 => s: 384 diff: +64 20% l 8 cached: 170 65280; id 18
+// c19 => s: 448 diff: +64 16% l 8 cached: 146 65408; id 19
+//
+// c20 => s: 512 diff: +64 14% l 9 cached: 128 65536; id 20
+// c21 => s: 640 diff: +128 25% l 9 cached: 102 65280; id 21
+// c22 => s: 768 diff: +128 20% l 9 cached: 85 65280; id 22
+// c23 => s: 896 diff: +128 16% l 9 cached: 73 65408; id 23
+//
+// c24 => s: 1024 diff: +128 14% l 10 cached: 64 65536; id 24
+// c25 => s: 1280 diff: +256 25% l 10 cached: 51 65280; id 25
+// c26 => s: 1536 diff: +256 20% l 10 cached: 42 64512; id 26
+// c27 => s: 1792 diff: +256 16% l 10 cached: 36 64512; id 27
+//
+// ...
+//
+// c48 => s: 65536 diff: +8192 14% l 16 cached: 1 65536; id 48
+// c49 => s: 81920 diff: +16384 25% l 16 cached: 1 81920; id 49
+// c50 => s: 98304 diff: +16384 20% l 16 cached: 1 98304; id 50
+// c51 => s: 114688 diff: +16384 16% l 16 cached: 1 114688; id 51
+//
+// c52 => s: 131072 diff: +16384 14% l 17 cached: 1 131072; id 52
+//
+//
+// Another example (kNumBits=2):
+// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
+// c01 => s: 32 diff: +32 00% l 5 cached: 64 2048; id 1
+// c02 => s: 64 diff: +32 100% l 6 cached: 64 4096; id 2
+// c03 => s: 96 diff: +32 50% l 6 cached: 64 6144; id 3
+// c04 => s: 128 diff: +32 33% l 7 cached: 64 8192; id 4
+// c05 => s: 160 diff: +32 25% l 7 cached: 64 10240; id 5
+// c06 => s: 192 diff: +32 20% l 7 cached: 64 12288; id 6
+// c07 => s: 224 diff: +32 16% l 7 cached: 64 14336; id 7
+// c08 => s: 256 diff: +32 14% l 8 cached: 64 16384; id 8
+// c09 => s: 384 diff: +128 50% l 8 cached: 42 16128; id 9
+// c10 => s: 512 diff: +128 33% l 9 cached: 32 16384; id 10
+// c11 => s: 768 diff: +256 50% l 9 cached: 21 16128; id 11
+// c12 => s: 1024 diff: +256 33% l 10 cached: 16 16384; id 12
+// c13 => s: 1536 diff: +512 50% l 10 cached: 10 15360; id 13
+// c14 => s: 2048 diff: +512 33% l 11 cached: 8 16384; id 14
+// c15 => s: 3072 diff: +1024 50% l 11 cached: 5 15360; id 15
+// c16 => s: 4096 diff: +1024 33% l 12 cached: 4 16384; id 16
+// c17 => s: 6144 diff: +2048 50% l 12 cached: 2 12288; id 17
+// c18 => s: 8192 diff: +2048 33% l 13 cached: 2 16384; id 18
+// c19 => s: 12288 diff: +4096 50% l 13 cached: 1 12288; id 19
+// c20 => s: 16384 diff: +4096 33% l 14 cached: 1 16384; id 20
+// c21 => s: 24576 diff: +8192 50% l 14 cached: 1 24576; id 21
+// c22 => s: 32768 diff: +8192 33% l 15 cached: 1 32768; id 22
+// c23 => s: 49152 diff: +16384 50% l 15 cached: 1 49152; id 23
+// c24 => s: 65536 diff: +16384 33% l 16 cached: 1 65536; id 24
+// c25 => s: 98304 diff: +32768 50% l 16 cached: 1 98304; id 25
+// c26 => s: 131072 diff: +32768 33% l 17 cached: 1 131072; id 26
+
+template <uptr kNumBits, uptr kMinSizeLog, uptr kMidSizeLog, uptr kMaxSizeLog,
+ uptr kMaxNumCachedHintT, uptr kMaxBytesCachedLog>
+class SizeClassMap {
+ static const uptr kMinSize = 1 << kMinSizeLog;
+ static const uptr kMidSize = 1 << kMidSizeLog;
+ static const uptr kMidClass = kMidSize / kMinSize;
+ static const uptr S = kNumBits - 1;
+ static const uptr M = (1 << S) - 1;
+
+ public:
+ // kMaxNumCachedHintT is a power of two. It serves as a hint
+  // for the size of TransferBatch; the actual size can be a bit smaller.
+ static const uptr kMaxNumCachedHint = kMaxNumCachedHintT;
+ COMPILER_CHECK((kMaxNumCachedHint & (kMaxNumCachedHint - 1)) == 0);
+
+ static const uptr kMaxSize = 1UL << kMaxSizeLog;
+ static const uptr kNumClasses =
+ kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1;
+ static const uptr kLargestClassID = kNumClasses - 2;
+ COMPILER_CHECK(kNumClasses >= 16 && kNumClasses <= 256);
+ static const uptr kNumClassesRounded =
+ kNumClasses <= 32 ? 32 :
+ kNumClasses <= 64 ? 64 :
+ kNumClasses <= 128 ? 128 : 256;
+
+ static uptr Size(uptr class_id) {
+ if (class_id <= kMidClass)
+ return kMinSize * class_id;
+ class_id -= kMidClass;
+ uptr t = kMidSize << (class_id >> S);
+ return t + (t >> S) * (class_id & M);
+ }
+
+ static uptr ClassID(uptr size) {
+ if (size <= kMidSize)
+ return (size + kMinSize - 1) >> kMinSizeLog;
+ if (size > kMaxSize) return 0;
+ uptr l = MostSignificantSetBitIndex(size);
+ uptr hbits = (size >> (l - S)) & M;
+ uptr lbits = size & ((1 << (l - S)) - 1);
+ uptr l1 = l - kMidSizeLog;
+ return kMidClass + (l1 << S) + hbits + (lbits > 0);
+ }
+
+ static uptr MaxCachedHint(uptr class_id) {
+ if (class_id == 0) return 0;
+ uptr n = (1UL << kMaxBytesCachedLog) / Size(class_id);
+ return Max<uptr>(1, Min(kMaxNumCachedHint, n));
+ }
+
+ static void Print() {
+ uptr prev_s = 0;
+ uptr total_cached = 0;
+ for (uptr i = 0; i < kNumClasses; i++) {
+ uptr s = Size(i);
+ if (s >= kMidSize / 2 && (s & (s - 1)) == 0)
+ Printf("\n");
+ uptr d = s - prev_s;
+ uptr p = prev_s ? (d * 100 / prev_s) : 0;
+ uptr l = s ? MostSignificantSetBitIndex(s) : 0;
+ uptr cached = MaxCachedHint(i) * s;
+ Printf("c%02zd => s: %zd diff: +%zd %02zd%% l %zd "
+ "cached: %zd %zd; id %zd\n",
+ i, Size(i), d, p, l, MaxCachedHint(i), cached, ClassID(s));
+ total_cached += cached;
+ prev_s = s;
+ }
+ Printf("Total cached: %zd\n", total_cached);
+ }
+
+ static void Validate() {
+ for (uptr c = 1; c < kNumClasses; c++) {
+ // Printf("Validate: c%zd\n", c);
+ uptr s = Size(c);
+ CHECK_NE(s, 0U);
+ CHECK_EQ(ClassID(s), c);
+ if (c != kNumClasses - 1)
+ CHECK_EQ(ClassID(s + 1), c + 1);
+ CHECK_EQ(ClassID(s - 1), c);
+ if (c)
+ CHECK_GT(Size(c), Size(c-1));
+ }
+ CHECK_EQ(ClassID(kMaxSize + 1), 0);
+
+ for (uptr s = 1; s <= kMaxSize; s++) {
+ uptr c = ClassID(s);
+ // Printf("s%zd => c%zd\n", s, c);
+ CHECK_LT(c, kNumClasses);
+ CHECK_GE(Size(c), s);
+ if (c > 0)
+ CHECK_LT(Size(c-1), s);
+ }
+ }
+};
+
+typedef SizeClassMap<3, 4, 8, 17, 128, 16> DefaultSizeClassMap;
+typedef SizeClassMap<3, 4, 8, 17, 64, 14> CompactSizeClassMap;
+typedef SizeClassMap<2, 5, 9, 16, 64, 14> VeryCompactSizeClassMap;
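
The Size()/ClassID() pair above is pure bit arithmetic and easy to misread. The following standalone sketch (not the runtime code; the constants merely mirror DefaultSizeClassMap's template parameters <3, 4, 8, 17, 128, 16>) reproduces both formulas and checks two representative sizes: below kMidSize classes advance in kMinSize steps, above it each power-of-two interval is split into 2^(kNumBits - 1) classes.

    // Standalone sketch of the DefaultSizeClassMap rounding math.
    #include <cassert>
    #include <cstdio>

    typedef unsigned long uptr;

    static const uptr kMinSizeLog = 4, kMidSizeLog = 8;
    static const uptr S = 3 - 1;                       // kNumBits - 1
    static const uptr M = (1 << S) - 1;
    static const uptr kMinSize = 1 << kMinSizeLog;     // 16
    static const uptr kMidSize = 1 << kMidSizeLog;     // 256
    static const uptr kMidClass = kMidSize / kMinSize;

    static uptr Msb(uptr x) {                 // MostSignificantSetBitIndex
      uptr i = 0;
      while (x >>= 1) i++;
      return i;
    }

    static uptr Size(uptr c) {                // same formula as above
      if (c <= kMidClass) return kMinSize * c;
      c -= kMidClass;
      uptr t = kMidSize << (c >> S);
      return t + (t >> S) * (c & M);
    }

    static uptr ClassID(uptr size) {          // same formula as above
      if (size <= kMidSize) return (size + kMinSize - 1) >> kMinSizeLog;
      uptr l = Msb(size);
      uptr hbits = (size >> (l - S)) & M;
      uptr lbits = size & ((1UL << (l - S)) - 1);
      return kMidClass + ((l - kMidSizeLog) << S) + hbits + (lbits > 0);
    }

    int main() {
      assert(ClassID(17) == 2 && Size(2) == 32);      // 16-byte steps below 256
      assert(ClassID(769) == 23 && Size(23) == 896);  // ~25%/50% steps above 256
      printf("769 bytes -> class %lu, size %lu\n", ClassID(769), Size(23));
      return 0;
    }
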
diff --git a/lib/sanitizer_common/sanitizer_allocator_stats.h b/lib/sanitizer_common/sanitizer_allocator_stats.h
new file mode 100644
index 000000000000..38b088b8446e
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_allocator_stats.h
@@ -0,0 +1,107 @@
+//===-- sanitizer_allocator_stats.h -----------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+// Memory allocator statistics
+enum AllocatorStat {
+ AllocatorStatAllocated,
+ AllocatorStatMapped,
+ AllocatorStatCount
+};
+
+typedef uptr AllocatorStatCounters[AllocatorStatCount];
+
+// Per-thread stats; they live in the per-thread allocator cache.
+class AllocatorStats {
+ public:
+ void Init() {
+ internal_memset(this, 0, sizeof(*this));
+ }
+ void InitLinkerInitialized() {}
+
+ void Add(AllocatorStat i, uptr v) {
+ v += atomic_load(&stats_[i], memory_order_relaxed);
+ atomic_store(&stats_[i], v, memory_order_relaxed);
+ }
+
+ void Sub(AllocatorStat i, uptr v) {
+ v = atomic_load(&stats_[i], memory_order_relaxed) - v;
+ atomic_store(&stats_[i], v, memory_order_relaxed);
+ }
+
+ void Set(AllocatorStat i, uptr v) {
+ atomic_store(&stats_[i], v, memory_order_relaxed);
+ }
+
+ uptr Get(AllocatorStat i) const {
+ return atomic_load(&stats_[i], memory_order_relaxed);
+ }
+
+ private:
+ friend class AllocatorGlobalStats;
+ AllocatorStats *next_;
+ AllocatorStats *prev_;
+ atomic_uintptr_t stats_[AllocatorStatCount];
+};
+
+// Global stats, used for aggregation and querying.
+class AllocatorGlobalStats : public AllocatorStats {
+ public:
+ void InitLinkerInitialized() {
+ next_ = this;
+ prev_ = this;
+ }
+ void Init() {
+ internal_memset(this, 0, sizeof(*this));
+ InitLinkerInitialized();
+ }
+
+ void Register(AllocatorStats *s) {
+ SpinMutexLock l(&mu_);
+ s->next_ = next_;
+ s->prev_ = this;
+ next_->prev_ = s;
+ next_ = s;
+ }
+
+ void Unregister(AllocatorStats *s) {
+ SpinMutexLock l(&mu_);
+ s->prev_->next_ = s->next_;
+ s->next_->prev_ = s->prev_;
+ for (int i = 0; i < AllocatorStatCount; i++)
+ Add(AllocatorStat(i), s->Get(AllocatorStat(i)));
+ }
+
+ void Get(AllocatorStatCounters s) const {
+ internal_memset(s, 0, AllocatorStatCount * sizeof(uptr));
+ SpinMutexLock l(&mu_);
+ const AllocatorStats *stats = this;
+ for (;;) {
+ for (int i = 0; i < AllocatorStatCount; i++)
+ s[i] += stats->Get(AllocatorStat(i));
+ stats = stats->next_;
+ if (stats == this)
+ break;
+ }
+ // All stats must be non-negative.
+ for (int i = 0; i < AllocatorStatCount; i++)
+ s[i] = ((sptr)s[i]) >= 0 ? s[i] : 0;
+ }
+
+ private:
+ mutable SpinMutex mu_;
+};
+
+
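
How the pieces above fit together: every per-thread AllocatorStats is spliced into an intrusive ring anchored at the AllocatorGlobalStats object, and Get() sums the whole ring under the spin lock. A simplified self-contained model of that ring, with std::atomic and std::mutex standing in for the sanitizer primitives (a sketch, not the runtime code):

    #include <atomic>
    #include <cassert>
    #include <mutex>

    enum Stat { kAllocated, kMapped, kCount };

    struct Stats {
      Stats *next = nullptr, *prev = nullptr;
      std::atomic<unsigned long> v[kCount] = {};
      void Add(Stat i, unsigned long x) {
        // Relaxed load+store, like AllocatorStats::Add: races only skew the
        // snapshot, they cannot corrupt the list.
        v[i].store(v[i].load(std::memory_order_relaxed) + x,
                   std::memory_order_relaxed);
      }
    };

    struct GlobalStats : Stats {
      std::mutex mu;
      GlobalStats() { next = prev = this; }  // empty ring points at itself
      void Register(Stats *s) {              // splice in right after the head
        std::lock_guard<std::mutex> l(mu);
        s->next = next; s->prev = this;
        next->prev = s; next = s;
      }
      void Get(unsigned long out[kCount]) {  // walk the ring and sum
        for (int i = 0; i < kCount; i++) out[i] = 0;
        std::lock_guard<std::mutex> l(mu);
        for (Stats *p = this;;) {
          for (int i = 0; i < kCount; i++)
            out[i] += p->v[i].load(std::memory_order_relaxed);
          p = p->next;
          if (p == this) break;
        }
      }
    };

    int main() {
      GlobalStats g;
      Stats t1, t2;                 // stand-ins for two per-thread caches
      g.Register(&t1); g.Register(&t2);
      t1.Add(kAllocated, 100); t2.Add(kAllocated, 40);
      unsigned long sums[kCount];
      g.Get(sums);
      assert(sums[kAllocated] == 140);
      return 0;
    }
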
diff --git a/lib/sanitizer_common/sanitizer_atomic.h b/lib/sanitizer_common/sanitizer_atomic.h
index b26693e24f8d..8f400acc999c 100644
--- a/lib/sanitizer_common/sanitizer_atomic.h
+++ b/lib/sanitizer_common/sanitizer_atomic.h
@@ -37,6 +37,11 @@ struct atomic_uint16_t {
volatile Type val_dont_use;
};
+struct atomic_sint32_t {
+ typedef s32 Type;
+ volatile Type val_dont_use;
+};
+
struct atomic_uint32_t {
typedef u32 Type;
volatile Type val_dont_use;
diff --git a/lib/sanitizer_common/sanitizer_common.cc b/lib/sanitizer_common/sanitizer_common.cc
index 79fcbb1183f9..1c6fc3ef86a3 100644
--- a/lib/sanitizer_common/sanitizer_common.cc
+++ b/lib/sanitizer_common/sanitizer_common.cc
@@ -114,7 +114,7 @@ void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
Report("ERROR: %s failed to "
"%s 0x%zx (%zd) bytes of %s (error code: %d)\n",
SanitizerToolName, mmap_type, size, size, mem_type, err);
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
DumpProcessMap();
#endif
UNREACHABLE("unable to mmap");
@@ -157,6 +157,7 @@ bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
}
typedef bool UptrComparisonFunction(const uptr &a, const uptr &b);
+typedef bool U32ComparisonFunction(const u32 &a, const u32 &b);
template<class T>
static inline bool CompareLess(const T &a, const T &b) {
@@ -167,6 +168,10 @@ void SortArray(uptr *array, uptr size) {
InternalSort<uptr*, UptrComparisonFunction>(&array, size, CompareLess);
}
+void SortArray(u32 *array, uptr size) {
+ InternalSort<u32*, U32ComparisonFunction>(&array, size, CompareLess);
+}
+
const char *StripPathPrefix(const char *filepath,
const char *strip_path_prefix) {
if (!filepath) return nullptr;
@@ -202,7 +207,7 @@ void ReportErrorSummary(const char *error_message) {
__sanitizer_report_error_summary(buff.data());
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
void ReportErrorSummary(const char *error_type, const AddressInfo &info) {
if (!common_flags()->print_summary)
return;
@@ -254,9 +259,18 @@ void LoadedModule::set(const char *module_name, uptr base_address) {
base_address_ = base_address;
}
+void LoadedModule::set(const char *module_name, uptr base_address,
+ ModuleArch arch, u8 uuid[kModuleUUIDSize]) {
+ set(module_name, base_address);
+ arch_ = arch;
+ internal_memcpy(uuid_, uuid, sizeof(uuid_));
+}
+
void LoadedModule::clear() {
InternalFree(full_name_);
full_name_ = nullptr;
+ arch_ = kModuleArchUnknown;
+ internal_memset(uuid_, 0, kModuleUUIDSize);
while (!ranges_.empty()) {
AddressRange *r = ranges_.front();
ranges_.pop_front();
@@ -483,4 +497,11 @@ int __sanitizer_install_malloc_and_free_hooks(void (*malloc_hook)(const void *,
void (*free_hook)(const void *)) {
return InstallMallocFreeHooks(malloc_hook, free_hook);
}
+
+#if !SANITIZER_GO && !SANITIZER_SUPPORTS_WEAK_HOOKS
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_print_memory_profile(int top_percent) {
+ (void)top_percent;
+}
+#endif
} // extern "C"
diff --git a/lib/sanitizer_common/sanitizer_common.h b/lib/sanitizer_common/sanitizer_common.h
index 6c1d6a00a10c..66c2d26fa4f5 100644
--- a/lib/sanitizer_common/sanitizer_common.h
+++ b/lib/sanitizer_common/sanitizer_common.h
@@ -98,9 +98,14 @@ void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type);
bool MprotectNoAccess(uptr addr, uptr size);
bool MprotectReadOnly(uptr addr, uptr size);
+// Find an available address space.
+uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding);
+
// Used to check if we can map shadow memory to a fixed location.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
-void FlushUnneededShadowMemory(uptr addr, uptr size);
+// Releases memory pages entirely within the [beg, end] address range. Noop if
+// the provided range does not contain at least one entire page.
+void ReleaseMemoryPagesToOS(uptr beg, uptr end);
void IncreaseTotalMmap(uptr size);
void DecreaseTotalMmap(uptr size);
uptr GetRSS();
@@ -115,16 +120,14 @@ void RunFreeHooks(const void *ptr);
// keep frame size low.
// FIXME: use InternalAlloc instead of MmapOrDie once
// InternalAlloc is made libc-free.
-template<typename T>
+template <typename T>
class InternalScopedBuffer {
public:
explicit InternalScopedBuffer(uptr cnt) {
cnt_ = cnt;
- ptr_ = (T*)MmapOrDie(cnt * sizeof(T), "InternalScopedBuffer");
- }
- ~InternalScopedBuffer() {
- UnmapOrDie(ptr_, cnt_ * sizeof(T));
+ ptr_ = (T *)MmapOrDie(cnt * sizeof(T), "InternalScopedBuffer");
}
+ ~InternalScopedBuffer() { UnmapOrDie(ptr_, cnt_ * sizeof(T)); }
T &operator[](uptr i) { return ptr_[i]; }
T *data() { return ptr_; }
uptr size() { return cnt_ * sizeof(T); }
@@ -132,9 +135,11 @@ class InternalScopedBuffer {
private:
T *ptr_;
uptr cnt_;
- // Disallow evil constructors.
- InternalScopedBuffer(const InternalScopedBuffer&);
- void operator=(const InternalScopedBuffer&);
+ // Disallow copies and moves.
+ InternalScopedBuffer(const InternalScopedBuffer &) = delete;
+ InternalScopedBuffer &operator=(const InternalScopedBuffer &) = delete;
+ InternalScopedBuffer(InternalScopedBuffer &&) = delete;
+ InternalScopedBuffer &operator=(InternalScopedBuffer &&) = delete;
};
class InternalScopedString : public InternalScopedBuffer<char> {
@@ -330,6 +335,7 @@ void SleepForMillis(int millis);
u64 NanoTime();
int Atexit(void (*function)(void));
void SortArray(uptr *array, uptr size);
+void SortArray(u32 *array, uptr size);
bool TemplateMatch(const char *templ, const char *str);
// Exit
@@ -389,7 +395,7 @@ void ReportErrorSummary(const char *error_message);
// error_type file:line[:column][ function]
void ReportErrorSummary(const char *error_type, const AddressInfo &info);
// Same as above, but obtains AddressInfo by symbolizing top stack trace frame.
-void ReportErrorSummary(const char *error_type, StackTrace *trace);
+void ReportErrorSummary(const char *error_type, const StackTrace *trace);
// Math
#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
@@ -446,8 +452,8 @@ INLINE uptr RoundUpToPowerOfTwo(uptr size) {
if (IsPowerOfTwo(size)) return size;
uptr up = MostSignificantSetBitIndex(size);
- CHECK(size < (1ULL << (up + 1)));
- CHECK(size > (1ULL << up));
+ CHECK_LT(size, (1ULL << (up + 1)));
+ CHECK_GT(size, (1ULL << up));
return 1ULL << (up + 1);
}
@@ -541,6 +547,13 @@ class InternalMmapVectorNoCtor {
uptr capacity() const {
return capacity_;
}
+ void resize(uptr new_size) {
+ Resize(new_size);
+ if (new_size > size_) {
+ internal_memset(&data_[size_], 0, sizeof(T) * (new_size - size_));
+ }
+ size_ = new_size;
+ }
void clear() { size_ = 0; }
bool empty() const { return size() == 0; }
@@ -625,34 +638,55 @@ void InternalSort(Container *v, uptr size, Compare comp) {
}
}
-template<class Container, class Value, class Compare>
-uptr InternalBinarySearch(const Container &v, uptr first, uptr last,
- const Value &val, Compare comp) {
- uptr not_found = last + 1;
- while (last >= first) {
+// Works like std::lower_bound: finds the first element that is not less
+// than val.
+template <class Container, class Value, class Compare>
+uptr InternalLowerBound(const Container &v, uptr first, uptr last,
+ const Value &val, Compare comp) {
+ while (last > first) {
uptr mid = (first + last) / 2;
if (comp(v[mid], val))
first = mid + 1;
- else if (comp(val, v[mid]))
- last = mid - 1;
else
- return mid;
+ last = mid;
}
- return not_found;
+ return first;
}
+enum ModuleArch {
+ kModuleArchUnknown,
+ kModuleArchI386,
+ kModuleArchX86_64,
+ kModuleArchX86_64H,
+ kModuleArchARMV6,
+ kModuleArchARMV7,
+ kModuleArchARMV7S,
+ kModuleArchARMV7K,
+ kModuleArchARM64
+};
+
+const uptr kModuleUUIDSize = 16;
+
// Represents a binary loaded into virtual memory (e.g. this can be an
// executable or a shared object).
class LoadedModule {
public:
- LoadedModule() : full_name_(nullptr), base_address_(0) { ranges_.clear(); }
+ LoadedModule()
+ : full_name_(nullptr), base_address_(0), arch_(kModuleArchUnknown) {
+ internal_memset(uuid_, 0, kModuleUUIDSize);
+ ranges_.clear();
+ }
void set(const char *module_name, uptr base_address);
+ void set(const char *module_name, uptr base_address, ModuleArch arch,
+ u8 uuid[kModuleUUIDSize]);
void clear();
void addAddressRange(uptr beg, uptr end, bool executable);
bool containsAddress(uptr address) const;
const char *full_name() const { return full_name_; }
uptr base_address() const { return base_address_; }
+ ModuleArch arch() const { return arch_; }
+ const u8 *uuid() const { return uuid_; }
struct AddressRange {
AddressRange *next;
@@ -669,6 +703,8 @@ class LoadedModule {
private:
char *full_name_; // Owned.
uptr base_address_;
+ ModuleArch arch_;
+ u8 uuid_[kModuleUUIDSize];
IntrusiveList<AddressRange> ranges_;
};
@@ -789,6 +825,8 @@ struct SignalContext {
is_memory_access(is_memory_access),
write_flag(write_flag) {}
+ static void DumpAllRegisters(void *context);
+
// Creates signal context in a platform-specific manner.
static SignalContext Create(void *siginfo, void *context);
@@ -827,6 +865,15 @@ void AvoidCVE_2016_2143();
INLINE void AvoidCVE_2016_2143() {}
#endif
+struct StackDepotStats {
+ uptr n_uniq_ids;
+ uptr allocated;
+};
+
+// The default value for the allocator_release_to_os_interval_ms common flag,
+// indicating that the sanitizer allocator should not attempt to release
+// memory to the OS.
+const s32 kReleaseToOSIntervalNever = -1;
+
} // namespace __sanitizer
inline void *operator new(__sanitizer::operator_new_size_type size,
@@ -834,9 +881,4 @@ inline void *operator new(__sanitizer::operator_new_size_type size,
return alloc.Allocate(size);
}
-struct StackDepotStats {
- uptr n_uniq_ids;
- uptr allocated;
-};
-
#endif // SANITIZER_COMMON_H
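
The InternalBinarySearch-to-InternalLowerBound change above also alters the contract: the old function returned last + 1 on a miss, while the new one always returns a valid insertion point. A tiny standalone check of that contract (a plain re-implementation, not the header's template):

    #include <cassert>

    typedef unsigned long uptr;

    template <class T>
    uptr LowerBound(const T *v, uptr first, uptr last, const T &val) {
      // Same loop shape as InternalLowerBound; [first, last) is the range.
      while (last > first) {
        uptr mid = (first + last) / 2;
        if (v[mid] < val)
          first = mid + 1;
        else
          last = mid;
      }
      return first;  // first index with v[index] >= val, or last
    }

    int main() {
      unsigned a[] = {10, 20, 20, 30};
      assert(LowerBound(a, 0, 4, 20u) == 1);  // first of an equal run
      assert(LowerBound(a, 0, 4, 25u) == 3);  // insertion point on a miss
      assert(LowerBound(a, 0, 4, 99u) == 4);  // == last: nothing >= val
      return 0;
    }
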
diff --git a/lib/sanitizer_common/sanitizer_common_interceptors.inc b/lib/sanitizer_common/sanitizer_common_interceptors.inc
index c95b3580af2c..ca571d1a9fd5 100644
--- a/lib/sanitizer_common/sanitizer_common_interceptors.inc
+++ b/lib/sanitizer_common/sanitizer_common_interceptors.inc
@@ -30,6 +30,9 @@
// COMMON_INTERCEPTOR_SET_PTHREAD_NAME
// COMMON_INTERCEPTOR_HANDLE_RECVMSG
// COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED
+// COMMON_INTERCEPTOR_MEMSET_IMPL
+// COMMON_INTERCEPTOR_MEMMOVE_IMPL
+// COMMON_INTERCEPTOR_MEMCPY_IMPL
//===----------------------------------------------------------------------===//
#include "interception/interception.h"
@@ -67,6 +70,19 @@
#define iconv __bsd_iconv
#endif
+// Platform-specific options.
+#if SANITIZER_MAC
+namespace __sanitizer {
+bool PlatformHasDifferentMemcpyAndMemmove();
+}
+#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE \
+ (__sanitizer::PlatformHasDifferentMemcpyAndMemmove())
+#elif SANITIZER_WINDOWS64
+#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE false
+#else
+#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE true
+#endif // SANITIZER_MAC
+
#ifndef COMMON_INTERCEPTOR_INITIALIZE_RANGE
#define COMMON_INTERCEPTOR_INITIALIZE_RANGE(p, size) {}
#endif
@@ -163,6 +179,47 @@
COMMON_INTERCEPT_FUNCTION(fn)
#endif
+#ifndef COMMON_INTERCEPTOR_MEMSET_IMPL
+#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size) \
+ { \
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) \
+ return internal_memset(dst, v, size); \
+ COMMON_INTERCEPTOR_ENTER(ctx, memset, dst, v, size); \
+ if (common_flags()->intercept_intrin) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
+ return REAL(memset)(dst, v, size); \
+ }
+#endif
+
+#ifndef COMMON_INTERCEPTOR_MEMMOVE_IMPL
+#define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size) \
+ { \
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) \
+ return internal_memmove(dst, src, size); \
+ COMMON_INTERCEPTOR_ENTER(ctx, memmove, dst, src, size); \
+ if (common_flags()->intercept_intrin) { \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size); \
+ } \
+ return REAL(memmove)(dst, src, size); \
+ }
+#endif
+
+#ifndef COMMON_INTERCEPTOR_MEMCPY_IMPL
+#define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size) \
+ { \
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) { \
+ return internal_memmove(dst, src, size); \
+ } \
+ COMMON_INTERCEPTOR_ENTER(ctx, memcpy, dst, src, size); \
+ if (common_flags()->intercept_intrin) { \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size); \
+ } \
+ return REAL(memcpy)(dst, src, size); \
+ }
+#endif
+
struct FileMetadata {
// For open_memstream().
char **addr;
@@ -304,8 +361,14 @@ INTERCEPTOR(int, strncmp, const char *s1, const char *s2, uptr size) {
c2 = (unsigned char)s2[i];
if (c1 != c2 || c1 == '\0') break;
}
- COMMON_INTERCEPTOR_READ_RANGE(ctx, s1, Min(i + 1, size));
- COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, Min(i + 1, size));
+ uptr i1 = i;
+ uptr i2 = i;
+ if (common_flags()->strict_string_checks) {
+ for (; i1 < size && s1[i1]; i1++) {}
+ for (; i2 < size && s2[i2]; i2++) {}
+ }
+ COMMON_INTERCEPTOR_READ_RANGE((ctx), (s1), Min(i1 + 1, size));
+ COMMON_INTERCEPTOR_READ_RANGE((ctx), (s2), Min(i2 + 1, size));
int result = CharCmpX(c1, c2);
CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strncmp, GET_CALLER_PC(), s1,
s2, size, result);
@@ -348,24 +411,30 @@ INTERCEPTOR(int, strcasecmp, const char *s1, const char *s2) {
}
DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strncasecmp, uptr called_pc,
- const char *s1, const char *s2, uptr n,
+ const char *s1, const char *s2, uptr size,
int result)
-INTERCEPTOR(int, strncasecmp, const char *s1, const char *s2, SIZE_T n) {
+INTERCEPTOR(int, strncasecmp, const char *s1, const char *s2, SIZE_T size) {
void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, strncasecmp, s1, s2, n);
+ COMMON_INTERCEPTOR_ENTER(ctx, strncasecmp, s1, s2, size);
unsigned char c1 = 0, c2 = 0;
uptr i;
- for (i = 0; i < n; i++) {
+ for (i = 0; i < size; i++) {
c1 = (unsigned char)s1[i];
c2 = (unsigned char)s2[i];
if (CharCaseCmp(c1, c2) != 0 || c1 == '\0') break;
}
- COMMON_INTERCEPTOR_READ_RANGE(ctx, s1, Min(i + 1, n));
- COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, Min(i + 1, n));
+ uptr i1 = i;
+ uptr i2 = i;
+ if (common_flags()->strict_string_checks) {
+ for (; i1 < size && s1[i1]; i1++) {}
+ for (; i2 < size && s2[i2]; i2++) {}
+ }
+ COMMON_INTERCEPTOR_READ_RANGE((ctx), (s1), Min(i1 + 1, size));
+ COMMON_INTERCEPTOR_READ_RANGE((ctx), (s2), Min(i2 + 1, size));
int result = CharCaseCmp(c1, c2);
CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strncasecmp, GET_CALLER_PC(),
- s1, s2, n, result);
+ s1, s2, size, result);
return result;
}
@@ -390,7 +459,7 @@ static inline void StrstrCheck(void *ctx, char *r, const char *s1,
#if SANITIZER_INTERCEPT_STRSTR
DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strstr, uptr called_pc,
- const char *s1, const char *s2, char *result);
+ const char *s1, const char *s2, char *result)
INTERCEPTOR(char*, strstr, const char *s1, const char *s2) {
if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
@@ -413,7 +482,7 @@ INTERCEPTOR(char*, strstr, const char *s1, const char *s2) {
#if SANITIZER_INTERCEPT_STRCASESTR
DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcasestr, uptr called_pc,
- const char *s1, const char *s2, char *result);
+ const char *s1, const char *s2, char *result)
INTERCEPTOR(char*, strcasestr, const char *s1, const char *s2) {
void *ctx;
@@ -434,7 +503,7 @@ INTERCEPTOR(char*, strcasestr, const char *s1, const char *s2) {
#if SANITIZER_INTERCEPT_MEMMEM
DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_memmem, uptr called_pc,
const void *s1, SIZE_T len1, const void *s2,
- SIZE_T len2, void *result);
+ SIZE_T len2, void *result)
INTERCEPTOR(void*, memmem, const void *s1, SIZE_T len1, const void *s2,
SIZE_T len2) {
@@ -553,14 +622,9 @@ INTERCEPTOR(char *, strpbrk, const char *s1, const char *s2) {
#endif
#if SANITIZER_INTERCEPT_MEMSET
-INTERCEPTOR(void*, memset, void *dst, int v, uptr size) {
- if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
- return internal_memset(dst, v, size);
+INTERCEPTOR(void *, memset, void *dst, int v, uptr size) {
void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, memset, dst, v, size);
- if (common_flags()->intercept_intrin)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size);
- return REAL(memset)(dst, v, size);
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size);
}
#define INIT_MEMSET COMMON_INTERCEPT_FUNCTION(memset)
@@ -569,16 +633,9 @@ INTERCEPTOR(void*, memset, void *dst, int v, uptr size) {
#endif
#if SANITIZER_INTERCEPT_MEMMOVE
-INTERCEPTOR(void*, memmove, void *dst, const void *src, uptr size) {
- if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
- return internal_memmove(dst, src, size);
+INTERCEPTOR(void *, memmove, void *dst, const void *src, uptr size) {
void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, memmove, dst, src, size);
- if (common_flags()->intercept_intrin) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size);
- }
- return REAL(memmove)(dst, src, size);
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
}
#define INIT_MEMMOVE COMMON_INTERCEPT_FUNCTION(memmove)
@@ -587,25 +644,30 @@ INTERCEPTOR(void*, memmove, void *dst, const void *src, uptr size) {
#endif
#if SANITIZER_INTERCEPT_MEMCPY
-INTERCEPTOR(void*, memcpy, void *dst, const void *src, uptr size) {
- if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) {
- // On OS X, calling internal_memcpy here will cause memory corruptions,
- // because memcpy and memmove are actually aliases of the same
- // implementation. We need to use internal_memmove here.
- return internal_memmove(dst, src, size);
- }
- void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, memcpy, dst, src, size);
- if (common_flags()->intercept_intrin) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size);
- }
+INTERCEPTOR(void *, memcpy, void *dst, const void *src, uptr size) {
+ // On OS X, memcpy and memmove can be aliases of the same implementation,
+ // so the dispatch below falls back to the memmove interceptor body
+ // whenever the platform cannot tell the two apart.
// N.B.: If we switch this to internal_ we'll have to use internal_memmove
// due to memcpy being an alias of memmove on OS X.
- return REAL(memcpy)(dst, src, size);
+ void *ctx;
+ if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) {
+ COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size);
+ } else {
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
+ }
}
-#define INIT_MEMCPY COMMON_INTERCEPT_FUNCTION(memcpy)
+#define INIT_MEMCPY \
+ do { \
+ if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) { \
+ COMMON_INTERCEPT_FUNCTION(memcpy); \
+ } else { \
+ ASSIGN_REAL(memcpy, memmove); \
+ } \
+ CHECK(REAL(memcpy)); \
+ } while (false)
+
#else
#define INIT_MEMCPY
#endif
@@ -663,7 +725,16 @@ INTERCEPTOR(void*, memchr, const void *s, int c, SIZE_T n) {
return internal_memchr(s, c, n);
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, memchr, s, c, n);
+#if SANITIZER_WINDOWS
+ void *res;
+ if (REAL(memchr)) {
+ res = REAL(memchr)(s, c, n);
+ } else {
+ res = internal_memchr(s, c, n);
+ }
+#else
void *res = REAL(memchr)(s, c, n);
+#endif
uptr len = res ? (char *)res - (const char *)s + 1 : n;
COMMON_INTERCEPTOR_READ_RANGE(ctx, s, len);
return res;
@@ -1218,12 +1289,12 @@ FORMAT_INTERCEPTOR_IMPL(__isoc99_sscanf, __isoc99_vsscanf, str, format)
#if SANITIZER_INTERCEPT_SCANF
#define INIT_SCANF \
- COMMON_INTERCEPT_FUNCTION(scanf); \
- COMMON_INTERCEPT_FUNCTION(sscanf); \
- COMMON_INTERCEPT_FUNCTION(fscanf); \
- COMMON_INTERCEPT_FUNCTION(vscanf); \
- COMMON_INTERCEPT_FUNCTION(vsscanf); \
- COMMON_INTERCEPT_FUNCTION(vfscanf);
+ COMMON_INTERCEPT_FUNCTION_LDBL(scanf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(sscanf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(fscanf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vscanf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vsscanf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vfscanf);
#else
#define INIT_SCANF
#endif
@@ -1396,16 +1467,16 @@ FORMAT_INTERCEPTOR_IMPL(__isoc99_snprintf, __isoc99_vsnprintf, str, size,
#if SANITIZER_INTERCEPT_PRINTF
#define INIT_PRINTF \
- COMMON_INTERCEPT_FUNCTION(printf); \
- COMMON_INTERCEPT_FUNCTION(sprintf); \
- COMMON_INTERCEPT_FUNCTION(snprintf); \
- COMMON_INTERCEPT_FUNCTION(asprintf); \
- COMMON_INTERCEPT_FUNCTION(fprintf); \
- COMMON_INTERCEPT_FUNCTION(vprintf); \
- COMMON_INTERCEPT_FUNCTION(vsprintf); \
- COMMON_INTERCEPT_FUNCTION(vsnprintf); \
- COMMON_INTERCEPT_FUNCTION(vasprintf); \
- COMMON_INTERCEPT_FUNCTION(vfprintf);
+ COMMON_INTERCEPT_FUNCTION_LDBL(printf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(sprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(snprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(asprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(fprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vsprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vsnprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vasprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vfprintf);
#else
#define INIT_PRINTF
#endif
@@ -4174,6 +4245,20 @@ INTERCEPTOR(char *, tmpnam_r, char *s) {
#define INIT_TMPNAM_R
#endif
+#if SANITIZER_INTERCEPT_TTYNAME_R
+INTERCEPTOR(int, ttyname_r, int fd, char *name, SIZE_T namesize) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ttyname_r, fd, name, namesize);
+ int res = REAL(ttyname_r)(fd, name, namesize);
+ if (res == 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ return res;
+}
+#define INIT_TTYNAME_R COMMON_INTERCEPT_FUNCTION(ttyname_r);
+#else
+#define INIT_TTYNAME_R
+#endif
+
#if SANITIZER_INTERCEPT_TEMPNAM
INTERCEPTOR(char *, tempnam, char *dir, char *pfx) {
void *ctx;
@@ -4802,47 +4887,67 @@ INTERCEPTOR(int, capset, void *hdrp, const void *datap) {
#endif
#if SANITIZER_INTERCEPT_AEABI_MEM
-DECLARE_REAL_AND_INTERCEPTOR(void *, memmove, void *, const void *, uptr)
-DECLARE_REAL_AND_INTERCEPTOR(void *, memcpy, void *, const void *, uptr)
-DECLARE_REAL_AND_INTERCEPTOR(void *, memset, void *, int, uptr)
-
INTERCEPTOR(void *, __aeabi_memmove, void *to, const void *from, uptr size) {
- return WRAP(memmove)(to, from, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
}
+
INTERCEPTOR(void *, __aeabi_memmove4, void *to, const void *from, uptr size) {
- return WRAP(memmove)(to, from, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
}
+
INTERCEPTOR(void *, __aeabi_memmove8, void *to, const void *from, uptr size) {
- return WRAP(memmove)(to, from, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
}
+
INTERCEPTOR(void *, __aeabi_memcpy, void *to, const void *from, uptr size) {
- return WRAP(memcpy)(to, from, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
}
+
INTERCEPTOR(void *, __aeabi_memcpy4, void *to, const void *from, uptr size) {
- return WRAP(memcpy)(to, from, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
}
+
INTERCEPTOR(void *, __aeabi_memcpy8, void *to, const void *from, uptr size) {
- return WRAP(memcpy)(to, from, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
}
+
// Note the argument order.
INTERCEPTOR(void *, __aeabi_memset, void *block, uptr size, int c) {
- return WRAP(memset)(block, c, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
}
+
INTERCEPTOR(void *, __aeabi_memset4, void *block, uptr size, int c) {
- return WRAP(memset)(block, c, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
}
+
INTERCEPTOR(void *, __aeabi_memset8, void *block, uptr size, int c) {
- return WRAP(memset)(block, c, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
}
+
INTERCEPTOR(void *, __aeabi_memclr, void *block, uptr size) {
- return WRAP(memset)(block, 0, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
}
+
INTERCEPTOR(void *, __aeabi_memclr4, void *block, uptr size) {
- return WRAP(memset)(block, 0, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
}
+
INTERCEPTOR(void *, __aeabi_memclr8, void *block, uptr size) {
- return WRAP(memset)(block, 0, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
}
+
#define INIT_AEABI_MEM \
COMMON_INTERCEPT_FUNCTION(__aeabi_memmove); \
COMMON_INTERCEPT_FUNCTION(__aeabi_memmove4); \
@@ -4861,11 +4966,11 @@ INTERCEPTOR(void *, __aeabi_memclr8, void *block, uptr size) {
#endif // SANITIZER_INTERCEPT_AEABI_MEM
#if SANITIZER_INTERCEPT___BZERO
-DECLARE_REAL_AND_INTERCEPTOR(void *, memset, void *, int, uptr);
-
INTERCEPTOR(void *, __bzero, void *block, uptr size) {
- return WRAP(memset)(block, 0, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
}
+
#define INIT___BZERO COMMON_INTERCEPT_FUNCTION(__bzero);
#else
#define INIT___BZERO
@@ -5855,6 +5960,72 @@ INTERCEPTOR(int, __lxstat64, int version, const char *path, void *buf) {
// FIXME: add other *stat interceptor
+#if SANITIZER_INTERCEPT_UTMP
+INTERCEPTOR(void *, getutent, int dummy) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getutent, dummy);
+ void *res = REAL(getutent)(dummy);
+ if (res)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmp_sz);
+ return res;
+}
+INTERCEPTOR(void *, getutid, void *ut) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getutid, ut);
+ void *res = REAL(getutid)(ut);
+ if (res)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmp_sz);
+ return res;
+}
+INTERCEPTOR(void *, getutline, void *ut) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getutline, ut);
+ void *res = REAL(getutline)(ut);
+ if (res)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmp_sz);
+ return res;
+}
+#define INIT_UTMP \
+ COMMON_INTERCEPT_FUNCTION(getutent); \
+ COMMON_INTERCEPT_FUNCTION(getutid); \
+ COMMON_INTERCEPT_FUNCTION(getutline);
+#else
+#define INIT_UTMP
+#endif
+
+#if SANITIZER_INTERCEPT_UTMPX
+INTERCEPTOR(void *, getutxent, int dummy) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getutxent, dummy);
+ void *res = REAL(getutxent)(dummy);
+ if (res)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmpx_sz);
+ return res;
+}
+INTERCEPTOR(void *, getutxid, void *ut) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getutxid, ut);
+ void *res = REAL(getutxid)(ut);
+ if (res)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmpx_sz);
+ return res;
+}
+INTERCEPTOR(void *, getutxline, void *ut) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getutxline, ut);
+ void *res = REAL(getutxline)(ut);
+ if (res)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmpx_sz);
+ return res;
+}
+#define INIT_UTMPX \
+ COMMON_INTERCEPT_FUNCTION(getutxent); \
+ COMMON_INTERCEPT_FUNCTION(getutxid); \
+ COMMON_INTERCEPT_FUNCTION(getutxline);
+#else
+#define INIT_UTMPX
+#endif
+
static void InitializeCommonInterceptors() {
static u64 metadata_mem[sizeof(MetadataHashMap) / sizeof(u64) + 1];
interceptor_metadata_map = new((void *)&metadata_mem) MetadataHashMap();
@@ -5999,6 +6170,7 @@ static void InitializeCommonInterceptors() {
INIT_PTHREAD_BARRIERATTR_GETPSHARED;
INIT_TMPNAM;
INIT_TMPNAM_R;
+ INIT_TTYNAME_R;
INIT_TEMPNAM;
INIT_PTHREAD_SETNAME_NP;
INIT_SINCOS;
@@ -6050,4 +6222,6 @@ static void InitializeCommonInterceptors() {
INIT___LXSTAT;
INIT___LXSTAT64;
// FIXME: add other *stat interceptors.
+ INIT_UTMP;
+ INIT_UTMPX;
}
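
The strict_string_checks extension to strncmp/strncasecmp above changes only how much of each string is reported as read, never the comparison result. A small sketch of that read-extent logic (ReadExtent is a hypothetical helper; the arithmetic matches the interceptor):

    #include <cassert>
    #include <cstddef>

    // i is the index where the comparison loop stopped.
    static size_t ReadExtent(const char *s, size_t i, size_t size,
                             bool strict) {
      size_t j = i;
      if (strict)
        while (j < size && s[j]) j++;        // scan on to the NUL or the cap
      return j + 1 < size ? j + 1 : size;    // Min(j + 1, size)
    }

    int main() {
      const char s1[] = "ab\0XY";            // NUL at index 2
      // Comparing "ab..." against "ac..." stops at i == 1.
      assert(ReadExtent(s1, 1, 5, /*strict=*/false) == 2);  // just "ab"
      assert(ReadExtent(s1, 1, 5, /*strict=*/true) == 3);   // through the NUL
      return 0;
    }
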
diff --git a/lib/sanitizer_common/sanitizer_common_interceptors_format.inc b/lib/sanitizer_common/sanitizer_common_interceptors_format.inc
index 92318cda35fd..12563499c515 100644
--- a/lib/sanitizer_common/sanitizer_common_interceptors_format.inc
+++ b/lib/sanitizer_common/sanitizer_common_interceptors_format.inc
@@ -435,10 +435,6 @@ static const char *printf_parse_next(const char *p, PrintfDirective *dir) {
}
static int printf_get_value_size(PrintfDirective *dir) {
- if (dir->convSpecifier == 'm') {
- return sizeof(char *);
- }
-
if (char_is_one_of(dir->convSpecifier, "cCsS")) {
unsigned charSize =
format_get_char_size(dir->convSpecifier, dir->lengthModifier);
@@ -519,6 +515,9 @@ static void printf_common(void *ctx, const char *format, va_list aq) {
// Dynamic precision
SKIP_SCALAR_ARG(&aq, 'd', sizeof(int));
}
+ // %m does not consume an argument; it prints strerror(errno).
+ if (dir.convSpecifier == 'm')
+ continue;
int size = printf_get_value_size(&dir);
if (size == FSS_INVALID) {
Report("WARNING: unexpected format specifier in printf "
diff --git a/lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc b/lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc
index 959c622a32f9..4ed9afedf84a 100755
--- a/lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc
+++ b/lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc
@@ -583,7 +583,8 @@ static void ioctl_common_pre(void *ctx, const ioctl_desc *desc, int d,
return;
if (request == IOCTL_SIOCGIFCONF) {
struct __sanitizer_ifconf *ifc = (__sanitizer_ifconf *)arg;
- COMMON_INTERCEPTOR_READ_RANGE(ctx, &ifc->ifc_len, sizeof(ifc->ifc_len));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, (char*)&ifc->ifc_len,
+ sizeof(ifc->ifc_len));
}
}
diff --git a/lib/sanitizer_common/sanitizer_common_libcdep.cc b/lib/sanitizer_common/sanitizer_common_libcdep.cc
index 596f5bcd3173..49ca961f3cb0 100644
--- a/lib/sanitizer_common/sanitizer_common_libcdep.cc
+++ b/lib/sanitizer_common/sanitizer_common_libcdep.cc
@@ -13,6 +13,7 @@
#include "sanitizer_common.h"
+#include "sanitizer_allocator_interface.h"
#include "sanitizer_flags.h"
#include "sanitizer_stackdepot.h"
#include "sanitizer_stacktrace.h"
@@ -46,7 +47,7 @@ void SetSandboxingCallback(void (*f)()) {
sandboxing_callback = f;
}
-void ReportErrorSummary(const char *error_type, StackTrace *stack) {
+void ReportErrorSummary(const char *error_type, const StackTrace *stack) {
#if !SANITIZER_GO
if (!common_flags()->print_summary)
return;
@@ -69,12 +70,15 @@ void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded)) {
SoftRssLimitExceededCallback = Callback;
}
+#if SANITIZER_LINUX && !SANITIZER_GO
void BackgroundThread(void *arg) {
uptr hard_rss_limit_mb = common_flags()->hard_rss_limit_mb;
uptr soft_rss_limit_mb = common_flags()->soft_rss_limit_mb;
+ bool heap_profile = common_flags()->heap_profile;
uptr prev_reported_rss = 0;
uptr prev_reported_stack_depot_size = 0;
bool reached_soft_rss_limit = false;
+ uptr rss_during_last_reported_profile = 0;
while (true) {
SleepForMillis(100);
uptr current_rss_mb = GetRSS() >> 20;
@@ -116,8 +120,15 @@ void BackgroundThread(void *arg) {
SoftRssLimitExceededCallback(false);
}
}
+ if (heap_profile &&
+ current_rss_mb > rss_during_last_reported_profile * 1.1) {
+ Printf("\n\nHEAP PROFILE at RSS %zdMb\n", current_rss_mb);
+ __sanitizer_print_memory_profile(90);
+ rss_during_last_reported_profile = current_rss_mb;
+ }
}
}
+#endif
void WriteToSyslog(const char *msg) {
InternalScopedString msg_copy(kErrorMessageBufferSize);
@@ -142,7 +153,8 @@ void MaybeStartBackgroudThread() {
!SANITIZER_GO // Need to implement/test on other platforms.
// Start the background thread if one of the rss limits is given.
if (!common_flags()->hard_rss_limit_mb &&
- !common_flags()->soft_rss_limit_mb) return;
+ !common_flags()->soft_rss_limit_mb &&
+ !common_flags()->heap_profile) return;
if (!&real_pthread_create) return; // Can't spawn the thread anyway.
internal_start_thread(BackgroundThread, nullptr);
#endif
@@ -152,7 +164,7 @@ void MaybeStartBackgroudThread() {
void NOINLINE
__sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args) {
- PrepareForSandboxing(args);
- if (sandboxing_callback)
- sandboxing_callback();
+ __sanitizer::PrepareForSandboxing(args);
+ if (__sanitizer::sandboxing_callback)
+ __sanitizer::sandboxing_callback();
}
diff --git a/lib/sanitizer_common/sanitizer_common_nolibc.cc b/lib/sanitizer_common/sanitizer_common_nolibc.cc
index e24cf998ec69..ba54c739a9e0 100644
--- a/lib/sanitizer_common/sanitizer_common_nolibc.cc
+++ b/lib/sanitizer_common/sanitizer_common_nolibc.cc
@@ -17,6 +17,9 @@
namespace __sanitizer {
+// The Windows implementations of these functions use the win32 API directly,
+// bypassing libc.
+#if !SANITIZER_WINDOWS
#if SANITIZER_LINUX
bool ShouldLogAfterPrintf() { return false; }
void LogMessageOnPrintf(const char *str) {}
@@ -24,5 +27,10 @@ void LogMessageOnPrintf(const char *str) {}
void WriteToSyslog(const char *buffer) {}
void Abort() { internal__exit(1); }
void SleepForSeconds(int seconds) { internal_sleep(seconds); }
+#endif // !SANITIZER_WINDOWS
+
+#if !SANITIZER_WINDOWS && !SANITIZER_MAC
+void ListOfModules::init() {}
+#endif
} // namespace __sanitizer
diff --git a/lib/sanitizer_common/sanitizer_coverage_libcdep.cc b/lib/sanitizer_common/sanitizer_coverage_libcdep.cc
index 51b53d345ab8..ebdee33d7d5b 100644
--- a/lib/sanitizer_common/sanitizer_coverage_libcdep.cc
+++ b/lib/sanitizer_common/sanitizer_coverage_libcdep.cc
@@ -47,6 +47,8 @@
#include "sanitizer_symbolizer.h"
#include "sanitizer_flags.h"
+using namespace __sanitizer;
+
static const u64 kMagic64 = 0xC0BFFFFFFFFFFF64ULL;
static const u64 kMagic32 = 0xC0BFFFFFFFFFFF32ULL;
static const uptr kNumWordsForMagic = SANITIZER_WORDSIZE == 64 ? 1 : 2;
@@ -110,7 +112,6 @@ class CoverageData {
uptr *data();
uptr size() const;
- uptr *buffer() const { return pc_buffer; }
private:
struct NamedPcRange {
@@ -125,9 +126,8 @@ class CoverageData {
// Maximal size pc array may ever grow.
// We MmapNoReserve this space to ensure that the array is contiguous.
- static const uptr kPcArrayMaxSize = FIRST_32_SECOND_64(
- 1 << (SANITIZER_ANDROID ? 24 : (SANITIZER_WINDOWS ? 27 : 26)),
- 1 << 27);
+ static const uptr kPcArrayMaxSize =
+ FIRST_32_SECOND_64(1 << (SANITIZER_ANDROID ? 24 : 26), 1 << 27);
// The amount file mapping for the pc array is grown by.
static const uptr kPcArrayMmapSize = 64 * 1024;
@@ -143,8 +143,6 @@ class CoverageData {
// Descriptor of the file mapped pc array.
fd_t pc_fd;
- uptr *pc_buffer;
-
// Vector of coverage guard arrays, protected by mu.
InternalMmapVectorNoCtor<s32*> guard_array_vec;
@@ -216,11 +214,6 @@ void CoverageData::Enable() {
atomic_store(&pc_array_size, kPcArrayMaxSize, memory_order_relaxed);
}
- pc_buffer = nullptr;
- if (common_flags()->coverage_pc_buffer)
- pc_buffer = reinterpret_cast<uptr *>(MmapNoReserveOrDie(
- sizeof(uptr) * kPcArrayMaxSize, "CovInit::pc_buffer"));
-
cc_array = reinterpret_cast<uptr **>(MmapNoReserveOrDie(
sizeof(uptr *) * kCcArrayMaxSize, "CovInit::cc_array"));
atomic_store(&cc_array_size, kCcArrayMaxSize, memory_order_relaxed);
@@ -258,10 +251,6 @@ void CoverageData::Disable() {
UnmapOrDie(cc_array, sizeof(uptr *) * kCcArrayMaxSize);
cc_array = nullptr;
}
- if (pc_buffer) {
- UnmapOrDie(pc_buffer, sizeof(uptr) * kPcArrayMaxSize);
- pc_buffer = nullptr;
- }
if (tr_event_array) {
UnmapOrDie(tr_event_array,
sizeof(tr_event_array[0]) * kTrEventArrayMaxSize +
@@ -430,7 +419,6 @@ void CoverageData::Add(uptr pc, u32 *guard) {
atomic_load(&pc_array_size, memory_order_acquire));
uptr counter = atomic_fetch_add(&coverage_counter, 1, memory_order_relaxed);
pc_array[idx] = BundlePcAndCounter(pc, counter);
- if (pc_buffer) pc_buffer[counter] = pc;
}
// Registers a pair caller=>callee.
@@ -966,6 +954,7 @@ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init() {
}
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() {
coverage_data.DumpAll();
+ __sanitizer_dump_trace_pc_guard_coverage();
}
SANITIZER_INTERFACE_ATTRIBUTE void
__sanitizer_cov_module_init(s32 *guards, uptr npcs, u8 *counters,
@@ -1019,12 +1008,6 @@ uptr __sanitizer_get_coverage_guards(uptr **data) {
}
SANITIZER_INTERFACE_ATTRIBUTE
-uptr __sanitizer_get_coverage_pc_buffer(uptr **data) {
- *data = coverage_data.buffer();
- return __sanitizer_get_total_unique_coverage();
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_number_of_counters() {
return coverage_data.GetNumberOf8bitCounters();
}
@@ -1034,8 +1017,26 @@ uptr __sanitizer_update_counter_bitset_and_clear_counters(u8 *bitset) {
return coverage_data.Update8bitCounterBitsetAndClearCounters(bitset);
}
// Default empty implementations (weak). Users should redefine them.
+#if !SANITIZER_WINDOWS // weak does not work on Windows.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_cmp() {}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_cmp1() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_cmp2() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_cmp4() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_cmp8() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_switch() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_div4() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_div8() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_gep() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_pc_indir() {}
+#endif // !SANITIZER_WINDOWS
} // extern "C"
diff --git a/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cc b/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cc
new file mode 100644
index 000000000000..d83b77917bda
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cc
@@ -0,0 +1,165 @@
+//===-- sanitizer_coverage_libcdep_new.cc ---------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Sanitizer Coverage Controller for Trace PC Guard.
+
+#include "sanitizer_allocator_internal.h"
+#include "sanitizer_atomic.h"
+#include "sanitizer_common.h"
+#include "sanitizer_symbolizer.h"
+
+using namespace __sanitizer;
+
+using AddressRange = LoadedModule::AddressRange;
+
+namespace {
+
+static const u64 Magic64 = 0xC0BFFFFFFFFFFF64ULL;
+static const u64 Magic32 = 0xC0BFFFFFFFFFFF32ULL;
+static const u64 Magic = SANITIZER_WORDSIZE == 64 ? Magic64 : Magic32;
+
+static fd_t OpenFile(const char* path) {
+ error_t err;
+ fd_t fd = OpenFile(path, WrOnly, &err);
+ if (fd == kInvalidFd)
+ Report("SanitizerCoverage: failed to open %s for writing (reason: %d)\n",
+ path, err);
+ return fd;
+}
+
+static void GetCoverageFilename(char* path, const char* name,
+ const char* extension) {
+ CHECK(name);
+ internal_snprintf(path, kMaxPathLength, "%s/%s.%zd.%s",
+ common_flags()->coverage_dir, name, internal_getpid(),
+ extension);
+}
+
+static void WriteModuleCoverage(char* file_path, const char* module_name,
+ const uptr* pcs, uptr len) {
+ GetCoverageFilename(file_path, StripModuleName(module_name), "sancov");
+ fd_t fd = OpenFile(file_path);
+ WriteToFile(fd, &Magic, sizeof(Magic));
+ WriteToFile(fd, pcs, len * sizeof(*pcs));
+ CloseFile(fd);
+ Printf("SanitizerCoverage: %s %zd PCs written\n", file_path, len);
+}
+
+static void SanitizerDumpCoverage(const uptr* unsorted_pcs, uptr len) {
+ if (!len) return;
+
+ char* file_path = static_cast<char*>(InternalAlloc(kMaxPathLength));
+ char* module_name = static_cast<char*>(InternalAlloc(kMaxPathLength));
+ uptr* pcs = static_cast<uptr*>(InternalAlloc(len * sizeof(uptr)));
+
+ internal_memcpy(pcs, unsorted_pcs, len * sizeof(uptr));
+ SortArray(pcs, len);
+
+ bool module_found = false;
+ uptr last_base = 0;
+ uptr module_start_idx = 0;
+
+ for (uptr i = 0; i < len; ++i) {
+ const uptr pc = pcs[i];
+ if (!pc) continue;
+
+ if (!__sanitizer_get_module_and_offset_for_pc(pc, nullptr, 0, &pcs[i])) {
+ Printf("ERROR: bad pc %x\n", pc);
+ continue;
+ }
+ uptr module_base = pc - pcs[i];
+
+ if (module_base != last_base || !module_found) {
+ if (module_found) {
+ WriteModuleCoverage(file_path, module_name, &pcs[module_start_idx],
+ i - module_start_idx);
+ }
+
+ last_base = module_base;
+ module_start_idx = i;
+ module_found = true;
+ __sanitizer_get_module_and_offset_for_pc(pc, module_name, kMaxPathLength,
+ &pcs[i]);
+ }
+ }
+
+ if (module_found) {
+ WriteModuleCoverage(file_path, module_name, &pcs[module_start_idx],
+ len - module_start_idx);
+ }
+
+ InternalFree(file_path);
+ InternalFree(module_name);
+ InternalFree(pcs);
+}
+
+// Collects trace-pc guard coverage.
+// This class relies on zero-initialization.
+class TracePcGuardController {
+ public:
+ void Initialize() {
+ CHECK(!initialized);
+
+ initialized = true;
+ pc_vector.Initialize(0);
+ }
+
+ void InitTracePcGuard(u32* start, u32* end) {
+ if (!initialized) Initialize();
+ CHECK(!*start);
+ CHECK_NE(start, end);
+
+ u32 i = pc_vector.size();
+ for (u32* p = start; p < end; p++) *p = ++i;
+ pc_vector.resize(i);
+ }
+
+ void TracePcGuard(u32* guard, uptr pc) {
+ atomic_uint32_t* guard_ptr = reinterpret_cast<atomic_uint32_t*>(guard);
+ u32 idx = atomic_exchange(guard_ptr, 0, memory_order_relaxed);
+ if (!idx) return;
+ // Guard indices start from 1, so adjust for the 0-based pc_vector.
+ pc_vector[idx - 1] = pc;
+ }
+
+ void Dump() {
+ if (!initialized || !common_flags()->coverage) return;
+ __sanitizer_dump_coverage(pc_vector.data(), pc_vector.size());
+ }
+
+ private:
+ bool initialized;
+ InternalMmapVectorNoCtor<uptr> pc_vector;
+};
+
+static TracePcGuardController pc_guard_controller;
+
+} // namespace
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_coverage( // NOLINT
+ const uptr* pcs, uptr len) {
+ return SanitizerDumpCoverage(pcs, len);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_trace_pc_guard(u32* guard) {
+ if (!*guard) return;
+ pc_guard_controller.TracePcGuard(guard, GET_CALLER_PC() - 1);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_trace_pc_guard_init(u32* start, u32* end) {
+ if (start == end || *start) return;
+ pc_guard_controller.InitTracePcGuard(start, end);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_trace_pc_guard_coverage() {
+ pc_guard_controller.Dump();
+}
+} // extern "C"
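
The guard lifecycle implemented above: __sanitizer_cov_trace_pc_guard_init numbers the guards 1..N (zero marks a guard as consumed), and the first hit of each guard atomically swaps it back to zero while recording the caller PC, so each edge is logged at most once. A self-contained model with std::atomic in place of the sanitizer atomics (a sketch, not the controller itself):

    #include <atomic>
    #include <cassert>
    #include <vector>

    static std::vector<unsigned long> pcs;  // stands in for pc_vector

    static void InitGuards(std::atomic<unsigned> *start,
                           std::atomic<unsigned> *end) {
      unsigned i = (unsigned)pcs.size();
      for (auto *p = start; p < end; p++) p->store(++i);  // number 1..N
      pcs.resize(i, 0);
    }

    static void TracePcGuard(std::atomic<unsigned> *guard, unsigned long pc) {
      unsigned idx = guard->exchange(0, std::memory_order_relaxed);
      if (!idx) return;   // zero means this guard already fired
      pcs[idx - 1] = pc;  // indices start from 1
    }

    int main() {
      std::atomic<unsigned> guards[3] = {};
      InitGuards(guards, guards + 3);
      TracePcGuard(&guards[1], 0x401234);
      TracePcGuard(&guards[1], 0x401234);  // second hit is a no-op
      assert(pcs[1] == 0x401234 && pcs[0] == 0 && pcs[2] == 0);
      return 0;
    }
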
diff --git a/lib/sanitizer_common/sanitizer_dbghelp.h b/lib/sanitizer_common/sanitizer_dbghelp.h
new file mode 100644
index 000000000000..1689edbf92db
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_dbghelp.h
@@ -0,0 +1,42 @@
+//===-- sanitizer_dbghelp.h ------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Wrappers for lazily loaded dbghelp.dll. Provides function pointers and a
+// callback to initialize them.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_SYMBOLIZER_WIN_H
+#define SANITIZER_SYMBOLIZER_WIN_H
+
+#if !SANITIZER_WINDOWS
+#error "sanitizer_dbghelp.h is a Windows-only header"
+#endif
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#include <dbghelp.h>
+
+namespace __sanitizer {
+
+extern decltype(::StackWalk64) *StackWalk64;
+extern decltype(::SymCleanup) *SymCleanup;
+extern decltype(::SymFromAddr) *SymFromAddr;
+extern decltype(::SymFunctionTableAccess64) *SymFunctionTableAccess64;
+extern decltype(::SymGetLineFromAddr64) *SymGetLineFromAddr64;
+extern decltype(::SymGetModuleBase64) *SymGetModuleBase64;
+extern decltype(::SymGetSearchPathW) *SymGetSearchPathW;
+extern decltype(::SymInitialize) *SymInitialize;
+extern decltype(::SymSetOptions) *SymSetOptions;
+extern decltype(::SymSetSearchPathW) *SymSetSearchPathW;
+extern decltype(::UnDecorateSymbolName) *UnDecorateSymbolName;
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_SYMBOLIZER_WIN_H
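
These pointers are presumably bound at runtime via LoadLibrary/GetProcAddress; the initialization callback itself lives elsewhere in the runtime, so the wiring below is an illustrative, Windows-only sketch for a single symbol (BindDbgHelp is an invented name):

    #define WIN32_LEAN_AND_MEAN
    #include <windows.h>
    #include <dbghelp.h>

    namespace __sanitizer {
    decltype(::SymInitialize) *SymInitialize;  // one of the pointers above
    }

    static bool BindDbgHelp() {
      HMODULE dbghelp = LoadLibraryA("dbghelp.dll");  // lazy load
      if (!dbghelp) return false;
      __sanitizer::SymInitialize =
          reinterpret_cast<decltype(::SymInitialize) *>(
              GetProcAddress(dbghelp, "SymInitialize"));
      return __sanitizer::SymInitialize != nullptr;
    }

    int main() { return BindDbgHelp() ? 0 : 1; }
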
diff --git a/lib/sanitizer_common/sanitizer_flags.cc b/lib/sanitizer_common/sanitizer_flags.cc
index c2f19d425bdb..913ce3cb423e 100644
--- a/lib/sanitizer_common/sanitizer_flags.cc
+++ b/lib/sanitizer_common/sanitizer_flags.cc
@@ -30,11 +30,6 @@ struct FlagDescription {
IntrusiveList<FlagDescription> flag_descriptions;
-// If set, the tool will install its own SEGV signal handler by default.
-#ifndef SANITIZER_NEEDS_SEGV
-# define SANITIZER_NEEDS_SEGV 1
-#endif
-
void CommonFlags::SetDefaults() {
#define COMMON_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "sanitizer_flags.inc"
diff --git a/lib/sanitizer_common/sanitizer_flags.inc b/lib/sanitizer_common/sanitizer_flags.inc
index 203f41ca3d2a..43900f87b330 100644
--- a/lib/sanitizer_common/sanitizer_flags.inc
+++ b/lib/sanitizer_common/sanitizer_flags.inc
@@ -75,7 +75,7 @@ COMMON_FLAG(bool, print_summary, true,
"If false, disable printing error summaries in addition to error "
"reports.")
COMMON_FLAG(bool, check_printf, true, "Check printf arguments.")
-COMMON_FLAG(bool, handle_segv, SANITIZER_NEEDS_SEGV,
+COMMON_FLAG(bool, handle_segv, true,
"If set, registers the tool's custom SIGSEGV/SIGBUS handler.")
COMMON_FLAG(bool, handle_abort, false,
"If set, registers the tool's custom SIGABRT handler.")
@@ -118,6 +118,12 @@ COMMON_FLAG(uptr, soft_rss_limit_mb, 0,
" until the RSS goes below the soft limit."
" This limit does not affect memory allocations other than"
" malloc/new.")
+COMMON_FLAG(bool, heap_profile, false, "Experimental heap profiler, asan-only")
+COMMON_FLAG(s32, allocator_release_to_os_interval_ms, kReleaseToOSIntervalNever,
+ "Experimental. Only affects a 64-bit allocator. If set, tries to "
+ "release unused memory to the OS, but not more often than this "
+ "interval (in milliseconds). Negative values mean do not attempt "
+ "to release memory to the OS.\n")
COMMON_FLAG(bool, can_use_proc_maps_statm, true,
"If false, do not attempt to read /proc/maps/statm."
" Mostly useful for testing sanitizers.")
@@ -144,19 +150,16 @@ COMMON_FLAG(bool, coverage_direct, SANITIZER_ANDROID,
COMMON_FLAG(const char *, coverage_dir, ".",
"Target directory for coverage dumps. Defaults to the current "
"directory.")
-COMMON_FLAG(bool, coverage_pc_buffer, true,
- "If set (and if 'coverage' is set too), the pcs would be collected "
- "in a buffer.")
COMMON_FLAG(bool, full_address_space, false,
"Sanitize complete address space; "
"by default kernel area on 32-bit platforms will not be sanitized")
COMMON_FLAG(bool, print_suppressions, true,
"Print matched suppressions at exit.")
COMMON_FLAG(
- bool, disable_coredump, (SANITIZER_WORDSIZE == 64),
- "Disable core dumping. By default, disable_core=1 on 64-bit to avoid "
- "dumping a 16T+ core file. Ignored on OSes that don't dump core by"
- "default and for sanitizers that don't reserve lots of virtual memory.")
+ bool, disable_coredump, (SANITIZER_WORDSIZE == 64) && !SANITIZER_GO,
+ "Disable core dumping. By default, disable_coredump=1 on 64-bit to avoid"
+ " dumping a 16T+ core file. Ignored on OSes that don't dump core by"
+ " default and for sanitizers that don't reserve lots of virtual memory.")
COMMON_FLAG(bool, use_madv_dontdump, true,
"If set, instructs kernel to not store the (huge) shadow "
"in core file.")
@@ -216,7 +219,7 @@ COMMON_FLAG(bool, decorate_proc_maps, false, "If set, decorate sanitizer "
COMMON_FLAG(int, exitcode, 1, "Override the program exit status if the tool "
"found an error")
COMMON_FLAG(
- bool, abort_on_error, SANITIZER_MAC,
+ bool, abort_on_error, SANITIZER_ANDROID || SANITIZER_MAC,
"If set, the tool calls abort() instead of _exit() after printing the "
"error report.")
COMMON_FLAG(bool, suppress_equal_pcs, true,
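
Both new allocator knobs, like every other COMMON_FLAG here, are ordinary runtime flags set through the owning tool's options environment variable in the usual colon-separated name=value form, e.g. for ASan (a hypothetical invocation):

    ASAN_OPTIONS=heap_profile=1:allocator_release_to_os_interval_ms=1000 ./app
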
diff --git a/lib/sanitizer_common/sanitizer_interface_internal.h b/lib/sanitizer_common/sanitizer_interface_internal.h
index 7f43c84c2e7d..174d5e92ba44 100644
--- a/lib/sanitizer_common/sanitizer_interface_internal.h
+++ b/lib/sanitizer_common/sanitizer_interface_internal.h
@@ -46,8 +46,12 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_report_error_summary(const char *error_summary);
- SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump();
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init();
+ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump();
+ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_coverage(
+ const __sanitizer::uptr *pcs, const __sanitizer::uptr len);
+ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_trace_pc_guard_coverage();
+
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(__sanitizer::u32 *guard);
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_annotate_contiguous_container(const void *beg,
@@ -60,6 +64,11 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
const void *__sanitizer_contiguous_container_find_bad_address(
const void *beg, const void *mid, const void *end);
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ int __sanitizer_get_module_and_offset_for_pc(
+ __sanitizer::uptr pc, char *module_path,
+ __sanitizer::uptr module_path_len, __sanitizer::uptr *pc_offset);
} // extern "C"
#endif // SANITIZER_INTERFACE_INTERNAL_H
diff --git a/lib/sanitizer_common/sanitizer_internal_defs.h b/lib/sanitizer_common/sanitizer_internal_defs.h
index 720672d2908a..02a1e527312e 100644
--- a/lib/sanitizer_common/sanitizer_internal_defs.h
+++ b/lib/sanitizer_common/sanitizer_internal_defs.h
@@ -24,7 +24,7 @@
# define SANITIZER_INTERFACE_ATTRIBUTE __declspec(dllexport)
// FIXME find out what we need on Windows, if anything.
# define SANITIZER_WEAK_ATTRIBUTE
-#elif defined(SANITIZER_GO)
+#elif SANITIZER_GO
# define SANITIZER_INTERFACE_ATTRIBUTE
# define SANITIZER_WEAK_ATTRIBUTE
#else
@@ -32,7 +32,7 @@