aboutsummaryrefslogtreecommitdiff
path: root/contrib/llvm-project/compiler-rt/lib
diff options
context:
space:
mode:
Diffstat (limited to 'contrib/llvm-project/compiler-rt/lib')
-rw-r--r--contrib/llvm-project/compiler-rt/lib/asan/asan_allocator.cpp479
-rw-r--r--contrib/llvm-project/compiler-rt/lib/asan/asan_allocator.h11
-rw-r--r--contrib/llvm-project/compiler-rt/lib/asan/asan_flags.cpp10
-rw-r--r--contrib/llvm-project/compiler-rt/lib/asan/asan_fuchsia.cpp35
-rw-r--r--contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors.cpp41
-rw-r--r--contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors.h22
-rw-r--r--contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors_vfork.S3
-rw-r--r--contrib/llvm-project/compiler-rt/lib/asan/asan_interface_internal.h4
-rw-r--r--contrib/llvm-project/compiler-rt/lib/asan/asan_internal.h2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/asan/asan_linux.cpp45
-rw-r--r--contrib/llvm-project/compiler-rt/lib/asan/asan_mac.cpp48
-rw-r--r--contrib/llvm-project/compiler-rt/lib/asan/asan_malloc_linux.cpp10
-rw-r--r--contrib/llvm-project/compiler-rt/lib/asan/asan_malloc_local.h2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/asan/asan_mapping.h21
-rw-r--r--contrib/llvm-project/compiler-rt/lib/asan/asan_poisoning.cpp6
-rw-r--r--contrib/llvm-project/compiler-rt/lib/asan/asan_premap_shadow.cpp18
-rw-r--r--contrib/llvm-project/compiler-rt/lib/asan/asan_report.cpp5
-rw-r--r--contrib/llvm-project/compiler-rt/lib/asan/asan_rtems.cpp6
-rw-r--r--contrib/llvm-project/compiler-rt/lib/asan/asan_rtl.cpp5
-rw-r--r--contrib/llvm-project/compiler-rt/lib/asan/asan_shadow_setup.cpp45
-rw-r--r--contrib/llvm-project/compiler-rt/lib/asan/asan_stack.h5
-rw-r--r--contrib/llvm-project/compiler-rt/lib/asan/asan_thread.cpp27
-rw-r--r--contrib/llvm-project/compiler-rt/lib/asan/asan_thread.h7
-rw-r--r--contrib/llvm-project/compiler-rt/lib/asan/asan_win.cpp19
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/README.txt2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/aarch64/lse.S236
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/assembly.h93
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/atomic.c93
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/clear_cache.c10
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/cpu_model.c63
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/divdf3.c189
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/divdi3.c15
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/divmoddi4.c13
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/divmodsi4.c13
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/divmodti4.c32
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/divsf3.c174
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/divsi3.c25
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/divtf3.c203
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/divti3.c15
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/emutls.c7
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/extendhfsf2.c6
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/extendhftf2.c23
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/fp_div_impl.inc419
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/fp_extend.h4
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/fp_lib.h7
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/fp_trunc.h4
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/int_div_impl.inc25
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/int_mulo_impl.inc49
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/int_mulv_impl.inc47
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/int_util.h16
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/moddi3.c16
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/modti3.c16
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/mulodi4.c33
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/mulosi4.c33
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/muloti4.c33
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/mulvdi3.c33
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/mulvsi3.c33
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/mulvti3.c33
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/os_version_check.c108
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/paritydi2.c6
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/parityti2.c8
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/riscv/int_mul_impl.inc6
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/truncdfhf2.c4
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/truncsfhf2.c6
-rw-r--r--contrib/llvm-project/compiler-rt/lib/builtins/trunctfhf2.c23
-rw-r--r--contrib/llvm-project/compiler-rt/lib/cfi/cfi.cpp2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/crt/crtbegin.c8
-rw-r--r--contrib/llvm-project/compiler-rt/lib/dfsan/dfsan.cpp100
-rw-r--r--contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_custom.cpp201
-rw-r--r--contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_flags.inc4
-rw-r--r--contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_interceptors.cpp41
-rw-r--r--contrib/llvm-project/compiler-rt/lib/dfsan/done_abilist.txt85
-rw-r--r--contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerCorpus.h62
-rw-r--r--contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDataFlowTrace.cpp2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDriver.cpp91
-rw-r--r--contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerExtFunctionsWeak.cpp2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerExtraCounters.cpp2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerFlags.def37
-rw-r--r--contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerFork.cpp14
-rw-r--r--contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIO.cpp45
-rw-r--r--contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIO.h6
-rw-r--r--contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIOPosix.cpp8
-rw-r--r--contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIOWindows.cpp14
-rw-r--r--contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerInterceptors.cpp253
-rw-r--r--contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerInternal.h4
-rw-r--r--contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerLoop.cpp69
-rw-r--r--contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerMutate.cpp40
-rw-r--r--contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerMutate.h14
-rw-r--r--contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerOptions.h9
-rw-r--r--contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerPlatform.h18
-rw-r--r--contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerTracePC.cpp34
-rw-r--r--contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerTracePC.h6
-rw-r--r--contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtil.h6
-rw-r--r--contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilFuchsia.cpp19
-rw-r--r--contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilLinux.cpp2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilPosix.cpp4
-rw-r--r--contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilWindows.cpp14
-rw-r--r--contrib/llvm-project/compiler-rt/lib/gwp_asan/common.cpp3
-rw-r--r--contrib/llvm-project/compiler-rt/lib/gwp_asan/crash_handler.cpp24
-rw-r--r--contrib/llvm-project/compiler-rt/lib/gwp_asan/crash_handler.h2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/gwp_asan/definitions.h2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp114
-rw-r--r--contrib/llvm-project/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h78
-rw-r--r--contrib/llvm-project/compiler-rt/lib/gwp_asan/mutex.h14
-rw-r--r--contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/backtrace.h48
-rw-r--r--contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/backtrace_fuchsia.cpp21
-rw-r--r--contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/backtrace_linux_libc.cpp25
-rw-r--r--contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/backtrace_sanitizer_common.cpp49
-rw-r--r--contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/options_parser.cpp249
-rw-r--r--contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/options_parser.h12
-rw-r--r--contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/printf.h33
-rw-r--r--contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/segv_handler.h70
-rw-r--r--contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/segv_handler_fuchsia.cpp22
-rw-r--r--contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/segv_handler_posix.cpp182
-rw-r--r--contrib/llvm-project/compiler-rt/lib/gwp_asan/options.inc30
-rw-r--r--contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/common_fuchsia.cpp15
-rw-r--r--contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/common_posix.cpp6
-rw-r--r--contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/guarded_pool_allocator_fuchsia.cpp103
-rw-r--r--contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/guarded_pool_allocator_fuchsia.h22
-rw-r--r--contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/guarded_pool_allocator_posix.cpp64
-rw-r--r--contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/guarded_pool_allocator_posix.h18
-rw-r--r--contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/guarded_pool_allocator_tls.h55
-rw-r--r--contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/mutex_fuchsia.cpp21
-rw-r--r--contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/mutex_fuchsia.h23
-rw-r--r--contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/mutex_posix.h23
-rw-r--r--contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/utilities_fuchsia.cpp19
-rw-r--r--contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/utilities_posix.cpp65
-rw-r--r--contrib/llvm-project/compiler-rt/lib/gwp_asan/random.cpp31
-rw-r--r--contrib/llvm-project/compiler-rt/lib/gwp_asan/random.h23
-rw-r--r--contrib/llvm-project/compiler-rt/lib/gwp_asan/utilities.cpp63
-rw-r--r--contrib/llvm-project/compiler-rt/lib/gwp_asan/utilities.h18
-rw-r--r--contrib/llvm-project/compiler-rt/lib/hwasan/hwasan.cpp4
-rw-r--r--contrib/llvm-project/compiler-rt/lib/hwasan/hwasan.h9
-rw-r--r--contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocator.cpp20
-rw-r--r--contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocator.h14
-rw-r--r--contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_dynamic_shadow.cpp50
-rw-r--r--contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_interceptors_vfork.S1
-rw-r--r--contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_interface_internal.h3
-rw-r--r--contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_linux.cpp74
-rw-r--r--contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_malloc_bisect.h2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_mapping.h9
-rw-r--r--contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_new_delete.cpp39
-rw-r--r--contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_report.cpp25
-rw-r--r--contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread.h2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread_list.h63
-rw-r--r--contrib/llvm-project/compiler-rt/lib/interception/interception.h4
-rw-r--r--contrib/llvm-project/compiler-rt/lib/interception/interception_linux.cpp10
-rw-r--r--contrib/llvm-project/compiler-rt/lib/interception/interception_linux.h10
-rw-r--r--contrib/llvm-project/compiler-rt/lib/interception/interception_win.cpp6
-rw-r--r--contrib/llvm-project/compiler-rt/lib/lsan/lsan.cpp4
-rw-r--r--contrib/llvm-project/compiler-rt/lib/lsan/lsan_allocator.cpp10
-rw-r--r--contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.cpp298
-rw-r--r--contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.h20
-rw-r--r--contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_fuchsia.cpp7
-rw-r--r--contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_linux.cpp5
-rw-r--r--contrib/llvm-project/compiler-rt/lib/lsan/lsan_fuchsia.h2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/lsan/lsan_interceptors.cpp17
-rw-r--r--contrib/llvm-project/compiler-rt/lib/lsan/lsan_posix.h2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/lsan/lsan_thread.cpp5
-rw-r--r--contrib/llvm-project/compiler-rt/lib/lsan/lsan_thread.h2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/memprof/README.txt17
-rw-r--r--contrib/llvm-project/compiler-rt/lib/memprof/memprof.syms.extra1
-rw-r--r--contrib/llvm-project/compiler-rt/lib/memprof/memprof_allocator.cpp905
-rw-r--r--contrib/llvm-project/compiler-rt/lib/memprof/memprof_allocator.h105
-rw-r--r--contrib/llvm-project/compiler-rt/lib/memprof/memprof_descriptions.cpp70
-rw-r--r--contrib/llvm-project/compiler-rt/lib/memprof/memprof_descriptions.h45
-rw-r--r--contrib/llvm-project/compiler-rt/lib/memprof/memprof_flags.cpp93
-rw-r--r--contrib/llvm-project/compiler-rt/lib/memprof/memprof_flags.h45
-rw-r--r--contrib/llvm-project/compiler-rt/lib/memprof/memprof_flags.inc49
-rw-r--r--contrib/llvm-project/compiler-rt/lib/memprof/memprof_init_version.h26
-rw-r--r--contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors.cpp366
-rw-r--r--contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors.h54
-rw-r--r--contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors_memintrinsics.cpp29
-rw-r--r--contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors_memintrinsics.h79
-rw-r--r--contrib/llvm-project/compiler-rt/lib/memprof/memprof_interface_internal.h64
-rw-r--r--contrib/llvm-project/compiler-rt/lib/memprof/memprof_internal.h104
-rw-r--r--contrib/llvm-project/compiler-rt/lib/memprof/memprof_linux.cpp80
-rw-r--r--contrib/llvm-project/compiler-rt/lib/memprof/memprof_malloc_linux.cpp226
-rw-r--r--contrib/llvm-project/compiler-rt/lib/memprof/memprof_mapping.h113
-rw-r--r--contrib/llvm-project/compiler-rt/lib/memprof/memprof_new_delete.cpp145
-rw-r--r--contrib/llvm-project/compiler-rt/lib/memprof/memprof_posix.cpp55
-rw-r--r--contrib/llvm-project/compiler-rt/lib/memprof/memprof_preinit.cpp23
-rw-r--r--contrib/llvm-project/compiler-rt/lib/memprof/memprof_rtl.cpp321
-rw-r--r--contrib/llvm-project/compiler-rt/lib/memprof/memprof_shadow_setup.cpp62
-rw-r--r--contrib/llvm-project/compiler-rt/lib/memprof/memprof_stack.cpp59
-rw-r--r--contrib/llvm-project/compiler-rt/lib/memprof/memprof_stack.h75
-rw-r--r--contrib/llvm-project/compiler-rt/lib/memprof/memprof_stats.cpp157
-rw-r--r--contrib/llvm-project/compiler-rt/lib/memprof/memprof_stats.h61
-rw-r--r--contrib/llvm-project/compiler-rt/lib/memprof/memprof_thread.cpp220
-rw-r--r--contrib/llvm-project/compiler-rt/lib/memprof/memprof_thread.h138
-rw-r--r--contrib/llvm-project/compiler-rt/lib/memprof/weak_symbols.txt1
-rw-r--r--contrib/llvm-project/compiler-rt/lib/msan/msan.cpp51
-rw-r--r--contrib/llvm-project/compiler-rt/lib/msan/msan_interceptors.cpp10
-rw-r--r--contrib/llvm-project/compiler-rt/lib/msan/msan_interface_internal.h10
-rw-r--r--contrib/llvm-project/compiler-rt/lib/msan/msan_linux.cpp3
-rw-r--r--contrib/llvm-project/compiler-rt/lib/msan/msan_poisoning.cpp81
-rw-r--r--contrib/llvm-project/compiler-rt/lib/msan/msan_thread.cpp58
-rw-r--r--contrib/llvm-project/compiler-rt/lib/msan/msan_thread.h24
-rw-r--r--contrib/llvm-project/compiler-rt/lib/profile/GCDAProfiling.c75
-rw-r--r--contrib/llvm-project/compiler-rt/lib/profile/InstrProfiling.c6
-rw-r--r--contrib/llvm-project/compiler-rt/lib/profile/InstrProfiling.h9
-rw-r--r--contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingBuffer.c24
-rw-r--r--contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingFile.c35
-rw-r--r--contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingInternal.c3
-rw-r--r--contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformDarwin.c3
-rw-r--r--contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformLinux.c2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPort.h10
-rw-r--r--contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingValue.c39
-rw-r--r--contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingVersionVar.c17
-rw-r--r--contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingWriter.c3
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp39
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h6
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_checks.h10
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h1
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h3
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_report.cpp8
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_report.h1
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h8
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_atomic.h4
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang.h14
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_mips.h10
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_other.h24
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_x86.h6
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_atomic_msvc.h36
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common.h119
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc199
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_format.inc6
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc15
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_riscv64.inc.S56
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interface.inc1
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp53
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_nolibc.cpp4
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_syscalls.inc14
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_deadlock_detector1.cpp2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_deadlock_detector2.cpp2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_deadlock_detector_interface.h6
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_errno.h2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_errno_codes.h1
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_file.cpp37
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_file.h1
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.h2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flags.cpp9
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flags.h4
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flags.inc19
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp21
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_getauxval.h5
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_interface_internal.h4
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_internal_defs.h21
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_libignore.cpp2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp229
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux.h11
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp217
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp213
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mac.h3
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_netbsd.cpp5
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_openbsd.cpp115
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform.h37
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h347
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cpp2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.cpp155
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.h6
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_openbsd.cpp279
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_openbsd.h382
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp118
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h18
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_solaris.cpp3
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix.cpp12
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix.h2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix_libcdep.cpp29
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps.h2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_bsd.cpp31
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_common.cpp2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_solaris.cpp7
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_ptrauth.h2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_rtems.cpp2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_signal_interceptors.inc5
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_solaris.cpp18
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp9
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.h1
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stackdepotbase.h20
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.cpp37
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h25
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_libcdep.cpp21
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.cpp92
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.h4
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld.h13
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_fuchsia.cpp3
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_fuchsia.h20
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp106
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_mac.cpp20
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_netbsd_libcdep.cpp16
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_internal.h7
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libbacktrace.h2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cpp9
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.cpp21
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.h4
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup.cpp7
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp8
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_report.cpp9
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_win.cpp11
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_syscall_generic.inc2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_syscall_linux_riscv64.inc174
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_syscalls_netbsd.inc119
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h5
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.cpp86
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.h21
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_unwind_win.cpp11
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp46
-rwxr-xr-xcontrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/scudo/scudo_allocator.cpp35
-rw-r--r--contrib/llvm-project/compiler-rt/lib/scudo/scudo_crc32.h2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd.h8
-rw-r--r--contrib/llvm-project/compiler-rt/lib/scudo/scudo_utils.cpp2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/scudo/scudo_utils.h2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/scudo/standalone/allocator_config.h84
-rw-r--r--contrib/llvm-project/compiler-rt/lib/scudo/standalone/atomic_helpers.h22
-rw-r--r--contrib/llvm-project/compiler-rt/lib/scudo/standalone/chunk.h3
-rw-r--r--contrib/llvm-project/compiler-rt/lib/scudo/standalone/combined.h277
-rw-r--r--contrib/llvm-project/compiler-rt/lib/scudo/standalone/common.h10
-rw-r--r--contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags.cpp15
-rw-r--r--contrib/llvm-project/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp12
-rw-r--r--contrib/llvm-project/compiler-rt/lib/scudo/standalone/include/scudo/interface.h53
-rw-r--r--contrib/llvm-project/compiler-rt/lib/scudo/standalone/internal_defs.h1
-rw-r--r--contrib/llvm-project/compiler-rt/lib/scudo/standalone/linux.cpp11
-rw-r--r--contrib/llvm-project/compiler-rt/lib/scudo/standalone/linux.h45
-rw-r--r--contrib/llvm-project/compiler-rt/lib/scudo/standalone/local_cache.h1
-rw-r--r--contrib/llvm-project/compiler-rt/lib/scudo/standalone/memtag.h137
-rw-r--r--contrib/llvm-project/compiler-rt/lib/scudo/standalone/options.h74
-rw-r--r--contrib/llvm-project/compiler-rt/lib/scudo/standalone/primary32.h229
-rw-r--r--contrib/llvm-project/compiler-rt/lib/scudo/standalone/primary64.h175
-rw-r--r--contrib/llvm-project/compiler-rt/lib/scudo/standalone/release.cpp2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/scudo/standalone/release.h162
-rw-r--r--contrib/llvm-project/compiler-rt/lib/scudo/standalone/secondary.h128
-rw-r--r--contrib/llvm-project/compiler-rt/lib/scudo/standalone/stack_depot.h2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/scudo/standalone/string_utils.cpp20
-rw-r--r--contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd_exclusive.h37
-rw-r--r--contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd_shared.h164
-rw-r--r--contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.h8
-rw-r--r--contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.inc37
-rw-r--r--contrib/llvm-project/compiler-rt/lib/tsan/dd/dd_interceptors.cpp11
-rw-r--r--contrib/llvm-project/compiler-rt/lib/tsan/dd/dd_rtl.h2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_dispatch_defs.h11
-rw-r--r--contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_external.cpp11
-rw-r--r--contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_flags.cpp2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors.h2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_libdispatch.cpp32
-rw-r--r--contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp1
-rw-r--r--contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_mach_vm.cpp19
-rw-r--r--contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp106
-rw-r--r--contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface.cpp13
-rw-r--r--contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface_inl.h23
-rw-r--r--contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform.h31
-rw-r--r--contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp24
-rw-r--r--contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp9
-rw-r--r--contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform_posix.cpp47
-rw-r--r--contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_report.cpp6
-rw-r--r--contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp16
-rw-r--r--contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl.h12
-rw-r--r--contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp4
-rw-r--r--contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_sync.cpp2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_flags.cpp6
-rw-r--r--contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_flags.h2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_platform.h2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_type_hash_itanium.cpp2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_type_hash_win.cpp2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/ubsan_minimal/ubsan_minimal_handlers.cpp2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/xray/xray_basic_logging.cpp2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/xray/xray_mips.cpp45
-rw-r--r--contrib/llvm-project/compiler-rt/lib/xray/xray_mips64.cpp64
-rw-r--r--contrib/llvm-project/compiler-rt/lib/xray/xray_x86_64.cpp13
-rw-r--r--contrib/llvm-project/compiler-rt/lib/xray/xray_x86_64.inc1
373 files changed, 11951 insertions, 5075 deletions
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_allocator.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_allocator.cpp
index 126d26d0823b..cd97b37652f8 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_allocator.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_allocator.cpp
@@ -15,20 +15,21 @@
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
+
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_thread.h"
+#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
-#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_quarantine.h"
-#include "lsan/lsan_common.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
namespace __asan {
@@ -50,6 +51,22 @@ static u32 RZSize2Log(u32 rz_size) {
static AsanAllocator &get_allocator();
+static void AtomicContextStore(volatile atomic_uint64_t *atomic_context,
+ u32 tid, u32 stack) {
+ u64 context = tid;
+ context <<= 32;
+ context += stack;
+ atomic_store(atomic_context, context, memory_order_relaxed);
+}
+
+static void AtomicContextLoad(const volatile atomic_uint64_t *atomic_context,
+ u32 &tid, u32 &stack) {
+ u64 context = atomic_load(atomic_context, memory_order_relaxed);
+ stack = context;
+ context >>= 32;
+ tid = context;
+}
+
// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
// L -- left redzone words (0 or more bytes)
@@ -67,32 +84,59 @@ static AsanAllocator &get_allocator();
// ---------------------|
// M -- magic value kAllocBegMagic
// B -- address of ChunkHeader pointing to the first 'H'
-static const uptr kAllocBegMagic = 0xCC6E96B9;
-
-struct ChunkHeader {
- // 1-st 8 bytes.
- u32 chunk_state : 8; // Must be first.
- u32 alloc_tid : 24;
-
- u32 free_tid : 24;
- u32 from_memalign : 1;
- u32 alloc_type : 2;
- u32 rz_log : 3;
- u32 lsan_tag : 2;
- // 2-nd 8 bytes
- // This field is used for small sizes. For large sizes it is equal to
- // SizeClassMap::kMaxSize and the actual size is stored in the
- // SecondaryAllocator's metadata.
- u32 user_requested_size : 29;
+
+class ChunkHeader {
+ public:
+ atomic_uint8_t chunk_state;
+ u8 alloc_type : 2;
+ u8 lsan_tag : 2;
+
// align < 8 -> 0
// else -> log2(min(align, 512)) - 2
- u32 user_requested_alignment_log : 3;
- u32 alloc_context_id;
+ u8 user_requested_alignment_log : 3;
+
+ private:
+ u16 user_requested_size_hi;
+ u32 user_requested_size_lo;
+ atomic_uint64_t alloc_context_id;
+
+ public:
+ uptr UsedSize() const {
+ uptr R = user_requested_size_lo;
+ if (sizeof(uptr) > sizeof(user_requested_size_lo))
+ R += (uptr)user_requested_size_hi << (8 * sizeof(user_requested_size_lo));
+ return R;
+ }
+
+ void SetUsedSize(uptr size) {
+ user_requested_size_lo = size;
+ if (sizeof(uptr) > sizeof(user_requested_size_lo)) {
+ size >>= (8 * sizeof(user_requested_size_lo));
+ user_requested_size_hi = size;
+ CHECK_EQ(user_requested_size_hi, size);
+ }
+ }
+
+ void SetAllocContext(u32 tid, u32 stack) {
+ AtomicContextStore(&alloc_context_id, tid, stack);
+ }
+
+ void GetAllocContext(u32 &tid, u32 &stack) const {
+ AtomicContextLoad(&alloc_context_id, tid, stack);
+ }
};
-struct ChunkBase : ChunkHeader {
- // Header2, intersects with user memory.
- u32 free_context_id;
+class ChunkBase : public ChunkHeader {
+ atomic_uint64_t free_context_id;
+
+ public:
+ void SetFreeContext(u32 tid, u32 stack) {
+ AtomicContextStore(&free_context_id, tid, stack);
+ }
+
+ void GetFreeContext(u32 &tid, u32 &stack) const {
+ AtomicContextLoad(&free_context_id, tid, stack);
+ }
};
static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
@@ -100,35 +144,50 @@ static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);
-// Every chunk of memory allocated by this allocator can be in one of 3 states:
-// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
-// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
-// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
enum {
- CHUNK_AVAILABLE = 0, // 0 is the default value even if we didn't set it.
- CHUNK_ALLOCATED = 2,
- CHUNK_QUARANTINE = 3
+ // Either just allocated by underlying allocator, but AsanChunk is not yet
+ // ready, or almost returned to undelying allocator and AsanChunk is already
+ // meaningless.
+ CHUNK_INVALID = 0,
+ // The chunk is allocated and not yet freed.
+ CHUNK_ALLOCATED = 2,
+ // The chunk was freed and put into quarantine zone.
+ CHUNK_QUARANTINE = 3,
};
-struct AsanChunk: ChunkBase {
+class AsanChunk : public ChunkBase {
+ public:
uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
- uptr UsedSize(bool locked_version = false) {
- if (user_requested_size != SizeClassMap::kMaxSize)
- return user_requested_size;
- return *reinterpret_cast<uptr *>(
- get_allocator().GetMetaData(AllocBeg(locked_version)));
+ bool AddrIsInside(uptr addr) {
+ return (addr >= Beg()) && (addr < Beg() + UsedSize());
}
- void *AllocBeg(bool locked_version = false) {
- if (from_memalign) {
- if (locked_version)
- return get_allocator().GetBlockBeginFastLocked(
- reinterpret_cast<void *>(this));
- return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this));
- }
- return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
+};
+
+class LargeChunkHeader {
+ static constexpr uptr kAllocBegMagic =
+ FIRST_32_SECOND_64(0xCC6E96B9, 0xCC6E96B9CC6E96B9ULL);
+ atomic_uintptr_t magic;
+ AsanChunk *chunk_header;
+
+ public:
+ AsanChunk *Get() const {
+ return atomic_load(&magic, memory_order_acquire) == kAllocBegMagic
+ ? chunk_header
+ : nullptr;
}
- bool AddrIsInside(uptr addr, bool locked_version = false) {
- return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
+
+ void Set(AsanChunk *p) {
+ if (p) {
+ chunk_header = p;
+ atomic_store(&magic, kAllocBegMagic, memory_order_release);
+ return;
+ }
+
+ uptr old = kAllocBegMagic;
+ if (!atomic_compare_exchange_strong(&magic, &old, 0,
+ memory_order_release)) {
+ CHECK_EQ(old, kAllocBegMagic);
+ }
}
};
@@ -139,23 +198,23 @@ struct QuarantineCallback {
}
void Recycle(AsanChunk *m) {
- CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
- atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
- CHECK_NE(m->alloc_tid, kInvalidTid);
- CHECK_NE(m->free_tid, kInvalidTid);
- PoisonShadow(m->Beg(),
- RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
- kAsanHeapLeftRedzoneMagic);
- void *p = reinterpret_cast<void *>(m->AllocBeg());
+ void *p = get_allocator().GetBlockBegin(m);
if (p != m) {
- uptr *alloc_magic = reinterpret_cast<uptr *>(p);
- CHECK_EQ(alloc_magic[0], kAllocBegMagic);
// Clear the magic value, as allocator internals may overwrite the
// contents of deallocated chunk, confusing GetAsanChunk lookup.
- alloc_magic[0] = 0;
- CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
+ reinterpret_cast<LargeChunkHeader *>(p)->Set(nullptr);
}
+ u8 old_chunk_state = CHUNK_QUARANTINE;
+ if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
+ CHUNK_INVALID, memory_order_acquire)) {
+ CHECK_EQ(old_chunk_state, CHUNK_QUARANTINE);
+ }
+
+ PoisonShadow(m->Beg(),
+ RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
+ kAsanHeapLeftRedzoneMagic);
+
// Statistics.
AsanStats &thread_stats = GetCurrentThreadStats();
thread_stats.real_frees++;
@@ -299,23 +358,26 @@ struct Allocator {
// This could be a user-facing chunk (with redzones), or some internal
// housekeeping chunk, like TransferBatch. Start by assuming the former.
AsanChunk *ac = GetAsanChunk((void *)chunk);
- uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)ac);
- uptr beg = ac->Beg();
- uptr end = ac->Beg() + ac->UsedSize(true);
- uptr chunk_end = chunk + allocated_size;
- if (chunk < beg && beg < end && end <= chunk_end &&
- ac->chunk_state == CHUNK_ALLOCATED) {
- // Looks like a valid AsanChunk in use, poison redzones only.
- PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
- uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY);
- FastPoisonShadowPartialRightRedzone(
- end_aligned_down, end - end_aligned_down,
- chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
- } else {
- // This is either not an AsanChunk or freed or quarantined AsanChunk.
- // In either case, poison everything.
- PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
+ uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)chunk);
+ if (ac && atomic_load(&ac->chunk_state, memory_order_acquire) ==
+ CHUNK_ALLOCATED) {
+ uptr beg = ac->Beg();
+ uptr end = ac->Beg() + ac->UsedSize();
+ uptr chunk_end = chunk + allocated_size;
+ if (chunk < beg && beg < end && end <= chunk_end) {
+ // Looks like a valid AsanChunk in use, poison redzones only.
+ PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
+ uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY);
+ FastPoisonShadowPartialRightRedzone(
+ end_aligned_down, end - end_aligned_down,
+ chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
+ return;
+ }
}
+
+ // This is either not an AsanChunk or freed or quarantined AsanChunk.
+ // In either case, poison everything.
+ PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
}
void ReInitialize(const AllocatorOptions &options) {
@@ -348,17 +410,18 @@ struct Allocator {
// -------------------- Helper methods. -------------------------
uptr ComputeRZLog(uptr user_requested_size) {
- u32 rz_log =
- user_requested_size <= 64 - 16 ? 0 :
- user_requested_size <= 128 - 32 ? 1 :
- user_requested_size <= 512 - 64 ? 2 :
- user_requested_size <= 4096 - 128 ? 3 :
- user_requested_size <= (1 << 14) - 256 ? 4 :
- user_requested_size <= (1 << 15) - 512 ? 5 :
- user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
- u32 min_rz = atomic_load(&min_redzone, memory_order_acquire);
- u32 max_rz = atomic_load(&max_redzone, memory_order_acquire);
- return Min(Max(rz_log, RZSize2Log(min_rz)), RZSize2Log(max_rz));
+ u32 rz_log = user_requested_size <= 64 - 16 ? 0
+ : user_requested_size <= 128 - 32 ? 1
+ : user_requested_size <= 512 - 64 ? 2
+ : user_requested_size <= 4096 - 128 ? 3
+ : user_requested_size <= (1 << 14) - 256 ? 4
+ : user_requested_size <= (1 << 15) - 512 ? 5
+ : user_requested_size <= (1 << 16) - 1024 ? 6
+ : 7;
+ u32 hdr_log = RZSize2Log(RoundUpToPowerOfTwo(sizeof(ChunkHeader)));
+ u32 min_log = RZSize2Log(atomic_load(&min_redzone, memory_order_acquire));
+ u32 max_log = RZSize2Log(atomic_load(&max_redzone, memory_order_acquire));
+ return Min(Max(rz_log, Max(min_log, hdr_log)), Max(max_log, hdr_log));
}
static uptr ComputeUserRequestedAlignmentLog(uptr user_requested_alignment) {
@@ -378,16 +441,23 @@ struct Allocator {
// We have an address between two chunks, and we want to report just one.
AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
AsanChunk *right_chunk) {
+ if (!left_chunk)
+ return right_chunk;
+ if (!right_chunk)
+ return left_chunk;
// Prefer an allocated chunk over freed chunk and freed chunk
// over available chunk.
- if (left_chunk->chunk_state != right_chunk->chunk_state) {
- if (left_chunk->chunk_state == CHUNK_ALLOCATED)
+ u8 left_state = atomic_load(&left_chunk->chunk_state, memory_order_relaxed);
+ u8 right_state =
+ atomic_load(&right_chunk->chunk_state, memory_order_relaxed);
+ if (left_state != right_state) {
+ if (left_state == CHUNK_ALLOCATED)
return left_chunk;
- if (right_chunk->chunk_state == CHUNK_ALLOCATED)
+ if (right_state == CHUNK_ALLOCATED)
return right_chunk;
- if (left_chunk->chunk_state == CHUNK_QUARANTINE)
+ if (left_state == CHUNK_QUARANTINE)
return left_chunk;
- if (right_chunk->chunk_state == CHUNK_QUARANTINE)
+ if (right_state == CHUNK_QUARANTINE)
return right_chunk;
}
// Same chunk_state: choose based on offset.
@@ -402,10 +472,11 @@ struct Allocator {
bool UpdateAllocationStack(uptr addr, BufferedStackTrace *stack) {
AsanChunk *m = GetAsanChunkByAddr(addr);
if (!m) return false;
- if (m->chunk_state != CHUNK_ALLOCATED) return false;
+ if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
+ return false;
if (m->Beg() != addr) return false;
- atomic_store((atomic_uint32_t *)&m->alloc_context_id, StackDepotPut(*stack),
- memory_order_relaxed);
+ AsanThread *t = GetCurrentThread();
+ m->SetAllocContext(t ? t->tid() : 0, StackDepotPut(*stack));
return true;
}
@@ -442,13 +513,10 @@ struct Allocator {
uptr needed_size = rounded_size + rz_size;
if (alignment > min_alignment)
needed_size += alignment;
- bool using_primary_allocator = true;
// If we are allocating from the secondary allocator, there will be no
// automatic right redzone, so add the right redzone manually.
- if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
+ if (!PrimaryAllocator::CanAllocate(needed_size, alignment))
needed_size += rz_size;
- using_primary_allocator = false;
- }
CHECK(IsAligned(needed_size, min_alignment));
if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize ||
size > max_user_defined_malloc_size) {
@@ -490,8 +558,7 @@ struct Allocator {
uptr alloc_beg = reinterpret_cast<uptr>(allocated);
uptr alloc_end = alloc_beg + needed_size;
- uptr beg_plus_redzone = alloc_beg + rz_size;
- uptr user_beg = beg_plus_redzone;
+ uptr user_beg = alloc_beg + rz_size;
if (!IsAligned(user_beg, alignment))
user_beg = RoundUpTo(user_beg, alignment);
uptr user_end = user_beg + size;
@@ -499,31 +566,11 @@ struct Allocator {
uptr chunk_beg = user_beg - kChunkHeaderSize;
AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
m->alloc_type = alloc_type;
- m->rz_log = rz_log;
- u32 alloc_tid = t ? t->tid() : 0;
- m->alloc_tid = alloc_tid;
- CHECK_EQ(alloc_tid, m->alloc_tid); // Does alloc_tid fit into the bitfield?
- m->free_tid = kInvalidTid;
- m->from_memalign = user_beg != beg_plus_redzone;
- if (alloc_beg != chunk_beg) {
- CHECK_LE(alloc_beg+ 2 * sizeof(uptr), chunk_beg);
- reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
- reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
- }
- if (using_primary_allocator) {
- CHECK(size);
- m->user_requested_size = size;
- CHECK(allocator.FromPrimary(allocated));
- } else {
- CHECK(!allocator.FromPrimary(allocated));
- m->user_requested_size = SizeClassMap::kMaxSize;
- uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
- meta[0] = size;
- meta[1] = chunk_beg;
- }
+ CHECK(size);
+ m->SetUsedSize(size);
m->user_requested_alignment_log = user_requested_alignment_log;
- m->alloc_context_id = StackDepotPut(*stack);
+ m->SetAllocContext(t ? t->tid() : 0, StackDepotPut(*stack));
uptr size_rounded_down_to_granularity =
RoundDownTo(size, SHADOW_GRANULARITY);
@@ -556,7 +603,11 @@ struct Allocator {
: __lsan::kDirectlyLeaked;
#endif
// Must be the last mutation of metadata in this function.
- atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
+ atomic_store(&m->chunk_state, CHUNK_ALLOCATED, memory_order_release);
+ if (alloc_beg != chunk_beg) {
+ CHECK_LE(alloc_beg + sizeof(LargeChunkHeader), chunk_beg);
+ reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Set(m);
+ }
ASAN_MALLOC_HOOK(res, size);
return res;
}
@@ -564,10 +615,10 @@ struct Allocator {
// Set quarantine flag if chunk is allocated, issue ASan error report on
// available and quarantined chunks. Return true on success, false otherwise.
bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr,
- BufferedStackTrace *stack) {
+ BufferedStackTrace *stack) {
u8 old_chunk_state = CHUNK_ALLOCATED;
// Flip the chunk_state atomically to avoid race on double-free.
- if (!atomic_compare_exchange_strong((atomic_uint8_t *)m, &old_chunk_state,
+ if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
CHUNK_QUARANTINE,
memory_order_acquire)) {
ReportInvalidFree(ptr, old_chunk_state, stack);
@@ -575,19 +626,18 @@ struct Allocator {
return false;
}
CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
+ // It was a user data.
+ m->SetFreeContext(kInvalidTid, 0);
return true;
}
// Expects the chunk to already be marked as quarantined by using
// AtomicallySetQuarantineFlagIfAllocated.
void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack) {
- CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
- CHECK_GE(m->alloc_tid, 0);
- if (SANITIZER_WORDSIZE == 64) // On 32-bits this resides in user area.
- CHECK_EQ(m->free_tid, kInvalidTid);
+ CHECK_EQ(atomic_load(&m->chunk_state, memory_order_relaxed),
+ CHUNK_QUARANTINE);
AsanThread *t = GetCurrentThread();
- m->free_tid = t ? t->tid() : 0;
- m->free_context_id = StackDepotPut(*stack);
+ m->SetFreeContext(t ? t->tid() : 0, StackDepotPut(*stack));
Flags &fl = *flags();
if (fl.max_free_fill_size > 0) {
@@ -676,7 +726,7 @@ struct Allocator {
void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
if (new_ptr) {
- u8 chunk_state = m->chunk_state;
+ u8 chunk_state = atomic_load(&m->chunk_state, memory_order_acquire);
if (chunk_state != CHUNK_ALLOCATED)
ReportInvalidFree(old_ptr, chunk_state, stack);
CHECK_NE(REAL(memcpy), nullptr);
@@ -719,17 +769,24 @@ struct Allocator {
// -------------------------- Chunk lookup ----------------------
// Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
+ // Returns nullptr if AsanChunk is not yet initialized just after
+ // get_allocator().Allocate(), or is being destroyed just before
+ // get_allocator().Deallocate().
AsanChunk *GetAsanChunk(void *alloc_beg) {
- if (!alloc_beg) return nullptr;
- if (!allocator.FromPrimary(alloc_beg)) {
- uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
- AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
- return m;
+ if (!alloc_beg)
+ return nullptr;
+ AsanChunk *p = reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Get();
+ if (!p) {
+ if (!allocator.FromPrimary(alloc_beg))
+ return nullptr;
+ p = reinterpret_cast<AsanChunk *>(alloc_beg);
}
- uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
- if (alloc_magic[0] == kAllocBegMagic)
- return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
- return reinterpret_cast<AsanChunk *>(alloc_beg);
+ u8 state = atomic_load(&p->chunk_state, memory_order_relaxed);
+ // It does not guaranty that Chunk is initialized, but it's
+ // definitely not for any other value.
+ if (state == CHUNK_ALLOCATED || state == CHUNK_QUARANTINE)
+ return p;
+ return nullptr;
}
AsanChunk *GetAsanChunkByAddr(uptr p) {
@@ -747,16 +804,16 @@ struct Allocator {
uptr AllocationSize(uptr p) {
AsanChunk *m = GetAsanChunkByAddr(p);
if (!m) return 0;
- if (m->chunk_state != CHUNK_ALLOCATED) return 0;
+ if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
+ return 0;
if (m->Beg() != p) return 0;
return m->UsedSize();
}
AsanChunkView FindHeapChunkByAddress(uptr addr) {
AsanChunk *m1 = GetAsanChunkByAddr(addr);
- if (!m1) return AsanChunkView(m1);
sptr offset = 0;
- if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
+ if (!m1 || AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
// The address is in the chunk's left redzone, so maybe it is actually
// a right buffer overflow from the other chunk to the left.
// Search a bit to the left to see if there is another chunk.
@@ -813,13 +870,16 @@ static AsanAllocator &get_allocator() {
}
bool AsanChunkView::IsValid() const {
- return chunk_ && chunk_->chunk_state != CHUNK_AVAILABLE;
+ return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) !=
+ CHUNK_INVALID;
}
bool AsanChunkView::IsAllocated() const {
- return chunk_ && chunk_->chunk_state == CHUNK_ALLOCATED;
+ return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) ==
+ CHUNK_ALLOCATED;
}
bool AsanChunkView::IsQuarantined() const {
- return chunk_ && chunk_->chunk_state == CHUNK_QUARANTINE;
+ return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) ==
+ CHUNK_QUARANTINE;
}
uptr AsanChunkView::Beg() const { return chunk_->Beg(); }
uptr AsanChunkView::End() const { return Beg() + UsedSize(); }
@@ -827,8 +887,23 @@ uptr AsanChunkView::UsedSize() const { return chunk_->UsedSize(); }
u32 AsanChunkView::UserRequestedAlignment() const {
return Allocator::ComputeUserAlignment(chunk_->user_requested_alignment_log);
}
-uptr AsanChunkView::AllocTid() const { return chunk_->alloc_tid; }
-uptr AsanChunkView::FreeTid() const { return chunk_->free_tid; }
+
+uptr AsanChunkView::AllocTid() const {
+ u32 tid = 0;
+ u32 stack = 0;
+ chunk_->GetAllocContext(tid, stack);
+ return tid;
+}
+
+uptr AsanChunkView::FreeTid() const {
+ if (!IsQuarantined())
+ return kInvalidTid;
+ u32 tid = 0;
+ u32 stack = 0;
+ chunk_->GetFreeContext(tid, stack);
+ return tid;
+}
+
AllocType AsanChunkView::GetAllocType() const {
return (AllocType)chunk_->alloc_type;
}
@@ -840,8 +915,21 @@ static StackTrace GetStackTraceFromId(u32 id) {
return res;
}
-u32 AsanChunkView::GetAllocStackId() const { return chunk_->alloc_context_id; }
-u32 AsanChunkView::GetFreeStackId() const { return chunk_->free_context_id; }
+u32 AsanChunkView::GetAllocStackId() const {
+ u32 tid = 0;
+ u32 stack = 0;
+ chunk_->GetAllocContext(tid, stack);
+ return stack;
+}
+
+u32 AsanChunkView::GetFreeStackId() const {
+ if (!IsQuarantined())
+ return 0;
+ u32 tid = 0;
+ u32 stack = 0;
+ chunk_->GetFreeContext(tid, stack);
+ return stack;
+}
StackTrace AsanChunkView::GetAllocStack() const {
return GetStackTraceFromId(GetAllocStackId());
@@ -1005,7 +1093,7 @@ void AsanSoftRssLimitExceededCallback(bool limit_exceeded) {
instance.SetRssLimitExceeded(limit_exceeded);
}
-} // namespace __asan
+} // namespace __asan
// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
@@ -1022,45 +1110,36 @@ void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
*end = *begin + sizeof(__asan::get_allocator());
}
-uptr PointsIntoChunk(void* p) {
+uptr PointsIntoChunk(void *p) {
uptr addr = reinterpret_cast<uptr>(p);
__asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
- if (!m) return 0;
- uptr chunk = m->Beg();
- if (m->chunk_state != __asan::CHUNK_ALLOCATED)
+ if (!m || atomic_load(&m->chunk_state, memory_order_acquire) !=
+ __asan::CHUNK_ALLOCATED)
return 0;
- if (m->AddrIsInside(addr, /*locked_version=*/true))
+ uptr chunk = m->Beg();
+ if (m->AddrIsInside(addr))
return chunk;
- if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
- addr))
+ if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(), addr))
return chunk;
return 0;
}
-// Debug code. Delete once issue #1193 is chased down.
-extern "C" SANITIZER_WEAK_ATTRIBUTE const char *__lsan_current_stage;
-
uptr GetUserBegin(uptr chunk) {
__asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
- if (!m)
- Printf(
- "ASAN is about to crash with a CHECK failure.\n"
- "The ASAN developers are trying to chase down this bug,\n"
- "so if you've encountered this bug please let us know.\n"
- "See also: https://github.com/google/sanitizers/issues/1193\n"
- "chunk: %p caller %p __lsan_current_stage %s\n",
- chunk, GET_CALLER_PC(), __lsan_current_stage);
- CHECK(m);
- return m->Beg();
+ return m ? m->Beg() : 0;
}
LsanMetadata::LsanMetadata(uptr chunk) {
- metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
+ metadata_ = chunk ? reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize)
+ : nullptr;
}
bool LsanMetadata::allocated() const {
+ if (!metadata_)
+ return false;
__asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
- return m->chunk_state == __asan::CHUNK_ALLOCATED;
+ return atomic_load(&m->chunk_state, memory_order_relaxed) ==
+ __asan::CHUNK_ALLOCATED;
}
ChunkTag LsanMetadata::tag() const {
@@ -1075,12 +1154,15 @@ void LsanMetadata::set_tag(ChunkTag value) {
uptr LsanMetadata::requested_size() const {
__asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
- return m->UsedSize(/*locked_version=*/true);
+ return m->UsedSize();
}
u32 LsanMetadata::stack_trace_id() const {
__asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
- return m->alloc_context_id;
+ u32 tid = 0;
+ u32 stack = 0;
+ m->GetAllocContext(tid, stack);
+ return stack;
}
void ForEachChunk(ForEachChunkCallback callback, void *arg) {
@@ -1090,16 +1172,45 @@ void ForEachChunk(ForEachChunkCallback callback, void *arg) {
IgnoreObjectResult IgnoreObjectLocked(const void *p) {
uptr addr = reinterpret_cast<uptr>(p);
__asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
- if (!m) return kIgnoreObjectInvalid;
- if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) {
- if (m->lsan_tag == kIgnored)
- return kIgnoreObjectAlreadyIgnored;
- m->lsan_tag = __lsan::kIgnored;
- return kIgnoreObjectSuccess;
- } else {
+ if (!m ||
+ (atomic_load(&m->chunk_state, memory_order_acquire) !=
+ __asan::CHUNK_ALLOCATED) ||
+ !m->AddrIsInside(addr)) {
return kIgnoreObjectInvalid;
}
+ if (m->lsan_tag == kIgnored)
+ return kIgnoreObjectAlreadyIgnored;
+ m->lsan_tag = __lsan::kIgnored;
+ return kIgnoreObjectSuccess;
+}
+
+void GetAdditionalThreadContextPtrs(ThreadContextBase *tctx, void *ptrs) {
+ // Look for the arg pointer of threads that have been created or are running.
+ // This is necessary to prevent false positive leaks due to the AsanThread
+ // holding the only live reference to a heap object. This can happen because
+ // the `pthread_create()` interceptor doesn't wait for the child thread to
+ // start before returning and thus loosing the the only live reference to the
+ // heap object on the stack.
+
+ __asan::AsanThreadContext *atctx =
+ reinterpret_cast<__asan::AsanThreadContext *>(tctx);
+ __asan::AsanThread *asan_thread = atctx->thread;
+
+ // Note ThreadStatusRunning is required because there is a small window where
+ // the thread status switches to `ThreadStatusRunning` but the `arg` pointer
+ // still isn't on the stack yet.
+ if (atctx->status != ThreadStatusCreated &&
+ atctx->status != ThreadStatusRunning)
+ return;
+
+ uptr thread_arg = reinterpret_cast<uptr>(asan_thread->get_arg());
+ if (!thread_arg)
+ return;
+
+ auto ptrsVec = reinterpret_cast<InternalMmapVector<uptr> *>(ptrs);
+ ptrsVec->push_back(thread_arg);
}
+
} // namespace __lsan
// ---------------------- Interface ---------------- {{{1
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_allocator.h b/contrib/llvm-project/compiler-rt/lib/asan/asan_allocator.h
index b37d8ef4e8d2..2963e979b55c 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_allocator.h
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_allocator.h
@@ -15,10 +15,11 @@
#define ASAN_ALLOCATOR_H
#include "asan_flags.h"
-#include "asan_internal.h"
#include "asan_interceptors.h"
+#include "asan_internal.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_list.h"
+#include "sanitizer_common/sanitizer_platform.h"
namespace __asan {
@@ -28,7 +29,7 @@ enum AllocType {
FROM_NEW_BR = 3 // Memory block came from operator new [ ]
};
-struct AsanChunk;
+class AsanChunk;
struct AllocatorOptions {
u32 quarantine_size_mb;
@@ -132,6 +133,10 @@ typedef DefaultSizeClassMap SizeClassMap;
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x2000000000ULL; // 128G.
typedef VeryCompactSizeClassMap SizeClassMap;
+#elif SANITIZER_RISCV64
+const uptr kAllocatorSpace = ~(uptr)0;
+const uptr kAllocatorSize = 0x2000000000ULL; // 128G.
+typedef VeryDenseSizeClassMap SizeClassMap;
# elif defined(__aarch64__)
// AArch64/SANITIZER_CAN_USE_ALLOCATOR64 is only for 42-bit VMA
// so no need to different values for different VMA.
@@ -171,7 +176,7 @@ template <typename AddressSpaceViewTy>
struct AP32 {
static const uptr kSpaceBeg = 0;
static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
- static const uptr kMetadataSize = 16;
+ static const uptr kMetadataSize = 0;
typedef __asan::SizeClassMap SizeClassMap;
static const uptr kRegionSizeLog = 20;
using AddressSpaceView = AddressSpaceViewTy;
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_flags.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_flags.cpp
index c5c70eaed737..cb6a89fe32ce 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_flags.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_flags.cpp
@@ -26,10 +26,6 @@ namespace __asan {
Flags asan_flags_dont_use_directly; // use via flags().
-static const char *MaybeCallAsanDefaultOptions() {
- return (&__asan_default_options) ? __asan_default_options() : "";
-}
-
static const char *MaybeUseAsanDefaultOptionsCompileDefinition() {
#ifdef ASAN_DEFAULT_OPTIONS
return SANITIZER_STRINGIFY(ASAN_DEFAULT_OPTIONS);
@@ -108,14 +104,14 @@ void InitializeFlags() {
asan_parser.ParseString(asan_compile_def);
// Override from user-specified string.
- const char *asan_default_options = MaybeCallAsanDefaultOptions();
+ const char *asan_default_options = __asan_default_options();
asan_parser.ParseString(asan_default_options);
#if CAN_SANITIZE_UB
- const char *ubsan_default_options = __ubsan::MaybeCallUbsanDefaultOptions();
+ const char *ubsan_default_options = __ubsan_default_options();
ubsan_parser.ParseString(ubsan_default_options);
#endif
#if CAN_SANITIZE_LEAKS
- const char *lsan_default_options = __lsan::MaybeCallLsanDefaultOptions();
+ const char *lsan_default_options = __lsan_default_options();
lsan_parser.ParseString(lsan_default_options);
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_fuchsia.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_fuchsia.cpp
index 64f6dcbcefeb..6c61344f87cf 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_fuchsia.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_fuchsia.cpp
@@ -91,8 +91,7 @@ struct AsanThread::InitOptions {
// Shared setup between thread creation and startup for the initial thread.
static AsanThread *CreateAsanThread(StackTrace *stack, u32 parent_tid,
uptr user_id, bool detached,
- const char *name, uptr stack_bottom,
- uptr stack_size) {
+ const char *name) {
// In lieu of AsanThread::Create.
AsanThread *thread = (AsanThread *)MmapOrDie(AsanThreadMmapSize(), __func__);
@@ -101,12 +100,6 @@ static AsanThread *CreateAsanThread(StackTrace *stack, u32 parent_tid,
asanThreadRegistry().CreateThread(user_id, detached, parent_tid, &args);
asanThreadRegistry().SetThreadName(tid, name);
- // On other systems, AsanThread::Init() is called from the new
- // thread itself. But on Fuchsia we already know the stack address
- // range beforehand, so we can do most of the setup right now.
- const AsanThread::InitOptions options = {stack_bottom, stack_size};
- thread->Init(&options);
-
return thread;
}
@@ -135,9 +128,16 @@ AsanThread *CreateMainThread() {
_zx_object_get_property(thrd_get_zx_handle(self), ZX_PROP_NAME, name,
sizeof(name)) == ZX_OK
? name
- : nullptr,
- __sanitizer::MainThreadStackBase, __sanitizer::MainThreadStackSize);
+ : nullptr);
+ // We need to set the current thread before calling AsanThread::Init() below,
+ // since it reads the thread ID.
SetCurrentThread(t);
+ DCHECK_EQ(t->tid(), 0);
+
+ const AsanThread::InitOptions options = {__sanitizer::MainThreadStackBase,
+ __sanitizer::MainThreadStackSize};
+ t->Init(&options);
+
return t;
}
@@ -153,8 +153,15 @@ static void *BeforeThreadCreateHook(uptr user_id, bool detached,
GET_STACK_TRACE_THREAD;
u32 parent_tid = GetCurrentTidOrInvalid();
- return CreateAsanThread(&stack, parent_tid, user_id, detached, name,
- stack_bottom, stack_size);
+ AsanThread *thread =
+ CreateAsanThread(&stack, parent_tid, user_id, detached, name);
+
+ // On other systems, AsanThread::Init() is called from the new
+ // thread itself. But on Fuchsia we already know the stack address
+ // range beforehand, so we can do most of the setup right now.
+ const AsanThread::InitOptions options = {stack_bottom, stack_size};
+ thread->Init(&options);
+ return thread;
}
// This is called after creating a new thread (in the creating thread),
@@ -198,6 +205,10 @@ bool HandleDlopenInit() {
return false;
}
+void FlushUnneededASanShadowMemory(uptr p, uptr size) {
+ __sanitizer_fill_shadow(p, size, 0, 0);
+}
+
} // namespace __asan
// These are declared (in extern "C") by <zircon/sanitizer.h>.
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors.cpp
index b19cf25c7cd0..cd07d51878b1 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors.cpp
@@ -189,20 +189,11 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
#include "sanitizer_common/sanitizer_common_syscalls.inc"
#include "sanitizer_common/sanitizer_syscalls_netbsd.inc"
-struct ThreadStartParam {
- atomic_uintptr_t t;
- atomic_uintptr_t is_registered;
-};
-
#if ASAN_INTERCEPT_PTHREAD_CREATE
static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
- ThreadStartParam *param = reinterpret_cast<ThreadStartParam *>(arg);
- AsanThread *t = nullptr;
- while ((t = reinterpret_cast<AsanThread *>(
- atomic_load(&param->t, memory_order_acquire))) == nullptr)
- internal_sched_yield();
+ AsanThread *t = (AsanThread *)arg;
SetCurrentThread(t);
- return t->ThreadStart(GetTid(), &param->is_registered);
+ return t->ThreadStart(GetTid());
}
INTERCEPTOR(int, pthread_create, void *thread,
@@ -215,9 +206,11 @@ INTERCEPTOR(int, pthread_create, void *thread,
int detached = 0;
if (attr)
REAL(pthread_attr_getdetachstate)(attr, &detached);
- ThreadStartParam param;
- atomic_store(&param.t, 0, memory_order_relaxed);
- atomic_store(&param.is_registered, 0, memory_order_relaxed);
+
+ u32 current_tid = GetCurrentTidOrInvalid();
+ AsanThread *t =
+ AsanThread::Create(start_routine, arg, current_tid, &stack, detached);
+
int result;
{
// Ignore all allocations made by pthread_create: thread stack/TLS may be
@@ -227,21 +220,13 @@ INTERCEPTOR(int, pthread_create, void *thread,
#if CAN_SANITIZE_LEAKS
__lsan::ScopedInterceptorDisabler disabler;
#endif
- result = REAL(pthread_create)(thread, attr, asan_thread_start, &param);
+ result = REAL(pthread_create)(thread, attr, asan_thread_start, t);
}
- if (result == 0) {
- u32 current_tid = GetCurrentTidOrInvalid();
- AsanThread *t =
- AsanThread::Create(start_routine, arg, current_tid, &stack, detached);
- atomic_store(&param.t, reinterpret_cast<uptr>(t), memory_order_release);
- // Wait until the AsanThread object is initialized and the ThreadRegistry
- // entry is in "started" state. One reason for this is that after this
- // interceptor exits, the child thread's stack may be the only thing holding
- // the |arg| pointer. This may cause LSan to report a leak if leak checking
- // happens at a point when the interceptor has already exited, but the stack
- // range for the child thread is not yet known.
- while (atomic_load(&param.is_registered, memory_order_acquire) == 0)
- internal_sched_yield();
+ if (result != 0) {
+ // If the thread didn't start delete the AsanThread to avoid leaking it.
+ // Note AsanThreadContexts never get destroyed so the AsanThreadContext
+ // that was just created for the AsanThread is wasted.
+ t->Destroy();
}
return result;
}
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors.h b/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors.h
index 344a64bd83d3..45cdb80b1b64 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors.h
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors.h
@@ -13,9 +13,10 @@
#ifndef ASAN_INTERCEPTORS_H
#define ASAN_INTERCEPTORS_H
-#include "asan_internal.h"
#include "asan_interceptors_memintrinsics.h"
+#include "asan_internal.h"
#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_platform.h"
#include "sanitizer_common/sanitizer_platform_interceptors.h"
namespace __asan {
@@ -59,7 +60,7 @@ void InitializePlatformInterceptors();
# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 0
#endif
-#if (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_SOLARIS
+#if SANITIZER_GLIBC || SANITIZER_SOLARIS
# define ASAN_INTERCEPT_SWAPCONTEXT 1
#else
# define ASAN_INTERCEPT_SWAPCONTEXT 0
@@ -71,7 +72,7 @@ void InitializePlatformInterceptors();
# define ASAN_INTERCEPT_SIGLONGJMP 0
#endif
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#if SANITIZER_GLIBC
# define ASAN_INTERCEPT___LONGJMP_CHK 1
#else
# define ASAN_INTERCEPT___LONGJMP_CHK 0
@@ -105,14 +106,15 @@ void InitializePlatformInterceptors();
# define ASAN_INTERCEPT_ATEXIT 0
#endif
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#if SANITIZER_GLIBC
# define ASAN_INTERCEPT___STRDUP 1
#else
# define ASAN_INTERCEPT___STRDUP 0
#endif
-#if SANITIZER_LINUX && (defined(__arm__) || defined(__aarch64__) || \
- defined(__i386__) || defined(__x86_64__))
+#if SANITIZER_LINUX && \
+ (defined(__arm__) || defined(__aarch64__) || defined(__i386__) || \
+ defined(__x86_64__) || SANITIZER_RISCV64)
# define ASAN_INTERCEPT_VFORK 1
#else
# define ASAN_INTERCEPT_VFORK 0
@@ -132,10 +134,10 @@ DECLARE_REAL(uptr, strnlen, const char *s, uptr maxlen)
DECLARE_REAL(char*, strstr, const char *s1, const char *s2)
#if !SANITIZER_MAC
-#define ASAN_INTERCEPT_FUNC(name) \
- do { \
- if (!INTERCEPT_FUNCTION(name)) \
- VReport(1, "AddressSanitizer: failed to intercept '%s'\n'", #name); \
+#define ASAN_INTERCEPT_FUNC(name) \
+ do { \
+ if (!INTERCEPT_FUNCTION(name)) \
+ VReport(1, "AddressSanitizer: failed to intercept '%s'\n", #name); \
} while (0)
#define ASAN_INTERCEPT_FUNC_VER(name, ver) \
do { \
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors_vfork.S b/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors_vfork.S
index 90a169d4b609..3ae5503e83cd 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors_vfork.S
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors_vfork.S
@@ -5,8 +5,9 @@
#define COMMON_INTERCEPTOR_HANDLE_VFORK __asan_handle_vfork
#include "sanitizer_common/sanitizer_common_interceptors_vfork_aarch64.inc.S"
#include "sanitizer_common/sanitizer_common_interceptors_vfork_arm.inc.S"
-#include "sanitizer_common/sanitizer_common_interceptors_vfork_x86_64.inc.S"
#include "sanitizer_common/sanitizer_common_interceptors_vfork_i386.inc.S"
+#include "sanitizer_common/sanitizer_common_interceptors_vfork_riscv64.inc.S"
+#include "sanitizer_common/sanitizer_common_interceptors_vfork_x86_64.inc.S"
#endif
NO_EXEC_STACK_DIRECTIVE
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_interface_internal.h b/contrib/llvm-project/compiler-rt/lib/asan/asan_interface_internal.h
index f14cbbcb76a3..3e6e66028874 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_interface_internal.h
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_interface_internal.h
@@ -173,8 +173,8 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE void __asan_print_accumulated_stats();
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- const char* __asan_default_options();
+ SANITIZER_INTERFACE_ATTRIBUTE
+ const char *__asan_default_options();
SANITIZER_INTERFACE_ATTRIBUTE
extern uptr __asan_shadow_memory_dynamic_address;
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_internal.h b/contrib/llvm-project/compiler-rt/lib/asan/asan_internal.h
index d4bfe996b664..cfb54927c6cf 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_internal.h
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_internal.h
@@ -118,8 +118,6 @@ void AppendToErrorMessageBuffer(const char *buffer);
void *AsanDlSymNext(const char *sym);
-void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name);
-
// Returns `true` iff most of ASan init process should be skipped due to the
// ASan library being loaded via `dlopen()`. Platforms may perform any
// `dlopen()` specific initialization inside this function.
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_linux.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_linux.cpp
index ce5e873dc518..4bcbe5d02e33 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_linux.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_linux.cpp
@@ -55,6 +55,7 @@ extern Elf_Dyn _DYNAMIC;
#else
#include <sys/ucontext.h>
#include <link.h>
+extern ElfW(Dyn) _DYNAMIC[];
#endif
// x86-64 FreeBSD 9.2 and older define 'ucontext_t' incorrectly in
@@ -84,28 +85,15 @@ bool IsSystemHeapAddress (uptr addr) { return false; }
void *AsanDoesNotSupportStaticLinkage() {
// This will fail to link with -static.
- return &_DYNAMIC; // defined in link.h
-}
-
-static void UnmapFromTo(uptr from, uptr to) {
- CHECK(to >= from);
- if (to == from) return;
- uptr res = internal_munmap(reinterpret_cast<void *>(from), to - from);
- if (UNLIKELY(internal_iserror(res))) {
- Report(
- "ERROR: AddresSanitizer failed to unmap 0x%zx (%zd) bytes at address "
- "%p\n",
- to - from, to - from, from);
- CHECK("unable to unmap" && 0);
- }
+ return &_DYNAMIC;
}
#if ASAN_PREMAP_SHADOW
-uptr FindPremappedShadowStart() {
+uptr FindPremappedShadowStart(uptr shadow_size_bytes) {
uptr granularity = GetMmapGranularity();
uptr shadow_start = reinterpret_cast<uptr>(&__asan_shadow);
uptr premap_shadow_size = PremapShadowSize();
- uptr shadow_size = RoundUpTo(kHighShadowEnd, granularity);
+ uptr shadow_size = RoundUpTo(shadow_size_bytes, granularity);
// We may have mapped too much. Release extra memory.
UnmapFromTo(shadow_start + shadow_size, shadow_start + premap_shadow_size);
return shadow_start;
@@ -113,31 +101,26 @@ uptr FindPremappedShadowStart() {
#endif
uptr FindDynamicShadowStart() {
+ uptr shadow_size_bytes = MemToShadowSize(kHighMemEnd);
#if ASAN_PREMAP_SHADOW
if (!PremapShadowFailed())
- return FindPremappedShadowStart();
+ return FindPremappedShadowStart(shadow_size_bytes);
#endif
- uptr granularity = GetMmapGranularity();
- uptr alignment = granularity * 8;
- uptr left_padding = granularity;
- uptr shadow_size = RoundUpTo(kHighShadowEnd, granularity);
- uptr map_size = shadow_size + left_padding + alignment;
-
- uptr map_start = (uptr)MmapNoAccess(map_size);
- CHECK_NE(map_start, ~(uptr)0);
-
- uptr shadow_start = RoundUpTo(map_start + left_padding, alignment);
- UnmapFromTo(map_start, shadow_start - left_padding);
- UnmapFromTo(shadow_start + shadow_size, map_start + map_size);
-
- return shadow_start;
+ return MapDynamicShadow(shadow_size_bytes, SHADOW_SCALE,
+ /*min_shadow_base_alignment*/ 0, kHighMemEnd);
}
void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
UNIMPLEMENTED();
}
+void FlushUnneededASanShadowMemory(uptr p, uptr size) {
+ // Since asan's mapping is compacting, the shadow chunk may be
+ // not page-aligned, so we only flush the page-aligned portion.
+ ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
+}
+
#if SANITIZER_ANDROID
// FIXME: should we do anything for Android?
void AsanCheckDynamicRTPrereqs() {}
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_mac.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_mac.cpp
index a8d3f5d3473c..c6950547f089 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_mac.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_mac.cpp
@@ -55,46 +55,8 @@ void *AsanDoesNotSupportStaticLinkage() {
}
uptr FindDynamicShadowStart() {
- uptr granularity = GetMmapGranularity();
- uptr alignment = 8 * granularity;
- uptr left_padding = granularity;
- uptr space_size = kHighShadowEnd + left_padding;
-
- uptr largest_gap_found = 0;
- uptr max_occupied_addr = 0;
- VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size);
- uptr shadow_start =
- FindAvailableMemoryRange(space_size, alignment, granularity,
- &largest_gap_found, &max_occupied_addr);
- // If the shadow doesn't fit, restrict the address space to make it fit.
- if (shadow_start == 0) {
- VReport(
- 2,
- "Shadow doesn't fit, largest_gap_found = %p, max_occupied_addr = %p\n",
- largest_gap_found, max_occupied_addr);
- uptr new_max_vm = RoundDownTo(largest_gap_found << SHADOW_SCALE, alignment);
- if (new_max_vm < max_occupied_addr) {
- Report("Unable to find a memory range for dynamic shadow.\n");
- Report(
- "space_size = %p, largest_gap_found = %p, max_occupied_addr = %p, "
- "new_max_vm = %p\n",
- space_size, largest_gap_found, max_occupied_addr, new_max_vm);
- CHECK(0 && "cannot place shadow");
- }
- RestrictMemoryToMaxAddress(new_max_vm);
- kHighMemEnd = new_max_vm - 1;
- space_size = kHighShadowEnd + left_padding;
- VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size);
- shadow_start = FindAvailableMemoryRange(space_size, alignment, granularity,
- nullptr, nullptr);
- if (shadow_start == 0) {
- Report("Unable to find a memory range after restricting VM.\n");
- CHECK(0 && "cannot place shadow after restricting vm");
- }
- }
- CHECK_NE((uptr)0, shadow_start);
- CHECK(IsAligned(shadow_start, alignment));
- return shadow_start;
+ return MapDynamicShadow(MemToShadowSize(kHighMemEnd), SHADOW_SCALE,
+ /*min_shadow_base_alignment*/ 0, kHighMemEnd);
}
// No-op. Mac does not support static linkage anyway.
@@ -127,6 +89,12 @@ void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
op(globals, size / sizeof(__asan_global));
}
+void FlushUnneededASanShadowMemory(uptr p, uptr size) {
+ // Since asan's mapping is compacting, the shadow chunk may be
+ // not page-aligned, so we only flush the page-aligned portion.
+ ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
+}
+
void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
UNIMPLEMENTED();
}
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_malloc_linux.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_malloc_linux.cpp
index faa8968a5d00..9c3f0a5338ee 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_malloc_linux.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_malloc_linux.cpp
@@ -34,7 +34,7 @@ static uptr last_dlsym_alloc_size_in_words;
static const uptr kDlsymAllocPoolSize = SANITIZER_RTEMS ? 4096 : 1024;
static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize];
-static INLINE bool IsInDlsymAllocPool(const void *ptr) {
+static inline bool IsInDlsymAllocPool(const void *ptr) {
uptr off = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
return off < allocated_for_dlsym * sizeof(alloc_memory_for_dlsym[0]);
}
@@ -95,12 +95,12 @@ bool IsFromLocalPool(const void *ptr) {
}
#endif
-static INLINE bool MaybeInDlsym() {
+static inline bool MaybeInDlsym() {
// Fuchsia doesn't use dlsym-based interceptors.
return !SANITIZER_FUCHSIA && asan_init_is_running;
}
-static INLINE bool UseLocalPool() {
+static inline bool UseLocalPool() {
return EarlyMalloc() || MaybeInDlsym();
}
@@ -120,19 +120,19 @@ static void *ReallocFromLocalPool(void *ptr, uptr size) {
}
INTERCEPTOR(void, free, void *ptr) {
- GET_STACK_TRACE_FREE;
if (UNLIKELY(IsInDlsymAllocPool(ptr))) {
DeallocateFromLocalPool(ptr);
return;
}
+ GET_STACK_TRACE_FREE;
asan_free(ptr, &stack, FROM_MALLOC);
}
#if SANITIZER_INTERCEPT_CFREE
INTERCEPTOR(void, cfree, void *ptr) {
- GET_STACK_TRACE_FREE;
if (UNLIKELY(IsInDlsymAllocPool(ptr)))
return;
+ GET_STACK_TRACE_FREE;
asan_free(ptr, &stack, FROM_MALLOC);
}
#endif // SANITIZER_INTERCEPT_CFREE
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_malloc_local.h b/contrib/llvm-project/compiler-rt/lib/asan/asan_malloc_local.h
index 3f784b90c739..e2c9be0379f2 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_malloc_local.h
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_malloc_local.h
@@ -17,7 +17,7 @@
#include "sanitizer_common/sanitizer_platform.h"
#include "asan_internal.h"
-static INLINE bool EarlyMalloc() {
+static inline bool EarlyMalloc() {
return SANITIZER_RTEMS &&
(!__asan::asan_inited || __asan::asan_init_is_running);
}
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_mapping.h b/contrib/llvm-project/compiler-rt/lib/asan/asan_mapping.h
index 41fb49ee46d4..f239c3ee2ff9 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_mapping.h
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_mapping.h
@@ -79,6 +79,20 @@
// || `[0x1000000000, 0x11ffffffff]` || lowshadow ||
// || `[0x0000000000, 0x0fffffffff]` || lowmem ||
//
+// RISC-V has only 38 bits for task size
+// Low mem size is set with kRiscv64_ShadowOffset64 in
+// compiler-rt/lib/asan/asan_allocator.h and in
+// llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp with
+// kRiscv64_ShadowOffset64, High mem top border is set with
+// GetMaxVirtualAddress() in
+// compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
+// Default Linux/RISCV64 Sv39/Sv48 mapping:
+// || `[0x000820000000, 0x003fffffffff]` || HighMem ||
+// || `[0x000124000000, 0x00081fffffff]` || HighShadow ||
+// || `[0x000024000000, 0x000123ffffff]` || ShadowGap ||
+// || `[0x000020000000, 0x000023ffffff]` || LowShadow ||
+// || `[0x000000000000, 0x00001fffffff]` || LowMem ||
+//
// Default Linux/AArch64 (42-bit VMA) mapping:
// || `[0x10000000000, 0x3ffffffffff]` || highmem ||
// || `[0x0a000000000, 0x0ffffffffff]` || highshadow ||
@@ -161,6 +175,7 @@ static const u64 kDefaultShadowOffset64 = 1ULL << 44;
static const u64 kDefaultShort64bitShadowOffset =
0x7FFFFFFF & (~0xFFFULL << kDefaultShadowScale); // < 2G.
static const u64 kAArch64_ShadowOffset64 = 1ULL << 36;
+static const u64 kRiscv64_ShadowOffset64 = 0x20000000;
static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000;
static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37;
static const u64 kPPC64_ShadowOffset64 = 1ULL << 44;
@@ -206,6 +221,10 @@ static const u64 kMyriadCacheBitMask32 = 0x40000000ULL;
#else
# if SANITIZER_IOS
# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
+# elif SANITIZER_MAC && defined(__aarch64__)
+# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
+#elif SANITIZER_RISCV64
+#define SHADOW_OFFSET kRiscv64_ShadowOffset64
# elif defined(__aarch64__)
# define SHADOW_OFFSET kAArch64_ShadowOffset64
# elif defined(__powerpc64__)
@@ -355,6 +374,8 @@ static inline bool AddrIsInShadowGap(uptr a) {
namespace __asan {
+static inline uptr MemToShadowSize(uptr size) { return size >> SHADOW_SCALE; }
+
static inline bool AddrIsInMem(uptr a) {
PROFILE_ASAN_MAPPING();
return AddrIsInLowMem(a) || AddrIsInMidMem(a) || AddrIsInHighMem(a) ||
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_poisoning.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_poisoning.cpp
index f3fbe684e2cb..44f872ef6190 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_poisoning.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_poisoning.cpp
@@ -62,12 +62,6 @@ struct ShadowSegmentEndpoint {
}
};
-void FlushUnneededASanShadowMemory(uptr p, uptr size) {
- // Since asan's mapping is compacting, the shadow chunk may be
- // not page-aligned, so we only flush the page-aligned portion.
- ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
-}
-
void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) {
uptr end = ptr + size;
if (Verbosity()) {
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_premap_shadow.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_premap_shadow.cpp
index 7835e99748ff..666bb9b34bd3 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_premap_shadow.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_premap_shadow.cpp
@@ -32,22 +32,8 @@ uptr PremapShadowSize() {
// Returns an address aligned to 8 pages, such that one page on the left and
// PremapShadowSize() bytes on the right of it are mapped r/o.
uptr PremapShadow() {
- uptr granularity = GetMmapGranularity();
- uptr alignment = granularity * 8;
- uptr left_padding = granularity;
- uptr shadow_size = PremapShadowSize();
- uptr map_size = shadow_size + left_padding + alignment;
-
- uptr map_start = (uptr)MmapNoAccess(map_size);
- CHECK_NE(map_start, ~(uptr)0);
-
- uptr shadow_start = RoundUpTo(map_start + left_padding, alignment);
- uptr shadow_end = shadow_start + shadow_size;
- internal_munmap(reinterpret_cast<void *>(map_start),
- shadow_start - left_padding - map_start);
- internal_munmap(reinterpret_cast<void *>(shadow_end),
- map_start + map_size - shadow_end);
- return shadow_start;
+ return MapDynamicShadow(PremapShadowSize(), /*mmap_alignment_scale*/ 3,
+ /*min_shadow_base_alignment*/ 0, kHighMemEnd);
}
bool PremapShadowFailed() {
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_report.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_report.cpp
index 99e8678aa785..03f1ed2b0186 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_report.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_report.cpp
@@ -151,7 +151,8 @@ class ScopedInErrorReport {
if (common_flags()->print_cmdline)
PrintCmdline();
- if (common_flags()->print_module_map == 2) PrintModuleMap();
+ if (common_flags()->print_module_map == 2)
+ DumpProcessMap();
// Copy the message buffer so that we could start logging without holding a
// lock that gets aquired during printing.
@@ -411,7 +412,7 @@ static bool IsInvalidPointerPair(uptr a1, uptr a2) {
return false;
}
-static INLINE void CheckForInvalidPointerPair(void *p1, void *p2) {
+static inline void CheckForInvalidPointerPair(void *p1, void *p2) {
switch (flags()->detect_invalid_pointer_pairs) {
case 0:
return;
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_rtems.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_rtems.cpp
index 2e5b2f0a3b21..ea0b4ad9db68 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_rtems.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_rtems.cpp
@@ -50,6 +50,12 @@ void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
UNIMPLEMENTED();
}
+void FlushUnneededASanShadowMemory(uptr p, uptr size) {
+ // Since asan's mapping is compacting, the shadow chunk may be
+ // not page-aligned, so we only flush the page-aligned portion.
+ ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
+}
+
void AsanCheckDynamicRTPrereqs() {}
void AsanCheckIncompatibleRT() {}
void InitializeAsanInterceptors() {}
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_rtl.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_rtl.cpp
index 463bfa02f9f1..7b5a929963c6 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_rtl.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_rtl.cpp
@@ -45,7 +45,8 @@ static void AsanDie() {
// Don't die twice - run a busy loop.
while (1) { }
}
- if (common_flags()->print_module_map >= 1) PrintModuleMap();
+ if (common_flags()->print_module_map >= 1)
+ DumpProcessMap();
if (flags()->sleep_before_dying) {
Report("Sleeping for %d second(s)\n", flags()->sleep_before_dying);
SleepForSeconds(flags()->sleep_before_dying);
@@ -319,7 +320,7 @@ static void InitializeHighMemEnd() {
kHighMemEnd = GetMaxUserVirtualAddress();
// Increase kHighMemEnd to make sure it's properly
// aligned together with kHighMemBeg:
- kHighMemEnd |= SHADOW_GRANULARITY * GetMmapGranularity() - 1;
+ kHighMemEnd |= (GetMmapGranularity() << SHADOW_SCALE) - 1;
#endif // !ASAN_FIXED_MAPPING
CHECK_EQ((kHighMemBeg % GetMmapGranularity()), 0);
#endif // !SANITIZER_MYRIAD2
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_shadow_setup.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_shadow_setup.cpp
index 17324932a86f..2ead4425add8 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_shadow_setup.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_shadow_setup.cpp
@@ -22,24 +22,6 @@
namespace __asan {
-// ---------------------- mmap -------------------- {{{1
-// Reserve memory range [beg, end].
-// We need to use inclusive range because end+1 may not be representable.
-void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) {
- CHECK_EQ((beg % GetMmapGranularity()), 0);
- CHECK_EQ(((end + 1) % GetMmapGranularity()), 0);
- uptr size = end - beg + 1;
- DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb.
- if (!MmapFixedSuperNoReserve(beg, size, name)) {
- Report(
- "ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. "
- "Perhaps you're using ulimit -v\n",
- size);
- Abort();
- }
- if (common_flags()->use_madv_dontdump) DontDumpShadowMemory(beg, size);
-}
-
static void ProtectGap(uptr addr, uptr size) {
if (!flags()->protect_shadow_gap) {
// The shadow gap is unprotected, so there is a chance that someone
@@ -57,30 +39,13 @@ static void ProtectGap(uptr addr, uptr size) {
"unprotected gap shadow");
return;
}
- void *res = MmapFixedNoAccess(addr, size, "shadow gap");
- if (addr == (uptr)res) return;
- // A few pages at the start of the address space can not be protected.
- // But we really want to protect as much as possible, to prevent this memory
- // being returned as a result of a non-FIXED mmap().
- if (addr == kZeroBaseShadowStart) {
- uptr step = GetMmapGranularity();
- while (size > step && addr < kZeroBaseMaxShadowStart) {
- addr += step;
- size -= step;
- void *res = MmapFixedNoAccess(addr, size, "shadow gap");
- if (addr == (uptr)res) return;
- }
- }
-
- Report(
- "ERROR: Failed to protect the shadow gap. "
- "ASan cannot proceed correctly. ABORTING.\n");
- DumpProcessMap();
- Die();
+ __sanitizer::ProtectGap(addr, size, kZeroBaseShadowStart,
+ kZeroBaseMaxShadowStart);
}
static void MaybeReportLinuxPIEBug() {
-#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__aarch64__))
+#if SANITIZER_LINUX && \
+ (defined(__x86_64__) || defined(__aarch64__) || SANITIZER_RISCV64)
Report("This might be related to ELF_ET_DYN_BASE change in Linux 4.12.\n");
Report(
"See https://github.com/google/sanitizers/issues/856 for possible "
@@ -99,8 +64,6 @@ void InitializeShadowMemory() {
// |kDefaultShadowSentinel|.
bool full_shadow_is_available = false;
if (shadow_start == kDefaultShadowSentinel) {
- __asan_shadow_memory_dynamic_address = 0;
- CHECK_EQ(0, kLowShadowBeg);
shadow_start = FindDynamicShadowStart();
if (SANITIZER_LINUX) full_shadow_is_available = true;
}
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_stack.h b/contrib/llvm-project/compiler-rt/lib/asan/asan_stack.h
index 4089d3d7340e..47ca85a16443 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_stack.h
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_stack.h
@@ -51,11 +51,6 @@ u32 GetMallocContextSize();
stack.Unwind(pc, bp, nullptr, \
common_flags()->fast_unwind_on_fatal)
-#define GET_STACK_TRACE_SIGNAL(sig) \
- BufferedStackTrace stack; \
- stack.Unwind((sig).pc, (sig).bp, (sig).context, \
- common_flags()->fast_unwind_on_fatal)
-
#define GET_STACK_TRACE_FATAL_HERE \
GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal)
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_thread.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_thread.cpp
index f0df8bd4b374..19ac6c1627ca 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_thread.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_thread.cpp
@@ -188,7 +188,7 @@ uptr AsanThread::stack_size() {
return bounds.top - bounds.bottom;
}
-// We want to create the FakeStack lazyly on the first use, but not eralier
+// We want to create the FakeStack lazily on the first use, but not earlier
// than the stack size is known and the procedure has to be async-signal safe.
FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
uptr stack_size = this->stack_size();
@@ -211,6 +211,7 @@ FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
stack_size_log =
Max(stack_size_log, static_cast<uptr>(flags()->min_uar_stack_size_log));
fake_stack_ = FakeStack::Create(stack_size_log);
+ DCHECK_EQ(GetCurrentThread(), this);
SetTLSFakeStack(fake_stack_);
return fake_stack_;
}
@@ -218,6 +219,7 @@ FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
}
void AsanThread::Init(const InitOptions *options) {
+ DCHECK_NE(tid(), ThreadRegistry::kUnknownTid);
next_stack_top_ = next_stack_bottom_ = 0;
atomic_store(&stack_switching_, false, memory_order_release);
CHECK_EQ(this->stack_size(), 0U);
@@ -229,8 +231,17 @@ void AsanThread::Init(const InitOptions *options) {
}
ClearShadowForThreadStackAndTLS();
fake_stack_ = nullptr;
- if (__asan_option_detect_stack_use_after_return)
+ if (__asan_option_detect_stack_use_after_return &&
+ tid() == GetCurrentTidOrInvalid()) {
+ // AsyncSignalSafeLazyInitFakeStack makes use of threadlocals and must be
+ // called from the context of the thread it is initializing, not its parent.
+ // Most platforms call AsanThread::Init on the newly-spawned thread, but
+ // Fuchsia calls this function from the parent thread. To support that
+ // approach, we avoid calling AsyncSignalSafeLazyInitFakeStack here; it will
+ // be called by the new thread when it first attempts to access the fake
+ // stack.
AsyncSignalSafeLazyInitFakeStack();
+ }
int local = 0;
VReport(1, "T%d: stack [%p,%p) size 0x%zx; local=%p\n", tid(),
(void *)stack_bottom_, (void *)stack_top_, stack_top_ - stack_bottom_,
@@ -242,12 +253,9 @@ void AsanThread::Init(const InitOptions *options) {
// SetThreadStackAndTls.
#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
-thread_return_t AsanThread::ThreadStart(
- tid_t os_id, atomic_uintptr_t *signal_thread_is_registered) {
+thread_return_t AsanThread::ThreadStart(tid_t os_id) {
Init();
asanThreadRegistry().StartThread(tid(), os_id, ThreadType::Regular, nullptr);
- if (signal_thread_is_registered)
- atomic_store(signal_thread_is_registered, 1, memory_order_release);
if (common_flags()->use_sigaltstack) SetAlternateSignalStack();
@@ -277,8 +285,7 @@ AsanThread *CreateMainThread() {
/* start_routine */ nullptr, /* arg */ nullptr, /* parent_tid */ 0,
/* stack */ nullptr, /* detached */ true);
SetCurrentThread(main_thread);
- main_thread->ThreadStart(internal_getpid(),
- /* signal_thread_is_registered */ nullptr);
+ main_thread->ThreadStart(internal_getpid());
return main_thread;
}
@@ -366,7 +373,9 @@ uptr AsanThread::GetStackVariableShadowStart(uptr addr) {
bottom = stack_bottom();
} else if (has_fake_stack()) {
bottom = fake_stack()->AddrIsInFakeStack(addr);
- CHECK(bottom);
+ if (bottom == 0) {
+ return 0;
+ }
} else {
return 0;
}
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_thread.h b/contrib/llvm-project/compiler-rt/lib/asan/asan_thread.h
index c503f507059d..c33955eee367 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_thread.h
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_thread.h
@@ -35,7 +35,7 @@ class AsanThread;
// These objects are created for every thread and are never deleted,
// so we can find them by tid even if the thread is long dead.
-class AsanThreadContext : public ThreadContextBase {
+class AsanThreadContext final : public ThreadContextBase {
public:
explicit AsanThreadContext(int tid)
: ThreadContextBase(tid), announced(false),
@@ -69,8 +69,7 @@ class AsanThread {
struct InitOptions;
void Init(const InitOptions *options = nullptr);
- thread_return_t ThreadStart(tid_t os_id,
- atomic_uintptr_t *signal_thread_is_registered);
+ thread_return_t ThreadStart(tid_t os_id);
uptr stack_top();
uptr stack_bottom();
@@ -132,6 +131,8 @@ class AsanThread {
void *extra_spill_area() { return &extra_spill_area_; }
+ void *get_arg() { return arg_; }
+
private:
// NOTE: There is no AsanThread constructor. It is allocated
// via mmap() and *must* be valid in zero-initialized state.
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_win.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_win.cpp
index 03feddbe86b4..1577c83cf994 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_win.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_win.cpp
@@ -134,7 +134,7 @@ INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) {
static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
AsanThread *t = (AsanThread *)arg;
SetCurrentThread(t);
- return t->ThreadStart(GetTid(), /* signal_thread_is_registered */ nullptr);
+ return t->ThreadStart(GetTid());
}
INTERCEPTOR_WINAPI(HANDLE, CreateThread, LPSECURITY_ATTRIBUTES security,
@@ -191,6 +191,12 @@ void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
UNIMPLEMENTED();
}
+void FlushUnneededASanShadowMemory(uptr p, uptr size) {
+ // Since asan's mapping is compacting, the shadow chunk may be
+ // not page-aligned, so we only flush the page-aligned portion.
+ ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
+}
+
// ---------------------- TSD ---------------- {{{
static bool tsd_key_inited = false;
@@ -247,15 +253,8 @@ void *AsanDoesNotSupportStaticLinkage() {
}
uptr FindDynamicShadowStart() {
- uptr granularity = GetMmapGranularity();
- uptr alignment = 8 * granularity;
- uptr left_padding = granularity;
- uptr space_size = kHighShadowEnd + left_padding;
- uptr shadow_start = FindAvailableMemoryRange(space_size, alignment,
- granularity, nullptr, nullptr);
- CHECK_NE((uptr)0, shadow_start);
- CHECK(IsAligned(shadow_start, alignment));
- return shadow_start;
+ return MapDynamicShadow(MemToShadowSize(kHighMemEnd), SHADOW_SCALE,
+ /*min_shadow_base_alignment*/ 0, kHighMemEnd);
}
void AsanCheckDynamicRTPrereqs() {}
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/README.txt b/contrib/llvm-project/compiler-rt/lib/builtins/README.txt
index f9e1bc805092..d66d725e7ab5 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/README.txt
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/README.txt
@@ -87,6 +87,8 @@ du_int __udivmoddi4(du_int a, du_int b, du_int* rem); // a / b, *rem = a % b u
tu_int __udivmodti4(tu_int a, tu_int b, tu_int* rem); // a / b, *rem = a % b unsigned
su_int __udivmodsi4(su_int a, su_int b, su_int* rem); // a / b, *rem = a % b unsigned
si_int __divmodsi4(si_int a, si_int b, si_int* rem); // a / b, *rem = a % b signed
+di_int __divmoddi4(di_int a, di_int b, di_int* rem); // a / b, *rem = a % b signed
+ti_int __divmodti4(ti_int a, ti_int b, ti_int* rem); // a / b, *rem = a % b signed
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/aarch64/lse.S b/contrib/llvm-project/compiler-rt/lib/builtins/aarch64/lse.S
new file mode 100644
index 000000000000..5dc0d5320b5a
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/aarch64/lse.S
@@ -0,0 +1,236 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+#include "assembly.h"
+
+// Out-of-line LSE atomics helpers. Ported from libgcc library.
+// N = {1, 2, 4, 8}
+// M = {1, 2, 4, 8, 16}
+// ORDER = {'relax', 'acq', 'rel', 'acq_rel'}
+// Routines implemented:
+//
+// iM __aarch64_casM_ORDER(iM expected, iM desired, iM *ptr)
+// iN __aarch64_swpN_ORDER(iN val, iN *ptr)
+// iN __aarch64_ldaddN_ORDER(iN val, iN *ptr)
+// iN __aarch64_ldclrN_ORDER(iN val, iN *ptr)
+// iN __aarch64_ldeorN_ORDER(iN val, iN *ptr)
+// iN __aarch64_ldsetN_ORDER(iN val, iN *ptr)
+//
+// Routines may modify temporary registers tmp0, tmp1, tmp2,
+// return value x0 and the flags only.
+
+#ifdef __aarch64__
+
+#ifdef HAS_ASM_LSE
+.arch armv8-a+lse
+#else
+.arch armv8-a
+#endif
+
+#if !defined(__APPLE__)
+HIDDEN(__aarch64_have_lse_atomics)
+#else
+HIDDEN(___aarch64_have_lse_atomics)
+#endif
+
+// Generate mnemonics for
+// L_cas: SIZE: 1,2,4,8,16 MODEL: 1,2,3,4
+// L_swp L_ldadd L_ldclr L_ldeor L_ldset: SIZE: 1,2,4,8 MODEL: 1,2,3,4
+
+#if SIZE == 1
+#define S b
+#define UXT uxtb
+#define B 0x00000000
+#elif SIZE == 2
+#define S h
+#define UXT uxth
+#define B 0x40000000
+#elif SIZE == 4 || SIZE == 8 || SIZE == 16
+#define S
+#define UXT mov
+#if SIZE == 4
+#define B 0x80000000
+#elif SIZE == 8
+#define B 0xc0000000
+#endif
+#else
+#error
+#endif // SIZE
+
+#if MODEL == 1
+#define SUFF _relax
+#define A
+#define L
+#define M 0x000000
+#define N 0x000000
+#elif MODEL == 2
+#define SUFF _acq
+#define A a
+#define L
+#define M 0x400000
+#define N 0x800000
+#elif MODEL == 3
+#define SUFF _rel
+#define A
+#define L l
+#define M 0x008000
+#define N 0x400000
+#elif MODEL == 4
+#define SUFF _acq_rel
+#define A a
+#define L l
+#define M 0x408000
+#define N 0xc00000
+#else
+#error
+#endif // MODEL
+
+// Define register size.
+#define x(N) GLUE2(x, N)
+#define w(N) GLUE2(w, N)
+#if SIZE < 8
+#define s(N) w(N)
+#else
+#define s(N) x(N)
+#endif
+
+#define NAME(BASE) GLUE4(__aarch64_, BASE, SIZE, SUFF)
+#define LDXR GLUE4(ld, A, xr, S)
+#define STXR GLUE4(st, L, xr, S)
+
+// Define temporary registers.
+#define tmp0 16
+#define tmp1 17
+#define tmp2 15
+
+// Macro for branch to label if no LSE available
+.macro JUMP_IF_NOT_LSE label
+#if !defined(__APPLE__)
+ adrp x(tmp0), __aarch64_have_lse_atomics
+ ldrb w(tmp0), [x(tmp0), :lo12:__aarch64_have_lse_atomics]
+#else
+ adrp x(tmp0), ___aarch64_have_lse_atomics@page
+ ldrb w(tmp0), [x(tmp0), ___aarch64_have_lse_atomics@pageoff]
+#endif
+ cbz w(tmp0), \label
+.endm
+
+#ifdef L_cas
+DEFINE_COMPILERRT_OUTLINE_FUNCTION_UNMANGLED(NAME(cas))
+ JUMP_IF_NOT_LSE 8f
+#if SIZE < 16
+#ifdef HAS_ASM_LSE
+#define CAS GLUE4(cas, A, L, S) s(0), s(1), [x2]
+#else
+#define CAS .inst 0x08a07c41 + B + M
+#endif
+ CAS // s(0), s(1), [x2]
+ ret
+8:
+ UXT s(tmp0), s(0)
+0:
+ LDXR s(0), [x2]
+ cmp s(0), s(tmp0)
+ bne 1f
+ STXR w(tmp1), s(1), [x2]
+ cbnz w(tmp1), 0b
+1:
+ ret
+#else
+#define LDXP GLUE3(ld, A, xp)
+#define STXP GLUE3(st, L, xp)
+#ifdef HAS_ASM_LSE
+#define CASP GLUE3(casp, A, L) x0, x1, x2, x3, [x4]
+#else
+#define CASP .inst 0x48207c82 + M
+#endif
+
+ CASP // x0, x1, x2, x3, [x4]
+ ret
+8:
+ mov x(tmp0), x0
+ mov x(tmp1), x1
+0:
+ LDXP x0, x1, [x4]
+ cmp x0, x(tmp0)
+ ccmp x1, x(tmp1), #0, eq
+ bne 1f
+ STXP w(tmp2), x2, x3, [x4]
+ cbnz w(tmp2), 0b
+1:
+ ret
+#endif
+END_COMPILERRT_OUTLINE_FUNCTION(NAME(cas))
+#endif // L_cas
+
+#ifdef L_swp
+#ifdef HAS_ASM_LSE
+#define SWP GLUE4(swp, A, L, S) s(0), s(0), [x1]
+#else
+#define SWP .inst 0x38208020 + B + N
+#endif
+DEFINE_COMPILERRT_OUTLINE_FUNCTION_UNMANGLED(NAME(swp))
+ JUMP_IF_NOT_LSE 8f
+ SWP // s(0), s(0), [x1]
+ ret
+8:
+ mov s(tmp0), s(0)
+0:
+ LDXR s(0), [x1]
+ STXR w(tmp1), s(tmp0), [x1]
+ cbnz w(tmp1), 0b
+ ret
+END_COMPILERRT_OUTLINE_FUNCTION(NAME(swp))
+#endif // L_swp
+
+#if defined(L_ldadd) || defined(L_ldclr) || \
+ defined(L_ldeor) || defined(L_ldset)
+
+#ifdef L_ldadd
+#define LDNM ldadd
+#define OP add
+#define OPN 0x0000
+#elif defined(L_ldclr)
+#define LDNM ldclr
+#define OP bic
+#define OPN 0x1000
+#elif defined(L_ldeor)
+#define LDNM ldeor
+#define OP eor
+#define OPN 0x2000
+#elif defined(L_ldset)
+#define LDNM ldset
+#define OP orr
+#define OPN 0x3000
+#else
+#error
+#endif
+
+#ifdef HAS_ASM_LSE
+#define LDOP GLUE4(LDNM, A, L, S) s(0), s(0), [x1]
+#else
+#define LDOP .inst 0x38200020 + OPN + B + N
+#endif
+
+DEFINE_COMPILERRT_OUTLINE_FUNCTION_UNMANGLED(NAME(LDNM))
+ JUMP_IF_NOT_LSE 8f
+ LDOP // s(0), s(0), [x1]
+ ret
+8:
+ mov s(tmp0), s(0)
+0:
+ LDXR s(0), [x1]
+ OP s(tmp1), s(0), s(tmp0)
+ STXR w(tmp2), s(tmp1), [x1]
+ cbnz w(tmp2), 0b
+ ret
+END_COMPILERRT_OUTLINE_FUNCTION(NAME(LDNM))
+#endif // L_ldadd L_ldclr L_ldeor L_ldset
+
+NO_EXEC_STACK_DIRECTIVE
+
+// GNU property note for BTI and PAC
+GNU_PROPERTY_BTI_PAC
+
+#endif // __aarch64__
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/assembly.h b/contrib/llvm-project/compiler-rt/lib/builtins/assembly.h
index f437cb87f60a..f6ce6a9fccff 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/assembly.h
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/assembly.h
@@ -14,8 +14,8 @@
#ifndef COMPILERRT_ASSEMBLY_H
#define COMPILERRT_ASSEMBLY_H
-#if defined(__POWERPC__) || defined(__powerpc__) || defined(__ppc__)
-#define SEPARATOR @
+#if defined(__APPLE__) && defined(__aarch64__)
+#define SEPARATOR %%
#else
#define SEPARATOR ;
#endif
@@ -35,14 +35,14 @@
#define HIDDEN(name) .hidden name
#define LOCAL_LABEL(name) .L_##name
#define FILE_LEVEL_DIRECTIVE
-#if defined(__arm__)
+#if defined(__arm__) || defined(__aarch64__)
#define SYMBOL_IS_FUNC(name) .type name,%function
#else
#define SYMBOL_IS_FUNC(name) .type name,@function
#endif
#define CONST_SECTION .section .rodata
-#if defined(__GNU__) || defined(__FreeBSD__) || defined(__Fuchsia__) || \
+#if defined(__GNU__) || defined(__FreeBSD__) || defined(__Fuchsia__) || \
defined(__linux__)
#define NO_EXEC_STACK_DIRECTIVE .section .note.GNU-stack,"",%progbits
#else
@@ -65,6 +65,66 @@
#endif
+#if defined(__arm__) || defined(__aarch64__)
+#define FUNC_ALIGN \
+ .text SEPARATOR \
+ .balign 16 SEPARATOR
+#else
+#define FUNC_ALIGN
+#endif
+
+// BTI and PAC gnu property note
+#define NT_GNU_PROPERTY_TYPE_0 5
+#define GNU_PROPERTY_AARCH64_FEATURE_1_AND 0xc0000000
+#define GNU_PROPERTY_AARCH64_FEATURE_1_BTI 1
+#define GNU_PROPERTY_AARCH64_FEATURE_1_PAC 2
+
+#if defined(__ARM_FEATURE_BTI_DEFAULT)
+#define BTI_FLAG GNU_PROPERTY_AARCH64_FEATURE_1_BTI
+#else
+#define BTI_FLAG 0
+#endif
+
+#if __ARM_FEATURE_PAC_DEFAULT & 3
+#define PAC_FLAG GNU_PROPERTY_AARCH64_FEATURE_1_PAC
+#else
+#define PAC_FLAG 0
+#endif
+
+#define GNU_PROPERTY(type, value) \
+ .pushsection .note.gnu.property, "a" SEPARATOR \
+ .p2align 3 SEPARATOR \
+ .word 4 SEPARATOR \
+ .word 16 SEPARATOR \
+ .word NT_GNU_PROPERTY_TYPE_0 SEPARATOR \
+ .asciz "GNU" SEPARATOR \
+ .word type SEPARATOR \
+ .word 4 SEPARATOR \
+ .word value SEPARATOR \
+ .word 0 SEPARATOR \
+ .popsection
+
+#if BTI_FLAG != 0
+#define BTI_C bti c
+#else
+#define BTI_C
+#endif
+
+#if (BTI_FLAG | PAC_FLAG) != 0
+#define GNU_PROPERTY_BTI_PAC \
+ GNU_PROPERTY(GNU_PROPERTY_AARCH64_FEATURE_1_AND, BTI_FLAG | PAC_FLAG)
+#else
+#define GNU_PROPERTY_BTI_PAC
+#endif
+
+#if defined(__clang__) || defined(__GCC_HAVE_DWARF2_CFI_ASM)
+#define CFI_START .cfi_startproc
+#define CFI_END .cfi_endproc
+#else
+#define CFI_START
+#define CFI_END
+#endif
+
#if defined(__arm__)
// Determine actual [ARM][THUMB[1][2]] ISA using compiler predefined macros:
@@ -131,8 +191,14 @@
#define DEFINE_CODE_STATE
#endif
-#define GLUE2(a, b) a##b
-#define GLUE(a, b) GLUE2(a, b)
+#define GLUE2_(a, b) a##b
+#define GLUE(a, b) GLUE2_(a, b)
+#define GLUE2(a, b) GLUE2_(a, b)
+#define GLUE3_(a, b, c) a##b##c
+#define GLUE3(a, b, c) GLUE3_(a, b, c)
+#define GLUE4_(a, b, c, d) a##b##c##d
+#define GLUE4(a, b, c, d) GLUE4_(a, b, c, d)
+
#define SYMBOL_NAME(name) GLUE(__USER_LABEL_PREFIX__, name)
#ifdef VISIBILITY_HIDDEN
@@ -177,6 +243,16 @@
DECLARE_FUNC_ENCODING \
name:
+#define DEFINE_COMPILERRT_OUTLINE_FUNCTION_UNMANGLED(name) \
+ DEFINE_CODE_STATE \
+ FUNC_ALIGN \
+ .globl name SEPARATOR \
+ SYMBOL_IS_FUNC(name) SEPARATOR \
+ DECLARE_SYMBOL_VISIBILITY(name) SEPARATOR \
+ CFI_START SEPARATOR \
+ DECLARE_FUNC_ENCODING \
+ name: SEPARATOR BTI_C
+
#define DEFINE_COMPILERRT_FUNCTION_ALIAS(name, target) \
.globl SYMBOL_NAME(name) SEPARATOR \
SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
@@ -193,8 +269,13 @@
#ifdef __ELF__
#define END_COMPILERRT_FUNCTION(name) \
.size SYMBOL_NAME(name), . - SYMBOL_NAME(name)
+#define END_COMPILERRT_OUTLINE_FUNCTION(name) \
+ CFI_END SEPARATOR \
+ .size SYMBOL_NAME(name), . - SYMBOL_NAME(name)
#else
#define END_COMPILERRT_FUNCTION(name)
+#define END_COMPILERRT_OUTLINE_FUNCTION(name) \
+ CFI_END
#endif
#endif // COMPILERRT_ASSEMBLY_H
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/atomic.c b/contrib/llvm-project/compiler-rt/lib/builtins/atomic.c
index 84d116a36cd9..f48cdc10ccf7 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/atomic.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/atomic.c
@@ -36,6 +36,8 @@
#pragma redefine_extname __atomic_exchange_c SYMBOL_NAME(__atomic_exchange)
#pragma redefine_extname __atomic_compare_exchange_c SYMBOL_NAME( \
__atomic_compare_exchange)
+#pragma redefine_extname __atomic_is_lock_free_c SYMBOL_NAME( \
+ __atomic_is_lock_free)
/// Number of locks. This allocates one page on 32-bit platforms, two on
/// 64-bit. This can be specified externally if a different trade between
@@ -121,62 +123,57 @@ static __inline Lock *lock_for_pointer(void *ptr) {
}
/// Macros for determining whether a size is lock free.
-#define IS_LOCK_FREE_1 __c11_atomic_is_lock_free(1)
-#define IS_LOCK_FREE_2 __c11_atomic_is_lock_free(2)
-#define IS_LOCK_FREE_4 __c11_atomic_is_lock_free(4)
+#define ATOMIC_ALWAYS_LOCK_FREE_OR_ALIGNED_LOCK_FREE(size, p) \
+ (__atomic_always_lock_free(size, p) || \
+ (__atomic_always_lock_free(size, 0) && ((uintptr_t)p % size) == 0))
+#define IS_LOCK_FREE_1(p) ATOMIC_ALWAYS_LOCK_FREE_OR_ALIGNED_LOCK_FREE(1, p)
+#define IS_LOCK_FREE_2(p) ATOMIC_ALWAYS_LOCK_FREE_OR_ALIGNED_LOCK_FREE(2, p)
+#define IS_LOCK_FREE_4(p) ATOMIC_ALWAYS_LOCK_FREE_OR_ALIGNED_LOCK_FREE(4, p)
+#define IS_LOCK_FREE_8(p) ATOMIC_ALWAYS_LOCK_FREE_OR_ALIGNED_LOCK_FREE(8, p)
+#define IS_LOCK_FREE_16(p) ATOMIC_ALWAYS_LOCK_FREE_OR_ALIGNED_LOCK_FREE(16, p)
-/// 32 bit MIPS and PowerPC don't support 8-byte lock_free atomics
-#if defined(__mips__) || (!defined(__powerpc64__) && defined(__powerpc__))
-#define IS_LOCK_FREE_8 0
+/// Macro that calls the compiler-generated lock-free versions of functions
+/// when they exist.
+#define TRY_LOCK_FREE_CASE(n, type, ptr) \
+ case n: \
+ if (IS_LOCK_FREE_##n(ptr)) { \
+ LOCK_FREE_ACTION(type); \
+ } \
+ break;
+#ifdef __SIZEOF_INT128__
+#define TRY_LOCK_FREE_CASE_16(p) TRY_LOCK_FREE_CASE(16, __uint128_t, p)
#else
-#define IS_LOCK_FREE_8 __c11_atomic_is_lock_free(8)
+#define TRY_LOCK_FREE_CASE_16(p) /* __uint128_t not available */
#endif
-/// Clang can not yet codegen __atomic_is_lock_free(16), so for now we assume
-/// 16-byte values are not lock free.
-#define IS_LOCK_FREE_16 0
-
-/// Macro that calls the compiler-generated lock-free versions of functions
-/// when they exist.
-#define LOCK_FREE_CASES() \
+#define LOCK_FREE_CASES(ptr) \
do { \
switch (size) { \
- case 1: \
- if (IS_LOCK_FREE_1) { \
- LOCK_FREE_ACTION(uint8_t); \
- } \
- break; \
- case 2: \
- if (IS_LOCK_FREE_2) { \
- LOCK_FREE_ACTION(uint16_t); \
- } \
- break; \
- case 4: \
- if (IS_LOCK_FREE_4) { \
- LOCK_FREE_ACTION(uint32_t); \
- } \
- break; \
- case 8: \
- if (IS_LOCK_FREE_8) { \
- LOCK_FREE_ACTION(uint64_t); \
- } \
- break; \
- case 16: \
- if (IS_LOCK_FREE_16) { \
- /* FIXME: __uint128_t isn't available on 32 bit platforms. \
- LOCK_FREE_ACTION(__uint128_t);*/ \
- } \
+ TRY_LOCK_FREE_CASE(1, uint8_t, ptr) \
+ TRY_LOCK_FREE_CASE(2, uint16_t, ptr) \
+ TRY_LOCK_FREE_CASE(4, uint32_t, ptr) \
+ TRY_LOCK_FREE_CASE(8, uint64_t, ptr) \
+ TRY_LOCK_FREE_CASE_16(ptr) /* __uint128_t may not be supported */ \
+ default: \
break; \
} \
} while (0)
+/// Whether atomic operations for the given size (and alignment) are lock-free.
+bool __atomic_is_lock_free_c(size_t size, void *ptr) {
+#define LOCK_FREE_ACTION(type) return true;
+ LOCK_FREE_CASES(ptr);
+#undef LOCK_FREE_ACTION
+ return false;
+}
+
/// An atomic load operation. This is atomic with respect to the source
/// pointer only.
void __atomic_load_c(int size, void *src, void *dest, int model) {
#define LOCK_FREE_ACTION(type) \
*((type *)dest) = __c11_atomic_load((_Atomic(type) *)src, model); \
return;
- LOCK_FREE_CASES();
+ LOCK_FREE_CASES(src);
#undef LOCK_FREE_ACTION
Lock *l = lock_for_pointer(src);
lock(l);
@@ -190,7 +187,7 @@ void __atomic_store_c(int size, void *dest, void *src, int model) {
#define LOCK_FREE_ACTION(type) \
__c11_atomic_store((_Atomic(type) *)dest, *(type *)src, model); \
return;
- LOCK_FREE_CASES();
+ LOCK_FREE_CASES(dest);
#undef LOCK_FREE_ACTION
Lock *l = lock_for_pointer(dest);
lock(l);
@@ -209,7 +206,7 @@ int __atomic_compare_exchange_c(int size, void *ptr, void *expected,
return __c11_atomic_compare_exchange_strong( \
(_Atomic(type) *)ptr, (type *)expected, *(type *)desired, success, \
failure)
- LOCK_FREE_CASES();
+ LOCK_FREE_CASES(ptr);
#undef LOCK_FREE_ACTION
Lock *l = lock_for_pointer(ptr);
lock(l);
@@ -230,7 +227,7 @@ void __atomic_exchange_c(int size, void *ptr, void *val, void *old, int model) {
*(type *)old = \
__c11_atomic_exchange((_Atomic(type) *)ptr, *(type *)val, model); \
return;
- LOCK_FREE_CASES();
+ LOCK_FREE_CASES(ptr);
#undef LOCK_FREE_ACTION
Lock *l = lock_for_pointer(ptr);
lock(l);
@@ -260,7 +257,7 @@ void __atomic_exchange_c(int size, void *ptr, void *val, void *old, int model) {
#define OPTIMISED_CASE(n, lockfree, type) \
type __atomic_load_##n(type *src, int model) { \
- if (lockfree) \
+ if (lockfree(src)) \
return __c11_atomic_load((_Atomic(type) *)src, model); \
Lock *l = lock_for_pointer(src); \
lock(l); \
@@ -273,7 +270,7 @@ OPTIMISED_CASES
#define OPTIMISED_CASE(n, lockfree, type) \
void __atomic_store_##n(type *dest, type val, int model) { \
- if (lockfree) { \
+ if (lockfree(dest)) { \
__c11_atomic_store((_Atomic(type) *)dest, val, model); \
return; \
} \
@@ -288,7 +285,7 @@ OPTIMISED_CASES
#define OPTIMISED_CASE(n, lockfree, type) \
type __atomic_exchange_##n(type *dest, type val, int model) { \
- if (lockfree) \
+ if (lockfree(dest)) \
return __c11_atomic_exchange((_Atomic(type) *)dest, val, model); \
Lock *l = lock_for_pointer(dest); \
lock(l); \
@@ -303,7 +300,7 @@ OPTIMISED_CASES
#define OPTIMISED_CASE(n, lockfree, type) \
bool __atomic_compare_exchange_##n(type *ptr, type *expected, type desired, \
int success, int failure) { \
- if (lockfree) \
+ if (lockfree(ptr)) \
return __c11_atomic_compare_exchange_strong( \
(_Atomic(type) *)ptr, expected, desired, success, failure); \
Lock *l = lock_for_pointer(ptr); \
@@ -325,7 +322,7 @@ OPTIMISED_CASES
////////////////////////////////////////////////////////////////////////////////
#define ATOMIC_RMW(n, lockfree, type, opname, op) \
type __atomic_fetch_##opname##_##n(type *ptr, type val, int model) { \
- if (lockfree) \
+ if (lockfree(ptr)) \
return __c11_atomic_fetch_##opname((_Atomic(type) *)ptr, val, model); \
Lock *l = lock_for_pointer(ptr); \
lock(l); \
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/clear_cache.c b/contrib/llvm-project/compiler-rt/lib/builtins/clear_cache.c
index 29e31f55d499..5a443ddd4b03 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/clear_cache.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/clear_cache.c
@@ -46,6 +46,11 @@ uintptr_t GetCurrentProcess(void);
#include <unistd.h>
#endif
+#if defined(__linux__) && defined(__riscv)
+// to get platform-specific syscall definitions
+#include <linux/unistd.h>
+#endif
+
// The compiler generates calls to __clear_cache() when creating
// trampoline functions on the stack for use with nested functions.
// It is expected to invalidate the instruction cache for the
@@ -148,9 +153,10 @@ void __clear_cache(void *start, void *end) {
for (uintptr_t dword = start_dword; dword < end_dword; dword += dword_size)
__asm__ volatile("flush %0" : : "r"(dword));
#elif defined(__riscv) && defined(__linux__)
-#define __NR_riscv_flush_icache (244 + 15)
+ // See: arch/riscv/include/asm/cacheflush.h, arch/riscv/kernel/sys_riscv.c
register void *start_reg __asm("a0") = start;
const register void *end_reg __asm("a1") = end;
+ // "0" means that we clear cache for all threads (SYS_RISCV_FLUSH_ICACHE_ALL)
const register long flags __asm("a2") = 0;
const register long syscall_nr __asm("a7") = __NR_riscv_flush_icache;
__asm __volatile("ecall"
@@ -161,6 +167,8 @@ void __clear_cache(void *start, void *end) {
#if __APPLE__
// On Darwin, sys_icache_invalidate() provides this functionality
sys_icache_invalidate(start, end - start);
+#elif defined(__ve__)
+ __asm__ volatile("fencec 2");
#else
compilerrt_abort();
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model.c b/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model.c
index 468bcc84cbcb..51bedd98c3d3 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model.c
@@ -8,10 +8,21 @@
//
// This file is based on LLVM's lib/Support/Host.cpp.
// It implements the operating system Host concept and builtin
-// __cpu_model for the compiler_rt library, for x86 only.
+// __cpu_model for the compiler_rt library for x86 and
+// __aarch64_have_lse_atomics for AArch64.
//
//===----------------------------------------------------------------------===//
+#if defined(HAVE_INIT_PRIORITY)
+#define CONSTRUCTOR_ATTRIBUTE __attribute__((__constructor__ 101))
+#elif __has_attribute(__constructor__)
+#define CONSTRUCTOR_ATTRIBUTE __attribute__((__constructor__))
+#else
+// FIXME: For MSVC, we should make a function pointer global in .CRT$X?? so that
+// this runs during initialization.
+#define CONSTRUCTOR_ATTRIBUTE
+#endif
+
#if (defined(__i386__) || defined(_M_IX86) || defined(__x86_64__) || \
defined(_M_X64)) && \
(defined(__GNUC__) || defined(__clang__) || defined(_MSC_VER))
@@ -57,6 +68,7 @@ enum ProcessorTypes {
INTEL_GOLDMONT,
INTEL_GOLDMONT_PLUS,
INTEL_TREMONT,
+ AMDFAM19H,
CPU_TYPE_MAX
};
@@ -84,6 +96,9 @@ enum ProcessorSubtypes {
INTEL_COREI7_CASCADELAKE,
INTEL_COREI7_TIGERLAKE,
INTEL_COREI7_COOPERLAKE,
+ INTEL_COREI7_SAPPHIRERAPIDS,
+ INTEL_COREI7_ALDERLAKE,
+ AMDFAM19H_ZNVER3,
CPU_SUBTYPE_MAX
};
@@ -407,6 +422,13 @@ getIntelProcessorTypeAndSubtype(unsigned Family, unsigned Model,
*Subtype = INTEL_COREI7_ICELAKE_SERVER;
break;
+ // Sapphire Rapids:
+ case 0x8f:
+ CPU = "sapphirerapids";
+ *Type = INTEL_COREI7;
+ *Subtype = INTEL_COREI7_SAPPHIRERAPIDS;
+ break;
+
case 0x1c: // Most 45 nm Intel Atom processors
case 0x26: // 45 nm Atom Lincroft
case 0x27: // 32 nm Atom Medfield
@@ -530,6 +552,14 @@ getAMDProcessorTypeAndSubtype(unsigned Family, unsigned Model,
break; // 00h-0Fh: Zen1
}
break;
+ case 25:
+ CPU = "znver3";
+ *Type = AMDFAM19H;
+ if (Model <= 0x0f) {
+ *Subtype = AMDFAM19H_ZNVER3;
+ break; // 00h-0Fh: Zen3
+ }
+ break;
default:
break; // Unknown AMD CPU.
}
@@ -656,16 +686,6 @@ static void getAvailableFeatures(unsigned ECX, unsigned EDX, unsigned MaxLeaf,
#undef setFeature
}
-#if defined(HAVE_INIT_PRIORITY)
-#define CONSTRUCTOR_ATTRIBUTE __attribute__((__constructor__ 101))
-#elif __has_attribute(__constructor__)
-#define CONSTRUCTOR_ATTRIBUTE __attribute__((__constructor__))
-#else
-// FIXME: For MSVC, we should make a function pointer global in .CRT$X?? so that
-// this runs during initialization.
-#define CONSTRUCTOR_ATTRIBUTE
-#endif
-
#ifndef _WIN32
__attribute__((visibility("hidden")))
#endif
@@ -740,5 +760,24 @@ int CONSTRUCTOR_ATTRIBUTE __cpu_indicator_init(void) {
return 0;
}
-
+#elif defined(__aarch64__)
+// LSE support detection for out-of-line atomics
+// using HWCAP and Auxiliary vector
+_Bool __aarch64_have_lse_atomics
+ __attribute__((visibility("hidden"), nocommon));
+#if defined(__has_include)
+#if __has_include(<sys/auxv.h>)
+#include <sys/auxv.h>
+#ifndef AT_HWCAP
+#define AT_HWCAP 16
+#endif
+#ifndef HWCAP_ATOMICS
+#define HWCAP_ATOMICS (1 << 8)
#endif
+static void CONSTRUCTOR_ATTRIBUTE init_have_lse_atomics(void) {
+ unsigned long hwcap = getauxval(AT_HWCAP);
+ __aarch64_have_lse_atomics = (hwcap & HWCAP_ATOMICS) != 0;
+}
+#endif // defined(__has_include)
+#endif // __has_include(<sys/auxv.h>)
+#endif // defined(__aarch64__)
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/divdf3.c b/contrib/llvm-project/compiler-rt/lib/builtins/divdf3.c
index 1dea3b534f5a..4c11759e0c4a 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/divdf3.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/divdf3.c
@@ -9,197 +9,16 @@
// This file implements double-precision soft-float division
// with the IEEE-754 default rounding (to nearest, ties to even).
//
-// For simplicity, this implementation currently flushes denormals to zero.
-// It should be a fairly straightforward exercise to implement gradual
-// underflow with correct rounding.
-//
//===----------------------------------------------------------------------===//
#define DOUBLE_PRECISION
-#include "fp_lib.h"
-
-COMPILER_RT_ABI fp_t __divdf3(fp_t a, fp_t b) {
-
- const unsigned int aExponent = toRep(a) >> significandBits & maxExponent;
- const unsigned int bExponent = toRep(b) >> significandBits & maxExponent;
- const rep_t quotientSign = (toRep(a) ^ toRep(b)) & signBit;
-
- rep_t aSignificand = toRep(a) & significandMask;
- rep_t bSignificand = toRep(b) & significandMask;
- int scale = 0;
-
- // Detect if a or b is zero, denormal, infinity, or NaN.
- if (aExponent - 1U >= maxExponent - 1U ||
- bExponent - 1U >= maxExponent - 1U) {
-
- const rep_t aAbs = toRep(a) & absMask;
- const rep_t bAbs = toRep(b) & absMask;
-
- // NaN / anything = qNaN
- if (aAbs > infRep)
- return fromRep(toRep(a) | quietBit);
- // anything / NaN = qNaN
- if (bAbs > infRep)
- return fromRep(toRep(b) | quietBit);
-
- if (aAbs == infRep) {
- // infinity / infinity = NaN
- if (bAbs == infRep)
- return fromRep(qnanRep);
- // infinity / anything else = +/- infinity
- else
- return fromRep(aAbs | quotientSign);
- }
-
- // anything else / infinity = +/- 0
- if (bAbs == infRep)
- return fromRep(quotientSign);
-
- if (!aAbs) {
- // zero / zero = NaN
- if (!bAbs)
- return fromRep(qnanRep);
- // zero / anything else = +/- zero
- else
- return fromRep(quotientSign);
- }
- // anything else / zero = +/- infinity
- if (!bAbs)
- return fromRep(infRep | quotientSign);
-
- // One or both of a or b is denormal. The other (if applicable) is a
- // normal number. Renormalize one or both of a and b, and set scale to
- // include the necessary exponent adjustment.
- if (aAbs < implicitBit)
- scale += normalize(&aSignificand);
- if (bAbs < implicitBit)
- scale -= normalize(&bSignificand);
- }
-
- // Set the implicit significand bit. If we fell through from the
- // denormal path it was already set by normalize( ), but setting it twice
- // won't hurt anything.
- aSignificand |= implicitBit;
- bSignificand |= implicitBit;
- int quotientExponent = aExponent - bExponent + scale;
-
- // Align the significand of b as a Q31 fixed-point number in the range
- // [1, 2.0) and get a Q32 approximate reciprocal using a small minimax
- // polynomial approximation: reciprocal = 3/4 + 1/sqrt(2) - b/2. This
- // is accurate to about 3.5 binary digits.
- const uint32_t q31b = bSignificand >> 21;
- uint32_t recip32 = UINT32_C(0x7504f333) - q31b;
- // 0x7504F333 / 2^32 + 1 = 3/4 + 1/sqrt(2)
-
- // Now refine the reciprocal estimate using a Newton-Raphson iteration:
- //
- // x1 = x0 * (2 - x0 * b)
- //
- // This doubles the number of correct binary digits in the approximation
- // with each iteration.
- uint32_t correction32;
- correction32 = -((uint64_t)recip32 * q31b >> 32);
- recip32 = (uint64_t)recip32 * correction32 >> 31;
- correction32 = -((uint64_t)recip32 * q31b >> 32);
- recip32 = (uint64_t)recip32 * correction32 >> 31;
- correction32 = -((uint64_t)recip32 * q31b >> 32);
- recip32 = (uint64_t)recip32 * correction32 >> 31;
-
- // The reciprocal may have overflowed to zero if the upper half of b is
- // exactly 1.0. This would sabatoge the full-width final stage of the
- // computation that follows, so we adjust the reciprocal down by one bit.
- recip32--;
-
- // We need to perform one more iteration to get us to 56 binary digits.
- // The last iteration needs to happen with extra precision.
- const uint32_t q63blo = bSignificand << 11;
- uint64_t correction, reciprocal;
- correction = -((uint64_t)recip32 * q31b + ((uint64_t)recip32 * q63blo >> 32));
- uint32_t cHi = correction >> 32;
- uint32_t cLo = correction;
- reciprocal = (uint64_t)recip32 * cHi + ((uint64_t)recip32 * cLo >> 32);
-
- // Adjust the final 64-bit reciprocal estimate downward to ensure that it is
- // strictly smaller than the infinitely precise exact reciprocal. Because
- // the computation of the Newton-Raphson step is truncating at every step,
- // this adjustment is small; most of the work is already done.
- reciprocal -= 2;
-
- // The numerical reciprocal is accurate to within 2^-56, lies in the
- // interval [0.5, 1.0), and is strictly smaller than the true reciprocal
- // of b. Multiplying a by this reciprocal thus gives a numerical q = a/b
- // in Q53 with the following properties:
- //
- // 1. q < a/b
- // 2. q is in the interval [0.5, 2.0)
- // 3. The error in q is bounded away from 2^-53 (actually, we have a
- // couple of bits to spare, but this is all we need).
-
- // We need a 64 x 64 multiply high to compute q, which isn't a basic
- // operation in C, so we need to be a little bit fussy.
- rep_t quotient, quotientLo;
- wideMultiply(aSignificand << 2, reciprocal, &quotient, &quotientLo);
-
- // Two cases: quotient is in [0.5, 1.0) or quotient is in [1.0, 2.0).
- // In either case, we are going to compute a residual of the form
- //
- // r = a - q*b
- //
- // We know from the construction of q that r satisfies:
- //
- // 0 <= r < ulp(q)*b
- //
- // If r is greater than 1/2 ulp(q)*b, then q rounds up. Otherwise, we
- // already have the correct result. The exact halfway case cannot occur.
- // We also take this time to right shift quotient if it falls in the [1,2)
- // range and adjust the exponent accordingly.
- rep_t residual;
- if (quotient < (implicitBit << 1)) {
- residual = (aSignificand << 53) - quotient * bSignificand;
- quotientExponent--;
- } else {
- quotient >>= 1;
- residual = (aSignificand << 52) - quotient * bSignificand;
- }
-
- const int writtenExponent = quotientExponent + exponentBias;
- if (writtenExponent >= maxExponent) {
- // If we have overflowed the exponent, return infinity.
- return fromRep(infRep | quotientSign);
- }
+#define NUMBER_OF_HALF_ITERATIONS 3
+#define NUMBER_OF_FULL_ITERATIONS 1
- else if (writtenExponent < 1) {
- if (writtenExponent == 0) {
- // Check whether the rounded result is normal.
- const bool round = (residual << 1) > bSignificand;
- // Clear the implicit bit.
- rep_t absResult = quotient & significandMask;
- // Round.
- absResult += round;
- if (absResult & ~significandMask) {
- // The rounded result is normal; return it.
- return fromRep(absResult | quotientSign);
- }
- }
- // Flush denormals to zero. In the future, it would be nice to add
- // code to round them correctly.
- return fromRep(quotientSign);
- }
+#include "fp_div_impl.inc"
- else {
- const bool round = (residual << 1) > bSignificand;
- // Clear the implicit bit.
- rep_t absResult = quotient & significandMask;
- // Insert the exponent.
- absResult |= (rep_t)writtenExponent << significandBits;
- // Round.
- absResult += round;
- // Insert the sign and return.
- const double result = fromRep(absResult | quotientSign);
- return result;
- }
-}
+COMPILER_RT_ABI fp_t __divdf3(fp_t a, fp_t b) { return __divXf3__(a, b); }
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/divdi3.c b/contrib/llvm-project/compiler-rt/lib/builtins/divdi3.c
index ee08d6557783..d71e138d995c 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/divdi3.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/divdi3.c
@@ -14,12 +14,9 @@
// Returns: a / b
-COMPILER_RT_ABI di_int __divdi3(di_int a, di_int b) {
- const int bits_in_dword_m1 = (int)(sizeof(di_int) * CHAR_BIT) - 1;
- di_int s_a = a >> bits_in_dword_m1; // s_a = a < 0 ? -1 : 0
- di_int s_b = b >> bits_in_dword_m1; // s_b = b < 0 ? -1 : 0
- a = (a ^ s_a) - s_a; // negate if s_a == -1
- b = (b ^ s_b) - s_b; // negate if s_b == -1
- s_a ^= s_b; // sign of quotient
- return (__udivmoddi4(a, b, (du_int *)0) ^ s_a) - s_a; // negate if s_a == -1
-}
+#define fixint_t di_int
+#define fixuint_t du_int
+#define COMPUTE_UDIV(a, b) __udivmoddi4((a), (b), (du_int *)0)
+#include "int_div_impl.inc"
+
+COMPILER_RT_ABI di_int __divdi3(di_int a, di_int b) { return __divXi3(a, b); }
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/divmoddi4.c b/contrib/llvm-project/compiler-rt/lib/builtins/divmoddi4.c
index 7f333510c003..e7cbbb1aaa30 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/divmoddi4.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/divmoddi4.c
@@ -15,7 +15,14 @@
// Returns: a / b, *rem = a % b
COMPILER_RT_ABI di_int __divmoddi4(di_int a, di_int b, di_int *rem) {
- di_int d = __divdi3(a, b);
- *rem = a - (d * b);
- return d;
+ const int bits_in_dword_m1 = (int)(sizeof(di_int) * CHAR_BIT) - 1;
+ di_int s_a = a >> bits_in_dword_m1; // s_a = a < 0 ? -1 : 0
+ di_int s_b = b >> bits_in_dword_m1; // s_b = b < 0 ? -1 : 0
+ a = (a ^ s_a) - s_a; // negate if s_a == -1
+ b = (b ^ s_b) - s_b; // negate if s_b == -1
+ s_b ^= s_a; // sign of quotient
+ du_int r;
+ di_int q = (__udivmoddi4(a, b, &r) ^ s_b) - s_b; // negate if s_b == -1
+ *rem = (r ^ s_a) - s_a; // negate if s_a == -1
+ return q;
}
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/divmodsi4.c b/contrib/llvm-project/compiler-rt/lib/builtins/divmodsi4.c
index 402eed22fe7a..a85e2993b4e9 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/divmodsi4.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/divmodsi4.c
@@ -16,7 +16,14 @@
// Returns: a / b, *rem = a % b
COMPILER_RT_ABI si_int __divmodsi4(si_int a, si_int b, si_int *rem) {
- si_int d = __divsi3(a, b);
- *rem = a - (d * b);
- return d;
+ const int bits_in_word_m1 = (int)(sizeof(si_int) * CHAR_BIT) - 1;
+ si_int s_a = a >> bits_in_word_m1; // s_a = a < 0 ? -1 : 0
+ si_int s_b = b >> bits_in_word_m1; // s_b = b < 0 ? -1 : 0
+ a = (a ^ s_a) - s_a; // negate if s_a == -1
+ b = (b ^ s_b) - s_b; // negate if s_b == -1
+ s_b ^= s_a; // sign of quotient
+ su_int r;
+ si_int q = (__udivmodsi4(a, b, &r) ^ s_b) - s_b; // negate if s_b == -1
+ *rem = (r ^ s_a) - s_a; // negate if s_a == -1
+ return q;
}
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/divmodti4.c b/contrib/llvm-project/compiler-rt/lib/builtins/divmodti4.c
new file mode 100644
index 000000000000..b243ba4ef853
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/divmodti4.c
@@ -0,0 +1,32 @@
+//===-- divmodti4.c - Implement __divmodti4 -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __divmodti4 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+#ifdef CRT_HAS_128BIT
+
+// Returns: a / b, *rem = a % b
+
+COMPILER_RT_ABI ti_int __divmodti4(ti_int a, ti_int b, ti_int *rem) {
+ const int bits_in_tword_m1 = (int)(sizeof(ti_int) * CHAR_BIT) - 1;
+ ti_int s_a = a >> bits_in_tword_m1; // s_a = a < 0 ? -1 : 0
+ ti_int s_b = b >> bits_in_tword_m1; // s_b = b < 0 ? -1 : 0
+ a = (a ^ s_a) - s_a; // negate if s_a == -1
+ b = (b ^ s_b) - s_b; // negate if s_b == -1
+ s_b ^= s_a; // sign of quotient
+ tu_int r;
+ ti_int q = (__udivmodti4(a, b, &r) ^ s_b) - s_b; // negate if s_b == -1
+ *rem = (r ^ s_a) - s_a; // negate if s_a == -1
+ return q;
+}
+
+#endif // CRT_HAS_128BIT
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/divsf3.c b/contrib/llvm-project/compiler-rt/lib/builtins/divsf3.c
index 593f93b45ac2..5744c015240b 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/divsf3.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/divsf3.c
@@ -9,181 +9,17 @@
// This file implements single-precision soft-float division
// with the IEEE-754 default rounding (to nearest, ties to even).
//
-// For simplicity, this implementation currently flushes denormals to zero.
-// It should be a fairly straightforward exercise to implement gradual
-// underflow with correct rounding.
-//
//===----------------------------------------------------------------------===//
#define SINGLE_PRECISION
-#include "fp_lib.h"
-
-COMPILER_RT_ABI fp_t __divsf3(fp_t a, fp_t b) {
-
- const unsigned int aExponent = toRep(a) >> significandBits & maxExponent;
- const unsigned int bExponent = toRep(b) >> significandBits & maxExponent;
- const rep_t quotientSign = (toRep(a) ^ toRep(b)) & signBit;
-
- rep_t aSignificand = toRep(a) & significandMask;
- rep_t bSignificand = toRep(b) & significandMask;
- int scale = 0;
-
- // Detect if a or b is zero, denormal, infinity, or NaN.
- if (aExponent - 1U >= maxExponent - 1U ||
- bExponent - 1U >= maxExponent - 1U) {
-
- const rep_t aAbs = toRep(a) & absMask;
- const rep_t bAbs = toRep(b) & absMask;
-
- // NaN / anything = qNaN
- if (aAbs > infRep)
- return fromRep(toRep(a) | quietBit);
- // anything / NaN = qNaN
- if (bAbs > infRep)
- return fromRep(toRep(b) | quietBit);
-
- if (aAbs == infRep) {
- // infinity / infinity = NaN
- if (bAbs == infRep)
- return fromRep(qnanRep);
- // infinity / anything else = +/- infinity
- else
- return fromRep(aAbs | quotientSign);
- }
-
- // anything else / infinity = +/- 0
- if (bAbs == infRep)
- return fromRep(quotientSign);
-
- if (!aAbs) {
- // zero / zero = NaN
- if (!bAbs)
- return fromRep(qnanRep);
- // zero / anything else = +/- zero
- else
- return fromRep(quotientSign);
- }
- // anything else / zero = +/- infinity
- if (!bAbs)
- return fromRep(infRep | quotientSign);
-
- // One or both of a or b is denormal. The other (if applicable) is a
- // normal number. Renormalize one or both of a and b, and set scale to
- // include the necessary exponent adjustment.
- if (aAbs < implicitBit)
- scale += normalize(&aSignificand);
- if (bAbs < implicitBit)
- scale -= normalize(&bSignificand);
- }
-
- // Set the implicit significand bit. If we fell through from the
- // denormal path it was already set by normalize( ), but setting it twice
- // won't hurt anything.
- aSignificand |= implicitBit;
- bSignificand |= implicitBit;
- int quotientExponent = aExponent - bExponent + scale;
- // 0x7504F333 / 2^32 + 1 = 3/4 + 1/sqrt(2)
-
- // Align the significand of b as a Q31 fixed-point number in the range
- // [1, 2.0) and get a Q32 approximate reciprocal using a small minimax
- // polynomial approximation: reciprocal = 3/4 + 1/sqrt(2) - b/2. This
- // is accurate to about 3.5 binary digits.
- uint32_t q31b = bSignificand << 8;
- uint32_t reciprocal = UINT32_C(0x7504f333) - q31b;
-
- // Now refine the reciprocal estimate using a Newton-Raphson iteration:
- //
- // x1 = x0 * (2 - x0 * b)
- //
- // This doubles the number of correct binary digits in the approximation
- // with each iteration.
- uint32_t correction;
- correction = -((uint64_t)reciprocal * q31b >> 32);
- reciprocal = (uint64_t)reciprocal * correction >> 31;
- correction = -((uint64_t)reciprocal * q31b >> 32);
- reciprocal = (uint64_t)reciprocal * correction >> 31;
- correction = -((uint64_t)reciprocal * q31b >> 32);
- reciprocal = (uint64_t)reciprocal * correction >> 31;
-
- // Adust the final 32-bit reciprocal estimate downward to ensure that it is
- // strictly smaller than the infinitely precise exact reciprocal. Because
- // the computation of the Newton-Raphson step is truncating at every step,
- // this adjustment is small; most of the work is already done.
- reciprocal -= 2;
-
- // The numerical reciprocal is accurate to within 2^-28, lies in the
- // interval [0x1.000000eep-1, 0x1.fffffffcp-1], and is strictly smaller
- // than the true reciprocal of b. Multiplying a by this reciprocal thus
- // gives a numerical q = a/b in Q24 with the following properties:
- //
- // 1. q < a/b
- // 2. q is in the interval [0x1.000000eep-1, 0x1.fffffffcp0)
- // 3. The error in q is at most 2^-24 + 2^-27 -- the 2^24 term comes
- // from the fact that we truncate the product, and the 2^27 term
- // is the error in the reciprocal of b scaled by the maximum
- // possible value of a. As a consequence of this error bound,
- // either q or nextafter(q) is the correctly rounded.
- rep_t quotient = (uint64_t)reciprocal * (aSignificand << 1) >> 32;
-
- // Two cases: quotient is in [0.5, 1.0) or quotient is in [1.0, 2.0).
- // In either case, we are going to compute a residual of the form
- //
- // r = a - q*b
- //
- // We know from the construction of q that r satisfies:
- //
- // 0 <= r < ulp(q)*b
- //
- // If r is greater than 1/2 ulp(q)*b, then q rounds up. Otherwise, we
- // already have the correct result. The exact halfway case cannot occur.
- // We also take this time to right shift quotient if it falls in the [1,2)
- // range and adjust the exponent accordingly.
- rep_t residual;
- if (quotient < (implicitBit << 1)) {
- residual = (aSignificand << 24) - quotient * bSignificand;
- quotientExponent--;
- } else {
- quotient >>= 1;
- residual = (aSignificand << 23) - quotient * bSignificand;
- }
-
- const int writtenExponent = quotientExponent + exponentBias;
- if (writtenExponent >= maxExponent) {
- // If we have overflowed the exponent, return infinity.
- return fromRep(infRep | quotientSign);
- }
+#define NUMBER_OF_HALF_ITERATIONS 0
+#define NUMBER_OF_FULL_ITERATIONS 3
+#define USE_NATIVE_FULL_ITERATIONS
- else if (writtenExponent < 1) {
- if (writtenExponent == 0) {
- // Check whether the rounded result is normal.
- const bool round = (residual << 1) > bSignificand;
- // Clear the implicit bit.
- rep_t absResult = quotient & significandMask;
- // Round.
- absResult += round;
- if (absResult & ~significandMask) {
- // The rounded result is normal; return it.
- return fromRep(absResult | quotientSign);
- }
- }
- // Flush denormals to zero. In the future, it would be nice to add
- // code to round them correctly.
- return fromRep(quotientSign);
- }
+#include "fp_div_impl.inc"
- else {
- const bool round = (residual << 1) > bSignificand;
- // Clear the implicit bit.
- rep_t absResult = quotient & significandMask;
- // Insert the exponent.
- absResult |= (rep_t)writtenExponent << significandBits;
- // Round.
- absResult += round;
- // Insert the sign and return.
- return fromRep(absResult | quotientSign);
- }
-}
+COMPILER_RT_ABI fp_t __divsf3(fp_t a, fp_t b) { return __divXf3__(a, b); }
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/divsi3.c b/contrib/llvm-project/compiler-rt/lib/builtins/divsi3.c
index b97e11119f0e..f514407477f3 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/divsi3.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/divsi3.c
@@ -14,21 +14,16 @@
// Returns: a / b
-COMPILER_RT_ABI si_int __divsi3(si_int a, si_int b) {
- const int bits_in_word_m1 = (int)(sizeof(si_int) * CHAR_BIT) - 1;
- si_int s_a = a >> bits_in_word_m1; // s_a = a < 0 ? -1 : 0
- si_int s_b = b >> bits_in_word_m1; // s_b = b < 0 ? -1 : 0
- a = (a ^ s_a) - s_a; // negate if s_a == -1
- b = (b ^ s_b) - s_b; // negate if s_b == -1
- s_a ^= s_b; // sign of quotient
- //
- // On CPUs without unsigned hardware division support,
- // this calls __udivsi3 (notice the cast to su_int).
- // On CPUs with unsigned hardware division support,
- // this uses the unsigned division instruction.
- //
- return ((su_int)a / (su_int)b ^ s_a) - s_a; // negate if s_a == -1
-}
+#define fixint_t si_int
+#define fixuint_t su_int
+// On CPUs without unsigned hardware division support,
+// this calls __udivsi3 (notice the cast to su_int).
+// On CPUs with unsigned hardware division support,
+// this uses the unsigned division instruction.
+#define COMPUTE_UDIV(a, b) ((su_int)(a) / (su_int)(b))
+#include "int_div_impl.inc"
+
+COMPILER_RT_ABI si_int __divsi3(si_int a, si_int b) { return __divXi3(a, b); }
#if defined(__ARM_EABI__)
COMPILER_RT_ALIAS(__divsi3, __aeabi_idiv)
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/divtf3.c b/contrib/llvm-project/compiler-rt/lib/builtins/divtf3.c
index ce462d4d46c1..5bcc9a8e4aa1 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/divtf3.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/divtf3.c
@@ -9,213 +9,18 @@
// This file implements quad-precision soft-float division
// with the IEEE-754 default rounding (to nearest, ties to even).
//
-// For simplicity, this implementation currently flushes denormals to zero.
-// It should be a fairly straightforward exercise to implement gradual
-// underflow with correct rounding.
-//
//===----------------------------------------------------------------------===//
#define QUAD_PRECISION
#include "fp_lib.h"
#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
-COMPILER_RT_ABI fp_t __divtf3(fp_t a, fp_t b) {
-
- const unsigned int aExponent = toRep(a) >> significandBits & maxExponent;
- const unsigned int bExponent = toRep(b) >> significandBits & maxExponent;
- const rep_t quotientSign = (toRep(a) ^ toRep(b)) & signBit;
-
- rep_t aSignificand = toRep(a) & significandMask;
- rep_t bSignificand = toRep(b) & significandMask;
- int scale = 0;
-
- // Detect if a or b is zero, denormal, infinity, or NaN.
- if (aExponent - 1U >= maxExponent - 1U ||
- bExponent - 1U >= maxExponent - 1U) {
-
- const rep_t aAbs = toRep(a) & absMask;
- const rep_t bAbs = toRep(b) & absMask;
-
- // NaN / anything = qNaN
- if (aAbs > infRep)
- return fromRep(toRep(a) | quietBit);
- // anything / NaN = qNaN
- if (bAbs > infRep)
- return fromRep(toRep(b) | quietBit);
-
- if (aAbs == infRep) {
- // infinity / infinity = NaN
- if (bAbs == infRep)
- return fromRep(qnanRep);
- // infinity / anything else = +/- infinity
- else
- return fromRep(aAbs | quotientSign);
- }
-
- // anything else / infinity = +/- 0
- if (bAbs == infRep)
- return fromRep(quotientSign);
-
- if (!aAbs) {
- // zero / zero = NaN
- if (!bAbs)
- return fromRep(qnanRep);
- // zero / anything else = +/- zero
- else
- return fromRep(quotientSign);
- }
- // anything else / zero = +/- infinity
- if (!bAbs)
- return fromRep(infRep | quotientSign);
-
- // One or both of a or b is denormal. The other (if applicable) is a
- // normal number. Renormalize one or both of a and b, and set scale to
- // include the necessary exponent adjustment.
- if (aAbs < implicitBit)
- scale += normalize(&aSignificand);
- if (bAbs < implicitBit)
- scale -= normalize(&bSignificand);
- }
-
- // Set the implicit significand bit. If we fell through from the
- // denormal path it was already set by normalize( ), but setting it twice
- // won't hurt anything.
- aSignificand |= implicitBit;
- bSignificand |= implicitBit;
- int quotientExponent = aExponent - bExponent + scale;
-
- // Align the significand of b as a Q63 fixed-point number in the range
- // [1, 2.0) and get a Q64 approximate reciprocal using a small minimax
- // polynomial approximation: reciprocal = 3/4 + 1/sqrt(2) - b/2. This
- // is accurate to about 3.5 binary digits.
- const uint64_t q63b = bSignificand >> 49;
- uint64_t recip64 = UINT64_C(0x7504f333F9DE6484) - q63b;
- // 0x7504f333F9DE6484 / 2^64 + 1 = 3/4 + 1/sqrt(2)
-
- // Now refine the reciprocal estimate using a Newton-Raphson iteration:
- //
- // x1 = x0 * (2 - x0 * b)
- //
- // This doubles the number of correct binary digits in the approximation
- // with each iteration.
- uint64_t correction64;
- correction64 = -((rep_t)recip64 * q63b >> 64);
- recip64 = (rep_t)recip64 * correction64 >> 63;
- correction64 = -((rep_t)recip64 * q63b >> 64);
- recip64 = (rep_t)recip64 * correction64 >> 63;
- correction64 = -((rep_t)recip64 * q63b >> 64);
- recip64 = (rep_t)recip64 * correction64 >> 63;
- correction64 = -((rep_t)recip64 * q63b >> 64);
- recip64 = (rep_t)recip64 * correction64 >> 63;
- correction64 = -((rep_t)recip64 * q63b >> 64);
- recip64 = (rep_t)recip64 * correction64 >> 63;
-
- // The reciprocal may have overflowed to zero if the upper half of b is
- // exactly 1.0. This would sabatoge the full-width final stage of the
- // computation that follows, so we adjust the reciprocal down by one bit.
- recip64--;
-
- // We need to perform one more iteration to get us to 112 binary digits;
- // The last iteration needs to happen with extra precision.
- const uint64_t q127blo = bSignificand << 15;
- rep_t correction, reciprocal;
-
- // NOTE: This operation is equivalent to __multi3, which is not implemented
- // in some architechure
- rep_t r64q63, r64q127, r64cH, r64cL, dummy;
- wideMultiply((rep_t)recip64, (rep_t)q63b, &dummy, &r64q63);
- wideMultiply((rep_t)recip64, (rep_t)q127blo, &dummy, &r64q127);
-
- correction = -(r64q63 + (r64q127 >> 64));
-
- uint64_t cHi = correction >> 64;
- uint64_t cLo = correction;
-
- wideMultiply((rep_t)recip64, (rep_t)cHi, &dummy, &r64cH);
- wideMultiply((rep_t)recip64, (rep_t)cLo, &dummy, &r64cL);
-
- reciprocal = r64cH + (r64cL >> 64);
-
- // Adjust the final 128-bit reciprocal estimate downward to ensure that it
- // is strictly smaller than the infinitely precise exact reciprocal. Because
- // the computation of the Newton-Raphson step is truncating at every step,
- // this adjustment is small; most of the work is already done.
- reciprocal -= 2;
-
- // The numerical reciprocal is accurate to within 2^-112, lies in the
- // interval [0.5, 1.0), and is strictly smaller than the true reciprocal
- // of b. Multiplying a by this reciprocal thus gives a numerical q = a/b
- // in Q127 with the following properties:
- //
- // 1. q < a/b
- // 2. q is in the interval [0.5, 2.0)
- // 3. The error in q is bounded away from 2^-113 (actually, we have a
- // couple of bits to spare, but this is all we need).
-
- // We need a 128 x 128 multiply high to compute q, which isn't a basic
- // operation in C, so we need to be a little bit fussy.
- rep_t quotient, quotientLo;
- wideMultiply(aSignificand << 2, reciprocal, &quotient, &quotientLo);
-
- // Two cases: quotient is in [0.5, 1.0) or quotient is in [1.0, 2.0).
- // In either case, we are going to compute a residual of the form
- //
- // r = a - q*b
- //
- // We know from the construction of q that r satisfies:
- //
- // 0 <= r < ulp(q)*b
- //
- // If r is greater than 1/2 ulp(q)*b, then q rounds up. Otherwise, we
- // already have the correct result. The exact halfway case cannot occur.
- // We also take this time to right shift quotient if it falls in the [1,2)
- // range and adjust the exponent accordingly.
- rep_t residual;
- rep_t qb;
- if (quotient < (implicitBit << 1)) {
- wideMultiply(quotient, bSignificand, &dummy, &qb);
- residual = (aSignificand << 113) - qb;
- quotientExponent--;
- } else {
- quotient >>= 1;
- wideMultiply(quotient, bSignificand, &dummy, &qb);
- residual = (aSignificand << 112) - qb;
- }
+#define NUMBER_OF_HALF_ITERATIONS 4
+#define NUMBER_OF_FULL_ITERATIONS 1
- const int writtenExponent = quotientExponent + exponentBias;
+#include "fp_div_impl.inc"
- if (writtenExponent >= maxExponent) {
- // If we have overflowed the exponent, return infinity.
- return fromRep(infRep | quotientSign);
- } else if (writtenExponent < 1) {
- if (writtenExponent == 0) {
- // Check whether the rounded result is normal.
- const bool round = (residual << 1) > bSignificand;
- // Clear the implicit bit.
- rep_t absResult = quotient & significandMask;
- // Round.
- absResult += round;
- if (absResult & ~significandMask) {
- // The rounded result is normal; return it.
- return fromRep(absResult | quotientSign);
- }
- }
- // Flush denormals to zero. In the future, it would be nice to add
- // code to round them correctly.
- return fromRep(quotientSign);
- } else {
- const bool round = (residual << 1) >= bSignificand;
- // Clear the implicit bit.
- rep_t absResult = quotient & significandMask;
- // Insert the exponent.
- absResult |= (rep_t)writtenExponent << significandBits;
- // Round.
- absResult += round;
- // Insert the sign and return.
- const fp_t result = fromRep(absResult | quotientSign);
- return result;
- }
-}
+COMPILER_RT_ABI fp_t __divtf3(fp_t a, fp_t b) { return __divXf3__(a, b); }
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/divti3.c b/contrib/llvm-project/compiler-rt/lib/builtins/divti3.c
index 6d007fe34654..80f2130b590e 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/divti3.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/divti3.c
@@ -16,14 +16,11 @@
// Returns: a / b
-COMPILER_RT_ABI ti_int __divti3(ti_int a, ti_int b) {
- const int bits_in_tword_m1 = (int)(sizeof(ti_int) * CHAR_BIT) - 1;
- ti_int s_a = a >> bits_in_tword_m1; // s_a = a < 0 ? -1 : 0
- ti_int s_b = b >> bits_in_tword_m1; // s_b = b < 0 ? -1 : 0
- a = (a ^ s_a) - s_a; // negate if s_a == -1
- b = (b ^ s_b) - s_b; // negate if s_b == -1
- s_a ^= s_b; // sign of quotient
- return (__udivmodti4(a, b, (tu_int *)0) ^ s_a) - s_a; // negate if s_a == -1
-}
+#define fixint_t ti_int
+#define fixuint_t tu_int
+#define COMPUTE_UDIV(a, b) __udivmodti4((a), (b), (tu_int *)0)
+#include "int_div_impl.inc"
+
+COMPILER_RT_ABI ti_int __divti3(ti_int a, ti_int b) { return __divXi3(a, b); }
#endif // CRT_HAS_128BIT
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/emutls.c b/contrib/llvm-project/compiler-rt/lib/builtins/emutls.c
index e0aa19155f7d..98cabd917d6c 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/emutls.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/emutls.c
@@ -182,9 +182,10 @@ static void emutls_exit(void) {
}
}
-#pragma warning(push)
-#pragma warning(disable : 4100)
static BOOL CALLBACK emutls_init(PINIT_ONCE p0, PVOID p1, PVOID *p2) {
+ (void)p0;
+ (void)p1;
+ (void)p2;
emutls_mutex =
(LPCRITICAL_SECTION)_aligned_malloc(sizeof(CRITICAL_SECTION), 16);
if (!emutls_mutex) {
@@ -251,8 +252,6 @@ static __inline void __atomic_store_n(void *ptr, uintptr_t val, unsigned type) {
#endif // __ATOMIC_RELEASE
-#pragma warning(pop)
-
#endif // _WIN32
static size_t emutls_num_object = 0; // number of allocated TLS objects
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/extendhfsf2.c b/contrib/llvm-project/compiler-rt/lib/builtins/extendhfsf2.c
index 7c1a76eb5851..0159ab09d3eb 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/extendhfsf2.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/extendhfsf2.c
@@ -12,15 +12,15 @@
// Use a forwarding definition and noinline to implement a poor man's alias,
// as there isn't a good cross-platform way of defining one.
-COMPILER_RT_ABI NOINLINE float __extendhfsf2(uint16_t a) {
+COMPILER_RT_ABI NOINLINE float __extendhfsf2(src_t a) {
return __extendXfYf2__(a);
}
-COMPILER_RT_ABI float __gnu_h2f_ieee(uint16_t a) { return __extendhfsf2(a); }
+COMPILER_RT_ABI float __gnu_h2f_ieee(src_t a) { return __extendhfsf2(a); }
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
-AEABI_RTABI float __aeabi_h2f(uint16_t a) { return __extendhfsf2(a); }
+AEABI_RTABI float __aeabi_h2f(src_t a) { return __extendhfsf2(a); }
#else
COMPILER_RT_ALIAS(__extendhfsf2, __aeabi_h2f)
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/extendhftf2.c b/contrib/llvm-project/compiler-rt/lib/builtins/extendhftf2.c
new file mode 100644
index 000000000000..aefe9737d34f
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/extendhftf2.c
@@ -0,0 +1,23 @@
+//===-- lib/extendhftf2.c - half -> quad conversion ---------------*- C -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is dual licensed under the MIT and the University of Illinois Open
+// Source Licenses. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#define QUAD_PRECISION
+#include "fp_lib.h"
+
+#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT) && \
+ defined(COMPILER_RT_HAS_FLOAT16)
+#define SRC_HALF
+#define DST_QUAD
+#include "fp_extend_impl.inc"
+
+COMPILER_RT_ABI long double __extendhftf2(_Float16 a) {
+ return __extendXfYf2__(a);
+}
+
+#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/fp_div_impl.inc b/contrib/llvm-project/compiler-rt/lib/builtins/fp_div_impl.inc
new file mode 100644
index 000000000000..29bcd1920edf
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/fp_div_impl.inc
@@ -0,0 +1,419 @@
+//===-- fp_div_impl.inc - Floating point division -----------------*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements soft-float division with the IEEE-754 default
+// rounding (to nearest, ties to even).
+//
+//===----------------------------------------------------------------------===//
+
+#include "fp_lib.h"
+
+// The __divXf3__ function implements Newton-Raphson floating point division.
+// It uses 3 iterations for float32, 4 for float64 and 5 for float128,
+// respectively. Due to number of significant bits being roughly doubled
+// every iteration, the two modes are supported: N full-width iterations (as
+// it is done for float32 by default) and (N-1) half-width iteration plus one
+// final full-width iteration. It is expected that half-width integer
+// operations (w.r.t rep_t size) can be performed faster for some hardware but
+// they require error estimations to be computed separately due to larger
+// computational errors caused by truncating intermediate results.
+
+// Half the bit-size of rep_t
+#define HW (typeWidth / 2)
+// rep_t-sized bitmask with lower half of bits set to ones
+#define loMask (REP_C(-1) >> HW)
+
+#if NUMBER_OF_FULL_ITERATIONS < 1
+#error At least one full iteration is required
+#endif
+
+static __inline fp_t __divXf3__(fp_t a, fp_t b) {
+
+ const unsigned int aExponent = toRep(a) >> significandBits & maxExponent;
+ const unsigned int bExponent = toRep(b) >> significandBits & maxExponent;
+ const rep_t quotientSign = (toRep(a) ^ toRep(b)) & signBit;
+
+ rep_t aSignificand = toRep(a) & significandMask;
+ rep_t bSignificand = toRep(b) & significandMask;
+ int scale = 0;
+
+ // Detect if a or b is zero, denormal, infinity, or NaN.
+ if (aExponent - 1U >= maxExponent - 1U ||
+ bExponent - 1U >= maxExponent - 1U) {
+
+ const rep_t aAbs = toRep(a) & absMask;
+ const rep_t bAbs = toRep(b) & absMask;
+
+ // NaN / anything = qNaN
+ if (aAbs > infRep)
+ return fromRep(toRep(a) | quietBit);
+ // anything / NaN = qNaN
+ if (bAbs > infRep)
+ return fromRep(toRep(b) | quietBit);
+
+ if (aAbs == infRep) {
+ // infinity / infinity = NaN
+ if (bAbs == infRep)
+ return fromRep(qnanRep);
+ // infinity / anything else = +/- infinity
+ else
+ return fromRep(aAbs | quotientSign);
+ }
+
+ // anything else / infinity = +/- 0
+ if (bAbs == infRep)
+ return fromRep(quotientSign);
+
+ if (!aAbs) {
+ // zero / zero = NaN
+ if (!bAbs)
+ return fromRep(qnanRep);
+ // zero / anything else = +/- zero
+ else
+ return fromRep(quotientSign);
+ }
+ // anything else / zero = +/- infinity
+ if (!bAbs)
+ return fromRep(infRep | quotientSign);
+
+ // One or both of a or b is denormal. The other (if applicable) is a
+ // normal number. Renormalize one or both of a and b, and set scale to
+ // include the necessary exponent adjustment.
+ if (aAbs < implicitBit)
+ scale += normalize(&aSignificand);
+ if (bAbs < implicitBit)
+ scale -= normalize(&bSignificand);
+ }
+
+ // Set the implicit significand bit. If we fell through from the
+ // denormal path it was already set by normalize( ), but setting it twice
+ // won't hurt anything.
+ aSignificand |= implicitBit;
+ bSignificand |= implicitBit;
+
+ int writtenExponent = (aExponent - bExponent + scale) + exponentBias;
+
+ const rep_t b_UQ1 = bSignificand << (typeWidth - significandBits - 1);
+
+ // Align the significand of b as a UQ1.(n-1) fixed-point number in the range
+ // [1.0, 2.0) and get a UQ0.n approximate reciprocal using a small minimax
+ // polynomial approximation: x0 = 3/4 + 1/sqrt(2) - b/2.
+ // The max error for this approximation is achieved at endpoints, so
+ // abs(x0(b) - 1/b) <= abs(x0(1) - 1/1) = 3/4 - 1/sqrt(2) = 0.04289...,
+ // which is about 4.5 bits.
+ // The initial approximation is between x0(1.0) = 0.9571... and x0(2.0) = 0.4571...
+
+ // Then, refine the reciprocal estimate using a quadratically converging
+ // Newton-Raphson iteration:
+ // x_{n+1} = x_n * (2 - x_n * b)
+ //
+ // Let b be the original divisor considered "in infinite precision" and
+ // obtained from IEEE754 representation of function argument (with the
+ // implicit bit set). Corresponds to rep_t-sized b_UQ1 represented in
+ // UQ1.(W-1).
+ //
+ // Let b_hw be an infinitely precise number obtained from the highest (HW-1)
+ // bits of divisor significand (with the implicit bit set). Corresponds to
+ // half_rep_t-sized b_UQ1_hw represented in UQ1.(HW-1) that is a **truncated**
+ // version of b_UQ1.
+ //
+ // Let e_n := x_n - 1/b_hw
+ // E_n := x_n - 1/b
+ // abs(E_n) <= abs(e_n) + (1/b_hw - 1/b)
+ // = abs(e_n) + (b - b_hw) / (b*b_hw)
+ // <= abs(e_n) + 2 * 2^-HW
+
+ // rep_t-sized iterations may be slower than the corresponding half-width
+ // variant depending on the hardware and whether single/double/quad precision
+ // is selected.
+ // NB: Using half-width iterations increases computation errors due to
+ // rounding, so error estimations have to be computed taking the selected
+ // mode into account!
+#if NUMBER_OF_HALF_ITERATIONS > 0
+ // Starting with (n-1) half-width iterations
+ const half_rep_t b_UQ1_hw = bSignificand >> (significandBits + 1 - HW);
+
+ // C is (3/4 + 1/sqrt(2)) - 1 truncated to W0 fractional bits as UQ0.HW
+ // with W0 being either 16 or 32 and W0 <= HW.
+ // That is, C is the aforementioned 3/4 + 1/sqrt(2) constant (from which
+ // b/2 is subtracted to obtain x0) wrapped to [0, 1) range.
+#if defined(SINGLE_PRECISION)
+ // Use 16-bit initial estimation in case we are using half-width iterations
+ // for float32 division. This is expected to be useful for some 16-bit
+ // targets. Not used by default as it requires performing more work during
+ // rounding and would hardly help on regular 32- or 64-bit targets.
+ const half_rep_t C_hw = HALF_REP_C(0x7504);
+#else
+ // HW is at least 32. Shifting into the highest bits if needed.
+ const half_rep_t C_hw = HALF_REP_C(0x7504F333) << (HW - 32);
+#endif
+
+ // b >= 1, thus an upper bound for 3/4 + 1/sqrt(2) - b/2 is about 0.9572,
+ // so x0 fits to UQ0.HW without wrapping.
+ half_rep_t x_UQ0_hw = C_hw - (b_UQ1_hw /* exact b_hw/2 as UQ0.HW */);
+ // An e_0 error is comprised of errors due to
+ // * x0 being an inherently imprecise first approximation of 1/b_hw
+ // * C_hw being some (irrational) number **truncated** to W0 bits
+ // Please note that e_0 is calculated against the infinitely precise
+ // reciprocal of b_hw (that is, **truncated** version of b).
+ //
+ // e_0 <= 3/4 - 1/sqrt(2) + 2^-W0
+
+ // By construction, 1 <= b < 2
+ // f(x) = x * (2 - b*x) = 2*x - b*x^2
+ // f'(x) = 2 * (1 - b*x)
+ //
+ // On the [0, 1] interval, f(0) = 0,
+ // then it increases until f(1/b) = 1 / b, maximum on (0, 1),
+ // then it decreases to f(1) = 2 - b
+ //
+ // Let g(x) = x - f(x) = b*x^2 - x.
+ // On (0, 1/b), g(x) < 0 <=> f(x) > x
+ // On (1/b, 1], g(x) > 0 <=> f(x) < x
+ //
+ // For half-width iterations, b_hw is used instead of b.
+ REPEAT_N_TIMES(NUMBER_OF_HALF_ITERATIONS, {
+ // corr_UQ1_hw can be **larger** than 2 - b_hw*x by at most 1*Ulp
+ // of corr_UQ1_hw.
+ // "0.0 - (...)" is equivalent to "2.0 - (...)" in UQ1.(HW-1).
+ // On the other hand, corr_UQ1_hw should not overflow from 2.0 to 0.0 provided
+ // no overflow occurred earlier: ((rep_t)x_UQ0_hw * b_UQ1_hw >> HW) is
+ // expected to be strictly positive because b_UQ1_hw has its highest bit set
+ // and x_UQ0_hw should be rather large (it converges to 1/2 < 1/b_hw <= 1).
+ half_rep_t corr_UQ1_hw = 0 - ((rep_t)x_UQ0_hw * b_UQ1_hw >> HW);
+
+ // Now, we should multiply UQ0.HW and UQ1.(HW-1) numbers, naturally
+ // obtaining an UQ1.(HW-1) number and proving its highest bit could be
+ // considered to be 0 to be able to represent it in UQ0.HW.
+ // From the above analysis of f(x), if corr_UQ1_hw would be represented
+ // without any intermediate loss of precision (that is, in twice_rep_t)
+ // x_UQ0_hw could be at most [1.]000... if b_hw is exactly 1.0 and strictly
+ // less otherwise. On the other hand, to obtain [1.]000..., one has to pass
+ // 1/b_hw == 1.0 to f(x), so this cannot occur at all without overflow (due
+ // to 1.0 being not representable as UQ0.HW).
+ // The fact corr_UQ1_hw was virtually rounded up (due to result of
+ // multiplication being **first** truncated, then negated - to improve
+ // error estimations) can increase x_UQ0_hw by up to 2*Ulp of x_UQ0_hw.
+ x_UQ0_hw = (rep_t)x_UQ0_hw * corr_UQ1_hw >> (HW - 1);
+ // Now, either no overflow occurred or x_UQ0_hw is 0 or 1 in its half_rep_t
+ // representation. In the latter case, x_UQ0_hw will be either 0 or 1 after
+ // any number of iterations, so just subtract 2 from the reciprocal
+ // approximation after last iteration.
+
+ // In infinite precision, with 0 <= eps1, eps2 <= U = 2^-HW:
+ // corr_UQ1_hw = 2 - (1/b_hw + e_n) * b_hw + 2*eps1
+ // = 1 - e_n * b_hw + 2*eps1
+ // x_UQ0_hw = (1/b_hw + e_n) * (1 - e_n*b_hw + 2*eps1) - eps2
+ // = 1/b_hw - e_n + 2*eps1/b_hw + e_n - e_n^2*b_hw + 2*e_n*eps1 - eps2
+ // = 1/b_hw + 2*eps1/b_hw - e_n^2*b_hw + 2*e_n*eps1 - eps2
+ // e_{n+1} = -e_n^2*b_hw + 2*eps1/b_hw + 2*e_n*eps1 - eps2
+ // = 2*e_n*eps1 - (e_n^2*b_hw + eps2) + 2*eps1/b_hw
+ // \------ >0 -------/ \-- >0 ---/
+ // abs(e_{n+1}) <= 2*abs(e_n)*U + max(2*e_n^2 + U, 2 * U)
+ })
+ // For initial half-width iterations, U = 2^-HW
+ // Let abs(e_n) <= u_n * U,
+ // then abs(e_{n+1}) <= 2 * u_n * U^2 + max(2 * u_n^2 * U^2 + U, 2 * U)
+ // u_{n+1} <= 2 * u_n * U + max(2 * u_n^2 * U + 1, 2)
+
+ // Account for possible overflow (see above). For an overflow to occur for the
+ // first time, for "ideal" corr_UQ1_hw (that is, without intermediate
+ // truncation), the result of x_UQ0_hw * corr_UQ1_hw should be either maximum
+ // value representable in UQ0.HW or less by 1. This means that 1/b_hw have to
+ // be not below that value (see g(x) above), so it is safe to decrement just
+ // once after the final iteration. On the other hand, an effective value of
+ // divisor changes after this point (from b_hw to b), so adjust here.
+ x_UQ0_hw -= 1U;
+ rep_t x_UQ0 = (rep_t)x_UQ0_hw << HW;
+ x_UQ0 -= 1U;
+
+#else
+ // C is (3/4 + 1/sqrt(2)) - 1 truncated to 32 fractional bits as UQ0.n
+ const rep_t C = REP_C(0x7504F333) << (typeWidth - 32);
+ rep_t x_UQ0 = C - b_UQ1;
+ // E_0 <= 3/4 - 1/sqrt(2) + 2 * 2^-32
+#endif
+
+ // Error estimations for full-precision iterations are calculated just
+ // as above, but with U := 2^-W and taking extra decrementing into account.
+ // We need at least one such iteration.
+
+#ifdef USE_NATIVE_FULL_ITERATIONS
+ REPEAT_N_TIMES(NUMBER_OF_FULL_ITERATIONS, {
+ rep_t corr_UQ1 = 0 - ((twice_rep_t)x_UQ0 * b_UQ1 >> typeWidth);
+ x_UQ0 = (twice_rep_t)x_UQ0 * corr_UQ1 >> (typeWidth - 1);
+ })
+#else
+#if NUMBER_OF_FULL_ITERATIONS != 1
+#error Only a single emulated full iteration is supported
+#endif
+#if !(NUMBER_OF_HALF_ITERATIONS > 0)
+ // Cannot normally reach here: only one full-width iteration is requested and
+ // the total number of iterations should be at least 3 even for float32.
+#error Check NUMBER_OF_HALF_ITERATIONS, NUMBER_OF_FULL_ITERATIONS and USE_NATIVE_FULL_ITERATIONS.
+#endif
+ // Simulating operations on a twice_rep_t to perform a single final full-width
+ // iteration. Using ad-hoc multiplication implementations to take advantage
+ // of particular structure of operands.
+ rep_t blo = b_UQ1 & loMask;
+ // x_UQ0 = x_UQ0_hw * 2^HW - 1
+ // x_UQ0 * b_UQ1 = (x_UQ0_hw * 2^HW) * (b_UQ1_hw * 2^HW + blo) - b_UQ1
+ //
+ // <--- higher half ---><--- lower half --->
+ // [x_UQ0_hw * b_UQ1_hw]
+ // + [ x_UQ0_hw * blo ]
+ // - [ b_UQ1 ]
+ // = [ result ][.... discarded ...]
+ rep_t corr_UQ1 = 0U - ( (rep_t)x_UQ0_hw * b_UQ1_hw
+ + ((rep_t)x_UQ0_hw * blo >> HW)
+ - REP_C(1)); // account for *possible* carry
+ rep_t lo_corr = corr_UQ1 & loMask;
+ rep_t hi_corr = corr_UQ1 >> HW;
+ // x_UQ0 * corr_UQ1 = (x_UQ0_hw * 2^HW) * (hi_corr * 2^HW + lo_corr) - corr_UQ1
+ x_UQ0 = ((rep_t)x_UQ0_hw * hi_corr << 1)
+ + ((rep_t)x_UQ0_hw * lo_corr >> (HW - 1))
+ - REP_C(2); // 1 to account for the highest bit of corr_UQ1 can be 1
+ // 1 to account for possible carry
+ // Just like the case of half-width iterations but with possibility
+ // of overflowing by one extra Ulp of x_UQ0.
+ x_UQ0 -= 1U;
+ // ... and then traditional fixup by 2 should work
+
+ // On error estimation:
+ // abs(E_{N-1}) <= (u_{N-1} + 2 /* due to conversion e_n -> E_n */) * 2^-HW
+ // + (2^-HW + 2^-W))
+ // abs(E_{N-1}) <= (u_{N-1} + 3.01) * 2^-HW
+
+ // Then like for the half-width iterations:
+ // With 0 <= eps1, eps2 < 2^-W
+ // E_N = 4 * E_{N-1} * eps1 - (E_{N-1}^2 * b + 4 * eps2) + 4 * eps1 / b
+ // abs(E_N) <= 2^-W * [ 4 * abs(E_{N-1}) + max(2 * abs(E_{N-1})^2 * 2^W + 4, 8)) ]
+ // abs(E_N) <= 2^-W * [ 4 * (u_{N-1} + 3.01) * 2^-HW + max(4 + 2 * (u_{N-1} + 3.01)^2, 8) ]
+#endif
+
+ // Finally, account for possible overflow, as explained above.
+ x_UQ0 -= 2U;
+
+ // u_n for different precisions (with N-1 half-width iterations):
+ // W0 is the precision of C
+ // u_0 = (3/4 - 1/sqrt(2) + 2^-W0) * 2^HW
+
+ // Estimated with bc:
+ // define half1(un) { return 2.0 * (un + un^2) / 2.0^hw + 1.0; }
+ // define half2(un) { return 2.0 * un / 2.0^hw + 2.0; }
+ // define full1(un) { return 4.0 * (un + 3.01) / 2.0^hw + 2.0 * (un + 3.01)^2 + 4.0; }
+ // define full2(un) { return 4.0 * (un + 3.01) / 2.0^hw + 8.0; }
+
+ // | f32 (0 + 3) | f32 (2 + 1) | f64 (3 + 1) | f128 (4 + 1)
+ // u_0 | < 184224974 | < 2812.1 | < 184224974 | < 791240234244348797
+ // u_1 | < 15804007 | < 242.7 | < 15804007 | < 67877681371350440
+ // u_2 | < 116308 | < 2.81 | < 116308 | < 499533100252317
+ // u_3 | < 7.31 | | < 7.31 | < 27054456580
+ // u_4 | | | | < 80.4
+ // Final (U_N) | same as u_3 | < 72 | < 218 | < 13920
+
+ // Add 2 to U_N due to final decrement.
+
+#if defined(SINGLE_PRECISION) && NUMBER_OF_HALF_ITERATIONS == 2 && NUMBER_OF_FULL_ITERATIONS == 1
+#define RECIPROCAL_PRECISION REP_C(74)
+#elif defined(SINGLE_PRECISION) && NUMBER_OF_HALF_ITERATIONS == 0 && NUMBER_OF_FULL_ITERATIONS == 3
+#define RECIPROCAL_PRECISION REP_C(10)
+#elif defined(DOUBLE_PRECISION) && NUMBER_OF_HALF_ITERATIONS == 3 && NUMBER_OF_FULL_ITERATIONS == 1
+#define RECIPROCAL_PRECISION REP_C(220)
+#elif defined(QUAD_PRECISION) && NUMBER_OF_HALF_ITERATIONS == 4 && NUMBER_OF_FULL_ITERATIONS == 1
+#define RECIPROCAL_PRECISION REP_C(13922)
+#else
+#error Invalid number of iterations
+#endif
+
+ // Suppose 1/b - P * 2^-W < x < 1/b + P * 2^-W
+ x_UQ0 -= RECIPROCAL_PRECISION;
+ // Now 1/b - (2*P) * 2^-W < x < 1/b
+ // FIXME Is x_UQ0 still >= 0.5?
+
+ rep_t quotient_UQ1, dummy;
+ wideMultiply(x_UQ0, aSignificand << 1, &quotient_UQ1, &dummy);
+ // Now, a/b - 4*P * 2^-W < q < a/b for q=<quotient_UQ1:dummy> in UQ1.(SB+1+W).
+
+ // quotient_UQ1 is in [0.5, 2.0) as UQ1.(SB+1),
+ // adjust it to be in [1.0, 2.0) as UQ1.SB.
+ rep_t residualLo;
+ if (quotient_UQ1 < (implicitBit << 1)) {
+ // Highest bit is 0, so just reinterpret quotient_UQ1 as UQ1.SB,
+ // effectively doubling its value as well as its error estimation.
+ residualLo = (aSignificand << (significandBits + 1)) - quotient_UQ1 * bSignificand;
+ writtenExponent -= 1;
+ aSignificand <<= 1;
+ } else {
+ // Highest bit is 1 (the UQ1.(SB+1) value is in [1, 2)), convert it
+ // to UQ1.SB by right shifting by 1. Least significant bit is omitted.
+ quotient_UQ1 >>= 1;
+ residualLo = (aSignificand << significandBits) - quotient_UQ1 * bSignificand;
+ }
+ // NB: residualLo is calculated above for the normal result case.
+ // It is re-computed on denormal path that is expected to be not so
+ // performance-sensitive.
+
+ // Now, q cannot be greater than a/b and can differ by at most 8*P * 2^-W + 2^-SB
+ // Each NextAfter() increments the floating point value by at least 2^-SB
+ // (more, if exponent was incremented).
+ // Different cases (<---> is of 2^-SB length, * = a/b that is shown as a midpoint):
+ // q
+ // | | * | | | | |
+ // <---> 2^t
+ // | | | | | * | |
+ // q
+ // To require at most one NextAfter(), an error should be less than 1.5 * 2^-SB.
+ // (8*P) * 2^-W + 2^-SB < 1.5 * 2^-SB
+ // (8*P) * 2^-W < 0.5 * 2^-SB
+ // P < 2^(W-4-SB)
+ // Generally, for at most R NextAfter() to be enough,
+ // P < (2*R - 1) * 2^(W-4-SB)
+ // For f32 (0+3): 10 < 32 (OK)
+ // For f32 (2+1): 32 < 74 < 32 * 3, so two NextAfter() are required
+ // For f64: 220 < 256 (OK)
+ // For f128: 4096 * 3 < 13922 < 4096 * 5 (three NextAfter() are required)
+
+ // If we have overflowed the exponent, return infinity
+ if (writtenExponent >= maxExponent)
+ return fromRep(infRep | quotientSign);
+
+ // Now, quotient_UQ1_SB <= the correctly-rounded result
+ // and may need taking NextAfter() up to 3 times (see error estimates above)
+ // r = a - b * q
+ rep_t absResult;
+ if (writtenExponent > 0) {
+ // Clear the implicit bit
+ absResult = quotient_UQ1 & significandMask;
+ // Insert the exponent
+ absResult |= (rep_t)writtenExponent << significandBits;
+ residualLo <<= 1;
+ } else {
+ // Prevent shift amount from being negative
+ if (significandBits + writtenExponent < 0)
+ return fromRep(quotientSign);
+
+ absResult = quotient_UQ1 >> (-writtenExponent + 1);
+
+ // multiplied by two to prevent the shift amount from being negative
+ residualLo = (aSignificand << (significandBits + writtenExponent)) - (absResult * bSignificand << 1);
+ }
+
+ // Round
+ residualLo += absResult & 1; // tie to even
+ // The above line conditionally turns the below LT comparison into LTE
+ absResult += residualLo > bSignificand;
+#if defined(QUAD_PRECISION) || (defined(SINGLE_PRECISION) && NUMBER_OF_HALF_ITERATIONS > 0)
+ // Do not round Infinity to NaN
+ absResult += absResult < infRep && residualLo > (2 + 1) * bSignificand;
+#endif
+#if defined(QUAD_PRECISION)
+ absResult += absResult < infRep && residualLo > (4 + 1) * bSignificand;
+#endif
+ return fromRep(absResult | quotientSign);
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/fp_extend.h b/contrib/llvm-project/compiler-rt/lib/builtins/fp_extend.h
index fb512672e35e..aad4436730dd 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/fp_extend.h
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/fp_extend.h
@@ -40,7 +40,11 @@ static __inline int src_rep_t_clz(src_rep_t a) {
}
#elif defined SRC_HALF
+#ifdef COMPILER_RT_HAS_FLOAT16
+typedef _Float16 src_t;
+#else
typedef uint16_t src_t;
+#endif
typedef uint16_t src_rep_t;
#define SRC_REP_C UINT16_C
static const int srcSigBits = 10;
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/fp_lib.h b/contrib/llvm-project/compiler-rt/lib/builtins/fp_lib.h
index bd1f180f499e..f22feafa4e69 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/fp_lib.h
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/fp_lib.h
@@ -40,9 +40,12 @@
#if defined SINGLE_PRECISION
+typedef uint16_t half_rep_t;
typedef uint32_t rep_t;
+typedef uint64_t twice_rep_t;
typedef int32_t srep_t;
typedef float fp_t;
+#define HALF_REP_C UINT16_C
#define REP_C UINT32_C
#define significandBits 23
@@ -58,9 +61,11 @@ COMPILER_RT_ABI fp_t __addsf3(fp_t a, fp_t b);
#elif defined DOUBLE_PRECISION
+typedef uint32_t half_rep_t;
typedef uint64_t rep_t;
typedef int64_t srep_t;
typedef double fp_t;
+#define HALF_REP_C UINT32_C
#define REP_C UINT64_C
#define significandBits 52
@@ -102,9 +107,11 @@ COMPILER_RT_ABI fp_t __adddf3(fp_t a, fp_t b);
#elif defined QUAD_PRECISION
#if __LDBL_MANT_DIG__ == 113 && defined(__SIZEOF_INT128__)
#define CRT_LDBL_128BIT
+typedef uint64_t half_rep_t;
typedef __uint128_t rep_t;
typedef __int128_t srep_t;
typedef long double fp_t;
+#define HALF_REP_C UINT64_C
#define REP_C (__uint128_t)
// Note: Since there is no explicit way to tell compiler the constant is a
// 128-bit integer, we let the constant be casted to 128-bit integer
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/fp_trunc.h b/contrib/llvm-project/compiler-rt/lib/builtins/fp_trunc.h
index aca4c9b6e677..00595edd5e01 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/fp_trunc.h
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/fp_trunc.h
@@ -50,7 +50,11 @@ typedef uint32_t dst_rep_t;
static const int dstSigBits = 23;
#elif defined DST_HALF
+#ifdef COMPILER_RT_HAS_FLOAT16
+typedef _Float16 dst_t;
+#else
typedef uint16_t dst_t;
+#endif
typedef uint16_t dst_rep_t;
#define DST_REP_C UINT16_C
static const int dstSigBits = 10;
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/int_div_impl.inc b/contrib/llvm-project/compiler-rt/lib/builtins/int_div_impl.inc
index de0373889078..dc1f97cbeae5 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/int_div_impl.inc
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/int_div_impl.inc
@@ -68,3 +68,28 @@ static __inline fixuint_t __umodXi3(fixuint_t n, fixuint_t d) {
}
return r;
}
+
+#ifdef COMPUTE_UDIV
+static __inline fixint_t __divXi3(fixint_t a, fixint_t b) {
+ const int N = (int)(sizeof(fixint_t) * CHAR_BIT) - 1;
+ fixint_t s_a = a >> N; // s_a = a < 0 ? -1 : 0
+ fixint_t s_b = b >> N; // s_b = b < 0 ? -1 : 0
+ fixuint_t a_u = (fixuint_t)(a ^ s_a) + (-s_a); // negate if s_a == -1
+ fixuint_t b_u = (fixuint_t)(b ^ s_b) + (-s_b); // negate if s_b == -1
+ s_a ^= s_b; // sign of quotient
+ return (COMPUTE_UDIV(a_u, b_u) ^ s_a) + (-s_a); // negate if s_a == -1
+}
+#endif // COMPUTE_UDIV
+
+#ifdef ASSIGN_UMOD
+static __inline fixint_t __modXi3(fixint_t a, fixint_t b) {
+ const int N = (int)(sizeof(fixint_t) * CHAR_BIT) - 1;
+ fixint_t s = b >> N; // s = b < 0 ? -1 : 0
+ fixuint_t b_u = (fixuint_t)(b ^ s) + (-s); // negate if s == -1
+ s = a >> N; // s = a < 0 ? -1 : 0
+ fixuint_t a_u = (fixuint_t)(a ^ s) + (-s); // negate if s == -1
+ fixuint_t res;
+ ASSIGN_UMOD(res, a_u, b_u);
+ return (res ^ s) + (-s); // negate if s == -1
+}
+#endif // ASSIGN_UMOD
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/int_mulo_impl.inc b/contrib/llvm-project/compiler-rt/lib/builtins/int_mulo_impl.inc
new file mode 100644
index 000000000000..567d8b9e6e60
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/int_mulo_impl.inc
@@ -0,0 +1,49 @@
+//===-- int_mulo_impl.inc - Implement __mulo[sdt]i4 ---------------*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Helper used by __mulosi4, __mulodi4 and __muloti4.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: a * b
+
+// Effects: sets *overflow to 1 if a * b overflows
+
+static __inline fixint_t __muloXi4(fixint_t a, fixint_t b, int *overflow) {
+ const int N = (int)(sizeof(fixint_t) * CHAR_BIT);
+ const fixint_t MIN = (fixint_t)1 << (N - 1);
+ const fixint_t MAX = ~MIN;
+ *overflow = 0;
+ fixint_t result = a * b;
+ if (a == MIN) {
+ if (b != 0 && b != 1)
+ *overflow = 1;
+ return result;
+ }
+ if (b == MIN) {
+ if (a != 0 && a != 1)
+ *overflow = 1;
+ return result;
+ }
+ fixint_t sa = a >> (N - 1);
+ fixint_t abs_a = (a ^ sa) - sa;
+ fixint_t sb = b >> (N - 1);
+ fixint_t abs_b = (b ^ sb) - sb;
+ if (abs_a < 2 || abs_b < 2)
+ return result;
+ if (sa == sb) {
+ if (abs_a > MAX / abs_b)
+ *overflow = 1;
+ } else {
+ if (abs_a > MIN / -abs_b)
+ *overflow = 1;
+ }
+ return result;
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/int_mulv_impl.inc b/contrib/llvm-project/compiler-rt/lib/builtins/int_mulv_impl.inc
new file mode 100644
index 000000000000..1e920716ec49
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/int_mulv_impl.inc
@@ -0,0 +1,47 @@
+//===-- int_mulv_impl.inc - Implement __mulv[sdt]i3 ---------------*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Helper used by __mulvsi3, __mulvdi3 and __mulvti3.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: a * b
+
+// Effects: aborts if a * b overflows
+
+static __inline fixint_t __mulvXi3(fixint_t a, fixint_t b) {
+ const int N = (int)(sizeof(fixint_t) * CHAR_BIT);
+ const fixint_t MIN = (fixint_t)1 << (N - 1);
+ const fixint_t MAX = ~MIN;
+ if (a == MIN) {
+ if (b == 0 || b == 1)
+ return a * b;
+ compilerrt_abort();
+ }
+ if (b == MIN) {
+ if (a == 0 || a == 1)
+ return a * b;
+ compilerrt_abort();
+ }
+ fixint_t sa = a >> (N - 1);
+ fixint_t abs_a = (a ^ sa) - sa;
+ fixint_t sb = b >> (N - 1);
+ fixint_t abs_b = (b ^ sb) - sb;
+ if (abs_a < 2 || abs_b < 2)
+ return a * b;
+ if (sa == sb) {
+ if (abs_a > MAX / abs_b)
+ compilerrt_abort();
+ } else {
+ if (abs_a > MIN / -abs_b)
+ compilerrt_abort();
+ }
+ return a * b;
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/int_util.h b/contrib/llvm-project/compiler-rt/lib/builtins/int_util.h
index 5fbdfb57c1e7..c372c2edc637 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/int_util.h
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/int_util.h
@@ -28,4 +28,20 @@ NORETURN void __compilerrt_abort_impl(const char *file, int line,
#define COMPILE_TIME_ASSERT2(expr, cnt) \
typedef char ct_assert_##cnt[(expr) ? 1 : -1] UNUSED
+// Force unrolling the code specified to be repeated N times.
+#define REPEAT_0_TIMES(code_to_repeat) /* do nothing */
+#define REPEAT_1_TIMES(code_to_repeat) code_to_repeat
+#define REPEAT_2_TIMES(code_to_repeat) \
+ REPEAT_1_TIMES(code_to_repeat) \
+ code_to_repeat
+#define REPEAT_3_TIMES(code_to_repeat) \
+ REPEAT_2_TIMES(code_to_repeat) \
+ code_to_repeat
+#define REPEAT_4_TIMES(code_to_repeat) \
+ REPEAT_3_TIMES(code_to_repeat) \
+ code_to_repeat
+
+#define REPEAT_N_TIMES_(N, code_to_repeat) REPEAT_##N##_TIMES(code_to_repeat)
+#define REPEAT_N_TIMES(N, code_to_repeat) REPEAT_N_TIMES_(N, code_to_repeat)
+
#endif // INT_UTIL_H
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/moddi3.c b/contrib/llvm-project/compiler-rt/lib/builtins/moddi3.c
index 92b0996077c6..15cf80b99555 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/moddi3.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/moddi3.c
@@ -14,13 +14,9 @@
// Returns: a % b
-COMPILER_RT_ABI di_int __moddi3(di_int a, di_int b) {
- const int bits_in_dword_m1 = (int)(sizeof(di_int) * CHAR_BIT) - 1;
- di_int s = b >> bits_in_dword_m1; // s = b < 0 ? -1 : 0
- b = (b ^ s) - s; // negate if s == -1
- s = a >> bits_in_dword_m1; // s = a < 0 ? -1 : 0
- a = (a ^ s) - s; // negate if s == -1
- du_int r;
- __udivmoddi4(a, b, &r);
- return ((di_int)r ^ s) - s; // negate if s == -1
-}
+#define fixint_t di_int
+#define fixuint_t du_int
+#define ASSIGN_UMOD(res, a, b) __udivmoddi4((a), (b), &(res))
+#include "int_div_impl.inc"
+
+COMPILER_RT_ABI di_int __moddi3(di_int a, di_int b) { return __modXi3(a, b); }
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/modti3.c b/contrib/llvm-project/compiler-rt/lib/builtins/modti3.c
index d11fe220b769..7c10cfd39027 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/modti3.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/modti3.c
@@ -16,15 +16,11 @@
// Returns: a % b
-COMPILER_RT_ABI ti_int __modti3(ti_int a, ti_int b) {
- const int bits_in_tword_m1 = (int)(sizeof(ti_int) * CHAR_BIT) - 1;
- ti_int s = b >> bits_in_tword_m1; // s = b < 0 ? -1 : 0
- b = (b ^ s) - s; // negate if s == -1
- s = a >> bits_in_tword_m1; // s = a < 0 ? -1 : 0
- a = (a ^ s) - s; // negate if s == -1
- tu_int r;
- __udivmodti4(a, b, &r);
- return ((ti_int)r ^ s) - s; // negate if s == -1
-}
+#define fixint_t ti_int
+#define fixuint_t tu_int
+#define ASSIGN_UMOD(res, a, b) __udivmodti4((a), (b), &(res))
+#include "int_div_impl.inc"
+
+COMPILER_RT_ABI ti_int __modti3(ti_int a, ti_int b) { return __modXi3(a, b); }
#endif // CRT_HAS_128BIT
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/mulodi4.c b/contrib/llvm-project/compiler-rt/lib/builtins/mulodi4.c
index 23f5571ac468..7209676a327e 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/mulodi4.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/mulodi4.c
@@ -10,40 +10,13 @@
//
//===----------------------------------------------------------------------===//
-#include "int_lib.h"
+#define fixint_t di_int
+#include "int_mulo_impl.inc"
// Returns: a * b
// Effects: sets *overflow to 1 if a * b overflows
COMPILER_RT_ABI di_int __mulodi4(di_int a, di_int b, int *overflow) {
- const int N = (int)(sizeof(di_int) * CHAR_BIT);
- const di_int MIN = (di_int)1 << (N - 1);
- const di_int MAX = ~MIN;
- *overflow = 0;
- di_int result = a * b;
- if (a == MIN) {
- if (b != 0 && b != 1)
- *overflow = 1;
- return result;
- }
- if (b == MIN) {
- if (a != 0 && a != 1)
- *overflow = 1;
- return result;
- }
- di_int sa = a >> (N - 1);
- di_int abs_a = (a ^ sa) - sa;
- di_int sb = b >> (N - 1);
- di_int abs_b = (b ^ sb) - sb;
- if (abs_a < 2 || abs_b < 2)
- return result;
- if (sa == sb) {
- if (abs_a > MAX / abs_b)
- *overflow = 1;
- } else {
- if (abs_a > MIN / -abs_b)
- *overflow = 1;
- }
- return result;
+ return __muloXi4(a, b, overflow);
}
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/mulosi4.c b/contrib/llvm-project/compiler-rt/lib/builtins/mulosi4.c
index fea4311296f8..4e03c24455d6 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/mulosi4.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/mulosi4.c
@@ -10,40 +10,13 @@
//
//===----------------------------------------------------------------------===//
-#include "int_lib.h"
+#define fixint_t si_int
+#include "int_mulo_impl.inc"
// Returns: a * b
// Effects: sets *overflow to 1 if a * b overflows
COMPILER_RT_ABI si_int __mulosi4(si_int a, si_int b, int *overflow) {
- const int N = (int)(sizeof(si_int) * CHAR_BIT);
- const si_int MIN = (si_int)1 << (N - 1);
- const si_int MAX = ~MIN;
- *overflow = 0;
- si_int result = a * b;
- if (a == MIN) {
- if (b != 0 && b != 1)
- *overflow = 1;
- return result;
- }
- if (b == MIN) {
- if (a != 0 && a != 1)
- *overflow = 1;
- return result;
- }
- si_int sa = a >> (N - 1);
- si_int abs_a = (a ^ sa) - sa;
- si_int sb = b >> (N - 1);
- si_int abs_b = (b ^ sb) - sb;
- if (abs_a < 2 || abs_b < 2)
- return result;
- if (sa == sb) {
- if (abs_a > MAX / abs_b)
- *overflow = 1;
- } else {
- if (abs_a > MIN / -abs_b)
- *overflow = 1;
- }
- return result;
+ return __muloXi4(a, b, overflow);
}
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/muloti4.c b/contrib/llvm-project/compiler-rt/lib/builtins/muloti4.c
index 9bdd5b649908..9a7aa85b022b 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/muloti4.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/muloti4.c
@@ -18,36 +18,11 @@
// Effects: sets *overflow to 1 if a * b overflows
+#define fixint_t ti_int
+#include "int_mulo_impl.inc"
+
COMPILER_RT_ABI ti_int __muloti4(ti_int a, ti_int b, int *overflow) {
- const int N = (int)(sizeof(ti_int) * CHAR_BIT);
- const ti_int MIN = (ti_int)1 << (N - 1);
- const ti_int MAX = ~MIN;
- *overflow = 0;
- ti_int result = a * b;
- if (a == MIN) {
- if (b != 0 && b != 1)
- *overflow = 1;
- return result;
- }
- if (b == MIN) {
- if (a != 0 && a != 1)
- *overflow = 1;
- return result;
- }
- ti_int sa = a >> (N - 1);
- ti_int abs_a = (a ^ sa) - sa;
- ti_int sb = b >> (N - 1);
- ti_int abs_b = (b ^ sb) - sb;
- if (abs_a < 2 || abs_b < 2)
- return result;
- if (sa == sb) {
- if (abs_a > MAX / abs_b)
- *overflow = 1;
- } else {
- if (abs_a > MIN / -abs_b)
- *overflow = 1;
- }
- return result;
+ return __muloXi4(a, b, overflow);
}
#endif // CRT_HAS_128BIT
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/mulvdi3.c b/contrib/llvm-project/compiler-rt/lib/builtins/mulvdi3.c
index cecc97ccf22e..1d672c6dc155 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/mulvdi3.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/mulvdi3.c
@@ -10,38 +10,11 @@
//
//===----------------------------------------------------------------------===//
-#include "int_lib.h"
+#define fixint_t di_int
+#include "int_mulv_impl.inc"
// Returns: a * b
// Effects: aborts if a * b overflows
-COMPILER_RT_ABI di_int __mulvdi3(di_int a, di_int b) {
- const int N = (int)(sizeof(di_int) * CHAR_BIT);
- const di_int MIN = (di_int)1 << (N - 1);
- const di_int MAX = ~MIN;
- if (a == MIN) {
- if (b == 0 || b == 1)
- return a * b;
- compilerrt_abort();
- }
- if (b == MIN) {
- if (a == 0 || a == 1)
- return a * b;
- compilerrt_abort();
- }
- di_int sa = a >> (N - 1);
- di_int abs_a = (a ^ sa) - sa;
- di_int sb = b >> (N - 1);
- di_int abs_b = (b ^ sb) - sb;
- if (abs_a < 2 || abs_b < 2)
- return a * b;
- if (sa == sb) {
- if (abs_a > MAX / abs_b)
- compilerrt_abort();
- } else {
- if (abs_a > MIN / -abs_b)
- compilerrt_abort();
- }
- return a * b;
-}
+COMPILER_RT_ABI di_int __mulvdi3(di_int a, di_int b) { return __mulvXi3(a, b); }
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/mulvsi3.c b/contrib/llvm-project/compiler-rt/lib/builtins/mulvsi3.c
index 0d6b18ad01a4..00b2e50eeca9 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/mulvsi3.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/mulvsi3.c
@@ -10,38 +10,11 @@
//
//===----------------------------------------------------------------------===//
-#include "int_lib.h"
+#define fixint_t si_int
+#include "int_mulv_impl.inc"
// Returns: a * b
// Effects: aborts if a * b overflows
-COMPILER_RT_ABI si_int __mulvsi3(si_int a, si_int b) {
- const int N = (int)(sizeof(si_int) * CHAR_BIT);
- const si_int MIN = (si_int)1 << (N - 1);
- const si_int MAX = ~MIN;
- if (a == MIN) {
- if (b == 0 || b == 1)
- return a * b;
- compilerrt_abort();
- }
- if (b == MIN) {
- if (a == 0 || a == 1)
- return a * b;
- compilerrt_abort();
- }
- si_int sa = a >> (N - 1);
- si_int abs_a = (a ^ sa) - sa;
- si_int sb = b >> (N - 1);
- si_int abs_b = (b ^ sb) - sb;
- if (abs_a < 2 || abs_b < 2)
- return a * b;
- if (sa == sb) {
- if (abs_a > MAX / abs_b)
- compilerrt_abort();
- } else {
- if (abs_a > MIN / -abs_b)
- compilerrt_abort();
- }
- return a * b;
-}
+COMPILER_RT_ABI si_int __mulvsi3(si_int a, si_int b) { return __mulvXi3(a, b); }
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/mulvti3.c b/contrib/llvm-project/compiler-rt/lib/builtins/mulvti3.c
index 03963a0ca694..ba355149f9a7 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/mulvti3.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/mulvti3.c
@@ -18,34 +18,9 @@
// Effects: aborts if a * b overflows
-COMPILER_RT_ABI ti_int __mulvti3(ti_int a, ti_int b) {
- const int N = (int)(sizeof(ti_int) * CHAR_BIT);
- const ti_int MIN = (ti_int)1 << (N - 1);
- const ti_int MAX = ~MIN;
- if (a == MIN) {
- if (b == 0 || b == 1)
- return a * b;
- compilerrt_abort();
- }
- if (b == MIN) {
- if (a == 0 || a == 1)
- return a * b;
- compilerrt_abort();
- }
- ti_int sa = a >> (N - 1);
- ti_int abs_a = (a ^ sa) - sa;
- ti_int sb = b >> (N - 1);
- ti_int abs_b = (b ^ sb) - sb;
- if (abs_a < 2 || abs_b < 2)
- return a * b;
- if (sa == sb) {
- if (abs_a > MAX / abs_b)
- compilerrt_abort();
- } else {
- if (abs_a > MIN / -abs_b)
- compilerrt_abort();
- }
- return a * b;
-}
+#define fixint_t ti_int
+#include "int_mulv_impl.inc"
+
+COMPILER_RT_ABI ti_int __mulvti3(ti_int a, ti_int b) { return __mulvXi3(a, b); }
#endif // CRT_HAS_128BIT
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/os_version_check.c b/contrib/llvm-project/compiler-rt/lib/builtins/os_version_check.c
index 3794b979434c..d7194b99ae54 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/os_version_check.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/os_version_check.c
@@ -24,6 +24,20 @@
// These three variables hold the host's OS version.
static int32_t GlobalMajor, GlobalMinor, GlobalSubminor;
static dispatch_once_t DispatchOnceCounter;
+static dispatch_once_t CompatibilityDispatchOnceCounter;
+
+// _availability_version_check darwin API support.
+typedef uint32_t dyld_platform_t;
+
+typedef struct {
+ dyld_platform_t platform;
+ uint32_t version;
+} dyld_build_version_t;
+
+typedef bool (*AvailabilityVersionCheckFuncTy)(uint32_t count,
+ dyld_build_version_t versions[]);
+
+static AvailabilityVersionCheckFuncTy AvailabilityVersionCheck;
// We can't include <CoreFoundation/CoreFoundation.h> directly from here, so
// just forward declare everything that we need from it.
@@ -72,9 +86,25 @@ typedef Boolean (*CFStringGetCStringFuncTy)(CFStringRef, char *, CFIndex,
CFStringEncoding);
typedef void (*CFReleaseFuncTy)(CFTypeRef);
-// Find and parse the SystemVersion.plist file.
-static void parseSystemVersionPList(void *Unused) {
- (void)Unused;
+static void _initializeAvailabilityCheck(bool LoadPlist) {
+ if (AvailabilityVersionCheck && !LoadPlist) {
+ // New API is supported and we're not being asked to load the plist,
+ // exit early!
+ return;
+ }
+
+ // Use the new API if it is available.
+ AvailabilityVersionCheck = (AvailabilityVersionCheckFuncTy)dlsym(
+ RTLD_DEFAULT, "_availability_version_check");
+
+ if (AvailabilityVersionCheck && !LoadPlist) {
+ // New API is supported and we're not being asked to load the plist,
+ // exit early!
+ return;
+ }
+ // Still load the PLIST to ensure that the existing calls to
+ // __isOSVersionAtLeast still work even with new compiler-rt and old OSes.
+
// Load CoreFoundation dynamically
const void *NullAllocator = dlsym(RTLD_DEFAULT, "kCFAllocatorNull");
if (!NullAllocator)
@@ -201,9 +231,24 @@ Fail:
fclose(PropertyList);
}
+// Find and parse the SystemVersion.plist file.
+static void compatibilityInitializeAvailabilityCheck(void *Unused) {
+ (void)Unused;
+ _initializeAvailabilityCheck(/*LoadPlist=*/true);
+}
+
+static void initializeAvailabilityCheck(void *Unused) {
+ (void)Unused;
+ _initializeAvailabilityCheck(/*LoadPlist=*/false);
+}
+
+// This old API entry point is no longer used by Clang for Darwin. We still need
+// to keep it around to ensure that object files that reference it are still
+// usable when linked with new compiler-rt.
int32_t __isOSVersionAtLeast(int32_t Major, int32_t Minor, int32_t Subminor) {
// Populate the global version variables, if they haven't already.
- dispatch_once_f(&DispatchOnceCounter, NULL, parseSystemVersionPList);
+ dispatch_once_f(&CompatibilityDispatchOnceCounter, NULL,
+ compatibilityInitializeAvailabilityCheck);
if (Major < GlobalMajor)
return 1;
@@ -216,6 +261,61 @@ int32_t __isOSVersionAtLeast(int32_t Major, int32_t Minor, int32_t Subminor) {
return Subminor <= GlobalSubminor;
}
+static inline uint32_t ConstructVersion(uint32_t Major, uint32_t Minor,
+ uint32_t Subminor) {
+ return ((Major & 0xffff) << 16) | ((Minor & 0xff) << 8) | (Subminor & 0xff);
+}
+
+int32_t __isPlatformVersionAtLeast(uint32_t Platform, uint32_t Major,
+ uint32_t Minor, uint32_t Subminor) {
+ dispatch_once_f(&DispatchOnceCounter, NULL, initializeAvailabilityCheck);
+
+ if (!AvailabilityVersionCheck) {
+ return __isOSVersionAtLeast(Major, Minor, Subminor);
+ }
+ dyld_build_version_t Versions[] = {
+ {Platform, ConstructVersion(Major, Minor, Subminor)}};
+ return AvailabilityVersionCheck(1, Versions);
+}
+
+#elif __ANDROID__
+
+#include <pthread.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/system_properties.h>
+
+static int SdkVersion;
+static int IsPreRelease;
+
+static void readSystemProperties(void) {
+ char buf[PROP_VALUE_MAX];
+
+ if (__system_property_get("ro.build.version.sdk", buf) == 0) {
+ // When the system property doesn't exist, defaults to future API level.
+ SdkVersion = __ANDROID_API_FUTURE__;
+ } else {
+ SdkVersion = atoi(buf);
+ }
+
+ if (__system_property_get("ro.build.version.codename", buf) == 0) {
+ IsPreRelease = 1;
+ } else {
+ IsPreRelease = strcmp(buf, "REL") != 0;
+ }
+ return;
+}
+
+int32_t __isOSVersionAtLeast(int32_t Major, int32_t Minor, int32_t Subminor) {
+ (int32_t) Minor;
+ (int32_t) Subminor;
+ static pthread_once_t once = PTHREAD_ONCE_INIT;
+ pthread_once(&once, readSystemProperties);
+
+ return SdkVersion >= Major ||
+ (IsPreRelease && Major == __ANDROID_API_FUTURE__);
+}
+
#else
// Silence an empty translation unit warning.
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/paritydi2.c b/contrib/llvm-project/compiler-rt/lib/builtins/paritydi2.c
index 58e85f89e043..350dceb8cef5 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/paritydi2.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/paritydi2.c
@@ -17,5 +17,9 @@
COMPILER_RT_ABI int __paritydi2(di_int a) {
dwords x;
x.all = a;
- return __paritysi2(x.s.high ^ x.s.low);
+ su_int x2 = x.s.high ^ x.s.low;
+ x2 ^= x2 >> 16;
+ x2 ^= x2 >> 8;
+ x2 ^= x2 >> 4;
+ return (0x6996 >> (x2 & 0xF)) & 1;
}
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/parityti2.c b/contrib/llvm-project/compiler-rt/lib/builtins/parityti2.c
index 79e920d8a02d..011c8dd45562 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/parityti2.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/parityti2.c
@@ -18,8 +18,14 @@
COMPILER_RT_ABI int __parityti2(ti_int a) {
twords x;
+ dwords x2;
x.all = a;
- return __paritydi2(x.s.high ^ x.s.low);
+ x2.all = x.s.high ^ x.s.low;
+ su_int x3 = x2.s.high ^ x2.s.low;
+ x3 ^= x3 >> 16;
+ x3 ^= x3 >> 8;
+ x3 ^= x3 >> 4;
+ return (0x6996 >> (x3 & 0xF)) & 1;
}
#endif // CRT_HAS_128BIT
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/riscv/int_mul_impl.inc b/contrib/llvm-project/compiler-rt/lib/builtins/riscv/int_mul_impl.inc
index 50951d5f4195..53699b356f6a 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/riscv/int_mul_impl.inc
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/riscv/int_mul_impl.inc
@@ -10,7 +10,10 @@
//
//===----------------------------------------------------------------------===//
-#if !defined(__riscv_mul)
+#ifndef __mulxi3
+#error "__mulxi3 must be defined to use this generic implementation"
+#endif
+
.text
.align 2
@@ -28,4 +31,3 @@ __mulxi3:
slli a2, a2, 1
bnez a1, .L1
ret
-#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/truncdfhf2.c b/contrib/llvm-project/compiler-rt/lib/builtins/truncdfhf2.c
index 90c418a4387f..24c6e62f715f 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/truncdfhf2.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/truncdfhf2.c
@@ -10,11 +10,11 @@
#define DST_HALF
#include "fp_trunc_impl.inc"
-COMPILER_RT_ABI uint16_t __truncdfhf2(double a) { return __truncXfYf2__(a); }
+COMPILER_RT_ABI dst_t __truncdfhf2(double a) { return __truncXfYf2__(a); }
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
-AEABI_RTABI uint16_t __aeabi_d2h(double a) { return __truncdfhf2(a); }
+AEABI_RTABI dst_t __aeabi_d2h(double a) { return __truncdfhf2(a); }
#else
COMPILER_RT_ALIAS(__truncdfhf2, __aeabi_d2h)
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/truncsfhf2.c b/contrib/llvm-project/compiler-rt/lib/builtins/truncsfhf2.c
index 1f17194c38e5..379e7cb6f784 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/truncsfhf2.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/truncsfhf2.c
@@ -12,15 +12,15 @@
// Use a forwarding definition and noinline to implement a poor man's alias,
// as there isn't a good cross-platform way of defining one.
-COMPILER_RT_ABI NOINLINE uint16_t __truncsfhf2(float a) {
+COMPILER_RT_ABI NOINLINE dst_t __truncsfhf2(float a) {
return __truncXfYf2__(a);
}
-COMPILER_RT_ABI uint16_t __gnu_f2h_ieee(float a) { return __truncsfhf2(a); }
+COMPILER_RT_ABI dst_t __gnu_f2h_ieee(float a) { return __truncsfhf2(a); }
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
-AEABI_RTABI uint16_t __aeabi_f2h(float a) { return __truncsfhf2(a); }
+AEABI_RTABI dst_t __aeabi_f2h(float a) { return __truncsfhf2(a); }
#else
COMPILER_RT_ALIAS(__truncsfhf2, __aeabi_f2h)
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/trunctfhf2.c b/contrib/llvm-project/compiler-rt/lib/builtins/trunctfhf2.c
new file mode 100644
index 000000000000..e3a2309d954b
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/trunctfhf2.c
@@ -0,0 +1,23 @@
+//===-- lib/trunctfhf2.c - quad -> half conversion ----------------*- C -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is dual licensed under the MIT and the University of Illinois Open
+// Source Licenses. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#define QUAD_PRECISION
+#include "fp_lib.h"
+
+#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT) && \
+ defined(COMPILER_RT_HAS_FLOAT16)
+#define SRC_QUAD
+#define DST_HALF
+#include "fp_trunc_impl.inc"
+
+COMPILER_RT_ABI _Float16 __trunctfhf2(long double a) {
+ return __truncXfYf2__(a);
+}
+
+#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/cfi/cfi.cpp b/contrib/llvm-project/compiler-rt/lib/cfi/cfi.cpp
index fd48f71643b6..b75c72b215c2 100644
--- a/contrib/llvm-project/compiler-rt/lib/cfi/cfi.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/cfi/cfi.cpp
@@ -379,7 +379,7 @@ void InitializeFlags() {
__ubsan::RegisterUbsanFlags(&ubsan_parser, uf);
RegisterCommonFlags(&ubsan_parser);
- const char *ubsan_default_options = __ubsan::MaybeCallUbsanDefaultOptions();
+ const char *ubsan_default_options = __ubsan_default_options();
ubsan_parser.ParseString(ubsan_default_options);
ubsan_parser.ParseStringFromEnv("UBSAN_OPTIONS");
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/crt/crtbegin.c b/contrib/llvm-project/compiler-rt/lib/crt/crtbegin.c
index 24bea1a2c3a7..481c158ac777 100644
--- a/contrib/llvm-project/compiler-rt/lib/crt/crtbegin.c
+++ b/contrib/llvm-project/compiler-rt/lib/crt/crtbegin.c
@@ -52,6 +52,10 @@ __attribute__((section(".init_array"),
__asm__(".pushsection .init,\"ax\",@progbits\n\t"
"call " __USER_LABEL_PREFIX__ "__do_init\n\t"
".popsection");
+#elif defined(__riscv)
+__asm__(".pushsection .init,\"ax\",%progbits\n\t"
+ "call " __USER_LABEL_PREFIX__ "__do_init\n\t"
+ ".popsection");
#elif defined(__arm__) || defined(__aarch64__)
__asm__(".pushsection .init,\"ax\",%progbits\n\t"
"bl " __USER_LABEL_PREFIX__ "__do_init\n\t"
@@ -110,6 +114,10 @@ __asm__(".pushsection .fini,\"ax\",@progbits\n\t"
"bl " __USER_LABEL_PREFIX__ "__do_fini\n\t"
"nop\n\t"
".popsection");
+#elif defined(__riscv)
+__asm__(".pushsection .fini,\"ax\",@progbits\n\t"
+ "call " __USER_LABEL_PREFIX__ "__do_fini\n\t"
+ ".popsection");
#elif defined(__sparc__)
__asm__(".pushsection .fini,\"ax\",@progbits\n\t"
"call " __USER_LABEL_PREFIX__ "__do_fini\n\t"
diff --git a/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan.cpp b/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan.cpp
index 0e2fb9f5f334..c17bfe0ccb32 100644
--- a/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan.cpp
@@ -18,14 +18,16 @@
// prefixed __dfsan_.
//===----------------------------------------------------------------------===//
+#include "dfsan/dfsan.h"
+
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_file.h"
-#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_libc.h"
-
-#include "dfsan/dfsan.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
using namespace __dfsan;
@@ -39,8 +41,15 @@ static dfsan_label_info __dfsan_label_info[kNumLabels];
Flags __dfsan::flags_data;
-SANITIZER_INTERFACE_ATTRIBUTE THREADLOCAL dfsan_label __dfsan_retval_tls;
-SANITIZER_INTERFACE_ATTRIBUTE THREADLOCAL dfsan_label __dfsan_arg_tls[64];
+// The size of TLS variables. These constants must be kept in sync with the ones
+// in DataFlowSanitizer.cpp.
+static const int kDFsanArgTlsSize = 800;
+static const int kDFsanRetvalTlsSize = 800;
+
+SANITIZER_INTERFACE_ATTRIBUTE THREADLOCAL u64
+ __dfsan_retval_tls[kDFsanRetvalTlsSize / sizeof(u64)];
+SANITIZER_INTERFACE_ATTRIBUTE THREADLOCAL u64
+ __dfsan_arg_tls[kDFsanArgTlsSize / sizeof(u64)];
SANITIZER_INTERFACE_ATTRIBUTE uptr __dfsan_shadow_ptr_mask;
@@ -142,8 +151,7 @@ int __dfsan::vmaSize;
#endif
static uptr UnusedAddr() {
- return MappingArchImpl<MAPPING_UNION_TABLE_ADDR>()
- + sizeof(dfsan_union_table_t);
+ return UnionTableAddr() + sizeof(dfsan_union_table_t);
}
static atomic_dfsan_label *union_table(dfsan_label l1, dfsan_label l2) {
@@ -162,8 +170,6 @@ static void dfsan_check_label(dfsan_label label) {
// this function (the instrumentation pass inlines the equality test).
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
dfsan_label __dfsan_union(dfsan_label l1, dfsan_label l2) {
- if (flags().fast16labels)
- return l1 | l2;
DCHECK_NE(l1, l2);
if (l1 == 0)
@@ -171,6 +177,11 @@ dfsan_label __dfsan_union(dfsan_label l1, dfsan_label l2) {
if (l2 == 0)
return l1;
+ // If no labels have been created, yet l1 and l2 are non-zero, we are using
+ // fast16labels mode.
+ if (atomic_load(&__dfsan_last_label, memory_order_relaxed) == 0)
+ return l1 | l2;
+
if (l1 > l2)
Swap(l1, l2);
@@ -219,6 +230,14 @@ dfsan_label __dfsan_union_load(const dfsan_label *ls, uptr n) {
}
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+dfsan_label __dfsan_union_load_fast16labels(const dfsan_label *ls, uptr n) {
+ dfsan_label label = ls[0];
+ for (uptr i = 1; i != n; ++i)
+ label |= ls[i];
+ return label;
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __dfsan_unimplemented(char *fname) {
if (flags().warn_unimplemented)
Report("WARNING: DataFlowSanitizer: call to uninstrumented function %s\n",
@@ -254,7 +273,7 @@ dfsan_union(dfsan_label l1, dfsan_label l2) {
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
dfsan_label dfsan_create_label(const char *desc, void *userdata) {
dfsan_label label =
- atomic_fetch_add(&__dfsan_last_label, 1, memory_order_relaxed) + 1;
+ atomic_fetch_add(&__dfsan_last_label, 1, memory_order_relaxed) + 1;
dfsan_check_label(label);
__dfsan_label_info[label].l1 = __dfsan_label_info[label].l2 = 0;
__dfsan_label_info[label].desc = desc;
@@ -262,9 +281,10 @@ dfsan_label dfsan_create_label(const char *desc, void *userdata) {
return label;
}
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE
-void __dfsan_set_label(dfsan_label label, void *addr, uptr size) {
- for (dfsan_label *labelp = shadow_for(addr); size != 0; --size, ++labelp) {
+static void WriteShadowIfDifferent(dfsan_label label, uptr shadow_addr,
+ uptr size) {
+ dfsan_label *labelp = (dfsan_label *)shadow_addr;
+ for (; size != 0; --size, ++labelp) {
// Don't write the label if it is already the value we need it to be.
// In a program where most addresses are not labeled, it is common that
// a page of shadow memory is entirely zeroed. The Linux copy-on-write
@@ -280,6 +300,38 @@ void __dfsan_set_label(dfsan_label label, void *addr, uptr size) {
}
}
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __dfsan_set_label(
+ dfsan_label label, void *addr, uptr size) {
+ const uptr beg_shadow_addr = (uptr)__dfsan::shadow_for(addr);
+
+ if (0 != label) {
+ WriteShadowIfDifferent(label, beg_shadow_addr, size);
+ return;
+ }
+
+ // If label is 0, releases the pages within the shadow address range, and sets
+ // the shadow addresses not on the pages to be 0.
+ const void *end_addr = (void *)((uptr)addr + size);
+ const uptr end_shadow_addr = (uptr)__dfsan::shadow_for(end_addr);
+ const uptr page_size = GetPageSizeCached();
+ const uptr beg_aligned = RoundUpTo(beg_shadow_addr, page_size);
+ const uptr end_aligned = RoundDownTo(end_shadow_addr, page_size);
+
+ // dfsan_set_label can be called from the following cases
+ // 1) mapped ranges by new/delete and malloc/free. This case has shadow memory
+ // size > 100k, and happens less frequently.
+ // 2) zero-filling internal data structures by utility libraries. This case
+ // has shadow memory size < 32k, and happens more often.
+ // Set kNumPagesThreshold to be 8 to avoid releasing small pages.
+ const int kNumPagesThreshold = 8;
+ if (beg_aligned + kNumPagesThreshold * page_size >= end_aligned)
+ return WriteShadowIfDifferent(label, beg_shadow_addr, size);
+
+ WriteShadowIfDifferent(label, beg_shadow_addr, beg_aligned - beg_shadow_addr);
+ ReleaseMemoryPagesToOS(beg_aligned, end_aligned);
+ WriteShadowIfDifferent(label, end_aligned, end_shadow_addr - end_aligned);
+}
+
SANITIZER_INTERFACE_ATTRIBUTE
void dfsan_set_label(dfsan_label label, void *addr, uptr size) {
__dfsan_set_label(label, addr, size);
@@ -349,7 +401,6 @@ extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
dfsan_dump_labels(int fd) {
dfsan_label last_label =
atomic_load(&__dfsan_last_label, memory_order_relaxed);
-
for (uptr l = 1; l <= last_label; ++l) {
char buf[64];
internal_snprintf(buf, sizeof(buf), "%u %u %u ", l,
@@ -363,6 +414,22 @@ dfsan_dump_labels(int fd) {
}
}
+#define GET_FATAL_STACK_TRACE_PC_BP(pc, bp) \
+ BufferedStackTrace stack; \
+ stack.Unwind(pc, bp, nullptr, common_flags()->fast_unwind_on_fatal);
+
+void __sanitizer::BufferedStackTrace::UnwindImpl(uptr pc, uptr bp,
+ void *context,
+ bool request_fast,
+ u32 max_depth) {
+ Unwind(max_depth, pc, bp, context, 0, 0, false);
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_print_stack_trace() {
+ GET_FATAL_STACK_TRACE_PC_BP(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME());
+ stack.Print();
+}
+
void Flags::SetDefaults() {
#define DFSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "dfsan_flags.inc"
@@ -422,7 +489,6 @@ static void dfsan_fini() {
}
extern "C" void dfsan_flush() {
- UnmapOrDie((void*)ShadowAddr(), UnusedAddr() - ShadowAddr());
if (!MmapFixedNoReserve(ShadowAddr(), UnusedAddr() - ShadowAddr()))
Die();
}
@@ -432,8 +498,10 @@ static void dfsan_init(int argc, char **argv, char **envp) {
::InitializePlatformEarly();
- if (!MmapFixedNoReserve(ShadowAddr(), UnusedAddr() - ShadowAddr()))
+ if (!MmapFixedSuperNoReserve(ShadowAddr(), UnusedAddr() - ShadowAddr()))
Die();
+ if (common_flags()->use_madv_dontdump)
+ DontDumpShadowMemory(ShadowAddr(), UnusedAddr() - ShadowAddr());
// Protect the region of memory we don't use, to preserve the one-to-one
// mapping from application to shadow memory. But if ASLR is disabled, Linux
diff --git a/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_custom.cpp b/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_custom.cpp
index 1acd2d47d154..94901cee0d5c 100644
--- a/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_custom.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_custom.cpp
@@ -11,12 +11,6 @@
// This file defines the custom functions listed in done_abilist.txt.
//===----------------------------------------------------------------------===//
-#include "sanitizer_common/sanitizer_common.h"
-#include "sanitizer_common/sanitizer_internal_defs.h"
-#include "sanitizer_common/sanitizer_linux.h"
-
-#include "dfsan/dfsan.h"
-
#include <arpa/inet.h>
#include <assert.h>
#include <ctype.h>
@@ -32,14 +26,21 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <sys/epoll.h>
#include <sys/resource.h>
#include <sys/select.h>
+#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>
+#include "dfsan/dfsan.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_linux.h"
+
using namespace __dfsan;
#define CALL_WEAK_INTERCEPTOR_HOOK(f, ...) \
@@ -95,18 +96,27 @@ SANITIZER_INTERFACE_ATTRIBUTE char *__dfsw_strchr(const char *s, int c,
}
}
-DECLARE_WEAK_INTERCEPTOR_HOOK(dfsan_weak_hook_memcmp, uptr caller_pc,
- const void *s1, const void *s2, size_t n,
- dfsan_label s1_label, dfsan_label s2_label,
- dfsan_label n_label)
+SANITIZER_INTERFACE_ATTRIBUTE char *__dfsw_strpbrk(const char *s,
+ const char *accept,
+ dfsan_label s_label,
+ dfsan_label accept_label,
+ dfsan_label *ret_label) {
+ const char *ret = strpbrk(s, accept);
+ if (flags().strict_data_dependencies) {
+ *ret_label = ret ? s_label : 0;
+ } else {
+ size_t s_bytes_read = (ret ? ret - s : strlen(s)) + 1;
+ *ret_label =
+ dfsan_union(dfsan_read_label(s, s_bytes_read),
+ dfsan_union(dfsan_read_label(accept, strlen(accept) + 1),
+ dfsan_union(s_label, accept_label)));
+ }
+ return const_cast<char *>(ret);
+}
-SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_memcmp(const void *s1, const void *s2,
- size_t n, dfsan_label s1_label,
- dfsan_label s2_label,
- dfsan_label n_label,
- dfsan_label *ret_label) {
- CALL_WEAK_INTERCEPTOR_HOOK(dfsan_weak_hook_memcmp, GET_CALLER_PC(), s1, s2, n,
- s1_label, s2_label, n_label);
+static int dfsan_memcmp_bcmp(const void *s1, const void *s2, size_t n,
+ dfsan_label s1_label, dfsan_label s2_label,
+ dfsan_label n_label, dfsan_label *ret_label) {
const char *cs1 = (const char *) s1, *cs2 = (const char *) s2;
for (size_t i = 0; i != n; ++i) {
if (cs1[i] != cs2[i]) {
@@ -129,6 +139,29 @@ SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_memcmp(const void *s1, const void *s2,
return 0;
}
+DECLARE_WEAK_INTERCEPTOR_HOOK(dfsan_weak_hook_memcmp, uptr caller_pc,
+ const void *s1, const void *s2, size_t n,
+ dfsan_label s1_label, dfsan_label s2_label,
+ dfsan_label n_label)
+
+SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_memcmp(const void *s1, const void *s2,
+ size_t n, dfsan_label s1_label,
+ dfsan_label s2_label,
+ dfsan_label n_label,
+ dfsan_label *ret_label) {
+ CALL_WEAK_INTERCEPTOR_HOOK(dfsan_weak_hook_memcmp, GET_CALLER_PC(), s1, s2, n,
+ s1_label, s2_label, n_label);
+ return dfsan_memcmp_bcmp(s1, s2, n, s1_label, s2_label, n_label, ret_label);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_bcmp(const void *s1, const void *s2,
+ size_t n, dfsan_label s1_label,
+ dfsan_label s2_label,
+ dfsan_label n_label,
+ dfsan_label *ret_label) {
+ return dfsan_memcmp_bcmp(s1, s2, n, s1_label, s2_label, n_label, ret_label);
+}
+
DECLARE_WEAK_INTERCEPTOR_HOOK(dfsan_weak_hook_strcmp, uptr caller_pc,
const char *s1, const char *s2,
dfsan_label s1_label, dfsan_label s2_label)
@@ -394,6 +427,18 @@ SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_pthread_create(
return rv;
}
+SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_pthread_join(pthread_t thread,
+ void **retval,
+ dfsan_label thread_label,
+ dfsan_label retval_label,
+ dfsan_label *ret_label) {
+ int ret = pthread_join(thread, retval);
+ if (ret == 0 && retval)
+ dfsan_set_label(0, retval, sizeof(*retval));
+ *ret_label = 0;
+ return ret;
+}
+
struct dl_iterate_phdr_info {
int (*callback_trampoline)(void *callback, struct dl_phdr_info *info,
size_t size, void *data, dfsan_label info_label,
@@ -428,6 +473,20 @@ SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_dl_iterate_phdr(
return dl_iterate_phdr(dl_iterate_phdr_cb, &dipi);
}
+// This function is only available for glibc 2.27 or newer. Mark it weak so
+// linking succeeds with older glibcs.
+SANITIZER_WEAK_ATTRIBUTE void _dl_get_tls_static_info(size_t *sizep,
+ size_t *alignp);
+
+SANITIZER_INTERFACE_ATTRIBUTE void __dfsw__dl_get_tls_static_info(
+ size_t *sizep, size_t *alignp, dfsan_label sizep_label,
+ dfsan_label alignp_label) {
+ assert(_dl_get_tls_static_info);
+ _dl_get_tls_static_info(sizep, alignp);
+ dfsan_set_label(0, sizep, sizeof(*sizep));
+ dfsan_set_label(0, alignp, sizeof(*alignp));
+}
+
SANITIZER_INTERFACE_ATTRIBUTE
char *__dfsw_ctime_r(const time_t *timep, char *buf, dfsan_label timep_label,
dfsan_label buf_label, dfsan_label *ret_label) {
@@ -607,8 +666,8 @@ unsigned long int __dfsw_strtoul(const char *nptr, char **endptr, int base,
SANITIZER_INTERFACE_ATTRIBUTE
long long unsigned int __dfsw_strtoull(const char *nptr, char **endptr,
- dfsan_label nptr_label,
- int base, dfsan_label endptr_label,
+ int base, dfsan_label nptr_label,
+ dfsan_label endptr_label,
dfsan_label base_label,
dfsan_label *ret_label) {
char *tmp_endptr;
@@ -684,6 +743,18 @@ int __dfsw_getpwuid_r(id_t uid, struct passwd *pwd,
}
SANITIZER_INTERFACE_ATTRIBUTE
+int __dfsw_epoll_wait(int epfd, struct epoll_event *events, int maxevents,
+ int timeout, dfsan_label epfd_label,
+ dfsan_label events_label, dfsan_label maxevents_label,
+ dfsan_label timeout_label, dfsan_label *ret_label) {
+ int ret = epoll_wait(epfd, events, maxevents, timeout);
+ if (ret > 0)
+ dfsan_set_label(0, events, ret * sizeof(*events));
+ *ret_label = 0;
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
int __dfsw_poll(struct pollfd *fds, nfds_t nfds, int timeout,
dfsan_label dfs_label, dfsan_label nfds_label,
dfsan_label timeout_label, dfsan_label *ret_label) {
@@ -755,6 +826,16 @@ int __dfsw_sigaction(int signum, const struct sigaction *act,
}
SANITIZER_INTERFACE_ATTRIBUTE
+int __dfsw_sigaltstack(const stack_t *ss, stack_t *old_ss, dfsan_label ss_label,
+ dfsan_label old_ss_label, dfsan_label *ret_label) {
+ int ret = sigaltstack(ss, old_ss);
+ if (ret != -1 && old_ss)
+ dfsan_set_label(0, old_ss, sizeof(*old_ss));
+ *ret_label = 0;
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
int __dfsw_gettimeofday(struct timeval *tv, struct timezone *tz,
dfsan_label tv_label, dfsan_label tz_label,
dfsan_label *ret_label) {
@@ -835,6 +916,44 @@ SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_nanosleep(const struct timespec *req,
return ret;
}
+static void clear_msghdr_labels(size_t bytes_written, struct msghdr *msg) {
+ dfsan_set_label(0, msg, sizeof(*msg));
+ dfsan_set_label(0, msg->msg_name, msg->msg_namelen);
+ dfsan_set_label(0, msg->msg_control, msg->msg_controllen);
+ for (size_t i = 0; bytes_written > 0; ++i) {
+ assert(i < msg->msg_iovlen);
+ struct iovec *iov = &msg->msg_iov[i];
+ size_t iov_written =
+ bytes_written < iov->iov_len ? bytes_written : iov->iov_len;
+ dfsan_set_label(0, iov->iov_base, iov_written);
+ bytes_written -= iov_written;
+ }
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_recvmmsg(
+ int sockfd, struct mmsghdr *msgvec, unsigned int vlen, int flags,
+ struct timespec *timeout, dfsan_label sockfd_label,
+ dfsan_label msgvec_label, dfsan_label vlen_label, dfsan_label flags_label,
+ dfsan_label timeout_label, dfsan_label *ret_label) {
+ int ret = recvmmsg(sockfd, msgvec, vlen, flags, timeout);
+ for (int i = 0; i < ret; ++i) {
+ dfsan_set_label(0, &msgvec[i].msg_len, sizeof(msgvec[i].msg_len));
+ clear_msghdr_labels(msgvec[i].msg_len, &msgvec[i].msg_hdr);
+ }
+ *ret_label = 0;
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE ssize_t __dfsw_recvmsg(
+ int sockfd, struct msghdr *msg, int flags, dfsan_label sockfd_label,
+ dfsan_label msg_label, dfsan_label flags_label, dfsan_label *ret_label) {
+ ssize_t ret = recvmsg(sockfd, msg, flags);
+ if (ret >= 0)
+ clear_msghdr_labels(ret, msg);
+ *ret_label = 0;
+ return ret;
+}
+
SANITIZER_INTERFACE_ATTRIBUTE int
__dfsw_socketpair(int domain, int type, int protocol, int sv[2],
dfsan_label domain_label, dfsan_label type_label,
@@ -848,6 +967,50 @@ __dfsw_socketpair(int domain, int type, int protocol, int sv[2],
return ret;
}
+SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_getsockopt(
+ int sockfd, int level, int optname, void *optval, socklen_t *optlen,
+ dfsan_label sockfd_label, dfsan_label level_label,
+ dfsan_label optname_label, dfsan_label optval_label,
+ dfsan_label optlen_label, dfsan_label *ret_label) {
+ int ret = getsockopt(sockfd, level, optname, optval, optlen);
+ if (ret != -1 && optval && optlen) {
+ dfsan_set_label(0, optlen, sizeof(*optlen));
+ dfsan_set_label(0, optval, *optlen);
+ }
+ *ret_label = 0;
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_getsockname(
+ int sockfd, struct sockaddr *addr, socklen_t *addrlen,
+ dfsan_label sockfd_label, dfsan_label addr_label, dfsan_label addrlen_label,
+ dfsan_label *ret_label) {
+ socklen_t origlen = addrlen ? *addrlen : 0;
+ int ret = getsockname(sockfd, addr, addrlen);
+ if (ret != -1 && addr && addrlen) {
+ socklen_t written_bytes = origlen < *addrlen ? origlen : *addrlen;
+ dfsan_set_label(0, addrlen, sizeof(*addrlen));
+ dfsan_set_label(0, addr, written_bytes);
+ }
+ *ret_label = 0;
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_getpeername(
+ int sockfd, struct sockaddr *addr, socklen_t *addrlen,
+ dfsan_label sockfd_label, dfsan_label addr_label, dfsan_label addrlen_label,
+ dfsan_label *ret_label) {
+ socklen_t origlen = addrlen ? *addrlen : 0;
+ int ret = getpeername(sockfd, addr, addrlen);
+ if (ret != -1 && addr && addrlen) {
+ socklen_t written_bytes = origlen < *addrlen ? origlen : *addrlen;
+ dfsan_set_label(0, addrlen, sizeof(*addrlen));
+ dfsan_set_label(0, addr, written_bytes);
+ }
+ *ret_label = 0;
+ return ret;
+}
+
// Type of the trampoline function passed to the custom version of
// dfsan_set_write_callback.
typedef void (*write_trampoline_t)(
diff --git a/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_flags.inc b/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_flags.inc
index 29db73b98278..cdd0035c9b2d 100644
--- a/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_flags.inc
+++ b/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_flags.inc
@@ -29,7 +29,3 @@ DFSAN_FLAG(
DFSAN_FLAG(const char *, dump_labels_at_exit, "", "The path of the file where "
"to dump the labels when the "
"program terminates.")
-DFSAN_FLAG(bool, fast16labels, false,
- "Enables experimental mode where DFSan supports only 16 power-of-2 labels "
- "(1, 2, 4, 8, ... 32768) and the label union is computed as a bit-wise OR."
-)
diff --git a/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_interceptors.cpp b/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_interceptors.cpp
index 673171c46f5a..7efb182ac8d4 100644
--- a/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_interceptors.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_interceptors.cpp
@@ -11,35 +11,60 @@
// Interceptors for standard library functions.
//===----------------------------------------------------------------------===//
+#include <sys/syscall.h>
+#include <unistd.h>
+
#include "dfsan/dfsan.h"
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_common.h"
using namespace __sanitizer;
+namespace {
+
+bool interceptors_initialized;
+
+} // namespace
+
INTERCEPTOR(void *, mmap, void *addr, SIZE_T length, int prot, int flags,
int fd, OFF_T offset) {
- void *res = REAL(mmap)(addr, length, prot, flags, fd, offset);
- if (res != (void*)-1)
- dfsan_set_label(0, res, RoundUpTo(length, GetPageSize()));
+ void *res;
+
+ // interceptors_initialized is set to true during preinit_array, when we're
+ // single-threaded. So we don't need to worry about accessing it atomically.
+ if (!interceptors_initialized)
+ res = (void *)syscall(__NR_mmap, addr, length, prot, flags, fd, offset);
+ else
+ res = REAL(mmap)(addr, length, prot, flags, fd, offset);
+
+ if (res != (void *)-1)
+ dfsan_set_label(0, res, RoundUpTo(length, GetPageSizeCached()));
return res;
}
INTERCEPTOR(void *, mmap64, void *addr, SIZE_T length, int prot, int flags,
int fd, OFF64_T offset) {
void *res = REAL(mmap64)(addr, length, prot, flags, fd, offset);
- if (res != (void*)-1)
- dfsan_set_label(0, res, RoundUpTo(length, GetPageSize()));
+ if (res != (void *)-1)
+ dfsan_set_label(0, res, RoundUpTo(length, GetPageSizeCached()));
+ return res;
+}
+
+INTERCEPTOR(int, munmap, void *addr, SIZE_T length) {
+ int res = REAL(munmap)(addr, length);
+ if (res != -1)
+ dfsan_set_label(0, addr, RoundUpTo(length, GetPageSizeCached()));
return res;
}
namespace __dfsan {
void InitializeInterceptors() {
- static int inited = 0;
- CHECK_EQ(inited, 0);
+ CHECK(!interceptors_initialized);
INTERCEPT_FUNCTION(mmap);
INTERCEPT_FUNCTION(mmap64);
- inited = 1;
+ INTERCEPT_FUNCTION(munmap);
+
+ interceptors_initialized = true;
}
} // namespace __dfsan
diff --git a/contrib/llvm-project/compiler-rt/lib/dfsan/done_abilist.txt b/contrib/llvm-project/compiler-rt/lib/dfsan/done_abilist.txt
index 52f3ff5ef239..e90dbc17a3cd 100644
--- a/contrib/llvm-project/compiler-rt/lib/dfsan/done_abilist.txt
+++ b/contrib/llvm-project/compiler-rt/lib/dfsan/done_abilist.txt
@@ -48,25 +48,55 @@ fun:tolower=functional
fun:toupper=functional
# Functions that return a value that is data-dependent on the input.
+fun:__isinf=functional
+fun:__isinff=functional
+fun:__signbit=functional
+fun:__signbitf=functional
+fun:__signbitl=functional
fun:btowc=functional
fun:exp=functional
fun:exp2=functional
+fun:expf=functional
+fun:expl=functional
fun:fabs=functional
fun:finite=functional
+fun:finitef=functional
+fun:finitel=functional
fun:floor=functional
fun:fmod=functional
fun:isinf=functional
+fun:isinff=functional
+fun:isinfl=functional
fun:isnan=functional
+fun:isnanf=functional
+fun:isnanl=functional
fun:log=functional
+fun:log1p=functional
+fun:log1pf=functional
+fun:log1pl=functional
+fun:log2=functional
+fun:log2f=functional
+fun:log2l=functional
fun:modf=functional
+fun:nextafter=functional
+fun:nextafterf=functional
+fun:nextafterl=functional
+fun:nexttoward=functional
+fun:nexttowardf=functional
+fun:nexttowardl=functional
fun:pow=functional
+fun:powf=functional
+fun:powl=functional
fun:round=functional
fun:sqrt=functional
+fun:sqrtf=functional
+fun:sqrtl=functional
fun:wctob=functional
# Functions that produce an output that does not depend on the input (shadow is
# zeroed automatically).
fun:__assert_fail=discard
+fun:__cmsg_nxthdr=discard
fun:__ctype_b_loc=discard
fun:__cxa_atexit=discard
fun:__errno_location=discard
@@ -83,8 +113,12 @@ fun:chdir=discard
fun:close=discard
fun:closedir=discard
fun:connect=discard
+fun:creat=discard
fun:dladdr=discard
fun:dlclose=discard
+fun:epoll_create=discard
+fun:epoll_create1=discard
+fun:epoll_ctl=discard
fun:fclose=discard
fun:feof=discard
fun:ferror=discard
@@ -111,6 +145,7 @@ fun:mkdir=discard
fun:mmap=discard
fun:munmap=discard
fun:open=discard
+fun:openat=discard
fun:pipe=discard
fun:posix_fadvise=discard
fun:posix_memalign=discard
@@ -148,19 +183,27 @@ fun:uselocale=discard
# Functions that produce output does not depend on the input (need to zero the
# shadow manually).
+fun:_dl_get_tls_static_info=custom
fun:calloc=custom
fun:clock_gettime=custom
fun:dlopen=custom
+fun:epoll_wait=custom
fun:fgets=custom
fun:fstat=custom
fun:getcwd=custom
fun:get_current_dir_name=custom
fun:gethostname=custom
+fun:getpeername=custom
fun:getrlimit=custom
fun:getrusage=custom
+fun:getsockname=custom
+fun:getsockopt=custom
fun:nanosleep=custom
fun:pread=custom
fun:read=custom
+fun:recvmmsg=custom
+fun:recvmsg=custom
+fun:sigaltstack=custom
fun:socketpair=custom
fun:stat=custom
fun:time=custom
@@ -183,6 +226,7 @@ fun:strtoull=custom
# Functions that produce an output that is computed from the input, but is not
# necessarily data dependent.
+fun:bcmp=custom
fun:memchr=custom
fun:memcmp=custom
fun:strcasecmp=custom
@@ -191,6 +235,7 @@ fun:strcmp=custom
fun:strlen=custom
fun:strncasecmp=custom
fun:strncmp=custom
+fun:strpbrk=custom
fun:strrchr=custom
fun:strstr=custom
@@ -220,7 +265,32 @@ fun:qsort=discard
###############################################################################
# pthread
###############################################################################
+fun:__pthread_register_cancel=discard
+fun:__pthread_unregister_cancel=discard
+fun:pthread_attr_destroy=discard
+fun:pthread_attr_getaffinity_np=discard
+fun:pthread_attr_getdetachstate=discard
+fun:pthread_attr_getguardsize=discard
+fun:pthread_attr_getinheritsched=discard
+fun:pthread_attr_getschedparam=discard
+fun:pthread_attr_getschedpolicy=discard
+fun:pthread_attr_getscope=discard
+fun:pthread_attr_getstack=discard
+fun:pthread_attr_getstackaddr=discard
+fun:pthread_attr_getstacksize=discard
+fun:pthread_attr_init=discard
+fun:pthread_attr_setaffinity_np=discard
+fun:pthread_attr_setdetachstate=discard
+fun:pthread_attr_setguardsize=discard
+fun:pthread_attr_setinheritsched=discard
+fun:pthread_attr_setschedparam=discard
+fun:pthread_attr_setschedpolicy=discard
+fun:pthread_attr_setscope=discard
+fun:pthread_attr_setstack=discard
+fun:pthread_attr_setstackaddr=discard
+fun:pthread_attr_setstacksize=discard
fun:pthread_equal=discard
+fun:pthread_getschedparam=discard
fun:pthread_getspecific=discard
fun:pthread_key_create=discard
fun:pthread_key_delete=discard
@@ -232,6 +302,17 @@ fun:pthread_mutex_unlock=discard
fun:pthread_mutexattr_destroy=discard
fun:pthread_mutexattr_init=discard
fun:pthread_mutexattr_settype=discard
+fun:pthread_rwlock_destroy=discard
+fun:pthread_rwlock_init=discard
+fun:pthread_rwlock_rdlock=discard
+fun:pthread_rwlock_timedrdlock=discard
+fun:pthread_rwlock_timedwrlock=discard
+fun:pthread_rwlock_tryrdlock=discard
+fun:pthread_rwlock_trywrlock=discard
+fun:pthread_rwlock_wrlock=discard
+fun:pthread_rwlock_unlock=discard
+fun:pthread_setschedparam=discard
+fun:pthread_setname_np=discard
fun:pthread_once=discard
fun:pthread_self=discard
fun:pthread_setspecific=discard
@@ -239,6 +320,10 @@ fun:pthread_setspecific=discard
# Functions that take a callback (wrap the callback manually).
fun:pthread_create=custom
+# Functions that produce output does not depend on the input (need to zero the
+# shadow manually).
+fun:pthread_join=custom
+
###############################################################################
# libffi/libgo
###############################################################################
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerCorpus.h b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerCorpus.h
index 54d1e09ec6df..daea4f5213b1 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerCorpus.h
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerCorpus.h
@@ -18,6 +18,7 @@
#include "FuzzerSHA1.h"
#include "FuzzerTracePC.h"
#include <algorithm>
+#include <chrono>
#include <numeric>
#include <random>
#include <unordered_set>
@@ -26,6 +27,7 @@ namespace fuzzer {
struct InputInfo {
Unit U; // The actual input data.
+ std::chrono::microseconds TimeOfUnit;
uint8_t Sha1[kSHA1NumBytes]; // Checksum.
// Number of features that this input has and no smaller input has.
size_t NumFeatures = 0;
@@ -33,6 +35,7 @@ struct InputInfo {
// Stats.
size_t NumExecutedMutations = 0;
size_t NumSuccessfullMutations = 0;
+ bool NeverReduce = false;
bool MayDeleteFile = false;
bool Reduced = false;
bool HasFocusFunction = false;
@@ -61,11 +64,15 @@ struct InputInfo {
}
// Assign more energy to a high-entropy seed, i.e., that reveals more
- // information about the globally rare features in the neighborhood
- // of the seed. Since we do not know the entropy of a seed that has
- // never been executed we assign fresh seeds maximum entropy and
- // let II->Energy approach the true entropy from above.
- void UpdateEnergy(size_t GlobalNumberOfFeatures) {
+ // information about the globally rare features in the neighborhood of the
+ // seed. Since we do not know the entropy of a seed that has never been
+ // executed we assign fresh seeds maximum entropy and let II->Energy approach
+ // the true entropy from above. If ScalePerExecTime is true, the computed
+ // entropy is scaled based on how fast this input executes compared to the
+ // average execution time of inputs. The faster an input executes, the more
+ // energy gets assigned to the input.
+ void UpdateEnergy(size_t GlobalNumberOfFeatures, bool ScalePerExecTime,
+ std::chrono::microseconds AverageUnitExecutionTime) {
Energy = 0.0;
SumIncidence = 0;
@@ -88,6 +95,27 @@ struct InputInfo {
// Normalize.
if (SumIncidence != 0)
Energy = (Energy / SumIncidence) + logl(SumIncidence);
+
+ if (ScalePerExecTime) {
+ // Scaling to favor inputs with lower execution time.
+ uint32_t PerfScore = 100;
+ if (TimeOfUnit.count() > AverageUnitExecutionTime.count() * 10)
+ PerfScore = 10;
+ else if (TimeOfUnit.count() > AverageUnitExecutionTime.count() * 4)
+ PerfScore = 25;
+ else if (TimeOfUnit.count() > AverageUnitExecutionTime.count() * 2)
+ PerfScore = 50;
+ else if (TimeOfUnit.count() * 3 > AverageUnitExecutionTime.count() * 4)
+ PerfScore = 75;
+ else if (TimeOfUnit.count() * 4 < AverageUnitExecutionTime.count())
+ PerfScore = 300;
+ else if (TimeOfUnit.count() * 3 < AverageUnitExecutionTime.count())
+ PerfScore = 200;
+ else if (TimeOfUnit.count() * 2 < AverageUnitExecutionTime.count())
+ PerfScore = 150;
+
+ Energy *= PerfScore;
+ }
}
// Increment the frequency of the feature Idx.
@@ -120,6 +148,7 @@ struct EntropicOptions {
bool Enabled;
size_t NumberOfRarestFeatures;
size_t FeatureFrequencyThreshold;
+ bool ScalePerExecTime;
};
class InputCorpus {
@@ -177,7 +206,8 @@ public:
bool empty() const { return Inputs.empty(); }
const Unit &operator[] (size_t Idx) const { return Inputs[Idx]->U; }
InputInfo *AddToCorpus(const Unit &U, size_t NumFeatures, bool MayDeleteFile,
- bool HasFocusFunction,
+ bool HasFocusFunction, bool NeverReduce,
+ std::chrono::microseconds TimeOfUnit,
const Vector<uint32_t> &FeatureSet,
const DataFlowTrace &DFT, const InputInfo *BaseII) {
assert(!U.empty());
@@ -187,6 +217,8 @@ public:
InputInfo &II = *Inputs.back();
II.U = U;
II.NumFeatures = NumFeatures;
+ II.NeverReduce = NeverReduce;
+ II.TimeOfUnit = TimeOfUnit;
II.MayDeleteFile = MayDeleteFile;
II.UniqFeatureSet = FeatureSet;
II.HasFocusFunction = HasFocusFunction;
@@ -268,6 +300,15 @@ public:
return II;
}
+ InputInfo &ChooseUnitToCrossOverWith(Random &Rand, bool UniformDist) {
+ if (!UniformDist) {
+ return ChooseUnitToMutate(Rand);
+ }
+ InputInfo &II = *Inputs[Rand(Inputs.size())];
+ assert(!II.U.empty());
+ return II;
+ }
+
// Returns an index of random unit from the corpus to mutate.
size_t ChooseUnitIdxToMutate(Random &Rand) {
UpdateCorpusDistribution(Rand);
@@ -460,12 +501,19 @@ private:
Weights.resize(N);
std::iota(Intervals.begin(), Intervals.end(), 0);
+ std::chrono::microseconds AverageUnitExecutionTime(0);
+ for (auto II : Inputs) {
+ AverageUnitExecutionTime += II->TimeOfUnit;
+ }
+ AverageUnitExecutionTime /= N;
+
bool VanillaSchedule = true;
if (Entropic.Enabled) {
for (auto II : Inputs) {
if (II->NeedsEnergyUpdate && II->Energy != 0.0) {
II->NeedsEnergyUpdate = false;
- II->UpdateEnergy(RareFeatures.size());
+ II->UpdateEnergy(RareFeatures.size(), Entropic.ScalePerExecTime,
+ AverageUnitExecutionTime);
}
}
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDataFlowTrace.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDataFlowTrace.cpp
index 48df8e668604..0e9cdf7e66b1 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDataFlowTrace.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDataFlowTrace.cpp
@@ -253,7 +253,7 @@ int CollectDataFlow(const std::string &DFTBinary, const std::string &DirPath,
return 1;
}
- static char DFSanEnv[] = "DFSAN_OPTIONS=fast16labels=1:warn_unimplemented=0";
+ static char DFSanEnv[] = "DFSAN_OPTIONS=warn_unimplemented=0";
putenv(DFSanEnv);
MkDir(DirPath);
for (auto &F : CorporaFiles) {
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDriver.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDriver.cpp
index a847c76e292d..447cafce7fd4 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDriver.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDriver.cpp
@@ -33,7 +33,11 @@
// binary can test for its existence.
#if LIBFUZZER_MSVC
extern "C" void __libfuzzer_is_present() {}
+#if defined(_M_IX86) || defined(__i386__)
+#pragma comment(linker, "/include:___libfuzzer_is_present")
+#else
#pragma comment(linker, "/include:__libfuzzer_is_present")
+#endif
#else
extern "C" __attribute__((used)) void __libfuzzer_is_present() {}
#endif // LIBFUZZER_MSVC
@@ -246,6 +250,28 @@ static void WorkerThread(const Command &BaseCmd, std::atomic<unsigned> *Counter,
}
}
+static void ValidateDirectoryExists(const std::string &Path,
+ bool CreateDirectory) {
+ if (Path.empty()) {
+ Printf("ERROR: Provided directory path is an empty string\n");
+ exit(1);
+ }
+
+ if (IsDirectory(Path))
+ return;
+
+ if (CreateDirectory) {
+ if (!MkDirRecursive(Path)) {
+ Printf("ERROR: Failed to create directory \"%s\"\n", Path.c_str());
+ exit(1);
+ }
+ return;
+ }
+
+ Printf("ERROR: The required directory \"%s\" does not exist\n", Path.c_str());
+ exit(1);
+}
+
std::string CloneArgsWithoutX(const Vector<std::string> &Args,
const char *X1, const char *X2) {
std::string Cmd;
@@ -295,7 +321,12 @@ int RunOneTest(Fuzzer *F, const char *InputFilePath, size_t MaxLen) {
if (MaxLen && MaxLen < U.size())
U.resize(MaxLen);
F->ExecuteCallback(U.data(), U.size());
- F->TryDetectingAMemoryLeak(U.data(), U.size(), true);
+ if (Flags.print_full_coverage) {
+ // Leak detection is not needed when collecting full coverage data.
+ F->TPCUpdateObservedPCs();
+ } else {
+ F->TryDetectingAMemoryLeak(U.data(), U.size(), true);
+ }
return 0;
}
@@ -645,6 +676,7 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {
Options.Verbosity = Flags.verbosity;
Options.MaxLen = Flags.max_len;
Options.LenControl = Flags.len_control;
+ Options.KeepSeed = Flags.keep_seed;
Options.UnitTimeoutSec = Flags.timeout;
Options.ErrorExitCode = Flags.error_exitcode;
Options.TimeoutExitCode = Flags.timeout_exitcode;
@@ -653,6 +685,7 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {
Options.IgnoreCrashes = Flags.ignore_crashes;
Options.MaxTotalTimeSec = Flags.max_total_time;
Options.DoCrossOver = Flags.cross_over;
+ Options.CrossOverUniformDist = Flags.cross_over_uniform_dist;
Options.MutateDepth = Flags.mutate_depth;
Options.ReduceDepth = Flags.reduce_depth;
Options.UseCounters = Flags.use_counters;
@@ -674,13 +707,33 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {
Options.MallocLimitMb = Options.RssLimitMb;
if (Flags.runs >= 0)
Options.MaxNumberOfRuns = Flags.runs;
- if (!Inputs->empty() && !Flags.minimize_crash_internal_step)
- Options.OutputCorpus = (*Inputs)[0];
+ if (!Inputs->empty() && !Flags.minimize_crash_internal_step) {
+ // Ensure output corpus assumed to be the first arbitrary argument input
+ // is not a path to an existing file.
+ std::string OutputCorpusDir = (*Inputs)[0];
+ if (!IsFile(OutputCorpusDir)) {
+ Options.OutputCorpus = OutputCorpusDir;
+ ValidateDirectoryExists(Options.OutputCorpus, Flags.create_missing_dirs);
+ }
+ }
Options.ReportSlowUnits = Flags.report_slow_units;
- if (Flags.artifact_prefix)
+ if (Flags.artifact_prefix) {
Options.ArtifactPrefix = Flags.artifact_prefix;
- if (Flags.exact_artifact_path)
+
+ // Since the prefix could be a full path to a file name prefix, assume
+ // that if the path ends with the platform's separator that a directory
+ // is desired
+ std::string ArtifactPathDir = Options.ArtifactPrefix;
+ if (!IsSeparator(ArtifactPathDir[ArtifactPathDir.length() - 1])) {
+ ArtifactPathDir = DirName(ArtifactPathDir);
+ }
+ ValidateDirectoryExists(ArtifactPathDir, Flags.create_missing_dirs);
+ }
+ if (Flags.exact_artifact_path) {
Options.ExactArtifactPath = Flags.exact_artifact_path;
+ ValidateDirectoryExists(DirName(Options.ExactArtifactPath),
+ Flags.create_missing_dirs);
+ }
Vector<Unit> Dictionary;
if (Flags.dict)
if (!ParseDictionaryFile(FileToString(Flags.dict), &Dictionary))
@@ -695,6 +748,7 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {
Options.PrintFinalStats = Flags.print_final_stats;
Options.PrintCorpusStats = Flags.print_corpus_stats;
Options.PrintCoverage = Flags.print_coverage;
+ Options.PrintFullCoverage = Flags.print_full_coverage;
if (Flags.exit_on_src_pos)
Options.ExitOnSrcPos = Flags.exit_on_src_pos;
if (Flags.exit_on_item)
@@ -703,8 +757,12 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {
Options.FocusFunction = Flags.focus_function;
if (Flags.data_flow_trace)
Options.DataFlowTrace = Flags.data_flow_trace;
- if (Flags.features_dir)
+ if (Flags.features_dir) {
Options.FeaturesDir = Flags.features_dir;
+ ValidateDirectoryExists(Options.FeaturesDir, Flags.create_missing_dirs);
+ }
+ if (Flags.mutation_graph_file)
+ Options.MutationGraphFile = Flags.mutation_graph_file;
if (Flags.collect_data_flow)
Options.CollectDataFlow = Flags.collect_data_flow;
if (Flags.stop_file)
@@ -714,21 +772,19 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {
(size_t)Flags.entropic_feature_frequency_threshold;
Options.EntropicNumberOfRarestFeatures =
(size_t)Flags.entropic_number_of_rarest_features;
- if (Options.Entropic) {
- if (!Options.FocusFunction.empty()) {
- Printf("ERROR: The parameters `--entropic` and `--focus_function` cannot "
- "be used together.\n");
- exit(1);
- }
+ Options.EntropicScalePerExecTime = Flags.entropic_scale_per_exec_time;
+ if (!Options.FocusFunction.empty())
+ Options.Entropic = false; // FocusFunction overrides entropic scheduling.
+ if (Options.Entropic)
Printf("INFO: Running with entropic power schedule (0x%X, %d).\n",
Options.EntropicFeatureFrequencyThreshold,
Options.EntropicNumberOfRarestFeatures);
- }
struct EntropicOptions Entropic;
Entropic.Enabled = Options.Entropic;
Entropic.FeatureFrequencyThreshold =
Options.EntropicFeatureFrequencyThreshold;
Entropic.NumberOfRarestFeatures = Options.EntropicNumberOfRarestFeatures;
+ Entropic.ScalePerExecTime = Options.EntropicScalePerExecTime;
unsigned Seed = Flags.seed;
// Initialize Seed.
@@ -763,6 +819,7 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {
#endif // LIBFUZZER_EMSCRIPTEN
Options.HandleAbrt = Flags.handle_abrt;
+ Options.HandleAlrm = !Flags.minimize_crash;
Options.HandleBus = Flags.handle_bus;
Options.HandleFpe = Flags.handle_fpe;
Options.HandleIll = Flags.handle_ill;
@@ -772,6 +829,8 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {
Options.HandleXfsz = Flags.handle_xfsz;
Options.HandleUsr1 = Flags.handle_usr1;
Options.HandleUsr2 = Flags.handle_usr2;
+ Options.HandleWinExcept = Flags.handle_winexcept;
+
SetSignalHandler(Options);
std::atexit(Fuzzer::StaticExitCallback);
@@ -854,6 +913,12 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {
exit(0); // Don't let F destroy itself.
}
+extern "C" ATTRIBUTE_INTERFACE int
+LLVMFuzzerRunDriver(int *argc, char ***argv,
+ int (*UserCb)(const uint8_t *Data, size_t Size)) {
+ return FuzzerDriver(argc, argv, UserCb);
+}
+
// Storage for global ExternalFunctions object.
ExternalFunctions *EF = nullptr;
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerExtFunctionsWeak.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerExtFunctionsWeak.cpp
index 24ddc57d47d6..3ef758daa7b6 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerExtFunctionsWeak.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerExtFunctionsWeak.cpp
@@ -13,7 +13,7 @@
//===----------------------------------------------------------------------===//
#include "FuzzerPlatform.h"
#if LIBFUZZER_LINUX || LIBFUZZER_NETBSD || LIBFUZZER_FUCHSIA || \
- LIBFUZZER_FREEBSD || LIBFUZZER_OPENBSD || LIBFUZZER_EMSCRIPTEN
+ LIBFUZZER_FREEBSD || LIBFUZZER_EMSCRIPTEN
#include "FuzzerExtFunctions.h"
#include "FuzzerIO.h"
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerExtraCounters.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerExtraCounters.cpp
index d36beba1b1ba..04f569a1a879 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerExtraCounters.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerExtraCounters.cpp
@@ -12,7 +12,7 @@
#include <cstdint>
#if LIBFUZZER_LINUX || LIBFUZZER_NETBSD || LIBFUZZER_FREEBSD || \
- LIBFUZZER_OPENBSD || LIBFUZZER_FUCHSIA || LIBFUZZER_EMSCRIPTEN
+ LIBFUZZER_FUCHSIA || LIBFUZZER_EMSCRIPTEN
__attribute__((weak)) extern uint8_t __start___libfuzzer_extra_counters;
__attribute__((weak)) extern uint8_t __stop___libfuzzer_extra_counters;
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerFlags.def b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerFlags.def
index 832224a705d2..ab31da0ae5d6 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerFlags.def
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerFlags.def
@@ -23,7 +23,21 @@ FUZZER_FLAG_INT(len_control, 100, "Try generating small inputs first, "
FUZZER_FLAG_STRING(seed_inputs, "A comma-separated list of input files "
"to use as an additional seed corpus. Alternatively, an \"@\" followed by "
"the name of a file containing the comma-separated list.")
+FUZZER_FLAG_INT(keep_seed, 0, "If 1, keep seed inputs in the corpus even if "
+ "they do not produce new coverage. When used with |reduce_inputs==1|, the "
+ "seed inputs will never be reduced. This option can be useful when seeds are"
+ "not properly formed for the fuzz target but still have useful snippets.")
FUZZER_FLAG_INT(cross_over, 1, "If 1, cross over inputs.")
+FUZZER_FLAG_INT(cross_over_uniform_dist, 0, "Experimental. If 1, use a "
+ "uniform probability distribution when choosing inputs to cross over with. "
+ "Some of the inputs in the corpus may never get chosen for mutation "
+ "depending on the input mutation scheduling policy. With this flag, all "
+ "inputs, regardless of the input mutation scheduling policy, can be chosen "
+ "as an input to cross over with. This can be particularly useful with "
+ "|keep_seed==1|; all the initial seed inputs, even though they do not "
+ "increase coverage because they are not properly formed, will still be "
+ "chosen as an input to cross over with.")
+
FUZZER_FLAG_INT(mutate_depth, 5,
"Apply this number of consecutive mutations to each input.")
FUZZER_FLAG_INT(reduce_depth, 0, "Experimental/internal. "
@@ -74,6 +88,11 @@ FUZZER_FLAG_STRING(features_dir, "internal flag. Used to dump feature sets on di
"Every time a new input is added to the corpus, a corresponding file in the features_dir"
" is created containing the unique features of that input."
" Features are stored in binary format.")
+FUZZER_FLAG_STRING(mutation_graph_file, "Saves a graph (in DOT format) to"
+ " mutation_graph_file. The graph contains a vertex for each input that has"
+ " unique coverage; directed edges are provided between parents and children"
+ " where the child has unique coverage, and are recorded with the type of"
+ " mutation that caused the child.")
FUZZER_FLAG_INT(use_counters, 1, "Use coverage counters")
FUZZER_FLAG_INT(use_memmem, 1,
"Use hints from intercepting memmem, strstr, etc")
@@ -113,6 +132,8 @@ FUZZER_FLAG_INT(print_corpus_stats, 0,
"If 1, print statistics on corpus elements at exit.")
FUZZER_FLAG_INT(print_coverage, 0, "If 1, print coverage information as text"
" at exit.")
+FUZZER_FLAG_INT(print_full_coverage, 0, "If 1, print full coverage information "
+ "(all branches) as text at exit.")
FUZZER_FLAG_INT(dump_coverage, 0, "Deprecated.")
FUZZER_FLAG_INT(handle_segv, 1, "If 1, try to intercept SIGSEGV.")
FUZZER_FLAG_INT(handle_bus, 1, "If 1, try to intercept SIGBUS.")
@@ -124,6 +145,8 @@ FUZZER_FLAG_INT(handle_term, 1, "If 1, try to intercept SIGTERM.")
FUZZER_FLAG_INT(handle_xfsz, 1, "If 1, try to intercept SIGXFSZ.")
FUZZER_FLAG_INT(handle_usr1, 1, "If 1, try to intercept SIGUSR1.")
FUZZER_FLAG_INT(handle_usr2, 1, "If 1, try to intercept SIGUSR2.")
+FUZZER_FLAG_INT(handle_winexcept, 1, "If 1, try to intercept uncaught Windows "
+ "Visual C++ Exceptions.")
FUZZER_FLAG_INT(close_fd_mask, 0, "If 1, close stdout at startup; "
"if 2, close stderr; if 3, close both. "
"Be careful, this will also close e.g. stderr of asan.")
@@ -152,8 +175,9 @@ FUZZER_FLAG_INT(ignore_remaining_args, 0, "If 1, ignore all arguments passed "
FUZZER_FLAG_STRING(focus_function, "Experimental. "
"Fuzzing will focus on inputs that trigger calls to this function. "
"If -focus_function=auto and -data_flow_trace is used, libFuzzer "
- "will choose the focus functions automatically.")
-FUZZER_FLAG_INT(entropic, 0, "Experimental. Enables entropic power schedule.")
+ "will choose the focus functions automatically. Disables -entropic when "
+ "specified.")
+FUZZER_FLAG_INT(entropic, 1, "Enables entropic power schedule.")
FUZZER_FLAG_INT(entropic_feature_frequency_threshold, 0xFF, "Experimental. If "
"entropic is enabled, all features which are observed less often than "
"the specified value are considered as rare.")
@@ -161,9 +185,18 @@ FUZZER_FLAG_INT(entropic_number_of_rarest_features, 100, "Experimental. If "
"entropic is enabled, we keep track of the frequencies only for the "
"Top-X least abundant features (union features that are considered as "
"rare).")
+FUZZER_FLAG_INT(entropic_scale_per_exec_time, 0, "Experimental. If 1, "
+ "the Entropic power schedule gets scaled based on the input execution "
+ "time. Inputs with lower execution time get scheduled more (up to 30x). "
+ "Note that, if 1, fuzzer stops from being deterministic even if a "
+ "non-zero random seed is given.")
FUZZER_FLAG_INT(analyze_dict, 0, "Experimental")
FUZZER_DEPRECATED_FLAG(use_clang_coverage)
FUZZER_FLAG_STRING(data_flow_trace, "Experimental: use the data flow trace")
FUZZER_FLAG_STRING(collect_data_flow,
"Experimental: collect the data flow trace")
+
+FUZZER_FLAG_INT(create_missing_dirs, 0, "Automatically attempt to create "
+ "directories for arguments that would normally expect them to already "
+ "exist (i.e. artifact_prefix, exact_artifact_path, features_dir, corpus)")
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerFork.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerFork.cpp
index d9e6b79443e0..84725d22a9c7 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerFork.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerFork.cpp
@@ -309,11 +309,15 @@ void FuzzWithFork(Random &Rand, const FuzzingOptions &Options,
else
Env.MainCorpusDir = CorpusDirs[0];
- auto CFPath = DirPlusFile(Env.TempDir, "merge.txt");
- CrashResistantMerge(Env.Args, {}, SeedFiles, &Env.Files, {}, &Env.Features,
- {}, &Env.Cov,
- CFPath, false);
- RemoveFile(CFPath);
+ if (Options.KeepSeed) {
+ for (auto &File : SeedFiles)
+ Env.Files.push_back(File.File);
+ } else {
+ auto CFPath = DirPlusFile(Env.TempDir, "merge.txt");
+ CrashResistantMerge(Env.Args, {}, SeedFiles, &Env.Files, {}, &Env.Features,
+ {}, &Env.Cov, CFPath, false);
+ RemoveFile(CFPath);
+ }
Printf("INFO: -fork=%d: %zd seed inputs, starting to fuzz in %s\n", NumJobs,
Env.Files.size(), Env.TempDir.c_str());
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIO.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIO.cpp
index cbb1dbe1b86d..54a7219fc0e0 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIO.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIO.cpp
@@ -77,6 +77,19 @@ void WriteToFile(const uint8_t *Data, size_t Size, const std::string &Path) {
fclose(Out);
}
+void AppendToFile(const std::string &Data, const std::string &Path) {
+ AppendToFile(reinterpret_cast<const uint8_t *>(Data.data()), Data.size(),
+ Path);
+}
+
+void AppendToFile(const uint8_t *Data, size_t Size, const std::string &Path) {
+ FILE *Out = fopen(Path.c_str(), "a");
+ if (!Out)
+ return;
+ fwrite(Data, sizeof(Data[0]), Size, Out);
+ fclose(Out);
+}
+
void ReadDirToVectorOfUnits(const char *Path, Vector<Unit> *V,
long *Epoch, size_t MaxSize, bool ExitOnError) {
long E = Epoch ? *Epoch : 0;
@@ -144,6 +157,38 @@ void VPrintf(bool Verbose, const char *Fmt, ...) {
fflush(OutputFile);
}
+static bool MkDirRecursiveInner(const std::string &Leaf) {
+ // Prevent chance of potential infinite recursion
+ if (Leaf == ".")
+ return true;
+
+ const std::string &Dir = DirName(Leaf);
+
+ if (IsDirectory(Dir)) {
+ MkDir(Leaf);
+ return IsDirectory(Leaf);
+ }
+
+ bool ret = MkDirRecursiveInner(Dir);
+ if (!ret) {
+ // Give up early if a previous MkDir failed
+ return ret;
+ }
+
+ MkDir(Leaf);
+ return IsDirectory(Leaf);
+}
+
+bool MkDirRecursive(const std::string &Dir) {
+ if (Dir.empty())
+ return false;
+
+ if (IsDirectory(Dir))
+ return true;
+
+ return MkDirRecursiveInner(Dir);
+}
+
void RmDirRecursive(const std::string &Dir) {
IterateDirRecursive(
Dir, [](const std::string &Path) {},
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIO.h b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIO.h
index 6e4368b971fa..abd25110d07d 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIO.h
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIO.h
@@ -29,6 +29,9 @@ void WriteToFile(const uint8_t *Data, size_t Size, const std::string &Path);
void WriteToFile(const std::string &Data, const std::string &Path);
void WriteToFile(const Unit &U, const std::string &Path);
+void AppendToFile(const uint8_t *Data, size_t Size, const std::string &Path);
+void AppendToFile(const std::string &Data, const std::string &Path);
+
void ReadDirToVectorOfUnits(const char *Path, Vector<Unit> *V,
long *Epoch, size_t MaxSize, bool ExitOnError);
@@ -58,11 +61,13 @@ void RawPrint(const char *Str);
// Platform specific functions:
bool IsFile(const std::string &Path);
+bool IsDirectory(const std::string &Path);
size_t FileSize(const std::string &Path);
void ListFilesInDirRecursive(const std::string &Dir, long *Epoch,
Vector<std::string> *V, bool TopDir);
+bool MkDirRecursive(const std::string &Dir);
void RmDirRecursive(const std::string &Dir);
// Iterate files and dirs inside Dir, recursively.
@@ -82,6 +87,7 @@ struct SizedFile {
void GetSizedFilesFromDir(const std::string &Dir, Vector<SizedFile> *V);
char GetSeparator();
+bool IsSeparator(char C);
// Similar to the basename utility: returns the file name w/o the dir prefix.
std::string Basename(const std::string &Path);
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIOPosix.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIOPosix.cpp
index aac85b08727a..4706a40959be 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIOPosix.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIOPosix.cpp
@@ -31,7 +31,7 @@ bool IsFile(const std::string &Path) {
return S_ISREG(St.st_mode);
}
-static bool IsDirectory(const std::string &Path) {
+bool IsDirectory(const std::string &Path) {
struct stat St;
if (stat(Path.c_str(), &St))
return false;
@@ -104,6 +104,10 @@ char GetSeparator() {
return '/';
}
+bool IsSeparator(char C) {
+ return C == '/';
+}
+
FILE* OpenFile(int Fd, const char* Mode) {
return fdopen(Fd, Mode);
}
@@ -155,7 +159,7 @@ bool IsInterestingCoverageFile(const std::string &FileName) {
}
void RawPrint(const char *Str) {
- write(2, Str, strlen(Str));
+ (void)write(2, Str, strlen(Str));
}
void MkDir(const std::string &Path) {
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIOWindows.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIOWindows.cpp
index 651283a551cf..61ad35e281f5 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIOWindows.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIOWindows.cpp
@@ -76,6 +76,18 @@ static bool IsDir(DWORD FileAttrs) {
return FileAttrs & FILE_ATTRIBUTE_DIRECTORY;
}
+bool IsDirectory(const std::string &Path) {
+ DWORD Att = GetFileAttributesA(Path.c_str());
+
+ if (Att == INVALID_FILE_ATTRIBUTES) {
+ Printf("GetFileAttributesA() failed for \"%s\" (Error code: %lu).\n",
+ Path.c_str(), GetLastError());
+ return false;
+ }
+
+ return IsDir(Att);
+}
+
std::string Basename(const std::string &Path) {
size_t Pos = Path.find_last_of("/\\");
if (Pos == std::string::npos) return Path;
@@ -227,7 +239,7 @@ intptr_t GetHandleFromFd(int fd) {
return _get_osfhandle(fd);
}
-static bool IsSeparator(char C) {
+bool IsSeparator(char C) {
return C == '\\' || C == '/';
}
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerInterceptors.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerInterceptors.cpp
new file mode 100644
index 000000000000..b87798603fda
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerInterceptors.cpp
@@ -0,0 +1,253 @@
+//===-- FuzzerInterceptors.cpp --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Intercept certain libc functions to aid fuzzing.
+// Linked only when other RTs that define their own interceptors are not linked.
+//===----------------------------------------------------------------------===//
+
+#include "FuzzerPlatform.h"
+
+#if LIBFUZZER_LINUX
+
+#define GET_CALLER_PC() __builtin_return_address(0)
+
+#define PTR_TO_REAL(x) real_##x
+#define REAL(x) __interception::PTR_TO_REAL(x)
+#define FUNC_TYPE(x) x##_type
+#define DEFINE_REAL(ret_type, func, ...) \
+ typedef ret_type (*FUNC_TYPE(func))(__VA_ARGS__); \
+ namespace __interception { \
+ FUNC_TYPE(func) PTR_TO_REAL(func); \
+ }
+
+#include <cassert>
+#include <cstdint>
+#include <dlfcn.h> // for dlsym()
+
+static void *getFuncAddr(const char *name, uintptr_t wrapper_addr) {
+ void *addr = dlsym(RTLD_NEXT, name);
+ if (!addr) {
+ // If the lookup using RTLD_NEXT failed, the sanitizer runtime library is
+ // later in the library search order than the DSO that we are trying to
+ // intercept, which means that we cannot intercept this function. We still
+ // want the address of the real definition, though, so look it up using
+ // RTLD_DEFAULT.
+ addr = dlsym(RTLD_DEFAULT, name);
+
+ // In case `name' is not loaded, dlsym ends up finding the actual wrapper.
+ // We don't want to intercept the wrapper and have it point to itself.
+ if (reinterpret_cast<uintptr_t>(addr) == wrapper_addr)
+ addr = nullptr;
+ }
+ return addr;
+}
+
+static int FuzzerInited = 0;
+static bool FuzzerInitIsRunning;
+
+static void fuzzerInit();
+
+static void ensureFuzzerInited() {
+ assert(!FuzzerInitIsRunning);
+ if (!FuzzerInited) {
+ fuzzerInit();
+ }
+}
+
+static int internal_strcmp_strncmp(const char *s1, const char *s2, bool strncmp,
+ size_t n) {
+ size_t i = 0;
+ while (true) {
+ if (strncmp) {
+ if (i == n)
+ break;
+ i++;
+ }
+ unsigned c1 = *s1;
+ unsigned c2 = *s2;
+ if (c1 != c2)
+ return (c1 < c2) ? -1 : 1;
+ if (c1 == 0)
+ break;
+ s1++;
+ s2++;
+ }
+ return 0;
+}
+
+static int internal_strncmp(const char *s1, const char *s2, size_t n) {
+ return internal_strcmp_strncmp(s1, s2, true, n);
+}
+
+static int internal_strcmp(const char *s1, const char *s2) {
+ return internal_strcmp_strncmp(s1, s2, false, 0);
+}
+
+static int internal_memcmp(const void *s1, const void *s2, size_t n) {
+ const uint8_t *t1 = static_cast<const uint8_t *>(s1);
+ const uint8_t *t2 = static_cast<const uint8_t *>(s2);
+ for (size_t i = 0; i < n; ++i, ++t1, ++t2)
+ if (*t1 != *t2)
+ return *t1 < *t2 ? -1 : 1;
+ return 0;
+}
+
+static size_t internal_strlen(const char *s) {
+ size_t i = 0;
+ while (s[i])
+ i++;
+ return i;
+}
+
+static char *internal_strstr(const char *haystack, const char *needle) {
+ // This is O(N^2), but we are not using it in hot places.
+ size_t len1 = internal_strlen(haystack);
+ size_t len2 = internal_strlen(needle);
+ if (len1 < len2)
+ return nullptr;
+ for (size_t pos = 0; pos <= len1 - len2; pos++) {
+ if (internal_memcmp(haystack + pos, needle, len2) == 0)
+ return const_cast<char *>(haystack) + pos;
+ }
+ return nullptr;
+}
+
+extern "C" {
+
+// Weak hooks forward-declared to avoid dependency on
+// <sanitizer/common_interface_defs.h>.
+void __sanitizer_weak_hook_memcmp(void *called_pc, const void *s1,
+ const void *s2, size_t n, int result);
+void __sanitizer_weak_hook_strncmp(void *called_pc, const char *s1,
+ const char *s2, size_t n, int result);
+void __sanitizer_weak_hook_strncasecmp(void *called_pc, const char *s1,
+ const char *s2, size_t n, int result);
+void __sanitizer_weak_hook_strcmp(void *called_pc, const char *s1,
+ const char *s2, int result);
+void __sanitizer_weak_hook_strcasecmp(void *called_pc, const char *s1,
+ const char *s2, int result);
+void __sanitizer_weak_hook_strstr(void *called_pc, const char *s1,
+ const char *s2, char *result);
+void __sanitizer_weak_hook_strcasestr(void *called_pc, const char *s1,
+ const char *s2, char *result);
+void __sanitizer_weak_hook_memmem(void *called_pc, const void *s1, size_t len1,
+ const void *s2, size_t len2, void *result);
+
+DEFINE_REAL(int, bcmp, const void *, const void *, size_t)
+DEFINE_REAL(int, memcmp, const void *, const void *, size_t)
+DEFINE_REAL(int, strncmp, const char *, const char *, size_t)
+DEFINE_REAL(int, strcmp, const char *, const char *)
+DEFINE_REAL(int, strncasecmp, const char *, const char *, size_t)
+DEFINE_REAL(int, strcasecmp, const char *, const char *)
+DEFINE_REAL(char *, strstr, const char *, const char *)
+DEFINE_REAL(char *, strcasestr, const char *, const char *)
+DEFINE_REAL(void *, memmem, const void *, size_t, const void *, size_t)
+
+ATTRIBUTE_INTERFACE int bcmp(const char *s1, const char *s2, size_t n) {
+ if (!FuzzerInited)
+ return internal_memcmp(s1, s2, n);
+ int result = REAL(bcmp)(s1, s2, n);
+ __sanitizer_weak_hook_memcmp(GET_CALLER_PC(), s1, s2, n, result);
+ return result;
+}
+
+ATTRIBUTE_INTERFACE int memcmp(const void *s1, const void *s2, size_t n) {
+ if (!FuzzerInited)
+ return internal_memcmp(s1, s2, n);
+ int result = REAL(memcmp)(s1, s2, n);
+ __sanitizer_weak_hook_memcmp(GET_CALLER_PC(), s1, s2, n, result);
+ return result;
+}
+
+ATTRIBUTE_INTERFACE int strncmp(const char *s1, const char *s2, size_t n) {
+ if (!FuzzerInited)
+ return internal_strncmp(s1, s2, n);
+ int result = REAL(strncmp)(s1, s2, n);
+ __sanitizer_weak_hook_strncmp(GET_CALLER_PC(), s1, s2, n, result);
+ return result;
+}
+
+ATTRIBUTE_INTERFACE int strcmp(const char *s1, const char *s2) {
+ if (!FuzzerInited)
+ return internal_strcmp(s1, s2);
+ int result = REAL(strcmp)(s1, s2);
+ __sanitizer_weak_hook_strcmp(GET_CALLER_PC(), s1, s2, result);
+ return result;
+}
+
+ATTRIBUTE_INTERFACE int strncasecmp(const char *s1, const char *s2, size_t n) {
+ ensureFuzzerInited();
+ int result = REAL(strncasecmp)(s1, s2, n);
+ __sanitizer_weak_hook_strncasecmp(GET_CALLER_PC(), s1, s2, n, result);
+ return result;
+}
+
+ATTRIBUTE_INTERFACE int strcasecmp(const char *s1, const char *s2) {
+ ensureFuzzerInited();
+ int result = REAL(strcasecmp)(s1, s2);
+ __sanitizer_weak_hook_strcasecmp(GET_CALLER_PC(), s1, s2, result);
+ return result;
+}
+
+ATTRIBUTE_INTERFACE char *strstr(const char *s1, const char *s2) {
+ if (!FuzzerInited)
+ return internal_strstr(s1, s2);
+ char *result = REAL(strstr)(s1, s2);
+ __sanitizer_weak_hook_strstr(GET_CALLER_PC(), s1, s2, result);
+ return result;
+}
+
+ATTRIBUTE_INTERFACE char *strcasestr(const char *s1, const char *s2) {
+ ensureFuzzerInited();
+ char *result = REAL(strcasestr)(s1, s2);
+ __sanitizer_weak_hook_strcasestr(GET_CALLER_PC(), s1, s2, result);
+ return result;
+}
+
+ATTRIBUTE_INTERFACE
+void *memmem(const void *s1, size_t len1, const void *s2, size_t len2) {
+ ensureFuzzerInited();
+ void *result = REAL(memmem)(s1, len1, s2, len2);
+ __sanitizer_weak_hook_memmem(GET_CALLER_PC(), s1, len1, s2, len2, result);
+ return result;
+}
+
+__attribute__((section(".preinit_array"),
+ used)) static void (*__local_fuzzer_preinit)(void) = fuzzerInit;
+
+} // extern "C"
+
+static void fuzzerInit() {
+ assert(!FuzzerInitIsRunning);
+ if (FuzzerInited)
+ return;
+ FuzzerInitIsRunning = true;
+
+ REAL(bcmp) = reinterpret_cast<memcmp_type>(
+ getFuncAddr("bcmp", reinterpret_cast<uintptr_t>(&bcmp)));
+ REAL(memcmp) = reinterpret_cast<memcmp_type>(
+ getFuncAddr("memcmp", reinterpret_cast<uintptr_t>(&memcmp)));
+ REAL(strncmp) = reinterpret_cast<strncmp_type>(
+ getFuncAddr("strncmp", reinterpret_cast<uintptr_t>(&strncmp)));
+ REAL(strcmp) = reinterpret_cast<strcmp_type>(
+ getFuncAddr("strcmp", reinterpret_cast<uintptr_t>(&strcmp)));
+ REAL(strncasecmp) = reinterpret_cast<strncasecmp_type>(
+ getFuncAddr("strncasecmp", reinterpret_cast<uintptr_t>(&strncasecmp)));
+ REAL(strcasecmp) = reinterpret_cast<strcasecmp_type>(
+ getFuncAddr("strcasecmp", reinterpret_cast<uintptr_t>(&strcasecmp)));
+ REAL(strstr) = reinterpret_cast<strstr_type>(
+ getFuncAddr("strstr", reinterpret_cast<uintptr_t>(&strstr)));
+ REAL(strcasestr) = reinterpret_cast<strcasestr_type>(
+ getFuncAddr("strcasestr", reinterpret_cast<uintptr_t>(&strcasestr)));
+ REAL(memmem) = reinterpret_cast<memmem_type>(
+ getFuncAddr("memmem", reinterpret_cast<uintptr_t>(&memmem)));
+
+ FuzzerInitIsRunning = false;
+ FuzzerInited = 1;
+}
+
+#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerInternal.h b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerInternal.h
index 31096ce804bc..37c8a01dc3c6 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerInternal.h
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerInternal.h
@@ -67,7 +67,9 @@ public:
void ExecuteCallback(const uint8_t *Data, size_t Size);
bool RunOne(const uint8_t *Data, size_t Size, bool MayDeleteFile = false,
- InputInfo *II = nullptr, bool *FoundUniqFeatures = nullptr);
+ InputInfo *II = nullptr, bool ForceAddToCorpus = false,
+ bool *FoundUniqFeatures = nullptr);
+ void TPCUpdateObservedPCs();
// Merge Corpora[1:] into Corpora[0].
void Merge(const Vector<std::string> &Corpora);
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerLoop.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerLoop.cpp
index 02db6d27b0a3..6e3bf44f8b45 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerLoop.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerLoop.cpp
@@ -354,8 +354,10 @@ void Fuzzer::PrintStats(const char *Where, const char *End, size_t Units,
}
void Fuzzer::PrintFinalStats() {
+ if (Options.PrintFullCoverage)
+ TPC.PrintCoverage(/*PrintAllCounters=*/true);
if (Options.PrintCoverage)
- TPC.PrintCoverage();
+ TPC.PrintCoverage(/*PrintAllCounters=*/false);
if (Options.PrintCorpusStats)
Corpus.PrintStats();
if (!Options.PrintFinalStats)
@@ -463,12 +465,45 @@ static void RenameFeatureSetFile(const std::string &FeaturesDir,
DirPlusFile(FeaturesDir, NewFile));
}
+static void WriteEdgeToMutationGraphFile(const std::string &MutationGraphFile,
+ const InputInfo *II,
+ const InputInfo *BaseII,
+ const std::string &MS) {
+ if (MutationGraphFile.empty())
+ return;
+
+ std::string Sha1 = Sha1ToString(II->Sha1);
+
+ std::string OutputString;
+
+ // Add a new vertex.
+ OutputString.append("\"");
+ OutputString.append(Sha1);
+ OutputString.append("\"\n");
+
+ // Add a new edge if there is base input.
+ if (BaseII) {
+ std::string BaseSha1 = Sha1ToString(BaseII->Sha1);
+ OutputString.append("\"");
+ OutputString.append(BaseSha1);
+ OutputString.append("\" -> \"");
+ OutputString.append(Sha1);
+ OutputString.append("\" [label=\"");
+ OutputString.append(MS);
+ OutputString.append("\"];\n");
+ }
+
+ AppendToFile(OutputString, MutationGraphFile);
+}
+
bool Fuzzer::RunOne(const uint8_t *Data, size_t Size, bool MayDeleteFile,
- InputInfo *II, bool *FoundUniqFeatures) {
+ InputInfo *II, bool ForceAddToCorpus,
+ bool *FoundUniqFeatures) {
if (!Size)
return false;
ExecuteCallback(Data, Size);
+ auto TimeOfUnit = duration_cast<microseconds>(UnitStopTime - UnitStartTime);
UniqFeatureSetTmp.clear();
size_t FoundUniqFeaturesOfII = 0;
@@ -478,7 +513,7 @@ bool Fuzzer::RunOne(const uint8_t *Data, size_t Size, bool MayDeleteFile,
UniqFeatureSetTmp.push_back(Feature);
if (Options.Entropic)
Corpus.UpdateFeatureFrequency(II, Feature);
- if (Options.ReduceInputs && II)
+ if (Options.ReduceInputs && II && !II->NeverReduce)
if (std::binary_search(II->UniqFeatureSet.begin(),
II->UniqFeatureSet.end(), Feature))
FoundUniqFeaturesOfII++;
@@ -487,13 +522,16 @@ bool Fuzzer::RunOne(const uint8_t *Data, size_t Size, bool MayDeleteFile,
*FoundUniqFeatures = FoundUniqFeaturesOfII;
PrintPulseAndReportSlowInput(Data, Size);
size_t NumNewFeatures = Corpus.NumFeatureUpdates() - NumUpdatesBefore;
- if (NumNewFeatures) {
+ if (NumNewFeatures || ForceAddToCorpus) {
TPC.UpdateObservedPCs();
- auto NewII = Corpus.AddToCorpus({Data, Data + Size}, NumNewFeatures,
- MayDeleteFile, TPC.ObservedFocusFunction(),
- UniqFeatureSetTmp, DFT, II);
+ auto NewII =
+ Corpus.AddToCorpus({Data, Data + Size}, NumNewFeatures, MayDeleteFile,
+ TPC.ObservedFocusFunction(), ForceAddToCorpus,
+ TimeOfUnit, UniqFeatureSetTmp, DFT, II);
WriteFeatureSetToFile(Options.FeaturesDir, Sha1ToString(NewII->Sha1),
NewII->UniqFeatureSet);
+ WriteEdgeToMutationGraphFile(Options.MutationGraphFile, NewII, II,
+ MD.MutationSequence());
return true;
}
if (II && FoundUniqFeaturesOfII &&
@@ -509,6 +547,8 @@ bool Fuzzer::RunOne(const uint8_t *Data, size_t Size, bool MayDeleteFile,
return false;
}
+void Fuzzer::TPCUpdateObservedPCs() { TPC.UpdateObservedPCs(); }
+
size_t Fuzzer::GetCurrentUnitInFuzzingThead(const uint8_t **Data) const {
assert(InFuzzingThread());
*Data = CurrentUnitData;
@@ -600,7 +640,7 @@ void Fuzzer::PrintStatusForNewUnit(const Unit &U, const char *Text) {
PrintStats(Text, "");
if (Options.Verbosity) {
Printf(" L: %zd/%zd ", U.size(), Corpus.MaxInputSize());
- MD.PrintMutationSequence();
+ MD.PrintMutationSequence(Options.Verbosity >= 2);
Printf("\n");
}
}
@@ -664,8 +704,11 @@ void Fuzzer::MutateAndTestOne() {
MD.StartMutationSequence();
auto &II = Corpus.ChooseUnitToMutate(MD.GetRand());
- if (Options.DoCrossOver)
- MD.SetCrossOverWith(&Corpus.ChooseUnitToMutate(MD.GetRand()).U);
+ if (Options.DoCrossOver) {
+ auto &CrossOverII = Corpus.ChooseUnitToCrossOverWith(
+ MD.GetRand(), Options.CrossOverUniformDist);
+ MD.SetCrossOverWith(&CrossOverII.U);
+ }
const auto &U = II.U;
memcpy(BaseSha1, II.Sha1, sizeof(BaseSha1));
assert(CurrentUnitData);
@@ -700,7 +743,7 @@ void Fuzzer::MutateAndTestOne() {
bool FoundUniqFeatures = false;
bool NewCov = RunOne(CurrentUnitData, Size, /*MayDeleteFile=*/true, &II,
- &FoundUniqFeatures);
+ /*ForceAddToCorpus*/ false, &FoundUniqFeatures);
TryDetectingAMemoryLeak(CurrentUnitData, Size,
/*DuringInitialCorpusExecution*/ false);
if (NewCov) {
@@ -768,7 +811,9 @@ void Fuzzer::ReadAndExecuteSeedCorpora(Vector<SizedFile> &CorporaFiles) {
for (auto &SF : CorporaFiles) {
auto U = FileToVector(SF.File, MaxInputLen, /*ExitOnError=*/false);
assert(U.size() <= MaxInputLen);
- RunOne(U.data(), U.size());
+ RunOne(U.data(), U.size(), /*MayDeleteFile*/ false, /*II*/ nullptr,
+ /*ForceAddToCorpus*/ Options.KeepSeed,
+ /*FoundUniqFeatures*/ nullptr);
CheckExitOnSrcPosOrItem();
TryDetectingAMemoryLeak(U.data(), U.size(),
/*DuringInitialCorpusExecution*/ true);
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerMutate.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerMutate.cpp
index 29541eac5dc6..cf34a9fe8e2e 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerMutate.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerMutate.cpp
@@ -18,6 +18,7 @@
namespace fuzzer {
const size_t Dictionary::kMaxDictSize;
+static const size_t kMaxMutationsToPrint = 10;
static void PrintASCII(const Word &W, const char *PrintAfter) {
PrintASCII(W.data(), W.size(), PrintAfter);
@@ -425,26 +426,26 @@ size_t MutationDispatcher::Mutate_CrossOver(uint8_t *Data, size_t Size,
if (!CrossOverWith) return 0;
const Unit &O = *CrossOverWith;
if (O.empty()) return 0;
- MutateInPlaceHere.resize(MaxSize);
- auto &U = MutateInPlaceHere;
size_t NewSize = 0;
switch(Rand(3)) {
case 0:
- NewSize = CrossOver(Data, Size, O.data(), O.size(), U.data(), U.size());
+ MutateInPlaceHere.resize(MaxSize);
+ NewSize = CrossOver(Data, Size, O.data(), O.size(),
+ MutateInPlaceHere.data(), MaxSize);
+ memcpy(Data, MutateInPlaceHere.data(), NewSize);
break;
case 1:
- NewSize = InsertPartOf(O.data(), O.size(), U.data(), U.size(), MaxSize);
+ NewSize = InsertPartOf(O.data(), O.size(), Data, Size, MaxSize);
if (!NewSize)
- NewSize = CopyPartOf(O.data(), O.size(), U.data(), U.size());
+ NewSize = CopyPartOf(O.data(), O.size(), Data, Size);
break;
case 2:
- NewSize = CopyPartOf(O.data(), O.size(), U.data(), U.size());
+ NewSize = CopyPartOf(O.data(), O.size(), Data, Size);
break;
default: assert(0);
}
assert(NewSize > 0 && "CrossOver returned empty unit");
assert(NewSize <= MaxSize && "CrossOver returned overisized unit");
- memcpy(Data, U.data(), NewSize);
return NewSize;
}
@@ -481,19 +482,34 @@ void MutationDispatcher::PrintRecommendedDictionary() {
Printf("###### End of recommended dictionary. ######\n");
}
-void MutationDispatcher::PrintMutationSequence() {
+void MutationDispatcher::PrintMutationSequence(bool Verbose) {
Printf("MS: %zd ", CurrentMutatorSequence.size());
- for (auto M : CurrentMutatorSequence)
- Printf("%s-", M.Name);
+ size_t EntriesToPrint =
+ Verbose ? CurrentMutatorSequence.size()
+ : std::min(kMaxMutationsToPrint, CurrentMutatorSequence.size());
+ for (size_t i = 0; i < EntriesToPrint; i++)
+ Printf("%s-", CurrentMutatorSequence[i].Name);
if (!CurrentDictionaryEntrySequence.empty()) {
Printf(" DE: ");
- for (auto DE : CurrentDictionaryEntrySequence) {
+ EntriesToPrint = Verbose ? CurrentDictionaryEntrySequence.size()
+ : std::min(kMaxMutationsToPrint,
+ CurrentDictionaryEntrySequence.size());
+ for (size_t i = 0; i < EntriesToPrint; i++) {
Printf("\"");
- PrintASCII(DE->GetW(), "\"-");
+ PrintASCII(CurrentDictionaryEntrySequence[i]->GetW(), "\"-");
}
}
}
+std::string MutationDispatcher::MutationSequence() {
+ std::string MS;
+ for (auto M : CurrentMutatorSequence) {
+ MS += M.Name;
+ MS += "-";
+ }
+ return MS;
+}
+
size_t MutationDispatcher::Mutate(uint8_t *Data, size_t Size, size_t MaxSize) {
return MutateImpl(Data, Size, MaxSize, Mutators);
}
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerMutate.h b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerMutate.h
index 6cbce8027624..fd37191156d3 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerMutate.h
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerMutate.h
@@ -24,8 +24,11 @@ public:
~MutationDispatcher() {}
/// Indicate that we are about to start a new sequence of mutations.
void StartMutationSequence();
- /// Print the current sequence of mutations.
- void PrintMutationSequence();
+ /// Print the current sequence of mutations. Only prints the full sequence
+ /// when Verbose is true.
+ void PrintMutationSequence(bool Verbose = true);
+ /// Return the current sequence of mutations.
+ std::string MutationSequence();
/// Indicate that the current sequence of mutations was successful.
void RecordSuccessfulMutationSequence();
/// Mutates data by invoking user-provided mutator.
@@ -40,9 +43,9 @@ public:
size_t Mutate_InsertByte(uint8_t *Data, size_t Size, size_t MaxSize);
/// Mutates data by inserting several repeated bytes.
size_t Mutate_InsertRepeatedBytes(uint8_t *Data, size_t Size, size_t MaxSize);
- /// Mutates data by chanding one byte.
+ /// Mutates data by changing one byte.
size_t Mutate_ChangeByte(uint8_t *Data, size_t Size, size_t MaxSize);
- /// Mutates data by chanding one bit.
+ /// Mutates data by changing one bit.
size_t Mutate_ChangeBit(uint8_t *Data, size_t Size, size_t MaxSize);
/// Mutates data by copying/inserting a part of data into a different place.
size_t Mutate_CopyPart(uint8_t *Data, size_t Size, size_t MaxSize);
@@ -126,9 +129,6 @@ public:
// Dictionary provided by the user via -dict=DICT_FILE.
Dictionary ManualDictionary;
- // Temporary dictionary modified by the fuzzer itself,
- // recreated periodically.
- Dictionary TempAutoDictionary;
// Persistent dictionary modified by the fuzzer, consists of
// entries that led to successful discoveries in the past mutations.
Dictionary PersistentAutoDictionary;
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerOptions.h b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerOptions.h
index 9d975bd61fe7..d0c285a6821d 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerOptions.h
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerOptions.h
@@ -18,6 +18,7 @@ struct FuzzingOptions {
int Verbosity = 1;
size_t MaxLen = 0;
size_t LenControl = 1000;
+ bool KeepSeed = false;
int UnitTimeoutSec = 300;
int TimeoutExitCode = 70;
int OOMExitCode = 71;
@@ -30,6 +31,7 @@ struct FuzzingOptions {
int RssLimitMb = 0;
int MallocLimitMb = 0;
bool DoCrossOver = true;
+ bool CrossOverUniformDist = false;
int MutateDepth = 5;
bool ReduceDepth = false;
bool UseCounters = false;
@@ -44,9 +46,10 @@ struct FuzzingOptions {
size_t MaxNumberOfRuns = -1L;
int ReportSlowUnits = 10;
bool OnlyASCII = false;
- bool Entropic = false;
+ bool Entropic = true;
size_t EntropicFeatureFrequencyThreshold = 0xFF;
size_t EntropicNumberOfRarestFeatures = 100;
+ bool EntropicScalePerExecTime = false;
std::string OutputCorpus;
std::string ArtifactPrefix = "./";
std::string ExactArtifactPath;
@@ -56,6 +59,7 @@ struct FuzzingOptions {
std::string DataFlowTrace;
std::string CollectDataFlow;
std::string FeaturesDir;
+ std::string MutationGraphFile;
std::string StopFile;
bool SaveArtifacts = true;
bool PrintNEW = true; // Print a status line when new units are found;
@@ -64,11 +68,13 @@ struct FuzzingOptions {
bool PrintFinalStats = false;
bool PrintCorpusStats = false;
bool PrintCoverage = false;
+ bool PrintFullCoverage = false;
bool DumpCoverage = false;
bool DetectLeaks = true;
int PurgeAllocatorIntervalSec = 1;
int TraceMalloc = 0;
bool HandleAbrt = false;
+ bool HandleAlrm = false;
bool HandleBus = false;
bool HandleFpe = false;
bool HandleIll = false;
@@ -78,6 +84,7 @@ struct FuzzingOptions {
bool HandleXfsz = false;
bool HandleUsr1 = false;
bool HandleUsr2 = false;
+ bool HandleWinExcept = false;
};
} // namespace fuzzer
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerPlatform.h b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerPlatform.h
index 8befdb882cc6..1602e6789500 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerPlatform.h
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerPlatform.h
@@ -18,7 +18,6 @@
#define LIBFUZZER_LINUX 1
#define LIBFUZZER_NETBSD 0
#define LIBFUZZER_FREEBSD 0
-#define LIBFUZZER_OPENBSD 0
#define LIBFUZZER_WINDOWS 0
#define LIBFUZZER_EMSCRIPTEN 0
#elif __APPLE__
@@ -27,7 +26,6 @@
#define LIBFUZZER_LINUX 0
#define LIBFUZZER_NETBSD 0
#define LIBFUZZER_FREEBSD 0
-#define LIBFUZZER_OPENBSD 0
#define LIBFUZZER_WINDOWS 0
#define LIBFUZZER_EMSCRIPTEN 0
#elif __NetBSD__
@@ -36,7 +34,6 @@
#define LIBFUZZER_LINUX 0
#define LIBFUZZER_NETBSD 1
#define LIBFUZZER_FREEBSD 0
-#define LIBFUZZER_OPENBSD 0
#define LIBFUZZER_WINDOWS 0
#define LIBFUZZER_EMSCRIPTEN 0
#elif __FreeBSD__
@@ -45,16 +42,6 @@
#define LIBFUZZER_LINUX 0
#define LIBFUZZER_NETBSD 0
#define LIBFUZZER_FREEBSD 1
-#define LIBFUZZER_OPENBSD 0
-#define LIBFUZZER_WINDOWS 0
-#define LIBFUZZER_EMSCRIPTEN 0
-#elif __OpenBSD__
-#define LIBFUZZER_APPLE 0
-#define LIBFUZZER_FUCHSIA 0
-#define LIBFUZZER_LINUX 0
-#define LIBFUZZER_NETBSD 0
-#define LIBFUZZER_FREEBSD 0
-#define LIBFUZZER_OPENBSD 1
#define LIBFUZZER_WINDOWS 0
#define LIBFUZZER_EMSCRIPTEN 0
#elif _WIN32
@@ -63,7 +50,6 @@
#define LIBFUZZER_LINUX 0
#define LIBFUZZER_NETBSD 0
#define LIBFUZZER_FREEBSD 0
-#define LIBFUZZER_OPENBSD 0
#define LIBFUZZER_WINDOWS 1
#define LIBFUZZER_EMSCRIPTEN 0
#elif __Fuchsia__
@@ -72,7 +58,6 @@
#define LIBFUZZER_LINUX 0
#define LIBFUZZER_NETBSD 0
#define LIBFUZZER_FREEBSD 0
-#define LIBFUZZER_OPENBSD 0
#define LIBFUZZER_WINDOWS 0
#define LIBFUZZER_EMSCRIPTEN 0
#elif __EMSCRIPTEN__
@@ -81,7 +66,6 @@
#define LIBFUZZER_LINUX 0
#define LIBFUZZER_NETBSD 0
#define LIBFUZZER_FREEBSD 0
-#define LIBFUZZER_OPENBSD 0
#define LIBFUZZER_WINDOWS 0
#define LIBFUZZER_EMSCRIPTEN 1
#else
@@ -101,7 +85,7 @@
#define LIBFUZZER_POSIX \
(LIBFUZZER_APPLE || LIBFUZZER_LINUX || LIBFUZZER_NETBSD || \
- LIBFUZZER_FREEBSD || LIBFUZZER_OPENBSD || LIBFUZZER_EMSCRIPTEN)
+ LIBFUZZER_FREEBSD || LIBFUZZER_EMSCRIPTEN)
#ifdef __x86_64
#if __has_attribute(target)
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerTracePC.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerTracePC.cpp
index b2ca7693e540..91e94d824002 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerTracePC.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerTracePC.cpp
@@ -269,7 +269,7 @@ bool TracePC::ObservedFocusFunction() {
return FocusFunctionCounterPtr && *FocusFunctionCounterPtr;
}
-void TracePC::PrintCoverage() {
+void TracePC::PrintCoverage(bool PrintAllCounters) {
if (!EF->__sanitizer_symbolize_pc ||
!EF->__sanitizer_get_module_and_offset_for_pc) {
Printf("INFO: __sanitizer_symbolize_pc or "
@@ -277,7 +277,7 @@ void TracePC::PrintCoverage() {
" not printing coverage\n");
return;
}
- Printf("COVERAGE:\n");
+ Printf(PrintAllCounters ? "FULL COVERAGE:\n" : "COVERAGE:\n");
auto CoveredFunctionCallback = [&](const PCTableEntry *First,
const PCTableEntry *Last,
uintptr_t Counter) {
@@ -292,17 +292,33 @@ void TracePC::PrintCoverage() {
std::string LineStr = DescribePC("%l", VisualizePC);
size_t NumEdges = Last - First;
Vector<uintptr_t> UncoveredPCs;
+ Vector<uintptr_t> CoveredPCs;
for (auto TE = First; TE < Last; TE++)
if (!ObservedPCs.count(TE))
UncoveredPCs.push_back(TE->PC);
- Printf("%sCOVERED_FUNC: hits: %zd", Counter ? "" : "UN", Counter);
- Printf(" edges: %zd/%zd", NumEdges - UncoveredPCs.size(), NumEdges);
- Printf(" %s %s:%s\n", FunctionStr.c_str(), FileStr.c_str(),
- LineStr.c_str());
- if (Counter)
+ else
+ CoveredPCs.push_back(TE->PC);
+
+ if (PrintAllCounters) {
+ Printf("U");
for (auto PC : UncoveredPCs)
- Printf(" UNCOVERED_PC: %s\n",
- DescribePC("%s:%l", GetNextInstructionPc(PC)).c_str());
+ Printf(DescribePC(" %l", GetNextInstructionPc(PC)).c_str());
+ Printf("\n");
+
+ Printf("C");
+ for (auto PC : CoveredPCs)
+ Printf(DescribePC(" %l", GetNextInstructionPc(PC)).c_str());
+ Printf("\n");
+ } else {
+ Printf("%sCOVERED_FUNC: hits: %zd", Counter ? "" : "UN", Counter);
+ Printf(" edges: %zd/%zd", NumEdges - UncoveredPCs.size(), NumEdges);
+ Printf(" %s %s:%s\n", FunctionStr.c_str(), FileStr.c_str(),
+ LineStr.c_str());
+ if (Counter)
+ for (auto PC : UncoveredPCs)
+ Printf(" UNCOVERED_PC: %s\n",
+ DescribePC("%s:%l", GetNextInstructionPc(PC)).c_str());
+ }
};
IterateCoveredFunctions(CoveredFunctionCallback);
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerTracePC.h b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerTracePC.h
index 501f3b544971..00909230731d 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerTracePC.h
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerTracePC.h
@@ -94,7 +94,7 @@ class TracePC {
void PrintModuleInfo();
- void PrintCoverage();
+ void PrintCoverage(bool PrintAllCounters);
template<class CallBack>
void IterateCoveredFunctions(CallBack CB);
@@ -194,10 +194,12 @@ size_t ForEachNonZeroByte(const uint8_t *Begin, const uint8_t *End,
// Iterate by Step bytes at a time.
for (; P < End; P += Step)
- if (LargeType Bundle = *reinterpret_cast<const LargeType *>(P))
+ if (LargeType Bundle = *reinterpret_cast<const LargeType *>(P)) {
+ Bundle = HostToLE(Bundle);
for (size_t I = 0; I < Step; I++, Bundle >>= 8)
if (uint8_t V = Bundle & 0xff)
Handle8bitCounter(FirstFeature, P - Begin + I, V);
+ }
// Iterate by 1 byte until the end.
for (; P < End; P++)
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtil.h b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtil.h
index 4ae35838306d..e90be085008e 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtil.h
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtil.h
@@ -106,6 +106,12 @@ inline uint8_t *RoundDownByPage(uint8_t *P) {
return reinterpret_cast<uint8_t *>(X);
}
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+template <typename T> T HostToLE(T X) { return X; }
+#else
+template <typename T> T HostToLE(T X) { return Bswap(X); }
+#endif
+
} // namespace fuzzer
#endif // LLVM_FUZZER_UTIL_H
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilFuchsia.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilFuchsia.cpp
index 190fb7866649..af4394616776 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilFuchsia.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilFuchsia.cpp
@@ -68,17 +68,6 @@ void AlarmHandler(int Seconds) {
}
}
-void InterruptHandler() {
- fd_set readfds;
- // Ctrl-C sends ETX in Zircon.
- do {
- FD_ZERO(&readfds);
- FD_SET(STDIN_FILENO, &readfds);
- select(STDIN_FILENO + 1, &readfds, nullptr, nullptr, nullptr);
- } while(!FD_ISSET(STDIN_FILENO, &readfds) || getchar() != 0x03);
- Fuzzer::StaticInterruptCallback();
-}
-
// CFAOffset is used to reference the stack pointer before entering the
// trampoline (Stack Pointer + CFAOffset = prev Stack Pointer). Before jumping
// to the trampoline we copy all the registers onto the stack. We need to make
@@ -354,16 +343,12 @@ void SetSignalHandler(const FuzzingOptions &Options) {
Printf("%s", Buf);
// Set up alarm handler if needed.
- if (Options.UnitTimeoutSec > 0) {
+ if (Options.HandleAlrm && Options.UnitTimeoutSec > 0) {
std::thread T(AlarmHandler, Options.UnitTimeoutSec / 2 + 1);
T.detach();
}
- // Set up interrupt handler if needed.
- if (Options.HandleInt || Options.HandleTerm) {
- std::thread T(InterruptHandler);
- T.detach();
- }
+ // Options.HandleInt and Options.HandleTerm are not supported on Fuchsia
// Early exit if no crash handler needed.
if (!Options.HandleSegv && !Options.HandleBus && !Options.HandleIll &&
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilLinux.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilLinux.cpp
index 95490b992e0b..981f9a8b429f 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilLinux.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilLinux.cpp
@@ -9,7 +9,7 @@
//===----------------------------------------------------------------------===//
#include "FuzzerPlatform.h"
#if LIBFUZZER_LINUX || LIBFUZZER_NETBSD || LIBFUZZER_FREEBSD || \
- LIBFUZZER_OPENBSD || LIBFUZZER_EMSCRIPTEN
+ LIBFUZZER_EMSCRIPTEN
#include "FuzzerCommand.h"
#include <stdlib.h>
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilPosix.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilPosix.cpp
index fc57b724db10..afb733409ab5 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilPosix.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilPosix.cpp
@@ -113,7 +113,7 @@ void SetTimer(int Seconds) {
void SetSignalHandler(const FuzzingOptions& Options) {
// setitimer is not implemented in emscripten.
- if (Options.UnitTimeoutSec > 0 && !LIBFUZZER_EMSCRIPTEN)
+ if (Options.HandleAlrm && Options.UnitTimeoutSec > 0 && !LIBFUZZER_EMSCRIPTEN)
SetTimer(Options.UnitTimeoutSec / 2 + 1);
if (Options.HandleInt)
SetSigaction(SIGINT, InterruptHandler);
@@ -148,7 +148,7 @@ size_t GetPeakRSSMb() {
if (getrusage(RUSAGE_SELF, &usage))
return 0;
if (LIBFUZZER_LINUX || LIBFUZZER_FREEBSD || LIBFUZZER_NETBSD ||
- LIBFUZZER_OPENBSD || LIBFUZZER_EMSCRIPTEN) {
+ LIBFUZZER_EMSCRIPTEN) {
// ru_maxrss is in KiB
return usage.ru_maxrss >> 10;
} else if (LIBFUZZER_APPLE) {
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilWindows.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilWindows.cpp
index 6c693e3d7eea..1a54bb569eca 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilWindows.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilWindows.cpp
@@ -60,7 +60,15 @@ static LONG CALLBACK ExceptionHandler(PEXCEPTION_POINTERS ExceptionInfo) {
if (HandlerOpt->HandleFpe)
Fuzzer::StaticCrashSignalCallback();
break;
- // TODO: handle (Options.HandleXfsz)
+ // This is an undocumented exception code corresponding to a Visual C++
+ // Exception.
+ //
+ // See: https://devblogs.microsoft.com/oldnewthing/20100730-00/?p=13273
+ case 0xE06D7363:
+ if (HandlerOpt->HandleWinExcept)
+ Fuzzer::StaticCrashSignalCallback();
+ break;
+ // TODO: Handle (Options.HandleXfsz)
}
return EXCEPTION_CONTINUE_SEARCH;
}
@@ -115,7 +123,7 @@ static void CrashHandler(int) { Fuzzer::StaticCrashSignalCallback(); }
void SetSignalHandler(const FuzzingOptions& Options) {
HandlerOpt = &Options;
- if (Options.UnitTimeoutSec > 0)
+ if (Options.HandleAlrm && Options.UnitTimeoutSec > 0)
Timer.SetTimer(Options.UnitTimeoutSec / 2 + 1);
if (Options.HandleInt || Options.HandleTerm)
@@ -127,7 +135,7 @@ void SetSignalHandler(const FuzzingOptions& Options) {
}
if (Options.HandleSegv || Options.HandleBus || Options.HandleIll ||
- Options.HandleFpe)
+ Options.HandleFpe || Options.HandleWinExcept)
SetUnhandledExceptionFilter(ExceptionHandler);
if (Options.HandleAbrt)
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/common.cpp b/contrib/llvm-project/compiler-rt/lib/gwp_asan/common.cpp
index 3438c4b91893..483694d57b7e 100644
--- a/contrib/llvm-project/compiler-rt/lib/gwp_asan/common.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/common.cpp
@@ -34,6 +34,9 @@ const char *ErrorToString(const Error &E) {
__builtin_trap();
}
+constexpr size_t AllocationMetadata::kStackFrameStorageBytes;
+constexpr size_t AllocationMetadata::kMaxTraceLengthToCollect;
+
void AllocationMetadata::RecordAllocation(uintptr_t AllocAddr,
size_t AllocSize) {
Addr = AllocAddr;
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/crash_handler.cpp b/contrib/llvm-project/compiler-rt/lib/gwp_asan/crash_handler.cpp
index c3b9e1467bd9..bd7ca5abbb6b 100644
--- a/contrib/llvm-project/compiler-rt/lib/gwp_asan/crash_handler.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/crash_handler.cpp
@@ -1,4 +1,4 @@
-//===-- crash_handler_interface.cpp -----------------------------*- C++ -*-===//
+//===-- crash_handler.cpp ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -10,6 +10,8 @@
#include "gwp_asan/stack_trace_compressor.h"
#include <assert.h>
+#include <stdint.h>
+#include <string.h>
using AllocationMetadata = gwp_asan::AllocationMetadata;
using Error = gwp_asan::Error;
@@ -112,9 +114,15 @@ uint64_t __gwp_asan_get_allocation_thread_id(
size_t __gwp_asan_get_allocation_trace(
const gwp_asan::AllocationMetadata *AllocationMeta, uintptr_t *Buffer,
size_t BufferLen) {
- return gwp_asan::compression::unpack(
+ uintptr_t UncompressedBuffer[AllocationMetadata::kMaxTraceLengthToCollect];
+ size_t UnpackedLength = gwp_asan::compression::unpack(
AllocationMeta->AllocationTrace.CompressedTrace,
- AllocationMeta->AllocationTrace.TraceSize, Buffer, BufferLen);
+ AllocationMeta->AllocationTrace.TraceSize, UncompressedBuffer,
+ AllocationMetadata::kMaxTraceLengthToCollect);
+ if (UnpackedLength < BufferLen)
+ BufferLen = UnpackedLength;
+ memcpy(Buffer, UncompressedBuffer, BufferLen * sizeof(*Buffer));
+ return UnpackedLength;
}
bool __gwp_asan_is_deallocated(
@@ -130,9 +138,15 @@ uint64_t __gwp_asan_get_deallocation_thread_id(
size_t __gwp_asan_get_deallocation_trace(
const gwp_asan::AllocationMetadata *AllocationMeta, uintptr_t *Buffer,
size_t BufferLen) {
- return gwp_asan::compression::unpack(
+ uintptr_t UncompressedBuffer[AllocationMetadata::kMaxTraceLengthToCollect];
+ size_t UnpackedLength = gwp_asan::compression::unpack(
AllocationMeta->DeallocationTrace.CompressedTrace,
- AllocationMeta->DeallocationTrace.TraceSize, Buffer, BufferLen);
+ AllocationMeta->DeallocationTrace.TraceSize, UncompressedBuffer,
+ AllocationMetadata::kMaxTraceLengthToCollect);
+ if (UnpackedLength < BufferLen)
+ BufferLen = UnpackedLength;
+ memcpy(Buffer, UncompressedBuffer, BufferLen * sizeof(*Buffer));
+ return UnpackedLength;
}
#ifdef __cplusplus
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/crash_handler.h b/contrib/llvm-project/compiler-rt/lib/gwp_asan/crash_handler.h
index 631c31929732..4a95069dac58 100644
--- a/contrib/llvm-project/compiler-rt/lib/gwp_asan/crash_handler.h
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/crash_handler.h
@@ -1,4 +1,4 @@
-//===-- crash_handler_interface.h -------------------------------*- C++ -*-===//
+//===-- crash_handler.h -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/definitions.h b/contrib/llvm-project/compiler-rt/lib/gwp_asan/definitions.h
index 563c408b6315..bec029038934 100644
--- a/contrib/llvm-project/compiler-rt/lib/gwp_asan/definitions.h
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/definitions.h
@@ -1,4 +1,4 @@
-//===-- gwp_asan_definitions.h ----------------------------------*- C++ -*-===//
+//===-- definitions.h -------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp b/contrib/llvm-project/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp
index b2602e4caa59..a1dbbe4f25e9 100644
--- a/contrib/llvm-project/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp
@@ -8,24 +8,10 @@
#include "gwp_asan/guarded_pool_allocator.h"
-#include "gwp_asan/optional/segv_handler.h"
#include "gwp_asan/options.h"
-#include "gwp_asan/random.h"
#include "gwp_asan/utilities.h"
-// RHEL creates the PRIu64 format macro (for printing uint64_t's) only when this
-// macro is defined before including <inttypes.h>.
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS 1
-#endif
-
#include <assert.h>
-#include <inttypes.h>
-#include <signal.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <time.h>
using AllocationMetadata = gwp_asan::AllocationMetadata;
using Error = gwp_asan::Error;
@@ -39,14 +25,13 @@ namespace {
// init-order-fiasco.
GuardedPoolAllocator *SingletonPtr = nullptr;
-class ScopedBoolean {
-public:
- ScopedBoolean(bool &B) : Bool(B) { Bool = true; }
- ~ScopedBoolean() { Bool = false; }
+size_t roundUpTo(size_t Size, size_t Boundary) {
+ return (Size + Boundary - 1) & ~(Boundary - 1);
+}
-private:
- bool &Bool;
-};
+uintptr_t getPageAddr(uintptr_t Ptr, uintptr_t PageSize) {
+ return Ptr & ~(PageSize - 1);
+}
} // anonymous namespace
// Gets the singleton implementation of this class. Thread-compatible until
@@ -64,7 +49,7 @@ void GuardedPoolAllocator::init(const options::Options &Opts) {
return;
Check(Opts.SampleRate >= 0, "GWP-ASan Error: SampleRate is < 0.");
- Check(Opts.SampleRate <= INT32_MAX, "GWP-ASan Error: SampleRate is > 2^31.");
+ Check(Opts.SampleRate < (1 << 30), "GWP-ASan Error: SampleRate is >= 2^30.");
Check(Opts.MaxSimultaneousAllocations >= 0,
"GWP-ASan Error: MaxSimultaneousAllocations is < 0.");
@@ -73,25 +58,29 @@ void GuardedPoolAllocator::init(const options::Options &Opts) {
State.MaxSimultaneousAllocations = Opts.MaxSimultaneousAllocations;
- State.PageSize = getPlatformPageSize();
+ const size_t PageSize = getPlatformPageSize();
+ // getPageAddr() and roundUpTo() assume the page size to be a power of 2.
+ assert((PageSize & (PageSize - 1)) == 0);
+ State.PageSize = PageSize;
PerfectlyRightAlign = Opts.PerfectlyRightAlign;
size_t PoolBytesRequired =
- State.PageSize * (1 + State.MaxSimultaneousAllocations) +
+ PageSize * (1 + State.MaxSimultaneousAllocations) +
State.MaxSimultaneousAllocations * State.maximumAllocationSize();
- void *GuardedPoolMemory = mapMemory(PoolBytesRequired, kGwpAsanGuardPageName);
+ assert(PoolBytesRequired % PageSize == 0);
+ void *GuardedPoolMemory = reserveGuardedPool(PoolBytesRequired);
- size_t BytesRequired = State.MaxSimultaneousAllocations * sizeof(*Metadata);
+ size_t BytesRequired =
+ roundUpTo(State.MaxSimultaneousAllocations * sizeof(*Metadata), PageSize);
Metadata = reinterpret_cast<AllocationMetadata *>(
- mapMemory(BytesRequired, kGwpAsanMetadataName));
- markReadWrite(Metadata, BytesRequired, kGwpAsanMetadataName);
+ map(BytesRequired, kGwpAsanMetadataName));
// Allocate memory and set up the free pages queue.
- BytesRequired = State.MaxSimultaneousAllocations * sizeof(*FreeSlots);
- FreeSlots = reinterpret_cast<size_t *>(
- mapMemory(BytesRequired, kGwpAsanFreeSlotsName));
- markReadWrite(FreeSlots, BytesRequired, kGwpAsanFreeSlotsName);
+ BytesRequired = roundUpTo(
+ State.MaxSimultaneousAllocations * sizeof(*FreeSlots), PageSize);
+ FreeSlots =
+ reinterpret_cast<size_t *>(map(BytesRequired, kGwpAsanFreeSlotsName));
// Multiply the sample rate by 2 to give a good, fast approximation for (1 /
// SampleRate) chance of sampling.
@@ -101,8 +90,9 @@ void GuardedPoolAllocator::init(const options::Options &Opts) {
AdjustedSampleRatePlusOne = 2;
initPRNG();
- ThreadLocals.NextSampleCounter =
- (getRandomUnsigned32() % (AdjustedSampleRatePlusOne - 1)) + 1;
+ getThreadLocals()->NextSampleCounter =
+ ((getRandomUnsigned32() % (AdjustedSampleRatePlusOne - 1)) + 1) &
+ ThreadLocalPackedVariables::NextSampleCounterMask;
State.GuardedPagePool = reinterpret_cast<uintptr_t>(GuardedPoolMemory);
State.GuardedPagePoolEnd =
@@ -129,39 +119,39 @@ void GuardedPoolAllocator::iterate(void *Base, size_t Size, iterate_callback Cb,
void GuardedPoolAllocator::uninitTestOnly() {
if (State.GuardedPagePool) {
- unmapMemory(reinterpret_cast<void *>(State.GuardedPagePool),
- State.GuardedPagePoolEnd - State.GuardedPagePool,
- kGwpAsanGuardPageName);
+ unreserveGuardedPool();
State.GuardedPagePool = 0;
State.GuardedPagePoolEnd = 0;
}
if (Metadata) {
- unmapMemory(Metadata, State.MaxSimultaneousAllocations * sizeof(*Metadata),
- kGwpAsanMetadataName);
+ unmap(Metadata,
+ roundUpTo(State.MaxSimultaneousAllocations * sizeof(*Metadata),
+ State.PageSize));
Metadata = nullptr;
}
if (FreeSlots) {
- unmapMemory(FreeSlots,
- State.MaxSimultaneousAllocations * sizeof(*FreeSlots),
- kGwpAsanFreeSlotsName);
+ unmap(FreeSlots,
+ roundUpTo(State.MaxSimultaneousAllocations * sizeof(*FreeSlots),
+ State.PageSize));
FreeSlots = nullptr;
}
-}
-
-static uintptr_t getPageAddr(uintptr_t Ptr, uintptr_t PageSize) {
- return Ptr & ~(PageSize - 1);
+ *getThreadLocals() = ThreadLocalPackedVariables();
}
void *GuardedPoolAllocator::allocate(size_t Size) {
// GuardedPagePoolEnd == 0 when GWP-ASan is disabled. If we are disabled, fall
// back to the supporting allocator.
- if (State.GuardedPagePoolEnd == 0)
+ if (State.GuardedPagePoolEnd == 0) {
+ getThreadLocals()->NextSampleCounter =
+ (AdjustedSampleRatePlusOne - 1) &
+ ThreadLocalPackedVariables::NextSampleCounterMask;
return nullptr;
+ }
// Protect against recursivity.
- if (ThreadLocals.RecursiveGuard)
+ if (getThreadLocals()->RecursiveGuard)
return nullptr;
- ScopedBoolean SB(ThreadLocals.RecursiveGuard);
+ ScopedRecursiveGuard SRG;
if (Size == 0 || Size > State.maximumAllocationSize())
return nullptr;
@@ -189,8 +179,9 @@ void *GuardedPoolAllocator::allocate(size_t Size) {
// If a slot is multiple pages in size, and the allocation takes up a single
// page, we can improve overflow detection by leaving the unused pages as
// unmapped.
- markReadWrite(reinterpret_cast<void *>(getPageAddr(Ptr, State.PageSize)),
- Size, kGwpAsanAliveSlotName);
+ const size_t PageSize = State.PageSize;
+ allocateInGuardedPool(reinterpret_cast<void *>(getPageAddr(Ptr, PageSize)),
+ roundUpTo(Size, PageSize));
Meta->RecordAllocation(Ptr, Size);
Meta->AllocationTrace.RecordBacktrace(Backtrace);
@@ -209,7 +200,7 @@ void GuardedPoolAllocator::trapOnAddress(uintptr_t Address, Error E) {
}
void GuardedPoolAllocator::stop() {
- ThreadLocals.RecursiveGuard = true;
+ getThreadLocals()->RecursiveGuard = true;
PoolMutex.tryLock();
}
@@ -240,14 +231,14 @@ void GuardedPoolAllocator::deallocate(void *Ptr) {
// Ensure that the unwinder is not called if the recursive flag is set,
// otherwise non-reentrant unwinders may deadlock.
- if (!ThreadLocals.RecursiveGuard) {
- ScopedBoolean B(ThreadLocals.RecursiveGuard);
+ if (!getThreadLocals()->RecursiveGuard) {
+ ScopedRecursiveGuard SRG;
Meta->DeallocationTrace.RecordBacktrace(Backtrace);
}
}
- markInaccessible(reinterpret_cast<void *>(SlotStart),
- State.maximumAllocationSize(), kGwpAsanGuardPageName);
+ deallocateInGuardedPool(reinterpret_cast<void *>(SlotStart),
+ State.maximumAllocationSize());
// And finally, lock again to release the slot back into the pool.
ScopedLock L(PoolMutex);
@@ -286,7 +277,12 @@ void GuardedPoolAllocator::freeSlot(size_t SlotIndex) {
FreeSlots[FreeSlotsLength++] = SlotIndex;
}
-GWP_ASAN_TLS_INITIAL_EXEC
-GuardedPoolAllocator::ThreadLocalPackedVariables
- GuardedPoolAllocator::ThreadLocals;
+uint32_t GuardedPoolAllocator::getRandomUnsigned32() {
+ uint32_t RandomState = getThreadLocals()->RandomState;
+ RandomState ^= RandomState << 13;
+ RandomState ^= RandomState >> 17;
+ RandomState ^= RandomState << 5;
+ getThreadLocals()->RandomState = RandomState;
+ return RandomState;
+}
} // namespace gwp_asan
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h b/contrib/llvm-project/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h
index ae00506c5692..b9972ffd98f7 100644
--- a/contrib/llvm-project/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h
@@ -13,8 +13,9 @@
#include "gwp_asan/definitions.h"
#include "gwp_asan/mutex.h"
#include "gwp_asan/options.h"
-#include "gwp_asan/random.h"
-#include "gwp_asan/stack_trace_compressor.h"
+#include "gwp_asan/platform_specific/guarded_pool_allocator_fuchsia.h" // IWYU pragma: keep
+#include "gwp_asan/platform_specific/guarded_pool_allocator_posix.h" // IWYU pragma: keep
+#include "gwp_asan/platform_specific/guarded_pool_allocator_tls.h"
#include <stddef.h>
#include <stdint.h>
@@ -37,7 +38,7 @@ public:
// GWP-ASan. The constructor value-initialises the class such that if no
// further initialisation takes place, calls to shouldSample() and
// pointerIsMine() will return false.
- constexpr GuardedPoolAllocator(){};
+ constexpr GuardedPoolAllocator() {}
GuardedPoolAllocator(const GuardedPoolAllocator &) = delete;
GuardedPoolAllocator &operator=(const GuardedPoolAllocator &) = delete;
@@ -78,11 +79,12 @@ public:
// class must be valid when zero-initialised, and we wish to sample as
// infrequently as possible when this is the case, hence we underflow to
// UINT32_MAX.
- if (GWP_ASAN_UNLIKELY(ThreadLocals.NextSampleCounter == 0))
- ThreadLocals.NextSampleCounter =
- (getRandomUnsigned32() % (AdjustedSampleRatePlusOne - 1)) + 1;
+ if (GWP_ASAN_UNLIKELY(getThreadLocals()->NextSampleCounter == 0))
+ getThreadLocals()->NextSampleCounter =
+ ((getRandomUnsigned32() % (AdjustedSampleRatePlusOne - 1)) + 1) &
+ ThreadLocalPackedVariables::NextSampleCounterMask;
- return GWP_ASAN_UNLIKELY(--ThreadLocals.NextSampleCounter == 0);
+ return GWP_ASAN_UNLIKELY(--getThreadLocals()->NextSampleCounter == 0);
}
// Returns whether the provided pointer is a current sampled allocation that
@@ -124,15 +126,30 @@ private:
// memory into this process in a platform-specific way. Pointer and size
// arguments are expected to be page-aligned. These functions will never
// return on error, instead electing to kill the calling process on failure.
- // Note that memory is initially mapped inaccessible. In order for RW
- // mappings, call mapMemory() followed by markReadWrite() on the returned
- // pointer. Each mapping is named on platforms that support it, primarily
- // Android. This name must be a statically allocated string, as the Android
- // kernel uses the string pointer directly.
- void *mapMemory(size_t Size, const char *Name) const;
- void unmapMemory(void *Ptr, size_t Size, const char *Name) const;
- void markReadWrite(void *Ptr, size_t Size, const char *Name) const;
- void markInaccessible(void *Ptr, size_t Size, const char *Name) const;
+ // The pool memory is initially reserved and inaccessible, and RW mappings are
+ // subsequently created and destroyed via allocateInGuardedPool() and
+ // deallocateInGuardedPool(). Each mapping is named on platforms that support
+ // it, primarily Android. This name must be a statically allocated string, as
+ // the Android kernel uses the string pointer directly.
+ void *map(size_t Size, const char *Name) const;
+ void unmap(void *Ptr, size_t Size) const;
+
+ // The pool is managed separately, as some platforms (particularly Fuchsia)
+ // manage virtual memory regions as a chunk where individual pages can still
+ // have separate permissions. These platforms maintain metadata about the
+ // region in order to perform operations. The pool is unique as it's the only
+ // thing in GWP-ASan that treats pages in a single VM region on an individual
+ // basis for page protection.
+ // The pointer returned by reserveGuardedPool() is the reserved address range
+ // of (at least) Size bytes.
+ void *reserveGuardedPool(size_t Size);
+ // allocateInGuardedPool() Ptr and Size must be a subrange of the previously
+ // reserved pool range.
+ void allocateInGuardedPool(void *Ptr, size_t Size) const;
+ // deallocateInGuardedPool() Ptr and Size must be an exact pair previously
+ // passed to allocateInGuardedPool().
+ void deallocateInGuardedPool(void *Ptr, size_t Size) const;
+ void unreserveGuardedPool();
// Get the page size from the platform-specific implementation. Only needs to
// be called once, and the result should be cached in PageSize in this class.
@@ -191,22 +208,21 @@ private:
// the sample rate.
uint32_t AdjustedSampleRatePlusOne = 0;
- // Pack the thread local variables into a struct to ensure that they're in
- // the same cache line for performance reasons. These are the most touched
- // variables in GWP-ASan.
- struct alignas(8) ThreadLocalPackedVariables {
- constexpr ThreadLocalPackedVariables() {}
- // Thread-local decrementing counter that indicates that a given allocation
- // should be sampled when it reaches zero.
- uint32_t NextSampleCounter = 0;
- // Guard against recursivity. Unwinders often contain complex behaviour that
- // may not be safe for the allocator (i.e. the unwinder calls dlopen(),
- // which calls malloc()). When recursive behaviour is detected, we will
- // automatically fall back to the supporting allocator to supply the
- // allocation.
- bool RecursiveGuard = false;
+ // Additional platform specific data structure for the guarded pool mapping.
+ PlatformSpecificMapData GuardedPagePoolPlatformData = {};
+
+ class ScopedRecursiveGuard {
+ public:
+ ScopedRecursiveGuard() { getThreadLocals()->RecursiveGuard = true; }
+ ~ScopedRecursiveGuard() { getThreadLocals()->RecursiveGuard = false; }
};
- static GWP_ASAN_TLS_INITIAL_EXEC ThreadLocalPackedVariables ThreadLocals;
+
+ // Initialise the PRNG, platform-specific.
+ void initPRNG();
+
+ // xorshift (32-bit output), extremely fast PRNG that uses arithmetic
+ // operations only. Seeded using platform-specific mechanisms by initPRNG().
+ uint32_t getRandomUnsigned32();
};
} // namespace gwp_asan
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/mutex.h b/contrib/llvm-project/compiler-rt/lib/gwp_asan/mutex.h
index c29df4cde164..34b91a2880dd 100644
--- a/contrib/llvm-project/compiler-rt/lib/gwp_asan/mutex.h
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/mutex.h
@@ -9,14 +9,11 @@
#ifndef GWP_ASAN_MUTEX_H_
#define GWP_ASAN_MUTEX_H_
-#ifdef __unix__
-#include <pthread.h>
-#else
-#error "GWP-ASan is not supported on this platform."
-#endif
+#include "gwp_asan/platform_specific/mutex_fuchsia.h" // IWYU pragma: keep
+#include "gwp_asan/platform_specific/mutex_posix.h" // IWYU pragma: keep
namespace gwp_asan {
-class Mutex {
+class Mutex final : PlatformMutex {
public:
constexpr Mutex() = default;
~Mutex() = default;
@@ -28,11 +25,6 @@ public:
bool tryLock();
// Unlock the mutex.
void unlock();
-
-private:
-#ifdef __unix__
- pthread_mutex_t Mu = PTHREAD_MUTEX_INITIALIZER;
-#endif // defined(__unix__)
};
class ScopedLock {
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/backtrace.h b/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/backtrace.h
index 3a72eb3d08e8..9bb12af206a5 100644
--- a/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/backtrace.h
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/backtrace.h
@@ -9,21 +9,45 @@
#ifndef GWP_ASAN_OPTIONAL_BACKTRACE_H_
#define GWP_ASAN_OPTIONAL_BACKTRACE_H_
-#include "gwp_asan/optional/segv_handler.h"
+#include "gwp_asan/optional/printf.h"
#include "gwp_asan/options.h"
namespace gwp_asan {
-namespace options {
-// Functions to get the platform-specific and implementation-specific backtrace
-// and backtrace printing functions when RTGwpAsanBacktraceLibc or
-// RTGwpAsanBacktraceSanitizerCommon are linked. Use these functions to get the
-// backtrace function for populating the Options::Backtrace and
-// Options::PrintBacktrace when initialising the GuardedPoolAllocator. Please
-// note any thread-safety descriptions for the implementation of these functions
-// that you use.
-Backtrace_t getBacktraceFunction();
-crash_handler::PrintBacktrace_t getPrintBacktraceFunction();
-} // namespace options
+namespace backtrace {
+// ================================ Description ================================
+// This function shall take the backtrace provided in `TraceBuffer`, and print
+// it in a human-readable format using `Print`. Generally, this function shall
+// resolve raw pointers to section offsets and print them with the following
+// sanitizer-common format:
+// " #{frame_number} {pointer} in {function name} ({binary name}+{offset}"
+// e.g. " #5 0x420459 in _start (/tmp/uaf+0x420459)"
+// This format allows the backtrace to be symbolized offline successfully using
+// llvm-symbolizer.
+// =================================== Notes ===================================
+// This function may directly or indirectly call malloc(), as the
+// GuardedPoolAllocator contains a reentrancy barrier to prevent infinite
+// recursion. Any allocation made inside this function will be served by the
+// supporting allocator, and will not have GWP-ASan protections.
+typedef void (*PrintBacktrace_t)(uintptr_t *TraceBuffer, size_t TraceLength,
+ Printf_t Print);
+
+// Returns a function pointer to a backtrace function that's suitable for
+// unwinding through a signal handler. This is important primarily for frame-
+// pointer based unwinders, DWARF or other unwinders can simply provide the
+// normal backtrace function as the implementation here. On POSIX, SignalContext
+// should be the `ucontext_t` from the signal handler.
+typedef size_t (*SegvBacktrace_t)(uintptr_t *TraceBuffer, size_t Size,
+ void *SignalContext);
+
+// Returns platform-specific provided implementations of Backtrace_t for use
+// inside the GWP-ASan core allocator.
+options::Backtrace_t getBacktraceFunction();
+
+// Returns platform-specific provided implementations of PrintBacktrace_t and
+// SegvBacktrace_t for use in the optional SEGV handler.
+PrintBacktrace_t getPrintBacktraceFunction();
+SegvBacktrace_t getSegvBacktraceFunction();
+} // namespace backtrace
} // namespace gwp_asan
#endif // GWP_ASAN_OPTIONAL_BACKTRACE_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/backtrace_fuchsia.cpp b/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/backtrace_fuchsia.cpp
new file mode 100644
index 000000000000..879312a7631e
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/backtrace_fuchsia.cpp
@@ -0,0 +1,21 @@
+//===-- backtrace_fuchsia.cpp -----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "gwp_asan/optional/backtrace.h"
+
+// GWP-ASan on Fuchsia doesn't currently support backtraces.
+
+namespace gwp_asan {
+namespace backtrace {
+
+options::Backtrace_t getBacktraceFunction() { return nullptr; }
+PrintBacktrace_t getPrintBacktraceFunction() { return nullptr; }
+SegvBacktrace_t getSegvBacktraceFunction() { return nullptr; }
+
+} // namespace backtrace
+} // namespace gwp_asan
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/backtrace_linux_libc.cpp b/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/backtrace_linux_libc.cpp
index bb0aad224a14..ea8e72be287d 100644
--- a/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/backtrace_linux_libc.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/backtrace_linux_libc.cpp
@@ -13,7 +13,9 @@
#include <stdlib.h>
#include <string.h>
+#include "gwp_asan/definitions.h"
#include "gwp_asan/optional/backtrace.h"
+#include "gwp_asan/optional/printf.h"
#include "gwp_asan/options.h"
namespace {
@@ -23,8 +25,16 @@ size_t Backtrace(uintptr_t *TraceBuffer, size_t Size) {
return backtrace(reinterpret_cast<void **>(TraceBuffer), Size);
}
+// We don't need any custom handling for the Segv backtrace - the libc unwinder
+// has no problems with unwinding through a signal handler. Force inlining here
+// to avoid the additional frame.
+GWP_ASAN_ALWAYS_INLINE size_t SegvBacktrace(uintptr_t *TraceBuffer, size_t Size,
+ void * /*Context*/) {
+ return Backtrace(TraceBuffer, Size);
+}
+
static void PrintBacktrace(uintptr_t *Trace, size_t TraceLength,
- gwp_asan::crash_handler::Printf_t Printf) {
+ gwp_asan::Printf_t Printf) {
if (TraceLength == 0) {
Printf(" <not found (does your allocator support backtracing?)>\n\n");
return;
@@ -47,10 +57,11 @@ static void PrintBacktrace(uintptr_t *Trace, size_t TraceLength,
} // anonymous namespace
namespace gwp_asan {
-namespace options {
-Backtrace_t getBacktraceFunction() { return Backtrace; }
-crash_handler::PrintBacktrace_t getPrintBacktraceFunction() {
- return PrintBacktrace;
-}
-} // namespace options
+namespace backtrace {
+
+options::Backtrace_t getBacktraceFunction() { return Backtrace; }
+PrintBacktrace_t getPrintBacktraceFunction() { return PrintBacktrace; }
+SegvBacktrace_t getSegvBacktraceFunction() { return SegvBacktrace; }
+
+} // namespace backtrace
} // namespace gwp_asan
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/backtrace_sanitizer_common.cpp b/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/backtrace_sanitizer_common.cpp
index 3ac4b52bfc27..e6cce86e3b7b 100644
--- a/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/backtrace_sanitizer_common.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/backtrace_sanitizer_common.cpp
@@ -22,30 +22,47 @@ void __sanitizer::BufferedStackTrace::UnwindImpl(uptr pc, uptr bp,
void *context,
bool request_fast,
u32 max_depth) {
- if (!StackTrace::WillUseFastUnwind(request_fast)) {
- return Unwind(max_depth, pc, bp, context, 0, 0, request_fast);
- }
- Unwind(max_depth, pc, 0, context, 0, 0, false);
+ if (!StackTrace::WillUseFastUnwind(request_fast))
+ return Unwind(max_depth, pc, 0, context, 0, 0, false);
+
+ uptr top = 0;
+ uptr bottom = 0;
+ GetThreadStackTopAndBottom(/*at_initialization*/ false, &top, &bottom);
+
+ return Unwind(max_depth, pc, bp, context, top, bottom, request_fast);
}
namespace {
-size_t Backtrace(uintptr_t *TraceBuffer, size_t Size) {
+size_t BacktraceCommon(uintptr_t *TraceBuffer, size_t Size, void *Context) {
+ // Use the slow sanitizer unwinder in the segv handler. Fast frame pointer
+ // unwinders can end up dropping frames because the kernel sigreturn() frame's
+ // return address is the return address at time of fault. This has the result
+ // of never actually capturing the PC where the signal was raised.
+ bool UseFastUnwind = (Context == nullptr);
+
__sanitizer::BufferedStackTrace Trace;
Trace.Reset();
if (Size > __sanitizer::kStackTraceMax)
Size = __sanitizer::kStackTraceMax;
Trace.Unwind((__sanitizer::uptr)__builtin_return_address(0),
- (__sanitizer::uptr)__builtin_frame_address(0),
- /* ucontext */ nullptr,
- /* fast unwind */ true, Size - 1);
+ (__sanitizer::uptr)__builtin_frame_address(0), Context,
+ UseFastUnwind, Size - 1);
memcpy(TraceBuffer, Trace.trace, Trace.size * sizeof(uintptr_t));
return Trace.size;
}
+size_t Backtrace(uintptr_t *TraceBuffer, size_t Size) {
+ return BacktraceCommon(TraceBuffer, Size, nullptr);
+}
+
+size_t SegvBacktrace(uintptr_t *TraceBuffer, size_t Size, void *Context) {
+ return BacktraceCommon(TraceBuffer, Size, Context);
+}
+
static void PrintBacktrace(uintptr_t *Trace, size_t TraceLength,
- gwp_asan::crash_handler::Printf_t Printf) {
+ gwp_asan::Printf_t Printf) {
__sanitizer::StackTrace StackTrace;
StackTrace.trace = reinterpret_cast<__sanitizer::uptr *>(Trace);
StackTrace.size = TraceLength;
@@ -60,21 +77,23 @@ static void PrintBacktrace(uintptr_t *Trace, size_t TraceLength,
} // anonymous namespace
namespace gwp_asan {
-namespace options {
+namespace backtrace {
+
// This function is thread-compatible. It must be synchronised in respect to any
// other calls to getBacktraceFunction(), calls to getPrintBacktraceFunction(),
// and calls to either of the functions that they return. Furthermore, this may
// require synchronisation with any calls to sanitizer_common that use flags.
// Generally, this function will be called during the initialisation of the
// allocator, which is done in a thread-compatible manner.
-Backtrace_t getBacktraceFunction() {
+options::Backtrace_t getBacktraceFunction() {
// The unwinder requires the default flags to be set.
__sanitizer::SetCommonFlagsDefaults();
__sanitizer::InitializeCommonFlags();
return Backtrace;
}
-crash_handler::PrintBacktrace_t getPrintBacktraceFunction() {
- return PrintBacktrace;
-}
-} // namespace options
+
+PrintBacktrace_t getPrintBacktraceFunction() { return PrintBacktrace; }
+SegvBacktrace_t getSegvBacktraceFunction() { return SegvBacktrace; }
+
+} // namespace backtrace
} // namespace gwp_asan
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/options_parser.cpp b/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/options_parser.cpp
index 2e6386286745..60234124e8ed 100644
--- a/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/options_parser.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/options_parser.cpp
@@ -7,84 +7,251 @@
//===----------------------------------------------------------------------===//
#include "gwp_asan/optional/options_parser.h"
+#include "gwp_asan/optional/printf.h"
+#include "gwp_asan/utilities.h"
+#include <assert.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
-#include "gwp_asan/options.h"
-#include "sanitizer_common/sanitizer_common.h"
-#include "sanitizer_common/sanitizer_flag_parser.h"
-#include "sanitizer_common/sanitizer_flags.h"
-
-namespace gwp_asan {
-namespace options {
namespace {
-void registerGwpAsanFlags(__sanitizer::FlagParser *parser, Options *o) {
-#define GWP_ASAN_OPTION(Type, Name, DefaultValue, Description) \
- RegisterFlag(parser, #Name, Description, &o->Name);
+enum class OptionType : uint8_t {
+ OT_bool,
+ OT_int,
+};
+
+#define InvokeIfNonNull(Printf, ...) \
+ do { \
+ if (Printf) \
+ Printf(__VA_ARGS__); \
+ } while (0);
+
+class OptionParser {
+public:
+ explicit OptionParser(gwp_asan::Printf_t PrintfForWarnings)
+ : Printf(PrintfForWarnings) {}
+ void registerOption(const char *Name, const char *Desc, OptionType Type,
+ void *Var);
+ void parseString(const char *S);
+ void printOptionDescriptions();
+
+private:
+ // Calculate at compile-time how many options are available.
+#define GWP_ASAN_OPTION(...) +1
+ static constexpr size_t MaxOptions = 0
#include "gwp_asan/options.inc"
+ ;
#undef GWP_ASAN_OPTION
+
+ struct Option {
+ const char *Name;
+ const char *Desc;
+ OptionType Type;
+ void *Var;
+ } Options[MaxOptions];
+
+ size_t NumberOfOptions = 0;
+ const char *Buffer = nullptr;
+ uintptr_t Pos = 0;
+ gwp_asan::Printf_t Printf = nullptr;
+
+ void skipWhitespace();
+ void parseOptions();
+ bool parseOption();
+ bool setOptionToValue(const char *Name, const char *Value);
+};
+
+void OptionParser::printOptionDescriptions() {
+ InvokeIfNonNull(Printf, "GWP-ASan: Available options:\n");
+ for (size_t I = 0; I < NumberOfOptions; ++I)
+ InvokeIfNonNull(Printf, "\t%s\n\t\t- %s\n", Options[I].Name,
+ Options[I].Desc);
+}
+
+bool isSeparator(char C) {
+ return C == ' ' || C == ',' || C == ':' || C == '\n' || C == '\t' ||
+ C == '\r';
+}
+
+bool isSeparatorOrNull(char C) { return !C || isSeparator(C); }
+
+void OptionParser::skipWhitespace() {
+ while (isSeparator(Buffer[Pos]))
+ ++Pos;
+}
+
+bool OptionParser::parseOption() {
+ const uintptr_t NameStart = Pos;
+ while (Buffer[Pos] != '=' && !isSeparatorOrNull(Buffer[Pos]))
+ ++Pos;
+
+ const char *Name = Buffer + NameStart;
+ if (Buffer[Pos] != '=') {
+ InvokeIfNonNull(Printf, "GWP-ASan: Expected '=' when parsing option '%s'.",
+ Name);
+ return false;
+ }
+ const uintptr_t ValueStart = ++Pos;
+ const char *Value;
+ if (Buffer[Pos] == '\'' || Buffer[Pos] == '"') {
+ const char Quote = Buffer[Pos++];
+ while (Buffer[Pos] != 0 && Buffer[Pos] != Quote)
+ ++Pos;
+ if (Buffer[Pos] == 0) {
+ InvokeIfNonNull(Printf, "GWP-ASan: Unterminated string in option '%s'.",
+ Name);
+ return false;
+ }
+ Value = Buffer + ValueStart + 1;
+ ++Pos; // consume the closing quote
+ } else {
+ while (!isSeparatorOrNull(Buffer[Pos]))
+ ++Pos;
+ Value = Buffer + ValueStart;
+ }
+
+ return setOptionToValue(Name, Value);
}
-const char *getCompileDefinitionGwpAsanDefaultOptions() {
-#ifdef GWP_ASAN_DEFAULT_OPTIONS
- return SANITIZER_STRINGIFY(GWP_ASAN_DEFAULT_OPTIONS);
-#else
- return "";
-#endif
+void OptionParser::parseOptions() {
+ while (true) {
+ skipWhitespace();
+ if (Buffer[Pos] == 0)
+ break;
+ if (!parseOption()) {
+ InvokeIfNonNull(Printf, "GWP-ASan: Options parsing failed.\n");
+ return;
+ }
+ }
+}
+
+void OptionParser::parseString(const char *S) {
+ if (!S)
+ return;
+ Buffer = S;
+ Pos = 0;
+ parseOptions();
+}
+
+bool parseBool(const char *Value, bool *b) {
+ if (strncmp(Value, "0", 1) == 0 || strncmp(Value, "no", 2) == 0 ||
+ strncmp(Value, "false", 5) == 0) {
+ *b = false;
+ return true;
+ }
+ if (strncmp(Value, "1", 1) == 0 || strncmp(Value, "yes", 3) == 0 ||
+ strncmp(Value, "true", 4) == 0) {
+ *b = true;
+ return true;
+ }
+ return false;
+}
+
+bool OptionParser::setOptionToValue(const char *Name, const char *Value) {
+ for (size_t I = 0; I < NumberOfOptions; ++I) {
+ const uintptr_t Len = strlen(Options[I].Name);
+ if (strncmp(Name, Options[I].Name, Len) != 0 || Name[Len] != '=')
+ continue;
+ bool Ok = false;
+ switch (Options[I].Type) {
+ case OptionType::OT_bool:
+ Ok = parseBool(Value, reinterpret_cast<bool *>(Options[I].Var));
+ if (!Ok)
+ InvokeIfNonNull(
+ Printf, "GWP-ASan: Invalid boolean value '%s' for option '%s'.\n",
+ Value, Options[I].Name);
+ break;
+ case OptionType::OT_int:
+ char *ValueEnd;
+ *reinterpret_cast<int *>(Options[I].Var) =
+ static_cast<int>(strtol(Value, &ValueEnd, 10));
+ Ok =
+ *ValueEnd == '"' || *ValueEnd == '\'' || isSeparatorOrNull(*ValueEnd);
+ if (!Ok)
+ InvokeIfNonNull(
+ Printf, "GWP-ASan: Invalid integer value '%s' for option '%s'.\n",
+ Value, Options[I].Name);
+ break;
+ }
+ return Ok;
+ }
+
+ InvokeIfNonNull(Printf, "GWP-ASan: Unknown option '%s'.", Name);
+ return true;
+}
+
+void OptionParser::registerOption(const char *Name, const char *Desc,
+ OptionType Type, void *Var) {
+ assert(NumberOfOptions < MaxOptions &&
+ "GWP-ASan Error: Ran out of space for options.\n");
+ Options[NumberOfOptions].Name = Name;
+ Options[NumberOfOptions].Desc = Desc;
+ Options[NumberOfOptions].Type = Type;
+ Options[NumberOfOptions].Var = Var;
+ ++NumberOfOptions;
+}
+
+void registerGwpAsanOptions(OptionParser *parser,
+ gwp_asan::options::Options *o) {
+#define GWP_ASAN_OPTION(Type, Name, DefaultValue, Description) \
+ parser->registerOption(#Name, Description, OptionType::OT_##Type, &o->Name);
+#include "gwp_asan/options.inc"
+#undef GWP_ASAN_OPTION
}
const char *getGwpAsanDefaultOptions() {
return (__gwp_asan_default_options) ? __gwp_asan_default_options() : "";
}
-Options *getOptionsInternal() {
- static Options GwpAsanFlags;
- return &GwpAsanFlags;
+gwp_asan::options::Options *getOptionsInternal() {
+ static gwp_asan::options::Options GwpAsanOptions;
+ return &GwpAsanOptions;
}
} // anonymous namespace
-void initOptions() {
- __sanitizer::SetCommonFlagsDefaults();
+namespace gwp_asan {
+namespace options {
+void initOptions(const char *OptionsStr, Printf_t PrintfForWarnings) {
Options *o = getOptionsInternal();
o->setDefaults();
- __sanitizer::FlagParser Parser;
- registerGwpAsanFlags(&Parser, o);
-
- // Override from compile definition.
- Parser.ParseString(getCompileDefinitionGwpAsanDefaultOptions());
+ OptionParser Parser(PrintfForWarnings);
+ registerGwpAsanOptions(&Parser, o);
- // Override from user-specified string.
- Parser.ParseString(getGwpAsanDefaultOptions());
+ // Override from the weak function definition in this executable.
+ Parser.parseString(getGwpAsanDefaultOptions());
- // Override from environment.
- Parser.ParseString(__sanitizer::GetEnv("GWP_ASAN_OPTIONS"));
+ // Override from the provided options string.
+ Parser.parseString(OptionsStr);
- __sanitizer::InitializeCommonFlags();
- if (__sanitizer::Verbosity())
- __sanitizer::ReportUnrecognizedFlags();
+ if (o->help)
+ Parser.printOptionDescriptions();
if (!o->Enabled)
return;
- // Sanity checks for the parameters.
if (o->MaxSimultaneousAllocations <= 0) {
- __sanitizer::Printf("GWP-ASan ERROR: MaxSimultaneousAllocations must be > "
- "0 when GWP-ASan is enabled.\n");
- exit(EXIT_FAILURE);
+ InvokeIfNonNull(
+ PrintfForWarnings,
+ "GWP-ASan ERROR: MaxSimultaneousAllocations must be > 0 when GWP-ASan "
+ "is enabled.\n");
+ o->Enabled = false;
}
-
- if (o->SampleRate < 1) {
- __sanitizer::Printf(
+ if (o->SampleRate <= 0) {
+ InvokeIfNonNull(
+ PrintfForWarnings,
"GWP-ASan ERROR: SampleRate must be > 0 when GWP-ASan is enabled.\n");
- exit(EXIT_FAILURE);
+ o->Enabled = false;
}
}
+void initOptions(Printf_t PrintfForWarnings) {
+ initOptions(getenv("GWP_ASAN_OPTIONS"), PrintfForWarnings);
+}
+
Options &getOptions() { return *getOptionsInternal(); }
} // namespace options
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/options_parser.h b/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/options_parser.h
index 7a6bfaf0ce3e..a5a062801f8f 100644
--- a/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/options_parser.h
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/options_parser.h
@@ -9,14 +9,15 @@
#ifndef GWP_ASAN_OPTIONAL_OPTIONS_PARSER_H_
#define GWP_ASAN_OPTIONAL_OPTIONS_PARSER_H_
-#include "gwp_asan/optional/backtrace.h"
+#include "gwp_asan/optional/printf.h"
#include "gwp_asan/options.h"
-#include "sanitizer_common/sanitizer_common.h"
namespace gwp_asan {
namespace options {
-// Parse the options from the GWP_ASAN_FLAGS environment variable.
-void initOptions();
+// Parse the options from the GWP_ASAN_OPTIONS environment variable.
+void initOptions(Printf_t PrintfForWarnings = nullptr);
+// Parse the options from the provided string.
+void initOptions(const char *OptionsStr, Printf_t PrintfForWarnings = nullptr);
// Returns the initialised options. Call initOptions() prior to calling this
// function.
Options &getOptions();
@@ -24,8 +25,7 @@ Options &getOptions();
} // namespace gwp_asan
extern "C" {
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE const char *
-__gwp_asan_default_options();
+__attribute__((weak)) const char *__gwp_asan_default_options();
}
#endif // GWP_ASAN_OPTIONAL_OPTIONS_PARSER_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/printf.h b/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/printf.h
new file mode 100644
index 000000000000..1004a2c24989
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/printf.h
@@ -0,0 +1,33 @@
+//===-- printf.h ------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef GWP_ASAN_OPTIONAL_PRINTF_H_
+#define GWP_ASAN_OPTIONAL_PRINTF_H_
+
+namespace gwp_asan {
+
+// ================================ Requirements ===============================
+// This function is required to be provided by the supporting allocator iff the
+// allocator wants to use any of the optional components.
+// ================================ Description ================================
+// This function shall produce output according to a strict subset of the C
+// standard library's printf() family. This function must support printing the
+// following formats:
+// 1. integers: "%([0-9]*)?(z|ll)?{d,u,x,X}"
+// 2. pointers: "%p"
+// 3. strings: "%[-]([0-9]*)?(\\.\\*)?s"
+// 4. chars: "%c"
+// This function must be implemented in a signal-safe manner, and thus must not
+// malloc().
+// =================================== Notes ===================================
+// This function has a slightly different signature than the C standard
+// library's printf(). Notably, it returns 'void' rather than 'int'.
+typedef void (*Printf_t)(const char *Format, ...);
+
+} // namespace gwp_asan
+#endif // GWP_ASAN_OPTIONAL_PRINTF_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/segv_handler.h b/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/segv_handler.h
index 10af15055e2a..87d9fe1dff17 100644
--- a/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/segv_handler.h
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/segv_handler.h
@@ -1,4 +1,4 @@
-//===-- crash_handler.h -----------------------------------------*- C++ -*-===//
+//===-- segv_handler.h ------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -6,59 +6,15 @@
//
//===----------------------------------------------------------------------===//
-#ifndef GWP_ASAN_OPTIONAL_CRASH_HANDLER_H_
-#define GWP_ASAN_OPTIONAL_CRASH_HANDLER_H_
+#ifndef GWP_ASAN_OPTIONAL_SEGV_HANDLER_H_
+#define GWP_ASAN_OPTIONAL_SEGV_HANDLER_H_
#include "gwp_asan/guarded_pool_allocator.h"
-#include "gwp_asan/options.h"
+#include "gwp_asan/optional/backtrace.h"
+#include "gwp_asan/optional/printf.h"
namespace gwp_asan {
-namespace crash_handler {
-// ================================ Requirements ===============================
-// This function must be provided by the supporting allocator only when this
-// provided crash handler is used to dump the generic report.
-// sanitizer::Printf() function can be simply used here.
-// ================================ Description ================================
-// This function shall produce output according to a strict subset of the C
-// standard library's printf() family. This function must support printing the
-// following formats:
-// 1. integers: "%([0-9]*)?(z|ll)?{d,u,x,X}"
-// 2. pointers: "%p"
-// 3. strings: "%[-]([0-9]*)?(\\.\\*)?s"
-// 4. chars: "%c"
-// This function must be implemented in a signal-safe manner, and thus must not
-// malloc().
-// =================================== Notes ===================================
-// This function has a slightly different signature than the C standard
-// library's printf(). Notably, it returns 'void' rather than 'int'.
-typedef void (*Printf_t)(const char *Format, ...);
-
-// ================================ Requirements ===============================
-// This function is required for the supporting allocator, but one of the three
-// provided implementations may be used (RTGwpAsanBacktraceLibc,
-// RTGwpAsanBacktraceSanitizerCommon, or BasicPrintBacktraceFunction).
-// ================================ Description ================================
-// This function shall take the backtrace provided in `TraceBuffer`, and print
-// it in a human-readable format using `Print`. Generally, this function shall
-// resolve raw pointers to section offsets and print them with the following
-// sanitizer-common format:
-// " #{frame_number} {pointer} in {function name} ({binary name}+{offset}"
-// e.g. " #5 0x420459 in _start (/tmp/uaf+0x420459)"
-// This format allows the backtrace to be symbolized offline successfully using
-// llvm-symbolizer.
-// =================================== Notes ===================================
-// This function may directly or indirectly call malloc(), as the
-// GuardedPoolAllocator contains a reentrancy barrier to prevent infinite
-// recursion. Any allocation made inside this function will be served by the
-// supporting allocator, and will not have GWP-ASan protections.
-typedef void (*PrintBacktrace_t)(uintptr_t *TraceBuffer, size_t TraceLength,
- Printf_t Print);
-
-// Returns a function pointer to a basic PrintBacktrace implementation. This
-// implementation simply prints the stack trace in a human readable fashion
-// without any symbolization.
-PrintBacktrace_t getBasicPrintBacktraceFunction();
-
+namespace segv_handler {
// Install the SIGSEGV crash handler for printing use-after-free and heap-
// buffer-{under|over}flow exceptions if the user asked for it. This is platform
// specific as even though POSIX and Windows both support registering handlers
@@ -66,16 +22,12 @@ PrintBacktrace_t getBasicPrintBacktraceFunction();
// the address that caused the SIGSEGV exception. GPA->init() must be called
// before this function.
void installSignalHandlers(gwp_asan::GuardedPoolAllocator *GPA, Printf_t Printf,
- PrintBacktrace_t PrintBacktrace,
- options::Backtrace_t Backtrace);
+ gwp_asan::backtrace::PrintBacktrace_t PrintBacktrace,
+ gwp_asan::backtrace::SegvBacktrace_t SegvBacktrace);
+// Uninstall the signal handlers (test-only).
void uninstallSignalHandlers();
-
-void dumpReport(uintptr_t ErrorPtr, const gwp_asan::AllocatorState *State,
- const gwp_asan::AllocationMetadata *Metadata,
- options::Backtrace_t Backtrace, Printf_t Printf,
- PrintBacktrace_t PrintBacktrace);
-} // namespace crash_handler
+} // namespace segv_handler
} // namespace gwp_asan
-#endif // GWP_ASAN_OPTIONAL_CRASH_HANDLER_H_
+#endif // GWP_ASAN_OPTIONAL_SEGV_HANDLER_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/segv_handler_fuchsia.cpp b/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/segv_handler_fuchsia.cpp
new file mode 100644
index 000000000000..966d7d0bd996
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/segv_handler_fuchsia.cpp
@@ -0,0 +1,22 @@
+//===-- segv_handler_fuchsia.cpp --------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "gwp_asan/optional/segv_handler.h"
+
+// GWP-ASan on Fuchsia doesn't currently support signal handlers.
+
+namespace gwp_asan {
+namespace segv_handler {
+void installSignalHandlers(gwp_asan::GuardedPoolAllocator * /* GPA */,
+ Printf_t /* Printf */,
+ backtrace::PrintBacktrace_t /* PrintBacktrace */,
+ backtrace::SegvBacktrace_t /* SegvBacktrace */) {}
+
+void uninstallSignalHandlers() {}
+} // namespace segv_handler
+} // namespace gwp_asan
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/segv_handler_posix.cpp b/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/segv_handler_posix.cpp
index 22589b893604..5c9bb9f3a2e7 100644
--- a/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/segv_handler_posix.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/segv_handler_posix.cpp
@@ -1,4 +1,4 @@
-//===-- crash_handler_posix.cpp ---------------------------------*- C++ -*-===//
+//===-- segv_handler_posix.cpp ----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -12,62 +12,30 @@
#include "gwp_asan/optional/segv_handler.h"
#include "gwp_asan/options.h"
+// RHEL creates the PRIu64 format macro (for printing uint64_t's) only when this
+// macro is defined before including <inttypes.h>.
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS 1
+#endif
+
#include <assert.h>
#include <inttypes.h>
#include <signal.h>
#include <stdio.h>
-namespace {
using gwp_asan::AllocationMetadata;
using gwp_asan::Error;
using gwp_asan::GuardedPoolAllocator;
-using gwp_asan::crash_handler::PrintBacktrace_t;
-using gwp_asan::crash_handler::Printf_t;
-using gwp_asan::options::Backtrace_t;
-
-struct sigaction PreviousHandler;
-bool SignalHandlerInstalled;
-gwp_asan::GuardedPoolAllocator *GPAForSignalHandler;
-Printf_t PrintfForSignalHandler;
-PrintBacktrace_t PrintBacktraceForSignalHandler;
-Backtrace_t BacktraceForSignalHandler;
+using gwp_asan::Printf_t;
+using gwp_asan::backtrace::PrintBacktrace_t;
+using gwp_asan::backtrace::SegvBacktrace_t;
-static void sigSegvHandler(int sig, siginfo_t *info, void *ucontext) {
- if (GPAForSignalHandler) {
- GPAForSignalHandler->stop();
-
- gwp_asan::crash_handler::dumpReport(
- reinterpret_cast<uintptr_t>(info->si_addr),
- GPAForSignalHandler->getAllocatorState(),
- GPAForSignalHandler->getMetadataRegion(), BacktraceForSignalHandler,
- PrintfForSignalHandler, PrintBacktraceForSignalHandler);
- }
-
- // Process any previous handlers.
- if (PreviousHandler.sa_flags & SA_SIGINFO) {
- PreviousHandler.sa_sigaction(sig, info, ucontext);
- } else if (PreviousHandler.sa_handler == SIG_DFL) {
- // If the previous handler was the default handler, cause a core dump.
- signal(SIGSEGV, SIG_DFL);
- raise(SIGSEGV);
- } else if (PreviousHandler.sa_handler == SIG_IGN) {
- // If the previous segv handler was SIGIGN, crash iff we were responsible
- // for the crash.
- if (__gwp_asan_error_is_mine(GPAForSignalHandler->getAllocatorState(),
- reinterpret_cast<uintptr_t>(info->si_addr))) {
- signal(SIGSEGV, SIG_DFL);
- raise(SIGSEGV);
- }
- } else {
- PreviousHandler.sa_handler(sig);
- }
-}
+namespace {
struct ScopedEndOfReportDecorator {
- ScopedEndOfReportDecorator(gwp_asan::crash_handler::Printf_t Printf)
- : Printf(Printf) {}
+ ScopedEndOfReportDecorator(gwp_asan::Printf_t Printf) : Printf(Printf) {}
~ScopedEndOfReportDecorator() { Printf("*** End GWP-ASan report ***\n"); }
- gwp_asan::crash_handler::Printf_t Printf;
+ gwp_asan::Printf_t Printf;
};
// Prints the provided error and metadata information.
@@ -117,51 +85,10 @@ void printHeader(Error E, uintptr_t AccessPtr,
AccessPtr, DescriptionBuffer, ThreadBuffer);
}
-void defaultPrintStackTrace(uintptr_t *Trace, size_t TraceLength,
- gwp_asan::crash_handler::Printf_t Printf) {
- if (TraceLength == 0)
- Printf(" <unknown (does your allocator support backtracing?)>\n");
-
- for (size_t i = 0; i < TraceLength; ++i) {
- Printf(" #%zu 0x%zx in <unknown>\n", i, Trace[i]);
- }
- Printf("\n");
-}
-
-} // anonymous namespace
-
-namespace gwp_asan {
-namespace crash_handler {
-PrintBacktrace_t getBasicPrintBacktraceFunction() {
- return defaultPrintStackTrace;
-}
-
-void installSignalHandlers(gwp_asan::GuardedPoolAllocator *GPA, Printf_t Printf,
- PrintBacktrace_t PrintBacktrace,
- options::Backtrace_t Backtrace) {
- GPAForSignalHandler = GPA;
- PrintfForSignalHandler = Printf;
- PrintBacktraceForSignalHandler = PrintBacktrace;
- BacktraceForSignalHandler = Backtrace;
-
- struct sigaction Action;
- Action.sa_sigaction = sigSegvHandler;
- Action.sa_flags = SA_SIGINFO;
- sigaction(SIGSEGV, &Action, &PreviousHandler);
- SignalHandlerInstalled = true;
-}
-
-void uninstallSignalHandlers() {
- if (SignalHandlerInstalled) {
- sigaction(SIGSEGV, &PreviousHandler, nullptr);
- SignalHandlerInstalled = false;
- }
-}
-
void dumpReport(uintptr_t ErrorPtr, const gwp_asan::AllocatorState *State,
const gwp_asan::AllocationMetadata *Metadata,
- options::Backtrace_t Backtrace, Printf_t Printf,
- PrintBacktrace_t PrintBacktrace) {
+ SegvBacktrace_t SegvBacktrace, Printf_t Printf,
+ PrintBacktrace_t PrintBacktrace, void *Context) {
assert(State && "dumpReport missing Allocator State.");
assert(Metadata && "dumpReport missing Metadata.");
assert(Printf && "dumpReport missing Printf.");
@@ -194,7 +121,8 @@ void dumpReport(uintptr_t ErrorPtr, const gwp_asan::AllocatorState *State,
// Print the fault backtrace.
static constexpr unsigned kMaximumStackFramesForCrashTrace = 512;
uintptr_t Trace[kMaximumStackFramesForCrashTrace];
- size_t TraceLength = Backtrace(Trace, kMaximumStackFramesForCrashTrace);
+ size_t TraceLength =
+ SegvBacktrace(Trace, kMaximumStackFramesForCrashTrace, Context);
PrintBacktrace(Trace, TraceLength, Printf);
@@ -204,7 +132,7 @@ void dumpReport(uintptr_t ErrorPtr, const gwp_asan::AllocatorState *State,
// Maybe print the deallocation trace.
if (__gwp_asan_is_deallocated(AllocMeta)) {
uint64_t ThreadID = __gwp_asan_get_deallocation_thread_id(AllocMeta);
- if (ThreadID == kInvalidThreadID)
+ if (ThreadID == gwp_asan::kInvalidThreadID)
Printf("0x%zx was deallocated by thread <unknown> here:\n", ErrorPtr);
else
Printf("0x%zx was deallocated by thread %zu here:\n", ErrorPtr, ThreadID);
@@ -215,7 +143,7 @@ void dumpReport(uintptr_t ErrorPtr, const gwp_asan::AllocatorState *State,
// Print the allocation trace.
uint64_t ThreadID = __gwp_asan_get_allocation_thread_id(AllocMeta);
- if (ThreadID == kInvalidThreadID)
+ if (ThreadID == gwp_asan::kInvalidThreadID)
Printf("0x%zx was allocated by thread <unknown> here:\n", ErrorPtr);
else
Printf("0x%zx was allocated by thread %zu here:\n", ErrorPtr, ThreadID);
@@ -223,5 +151,75 @@ void dumpReport(uintptr_t ErrorPtr, const gwp_asan::AllocatorState *State,
AllocMeta, Trace, kMaximumStackFramesForCrashTrace);
PrintBacktrace(Trace, TraceLength, Printf);
}
-} // namespace crash_handler
+
+struct sigaction PreviousHandler;
+bool SignalHandlerInstalled;
+gwp_asan::GuardedPoolAllocator *GPAForSignalHandler;
+Printf_t PrintfForSignalHandler;
+PrintBacktrace_t PrintBacktraceForSignalHandler;
+SegvBacktrace_t BacktraceForSignalHandler;
+
+static void sigSegvHandler(int sig, siginfo_t *info, void *ucontext) {
+ if (GPAForSignalHandler) {
+ GPAForSignalHandler->stop();
+
+ dumpReport(reinterpret_cast<uintptr_t>(info->si_addr),
+ GPAForSignalHandler->getAllocatorState(),
+ GPAForSignalHandler->getMetadataRegion(),
+ BacktraceForSignalHandler, PrintfForSignalHandler,
+ PrintBacktraceForSignalHandler, ucontext);
+ }
+
+ // Process any previous handlers.
+ if (PreviousHandler.sa_flags & SA_SIGINFO) {
+ PreviousHandler.sa_sigaction(sig, info, ucontext);
+ } else if (PreviousHandler.sa_handler == SIG_DFL) {
+ // If the previous handler was the default handler, cause a core dump.
+ signal(SIGSEGV, SIG_DFL);
+ raise(SIGSEGV);
+  } else if (PreviousHandler.sa_handler == SIG_IGN) {
+    // If the previous segv handler was SIG_IGN, crash iff we were responsible
+ // for the crash.
+ if (__gwp_asan_error_is_mine(GPAForSignalHandler->getAllocatorState(),
+ reinterpret_cast<uintptr_t>(info->si_addr))) {
+ signal(SIGSEGV, SIG_DFL);
+ raise(SIGSEGV);
+ }
+ } else {
+ PreviousHandler.sa_handler(sig);
+ }
+}
+} // anonymous namespace
+
+namespace gwp_asan {
+namespace segv_handler {
+
+void installSignalHandlers(gwp_asan::GuardedPoolAllocator *GPA, Printf_t Printf,
+ PrintBacktrace_t PrintBacktrace,
+ SegvBacktrace_t SegvBacktrace) {
+ assert(GPA && "GPA wasn't provided to installSignalHandlers.");
+ assert(Printf && "Printf wasn't provided to installSignalHandlers.");
+ assert(PrintBacktrace &&
+ "PrintBacktrace wasn't provided to installSignalHandlers.");
+ assert(SegvBacktrace &&
+ "SegvBacktrace wasn't provided to installSignalHandlers.");
+ GPAForSignalHandler = GPA;
+ PrintfForSignalHandler = Printf;
+ PrintBacktraceForSignalHandler = PrintBacktrace;
+ BacktraceForSignalHandler = SegvBacktrace;
+
+ struct sigaction Action = {};
+ Action.sa_sigaction = sigSegvHandler;
+ Action.sa_flags = SA_SIGINFO;
+ sigaction(SIGSEGV, &Action, &PreviousHandler);
+ SignalHandlerInstalled = true;
+}
+
+void uninstallSignalHandlers() {
+ if (SignalHandlerInstalled) {
+ sigaction(SIGSEGV, &PreviousHandler, nullptr);
+ SignalHandlerInstalled = false;
+ }
+}
+} // namespace segv_handler
} // namespace gwp_asan
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/options.inc b/contrib/llvm-project/compiler-rt/lib/gwp_asan/options.inc
index 6cdddfbad84d..4834daef6003 100644
--- a/contrib/llvm-project/compiler-rt/lib/gwp_asan/options.inc
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/options.inc
@@ -10,7 +10,18 @@
#error "Define GWP_ASAN_OPTION prior to including this file!"
#endif
-GWP_ASAN_OPTION(bool, Enabled, true, "Is GWP-ASan enabled? Defaults to true.")
+#ifndef GWP_ASAN_DEFAULT_ENABLED
+#define GWP_ASAN_DEFAULT_ENABLED true
+#endif
+
+#ifndef GWP_ASAN_STRINGIFY
+#define GWP_ASAN_STRINGIFY(S) GWP_ASAN_STRINGIFY_(S)
+#define GWP_ASAN_STRINGIFY_(S) #S
+#endif
+
+GWP_ASAN_OPTION(bool, Enabled, GWP_ASAN_DEFAULT_ENABLED,
+ "Is GWP-ASan enabled? Defaults to " GWP_ASAN_STRINGIFY(
+ GWP_ASAN_DEFAULT_ENABLED) ".")
GWP_ASAN_OPTION(
bool, PerfectlyRightAlign, false,
@@ -29,7 +40,7 @@ GWP_ASAN_OPTION(int, MaxSimultaneousAllocations, 16,
GWP_ASAN_OPTION(int, SampleRate, 5000,
"The probability (1 / SampleRate) that an allocation is "
"selected for GWP-ASan sampling. Default is 5000. Sample rates "
- "up to (2^31 - 1) are supported.")
+ "up to (2^30 - 1) are supported.")
// Developer note - This option is not actually processed by GWP-ASan itself. It
// is included here so that a user can specify whether they want signal handlers
@@ -51,3 +62,18 @@ GWP_ASAN_OPTION(
GWP_ASAN_OPTION(bool, InstallForkHandlers, true,
"Install GWP-ASan atfork handlers to acquire internal locks "
"before fork and release them after.")
+
+GWP_ASAN_OPTION(bool, help, false, "Print a summary of the available options.")
+
+// =============================================================================
+// ==== WARNING
+// =============================================================================
+// If you are adding flags to GWP-ASan, please note that GWP-ASan flag strings
+// may be parsed by trusted system components (on Android, GWP-ASan flag strings
+// are parsed by libc during the dynamic loader). This means that GWP-ASan
+// should never feature flags like log paths on disk, because this can lead to
+// arbitrary file write and thus privilege escalation. For an example, see the
+// setuid ASan log_path exploits: https://www.exploit-db.com/exploits/46241.
+//
+// Please place all new flags above this warning, so that the warning always
+// stays at the bottom.
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/common_fuchsia.cpp b/contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/common_fuchsia.cpp
new file mode 100644
index 000000000000..b469ef87d70f
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/common_fuchsia.cpp
@@ -0,0 +1,15 @@
+//===-- common_fuchsia.cpp --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "gwp_asan/common.h"
+
+namespace gwp_asan {
+// This is only used for AllocationTrace.ThreadID and allocation traces are not
+// yet supported on Fuchsia.
+uint64_t getThreadID() { return kInvalidThreadID; }
+} // namespace gwp_asan
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/common_posix.cpp b/contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/common_posix.cpp
index e44e6299eeac..0637fc2a4245 100644
--- a/contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/common_posix.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/common_posix.cpp
@@ -1,4 +1,4 @@
-//===-- common_posix.cpp ---------------------------------*- C++ -*-===//
+//===-- common_posix.cpp ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -8,7 +8,9 @@
#include "gwp_asan/common.h"
-#include <sys/syscall.h>
+#include <stdint.h>
+#include <sys/syscall.h> // IWYU pragma: keep
+// IWYU pragma: no_include <syscall.h>
#include <unistd.h>
namespace gwp_asan {
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/guarded_pool_allocator_fuchsia.cpp b/contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/guarded_pool_allocator_fuchsia.cpp
new file mode 100644
index 000000000000..f58d4b104b39
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/guarded_pool_allocator_fuchsia.cpp
@@ -0,0 +1,103 @@
+//===-- guarded_pool_allocator_fuchsia.cpp ----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "gwp_asan/guarded_pool_allocator.h"
+#include "gwp_asan/utilities.h"
+
+#include <assert.h>
+#include <stdint.h>
+#include <string.h>
+#include <zircon/limits.h>
+#include <zircon/process.h>
+#include <zircon/syscalls.h>
+
+namespace gwp_asan {
+void GuardedPoolAllocator::initPRNG() {
+ _zx_cprng_draw(&getThreadLocals()->RandomState, sizeof(uint32_t));
+}
+
+void *GuardedPoolAllocator::map(size_t Size, const char *Name) const {
+ assert((Size % State.PageSize) == 0);
+ zx_handle_t Vmo;
+ zx_status_t Status = _zx_vmo_create(Size, 0, &Vmo);
+ Check(Status == ZX_OK, "Failed to create Vmo");
+ _zx_object_set_property(Vmo, ZX_PROP_NAME, Name, strlen(Name));
+ zx_vaddr_t Addr;
+ Status = _zx_vmar_map(_zx_vmar_root_self(),
+ ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_ALLOW_FAULTS,
+ 0, Vmo, 0, Size, &Addr);
+ Check(Status == ZX_OK, "Vmo mapping failed");
+ _zx_handle_close(Vmo);
+ return reinterpret_cast<void *>(Addr);
+}
+
+void GuardedPoolAllocator::unmap(void *Ptr, size_t Size) const {
+ assert((reinterpret_cast<uintptr_t>(Ptr) % State.PageSize) == 0);
+ assert((Size % State.PageSize) == 0);
+ zx_status_t Status = _zx_vmar_unmap(_zx_vmar_root_self(),
+ reinterpret_cast<zx_vaddr_t>(Ptr), Size);
+ Check(Status == ZX_OK, "Vmo unmapping failed");
+}
+
+void *GuardedPoolAllocator::reserveGuardedPool(size_t Size) {
+ assert((Size % State.PageSize) == 0);
+ zx_vaddr_t Addr;
+ const zx_status_t Status = _zx_vmar_allocate(
+ _zx_vmar_root_self(),
+ ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC, 0,
+ Size, &GuardedPagePoolPlatformData.Vmar, &Addr);
+ Check(Status == ZX_OK, "Failed to reserve guarded pool allocator memory");
+ _zx_object_set_property(GuardedPagePoolPlatformData.Vmar, ZX_PROP_NAME,
+ kGwpAsanGuardPageName, strlen(kGwpAsanGuardPageName));
+ return reinterpret_cast<void *>(Addr);
+}
+
+void GuardedPoolAllocator::unreserveGuardedPool() {
+ const zx_handle_t Vmar = GuardedPagePoolPlatformData.Vmar;
+ assert(Vmar != ZX_HANDLE_INVALID && Vmar != _zx_vmar_root_self());
+ Check(_zx_vmar_destroy(Vmar) == ZX_OK, "Failed to destroy a vmar");
+ Check(_zx_handle_close(Vmar) == ZX_OK, "Failed to close a vmar");
+ GuardedPagePoolPlatformData.Vmar = ZX_HANDLE_INVALID;
+}
+
+void GuardedPoolAllocator::allocateInGuardedPool(void *Ptr, size_t Size) const {
+ assert((reinterpret_cast<uintptr_t>(Ptr) % State.PageSize) == 0);
+ assert((Size % State.PageSize) == 0);
+ zx_handle_t Vmo;
+ zx_status_t Status = _zx_vmo_create(Size, 0, &Vmo);
+ Check(Status == ZX_OK, "Failed to create vmo");
+ _zx_object_set_property(Vmo, ZX_PROP_NAME, kGwpAsanAliveSlotName,
+ strlen(kGwpAsanAliveSlotName));
+ const zx_handle_t Vmar = GuardedPagePoolPlatformData.Vmar;
+ assert(Vmar != ZX_HANDLE_INVALID && Vmar != _zx_vmar_root_self());
+ const size_t Offset =
+ reinterpret_cast<uintptr_t>(Ptr) - State.GuardedPagePool;
+ zx_vaddr_t P;
+ Status = _zx_vmar_map(Vmar,
+ ZX_VM_PERM_READ | ZX_VM_PERM_WRITE |
+ ZX_VM_ALLOW_FAULTS | ZX_VM_SPECIFIC,
+ Offset, Vmo, 0, Size, &P);
+ Check(Status == ZX_OK, "Vmo mapping failed");
+ _zx_handle_close(Vmo);
+}
+
+void GuardedPoolAllocator::deallocateInGuardedPool(void *Ptr,
+ size_t Size) const {
+ assert((reinterpret_cast<uintptr_t>(Ptr) % State.PageSize) == 0);
+ assert((Size % State.PageSize) == 0);
+ const zx_handle_t Vmar = GuardedPagePoolPlatformData.Vmar;
+ assert(Vmar != ZX_HANDLE_INVALID && Vmar != _zx_vmar_root_self());
+ const zx_status_t Status =
+ _zx_vmar_unmap(Vmar, reinterpret_cast<zx_vaddr_t>(Ptr), Size);
+ Check(Status == ZX_OK, "Vmar unmapping failed");
+}
+
+size_t GuardedPoolAllocator::getPlatformPageSize() { return ZX_PAGE_SIZE; }
+
+void GuardedPoolAllocator::installAtFork() {}
+} // namespace gwp_asan
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/guarded_pool_allocator_fuchsia.h b/contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/guarded_pool_allocator_fuchsia.h
new file mode 100644
index 000000000000..fbd7d3aa67af
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/guarded_pool_allocator_fuchsia.h
@@ -0,0 +1,22 @@
+//===-- guarded_pool_allocator_fuchsia.h ------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#if defined(__Fuchsia__)
+#ifndef GWP_ASAN_GUARDED_POOL_ALLOCATOR_FUCHSIA_H_
+#define GWP_ASAN_GUARDED_POOL_ALLOCATOR_FUCHSIA_H_
+
+#include <zircon/types.h>
+
+namespace gwp_asan {
+struct PlatformSpecificMapData {
+ zx_handle_t Vmar;
+};
+} // namespace gwp_asan
+
+#endif // GWP_ASAN_GUARDED_POOL_ALLOCATOR_FUCHSIA_H_
+#endif // defined(__Fuchsia__)
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/guarded_pool_allocator_posix.cpp b/contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/guarded_pool_allocator_posix.cpp
index a8767a4cb808..adb7330a431e 100644
--- a/contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/guarded_pool_allocator_posix.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/guarded_pool_allocator_posix.cpp
@@ -6,16 +6,17 @@
//
//===----------------------------------------------------------------------===//
+#include "gwp_asan/common.h"
#include "gwp_asan/guarded_pool_allocator.h"
+#include "gwp_asan/platform_specific/guarded_pool_allocator_tls.h"
#include "gwp_asan/utilities.h"
#include <assert.h>
-#include <errno.h>
-#include <signal.h>
+#include <pthread.h>
+#include <stdint.h>
#include <stdlib.h>
-#include <string.h>
#include <sys/mman.h>
-#include <sys/types.h>
+#include <time.h>
#include <unistd.h>
#ifdef ANDROID
@@ -24,6 +25,7 @@
#define PR_SET_VMA_ANON_NAME 0
#endif // ANDROID
+namespace {
void MaybeSetMappingName(void *Mapping, size_t Size, const char *Name) {
#ifdef ANDROID
prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, Mapping, Size, Name);
@@ -31,39 +33,64 @@ void MaybeSetMappingName(void *Mapping, size_t Size, const char *Name) {
// Anonymous mapping names are only supported on Android.
return;
}
+} // anonymous namespace
namespace gwp_asan {
-void *GuardedPoolAllocator::mapMemory(size_t Size, const char *Name) const {
- void *Ptr =
- mmap(nullptr, Size, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+
+void GuardedPoolAllocator::initPRNG() {
+ getThreadLocals()->RandomState =
+ static_cast<uint32_t>(time(nullptr) + getThreadID());
+}
+
+void *GuardedPoolAllocator::map(size_t Size, const char *Name) const {
+ assert((Size % State.PageSize) == 0);
+ void *Ptr = mmap(nullptr, Size, PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
Check(Ptr != MAP_FAILED, "Failed to map guarded pool allocator memory");
MaybeSetMappingName(Ptr, Size, Name);
return Ptr;
}
-void GuardedPoolAllocator::unmapMemory(void *Ptr, size_t Size,
- const char *Name) const {
+void GuardedPoolAllocator::unmap(void *Ptr, size_t Size) const {
+ assert((reinterpret_cast<uintptr_t>(Ptr) % State.PageSize) == 0);
+ assert((Size % State.PageSize) == 0);
Check(munmap(Ptr, Size) == 0,
"Failed to unmap guarded pool allocator memory.");
- MaybeSetMappingName(Ptr, Size, Name);
}
-void GuardedPoolAllocator::markReadWrite(void *Ptr, size_t Size,
- const char *Name) const {
+void *GuardedPoolAllocator::reserveGuardedPool(size_t Size) {
+ assert((Size % State.PageSize) == 0);
+ void *Ptr =
+ mmap(nullptr, Size, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+ Check(Ptr != MAP_FAILED, "Failed to reserve guarded pool allocator memory");
+ MaybeSetMappingName(Ptr, Size, kGwpAsanGuardPageName);
+ return Ptr;
+}
+
+void GuardedPoolAllocator::unreserveGuardedPool() {
+ unmap(reinterpret_cast<void *>(State.GuardedPagePool),
+ State.GuardedPagePoolEnd - State.GuardedPagePool);
+}
+
+void GuardedPoolAllocator::allocateInGuardedPool(void *Ptr, size_t Size) const {
+ assert((reinterpret_cast<uintptr_t>(Ptr) % State.PageSize) == 0);
+ assert((Size % State.PageSize) == 0);
Check(mprotect(Ptr, Size, PROT_READ | PROT_WRITE) == 0,
- "Failed to set guarded pool allocator memory at as RW.");
- MaybeSetMappingName(Ptr, Size, Name);
+ "Failed to allocate in guarded pool allocator memory");
+ MaybeSetMappingName(Ptr, Size, kGwpAsanAliveSlotName);
}
-void GuardedPoolAllocator::markInaccessible(void *Ptr, size_t Size,
- const char *Name) const {
+void GuardedPoolAllocator::deallocateInGuardedPool(void *Ptr,
+ size_t Size) const {
+ assert((reinterpret_cast<uintptr_t>(Ptr) % State.PageSize) == 0);
+ assert((Size % State.PageSize) == 0);
// mmap() a PROT_NONE page over the address to release it to the system, if
// we used mprotect() here the system would count pages in the quarantine
// against the RSS.
Check(mmap(Ptr, Size, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1,
0) != MAP_FAILED,
- "Failed to set guarded pool allocator memory as inaccessible.");
- MaybeSetMappingName(Ptr, Size, Name);
+ "Failed to deallocate in guarded pool allocator memory");
+ MaybeSetMappingName(Ptr, Size, kGwpAsanGuardPageName);
}
size_t GuardedPoolAllocator::getPlatformPageSize() {
@@ -81,5 +108,4 @@ void GuardedPoolAllocator::installAtFork() {
};
pthread_atfork(Disable, Enable, Enable);
}
-
} // namespace gwp_asan
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/guarded_pool_allocator_posix.h b/contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/guarded_pool_allocator_posix.h
new file mode 100644
index 000000000000..7f4ba0d8ccd1
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/guarded_pool_allocator_posix.h
@@ -0,0 +1,18 @@
+//===-- guarded_pool_allocator_posix.h --------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#if defined(__unix__)
+#ifndef GWP_ASAN_GUARDED_POOL_ALLOCATOR_POSIX_H_
+#define GWP_ASAN_GUARDED_POOL_ALLOCATOR_POSIX_H_
+
+namespace gwp_asan {
+struct PlatformSpecificMapData {};
+} // namespace gwp_asan
+
+#endif // GWP_ASAN_GUARDED_POOL_ALLOCATOR_POSIX_H_
+#endif // defined(__unix__)
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/guarded_pool_allocator_tls.h b/contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/guarded_pool_allocator_tls.h
new file mode 100644
index 000000000000..3e2055db3dc5
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/guarded_pool_allocator_tls.h
@@ -0,0 +1,55 @@
+//===-- guarded_pool_allocator_tls.h ----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef GWP_ASAN_GUARDED_POOL_ALLOCATOR_TLS_H_
+#define GWP_ASAN_GUARDED_POOL_ALLOCATOR_TLS_H_
+
+#include "gwp_asan/definitions.h"
+
+#include <stdint.h>
+
+namespace gwp_asan {
+// Pack the thread local variables into a struct to ensure that they're in
+// the same cache line for performance reasons. These are the most touched
+// variables in GWP-ASan.
+struct ThreadLocalPackedVariables {
+ constexpr ThreadLocalPackedVariables()
+ : RandomState(0xacd979ce), NextSampleCounter(0), RecursiveGuard(false) {}
+ // Initialised to a magic constant so that an uninitialised GWP-ASan won't
+ // regenerate its sample counter for as long as possible. The xorshift32()
+ // algorithm used below results in getRandomUnsigned32(0xacd979ce) ==
+ // 0xfffffffe.
+ uint32_t RandomState;
+ // Thread-local decrementing counter that indicates that a given allocation
+ // should be sampled when it reaches zero.
+ uint32_t NextSampleCounter : 31;
+ // The mask is needed to silence conversion errors.
+ static const uint32_t NextSampleCounterMask = (1U << 31) - 1;
+ // Guard against recursivity. Unwinders often contain complex behaviour that
+ // may not be safe for the allocator (i.e. the unwinder calls dlopen(),
+ // which calls malloc()). When recursive behaviour is detected, we will
+ // automatically fall back to the supporting allocator to supply the
+ // allocation.
+ bool RecursiveGuard : 1;
+};
+static_assert(sizeof(ThreadLocalPackedVariables) == sizeof(uint64_t),
+ "thread local data does not fit in a uint64_t");
+} // namespace gwp_asan
+
+#ifdef GWP_ASAN_PLATFORM_TLS_HEADER
+#include GWP_ASAN_PLATFORM_TLS_HEADER
+#else
+namespace gwp_asan {
+inline ThreadLocalPackedVariables *getThreadLocals() {
+ alignas(8) static GWP_ASAN_TLS_INITIAL_EXEC ThreadLocalPackedVariables Locals;
+ return &Locals;
+}
+} // namespace gwp_asan
+#endif // GWP_ASAN_PLATFORM_TLS_HEADER
+
+#endif // GWP_ASAN_GUARDED_POOL_ALLOCATOR_TLS_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/mutex_fuchsia.cpp b/contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/mutex_fuchsia.cpp
new file mode 100644
index 000000000000..0431a82b4b53
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/mutex_fuchsia.cpp
@@ -0,0 +1,21 @@
+//===-- mutex_fuchsia.cpp ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "gwp_asan/mutex.h"
+
+#include <lib/sync/mutex.h>
+
+namespace gwp_asan {
+void Mutex::lock() __TA_NO_THREAD_SAFETY_ANALYSIS { sync_mutex_lock(&Mu); }
+
+bool Mutex::tryLock() __TA_NO_THREAD_SAFETY_ANALYSIS {
+ return sync_mutex_trylock(&Mu) == ZX_OK;
+}
+
+void Mutex::unlock() __TA_NO_THREAD_SAFETY_ANALYSIS { sync_mutex_unlock(&Mu); }
+} // namespace gwp_asan
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/mutex_fuchsia.h b/contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/mutex_fuchsia.h
new file mode 100644
index 000000000000..edfc1a6f50b4
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/mutex_fuchsia.h
@@ -0,0 +1,23 @@
+//===-- mutex_fuchsia.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#if defined(__Fuchsia__)
+#ifndef GWP_ASAN_MUTEX_FUCHSIA_H_
+#define GWP_ASAN_MUTEX_FUCHSIA_H_
+
+#include <lib/sync/mutex.h>
+
+namespace gwp_asan {
+class PlatformMutex {
+protected:
+ sync_mutex_t Mu = {};
+};
+} // namespace gwp_asan
+
+#endif // GWP_ASAN_MUTEX_FUCHSIA_H_
+#endif // defined(__Fuchsia__)
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/mutex_posix.h b/contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/mutex_posix.h
new file mode 100644
index 000000000000..7f0239198f56
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/mutex_posix.h
@@ -0,0 +1,23 @@
+//===-- mutex_posix.h -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#if defined(__unix__)
+#ifndef GWP_ASAN_MUTEX_POSIX_H_
+#define GWP_ASAN_MUTEX_POSIX_H_
+
+#include <pthread.h>
+
+namespace gwp_asan {
+class PlatformMutex {
+protected:
+ pthread_mutex_t Mu = PTHREAD_MUTEX_INITIALIZER;
+};
+} // namespace gwp_asan
+
+#endif // GWP_ASAN_MUTEX_POSIX_H_
+#endif // defined(__unix__)
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/utilities_fuchsia.cpp b/contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/utilities_fuchsia.cpp
new file mode 100644
index 000000000000..bc9d3a4462a2
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/utilities_fuchsia.cpp
@@ -0,0 +1,19 @@
+//===-- utilities_fuchsia.cpp -----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "gwp_asan/utilities.h"
+
+#include <string.h>
+#include <zircon/sanitizer.h>
+
+namespace gwp_asan {
+void die(const char *Message) {
+ __sanitizer_log_write(Message, strlen(Message));
+ __builtin_trap();
+}
+} // namespace gwp_asan
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/utilities_posix.cpp b/contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/utilities_posix.cpp
index 0e6059896702..28fd22fa7606 100644
--- a/contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/utilities_posix.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/utilities_posix.cpp
@@ -6,11 +6,6 @@
//
//===----------------------------------------------------------------------===//
-#include "gwp_asan/definitions.h"
-#include "gwp_asan/utilities.h"
-
-#include <assert.h>
-
#ifdef __BIONIC__
#include <stdlib.h>
extern "C" GWP_ASAN_WEAK void android_set_abort_message(const char *);
@@ -19,72 +14,14 @@ extern "C" GWP_ASAN_WEAK void android_set_abort_message(const char *);
#endif
namespace gwp_asan {
-
+void die(const char *Message) {
#ifdef __BIONIC__
-void Check(bool Condition, const char *Message) {
- if (Condition)
- return;
if (&android_set_abort_message != nullptr)
android_set_abort_message(Message);
abort();
-}
#else // __BIONIC__
-void Check(bool Condition, const char *Message) {
- if (Condition)
- return;
fprintf(stderr, "%s", Message);
__builtin_trap();
-}
-#endif // __BIONIC__
-
-// See `bionic/tests/malloc_test.cpp` in the Android source for documentation
-// regarding their alignment guarantees. We always round up to the closest
-// 8-byte window. As GWP-ASan's malloc(X) can always get exactly an X-sized
-// allocation, an allocation that rounds up to 16-bytes will always be given a
-// 16-byte aligned allocation.
-static size_t alignBionic(size_t RealAllocationSize) {
- if (RealAllocationSize % 8 == 0)
- return RealAllocationSize;
- return RealAllocationSize + 8 - (RealAllocationSize % 8);
-}
-
-static size_t alignPowerOfTwo(size_t RealAllocationSize) {
- if (RealAllocationSize <= 2)
- return RealAllocationSize;
- if (RealAllocationSize <= 4)
- return 4;
- if (RealAllocationSize <= 8)
- return 8;
- if (RealAllocationSize % 16 == 0)
- return RealAllocationSize;
- return RealAllocationSize + 16 - (RealAllocationSize % 16);
-}
-
-#ifdef __BIONIC__
-static constexpr AlignmentStrategy PlatformDefaultAlignment =
- AlignmentStrategy::BIONIC;
-#else // __BIONIC__
-static constexpr AlignmentStrategy PlatformDefaultAlignment =
- AlignmentStrategy::POWER_OF_TWO;
#endif // __BIONIC__
-
-size_t rightAlignedAllocationSize(size_t RealAllocationSize,
- AlignmentStrategy Align) {
- assert(RealAllocationSize > 0);
- if (Align == AlignmentStrategy::DEFAULT)
- Align = PlatformDefaultAlignment;
-
- switch (Align) {
- case AlignmentStrategy::BIONIC:
- return alignBionic(RealAllocationSize);
- case AlignmentStrategy::POWER_OF_TWO:
- return alignPowerOfTwo(RealAllocationSize);
- case AlignmentStrategy::PERFECT:
- return RealAllocationSize;
- case AlignmentStrategy::DEFAULT:
- __builtin_unreachable();
- }
- __builtin_unreachable();
}
-
} // namespace gwp_asan
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/random.cpp b/contrib/llvm-project/compiler-rt/lib/gwp_asan/random.cpp
deleted file mode 100644
index 2180f9204084..000000000000
--- a/contrib/llvm-project/compiler-rt/lib/gwp_asan/random.cpp
+++ /dev/null
@@ -1,31 +0,0 @@
-//===-- random.cpp ----------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "gwp_asan/random.h"
-#include "gwp_asan/common.h"
-
-#include <time.h>
-
-// Initialised to a magic constant so that an uninitialised GWP-ASan won't
-// regenerate its sample counter for as long as possible. The xorshift32()
-// algorithm used below results in getRandomUnsigned32(0xff82eb50) ==
-// 0xfffffea4.
-GWP_ASAN_TLS_INITIAL_EXEC uint32_t RandomState = 0xff82eb50;
-
-namespace gwp_asan {
-void initPRNG() {
- RandomState = time(nullptr) + getThreadID();
-}
-
-uint32_t getRandomUnsigned32() {
- RandomState ^= RandomState << 13;
- RandomState ^= RandomState >> 17;
- RandomState ^= RandomState << 5;
- return RandomState;
-}
-} // namespace gwp_asan
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/random.h b/contrib/llvm-project/compiler-rt/lib/gwp_asan/random.h
deleted file mode 100644
index 953b98909e95..000000000000
--- a/contrib/llvm-project/compiler-rt/lib/gwp_asan/random.h
+++ /dev/null
@@ -1,23 +0,0 @@
-//===-- random.h ------------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef GWP_ASAN_RANDOM_H_
-#define GWP_ASAN_RANDOM_H_
-
-#include <stdint.h>
-
-namespace gwp_asan {
-// Initialise the PRNG, using time and thread ID as the seed.
-void initPRNG();
-
-// xorshift (32-bit output), extremely fast PRNG that uses arithmetic operations
-// only. Seeded using walltime.
-uint32_t getRandomUnsigned32();
-} // namespace gwp_asan
-
-#endif // GWP_ASAN_RANDOM_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/utilities.cpp b/contrib/llvm-project/compiler-rt/lib/gwp_asan/utilities.cpp
new file mode 100644
index 000000000000..287630f89531
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/utilities.cpp
@@ -0,0 +1,63 @@
+//===-- utilities.cpp -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "gwp_asan/utilities.h"
+
+#include <assert.h>
+
+namespace gwp_asan {
+// See `bionic/tests/malloc_test.cpp` in the Android source for documentation
+// regarding their alignment guarantees. We always round up to the closest
+// 8-byte window. As GWP-ASan's malloc(X) can always get exactly an X-sized
+// allocation, an allocation that rounds up to 16-bytes will always be given a
+// 16-byte aligned allocation.
+static size_t alignBionic(size_t RealAllocationSize) {
+ if (RealAllocationSize % 8 == 0)
+ return RealAllocationSize;
+ return RealAllocationSize + 8 - (RealAllocationSize % 8);
+}
+
+static size_t alignPowerOfTwo(size_t RealAllocationSize) {
+ if (RealAllocationSize <= 2)
+ return RealAllocationSize;
+ if (RealAllocationSize <= 4)
+ return 4;
+ if (RealAllocationSize <= 8)
+ return 8;
+ if (RealAllocationSize % 16 == 0)
+ return RealAllocationSize;
+ return RealAllocationSize + 16 - (RealAllocationSize % 16);
+}
+
+#ifdef __BIONIC__
+static constexpr AlignmentStrategy PlatformDefaultAlignment =
+ AlignmentStrategy::BIONIC;
+#else // __BIONIC__
+static constexpr AlignmentStrategy PlatformDefaultAlignment =
+ AlignmentStrategy::POWER_OF_TWO;
+#endif // __BIONIC__
+
+size_t rightAlignedAllocationSize(size_t RealAllocationSize,
+ AlignmentStrategy Align) {
+ assert(RealAllocationSize > 0);
+ if (Align == AlignmentStrategy::DEFAULT)
+ Align = PlatformDefaultAlignment;
+
+ switch (Align) {
+ case AlignmentStrategy::BIONIC:
+ return alignBionic(RealAllocationSize);
+ case AlignmentStrategy::POWER_OF_TWO:
+ return alignPowerOfTwo(RealAllocationSize);
+ case AlignmentStrategy::PERFECT:
+ return RealAllocationSize;
+ case AlignmentStrategy::DEFAULT:
+ __builtin_unreachable();
+ }
+ __builtin_unreachable();
+}
+} // namespace gwp_asan
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/utilities.h b/contrib/llvm-project/compiler-rt/lib/gwp_asan/utilities.h
index 71d525f9e14c..cee5672b491d 100644
--- a/contrib/llvm-project/compiler-rt/lib/gwp_asan/utilities.h
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/utilities.h
@@ -6,15 +6,23 @@
//
//===----------------------------------------------------------------------===//
+#ifndef GWP_ASAN_UTILITIES_H_
+#define GWP_ASAN_UTILITIES_H_
+
#include "gwp_asan/definitions.h"
#include <stddef.h>
-#include <stdint.h>
namespace gwp_asan {
-// Checks that `Condition` is true, otherwise fails in a platform-specific way
-// with `Message`.
-void Check(bool Condition, const char *Message);
+// Terminates in a platform-specific way with `Message`.
+void die(const char *Message);
+
+// Checks that `Condition` is true, otherwise dies with `Message`.
+GWP_ASAN_ALWAYS_INLINE void Check(bool Condition, const char *Message) {
+ if (Condition)
+ return;
+ die(Message);
+}
enum class AlignmentStrategy {
// Default => POWER_OF_TWO on most platforms, BIONIC for Android Bionic.
@@ -29,3 +37,5 @@ size_t rightAlignedAllocationSize(
size_t RealAllocationSize,
AlignmentStrategy Align = AlignmentStrategy::DEFAULT);
} // namespace gwp_asan
+
+#endif // GWP_ASAN_UTILITIES_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan.cpp
index d67a88d455ef..c5322110cb66 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan.cpp
@@ -112,7 +112,7 @@ static void InitializeFlags() {
if (__hwasan_default_options)
parser.ParseString(__hwasan_default_options());
#if HWASAN_CONTAINS_UBSAN
- const char *ubsan_default_options = __ubsan::MaybeCallUbsanDefaultOptions();
+ const char *ubsan_default_options = __ubsan_default_options();
ubsan_parser.ParseString(ubsan_default_options);
#endif
@@ -286,8 +286,6 @@ void __hwasan_init() {
// initialized when InitInstrumentation() was called.
GetCurrentThread()->InitRandomState();
- MadviseShadow();
-
SetPrintfAndReportCallback(AppendToErrorMessageBuffer);
// This may call libc -> needs initialized shadow.
AndroidLogInit();
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan.h b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan.h
index 8cbd9e74e335..d4521efd089a 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan.h
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan.h
@@ -21,10 +21,6 @@
#include "hwasan_flags.h"
#include "ubsan/ubsan_platform.h"
-#ifndef HWASAN_REPLACE_OPERATORS_NEW_AND_DELETE
-# define HWASAN_REPLACE_OPERATORS_NEW_AND_DELETE 1
-#endif
-
#ifndef HWASAN_CONTAINS_UBSAN
# define HWASAN_CONTAINS_UBSAN CAN_SANITIZE_UB
#endif
@@ -33,6 +29,10 @@
#define HWASAN_WITH_INTERCEPTORS 0
#endif
+#ifndef HWASAN_REPLACE_OPERATORS_NEW_AND_DELETE
+#define HWASAN_REPLACE_OPERATORS_NEW_AND_DELETE HWASAN_WITH_INTERCEPTORS
+#endif
+
typedef u8 tag_t;
// TBI (Top Byte Ignore) feature of AArch64: bits [63:56] are ignored in address
@@ -75,7 +75,6 @@ extern int hwasan_report_count;
bool InitShadow();
void InitPrctl();
void InitThreads();
-void MadviseShadow();
void InitializeInterceptors();
void HwasanAllocatorInit();
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocator.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocator.cpp
index 1d82db0e3944..0b6b7347892e 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocator.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocator.cpp
@@ -42,7 +42,8 @@ enum RightAlignMode {
static ALIGNED(16) u8 tail_magic[kShadowAlignment - 1];
bool HwasanChunkView::IsAllocated() const {
- return metadata_ && metadata_->alloc_context_id && metadata_->requested_size;
+ return metadata_ && metadata_->alloc_context_id &&
+ metadata_->get_requested_size();
}
// Aligns the 'addr' right to the granule boundary.
@@ -54,14 +55,14 @@ static uptr AlignRight(uptr addr, uptr requested_size) {
uptr HwasanChunkView::Beg() const {
if (metadata_ && metadata_->right_aligned)
- return AlignRight(block_, metadata_->requested_size);
+ return AlignRight(block_, metadata_->get_requested_size());
return block_;
}
uptr HwasanChunkView::End() const {
return Beg() + UsedSize();
}
uptr HwasanChunkView::UsedSize() const {
- return metadata_->requested_size;
+ return metadata_->get_requested_size();
}
u32 HwasanChunkView::GetAllocStackId() const {
return metadata_->alloc_context_id;
@@ -129,7 +130,7 @@ static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
}
Metadata *meta =
reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
- meta->requested_size = static_cast<u32>(orig_size);
+ meta->set_requested_size(orig_size);
meta->alloc_context_id = StackDepotPut(*stack);
meta->right_aligned = false;
if (zeroise) {
@@ -191,7 +192,7 @@ static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
Metadata *meta =
reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
- uptr orig_size = meta->requested_size;
+ uptr orig_size = meta->get_requested_size();
u32 free_context_id = StackDepotPut(*stack);
u32 alloc_context_id = meta->alloc_context_id;
@@ -208,7 +209,7 @@ static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
orig_size, tail_magic);
}
- meta->requested_size = 0;
+ meta->set_requested_size(0);
meta->alloc_context_id = 0;
// This memory will not be reused by anyone else, so we are free to keep it
// poisoned.
@@ -245,8 +246,9 @@ static void *HwasanReallocate(StackTrace *stack, void *tagged_ptr_old,
void *untagged_ptr_old = UntagPtr(tagged_ptr_old);
Metadata *meta =
reinterpret_cast<Metadata *>(allocator.GetMetaData(untagged_ptr_old));
- internal_memcpy(UntagPtr(tagged_ptr_new), untagged_ptr_old,
- Min(new_size, static_cast<uptr>(meta->requested_size)));
+ internal_memcpy(
+ UntagPtr(tagged_ptr_new), untagged_ptr_old,
+ Min(new_size, static_cast<uptr>(meta->get_requested_size())));
HwasanDeallocate(stack, tagged_ptr_old);
}
return tagged_ptr_new;
@@ -282,7 +284,7 @@ static uptr AllocationSize(const void *tagged_ptr) {
} else {
if (beg != untagged_ptr) return 0;
}
- return b->requested_size;
+ return b->get_requested_size();
}
void *hwasan_malloc(uptr size, StackTrace *stack) {
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocator.h b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocator.h
index f62be2696021..43670a6a3fb7 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocator.h
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocator.h
@@ -28,9 +28,17 @@
namespace __hwasan {
struct Metadata {
- u32 requested_size : 31; // sizes are < 2G.
- u32 right_aligned : 1;
+ u32 requested_size_low;
+ u32 requested_size_high : 31;
+ u32 right_aligned : 1;
u32 alloc_context_id;
+ u64 get_requested_size() {
+ return (static_cast<u64>(requested_size_high) << 32) + requested_size_low;
+ }
+ void set_requested_size(u64 size) {
+ requested_size_low = size & ((1ul << 32) - 1);
+ requested_size_high = size >> 32;
+ }
};
struct HwasanMapUnmapCallback {
@@ -43,7 +51,7 @@ struct HwasanMapUnmapCallback {
}
};
-static const uptr kMaxAllowedMallocSize = 2UL << 30; // 2G
+static const uptr kMaxAllowedMallocSize = 1UL << 40; // 1T
struct AP64 {
static const uptr kSpaceBeg = ~0ULL;
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_dynamic_shadow.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_dynamic_shadow.cpp
index a04751f44a31..12730b29bae3 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_dynamic_shadow.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_dynamic_shadow.cpp
@@ -24,47 +24,6 @@
// The code in this file needs to run in an unrelocated binary. It should not
// access any external symbol, including its own non-hidden globals.
-namespace __hwasan {
-
-static void UnmapFromTo(uptr from, uptr to) {
- if (to == from)
- return;
- CHECK(to >= from);
- uptr res = internal_munmap(reinterpret_cast<void *>(from), to - from);
- if (UNLIKELY(internal_iserror(res))) {
- Report("ERROR: %s failed to unmap 0x%zx (%zd) bytes at address %p\n",
- SanitizerToolName, to - from, to - from, from);
- CHECK("unable to unmap" && 0);
- }
-}
-
-// Returns an address aligned to kShadowBaseAlignment, such that
-// 2**kShadowBaseAlingment on the left and shadow_size_bytes bytes on the right
-// of it are mapped no access.
-static uptr MapDynamicShadow(uptr shadow_size_bytes) {
- const uptr granularity = GetMmapGranularity();
- const uptr min_alignment = granularity << kShadowScale;
- const uptr alignment = 1ULL << kShadowBaseAlignment;
- CHECK_GE(alignment, min_alignment);
-
- const uptr left_padding = 1ULL << kShadowBaseAlignment;
- const uptr shadow_size =
- RoundUpTo(shadow_size_bytes, granularity);
- const uptr map_size = shadow_size + left_padding + alignment;
-
- const uptr map_start = (uptr)MmapNoAccess(map_size);
- CHECK_NE(map_start, ~(uptr)0);
-
- const uptr shadow_start = RoundUpTo(map_start + left_padding, alignment);
-
- UnmapFromTo(map_start, shadow_start - left_padding);
- UnmapFromTo(shadow_start + shadow_size, map_start + map_size);
-
- return shadow_start;
-}
-
-} // namespace __hwasan
-
#if SANITIZER_ANDROID
extern "C" {
@@ -82,7 +41,8 @@ static uptr PremapShadowSize() {
}
static uptr PremapShadow() {
- return MapDynamicShadow(PremapShadowSize());
+ return MapDynamicShadow(PremapShadowSize(), kShadowScale,
+ kShadowBaseAlignment, kHighMemEnd);
}
static bool IsPremapShadowAvailable() {
@@ -146,7 +106,8 @@ void InitShadowGOT() {
uptr FindDynamicShadowStart(uptr shadow_size_bytes) {
if (IsPremapShadowAvailable())
return FindPremappedShadowStart(shadow_size_bytes);
- return MapDynamicShadow(shadow_size_bytes);
+ return MapDynamicShadow(shadow_size_bytes, kShadowScale, kShadowBaseAlignment,
+ kHighMemEnd);
}
} // namespace __hwasan
@@ -156,7 +117,8 @@ namespace __hwasan {
void InitShadowGOT() {}
uptr FindDynamicShadowStart(uptr shadow_size_bytes) {
- return MapDynamicShadow(shadow_size_bytes);
+ return MapDynamicShadow(shadow_size_bytes, kShadowScale, kShadowBaseAlignment,
+ kHighMemEnd);
}
} // namespace __hwasan
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_interceptors_vfork.S b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_interceptors_vfork.S
index 13d0829c0865..23d565936d87 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_interceptors_vfork.S
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_interceptors_vfork.S
@@ -4,6 +4,7 @@
#define COMMON_INTERCEPTOR_SPILL_AREA __hwasan_extra_spill_area
#define COMMON_INTERCEPTOR_HANDLE_VFORK __hwasan_handle_vfork
#include "sanitizer_common/sanitizer_common_interceptors_vfork_aarch64.inc.S"
+#include "sanitizer_common/sanitizer_common_interceptors_vfork_riscv64.inc.S"
#include "sanitizer_common/sanitizer_common_interceptors_vfork_x86_64.inc.S"
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_interface_internal.h b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_interface_internal.h
index aedda317497b..25c0f94fe51f 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_interface_internal.h
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_interface_internal.h
@@ -222,6 +222,9 @@ SANITIZER_INTERFACE_ATTRIBUTE
void *__hwasan_memset(void *s, int c, uptr n);
SANITIZER_INTERFACE_ATTRIBUTE
void *__hwasan_memmove(void *dest, const void *src, uptr n);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_set_error_report_callback(void (*callback)(const char *));
} // extern "C"
#endif // HWASAN_INTERFACE_INTERNAL_H
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_linux.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_linux.cpp
index f1e830ddf901..e99926d355cf 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_linux.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_linux.cpp
@@ -57,56 +57,24 @@ THREADLOCAL uptr __hwasan_tls;
namespace __hwasan {
-static void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) {
- CHECK_EQ((beg % GetMmapGranularity()), 0);
- CHECK_EQ(((end + 1) % GetMmapGranularity()), 0);
- uptr size = end - beg + 1;
- DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb.
- if (!MmapFixedNoReserve(beg, size, name)) {
- Report(
- "ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. "
- "Perhaps you're using ulimit -v\n",
- size);
- Abort();
- }
-}
+// With the zero shadow base we can not actually map pages starting from 0.
+// This constant is somewhat arbitrary.
+constexpr uptr kZeroBaseShadowStart = 0;
+constexpr uptr kZeroBaseMaxShadowStart = 1 << 18;
static void ProtectGap(uptr addr, uptr size) {
- if (!size)
- return;
- void *res = MmapFixedNoAccess(addr, size, "shadow gap");
- if (addr == (uptr)res)
- return;
- // A few pages at the start of the address space can not be protected.
- // But we really want to protect as much as possible, to prevent this memory
- // being returned as a result of a non-FIXED mmap().
- if (addr == 0) {
- uptr step = GetMmapGranularity();
- while (size > step) {
- addr += step;
- size -= step;
- void *res = MmapFixedNoAccess(addr, size, "shadow gap");
- if (addr == (uptr)res)
- return;
- }
- }
-
- Report(
- "ERROR: Failed to protect shadow gap [%p, %p]. "
- "HWASan cannot proceed correctly. ABORTING.\n", (void *)addr,
- (void *)(addr + size));
- DumpProcessMap();
- Die();
+ __sanitizer::ProtectGap(addr, size, kZeroBaseShadowStart,
+ kZeroBaseMaxShadowStart);
}
-static uptr kLowMemStart;
-static uptr kLowMemEnd;
-static uptr kLowShadowEnd;
-static uptr kLowShadowStart;
-static uptr kHighShadowStart;
-static uptr kHighShadowEnd;
-static uptr kHighMemStart;
-static uptr kHighMemEnd;
+uptr kLowMemStart;
+uptr kLowMemEnd;
+uptr kLowShadowEnd;
+uptr kLowShadowStart;
+uptr kHighShadowStart;
+uptr kHighShadowEnd;
+uptr kHighMemStart;
+uptr kHighMemEnd;
static void PrintRange(uptr start, uptr end, const char *name) {
Printf("|| [%p, %p] || %.*s ||\n", (void *)start, (void *)end, 10, name);
@@ -242,24 +210,12 @@ void InitThreads() {
uptr thread_space_end =
__hwasan_shadow_memory_dynamic_address - guard_page_size;
ReserveShadowMemoryRange(thread_space_start, thread_space_end - 1,
- "hwasan threads");
+ "hwasan threads", /*madvise_shadow*/ false);
ProtectGap(thread_space_end,
__hwasan_shadow_memory_dynamic_address - thread_space_end);
InitThreadList(thread_space_start, thread_space_end - thread_space_start);
}
-static void MadviseShadowRegion(uptr beg, uptr end) {
- uptr size = end - beg + 1;
- SetShadowRegionHugePageMode(beg, size);
- if (common_flags()->use_madv_dontdump)
- DontDumpShadowMemory(beg, size);
-}
-
-void MadviseShadow() {
- MadviseShadowRegion(kLowShadowStart, kLowShadowEnd);
- MadviseShadowRegion(kHighShadowStart, kHighShadowEnd);
-}
-
bool MemIsApp(uptr p) {
CHECK(GetTagFromPointer(p) == 0);
return p >= kHighMemStart || (p >= kLowMemStart && p <= kLowMemEnd);
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_malloc_bisect.h b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_malloc_bisect.h
index eaf124aab7dd..7d134e8c4b7f 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_malloc_bisect.h
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_malloc_bisect.h
@@ -28,7 +28,7 @@ static u32 malloc_hash(StackTrace *stack, uptr orig_size) {
return H.get();
}
-static INLINE bool malloc_bisect(StackTrace *stack, uptr orig_size) {
+static inline bool malloc_bisect(StackTrace *stack, uptr orig_size) {
uptr left = flags()->malloc_bisect_left;
uptr right = flags()->malloc_bisect_right;
if (LIKELY(left == 0 && right == 0))
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_mapping.h b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_mapping.h
index a86ad7ca8036..c149687bdfa6 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_mapping.h
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_mapping.h
@@ -39,6 +39,15 @@ constexpr uptr kShadowAlignment = 1ULL << kShadowScale;
namespace __hwasan {
+extern uptr kLowMemStart;
+extern uptr kLowMemEnd;
+extern uptr kLowShadowEnd;
+extern uptr kLowShadowStart;
+extern uptr kHighShadowStart;
+extern uptr kHighShadowEnd;
+extern uptr kHighMemStart;
+extern uptr kHighMemEnd;
+
inline uptr MemToShadow(uptr untagged_addr) {
return (untagged_addr >> kShadowScale) +
__hwasan_shadow_memory_dynamic_address;
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_new_delete.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_new_delete.cpp
index 191c17e56a74..8d01d3944f2b 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_new_delete.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_new_delete.cpp
@@ -16,9 +16,34 @@
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
+#include <stddef.h>
+#include <stdlib.h>
+
#if HWASAN_REPLACE_OPERATORS_NEW_AND_DELETE
-#include <stddef.h>
+// TODO(alekseys): throw std::bad_alloc instead of dying on OOM.
+#define OPERATOR_NEW_BODY(nothrow) \
+ GET_MALLOC_STACK_TRACE; \
+ void *res = hwasan_malloc(size, &stack);\
+ if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\
+ return res
+
+#define OPERATOR_DELETE_BODY \
+ GET_MALLOC_STACK_TRACE; \
+ if (ptr) hwasan_free(ptr, &stack)
+
+#elif defined(__ANDROID__)
+
+// We don't actually want to intercept operator new and delete on Android, but
+// since we previously released a runtime that intercepted these functions,
+// removing the interceptors would break ABI. Therefore we simply forward to
+// malloc and free.
+#define OPERATOR_NEW_BODY(nothrow) return malloc(size)
+#define OPERATOR_DELETE_BODY free(ptr)
+
+#endif
+
+#ifdef OPERATOR_NEW_BODY
using namespace __hwasan;
@@ -28,12 +53,6 @@ namespace std {
} // namespace std
-// TODO(alekseys): throw std::bad_alloc instead of dying on OOM.
-#define OPERATOR_NEW_BODY(nothrow) \
- GET_MALLOC_STACK_TRACE; \
- void *res = hwasan_malloc(size, &stack);\
- if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\
- return res
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void *operator new(size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); }
@@ -48,10 +67,6 @@ void *operator new[](size_t size, std::nothrow_t const&) {
OPERATOR_NEW_BODY(true /*nothrow*/);
}
-#define OPERATOR_DELETE_BODY \
- GET_MALLOC_STACK_TRACE; \
- if (ptr) hwasan_free(ptr, &stack)
-
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void operator delete(void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
@@ -63,4 +78,4 @@ void operator delete[](void *ptr, std::nothrow_t const&) {
OPERATOR_DELETE_BODY;
}
-#endif // HWASAN_REPLACE_OPERATORS_NEW_AND_DELETE
+#endif // OPERATOR_NEW_BODY
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_report.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_report.cpp
index 206aa601903e..4448d9243767 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_report.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_report.cpp
@@ -43,12 +43,16 @@ class ScopedReport {
}
~ScopedReport() {
+ void (*report_cb)(const char *);
{
BlockingMutexLock lock(&error_message_lock_);
- if (fatal)
- SetAbortMessage(error_message_.data());
+ report_cb = error_report_callback_;
error_message_ptr_ = nullptr;
}
+ if (report_cb)
+ report_cb(error_message_.data());
+ if (fatal)
+ SetAbortMessage(error_message_.data());
if (common_flags()->print_module_map >= 2 ||
(fatal && common_flags()->print_module_map))
DumpProcessMap();
@@ -66,6 +70,12 @@ class ScopedReport {
// overwrite old trailing '\0', keep new trailing '\0' untouched.
internal_memcpy(&(*error_message_ptr_)[old_size - 1], msg, len);
}
+
+ static void SetErrorReportCallback(void (*callback)(const char *)) {
+ BlockingMutexLock lock(&error_message_lock_);
+ error_report_callback_ = callback;
+ }
+
private:
ScopedErrorReportLock error_report_lock_;
InternalMmapVector<char> error_message_;
@@ -73,10 +83,12 @@ class ScopedReport {
static InternalMmapVector<char> *error_message_ptr_;
static BlockingMutex error_message_lock_;
+ static void (*error_report_callback_)(const char *);
};
InternalMmapVector<char> *ScopedReport::error_message_ptr_;
BlockingMutex ScopedReport::error_message_lock_;
+void (*ScopedReport::error_report_callback_)(const char *);
// If there is an active ScopedReport, append to its error message.
void AppendToErrorMessageBuffer(const char *buffer) {
@@ -224,7 +236,7 @@ static void PrintStackAllocations(StackAllocationsRingBuffer *sa,
frame_desc.append(" record_addr:0x%zx record:0x%zx",
reinterpret_cast<uptr>(record_addr), record);
if (SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc)) {
- RenderFrame(&frame_desc, " %F %L\n", 0, frame->info,
+ RenderFrame(&frame_desc, " %F %L\n", 0, frame->info.address, &frame->info,
common_flags()->symbolize_vs_style,
common_flags()->strip_path_prefix);
frame->ClearAll();
@@ -254,7 +266,8 @@ static bool TagsEqual(tag_t tag, tag_t *tag_ptr) {
static uptr GetGlobalSizeFromDescriptor(uptr ptr) {
// Find the ELF object that this global resides in.
Dl_info info;
- dladdr(reinterpret_cast<void *>(ptr), &info);
+ if (dladdr(reinterpret_cast<void *>(ptr), &info) == 0)
+ return 0;
auto *ehdr = reinterpret_cast<const ElfW(Ehdr) *>(info.dli_fbase);
auto *phdr_begin = reinterpret_cast<const ElfW(Phdr) *>(
reinterpret_cast<const u8 *>(ehdr) + ehdr->e_phoff);
@@ -649,3 +662,7 @@ void ReportRegisters(uptr *frame, uptr pc) {
}
} // namespace __hwasan
+
+void __hwasan_set_error_report_callback(void (*callback)(const char *)) {
+ __hwasan::ScopedReport::SetErrorReportCallback(callback);
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread.h b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread.h
index ebcdb791fb36..88958daf767c 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread.h
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread.h
@@ -74,8 +74,6 @@ class Thread {
HeapAllocationsRingBuffer *heap_allocations_;
StackAllocationsRingBuffer *stack_allocations_;
- Thread *next_; // All live threads form a linked list.
-
u64 unique_id_; // counting from zero.
u32 tagging_disabled_; // if non-zero, malloc uses zero tag in this thread.
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread_list.h b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread_list.h
index 914b632d9776..e596bde36662 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread_list.h
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread_list.h
@@ -66,40 +66,6 @@ static uptr RingBufferSize() {
return 0;
}
-struct ThreadListHead {
- Thread *list_;
-
- ThreadListHead() : list_(nullptr) {}
-
- void Push(Thread *t) {
- t->next_ = list_;
- list_ = t;
- }
-
- Thread *Pop() {
- Thread *t = list_;
- if (t)
- list_ = t->next_;
- return t;
- }
-
- void Remove(Thread *t) {
- Thread **cur = &list_;
- while (*cur != t) cur = &(*cur)->next_;
- CHECK(*cur && "thread not found");
- *cur = (*cur)->next_;
- }
-
- template <class CB>
- void ForEach(CB cb) {
- Thread *t = list_;
- while (t) {
- cb(t);
- t = t->next_;
- }
- }
-};
-
struct ThreadStats {
uptr n_live_threads;
uptr total_stack_size;
@@ -123,14 +89,15 @@ class HwasanThreadList {
Thread *t;
{
SpinMutexLock l(&list_mutex_);
- t = free_list_.Pop();
- if (t) {
+ if (!free_list_.empty()) {
+ t = free_list_.back();
+ free_list_.pop_back();
uptr start = (uptr)t - ring_buffer_size_;
internal_memset((void *)start, 0, ring_buffer_size_ + sizeof(Thread));
} else {
t = AllocThread();
}
- live_list_.Push(t);
+ live_list_.push_back(t);
}
t->Init((uptr)t - ring_buffer_size_, ring_buffer_size_);
AddThreadStats(t);
@@ -142,12 +109,24 @@ class HwasanThreadList {
ReleaseMemoryPagesToOS(start, start + thread_alloc_size_);
}
+ void RemoveThreadFromLiveList(Thread *t) {
+ for (Thread *&t2 : live_list_)
+ if (t2 == t) {
+ // To remove t2, copy the last element of the list in t2's position, and
+ // pop_back(). This works even if t2 is itself the last element.
+ t2 = live_list_.back();
+ live_list_.pop_back();
+ return;
+ }
+ CHECK(0 && "thread not found in live list");
+ }
+
void ReleaseThread(Thread *t) {
RemoveThreadStats(t);
t->Destroy();
SpinMutexLock l(&list_mutex_);
- live_list_.Remove(t);
- free_list_.Push(t);
+ RemoveThreadFromLiveList(t);
+ free_list_.push_back(t);
DontNeedThread(t);
}
@@ -166,7 +145,7 @@ class HwasanThreadList {
template <class CB>
void VisitAllLiveThreads(CB cb) {
SpinMutexLock l(&list_mutex_);
- live_list_.ForEach(cb);
+ for (Thread *t : live_list_) cb(t);
}
void AddThreadStats(Thread *t) {
@@ -201,8 +180,8 @@ class HwasanThreadList {
uptr ring_buffer_size_;
uptr thread_alloc_size_;
- ThreadListHead free_list_;
- ThreadListHead live_list_;
+ InternalMmapVector<Thread *> free_list_;
+ InternalMmapVector<Thread *> live_list_;
SpinMutex list_mutex_;
ThreadStats stats_;
diff --git a/contrib/llvm-project/compiler-rt/lib/interception/interception.h b/contrib/llvm-project/compiler-rt/lib/interception/interception.h
index d27a8ccf92a8..cb0b5284ed26 100644
--- a/contrib/llvm-project/compiler-rt/lib/interception/interception.h
+++ b/contrib/llvm-project/compiler-rt/lib/interception/interception.h
@@ -17,7 +17,7 @@
#include "sanitizer_common/sanitizer_internal_defs.h"
#if !SANITIZER_LINUX && !SANITIZER_FREEBSD && !SANITIZER_MAC && \
- !SANITIZER_NETBSD && !SANITIZER_OPENBSD && !SANITIZER_WINDOWS && \
+ !SANITIZER_NETBSD && !SANITIZER_WINDOWS && \
!SANITIZER_FUCHSIA && !SANITIZER_RTEMS && !SANITIZER_SOLARIS
# error "Interception doesn't work on this operating system."
#endif
@@ -281,7 +281,7 @@ typedef unsigned long uptr;
#define INCLUDED_FROM_INTERCEPTION_LIB
#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \
- SANITIZER_OPENBSD || SANITIZER_SOLARIS
+ SANITIZER_SOLARIS
# include "interception_linux.h"
# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func)
diff --git a/contrib/llvm-project/compiler-rt/lib/interception/interception_linux.cpp b/contrib/llvm-project/compiler-rt/lib/interception/interception_linux.cpp
index 950cd5126538..5111a87f0a6c 100644
--- a/contrib/llvm-project/compiler-rt/lib/interception/interception_linux.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/interception/interception_linux.cpp
@@ -14,7 +14,7 @@
#include "interception.h"
#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \
- SANITIZER_OPENBSD || SANITIZER_SOLARIS
+ SANITIZER_SOLARIS
#include <dlfcn.h> // for dlsym() and dlvsym()
@@ -63,8 +63,8 @@ bool InterceptFunction(const char *name, uptr *ptr_to_real, uptr func,
return addr && (func == wrapper);
}
-// Android and Solaris do not have dlvsym
-#if !SANITIZER_ANDROID && !SANITIZER_SOLARIS && !SANITIZER_OPENBSD
+// dlvsym is a GNU extension supported by some other platforms.
+#if SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD
static void *GetFuncAddr(const char *name, const char *ver) {
return dlvsym(RTLD_NEXT, name, ver);
}
@@ -75,9 +75,9 @@ bool InterceptFunction(const char *name, const char *ver, uptr *ptr_to_real,
*ptr_to_real = (uptr)addr;
return addr && (func == wrapper);
}
-#endif // !SANITIZER_ANDROID
+#endif // SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD
} // namespace __interception
#endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD ||
- // SANITIZER_OPENBSD || SANITIZER_SOLARIS
+ // SANITIZER_SOLARIS
diff --git a/contrib/llvm-project/compiler-rt/lib/interception/interception_linux.h b/contrib/llvm-project/compiler-rt/lib/interception/interception_linux.h
index e578da0cf64e..a08f8cb98c40 100644
--- a/contrib/llvm-project/compiler-rt/lib/interception/interception_linux.h
+++ b/contrib/llvm-project/compiler-rt/lib/interception/interception_linux.h
@@ -12,7 +12,7 @@
//===----------------------------------------------------------------------===//
#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \
- SANITIZER_OPENBSD || SANITIZER_SOLARIS
+ SANITIZER_SOLARIS
#if !defined(INCLUDED_FROM_INTERCEPTION_LIB)
# error "interception_linux.h should be included from interception library only"
@@ -35,8 +35,8 @@ bool InterceptFunction(const char *name, const char *ver, uptr *ptr_to_real,
(::__interception::uptr) & (func), \
(::__interception::uptr) & WRAP(func))
-// Android, Solaris and OpenBSD do not have dlvsym
-#if !SANITIZER_ANDROID && !SANITIZER_SOLARIS && !SANITIZER_OPENBSD
+// dlvsym is a GNU extension supported by some other platforms.
+#if SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD
#define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \
::__interception::InterceptFunction( \
#func, symver, \
@@ -46,8 +46,8 @@ bool InterceptFunction(const char *name, const char *ver, uptr *ptr_to_real,
#else
#define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \
INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func)
-#endif // !SANITIZER_ANDROID && !SANITIZER_SOLARIS
+#endif // SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD
#endif // INTERCEPTION_LINUX_H
#endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD ||
- // SANITIZER_OPENBSD || SANITIZER_SOLARIS
+ // SANITIZER_SOLARIS
diff --git a/contrib/llvm-project/compiler-rt/lib/interception/interception_win.cpp b/contrib/llvm-project/compiler-rt/lib/interception/interception_win.cpp
index 1a1c327e6124..98bc756ae53a 100644
--- a/contrib/llvm-project/compiler-rt/lib/interception/interception_win.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/interception/interception_win.cpp
@@ -136,7 +136,7 @@ namespace __interception {
static const int kAddressLength = FIRST_32_SECOND_64(4, 8);
static const int kJumpInstructionLength = 5;
static const int kShortJumpInstructionLength = 2;
-static const int kIndirectJumpInstructionLength = 6;
+UNUSED static const int kIndirectJumpInstructionLength = 6;
static const int kBranchLength =
FIRST_32_SECOND_64(kJumpInstructionLength, kIndirectJumpInstructionLength);
static const int kDirectBranchLength = kBranchLength + kAddressLength;
@@ -165,7 +165,7 @@ static uptr GetMmapGranularity() {
return si.dwAllocationGranularity;
}
-static uptr RoundUpTo(uptr size, uptr boundary) {
+UNUSED static uptr RoundUpTo(uptr size, uptr boundary) {
return (size + boundary - 1) & ~(boundary - 1);
}
@@ -309,7 +309,7 @@ struct TrampolineMemoryRegion {
uptr max_size;
};
-static const uptr kTrampolineScanLimitRange = 1 << 31; // 2 gig
+UNUSED static const uptr kTrampolineScanLimitRange = 1 << 31; // 2 gig
static const int kMaxTrampolineRegion = 1024;
static TrampolineMemoryRegion TrampolineRegions[kMaxTrampolineRegion];
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan.cpp
index 80a6e2fa7016..2c0a3bf0787c 100644
--- a/contrib/llvm-project/compiler-rt/lib/lsan/lsan.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan.cpp
@@ -73,11 +73,11 @@ static void InitializeFlags() {
RegisterCommonFlags(&parser);
// Override from user-specified string.
- const char *lsan_default_options = MaybeCallLsanDefaultOptions();
+ const char *lsan_default_options = __lsan_default_options();
parser.ParseString(lsan_default_options);
parser.ParseStringFromEnv("LSAN_OPTIONS");
- SetVerbosity(common_flags()->verbosity);
+ InitializeCommonFlags();
if (Verbosity()) ReportUnrecognizedFlags();
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_allocator.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_allocator.cpp
index d86c3921395c..70422957e6f3 100644
--- a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_allocator.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_allocator.cpp
@@ -309,6 +309,16 @@ IgnoreObjectResult IgnoreObjectLocked(const void *p) {
return kIgnoreObjectInvalid;
}
}
+
+void GetAdditionalThreadContextPtrs(ThreadContextBase *tctx, void *ptrs) {
+ // This function can be used to treat memory reachable from `tctx` as live.
+ // This is useful for threads that have been created but not yet started.
+
+ // This is currently a no-op because the LSan `pthread_create()` interceptor
+ // blocks until the child thread starts which keeps the thread's `arg` pointer
+ // live.
+}
+
} // namespace __lsan
using namespace __lsan;
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.cpp
index 67f85f2f31de..d5b4132b24d5 100644
--- a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.cpp
@@ -25,8 +25,6 @@
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
-extern "C" const char *__lsan_current_stage = "unknown";
-
#if CAN_SANITIZE_LEAKS
namespace __lsan {
@@ -67,35 +65,67 @@ void RegisterLsanFlags(FlagParser *parser, Flags *f) {
if (flags()->log_threads) Report(__VA_ARGS__); \
} while (0)
-ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
-static SuppressionContext *suppression_ctx = nullptr;
+class LeakSuppressionContext {
+ bool parsed = false;
+ SuppressionContext context;
+ bool suppressed_stacks_sorted = true;
+ InternalMmapVector<u32> suppressed_stacks;
+
+ Suppression *GetSuppressionForAddr(uptr addr);
+ void LazyInit();
+
+ public:
+ LeakSuppressionContext(const char *supprression_types[],
+ int suppression_types_num)
+ : context(supprression_types, suppression_types_num) {}
+
+ Suppression *GetSuppressionForStack(u32 stack_trace_id);
+
+ const InternalMmapVector<u32> &GetSortedSuppressedStacks() {
+ if (!suppressed_stacks_sorted) {
+ suppressed_stacks_sorted = true;
+ SortAndDedup(suppressed_stacks);
+ }
+ return suppressed_stacks;
+ }
+ void PrintMatchedSuppressions();
+};
+
+ALIGNED(64) static char suppression_placeholder[sizeof(LeakSuppressionContext)];
+static LeakSuppressionContext *suppression_ctx = nullptr;
static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = { kSuppressionLeak };
static const char kStdSuppressions[] =
#if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
- // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
- // definition.
- "leak:*pthread_exit*\n"
+ // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
+ // definition.
+ "leak:*pthread_exit*\n"
#endif // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
#if SANITIZER_MAC
- // For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173
- "leak:*_os_trace*\n"
+ // For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173
+ "leak:*_os_trace*\n"
#endif
- // TLS leak in some glibc versions, described in
- // https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
- "leak:*tls_get_addr*\n";
+ // TLS leak in some glibc versions, described in
+ // https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
+ "leak:*tls_get_addr*\n";
void InitializeSuppressions() {
CHECK_EQ(nullptr, suppression_ctx);
suppression_ctx = new (suppression_placeholder)
- SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
- suppression_ctx->ParseFromFile(flags()->suppressions);
- if (&__lsan_default_suppressions)
- suppression_ctx->Parse(__lsan_default_suppressions());
- suppression_ctx->Parse(kStdSuppressions);
+ LeakSuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
}
-static SuppressionContext *GetSuppressionContext() {
+void LeakSuppressionContext::LazyInit() {
+ if (!parsed) {
+ parsed = true;
+ context.ParseFromFile(flags()->suppressions);
+ if (&__lsan_default_suppressions)
+ context.Parse(__lsan_default_suppressions());
+ context.Parse(kStdSuppressions);
+ }
+}
+
+static LeakSuppressionContext *GetSuppressionContext() {
CHECK(suppression_ctx);
return suppression_ctx;
}
@@ -110,10 +140,6 @@ void InitializeRootRegions() {
root_regions = new (placeholder) InternalMmapVector<RootRegion>();
}
-const char *MaybeCallLsanDefaultOptions() {
- return (&__lsan_default_options) ? __lsan_default_options() : "";
-}
-
void InitCommonLsan() {
InitializeRootRegions();
if (common_flags()->detect_leaks) {
@@ -221,13 +247,37 @@ static void ProcessThreads(SuspendedThreadsList const &, Frontier *) {}
#else
+#if SANITIZER_ANDROID
+// FIXME: Move this out into *libcdep.cpp
+extern "C" SANITIZER_WEAK_ATTRIBUTE void __libc_iterate_dynamic_tls(
+ pid_t, void (*cb)(void *, void *, uptr, void *), void *);
+#endif
+
+static void ProcessThreadRegistry(Frontier *frontier) {
+ InternalMmapVector<uptr> ptrs;
+ GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
+ GetAdditionalThreadContextPtrs, &ptrs);
+
+ for (uptr i = 0; i < ptrs.size(); ++i) {
+ void *ptr = reinterpret_cast<void *>(ptrs[i]);
+ uptr chunk = PointsIntoChunk(ptr);
+ if (!chunk)
+ continue;
+ LsanMetadata m(chunk);
+ if (!m.allocated())
+ continue;
+
+ // Mark as reachable and add to frontier.
+ LOG_POINTERS("Treating pointer %p from ThreadContext as reachable\n", ptr);
+ m.set_tag(kReachable);
+ frontier->push_back(chunk);
+ }
+}
+
// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
Frontier *frontier) {
- InternalMmapVector<uptr> registers(suspended_threads.RegisterCount());
- uptr registers_begin = reinterpret_cast<uptr>(registers.data());
- uptr registers_end =
- reinterpret_cast<uptr>(registers.data() + registers.size());
+ InternalMmapVector<uptr> registers;
for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
LOG_THREADS("Processing thread %d.\n", os_id);
@@ -244,7 +294,7 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
}
uptr sp;
PtraceRegistersStatus have_registers =
- suspended_threads.GetRegistersAndSP(i, registers.data(), &sp);
+ suspended_threads.GetRegistersAndSP(i, &registers, &sp);
if (have_registers != REGISTERS_AVAILABLE) {
Report("Unable to get registers from thread %d.\n", os_id);
// If unable to get SP, consider the entire stack to be reachable unless
@@ -253,9 +303,13 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
sp = stack_begin;
}
- if (flags()->use_registers && have_registers)
+ if (flags()->use_registers && have_registers) {
+ uptr registers_begin = reinterpret_cast<uptr>(registers.data());
+ uptr registers_end =
+ reinterpret_cast<uptr>(registers.data() + registers.size());
ScanRangeForPointers(registers_begin, registers_end, frontier,
"REGISTERS", kReachable);
+ }
if (flags()->use_stacks) {
LOG_THREADS("Stack at %p-%p (SP = %p).\n", stack_begin, stack_end, sp);
@@ -299,23 +353,41 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
kReachable);
}
}
+#if SANITIZER_ANDROID
+ auto *cb = +[](void *dtls_begin, void *dtls_end, uptr /*dso_idd*/,
+ void *arg) -> void {
+ ScanRangeForPointers(reinterpret_cast<uptr>(dtls_begin),
+ reinterpret_cast<uptr>(dtls_end),
+ reinterpret_cast<Frontier *>(arg), "DTLS",
+ kReachable);
+ };
+
+ // FIXME: There might be a race-condition here (and in Bionic) if the
+ // thread is suspended in the middle of updating its DTLS. IOWs, we
+ // could scan already freed memory. (probably fine for now)
+ __libc_iterate_dynamic_tls(os_id, cb, frontier);
+#else
if (dtls && !DTLSInDestruction(dtls)) {
- for (uptr j = 0; j < dtls->dtv_size; ++j) {
- uptr dtls_beg = dtls->dtv[j].beg;
- uptr dtls_end = dtls_beg + dtls->dtv[j].size;
+ ForEachDVT(dtls, [&](const DTLS::DTV &dtv, int id) {
+ uptr dtls_beg = dtv.beg;
+ uptr dtls_end = dtls_beg + dtv.size;
if (dtls_beg < dtls_end) {
- LOG_THREADS("DTLS %zu at %p-%p.\n", j, dtls_beg, dtls_end);
+ LOG_THREADS("DTLS %zu at %p-%p.\n", id, dtls_beg, dtls_end);
ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
kReachable);
}
- }
+ });
} else {
// We are handling a thread with DTLS under destruction. Log about
// this and continue.
LOG_THREADS("Thread %d has DTLS under destruction.\n", os_id);
}
+#endif
}
}
+
+ // Add pointers reachable from ThreadContexts
+ ProcessThreadRegistry(frontier);
}
#endif // SANITIZER_FUCHSIA
@@ -366,7 +438,6 @@ static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
// which are reachable from it as indirectly leaked.
static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
- __lsan_current_stage = "MarkIndirectlyLeakedCb";
chunk = GetUserBegin(chunk);
LsanMetadata m(chunk);
if (m.allocated() && m.tag() != kReachable) {
@@ -375,11 +446,28 @@ static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
}
}
+static void IgnoredSuppressedCb(uptr chunk, void *arg) {
+ CHECK(arg);
+ chunk = GetUserBegin(chunk);
+ LsanMetadata m(chunk);
+ if (!m.allocated() || m.tag() == kIgnored)
+ return;
+
+ const InternalMmapVector<u32> &suppressed =
+ *static_cast<const InternalMmapVector<u32> *>(arg);
+ uptr idx = InternalLowerBound(suppressed, m.stack_trace_id());
+ if (idx >= suppressed.size() || m.stack_trace_id() != suppressed[idx])
+ return;
+
+ LOG_POINTERS("Suppressed: chunk %p-%p of size %zu.\n", chunk,
+ chunk + m.requested_size(), m.requested_size());
+ m.set_tag(kIgnored);
+}
+
// ForEachChunk callback. If chunk is marked as ignored, adds its address to
// frontier.
static void CollectIgnoredCb(uptr chunk, void *arg) {
CHECK(arg);
- __lsan_current_stage = "CollectIgnoredCb";
chunk = GetUserBegin(chunk);
LsanMetadata m(chunk);
if (m.allocated() && m.tag() == kIgnored) {
@@ -409,7 +497,6 @@ struct InvalidPCParam {
static void MarkInvalidPCCb(uptr chunk, void *arg) {
CHECK(arg);
InvalidPCParam *param = reinterpret_cast<InvalidPCParam *>(arg);
- __lsan_current_stage = "MarkInvalidPCCb";
chunk = GetUserBegin(chunk);
LsanMetadata m(chunk);
if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) {
@@ -460,6 +547,12 @@ void ProcessPC(Frontier *frontier) {
// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
Frontier *frontier) {
+ const InternalMmapVector<u32> &suppressed_stacks =
+ GetSuppressionContext()->GetSortedSuppressedStacks();
+ if (!suppressed_stacks.empty()) {
+ ForEachChunk(IgnoredSuppressedCb,
+ const_cast<InternalMmapVector<u32> *>(&suppressed_stacks));
+ }
ForEachChunk(CollectIgnoredCb, frontier);
ProcessGlobalRegions(frontier);
ProcessThreads(suspended_threads, frontier);
@@ -485,7 +578,6 @@ static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
// ForEachChunk callback. Resets the tags to pre-leak-check state.
static void ResetTagsCb(uptr chunk, void *arg) {
(void)arg;
- __lsan_current_stage = "ResetTagsCb";
chunk = GetUserBegin(chunk);
LsanMetadata m(chunk);
if (m.allocated() && m.tag() != kIgnored)
@@ -502,7 +594,6 @@ static void PrintStackTraceById(u32 stack_trace_id) {
static void CollectLeaksCb(uptr chunk, void *arg) {
CHECK(arg);
LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
- __lsan_current_stage = "CollectLeaksCb";
chunk = GetUserBegin(chunk);
LsanMetadata m(chunk);
if (!m.allocated()) return;
@@ -521,18 +612,20 @@ static void CollectLeaksCb(uptr chunk, void *arg) {
}
}
-static void PrintMatchedSuppressions() {
+void LeakSuppressionContext::PrintMatchedSuppressions() {
InternalMmapVector<Suppression *> matched;
- GetSuppressionContext()->GetMatched(&matched);
+ context.GetMatched(&matched);
if (!matched.size())
return;
const char *line = "-----------------------------------------------------";
Printf("%s\n", line);
Printf("Suppressions used:\n");
Printf(" count bytes template\n");
- for (uptr i = 0; i < matched.size(); i++)
- Printf("%7zu %10zu %s\n", static_cast<uptr>(atomic_load_relaxed(
- &matched[i]->hit_count)), matched[i]->weight, matched[i]->templ);
+ for (uptr i = 0; i < matched.size(); i++) {
+ Printf("%7zu %10zu %s\n",
+ static_cast<uptr>(atomic_load_relaxed(&matched[i]->hit_count)),
+ matched[i]->weight, matched[i]->templ);
+ }
Printf("%s\n\n", line);
}
@@ -540,8 +633,7 @@ static void ReportIfNotSuspended(ThreadContextBase *tctx, void *arg) {
const InternalMmapVector<tid_t> &suspended_threads =
*(const InternalMmapVector<tid_t> *)arg;
if (tctx->status == ThreadStatusRunning) {
- uptr i = InternalLowerBound(suspended_threads, 0, suspended_threads.size(),
- tctx->os_id, CompareLess<int>());
+ uptr i = InternalLowerBound(suspended_threads, tctx->os_id);
if (i >= suspended_threads.size() || suspended_threads[i] != tctx->os_id)
Report("Running thread %d was not suspended. False leaks are possible.\n",
tctx->os_id);
@@ -584,43 +676,68 @@ static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
param->success = true;
}
-static bool CheckForLeaks() {
- if (&__lsan_is_turned_off && __lsan_is_turned_off())
- return false;
- EnsureMainThreadIDIsCorrect();
- CheckForLeaksParam param;
- LockStuffAndStopTheWorld(CheckForLeaksCallback, &param);
-
- if (!param.success) {
- Report("LeakSanitizer has encountered a fatal error.\n");
- Report(
- "HINT: For debugging, try setting environment variable "
- "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
- Report(
- "HINT: LeakSanitizer does not work under ptrace (strace, gdb, etc)\n");
- Die();
- }
- param.leak_report.ApplySuppressions();
- uptr unsuppressed_count = param.leak_report.UnsuppressedLeakCount();
- if (unsuppressed_count > 0) {
+static bool PrintResults(LeakReport &report) {
+ uptr unsuppressed_count = report.UnsuppressedLeakCount();
+ if (unsuppressed_count) {
Decorator d;
- Printf("\n"
- "================================================================="
- "\n");
+ Printf(
+ "\n"
+ "================================================================="
+ "\n");
Printf("%s", d.Error());
Report("ERROR: LeakSanitizer: detected memory leaks\n");
Printf("%s", d.Default());
- param.leak_report.ReportTopLeaks(flags()->max_leaks);
+ report.ReportTopLeaks(flags()->max_leaks);
}
if (common_flags()->print_suppressions)
- PrintMatchedSuppressions();
+ GetSuppressionContext()->PrintMatchedSuppressions();
if (unsuppressed_count > 0) {
- param.leak_report.PrintSummary();
+ report.PrintSummary();
return true;
}
return false;
}
+static bool CheckForLeaks() {
+ if (&__lsan_is_turned_off && __lsan_is_turned_off())
+ return false;
+ // Inside LockStuffAndStopTheWorld we can't run symbolizer, so we can't match
+ // suppressions. However if a stack id was previously suppressed, it should be
+ // suppressed in future checks as well.
+ for (int i = 0;; ++i) {
+ EnsureMainThreadIDIsCorrect();
+ CheckForLeaksParam param;
+ LockStuffAndStopTheWorld(CheckForLeaksCallback, &param);
+ if (!param.success) {
+ Report("LeakSanitizer has encountered a fatal error.\n");
+ Report(
+ "HINT: For debugging, try setting environment variable "
+ "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
+ Report(
+ "HINT: LeakSanitizer does not work under ptrace (strace, gdb, "
+ "etc)\n");
+ Die();
+ }
+ // No new suppressions stacks, so rerun will not help and we can report.
+ if (!param.leak_report.ApplySuppressions())
+ return PrintResults(param.leak_report);
+
+ // No indirect leaks to report, so we are done here.
+ if (!param.leak_report.IndirectUnsuppressedLeakCount())
+ return PrintResults(param.leak_report);
+
+ if (i >= 8) {
+ Report("WARNING: LeakSanitizer gave up on indirect leaks suppression.\n");
+ return PrintResults(param.leak_report);
+ }
+
+ // We found a new previously unseen suppressed call stack. Rerun to make
+ // sure it does not hold indirect leaks.
+ VReport(1, "Rerun with %zu suppressed stacks.",
+ GetSuppressionContext()->GetSortedSuppressedStacks().size());
+ }
+}
+
static bool has_reported_leaks = false;
bool HasReportedLeaks() { return has_reported_leaks; }
@@ -641,21 +758,20 @@ static int DoRecoverableLeakCheck() {
void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); }
-static Suppression *GetSuppressionForAddr(uptr addr) {
+Suppression *LeakSuppressionContext::GetSuppressionForAddr(uptr addr) {
Suppression *s = nullptr;
// Suppress by module name.
- SuppressionContext *suppressions = GetSuppressionContext();
if (const char *module_name =
Symbolizer::GetOrInit()->GetModuleNameForPc(addr))
- if (suppressions->Match(module_name, kSuppressionLeak, &s))
+ if (context.Match(module_name, kSuppressionLeak, &s))
return s;
// Suppress by file or function name.
SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
- if (suppressions->Match(cur->info.function, kSuppressionLeak, &s) ||
- suppressions->Match(cur->info.file, kSuppressionLeak, &s)) {
+ if (context.Match(cur->info.function, kSuppressionLeak, &s) ||
+ context.Match(cur->info.file, kSuppressionLeak, &s)) {
break;
}
}
@@ -663,12 +779,18 @@ static Suppression *GetSuppressionForAddr(uptr addr) {
return s;
}
-static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
+Suppression *LeakSuppressionContext::GetSuppressionForStack(
+ u32 stack_trace_id) {
+ LazyInit();
StackTrace stack = StackDepotGet(stack_trace_id);
for (uptr i = 0; i < stack.size; i++) {
Suppression *s = GetSuppressionForAddr(
StackTrace::GetPreviousInstructionPc(stack.trace[i]));
- if (s) return s;
+ if (s) {
+ suppressed_stacks_sorted = false;
+ suppressed_stacks.push_back(stack_trace_id);
+ return s;
+ }
}
return nullptr;
}
@@ -779,16 +901,21 @@ void LeakReport::PrintSummary() {
ReportErrorSummary(summary.data());
}
-void LeakReport::ApplySuppressions() {
+uptr LeakReport::ApplySuppressions() {
+ LeakSuppressionContext *suppressions = GetSuppressionContext();
+ uptr new_suppressions = false;
for (uptr i = 0; i < leaks_.size(); i++) {
- Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id);
+ Suppression *s =
+ suppressions->GetSuppressionForStack(leaks_[i].stack_trace_id);
if (s) {
s->weight += leaks_[i].total_size;
atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) +
leaks_[i].hit_count);
leaks_[i].is_suppressed = true;
+ ++new_suppressions;
}
}
+ return new_suppressions;
}
uptr LeakReport::UnsuppressedLeakCount() {
@@ -798,6 +925,14 @@ uptr LeakReport::UnsuppressedLeakCount() {
return result;
}
+uptr LeakReport::IndirectUnsuppressedLeakCount() {
+ uptr result = 0;
+ for (uptr i = 0; i < leaks_.size(); i++)
+ if (!leaks_[i].is_suppressed && !leaks_[i].is_directly_leaked)
+ result++;
+ return result;
+}
+
} // namespace __lsan
#else // CAN_SANITIZE_LEAKS
namespace __lsan {
@@ -900,12 +1035,11 @@ int __lsan_do_recoverable_leak_check() {
return 0;
}
-#if !SANITIZER_SUPPORTS_WEAK_HOOKS
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-const char * __lsan_default_options() {
+SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_options, void) {
return "";
}
+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off() {
return 0;
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.h b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.h
index 3434beede828..b0ae6f020b63 100644
--- a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.h
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.h
@@ -29,16 +29,17 @@
// To enable LeakSanitizer on a new architecture, one needs to implement the
// internal_clone function as well as (probably) adjust the TLS machinery for
// the new architecture inside the sanitizer library.
-#if (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC) && \
- (SANITIZER_WORDSIZE == 64) && \
- (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__) || \
+// Exclude leak-detection on arm32 for Android because `__aeabi_read_tp`
+// is missing. This caused a link error.
+#if SANITIZER_ANDROID && (__ANDROID_API__ < 28 || defined(__arm__))
+#define CAN_SANITIZE_LEAKS 0
+#elif (SANITIZER_LINUX || SANITIZER_MAC) && (SANITIZER_WORDSIZE == 64) && \
+ (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__) || \
defined(__powerpc64__) || defined(__s390x__))
#define CAN_SANITIZE_LEAKS 1
-#elif defined(__i386__) && \
- (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC)
+#elif defined(__i386__) && (SANITIZER_LINUX || SANITIZER_MAC)
#define CAN_SANITIZE_LEAKS 1
-#elif defined(__arm__) && \
- SANITIZER_LINUX && !SANITIZER_ANDROID
+#elif defined(__arm__) && SANITIZER_LINUX
#define CAN_SANITIZE_LEAKS 1
#elif SANITIZER_NETBSD || SANITIZER_FUCHSIA
#define CAN_SANITIZE_LEAKS 1
@@ -49,6 +50,7 @@
namespace __sanitizer {
class FlagParser;
class ThreadRegistry;
+class ThreadContextBase;
struct DTLS;
}
@@ -102,8 +104,9 @@ class LeakReport {
ChunkTag tag);
void ReportTopLeaks(uptr max_leaks);
void PrintSummary();
- void ApplySuppressions();
+ uptr ApplySuppressions();
uptr UnsuppressedLeakCount();
+ uptr IndirectUnsuppressedLeakCount();
private:
void PrintReportForLeak(uptr index);
@@ -140,6 +143,7 @@ InternalMmapVector<RootRegion> const *GetRootRegions();
void ScanRootRegion(Frontier *frontier, RootRegion const &region,
uptr region_begin, uptr region_end, bool is_readable);
void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg);
+void GetAdditionalThreadContextPtrs(ThreadContextBase *tctx, void *ptrs);
// Run stoptheworld while holding any platform-specific locks, as well as the
// allocator and thread registry locks.
void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_fuchsia.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_fuchsia.cpp
index caedbf155969..2d35fa5b1cff 100644
--- a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_fuchsia.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_fuchsia.cpp
@@ -19,6 +19,7 @@
#include "lsan_allocator.h"
#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_stoptheworld_fuchsia.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
// Ensure that the Zircon system ABI is linked in.
@@ -106,9 +107,7 @@ void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
auto params = static_cast<const Params *>(data);
uptr begin = reinterpret_cast<uptr>(chunk);
uptr end = begin + size;
- auto i = __sanitizer::InternalLowerBound(params->allocator_caches, 0,
- params->allocator_caches.size(),
- begin, CompareLess<uptr>());
+ auto i = __sanitizer::InternalLowerBound(params->allocator_caches, begin);
if (i < params->allocator_caches.size() &&
params->allocator_caches[i] >= begin &&
end - params->allocator_caches[i] <= sizeof(AllocatorCache)) {
@@ -147,7 +146,7 @@ void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
&params->argument->frontier);
}
- params->callback({}, params->argument);
+ params->callback(SuspendedThreadsListFuchsia(), params->argument);
},
&params);
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_linux.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_linux.cpp
index c97ef31593df..3af586e220f6 100644
--- a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_linux.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_linux.cpp
@@ -93,6 +93,11 @@ static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
return 0;
}
+#if SANITIZER_ANDROID && __ANDROID_API__ < 21
+extern "C" __attribute__((weak)) int dl_iterate_phdr(
+ int (*)(struct dl_phdr_info *, size_t, void *), void *);
+#endif
+
// Scans global variables for heap pointers.
void ProcessGlobalRegions(Frontier *frontier) {
if (!flags()->use_globals) return;
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_fuchsia.h b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_fuchsia.h
index 65d20ea21148..e730d8f25f21 100644
--- a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_fuchsia.h
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_fuchsia.h
@@ -23,7 +23,7 @@
namespace __lsan {
-class ThreadContext : public ThreadContextLsanBase {
+class ThreadContext final : public ThreadContextLsanBase {
public:
explicit ThreadContext(int tid);
void OnCreated(void *arg) override;
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_interceptors.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_interceptors.cpp
index 9ce9b78c5a5f..bf8d316770ee 100644
--- a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_interceptors.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_interceptors.cpp
@@ -115,7 +115,11 @@ INTERCEPTOR(void*, memalign, uptr alignment, uptr size) {
return lsan_memalign(alignment, size, stack);
}
#define LSAN_MAYBE_INTERCEPT_MEMALIGN INTERCEPT_FUNCTION(memalign)
+#else
+#define LSAN_MAYBE_INTERCEPT_MEMALIGN
+#endif // SANITIZER_INTERCEPT_MEMALIGN
+#if SANITIZER_INTERCEPT___LIBC_MEMALIGN
INTERCEPTOR(void *, __libc_memalign, uptr alignment, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
@@ -125,9 +129,8 @@ INTERCEPTOR(void *, __libc_memalign, uptr alignment, uptr size) {
}
#define LSAN_MAYBE_INTERCEPT___LIBC_MEMALIGN INTERCEPT_FUNCTION(__libc_memalign)
#else
-#define LSAN_MAYBE_INTERCEPT_MEMALIGN
#define LSAN_MAYBE_INTERCEPT___LIBC_MEMALIGN
-#endif // SANITIZER_INTERCEPT_MEMALIGN
+#endif // SANITIZER_INTERCEPT___LIBC_MEMALIGN
#if SANITIZER_INTERCEPT_ALIGNED_ALLOC
INTERCEPTOR(void*, aligned_alloc, uptr alignment, uptr size) {
@@ -476,6 +479,15 @@ INTERCEPTOR(int, pthread_join, void *th, void **ret) {
return res;
}
+INTERCEPTOR(int, pthread_detach, void *th) {
+ ENSURE_LSAN_INITED;
+ int tid = ThreadTid((uptr)th);
+ int res = REAL(pthread_detach)(th);
+ if (res == 0)
+ ThreadDetach(tid);
+ return res;
+}
+
INTERCEPTOR(void, _exit, int status) {
if (status == 0 && HasReportedLeaks()) status = common_flags()->exitcode;
REAL(_exit)(status);
@@ -508,6 +520,7 @@ void InitializeInterceptors() {
LSAN_MAYBE_INTERCEPT_MALLINFO;
LSAN_MAYBE_INTERCEPT_MALLOPT;
INTERCEPT_FUNCTION(pthread_create);
+ INTERCEPT_FUNCTION(pthread_detach);
INTERCEPT_FUNCTION(pthread_join);
INTERCEPT_FUNCTION(_exit);
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_posix.h b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_posix.h
index 840e427c55e3..b1265f233f36 100644
--- a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_posix.h
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_posix.h
@@ -27,7 +27,7 @@ struct DTLS;
namespace __lsan {
-class ThreadContext : public ThreadContextLsanBase {
+class ThreadContext final : public ThreadContextLsanBase {
public:
explicit ThreadContext(int tid);
void OnStarted(void *arg) override;
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_thread.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_thread.cpp
index 40bdc254bb62..371a1f29dfe0 100644
--- a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_thread.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_thread.cpp
@@ -83,6 +83,11 @@ u32 ThreadTid(uptr uid) {
return thread_registry->FindThread(FindThreadByUid, (void *)uid);
}
+void ThreadDetach(u32 tid) {
+ CHECK_NE(tid, kInvalidTid);
+ thread_registry->DetachThread(tid, /* arg */ nullptr);
+}
+
void ThreadJoin(u32 tid) {
CHECK_NE(tid, kInvalidTid);
thread_registry->JoinThread(tid, /* arg */ nullptr);
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_thread.h b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_thread.h
index 0ab1582de662..36643753d019 100644
--- a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_thread.h
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_thread.h
@@ -32,6 +32,7 @@ class ThreadContextLsanBase : public ThreadContextBase {
void *onstarted_arg);
protected:
+ ~ThreadContextLsanBase() {}
uptr stack_begin_ = 0;
uptr stack_end_ = 0;
uptr cache_begin_ = 0;
@@ -46,6 +47,7 @@ void InitializeMainThread();
u32 ThreadCreate(u32 tid, uptr uid, bool detached, void *arg = nullptr);
void ThreadFinish();
+void ThreadDetach(u32 tid);
void ThreadJoin(u32 tid);
u32 ThreadTid(uptr uid);
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/README.txt b/contrib/llvm-project/compiler-rt/lib/memprof/README.txt
new file mode 100644
index 000000000000..82012c5e71b0
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/README.txt
@@ -0,0 +1,17 @@
+MemProfiling RT
+================================
+This directory contains sources of the MemProfiling (MemProf) runtime library.
+
+Directory structure:
+README.txt : This file.
+CMakeLists.txt : File for cmake-based build.
+memprof_*.{cc,h} : Sources of the memprof runtime library.
+
+Also MemProf runtime needs the following libraries:
+lib/interception/ : Machinery used to intercept function calls.
+lib/sanitizer_common/ : Code shared between various sanitizers.
+
+MemProf runtime can only be built by CMake. You can run MemProf tests
+from the root of your CMake build tree:
+
+make check-memprof
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof.syms.extra b/contrib/llvm-project/compiler-rt/lib/memprof/memprof.syms.extra
new file mode 100644
index 000000000000..173280ffe97f
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof.syms.extra
@@ -0,0 +1 @@
+__memprof_*
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_allocator.cpp b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_allocator.cpp
new file mode 100644
index 000000000000..259c7c144ab7
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_allocator.cpp
@@ -0,0 +1,905 @@
+//===-- memprof_allocator.cpp --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemProfiler, a memory profiler.
+//
+// Implementation of MemProf's memory allocator, which uses the allocator
+// from sanitizer_common.
+//
+//===----------------------------------------------------------------------===//
+
+#include "memprof_allocator.h"
+#include "memprof_mapping.h"
+#include "memprof_stack.h"
+#include "memprof_thread.h"
+#include "sanitizer_common/sanitizer_allocator_checks.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_allocator_report.h"
+#include "sanitizer_common/sanitizer_errno.h"
+#include "sanitizer_common/sanitizer_file.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_list.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+
+#include <sched.h>
+#include <stdlib.h>
+#include <time.h>
+
+namespace __memprof {
+
+static int GetCpuId(void) {
+ // _memprof_preinit is called via the preinit_array, which subsequently calls
+ // malloc. Since this is before _dl_init calls VDSO_SETUP, sched_getcpu
+ // will seg fault as the address of __vdso_getcpu will be null.
+ if (!memprof_init_done)
+ return -1;
+ return sched_getcpu();
+}
+
+// Compute the timestamp in ms.
+static int GetTimestamp(void) {
+ // timespec_get will segfault if called from dl_init
+ if (!memprof_timestamp_inited) {
+ // By returning 0, this will be effectively treated as being
+ // timestamped at memprof init time (when memprof_init_timestamp_s
+ // is initialized).
+ return 0;
+ }
+ timespec ts;
+ clock_gettime(CLOCK_REALTIME, &ts);
+ return (ts.tv_sec - memprof_init_timestamp_s) * 1000 + ts.tv_nsec / 1000000;
+}
+
+static MemprofAllocator &get_allocator();
+
+// The memory chunk allocated from the underlying allocator looks like this:
+// H H U U U U U U
+// H -- ChunkHeader (32 bytes)
+// U -- user memory.
+
+// If there is left padding before the ChunkHeader (due to use of memalign),
+// we store a magic value in the first uptr word of the memory block and
+// store the address of ChunkHeader in the next uptr.
+// M B L L L L L L L L L H H U U U U U U
+// | ^
+// ---------------------|
+// M -- magic value kAllocBegMagic
+// B -- address of ChunkHeader pointing to the first 'H'
+
+constexpr uptr kMaxAllowedMallocBits = 40;
+
+// Should be no more than 32-bytes
+struct ChunkHeader {
+ // 1-st 4 bytes.
+ u32 alloc_context_id;
+ // 2-nd 4 bytes
+ u32 cpu_id;
+ // 3-rd 4 bytes
+ u32 timestamp_ms;
+ // 4-th 4 bytes
+ // Note only 1 bit is needed for this flag if we need space in the future for
+ // more fields.
+ u32 from_memalign;
+ // 5-th and 6-th 4 bytes
+ // The max size of an allocation is 2^40 (kMaxAllowedMallocSize), so this
+ // could be shrunk to kMaxAllowedMallocBits if we need space in the future for
+ // more fields.
+ atomic_uint64_t user_requested_size;
+ // 23 bits available
+ // 7-th and 8-th 4 bytes
+ u64 data_type_id; // TODO: hash of type name
+};
+
+static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
+COMPILER_CHECK(kChunkHeaderSize == 32);
+
+struct MemprofChunk : ChunkHeader {
+ uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
+ uptr UsedSize() {
+ return atomic_load(&user_requested_size, memory_order_relaxed);
+ }
+ void *AllocBeg() {
+ if (from_memalign)
+ return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this));
+ return reinterpret_cast<void *>(this);
+ }
+};
+
+class LargeChunkHeader {
+ static constexpr uptr kAllocBegMagic =
+ FIRST_32_SECOND_64(0xCC6E96B9, 0xCC6E96B9CC6E96B9ULL);
+ atomic_uintptr_t magic;
+ MemprofChunk *chunk_header;
+
+public:
+ MemprofChunk *Get() const {
+ return atomic_load(&magic, memory_order_acquire) == kAllocBegMagic
+ ? chunk_header
+ : nullptr;
+ }
+
+ void Set(MemprofChunk *p) {
+ if (p) {
+ chunk_header = p;
+ atomic_store(&magic, kAllocBegMagic, memory_order_release);
+ return;
+ }
+
+ uptr old = kAllocBegMagic;
+ if (!atomic_compare_exchange_strong(&magic, &old, 0,
+ memory_order_release)) {
+ CHECK_EQ(old, kAllocBegMagic);
+ }
+ }
+};
+
+void FlushUnneededMemProfShadowMemory(uptr p, uptr size) {
+ // Since memprof's mapping is compacting, the shadow chunk may be
+ // not page-aligned, so we only flush the page-aligned portion.
+ ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
+}
+
+void MemprofMapUnmapCallback::OnMap(uptr p, uptr size) const {
+ // Statistics.
+ MemprofStats &thread_stats = GetCurrentThreadStats();
+ thread_stats.mmaps++;
+ thread_stats.mmaped += size;
+}
+void MemprofMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
+ // We are about to unmap a chunk of user memory.
+ // Mark the corresponding shadow memory as not needed.
+ FlushUnneededMemProfShadowMemory(p, size);
+ // Statistics.
+ MemprofStats &thread_stats = GetCurrentThreadStats();
+ thread_stats.munmaps++;
+ thread_stats.munmaped += size;
+}
+
+AllocatorCache *GetAllocatorCache(MemprofThreadLocalMallocStorage *ms) {
+ CHECK(ms);
+ return &ms->allocator_cache;
+}
+
+struct MemInfoBlock {
+ u32 alloc_count;
+ u64 total_access_count, min_access_count, max_access_count;
+ u64 total_size;
+ u32 min_size, max_size;
+ u32 alloc_timestamp, dealloc_timestamp;
+ u64 total_lifetime;
+ u32 min_lifetime, max_lifetime;
+ u32 alloc_cpu_id, dealloc_cpu_id;
+ u32 num_migrated_cpu;
+
+ // Only compared to prior deallocated object currently.
+ u32 num_lifetime_overlaps;
+ u32 num_same_alloc_cpu;
+ u32 num_same_dealloc_cpu;
+
+ u64 data_type_id; // TODO: hash of type name
+
+ MemInfoBlock() : alloc_count(0) {}
+
+ MemInfoBlock(u32 size, u64 access_count, u32 alloc_timestamp,
+ u32 dealloc_timestamp, u32 alloc_cpu, u32 dealloc_cpu)
+ : alloc_count(1), total_access_count(access_count),
+ min_access_count(access_count), max_access_count(access_count),
+ total_size(size), min_size(size), max_size(size),
+ alloc_timestamp(alloc_timestamp), dealloc_timestamp(dealloc_timestamp),
+ total_lifetime(dealloc_timestamp - alloc_timestamp),
+ min_lifetime(total_lifetime), max_lifetime(total_lifetime),
+ alloc_cpu_id(alloc_cpu), dealloc_cpu_id(dealloc_cpu),
+ num_lifetime_overlaps(0), num_same_alloc_cpu(0),
+ num_same_dealloc_cpu(0) {
+ num_migrated_cpu = alloc_cpu_id != dealloc_cpu_id;
+ }
+
+ void Print(u64 id) {
+ u64 p;
+ if (flags()->print_terse) {
+ p = total_size * 100 / alloc_count;
+ Printf("MIB:%llu/%u/%d.%02d/%u/%u/", id, alloc_count, p / 100, p % 100,
+ min_size, max_size);
+ p = total_access_count * 100 / alloc_count;
+ Printf("%d.%02d/%u/%u/", p / 100, p % 100, min_access_count,
+ max_access_count);
+ p = total_lifetime * 100 / alloc_count;
+ Printf("%d.%02d/%u/%u/", p / 100, p % 100, min_lifetime, max_lifetime);
+ Printf("%u/%u/%u/%u\n", num_migrated_cpu, num_lifetime_overlaps,
+ num_same_alloc_cpu, num_same_dealloc_cpu);
+ } else {
+ p = total_size * 100 / alloc_count;
+ Printf("Memory allocation stack id = %llu\n", id);
+ Printf("\talloc_count %u, size (ave/min/max) %d.%02d / %u / %u\n",
+ alloc_count, p / 100, p % 100, min_size, max_size);
+ p = total_access_count * 100 / alloc_count;
+ Printf("\taccess_count (ave/min/max): %d.%02d / %u / %u\n", p / 100,
+ p % 100, min_access_count, max_access_count);
+ p = total_lifetime * 100 / alloc_count;
+ Printf("\tlifetime (ave/min/max): %d.%02d / %u / %u\n", p / 100, p % 100,
+ min_lifetime, max_lifetime);
+ Printf("\tnum migrated: %u, num lifetime overlaps: %u, num same alloc "
+ "cpu: %u, num same dealloc_cpu: %u\n",
+ num_migrated_cpu, num_lifetime_overlaps, num_same_alloc_cpu,
+ num_same_dealloc_cpu);
+ }
+ }
+
+ static void printHeader() {
+ CHECK(flags()->print_terse);
+ Printf("MIB:StackID/AllocCount/AveSize/MinSize/MaxSize/AveAccessCount/"
+ "MinAccessCount/MaxAccessCount/AveLifetime/MinLifetime/MaxLifetime/"
+ "NumMigratedCpu/NumLifetimeOverlaps/NumSameAllocCpu/"
+ "NumSameDeallocCpu\n");
+ }
+
+ void Merge(MemInfoBlock &newMIB) {
+ alloc_count += newMIB.alloc_count;
+
+ total_access_count += newMIB.total_access_count;
+ min_access_count = Min(min_access_count, newMIB.min_access_count);
+ max_access_count = Max(max_access_count, newMIB.max_access_count);
+
+ total_size += newMIB.total_size;
+ min_size = Min(min_size, newMIB.min_size);
+ max_size = Max(max_size, newMIB.max_size);
+
+ total_lifetime += newMIB.total_lifetime;
+ min_lifetime = Min(min_lifetime, newMIB.min_lifetime);
+ max_lifetime = Max(max_lifetime, newMIB.max_lifetime);
+
+ // We know newMIB was deallocated later, so just need to check if it was
+ // allocated before last one deallocated.
+ num_lifetime_overlaps += newMIB.alloc_timestamp < dealloc_timestamp;
+ alloc_timestamp = newMIB.alloc_timestamp;
+ dealloc_timestamp = newMIB.dealloc_timestamp;
+
+ num_same_alloc_cpu += alloc_cpu_id == newMIB.alloc_cpu_id;
+ num_same_dealloc_cpu += dealloc_cpu_id == newMIB.dealloc_cpu_id;
+ alloc_cpu_id = newMIB.alloc_cpu_id;
+ dealloc_cpu_id = newMIB.dealloc_cpu_id;
+ }
+};
+
+static u32 AccessCount = 0;
+static u32 MissCount = 0;
+
+struct SetEntry {
+ SetEntry() : id(0), MIB() {}
+ bool Empty() { return id == 0; }
+ void Print() {
+ CHECK(!Empty());
+ MIB.Print(id);
+ }
+ // The stack id
+ u64 id;
+ MemInfoBlock MIB;
+};
+
+struct CacheSet {
+ enum { kSetSize = 4 };
+
+ void PrintAll() {
+ for (int i = 0; i < kSetSize; i++) {
+ if (Entries[i].Empty())
+ continue;
+ Entries[i].Print();
+ }
+ }
+ void insertOrMerge(u64 new_id, MemInfoBlock &newMIB) {
+ AccessCount++;
+ SetAccessCount++;
+
+ for (int i = 0; i < kSetSize; i++) {
+ auto id = Entries[i].id;
+ // Check if this is a hit or an empty entry. Since we always move any
+ // filled locations to the front of the array (see below), we don't need
+ // to look after finding the first empty entry.
+ if (id == new_id || !id) {
+ if (id == 0) {
+ Entries[i].id = new_id;
+ Entries[i].MIB = newMIB;
+ } else {
+ Entries[i].MIB.Merge(newMIB);
+ }
+ // Assuming some id locality, we try to swap the matching entry
+ // into the first set position.
+ if (i != 0) {
+ auto tmp = Entries[0];
+ Entries[0] = Entries[i];
+ Entries[i] = tmp;
+ }
+ return;
+ }
+ }
+
+ // Miss
+ MissCount++;
+ SetMissCount++;
+
+ // We try to find the entries with the lowest alloc count to be evicted:
+ int min_idx = 0;
+ u64 min_count = Entries[0].MIB.alloc_count;
+ for (int i = 1; i < kSetSize; i++) {
+ CHECK(!Entries[i].Empty());
+ if (Entries[i].MIB.alloc_count < min_count) {
+ min_idx = i;
+ min_count = Entries[i].MIB.alloc_count;
+ }
+ }
+
+ // Print the evicted entry profile information
+ if (!flags()->print_terse)
+ Printf("Evicted:\n");
+ Entries[min_idx].Print();
+
+ // Similar to the hit case, put new MIB in first set position.
+ if (min_idx != 0)
+ Entries[min_idx] = Entries[0];
+ Entries[0].id = new_id;
+ Entries[0].MIB = newMIB;
+ }
+
+ void PrintMissRate(int i) {
+ u64 p = SetAccessCount ? SetMissCount * 10000ULL / SetAccessCount : 0;
+ Printf("Set %d miss rate: %d / %d = %5d.%02d%%\n", i, SetMissCount,
+ SetAccessCount, p / 100, p % 100);
+ }
+
+ SetEntry Entries[kSetSize];
+ u32 SetAccessCount = 0;
+ u32 SetMissCount = 0;
+};
+
+struct MemInfoBlockCache {
+ MemInfoBlockCache() {
+ if (common_flags()->print_module_map)
+ DumpProcessMap();
+ if (flags()->print_terse)
+ MemInfoBlock::printHeader();
+ Sets =
+ (CacheSet *)malloc(sizeof(CacheSet) * flags()->mem_info_cache_entries);
+ Constructed = true;
+ }
+
+ ~MemInfoBlockCache() { free(Sets); }
+
+ void insertOrMerge(u64 new_id, MemInfoBlock &newMIB) {
+ u64 hv = new_id;
+
+ // Use mod method where number of entries should be a prime close to power
+ // of 2.
+ hv %= flags()->mem_info_cache_entries;
+
+ return Sets[hv].insertOrMerge(new_id, newMIB);
+ }
+
+ void PrintAll() {
+ for (int i = 0; i < flags()->mem_info_cache_entries; i++) {
+ Sets[i].PrintAll();
+ }
+ }
+
+ void PrintMissRate() {
+ if (!flags()->print_mem_info_cache_miss_rate)
+ return;
+ u64 p = AccessCount ? MissCount * 10000ULL / AccessCount : 0;
+ Printf("Overall miss rate: %d / %d = %5d.%02d%%\n", MissCount, AccessCount,
+ p / 100, p % 100);
+ if (flags()->print_mem_info_cache_miss_rate_details)
+ for (int i = 0; i < flags()->mem_info_cache_entries; i++)
+ Sets[i].PrintMissRate(i);
+ }
+
+ CacheSet *Sets;
+ // Flag when the Sets have been allocated, in case a deallocation is called
+ // very early before the static init of the Allocator and therefore this table
+ // have completed.
+ bool Constructed = false;
+};
+
+// Accumulates the access count from the shadow for the given pointer and size.
+u64 GetShadowCount(uptr p, u32 size) {
+ u64 *shadow = (u64 *)MEM_TO_SHADOW(p);
+ u64 *shadow_end = (u64 *)MEM_TO_SHADOW(p + size);
+ u64 count = 0;
+ for (; shadow <= shadow_end; shadow++)
+ count += *shadow;
+ return count;
+}
+
+// Clears the shadow counters (when memory is allocated).
+void ClearShadow(uptr addr, uptr size) {
+ CHECK(AddrIsAlignedByGranularity(addr));
+ CHECK(AddrIsInMem(addr));
+ CHECK(AddrIsAlignedByGranularity(addr + size));
+ CHECK(AddrIsInMem(addr + size - SHADOW_GRANULARITY));
+ CHECK(REAL(memset));
+ uptr shadow_beg = MEM_TO_SHADOW(addr);
+ uptr shadow_end = MEM_TO_SHADOW(addr + size - SHADOW_GRANULARITY) + 1;
+ if (shadow_end - shadow_beg < common_flags()->clear_shadow_mmap_threshold) {
+ REAL(memset)((void *)shadow_beg, 0, shadow_end - shadow_beg);
+ } else {
+ uptr page_size = GetPageSizeCached();
+ uptr page_beg = RoundUpTo(shadow_beg, page_size);
+ uptr page_end = RoundDownTo(shadow_end, page_size);
+
+ if (page_beg >= page_end) {
+ REAL(memset)((void *)shadow_beg, 0, shadow_end - shadow_beg);
+ } else {
+ if (page_beg != shadow_beg) {
+ REAL(memset)((void *)shadow_beg, 0, page_beg - shadow_beg);
+ }
+ if (page_end != shadow_end) {
+ REAL(memset)((void *)page_end, 0, shadow_end - page_end);
+ }
+ ReserveShadowMemoryRange(page_beg, page_end - 1, nullptr);
+ }
+ }
+}
+
+struct Allocator {
+ static const uptr kMaxAllowedMallocSize = 1ULL << kMaxAllowedMallocBits;
+
+ MemprofAllocator allocator;
+ StaticSpinMutex fallback_mutex;
+ AllocatorCache fallback_allocator_cache;
+
+ uptr max_user_defined_malloc_size;
+ atomic_uint8_t rss_limit_exceeded;
+
+ MemInfoBlockCache MemInfoBlockTable;
+ bool destructing;
+
+ // ------------------- Initialization ------------------------
+ explicit Allocator(LinkerInitialized) : destructing(false) {}
+
+ ~Allocator() { FinishAndPrint(); }
+
+ void FinishAndPrint() {
+ if (!flags()->print_terse)
+ Printf("Live on exit:\n");
+ allocator.ForceLock();
+ allocator.ForEachChunk(
+ [](uptr chunk, void *alloc) {
+ u64 user_requested_size;
+ MemprofChunk *m =
+ ((Allocator *)alloc)
+ ->GetMemprofChunk((void *)chunk, user_requested_size);
+ if (!m)
+ return;
+ uptr user_beg = ((uptr)m) + kChunkHeaderSize;
+ u64 c = GetShadowCount(user_beg, user_requested_size);
+ long curtime = GetTimestamp();
+ MemInfoBlock newMIB(user_requested_size, c, m->timestamp_ms, curtime,
+ m->cpu_id, GetCpuId());
+ ((Allocator *)alloc)
+ ->MemInfoBlockTable.insertOrMerge(m->alloc_context_id, newMIB);
+ },
+ this);
+ allocator.ForceUnlock();
+
+ destructing = true;
+ MemInfoBlockTable.PrintMissRate();
+ MemInfoBlockTable.PrintAll();
+ StackDepotPrintAll();
+ }
+
+ void InitLinkerInitialized() {
+ SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
+ allocator.InitLinkerInitialized(
+ common_flags()->allocator_release_to_os_interval_ms);
+ max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
+ ? common_flags()->max_allocation_size_mb
+ << 20
+ : kMaxAllowedMallocSize;
+ }
+
+ bool RssLimitExceeded() {
+ return atomic_load(&rss_limit_exceeded, memory_order_relaxed);
+ }
+
+ void SetRssLimitExceeded(bool limit_exceeded) {
+ atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed);
+ }
+
+ // -------------------- Allocation/Deallocation routines ---------------
+ void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
+ AllocType alloc_type) {
+ if (UNLIKELY(!memprof_inited))
+ MemprofInitFromRtl();
+ if (RssLimitExceeded()) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportRssLimitExceeded(stack);
+ }
+ CHECK(stack);
+ const uptr min_alignment = MEMPROF_ALIGNMENT;
+ if (alignment < min_alignment)
+ alignment = min_alignment;
+ if (size == 0) {
+ // We'd be happy to avoid allocating memory for zero-size requests, but
+ // some programs/tests depend on this behavior and assume that malloc
+ // would not return NULL even for zero-size allocations. Moreover, it
+ // looks like operator new should never return NULL, and results of
+ // consecutive "new" calls must be different even if the allocated size
+ // is zero.
+ size = 1;
+ }
+ CHECK(IsPowerOfTwo(alignment));
+ uptr rounded_size = RoundUpTo(size, alignment);
+ uptr needed_size = rounded_size + kChunkHeaderSize;
+ if (alignment > min_alignment)
+ needed_size += alignment;
+ CHECK(IsAligned(needed_size, min_alignment));
+ if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize ||
+ size > max_user_defined_malloc_size) {
+ if (AllocatorMayReturnNull()) {
+ Report("WARNING: MemProfiler failed to allocate 0x%zx bytes\n",
+ (void *)size);
+ return nullptr;
+ }
+ uptr malloc_limit =
+ Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
+ ReportAllocationSizeTooBig(size, malloc_limit, stack);
+ }
+
+ MemprofThread *t = GetCurrentThread();
+ void *allocated;
+ if (t) {
+ AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
+ allocated = allocator.Allocate(cache, needed_size, 8);
+ } else {
+ SpinMutexLock l(&fallback_mutex);
+ AllocatorCache *cache = &fallback_allocator_cache;
+ allocated = allocator.Allocate(cache, needed_size, 8);
+ }
+ if (UNLIKELY(!allocated)) {
+ SetAllocatorOutOfMemory();
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportOutOfMemory(size, stack);
+ }
+
+ uptr alloc_beg = reinterpret_cast<uptr>(allocated);
+ uptr alloc_end = alloc_beg + needed_size;
+ uptr beg_plus_header = alloc_beg + kChunkHeaderSize;
+ uptr user_beg = beg_plus_header;
+ if (!IsAligned(user_beg, alignment))
+ user_beg = RoundUpTo(user_beg, alignment);
+ uptr user_end = user_beg + size;
+ CHECK_LE(user_end, alloc_end);
+ uptr chunk_beg = user_beg - kChunkHeaderSize;
+ MemprofChunk *m = reinterpret_cast<MemprofChunk *>(chunk_beg);
+ m->from_memalign = alloc_beg != chunk_beg;
+ CHECK(size);
+
+ m->cpu_id = GetCpuId();
+ m->timestamp_ms = GetTimestamp();
+ m->alloc_context_id = StackDepotPut(*stack);
+
+ uptr size_rounded_down_to_granularity =
+ RoundDownTo(size, SHADOW_GRANULARITY);
+ if (size_rounded_down_to_granularity)
+ ClearShadow(user_beg, size_rounded_down_to_granularity);
+
+ MemprofStats &thread_stats = GetCurrentThreadStats();
+ thread_stats.mallocs++;
+ thread_stats.malloced += size;
+ thread_stats.malloced_overhead += needed_size - size;
+ if (needed_size > SizeClassMap::kMaxSize)
+ thread_stats.malloc_large++;
+ else
+ thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++;
+
+ void *res = reinterpret_cast<void *>(user_beg);
+ atomic_store(&m->user_requested_size, size, memory_order_release);
+ if (alloc_beg != chunk_beg) {
+ CHECK_LE(alloc_beg + sizeof(LargeChunkHeader), chunk_beg);
+ reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Set(m);
+ }
+ MEMPROF_MALLOC_HOOK(res, size);
+ return res;
+ }
+
+ void Deallocate(void *ptr, uptr delete_size, uptr delete_alignment,
+ BufferedStackTrace *stack, AllocType alloc_type) {
+ uptr p = reinterpret_cast<uptr>(ptr);
+ if (p == 0)
+ return;
+
+ MEMPROF_FREE_HOOK(ptr);
+
+ uptr chunk_beg = p - kChunkHeaderSize;
+ MemprofChunk *m = reinterpret_cast<MemprofChunk *>(chunk_beg);
+
+ u64 user_requested_size =
+ atomic_exchange(&m->user_requested_size, 0, memory_order_acquire);
+ if (memprof_inited && memprof_init_done && !destructing &&
+ MemInfoBlockTable.Constructed) {
+ u64 c = GetShadowCount(p, user_requested_size);
+ long curtime = GetTimestamp();
+
+ MemInfoBlock newMIB(user_requested_size, c, m->timestamp_ms, curtime,
+ m->cpu_id, GetCpuId());
+ {
+ SpinMutexLock l(&fallback_mutex);
+ MemInfoBlockTable.insertOrMerge(m->alloc_context_id, newMIB);
+ }
+ }
+
+ MemprofStats &thread_stats = GetCurrentThreadStats();
+ thread_stats.frees++;
+ thread_stats.freed += user_requested_size;
+
+ void *alloc_beg = m->AllocBeg();
+ if (alloc_beg != m) {
+ // Clear the magic value, as allocator internals may overwrite the
+ // contents of deallocated chunk, confusing GetMemprofChunk lookup.
+ reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Set(nullptr);
+ }
+
+ MemprofThread *t = GetCurrentThread();
+ if (t) {
+ AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
+ allocator.Deallocate(cache, alloc_beg);
+ } else {
+ SpinMutexLock l(&fallback_mutex);
+ AllocatorCache *cache = &fallback_allocator_cache;
+ allocator.Deallocate(cache, alloc_beg);
+ }
+ }
+
+ void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
+ CHECK(old_ptr && new_size);
+ uptr p = reinterpret_cast<uptr>(old_ptr);
+ uptr chunk_beg = p - kChunkHeaderSize;
+ MemprofChunk *m = reinterpret_cast<MemprofChunk *>(chunk_beg);
+
+ MemprofStats &thread_stats = GetCurrentThreadStats();
+ thread_stats.reallocs++;
+ thread_stats.realloced += new_size;
+
+ void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC);
+ if (new_ptr) {
+ CHECK_NE(REAL(memcpy), nullptr);
+ uptr memcpy_size = Min(new_size, m->UsedSize());
+ REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
+ Deallocate(old_ptr, 0, 0, stack, FROM_MALLOC);
+ }
+ return new_ptr;
+ }
+
+ void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
+ if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportCallocOverflow(nmemb, size, stack);
+ }
+ void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC);
+ // If the memory comes from the secondary allocator no need to clear it
+ // as it comes directly from mmap.
+ if (ptr && allocator.FromPrimary(ptr))
+ REAL(memset)(ptr, 0, nmemb * size);
+ return ptr;
+ }
+
+ void CommitBack(MemprofThreadLocalMallocStorage *ms,
+ BufferedStackTrace *stack) {
+ AllocatorCache *ac = GetAllocatorCache(ms);
+ allocator.SwallowCache(ac);
+ }
+
+ // -------------------------- Chunk lookup ----------------------
+
+ // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
+ MemprofChunk *GetMemprofChunk(void *alloc_beg, u64 &user_requested_size) {
+ if (!alloc_beg)
+ return nullptr;
+ MemprofChunk *p = reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Get();
+ if (!p) {
+ if (!allocator.FromPrimary(alloc_beg))
+ return nullptr;
+ p = reinterpret_cast<MemprofChunk *>(alloc_beg);
+ }
+ // The size is reset to 0 on deallocation (and a min of 1 on
+ // allocation).
+ user_requested_size =
+ atomic_load(&p->user_requested_size, memory_order_acquire);
+ if (user_requested_size)
+ return p;
+ return nullptr;
+ }
+
+ MemprofChunk *GetMemprofChunkByAddr(uptr p, u64 &user_requested_size) {
+ void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
+ return GetMemprofChunk(alloc_beg, user_requested_size);
+ }
+
+ uptr AllocationSize(uptr p) {
+ u64 user_requested_size;
+ MemprofChunk *m = GetMemprofChunkByAddr(p, user_requested_size);
+ if (!m)
+ return 0;
+ if (m->Beg() != p)
+ return 0;
+ return user_requested_size;
+ }
+
+ void Purge(BufferedStackTrace *stack) { allocator.ForceReleaseToOS(); }
+
+ void PrintStats() { allocator.PrintStats(); }
+
+ void ForceLock() {
+ allocator.ForceLock();
+ fallback_mutex.Lock();
+ }
+
+ void ForceUnlock() {
+ fallback_mutex.Unlock();
+ allocator.ForceUnlock();
+ }
+};
+
+static Allocator instance(LINKER_INITIALIZED);
+
+static MemprofAllocator &get_allocator() { return instance.allocator; }
+
+void InitializeAllocator() { instance.InitLinkerInitialized(); }
+
+void MemprofThreadLocalMallocStorage::CommitBack() {
+ GET_STACK_TRACE_MALLOC;
+ instance.CommitBack(this, &stack);
+}
+
+void PrintInternalAllocatorStats() { instance.PrintStats(); }
+
+void memprof_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
+ instance.Deallocate(ptr, 0, 0, stack, alloc_type);
+}
+
+void memprof_delete(void *ptr, uptr size, uptr alignment,
+ BufferedStackTrace *stack, AllocType alloc_type) {
+ instance.Deallocate(ptr, size, alignment, stack, alloc_type);
+}
+
+void *memprof_malloc(uptr size, BufferedStackTrace *stack) {
+ return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC));
+}
+
+void *memprof_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
+ return SetErrnoOnNull(instance.Calloc(nmemb, size, stack));
+}
+
+void *memprof_reallocarray(void *p, uptr nmemb, uptr size,
+ BufferedStackTrace *stack) {
+ if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
+ errno = errno_ENOMEM;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportReallocArrayOverflow(nmemb, size, stack);
+ }
+ return memprof_realloc(p, nmemb * size, stack);
+}
+
+void *memprof_realloc(void *p, uptr size, BufferedStackTrace *stack) {
+ if (!p)
+ return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC));
+ if (size == 0) {
+ if (flags()->allocator_frees_and_returns_null_on_realloc_zero) {
+ instance.Deallocate(p, 0, 0, stack, FROM_MALLOC);
+ return nullptr;
+ }
+ // Allocate a size of 1 if we shouldn't free() on Realloc to 0
+ size = 1;
+ }
+ return SetErrnoOnNull(instance.Reallocate(p, size, stack));
+}
+
+void *memprof_valloc(uptr size, BufferedStackTrace *stack) {
+ return SetErrnoOnNull(
+ instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC));
+}
+
+void *memprof_pvalloc(uptr size, BufferedStackTrace *stack) {
+ uptr PageSize = GetPageSizeCached();
+ if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
+ errno = errno_ENOMEM;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportPvallocOverflow(size, stack);
+ }
+ // pvalloc(0) should allocate one page.
+ size = size ? RoundUpTo(size, PageSize) : PageSize;
+ return SetErrnoOnNull(instance.Allocate(size, PageSize, stack, FROM_MALLOC));
+}
+
+void *memprof_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
+ AllocType alloc_type) {
+ if (UNLIKELY(!IsPowerOfTwo(alignment))) {
+ errno = errno_EINVAL;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportInvalidAllocationAlignment(alignment, stack);
+ }
+ return SetErrnoOnNull(instance.Allocate(size, alignment, stack, alloc_type));
+}
+
+void *memprof_aligned_alloc(uptr alignment, uptr size,
+ BufferedStackTrace *stack) {
+ if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
+ errno = errno_EINVAL;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportInvalidAlignedAllocAlignment(size, alignment, stack);
+ }
+ return SetErrnoOnNull(instance.Allocate(size, alignment, stack, FROM_MALLOC));
+}
+
+int memprof_posix_memalign(void **memptr, uptr alignment, uptr size,
+ BufferedStackTrace *stack) {
+ if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
+ if (AllocatorMayReturnNull())
+ return errno_EINVAL;
+ ReportInvalidPosixMemalignAlignment(alignment, stack);
+ }
+ void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC);
+ if (UNLIKELY(!ptr))
+ // OOM error is already taken care of by Allocate.
+ return errno_ENOMEM;
+ CHECK(IsAligned((uptr)ptr, alignment));
+ *memptr = ptr;
+ return 0;
+}
+
+uptr memprof_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
+ if (!ptr)
+ return 0;
+ uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
+ return usable_size;
+}
+
+void MemprofSoftRssLimitExceededCallback(bool limit_exceeded) {
+ instance.SetRssLimitExceeded(limit_exceeded);
+}
+
+} // namespace __memprof
+
+// ---------------------- Interface ---------------- {{{1
+using namespace __memprof;
+
+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
+// Provide default (no-op) implementation of malloc hooks.
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook, void *ptr,
+ uptr size) {
+ (void)ptr;
+ (void)size;
+}
+
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *ptr) {
+ (void)ptr;
+}
+#endif
+
+uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
+
+int __sanitizer_get_ownership(const void *p) {
+ return memprof_malloc_usable_size(p, 0, 0) != 0;
+}
+
+uptr __sanitizer_get_allocated_size(const void *p) {
+ return memprof_malloc_usable_size(p, 0, 0);
+}
+
+int __memprof_profile_dump() {
+ instance.FinishAndPrint();
+ // In the future we may want to return non-zero if there are any errors
+ // detected during the dumping process.
+ return 0;
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_allocator.h b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_allocator.h
new file mode 100644
index 000000000000..070b8b2f2737
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_allocator.h
@@ -0,0 +1,105 @@
+//===-- memprof_allocator.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemProfiler, a memory profiler.
+//
+// MemProf-private header for memprof_allocator.cpp.
+//===----------------------------------------------------------------------===//
+
+#ifndef MEMPROF_ALLOCATOR_H
+#define MEMPROF_ALLOCATOR_H
+
+#include "memprof_flags.h"
+#include "memprof_interceptors.h"
+#include "memprof_internal.h"
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_list.h"
+
+#if !defined(__x86_64__)
+#error Unsupported platform
+#endif
+#if !SANITIZER_CAN_USE_ALLOCATOR64
+#error Only 64-bit allocator supported
+#endif
+
+namespace __memprof {
+
+enum AllocType {
+ FROM_MALLOC = 1, // Memory block came from malloc, calloc, realloc, etc.
+ FROM_NEW = 2, // Memory block came from operator new.
+ FROM_NEW_BR = 3 // Memory block came from operator new [ ]
+};
+
+void InitializeAllocator();
+
+struct MemprofMapUnmapCallback {
+ void OnMap(uptr p, uptr size) const;
+ void OnUnmap(uptr p, uptr size) const;
+};
+
+constexpr uptr kAllocatorSpace = 0x600000000000ULL;
+constexpr uptr kAllocatorSize = 0x40000000000ULL; // 4T.
+typedef DefaultSizeClassMap SizeClassMap;
+template <typename AddressSpaceViewTy>
+struct AP64 { // Allocator64 parameters. Deliberately using a short name.
+ static const uptr kSpaceBeg = kAllocatorSpace;
+ static const uptr kSpaceSize = kAllocatorSize;
+ static const uptr kMetadataSize = 0;
+ typedef __memprof::SizeClassMap SizeClassMap;
+ typedef MemprofMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+ using AddressSpaceView = AddressSpaceViewTy;
+};
+
+template <typename AddressSpaceView>
+using PrimaryAllocatorASVT = SizeClassAllocator64<AP64<AddressSpaceView>>;
+using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
+
+static const uptr kNumberOfSizeClasses = SizeClassMap::kNumClasses;
+
+template <typename AddressSpaceView>
+using MemprofAllocatorASVT =
+ CombinedAllocator<PrimaryAllocatorASVT<AddressSpaceView>>;
+using MemprofAllocator = MemprofAllocatorASVT<LocalAddressSpaceView>;
+using AllocatorCache = MemprofAllocator::AllocatorCache;
+
+struct MemprofThreadLocalMallocStorage {
+ uptr quarantine_cache[16];
+ AllocatorCache allocator_cache;
+ void CommitBack();
+
+private:
+ // These objects are allocated via mmap() and are zero-initialized.
+ MemprofThreadLocalMallocStorage() {}
+};
+
+void *memprof_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
+ AllocType alloc_type);
+void memprof_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type);
+void memprof_delete(void *ptr, uptr size, uptr alignment,
+ BufferedStackTrace *stack, AllocType alloc_type);
+
+void *memprof_malloc(uptr size, BufferedStackTrace *stack);
+void *memprof_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack);
+void *memprof_realloc(void *p, uptr size, BufferedStackTrace *stack);
+void *memprof_reallocarray(void *p, uptr nmemb, uptr size,
+ BufferedStackTrace *stack);
+void *memprof_valloc(uptr size, BufferedStackTrace *stack);
+void *memprof_pvalloc(uptr size, BufferedStackTrace *stack);
+
+void *memprof_aligned_alloc(uptr alignment, uptr size,
+ BufferedStackTrace *stack);
+int memprof_posix_memalign(void **memptr, uptr alignment, uptr size,
+ BufferedStackTrace *stack);
+uptr memprof_malloc_usable_size(const void *ptr, uptr pc, uptr bp);
+
+void PrintInternalAllocatorStats();
+void MemprofSoftRssLimitExceededCallback(bool exceeded);
+
+} // namespace __memprof
+#endif // MEMPROF_ALLOCATOR_H
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_descriptions.cpp b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_descriptions.cpp
new file mode 100644
index 000000000000..ebd81d6f2f23
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_descriptions.cpp
@@ -0,0 +1,70 @@
+//===-- memprof_descriptions.cpp -------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemProfiler, a memory profiler.
+//
+// MemProf functions for getting information about an address and/or printing
+// it.
+//===----------------------------------------------------------------------===//
+
+#include "memprof_descriptions.h"
+#include "memprof_mapping.h"
+#include "memprof_stack.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+
+namespace __memprof {
+
+MemprofThreadIdAndName::MemprofThreadIdAndName(MemprofThreadContext *t) {
+ Init(t->tid, t->name);
+}
+
+MemprofThreadIdAndName::MemprofThreadIdAndName(u32 tid) {
+ if (tid == kInvalidTid) {
+ Init(tid, "");
+ } else {
+ memprofThreadRegistry().CheckLocked();
+ MemprofThreadContext *t = GetThreadContextByTidLocked(tid);
+ Init(tid, t->name);
+ }
+}
+
+void MemprofThreadIdAndName::Init(u32 tid, const char *tname) {
+ int len = internal_snprintf(name, sizeof(name), "T%d", tid);
+ CHECK(((unsigned int)len) < sizeof(name));
+ if (tname[0] != '\0')
+ internal_snprintf(&name[len], sizeof(name) - len, " (%s)", tname);
+}
+
+void DescribeThread(MemprofThreadContext *context) {
+ CHECK(context);
+ memprofThreadRegistry().CheckLocked();
+ // No need to announce the main thread.
+ if (context->tid == 0 || context->announced) {
+ return;
+ }
+ context->announced = true;
+ InternalScopedString str(1024);
+ str.append("Thread %s", MemprofThreadIdAndName(context).c_str());
+ if (context->parent_tid == kInvalidTid) {
+ str.append(" created by unknown thread\n");
+ Printf("%s", str.data());
+ return;
+ }
+ str.append(" created by %s here:\n",
+ MemprofThreadIdAndName(context->parent_tid).c_str());
+ Printf("%s", str.data());
+ StackDepotGet(context->stack_id).Print();
+ // Recursively describe parent thread if needed.
+ if (flags()->print_full_thread_history) {
+ MemprofThreadContext *parent_context =
+ GetThreadContextByTidLocked(context->parent_tid);
+ DescribeThread(parent_context);
+ }
+}
+
+} // namespace __memprof
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_descriptions.h b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_descriptions.h
new file mode 100644
index 000000000000..e88ea441bf9e
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_descriptions.h
@@ -0,0 +1,45 @@
+//===-- memprof_descriptions.h ---------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemProfiler, a memory profiler.
+//
+// MemProf-private header for memprof_descriptions.cpp.
+//===----------------------------------------------------------------------===//
+#ifndef MEMPROF_DESCRIPTIONS_H
+#define MEMPROF_DESCRIPTIONS_H
+
+#include "memprof_allocator.h"
+#include "memprof_thread.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_report_decorator.h"
+
+namespace __memprof {
+
+void DescribeThread(MemprofThreadContext *context);
+inline void DescribeThread(MemprofThread *t) {
+ if (t)
+ DescribeThread(t->context());
+}
+
+class MemprofThreadIdAndName {
+public:
+ explicit MemprofThreadIdAndName(MemprofThreadContext *t);
+ explicit MemprofThreadIdAndName(u32 tid);
+
+ // Contains "T%tid (%name)" or "T%tid" if the name is empty.
+ const char *c_str() const { return &name[0]; }
+
+private:
+ void Init(u32 tid, const char *tname);
+
+ char name[128];
+};
+
+} // namespace __memprof
+
+#endif // MEMPROF_DESCRIPTIONS_H
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_flags.cpp b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_flags.cpp
new file mode 100644
index 000000000000..b107ff8fa0a7
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_flags.cpp
@@ -0,0 +1,93 @@
+//===-- memprof_flags.cpp --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemProfiler, a memory profiler.
+//
+// MemProf flag parsing logic.
+//===----------------------------------------------------------------------===//
+
+#include "memprof_flags.h"
+#include "memprof_interface_internal.h"
+#include "memprof_stack.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
+#include "sanitizer_common/sanitizer_flags.h"
+
+namespace __memprof {
+
+Flags memprof_flags_dont_use_directly; // use via flags().
+
+static const char *MaybeUseMemprofDefaultOptionsCompileDefinition() {
+#ifdef MEMPROF_DEFAULT_OPTIONS
+ return SANITIZER_STRINGIFY(MEMPROF_DEFAULT_OPTIONS);
+#else
+ return "";
+#endif
+}
+
+void Flags::SetDefaults() {
+#define MEMPROF_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "memprof_flags.inc"
+#undef MEMPROF_FLAG
+}
+
+static void RegisterMemprofFlags(FlagParser *parser, Flags *f) {
+#define MEMPROF_FLAG(Type, Name, DefaultValue, Description) \
+ RegisterFlag(parser, #Name, Description, &f->Name);
+#include "memprof_flags.inc"
+#undef MEMPROF_FLAG
+}
+
+void InitializeFlags() {
+ // Set the default values and prepare for parsing MemProf and common flags.
+ SetCommonFlagsDefaults();
+ {
+ CommonFlags cf;
+ cf.CopyFrom(*common_flags());
+ cf.external_symbolizer_path = GetEnv("MEMPROF_SYMBOLIZER_PATH");
+ cf.malloc_context_size = kDefaultMallocContextSize;
+ cf.intercept_tls_get_addr = true;
+ cf.exitcode = 1;
+ OverrideCommonFlags(cf);
+ }
+ Flags *f = flags();
+ f->SetDefaults();
+
+ FlagParser memprof_parser;
+ RegisterMemprofFlags(&memprof_parser, f);
+ RegisterCommonFlags(&memprof_parser);
+
+ // Override from MemProf compile definition.
+ const char *memprof_compile_def =
+ MaybeUseMemprofDefaultOptionsCompileDefinition();
+ memprof_parser.ParseString(memprof_compile_def);
+
+ // Override from user-specified string.
+ const char *memprof_default_options = __memprof_default_options();
+ memprof_parser.ParseString(memprof_default_options);
+
+ // Override from command line.
+ memprof_parser.ParseStringFromEnv("MEMPROF_OPTIONS");
+
+ InitializeCommonFlags();
+
+ if (Verbosity())
+ ReportUnrecognizedFlags();
+
+ if (common_flags()->help) {
+ memprof_parser.PrintFlagDescriptions();
+ }
+
+ CHECK_LE((uptr)common_flags()->malloc_context_size, kStackTraceMax);
+}
+
+} // namespace __memprof
+
+SANITIZER_INTERFACE_WEAK_DEF(const char *, __memprof_default_options, void) {
+ return "";
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_flags.h b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_flags.h
new file mode 100644
index 000000000000..2f2b628653dc
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_flags.h
@@ -0,0 +1,45 @@
+//===-- memprof_flags.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemProfiler, a memory profiler.
+//
+// MemProf runtime flags.
+//===----------------------------------------------------------------------===//
+
+#ifndef MEMPROF_FLAGS_H
+#define MEMPROF_FLAGS_H
+
+#include "sanitizer_common/sanitizer_flag_parser.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+// MemProf flag values can be defined in four ways:
+// 1) initialized with default values at startup.
+// 2) overridden during compilation of MemProf runtime by providing
+// compile definition MEMPROF_DEFAULT_OPTIONS.
+// 3) overridden from string returned by user-specified function
+// __memprof_default_options().
+// 4) overridden from env variable MEMPROF_OPTIONS.
+
+namespace __memprof {
+
+struct Flags {
+#define MEMPROF_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "memprof_flags.inc"
+#undef MEMPROF_FLAG
+
+ void SetDefaults();
+};
+
+extern Flags memprof_flags_dont_use_directly;
+inline Flags *flags() { return &memprof_flags_dont_use_directly; }
+
+void InitializeFlags();
+
+} // namespace __memprof
+
+#endif // MEMPROF_FLAGS_H
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_flags.inc b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_flags.inc
new file mode 100644
index 000000000000..035fd15b9288
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_flags.inc
@@ -0,0 +1,49 @@
+//===-- memprof_flags.inc --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// MemProf runtime flags.
+//
+//===----------------------------------------------------------------------===//
+#ifndef MEMPROF_FLAG
+#error "Define MEMPROF_FLAG prior to including this file!"
+#endif
+
+// MEMPROF_FLAG(Type, Name, DefaultValue, Description)
+// See COMMON_FLAG in sanitizer_flags.inc for more details.
+
+MEMPROF_FLAG(bool, unmap_shadow_on_exit, false,
+ "If set, explicitly unmaps the (huge) shadow at exit.")
+MEMPROF_FLAG(bool, protect_shadow_gap, true, "If set, mprotect the shadow gap")
+MEMPROF_FLAG(bool, print_legend, true, "Print the legend for the shadow bytes.")
+MEMPROF_FLAG(bool, atexit, false,
+ "If set, prints MemProf exit stats even after program terminates "
+ "successfully.")
+MEMPROF_FLAG(
+ bool, print_full_thread_history, true,
+ "If set, prints thread creation stacks for the threads involved in the "
+ "report and their ancestors up to the main thread.")
+
+MEMPROF_FLAG(bool, halt_on_error, true,
+ "Crash the program after printing the first error report "
+ "(WARNING: USE AT YOUR OWN RISK!)")
+MEMPROF_FLAG(bool, allocator_frees_and_returns_null_on_realloc_zero, true,
+ "realloc(p, 0) is equivalent to free(p) by default (Same as the "
+ "POSIX standard). If set to false, realloc(p, 0) will return a "
+ "pointer to an allocated space which cannot be used.")
+MEMPROF_FLAG(bool, print_terse, false,
+ "If set, prints memory profile in a terse format.")
+
+MEMPROF_FLAG(
+ int, mem_info_cache_entries, 16381,
+ "Size in entries of the mem info block cache, should be closest prime"
+ " number to a power of two for best hashing.")
+MEMPROF_FLAG(bool, print_mem_info_cache_miss_rate, false,
+ "If set, prints the miss rate of the mem info block cache.")
+MEMPROF_FLAG(
+ bool, print_mem_info_cache_miss_rate_details, false,
+ "If set, prints detailed miss rates of the mem info block cache sets.")
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_init_version.h b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_init_version.h
new file mode 100644
index 000000000000..26c68f78677a
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_init_version.h
@@ -0,0 +1,26 @@
+//===-- memprof_init_version.h ---------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemProfiler, a memory profiler.
+//
+// This header defines a versioned __memprof_init function to be called at the
+// startup of the instrumented program.
+//===----------------------------------------------------------------------===//
+#ifndef MEMPROF_INIT_VERSION_H
+#define MEMPROF_INIT_VERSION_H
+
+#include "sanitizer_common/sanitizer_platform.h"
+
+extern "C" {
+// Every time the Memprof ABI changes we also change the version number in the
+// __memprof_init function name. Objects built with incompatible Memprof ABI
+// versions will not link with run-time.
+#define __memprof_version_mismatch_check __memprof_version_mismatch_check_v1
+}
+
+#endif // MEMPROF_INIT_VERSION_H
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors.cpp b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors.cpp
new file mode 100644
index 000000000000..caa629b9c474
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors.cpp
@@ -0,0 +1,366 @@
+//===-- memprof_interceptors.cpp -----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemProfiler, a memory profiler.
+//
+// Intercept various libc functions.
+//===----------------------------------------------------------------------===//
+
+#include "memprof_interceptors.h"
+#include "memprof_allocator.h"
+#include "memprof_internal.h"
+#include "memprof_mapping.h"
+#include "memprof_stack.h"
+#include "memprof_stats.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_posix.h"
+
+namespace __memprof {
+
+#define MEMPROF_READ_STRING(s, n) MEMPROF_READ_RANGE((s), (n))
+
+static inline uptr MaybeRealStrnlen(const char *s, uptr maxlen) {
+#if SANITIZER_INTERCEPT_STRNLEN
+ if (REAL(strnlen)) {
+ return REAL(strnlen)(s, maxlen);
+ }
+#endif
+ return internal_strnlen(s, maxlen);
+}
+
+void SetThreadName(const char *name) {
+ MemprofThread *t = GetCurrentThread();
+ if (t)
+ memprofThreadRegistry().SetThreadName(t->tid(), name);
+}
+
+int OnExit() {
+ // FIXME: ask frontend whether we need to return failure.
+ return 0;
+}
+
+} // namespace __memprof
+
+// ---------------------- Wrappers ---------------- {{{1
+using namespace __memprof;
+
+DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr)
+DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
+
+#define MEMPROF_INTERCEPTOR_ENTER(ctx, func) \
+ ctx = 0; \
+ (void)ctx;
+
+#define COMMON_INTERCEPT_FUNCTION(name) MEMPROF_INTERCEPT_FUNC(name)
+#define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \
+ MEMPROF_INTERCEPT_FUNC_VER(name, ver)
+#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
+ MEMPROF_WRITE_RANGE(ptr, size)
+#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
+ MEMPROF_READ_RANGE(ptr, size)
+#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
+ MEMPROF_INTERCEPTOR_ENTER(ctx, func); \
+ do { \
+ if (memprof_init_is_running) \
+ return REAL(func)(__VA_ARGS__); \
+ ENSURE_MEMPROF_INITED(); \
+ } while (false)
+#define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
+ do { \
+ } while (false)
+#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
+ do { \
+ } while (false)
+#define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \
+ do { \
+ } while (false)
+#define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \
+ do { \
+ } while (false)
+#define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) SetThreadName(name)
+// Should be memprofThreadRegistry().SetThreadNameByUserId(thread, name)
+// But memprof does not remember UserId's for threads (pthread_t);
+// and remembers all ever existed threads, so the linear search by UserId
+// can be slow.
+#define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \
+ do { \
+ } while (false)
+#define COMMON_INTERCEPTOR_BLOCK_REAL(name) REAL(name)
+#define COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag) \
+ do { \
+ CheckNoDeepBind(filename, flag); \
+ } while (false)
+#define COMMON_INTERCEPTOR_ON_EXIT(ctx) OnExit()
+#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle)
+#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED()
+#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (!memprof_inited)
+#define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \
+ if (MemprofThread *t = GetCurrentThread()) { \
+ *begin = t->tls_begin(); \
+ *end = t->tls_end(); \
+ } else { \
+ *begin = *end = 0; \
+ }
+
+#define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size) \
+ do { \
+ MEMPROF_INTERCEPTOR_ENTER(ctx, memmove); \
+ MEMPROF_MEMMOVE_IMPL(to, from, size); \
+ } while (false)
+
+#define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size) \
+ do { \
+ MEMPROF_INTERCEPTOR_ENTER(ctx, memcpy); \
+ MEMPROF_MEMCPY_IMPL(to, from, size); \
+ } while (false)
+
+#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size) \
+ do { \
+ MEMPROF_INTERCEPTOR_ENTER(ctx, memset); \
+ MEMPROF_MEMSET_IMPL(block, c, size); \
+ } while (false)
+
+#include "sanitizer_common/sanitizer_common_interceptors.inc"
+
+#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) MEMPROF_READ_RANGE(p, s)
+#define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) MEMPROF_WRITE_RANGE(p, s)
+#define COMMON_SYSCALL_POST_READ_RANGE(p, s) \
+ do { \
+ (void)(p); \
+ (void)(s); \
+ } while (false)
+#define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \
+ do { \
+ (void)(p); \
+ (void)(s); \
+ } while (false)
+#include "sanitizer_common/sanitizer_common_syscalls.inc"
+
+struct ThreadStartParam {
+ atomic_uintptr_t t;
+ atomic_uintptr_t is_registered;
+};
+
+static thread_return_t THREAD_CALLING_CONV memprof_thread_start(void *arg) {
+ ThreadStartParam *param = reinterpret_cast<ThreadStartParam *>(arg);
+ MemprofThread *t = nullptr;
+ while ((t = reinterpret_cast<MemprofThread *>(
+ atomic_load(&param->t, memory_order_acquire))) == nullptr)
+ internal_sched_yield();
+ SetCurrentThread(t);
+ return t->ThreadStart(GetTid(), &param->is_registered);
+}
+
+INTERCEPTOR(int, pthread_create, void *thread, void *attr,
+ void *(*start_routine)(void *), void *arg) {
+ EnsureMainThreadIDIsCorrect();
+ GET_STACK_TRACE_THREAD;
+ int detached = 0;
+ if (attr)
+ REAL(pthread_attr_getdetachstate)(attr, &detached);
+ ThreadStartParam param;
+ atomic_store(&param.t, 0, memory_order_relaxed);
+ atomic_store(&param.is_registered, 0, memory_order_relaxed);
+ int result;
+ {
+ // Ignore all allocations made by pthread_create: thread stack/TLS may be
+ // stored by pthread for future reuse even after thread destruction, and
+ // the linked list it's stored in doesn't even hold valid pointers to the
+ // objects, the latter are calculated by obscure pointer arithmetic.
+ result = REAL(pthread_create)(thread, attr, memprof_thread_start, &param);
+ }
+ if (result == 0) {
+ u32 current_tid = GetCurrentTidOrInvalid();
+ MemprofThread *t = MemprofThread::Create(start_routine, arg, current_tid,
+ &stack, detached);
+ atomic_store(&param.t, reinterpret_cast<uptr>(t), memory_order_release);
+ // Wait until the MemprofThread object is initialized and the
+ // ThreadRegistry entry is in "started" state.
+ while (atomic_load(&param.is_registered, memory_order_acquire) == 0)
+ internal_sched_yield();
+ }
+ return result;
+}
+
+INTERCEPTOR(int, pthread_join, void *t, void **arg) {
+ return real_pthread_join(t, arg);
+}
+
+DEFINE_REAL_PTHREAD_FUNCTIONS
+
+INTERCEPTOR(char *, index, const char *string, int c)
+ALIAS(WRAPPER_NAME(strchr));
+
+// For both strcat() and strncat() we need to check the validity of |to|
+// argument irrespective of the |from| length.
+INTERCEPTOR(char *, strcat, char *to, const char *from) {
+ void *ctx;
+ MEMPROF_INTERCEPTOR_ENTER(ctx, strcat);
+ ENSURE_MEMPROF_INITED();
+ uptr from_length = REAL(strlen)(from);
+ MEMPROF_READ_RANGE(from, from_length + 1);
+ uptr to_length = REAL(strlen)(to);
+ MEMPROF_READ_STRING(to, to_length);
+ MEMPROF_WRITE_RANGE(to + to_length, from_length + 1);
+ return REAL(strcat)(to, from);
+}
+
+INTERCEPTOR(char *, strncat, char *to, const char *from, uptr size) {
+ void *ctx;
+ MEMPROF_INTERCEPTOR_ENTER(ctx, strncat);
+ ENSURE_MEMPROF_INITED();
+ uptr from_length = MaybeRealStrnlen(from, size);
+ uptr copy_length = Min(size, from_length + 1);
+ MEMPROF_READ_RANGE(from, copy_length);
+ uptr to_length = REAL(strlen)(to);
+ MEMPROF_READ_STRING(to, to_length);
+ MEMPROF_WRITE_RANGE(to + to_length, from_length + 1);
+ return REAL(strncat)(to, from, size);
+}
+
+INTERCEPTOR(char *, strcpy, char *to, const char *from) {
+ void *ctx;
+ MEMPROF_INTERCEPTOR_ENTER(ctx, strcpy);
+ if (memprof_init_is_running) {
+ return REAL(strcpy)(to, from);
+ }
+ ENSURE_MEMPROF_INITED();
+ uptr from_size = REAL(strlen)(from) + 1;
+ MEMPROF_READ_RANGE(from, from_size);
+ MEMPROF_WRITE_RANGE(to, from_size);
+ return REAL(strcpy)(to, from);
+}
+
+INTERCEPTOR(char *, strdup, const char *s) {
+ void *ctx;
+ MEMPROF_INTERCEPTOR_ENTER(ctx, strdup);
+ if (UNLIKELY(!memprof_inited))
+ return internal_strdup(s);
+ ENSURE_MEMPROF_INITED();
+ uptr length = REAL(strlen)(s);
+ MEMPROF_READ_RANGE(s, length + 1);
+ GET_STACK_TRACE_MALLOC;
+ void *new_mem = memprof_malloc(length + 1, &stack);
+ REAL(memcpy)(new_mem, s, length + 1);
+ return reinterpret_cast<char *>(new_mem);
+}
+
+INTERCEPTOR(char *, __strdup, const char *s) {
+ void *ctx;
+ MEMPROF_INTERCEPTOR_ENTER(ctx, strdup);
+ if (UNLIKELY(!memprof_inited))
+ return internal_strdup(s);
+ ENSURE_MEMPROF_INITED();
+ uptr length = REAL(strlen)(s);
+ MEMPROF_READ_RANGE(s, length + 1);
+ GET_STACK_TRACE_MALLOC;
+ void *new_mem = memprof_malloc(length + 1, &stack);
+ REAL(memcpy)(new_mem, s, length + 1);
+ return reinterpret_cast<char *>(new_mem);
+}
+
+INTERCEPTOR(char *, strncpy, char *to, const char *from, uptr size) {
+ void *ctx;
+ MEMPROF_INTERCEPTOR_ENTER(ctx, strncpy);
+ ENSURE_MEMPROF_INITED();
+ uptr from_size = Min(size, MaybeRealStrnlen(from, size) + 1);
+ MEMPROF_READ_RANGE(from, from_size);
+ MEMPROF_WRITE_RANGE(to, size);
+ return REAL(strncpy)(to, from, size);
+}
+
+INTERCEPTOR(long, strtol, const char *nptr, char **endptr, int base) {
+ void *ctx;
+ MEMPROF_INTERCEPTOR_ENTER(ctx, strtol);
+ ENSURE_MEMPROF_INITED();
+ char *real_endptr;
+ long result = REAL(strtol)(nptr, &real_endptr, base);
+ StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
+ return result;
+}
+
+INTERCEPTOR(int, atoi, const char *nptr) {
+ void *ctx;
+ MEMPROF_INTERCEPTOR_ENTER(ctx, atoi);
+ ENSURE_MEMPROF_INITED();
+ char *real_endptr;
+ // "man atoi" tells that behavior of atoi(nptr) is the same as
+ // strtol(nptr, 0, 10), i.e. it sets errno to ERANGE if the
+ // parsed integer can't be stored in *long* type (even if it's
+ // different from int). So, we just imitate this behavior.
+ int result = REAL(strtol)(nptr, &real_endptr, 10);
+ FixRealStrtolEndptr(nptr, &real_endptr);
+ MEMPROF_READ_STRING(nptr, (real_endptr - nptr) + 1);
+ return result;
+}
+
+INTERCEPTOR(long, atol, const char *nptr) {
+ void *ctx;
+ MEMPROF_INTERCEPTOR_ENTER(ctx, atol);
+ ENSURE_MEMPROF_INITED();
+ char *real_endptr;
+ long result = REAL(strtol)(nptr, &real_endptr, 10);
+ FixRealStrtolEndptr(nptr, &real_endptr);
+ MEMPROF_READ_STRING(nptr, (real_endptr - nptr) + 1);
+ return result;
+}
+
+INTERCEPTOR(long long, strtoll, const char *nptr, char **endptr, int base) {
+ void *ctx;
+ MEMPROF_INTERCEPTOR_ENTER(ctx, strtoll);
+ ENSURE_MEMPROF_INITED();
+ char *real_endptr;
+ long long result = REAL(strtoll)(nptr, &real_endptr, base);
+ StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
+ return result;
+}
+
+INTERCEPTOR(long long, atoll, const char *nptr) {
+ void *ctx;
+ MEMPROF_INTERCEPTOR_ENTER(ctx, atoll);
+ ENSURE_MEMPROF_INITED();
+ char *real_endptr;
+ long long result = REAL(strtoll)(nptr, &real_endptr, 10);
+ FixRealStrtolEndptr(nptr, &real_endptr);
+ MEMPROF_READ_STRING(nptr, (real_endptr - nptr) + 1);
+ return result;
+}
+
+// ---------------------- InitializeMemprofInterceptors ---------------- {{{1
+namespace __memprof {
+void InitializeMemprofInterceptors() {
+ static bool was_called_once;
+ CHECK(!was_called_once);
+ was_called_once = true;
+ InitializeCommonInterceptors();
+
+ // Intercept str* functions.
+ MEMPROF_INTERCEPT_FUNC(strcat);
+ MEMPROF_INTERCEPT_FUNC(strcpy);
+ MEMPROF_INTERCEPT_FUNC(strncat);
+ MEMPROF_INTERCEPT_FUNC(strncpy);
+ MEMPROF_INTERCEPT_FUNC(strdup);
+ MEMPROF_INTERCEPT_FUNC(__strdup);
+ MEMPROF_INTERCEPT_FUNC(index);
+
+ MEMPROF_INTERCEPT_FUNC(atoi);
+ MEMPROF_INTERCEPT_FUNC(atol);
+ MEMPROF_INTERCEPT_FUNC(strtol);
+ MEMPROF_INTERCEPT_FUNC(atoll);
+ MEMPROF_INTERCEPT_FUNC(strtoll);
+
+ // Intercept threading-related functions
+ MEMPROF_INTERCEPT_FUNC(pthread_create);
+ MEMPROF_INTERCEPT_FUNC(pthread_join);
+
+ InitializePlatformInterceptors();
+
+ VReport(1, "MemProfiler: libc interceptors initialized\n");
+}
+
+} // namespace __memprof
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors.h b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors.h
new file mode 100644
index 000000000000..b6a4fa411254
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors.h
@@ -0,0 +1,54 @@
+//===-- memprof_interceptors.h ---------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemProfiler, a memory profiler.
+//
+// MemProf-private header for memprof_interceptors.cpp
+//===----------------------------------------------------------------------===//
+#ifndef MEMPROF_INTERCEPTORS_H
+#define MEMPROF_INTERCEPTORS_H
+
+#include "interception/interception.h"
+#include "memprof_interceptors_memintrinsics.h"
+#include "memprof_internal.h"
+#include "sanitizer_common/sanitizer_platform_interceptors.h"
+
+namespace __memprof {
+
+void InitializeMemprofInterceptors();
+void InitializePlatformInterceptors();
+
+#define ENSURE_MEMPROF_INITED() \
+ do { \
+ CHECK(!memprof_init_is_running); \
+ if (UNLIKELY(!memprof_inited)) { \
+ MemprofInitFromRtl(); \
+ } \
+ } while (0)
+
+} // namespace __memprof
+
+DECLARE_REAL(int, memcmp, const void *a1, const void *a2, uptr size)
+DECLARE_REAL(char *, strchr, const char *str, int c)
+DECLARE_REAL(SIZE_T, strlen, const char *s)
+DECLARE_REAL(char *, strncpy, char *to, const char *from, uptr size)
+DECLARE_REAL(uptr, strnlen, const char *s, uptr maxlen)
+DECLARE_REAL(char *, strstr, const char *s1, const char *s2)
+
+#define MEMPROF_INTERCEPT_FUNC(name) \
+ do { \
+ if (!INTERCEPT_FUNCTION(name)) \
+      VReport(1, "MemProfiler: failed to intercept '%s'\n", #name);          \
+ } while (0)
+#define MEMPROF_INTERCEPT_FUNC_VER(name, ver) \
+ do { \
+ if (!INTERCEPT_FUNCTION_VER(name, ver)) \
+ VReport(1, "MemProfiler: failed to intercept '%s@@%s'\n", #name, #ver); \
+ } while (0)
+
+#endif // MEMPROF_INTERCEPTORS_H
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors_memintrinsics.cpp b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors_memintrinsics.cpp
new file mode 100644
index 000000000000..4eb409362b57
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors_memintrinsics.cpp
@@ -0,0 +1,29 @@
+//===-- memprof_interceptors_memintrinsics.cpp ---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+//
+// This file is a part of MemProfiler, a memory profiler.
+//
+// MemProf versions of memcpy, memmove, and memset.
+//===---------------------------------------------------------------------===//
+
+#include "memprof_interceptors_memintrinsics.h"
+#include "memprof_stack.h"
+
+using namespace __memprof;
+
+void *__memprof_memcpy(void *to, const void *from, uptr size) {
+ MEMPROF_MEMCPY_IMPL(to, from, size);
+}
+
+void *__memprof_memset(void *block, int c, uptr size) {
+ MEMPROF_MEMSET_IMPL(block, c, size);
+}
+
+void *__memprof_memmove(void *to, const void *from, uptr size) {
+ MEMPROF_MEMMOVE_IMPL(to, from, size);
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors_memintrinsics.h b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors_memintrinsics.h
new file mode 100644
index 000000000000..348461d55c41
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors_memintrinsics.h
@@ -0,0 +1,79 @@
+//===-- memprof_interceptors_memintrinsics.h -------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+//
+// This file is a part of MemProfiler, a memory profiler.
+//
+// MemProf-private header for memprof_interceptors_memintrinsics.cpp
+//===---------------------------------------------------------------------===//
+#ifndef MEMPROF_MEMINTRIN_H
+#define MEMPROF_MEMINTRIN_H
+
+#include "interception/interception.h"
+#include "memprof_interface_internal.h"
+#include "memprof_internal.h"
+#include "memprof_mapping.h"
+
+DECLARE_REAL(void *, memcpy, void *to, const void *from, uptr size)
+DECLARE_REAL(void *, memset, void *block, int c, uptr size)
+
+namespace __memprof {
+
+// We implement ACCESS_MEMORY_RANGE, MEMPROF_READ_RANGE,
+// and MEMPROF_WRITE_RANGE as macros instead of functions so
+// that no extra frames are created, and the stack trace contains
+// only the relevant information.
+#define ACCESS_MEMORY_RANGE(offset, size) \
+ do { \
+ __memprof_record_access_range(offset, size); \
+ } while (0)
+
+// memcpy is called during __memprof_init() from the internals of printf(...).
+// We do not treat memcpy with to==from as a bug.
+// See http://llvm.org/bugs/show_bug.cgi?id=11763.
+#define MEMPROF_MEMCPY_IMPL(to, from, size) \
+ do { \
+ if (UNLIKELY(!memprof_inited)) \
+ return internal_memcpy(to, from, size); \
+ if (memprof_init_is_running) { \
+ return REAL(memcpy)(to, from, size); \
+ } \
+ ENSURE_MEMPROF_INITED(); \
+ MEMPROF_READ_RANGE(from, size); \
+ MEMPROF_WRITE_RANGE(to, size); \
+ return REAL(memcpy)(to, from, size); \
+ } while (0)
+
+// memset is called inside Printf.
+#define MEMPROF_MEMSET_IMPL(block, c, size) \
+ do { \
+ if (UNLIKELY(!memprof_inited)) \
+ return internal_memset(block, c, size); \
+ if (memprof_init_is_running) { \
+ return REAL(memset)(block, c, size); \
+ } \
+ ENSURE_MEMPROF_INITED(); \
+ MEMPROF_WRITE_RANGE(block, size); \
+ return REAL(memset)(block, c, size); \
+ } while (0)
+
+#define MEMPROF_MEMMOVE_IMPL(to, from, size) \
+ do { \
+ if (UNLIKELY(!memprof_inited)) \
+ return internal_memmove(to, from, size); \
+ ENSURE_MEMPROF_INITED(); \
+ MEMPROF_READ_RANGE(from, size); \
+ MEMPROF_WRITE_RANGE(to, size); \
+ return internal_memmove(to, from, size); \
+ } while (0)
+
+#define MEMPROF_READ_RANGE(offset, size) ACCESS_MEMORY_RANGE(offset, size)
+#define MEMPROF_WRITE_RANGE(offset, size) ACCESS_MEMORY_RANGE(offset, size)
+
+} // namespace __memprof
+
+#endif // MEMPROF_MEMINTRIN_H
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interface_internal.h b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interface_internal.h
new file mode 100644
index 000000000000..0aca4afc9afa
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interface_internal.h
@@ -0,0 +1,64 @@
+//===-- memprof_interface_internal.h ---------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemProfiler, a memory profiler.
+//
+// This header declares the MemProfiler runtime interface functions.
+// The runtime library has to define these functions so the instrumented program
+// could call them.
+//
+// See also include/sanitizer/memprof_interface.h
+//===----------------------------------------------------------------------===//
+#ifndef MEMPROF_INTERFACE_INTERNAL_H
+#define MEMPROF_INTERFACE_INTERNAL_H
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+#include "memprof_init_version.h"
+
+using __sanitizer::u32;
+using __sanitizer::u64;
+using __sanitizer::uptr;
+
+extern "C" {
+// This function should be called at the very beginning of the process,
+// before any instrumented code is executed and before any call to malloc.
+SANITIZER_INTERFACE_ATTRIBUTE void __memprof_init();
+SANITIZER_INTERFACE_ATTRIBUTE void __memprof_preinit();
+SANITIZER_INTERFACE_ATTRIBUTE void __memprof_version_mismatch_check_v1();
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __memprof_record_access(void const volatile *addr);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __memprof_record_access_range(void const volatile *addr, uptr size);
+
+SANITIZER_INTERFACE_ATTRIBUTE void __memprof_print_accumulated_stats();
+
+SANITIZER_INTERFACE_ATTRIBUTE
+const char *__memprof_default_options();
+
+SANITIZER_INTERFACE_ATTRIBUTE
+extern uptr __memprof_shadow_memory_dynamic_address;
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE extern char
+ __memprof_profile_filename[1];
+SANITIZER_INTERFACE_ATTRIBUTE int __memprof_profile_dump();
+
+SANITIZER_INTERFACE_ATTRIBUTE void __memprof_load(uptr p);
+SANITIZER_INTERFACE_ATTRIBUTE void __memprof_store(uptr p);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__memprof_memcpy(void *dst, const void *src, uptr size);
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__memprof_memset(void *s, int c, uptr n);
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__memprof_memmove(void *dest, const void *src, uptr n);
+} // extern "C"
+
+#endif // MEMPROF_INTERFACE_INTERNAL_H
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_internal.h b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_internal.h
new file mode 100644
index 000000000000..8d227887fe15
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_internal.h
@@ -0,0 +1,104 @@
+//===-- memprof_internal.h -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemProfiler, a memory profiler.
+//
+// MemProf-private header which defines various general utilities.
+//===----------------------------------------------------------------------===//
+#ifndef MEMPROF_INTERNAL_H
+#define MEMPROF_INTERNAL_H
+
+#include "memprof_flags.h"
+#include "memprof_interface_internal.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+
+#if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__)
+#error "The MemProfiler run-time should not be instrumented by MemProfiler"
+#endif
+
+// Build-time configuration options.
+
+// If set, memprof will intercept C++ exception api call(s).
+#ifndef MEMPROF_HAS_EXCEPTIONS
+#define MEMPROF_HAS_EXCEPTIONS 1
+#endif
+
+#ifndef MEMPROF_DYNAMIC
+#ifdef PIC
+#define MEMPROF_DYNAMIC 1
+#else
+#define MEMPROF_DYNAMIC 0
+#endif
+#endif
+
+// All internal functions in memprof reside inside the __memprof namespace
+// to avoid namespace collisions with the user programs.
+// Separate namespace also makes it simpler to distinguish the memprof
+// run-time functions from the instrumented user code in a profile.
+namespace __memprof {
+
+class MemprofThread;
+using __sanitizer::StackTrace;
+
+void MemprofInitFromRtl();
+
+// memprof_rtl.cpp
+void PrintAddressSpaceLayout();
+
+// memprof_shadow_setup.cpp
+void InitializeShadowMemory();
+
+// memprof_malloc_linux.cpp
+void ReplaceSystemMalloc();
+
+// memprof_linux.cpp
+uptr FindDynamicShadowStart();
+void *MemprofDoesNotSupportStaticLinkage();
+
+// memprof_thread.cpp
+MemprofThread *CreateMainThread();
+
+void ReadContextStack(void *context, uptr *stack, uptr *ssize);
+
+// Wrapper for TLS/TSD.
+void TSDInit(void (*destructor)(void *tsd));
+void *TSDGet();
+void TSDSet(void *tsd);
+void PlatformTSDDtor(void *tsd);
+
+void *MemprofDlSymNext(const char *sym);
+
+// Add convenient macro for interface functions that may be represented as
+// weak hooks.
+#define MEMPROF_MALLOC_HOOK(ptr, size) \
+ do { \
+ if (&__sanitizer_malloc_hook) \
+ __sanitizer_malloc_hook(ptr, size); \
+ RunMallocHooks(ptr, size); \
+ } while (false)
+#define MEMPROF_FREE_HOOK(ptr) \
+ do { \
+ if (&__sanitizer_free_hook) \
+ __sanitizer_free_hook(ptr); \
+ RunFreeHooks(ptr); \
+ } while (false)
+
+extern int memprof_inited;
+extern int memprof_timestamp_inited;
+extern int memprof_init_done;
+// Used to avoid infinite recursion in __memprof_init().
+extern bool memprof_init_is_running;
+extern void (*death_callback)(void);
+extern long memprof_init_timestamp_s;
+
+} // namespace __memprof
+
+#endif // MEMPROF_INTERNAL_H
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_linux.cpp b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_linux.cpp
new file mode 100644
index 000000000000..61c833bfdf64
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_linux.cpp
@@ -0,0 +1,80 @@
+//===-- memprof_linux.cpp ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemProfiler, a memory profiler.
+//
+// Linux-specific details.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if !SANITIZER_LINUX
+#error Unsupported OS
+#endif
+
+#include "memprof_interceptors.h"
+#include "memprof_internal.h"
+#include "memprof_thread.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_freebsd.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_procmaps.h"
+
+#include <dlfcn.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <link.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <sys/mman.h>
+#include <sys/resource.h>
+#include <sys/syscall.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/ucontext.h>
+#include <unistd.h>
+#include <unwind.h>
+
+extern ElfW(Dyn) _DYNAMIC[];
+
+typedef enum {
+ MEMPROF_RT_VERSION_UNDEFINED = 0,
+ MEMPROF_RT_VERSION_DYNAMIC,
+ MEMPROF_RT_VERSION_STATIC,
+} memprof_rt_version_t;
+
+// FIXME: perhaps also store abi version here?
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+memprof_rt_version_t __memprof_rt_version;
+}
+
+namespace __memprof {
+
+void InitializePlatformInterceptors() {}
+void InitializePlatformExceptionHandlers() {}
+
+void *MemprofDoesNotSupportStaticLinkage() {
+ // This will fail to link with -static.
+ return &_DYNAMIC;
+}
+
+uptr FindDynamicShadowStart() {
+ uptr shadow_size_bytes = MemToShadowSize(kHighMemEnd);
+ return MapDynamicShadow(shadow_size_bytes, SHADOW_SCALE,
+ /*min_shadow_base_alignment*/ 0, kHighMemEnd);
+}
+
+void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
+ ucontext_t *ucp = (ucontext_t *)context;
+ *stack = (uptr)ucp->uc_stack.ss_sp;
+ *ssize = ucp->uc_stack.ss_size;
+}
+
+void *MemprofDlSymNext(const char *sym) { return dlsym(RTLD_NEXT, sym); }
+
+} // namespace __memprof
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_malloc_linux.cpp b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_malloc_linux.cpp
new file mode 100644
index 000000000000..c7330f4619a1
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_malloc_linux.cpp
@@ -0,0 +1,226 @@
+//===-- memprof_malloc_linux.cpp -----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemProfiler, a memory profiler.
+//
+// Linux-specific malloc interception.
+// We simply define functions like malloc, free, realloc, etc.
+// They will replace the corresponding libc functions automagically.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if !SANITIZER_LINUX
+#error Unsupported OS
+#endif
+
+#include "memprof_allocator.h"
+#include "memprof_interceptors.h"
+#include "memprof_internal.h"
+#include "memprof_stack.h"
+#include "sanitizer_common/sanitizer_allocator_checks.h"
+#include "sanitizer_common/sanitizer_errno.h"
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
+
+// ---------------------- Replacement functions ---------------- {{{1
+using namespace __memprof;
+
+static uptr allocated_for_dlsym;
+static uptr last_dlsym_alloc_size_in_words;
+static const uptr kDlsymAllocPoolSize = 1024;
+static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize];
+
+static inline bool IsInDlsymAllocPool(const void *ptr) {
+ uptr off = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
+ return off < allocated_for_dlsym * sizeof(alloc_memory_for_dlsym[0]);
+}
+
+static void *AllocateFromLocalPool(uptr size_in_bytes) {
+ uptr size_in_words = RoundUpTo(size_in_bytes, kWordSize) / kWordSize;
+ void *mem = (void *)&alloc_memory_for_dlsym[allocated_for_dlsym];
+ last_dlsym_alloc_size_in_words = size_in_words;
+ allocated_for_dlsym += size_in_words;
+ CHECK_LT(allocated_for_dlsym, kDlsymAllocPoolSize);
+ return mem;
+}
+
+static void DeallocateFromLocalPool(const void *ptr) {
+ // Hack: since glibc 2.27 dlsym no longer uses stack-allocated memory to store
+ // error messages and instead uses malloc followed by free. To avoid pool
+ // exhaustion due to long object filenames, handle that special case here.
+ uptr prev_offset = allocated_for_dlsym - last_dlsym_alloc_size_in_words;
+ void *prev_mem = (void *)&alloc_memory_for_dlsym[prev_offset];
+ if (prev_mem == ptr) {
+ REAL(memset)(prev_mem, 0, last_dlsym_alloc_size_in_words * kWordSize);
+ allocated_for_dlsym = prev_offset;
+ last_dlsym_alloc_size_in_words = 0;
+ }
+}
+
+static int PosixMemalignFromLocalPool(void **memptr, uptr alignment,
+ uptr size_in_bytes) {
+ if (UNLIKELY(!CheckPosixMemalignAlignment(alignment)))
+ return errno_EINVAL;
+
+ CHECK(alignment >= kWordSize);
+
+ uptr addr = (uptr)&alloc_memory_for_dlsym[allocated_for_dlsym];
+ uptr aligned_addr = RoundUpTo(addr, alignment);
+ uptr aligned_size = RoundUpTo(size_in_bytes, kWordSize);
+
+ uptr *end_mem = (uptr *)(aligned_addr + aligned_size);
+ uptr allocated = end_mem - alloc_memory_for_dlsym;
+ if (allocated >= kDlsymAllocPoolSize)
+ return errno_ENOMEM;
+
+ allocated_for_dlsym = allocated;
+ *memptr = (void *)aligned_addr;
+ return 0;
+}
+
+static inline bool MaybeInDlsym() { return memprof_init_is_running; }
+
+static inline bool UseLocalPool() { return MaybeInDlsym(); }
+
+static void *ReallocFromLocalPool(void *ptr, uptr size) {
+ const uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
+ const uptr copy_size = Min(size, kDlsymAllocPoolSize - offset);
+ void *new_ptr;
+ if (UNLIKELY(UseLocalPool())) {
+ new_ptr = AllocateFromLocalPool(size);
+ } else {
+ ENSURE_MEMPROF_INITED();
+ GET_STACK_TRACE_MALLOC;
+ new_ptr = memprof_malloc(size, &stack);
+ }
+ internal_memcpy(new_ptr, ptr, copy_size);
+ return new_ptr;
+}
+
+INTERCEPTOR(void, free, void *ptr) {
+ GET_STACK_TRACE_FREE;
+ if (UNLIKELY(IsInDlsymAllocPool(ptr))) {
+ DeallocateFromLocalPool(ptr);
+ return;
+ }
+ memprof_free(ptr, &stack, FROM_MALLOC);
+}
+
+#if SANITIZER_INTERCEPT_CFREE
+INTERCEPTOR(void, cfree, void *ptr) {
+ GET_STACK_TRACE_FREE;
+ if (UNLIKELY(IsInDlsymAllocPool(ptr)))
+ return;
+ memprof_free(ptr, &stack, FROM_MALLOC);
+}
+#endif // SANITIZER_INTERCEPT_CFREE
+
+INTERCEPTOR(void *, malloc, uptr size) {
+ if (UNLIKELY(UseLocalPool()))
+ // Hack: dlsym calls malloc before REAL(malloc) is retrieved from dlsym.
+ return AllocateFromLocalPool(size);
+ ENSURE_MEMPROF_INITED();
+ GET_STACK_TRACE_MALLOC;
+ return memprof_malloc(size, &stack);
+}
+
+INTERCEPTOR(void *, calloc, uptr nmemb, uptr size) {
+ if (UNLIKELY(UseLocalPool()))
+ // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
+ return AllocateFromLocalPool(nmemb * size);
+ ENSURE_MEMPROF_INITED();
+ GET_STACK_TRACE_MALLOC;
+ return memprof_calloc(nmemb, size, &stack);
+}
+
+INTERCEPTOR(void *, realloc, void *ptr, uptr size) {
+ if (UNLIKELY(IsInDlsymAllocPool(ptr)))
+ return ReallocFromLocalPool(ptr, size);
+ if (UNLIKELY(UseLocalPool()))
+ return AllocateFromLocalPool(size);
+ ENSURE_MEMPROF_INITED();
+ GET_STACK_TRACE_MALLOC;
+ return memprof_realloc(ptr, size, &stack);
+}
+
+#if SANITIZER_INTERCEPT_REALLOCARRAY
+INTERCEPTOR(void *, reallocarray, void *ptr, uptr nmemb, uptr size) {
+ ENSURE_MEMPROF_INITED();
+ GET_STACK_TRACE_MALLOC;
+ return memprof_reallocarray(ptr, nmemb, size, &stack);
+}
+#endif // SANITIZER_INTERCEPT_REALLOCARRAY
+
+#if SANITIZER_INTERCEPT_MEMALIGN
+INTERCEPTOR(void *, memalign, uptr boundary, uptr size) {
+ GET_STACK_TRACE_MALLOC;
+ return memprof_memalign(boundary, size, &stack, FROM_MALLOC);
+}
+
+INTERCEPTOR(void *, __libc_memalign, uptr boundary, uptr size) {
+ GET_STACK_TRACE_MALLOC;
+ void *res = memprof_memalign(boundary, size, &stack, FROM_MALLOC);
+ DTLS_on_libc_memalign(res, size);
+ return res;
+}
+#endif // SANITIZER_INTERCEPT_MEMALIGN
+
+#if SANITIZER_INTERCEPT_ALIGNED_ALLOC
+INTERCEPTOR(void *, aligned_alloc, uptr boundary, uptr size) {
+ GET_STACK_TRACE_MALLOC;
+ return memprof_aligned_alloc(boundary, size, &stack);
+}
+#endif // SANITIZER_INTERCEPT_ALIGNED_ALLOC
+
+INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
+ GET_CURRENT_PC_BP_SP;
+ (void)sp;
+ return memprof_malloc_usable_size(ptr, pc, bp);
+}
+
+#if SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO
+// We avoid including malloc.h for portability reasons.
+// man mallinfo says the fields are "long", but the implementation uses int.
+// It doesn't matter much -- we just need to make sure that the libc's mallinfo
+// is not called.
+struct fake_mallinfo {
+ int x[10];
+};
+
+INTERCEPTOR(struct fake_mallinfo, mallinfo, void) {
+ struct fake_mallinfo res;
+ REAL(memset)(&res, 0, sizeof(res));
+ return res;
+}
+
+INTERCEPTOR(int, mallopt, int cmd, int value) { return 0; }
+#endif // SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO
+
+INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
+ if (UNLIKELY(UseLocalPool()))
+ return PosixMemalignFromLocalPool(memptr, alignment, size);
+ GET_STACK_TRACE_MALLOC;
+ return memprof_posix_memalign(memptr, alignment, size, &stack);
+}
+
+INTERCEPTOR(void *, valloc, uptr size) {
+ GET_STACK_TRACE_MALLOC;
+ return memprof_valloc(size, &stack);
+}
+
+#if SANITIZER_INTERCEPT_PVALLOC
+INTERCEPTOR(void *, pvalloc, uptr size) {
+ GET_STACK_TRACE_MALLOC;
+ return memprof_pvalloc(size, &stack);
+}
+#endif // SANITIZER_INTERCEPT_PVALLOC
+
+INTERCEPTOR(void, malloc_stats, void) { __memprof_print_accumulated_stats(); }
+
+namespace __memprof {
+void ReplaceSystemMalloc() {}
+} // namespace __memprof
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_mapping.h b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_mapping.h
new file mode 100644
index 000000000000..ba05b88db307
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_mapping.h
@@ -0,0 +1,113 @@
+//===-- memprof_mapping.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemProfiler, a memory profiler.
+//
+// Defines MemProf memory mapping.
+//===----------------------------------------------------------------------===//
+#ifndef MEMPROF_MAPPING_H
+#define MEMPROF_MAPPING_H
+
+#include "memprof_internal.h"
+
+static const u64 kDefaultShadowScale = 3;
+#define SHADOW_SCALE kDefaultShadowScale
+
+#define SHADOW_OFFSET __memprof_shadow_memory_dynamic_address
+
+#define SHADOW_GRANULARITY (1ULL << SHADOW_SCALE)
+#define MEMPROF_ALIGNMENT 32
+
+namespace __memprof {
+
+extern uptr kHighMemEnd; // Initialized in __memprof_init.
+
+} // namespace __memprof
+
+#define SHADOW_ENTRY_SIZE 8
+
+// Size of memory block mapped to a single shadow location
+#define MEM_GRANULARITY 64ULL
+
+#define SHADOW_MASK ~(MEM_GRANULARITY - 1)
+
+#define MEM_TO_SHADOW(mem) \
+ ((((mem) & SHADOW_MASK) >> SHADOW_SCALE) + (SHADOW_OFFSET))
+
+#define kLowMemBeg 0
+#define kLowMemEnd (SHADOW_OFFSET ? SHADOW_OFFSET - 1 : 0)
+
+#define kLowShadowBeg SHADOW_OFFSET
+#define kLowShadowEnd (MEM_TO_SHADOW(kLowMemEnd) + SHADOW_ENTRY_SIZE - 1)
+
+#define kHighMemBeg (MEM_TO_SHADOW(kHighMemEnd) + 1 + SHADOW_ENTRY_SIZE - 1)
+
+#define kHighShadowBeg MEM_TO_SHADOW(kHighMemBeg)
+#define kHighShadowEnd (MEM_TO_SHADOW(kHighMemEnd) + SHADOW_ENTRY_SIZE - 1)
+
+// With the zero shadow base we can not actually map pages starting from 0.
+// This constant is somewhat arbitrary.
+#define kZeroBaseShadowStart 0
+#define kZeroBaseMaxShadowStart (1 << 18)
+
+#define kShadowGapBeg (kLowShadowEnd ? kLowShadowEnd + 1 : kZeroBaseShadowStart)
+#define kShadowGapEnd (kHighShadowBeg - 1)
+
+namespace __memprof {
+
+inline uptr MemToShadowSize(uptr size) { return size >> SHADOW_SCALE; }
+inline bool AddrIsInLowMem(uptr a) { return a <= kLowMemEnd; }
+
+inline bool AddrIsInLowShadow(uptr a) {
+ return a >= kLowShadowBeg && a <= kLowShadowEnd;
+}
+
+inline bool AddrIsInHighMem(uptr a) {
+ return kHighMemBeg && a >= kHighMemBeg && a <= kHighMemEnd;
+}
+
+inline bool AddrIsInHighShadow(uptr a) {
+ return kHighMemBeg && a >= kHighShadowBeg && a <= kHighShadowEnd;
+}
+
+inline bool AddrIsInShadowGap(uptr a) {
+ // In zero-based shadow mode we treat addresses near zero as addresses
+ // in shadow gap as well.
+ if (SHADOW_OFFSET == 0)
+ return a <= kShadowGapEnd;
+ return a >= kShadowGapBeg && a <= kShadowGapEnd;
+}
+
+inline bool AddrIsInMem(uptr a) {
+ return AddrIsInLowMem(a) || AddrIsInHighMem(a) ||
+ (flags()->protect_shadow_gap == 0 && AddrIsInShadowGap(a));
+}
+
+inline uptr MemToShadow(uptr p) {
+ CHECK(AddrIsInMem(p));
+ return MEM_TO_SHADOW(p);
+}
+
+inline bool AddrIsInShadow(uptr a) {
+ return AddrIsInLowShadow(a) || AddrIsInHighShadow(a);
+}
+
+inline bool AddrIsAlignedByGranularity(uptr a) {
+ return (a & (SHADOW_GRANULARITY - 1)) == 0;
+}
+
+inline void RecordAccess(uptr a) {
+ // If we use a different shadow size then the type below needs adjustment.
+ CHECK_EQ(SHADOW_ENTRY_SIZE, 8);
+ u64 *shadow_address = (u64 *)MEM_TO_SHADOW(a);
+ (*shadow_address)++;
+}
+
+} // namespace __memprof
+
+#endif // MEMPROF_MAPPING_H
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_new_delete.cpp b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_new_delete.cpp
new file mode 100644
index 000000000000..cae5de301367
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_new_delete.cpp
@@ -0,0 +1,145 @@
+//===-- memprof_interceptors.cpp -----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemProfiler, a memory profiler.
+//
+// Interceptors for operators new and delete.
+//===----------------------------------------------------------------------===//
+
+#include "memprof_allocator.h"
+#include "memprof_internal.h"
+#include "memprof_stack.h"
+#include "sanitizer_common/sanitizer_allocator_report.h"
+
+#include "interception/interception.h"
+
+#include <stddef.h>
+
+#define CXX_OPERATOR_ATTRIBUTE INTERCEPTOR_ATTRIBUTE
+
+using namespace __memprof;
+
+// Fake std::nothrow_t and std::align_val_t to avoid including <new>.
+namespace std {
+struct nothrow_t {};
+enum class align_val_t : size_t {};
+} // namespace std
+
+#define OPERATOR_NEW_BODY(type, nothrow) \
+ GET_STACK_TRACE_MALLOC; \
+ void *res = memprof_memalign(0, size, &stack, type); \
+ if (!nothrow && UNLIKELY(!res)) \
+ ReportOutOfMemory(size, &stack); \
+ return res;
+#define OPERATOR_NEW_BODY_ALIGN(type, nothrow) \
+ GET_STACK_TRACE_MALLOC; \
+ void *res = memprof_memalign((uptr)align, size, &stack, type); \
+ if (!nothrow && UNLIKELY(!res)) \
+ ReportOutOfMemory(size, &stack); \
+ return res;
+
+CXX_OPERATOR_ATTRIBUTE
+void *operator new(size_t size) {
+ OPERATOR_NEW_BODY(FROM_NEW, false /*nothrow*/);
+}
+CXX_OPERATOR_ATTRIBUTE
+void *operator new[](size_t size) {
+ OPERATOR_NEW_BODY(FROM_NEW_BR, false /*nothrow*/);
+}
+CXX_OPERATOR_ATTRIBUTE
+void *operator new(size_t size, std::nothrow_t const &) {
+ OPERATOR_NEW_BODY(FROM_NEW, true /*nothrow*/);
+}
+CXX_OPERATOR_ATTRIBUTE
+void *operator new[](size_t size, std::nothrow_t const &) {
+ OPERATOR_NEW_BODY(FROM_NEW_BR, true /*nothrow*/);
+}
+CXX_OPERATOR_ATTRIBUTE
+void *operator new(size_t size, std::align_val_t align) {
+ OPERATOR_NEW_BODY_ALIGN(FROM_NEW, false /*nothrow*/);
+}
+CXX_OPERATOR_ATTRIBUTE
+void *operator new[](size_t size, std::align_val_t align) {
+ OPERATOR_NEW_BODY_ALIGN(FROM_NEW_BR, false /*nothrow*/);
+}
+CXX_OPERATOR_ATTRIBUTE
+void *operator new(size_t size, std::align_val_t align,
+ std::nothrow_t const &) {
+ OPERATOR_NEW_BODY_ALIGN(FROM_NEW, true /*nothrow*/);
+}
+CXX_OPERATOR_ATTRIBUTE
+void *operator new[](size_t size, std::align_val_t align,
+ std::nothrow_t const &) {
+ OPERATOR_NEW_BODY_ALIGN(FROM_NEW_BR, true /*nothrow*/);
+}
+
+#define OPERATOR_DELETE_BODY(type) \
+ GET_STACK_TRACE_FREE; \
+ memprof_delete(ptr, 0, 0, &stack, type);
+
+#define OPERATOR_DELETE_BODY_SIZE(type) \
+ GET_STACK_TRACE_FREE; \
+ memprof_delete(ptr, size, 0, &stack, type);
+
+#define OPERATOR_DELETE_BODY_ALIGN(type) \
+ GET_STACK_TRACE_FREE; \
+ memprof_delete(ptr, 0, static_cast<uptr>(align), &stack, type);
+
+#define OPERATOR_DELETE_BODY_SIZE_ALIGN(type) \
+ GET_STACK_TRACE_FREE; \
+ memprof_delete(ptr, size, static_cast<uptr>(align), &stack, type);
+
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr)NOEXCEPT { OPERATOR_DELETE_BODY(FROM_NEW); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr) NOEXCEPT {
+ OPERATOR_DELETE_BODY(FROM_NEW_BR);
+}
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr, std::nothrow_t const &) {
+ OPERATOR_DELETE_BODY(FROM_NEW);
+}
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr, std::nothrow_t const &) {
+ OPERATOR_DELETE_BODY(FROM_NEW_BR);
+}
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr, size_t size)NOEXCEPT {
+ OPERATOR_DELETE_BODY_SIZE(FROM_NEW);
+}
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr, size_t size) NOEXCEPT {
+ OPERATOR_DELETE_BODY_SIZE(FROM_NEW_BR);
+}
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr, std::align_val_t align)NOEXCEPT {
+ OPERATOR_DELETE_BODY_ALIGN(FROM_NEW);
+}
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr, std::align_val_t align) NOEXCEPT {
+ OPERATOR_DELETE_BODY_ALIGN(FROM_NEW_BR);
+}
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr, std::align_val_t align,
+ std::nothrow_t const &) {
+ OPERATOR_DELETE_BODY_ALIGN(FROM_NEW);
+}
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr, std::align_val_t align,
+ std::nothrow_t const &) {
+ OPERATOR_DELETE_BODY_ALIGN(FROM_NEW_BR);
+}
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr, size_t size, std::align_val_t align)NOEXCEPT {
+ OPERATOR_DELETE_BODY_SIZE_ALIGN(FROM_NEW);
+}
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr, size_t size,
+ std::align_val_t align) NOEXCEPT {
+ OPERATOR_DELETE_BODY_SIZE_ALIGN(FROM_NEW_BR);
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_posix.cpp b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_posix.cpp
new file mode 100644
index 000000000000..ee0821b85102
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_posix.cpp
@@ -0,0 +1,55 @@
+//===-- memprof_posix.cpp ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemProfiler, a memory profiler.
+//
+// Posix-specific details.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if !SANITIZER_POSIX
+#error Only Posix supported
+#endif
+
+#include "memprof_thread.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+#include <pthread.h>
+
+namespace __memprof {
+
+// ---------------------- TSD ---------------- {{{1
+
+static pthread_key_t tsd_key;
+static bool tsd_key_inited = false;
+void TSDInit(void (*destructor)(void *tsd)) {
+ CHECK(!tsd_key_inited);
+ tsd_key_inited = true;
+ CHECK_EQ(0, pthread_key_create(&tsd_key, destructor));
+}
+
+void *TSDGet() {
+ CHECK(tsd_key_inited);
+ return pthread_getspecific(tsd_key);
+}
+
+void TSDSet(void *tsd) {
+ CHECK(tsd_key_inited);
+ pthread_setspecific(tsd_key, tsd);
+}
+
+void PlatformTSDDtor(void *tsd) {
+ MemprofThreadContext *context = (MemprofThreadContext *)tsd;
+ if (context->destructor_iterations > 1) {
+ context->destructor_iterations--;
+ CHECK_EQ(0, pthread_setspecific(tsd_key, tsd));
+ return;
+ }
+ MemprofThread::TSDDtor(tsd);
+}
+} // namespace __memprof
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_preinit.cpp b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_preinit.cpp
new file mode 100644
index 000000000000..7092cd4ee556
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_preinit.cpp
@@ -0,0 +1,23 @@
+//===-- memprof_preinit.cpp ----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemProfiler, a memory profiler.
+//
+// Call __memprof_init at the very early stage of process startup.
+//===----------------------------------------------------------------------===//
+#include "memprof_internal.h"
+
+using namespace __memprof;
+
+#if SANITIZER_CAN_USE_PREINIT_ARRAY
+// The symbol is called __local_memprof_preinit, because it's not intended to
+// be exported. This code is linked into the main executable when -fmemory-profile
+// is in the link flags. It can only use exported interface functions.
+__attribute__((section(".preinit_array"),
+ used)) void (*__local_memprof_preinit)(void) = __memprof_preinit;
+#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_rtl.cpp b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_rtl.cpp
new file mode 100644
index 000000000000..d6d606f666ee
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_rtl.cpp
@@ -0,0 +1,321 @@
+//===-- memprof_rtl.cpp --------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemProfiler, a memory profiler.
+//
+// Main file of the MemProf run-time library.
+//===----------------------------------------------------------------------===//
+
+#include "memprof_allocator.h"
+#include "memprof_interceptors.h"
+#include "memprof_interface_internal.h"
+#include "memprof_internal.h"
+#include "memprof_mapping.h"
+#include "memprof_stack.h"
+#include "memprof_stats.h"
+#include "memprof_thread.h"
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_symbolizer.h"
+
+#include <time.h>
+
+uptr __memprof_shadow_memory_dynamic_address; // Global interface symbol.
+
+// Allow the user to specify a profile output file via the binary.
+SANITIZER_WEAK_ATTRIBUTE char __memprof_profile_filename[1];
+
+namespace __memprof {
+
+static void MemprofDie() {
+ static atomic_uint32_t num_calls;
+ if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) != 0) {
+ // Don't die twice - run a busy loop.
+ while (1) {
+ }
+ }
+ if (common_flags()->print_module_map >= 1)
+ DumpProcessMap();
+ if (flags()->unmap_shadow_on_exit) {
+ if (kHighShadowEnd)
+ UnmapOrDie((void *)kLowShadowBeg, kHighShadowEnd - kLowShadowBeg);
+ }
+}
+
+static void MemprofCheckFailed(const char *file, int line, const char *cond,
+ u64 v1, u64 v2) {
+ Report("MemProfiler CHECK failed: %s:%d \"%s\" (0x%zx, 0x%zx)\n", file, line,
+ cond, (uptr)v1, (uptr)v2);
+
+ // Print a stack trace the first time we come here. Otherwise, we probably
+ // failed a CHECK during symbolization.
+ static atomic_uint32_t num_calls;
+ if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) == 0) {
+ PRINT_CURRENT_STACK_CHECK();
+ }
+
+ Die();
+}
+
+// -------------------------- Globals --------------------- {{{1
+int memprof_inited;
+int memprof_init_done;
+bool memprof_init_is_running;
+int memprof_timestamp_inited;
+long memprof_init_timestamp_s;
+
+uptr kHighMemEnd;
+
+// -------------------------- Run-time entry ------------------- {{{1
+// exported functions
+
+#define MEMPROF_MEMORY_ACCESS_CALLBACK_BODY() __memprof::RecordAccess(addr);
+
+#define MEMPROF_MEMORY_ACCESS_CALLBACK(type) \
+ extern "C" NOINLINE INTERFACE_ATTRIBUTE void __memprof_##type(uptr addr) { \
+ MEMPROF_MEMORY_ACCESS_CALLBACK_BODY() \
+ }
+
+MEMPROF_MEMORY_ACCESS_CALLBACK(load)
+MEMPROF_MEMORY_ACCESS_CALLBACK(store)
+
+// Force the linker to keep the symbols for various MemProf interface
+// functions. We want to keep those in the executable in order to let the
+// instrumented dynamic libraries access the symbol even if it is not used by
+// the executable itself. This should help if the build system is removing dead
+// code at link time.
+static NOINLINE void force_interface_symbols() {
+ volatile int fake_condition = 0; // prevent dead condition elimination.
+ // clang-format off
+ switch (fake_condition) {
+ case 1: __memprof_record_access(nullptr); break;
+ case 2: __memprof_record_access_range(nullptr, 0); break;
+ }
+ // clang-format on
+}
+
+static void memprof_atexit() {
+ Printf("MemProfiler exit stats:\n");
+ __memprof_print_accumulated_stats();
+}
+
+static void InitializeHighMemEnd() {
+ kHighMemEnd = GetMaxUserVirtualAddress();
+ // Increase kHighMemEnd to make sure it's properly
+ // aligned together with kHighMemBeg:
+ kHighMemEnd |= (GetMmapGranularity() << SHADOW_SCALE) - 1;
+}
+
+void PrintAddressSpaceLayout() {
+ if (kHighMemBeg) {
+ Printf("|| `[%p, %p]` || HighMem ||\n", (void *)kHighMemBeg,
+ (void *)kHighMemEnd);
+ Printf("|| `[%p, %p]` || HighShadow ||\n", (void *)kHighShadowBeg,
+ (void *)kHighShadowEnd);
+ }
+ Printf("|| `[%p, %p]` || ShadowGap ||\n", (void *)kShadowGapBeg,
+ (void *)kShadowGapEnd);
+ if (kLowShadowBeg) {
+ Printf("|| `[%p, %p]` || LowShadow ||\n", (void *)kLowShadowBeg,
+ (void *)kLowShadowEnd);
+ Printf("|| `[%p, %p]` || LowMem ||\n", (void *)kLowMemBeg,
+ (void *)kLowMemEnd);
+ }
+ Printf("MemToShadow(shadow): %p %p", (void *)MEM_TO_SHADOW(kLowShadowBeg),
+ (void *)MEM_TO_SHADOW(kLowShadowEnd));
+ if (kHighMemBeg) {
+ Printf(" %p %p", (void *)MEM_TO_SHADOW(kHighShadowBeg),
+ (void *)MEM_TO_SHADOW(kHighShadowEnd));
+ }
+ Printf("\n");
+ Printf("malloc_context_size=%zu\n",
+ (uptr)common_flags()->malloc_context_size);
+
+ Printf("SHADOW_SCALE: %d\n", (int)SHADOW_SCALE);
+ Printf("SHADOW_GRANULARITY: %d\n", (int)SHADOW_GRANULARITY);
+ Printf("SHADOW_OFFSET: 0x%zx\n", (uptr)SHADOW_OFFSET);
+ CHECK(SHADOW_SCALE >= 3 && SHADOW_SCALE <= 7);
+}
+
+static bool UNUSED __local_memprof_dyninit = [] {
+ MaybeStartBackgroudThread();
+ SetSoftRssLimitExceededCallback(MemprofSoftRssLimitExceededCallback);
+
+ return false;
+}();
+
+static void MemprofInitInternal() {
+ if (LIKELY(memprof_inited))
+ return;
+ SanitizerToolName = "MemProfiler";
+ CHECK(!memprof_init_is_running && "MemProf init calls itself!");
+ memprof_init_is_running = true;
+
+ CacheBinaryName();
+
+ // Initialize flags. This must be done early, because most of the
+ // initialization steps look at flags().
+ InitializeFlags();
+
+ AvoidCVE_2016_2143();
+
+ SetMallocContextSize(common_flags()->malloc_context_size);
+
+ InitializeHighMemEnd();
+
+ // Make sure we are not statically linked.
+ MemprofDoesNotSupportStaticLinkage();
+
+ // Install tool-specific callbacks in sanitizer_common.
+ AddDieCallback(MemprofDie);
+ SetCheckFailedCallback(MemprofCheckFailed);
+
+ // Use profile name specified via the binary itself if it exists, and hasn't
+  // been overridden by a flag at runtime.
+ if (__memprof_profile_filename[0] != 0 && !common_flags()->log_path)
+ __sanitizer_set_report_path(__memprof_profile_filename);
+ else
+ __sanitizer_set_report_path(common_flags()->log_path);
+
+ __sanitizer::InitializePlatformEarly();
+
+ // Re-exec ourselves if we need to set additional env or command line args.
+ MaybeReexec();
+
+ // Setup internal allocator callback.
+ SetLowLevelAllocateMinAlignment(SHADOW_GRANULARITY);
+
+ InitializeMemprofInterceptors();
+ CheckASLR();
+
+ ReplaceSystemMalloc();
+
+ DisableCoreDumperIfNecessary();
+
+ InitializeShadowMemory();
+
+ TSDInit(PlatformTSDDtor);
+
+ InitializeAllocator();
+
+ // On Linux MemprofThread::ThreadStart() calls malloc() that's why
+ // memprof_inited should be set to 1 prior to initializing the threads.
+ memprof_inited = 1;
+ memprof_init_is_running = false;
+
+ if (flags()->atexit)
+ Atexit(memprof_atexit);
+
+ InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);
+
+ // interceptors
+ InitTlsSize();
+
+ // Create main thread.
+ MemprofThread *main_thread = CreateMainThread();
+ CHECK_EQ(0, main_thread->tid());
+ force_interface_symbols(); // no-op.
+ SanitizerInitializeUnwinder();
+
+ Symbolizer::LateInitialize();
+
+ VReport(1, "MemProfiler Init done\n");
+
+ memprof_init_done = 1;
+}
+
+void MemprofInitTime() {
+ if (LIKELY(memprof_timestamp_inited))
+ return;
+ timespec ts;
+ clock_gettime(CLOCK_REALTIME, &ts);
+ memprof_init_timestamp_s = ts.tv_sec;
+ memprof_timestamp_inited = 1;
+}
+
+// Initialize as requested from some part of MemProf runtime library
+// (interceptors, allocator, etc).
+void MemprofInitFromRtl() { MemprofInitInternal(); }
+
+#if MEMPROF_DYNAMIC
+// Initialize runtime in case it's LD_PRELOAD-ed into uninstrumented executable
+// (and thus normal initializers from .preinit_array or modules haven't run).
+
+class MemprofInitializer {
+public:
+ MemprofInitializer() { MemprofInitFromRtl(); }
+};
+
+static MemprofInitializer memprof_initializer;
+#endif // MEMPROF_DYNAMIC
+
+} // namespace __memprof
+
+// ---------------------- Interface ---------------- {{{1
+using namespace __memprof;
+
+// Initialize as requested from instrumented application code.
+void __memprof_init() {
+ MemprofInitTime();
+ MemprofInitInternal();
+}
+
+void __memprof_preinit() { MemprofInitInternal(); }
+
+void __memprof_version_mismatch_check_v1() {}
+
+void __memprof_record_access(void const volatile *addr) {
+ __memprof::RecordAccess((uptr)addr);
+}
+
+// We only record the access on the first location in the range,
+// since we will later accumulate the access counts across the
+// full allocation, and we don't want to inflate the hotness from
+// a memory intrinsic on a large range of memory.
+// TODO: Should we do something else so we can better track utilization?
+void __memprof_record_access_range(void const volatile *addr,
+ UNUSED uptr size) {
+ __memprof::RecordAccess((uptr)addr);
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE u16
+__sanitizer_unaligned_load16(const uu16 *p) {
+ __memprof_record_access(p);
+ return *p;
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE u32
+__sanitizer_unaligned_load32(const uu32 *p) {
+ __memprof_record_access(p);
+ return *p;
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE u64
+__sanitizer_unaligned_load64(const uu64 *p) {
+ __memprof_record_access(p);
+ return *p;
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
+__sanitizer_unaligned_store16(uu16 *p, u16 x) {
+ __memprof_record_access(p);
+ *p = x;
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
+__sanitizer_unaligned_store32(uu32 *p, u32 x) {
+ __memprof_record_access(p);
+ *p = x;
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
+__sanitizer_unaligned_store64(uu64 *p, u64 x) {
+ __memprof_record_access(p);
+ *p = x;
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_shadow_setup.cpp b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_shadow_setup.cpp
new file mode 100644
index 000000000000..e7832f656ee8
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_shadow_setup.cpp
@@ -0,0 +1,62 @@
+//===-- memprof_shadow_setup.cpp -----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemProfiler, a memory profiler.
+//
+// Set up the shadow memory.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+
+#include "memprof_internal.h"
+#include "memprof_mapping.h"
+
+namespace __memprof {
+
+static void ProtectGap(uptr addr, uptr size) {
+ if (!flags()->protect_shadow_gap) {
+ // The shadow gap is unprotected, so there is a chance that someone
+ // is actually using this memory. Which means it needs a shadow...
+ uptr GapShadowBeg = RoundDownTo(MEM_TO_SHADOW(addr), GetPageSizeCached());
+ uptr GapShadowEnd =
+ RoundUpTo(MEM_TO_SHADOW(addr + size), GetPageSizeCached()) - 1;
+ if (Verbosity())
+ Printf("protect_shadow_gap=0:"
+ " not protecting shadow gap, allocating gap's shadow\n"
+ "|| `[%p, %p]` || ShadowGap's shadow ||\n",
+ GapShadowBeg, GapShadowEnd);
+ ReserveShadowMemoryRange(GapShadowBeg, GapShadowEnd,
+ "unprotected gap shadow");
+ return;
+ }
+ __sanitizer::ProtectGap(addr, size, kZeroBaseShadowStart,
+ kZeroBaseMaxShadowStart);
+}
+
+void InitializeShadowMemory() {
+ uptr shadow_start = FindDynamicShadowStart();
+ // Update the shadow memory address (potentially) used by instrumentation.
+ __memprof_shadow_memory_dynamic_address = shadow_start;
+
+ if (kLowShadowBeg)
+ shadow_start -= GetMmapGranularity();
+
+ if (Verbosity())
+ PrintAddressSpaceLayout();
+
+ // mmap the low shadow plus at least one page at the left.
+ if (kLowShadowBeg)
+ ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow");
+ // mmap the high shadow.
+ ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow");
+ // protect the gap.
+ ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1);
+ CHECK_EQ(kShadowGapEnd, kHighShadowBeg - 1);
+}
+
+} // namespace __memprof
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_stack.cpp b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_stack.cpp
new file mode 100644
index 000000000000..b5beeeadafd7
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_stack.cpp
@@ -0,0 +1,59 @@
+//===-- memprof_stack.cpp ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemProfiler, a memory profiler.
+//
+// Code for MemProf stack trace.
+//===----------------------------------------------------------------------===//
+#include "memprof_stack.h"
+#include "memprof_internal.h"
+#include "sanitizer_common/sanitizer_atomic.h"
+
+namespace __memprof {
+
+static atomic_uint32_t malloc_context_size;
+
+void SetMallocContextSize(u32 size) {
+ atomic_store(&malloc_context_size, size, memory_order_release);
+}
+
+u32 GetMallocContextSize() {
+ return atomic_load(&malloc_context_size, memory_order_acquire);
+}
+
+} // namespace __memprof
+
+void __sanitizer::BufferedStackTrace::UnwindImpl(uptr pc, uptr bp,
+ void *context,
+ bool request_fast,
+ u32 max_depth) {
+ using namespace __memprof;
+ size = 0;
+ if (UNLIKELY(!memprof_inited))
+ return;
+ request_fast = StackTrace::WillUseFastUnwind(request_fast);
+ MemprofThread *t = GetCurrentThread();
+ if (request_fast) {
+ if (t) {
+ Unwind(max_depth, pc, bp, nullptr, t->stack_top(), t->stack_bottom(),
+ true);
+ }
+ return;
+ }
+ Unwind(max_depth, pc, bp, context, 0, 0, false);
+}
+
+// ------------------ Interface -------------- {{{1
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_print_stack_trace() {
+ using namespace __memprof;
+ PRINT_CURRENT_STACK();
+}
+} // extern "C"
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_stack.h b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_stack.h
new file mode 100644
index 000000000000..289a61e385a2
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_stack.h
@@ -0,0 +1,75 @@
+//===-- memprof_stack.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemProfiler, a memory profiler.
+//
+// MemProf-private header for memprof_stack.cpp.
+//===----------------------------------------------------------------------===//
+
+#ifndef MEMPROF_STACK_H
+#define MEMPROF_STACK_H
+
+#include "memprof_flags.h"
+#include "memprof_thread.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+
+namespace __memprof {
+
+static const u32 kDefaultMallocContextSize = 30;
+
+void SetMallocContextSize(u32 size);
+u32 GetMallocContextSize();
+
+} // namespace __memprof
+
+// NOTE: A Rule of thumb is to retrieve stack trace in the interceptors
+// as early as possible (in functions exposed to the user), as we generally
+// don't want stack trace to contain functions from MemProf internals.
+
+#define GET_STACK_TRACE(max_size, fast) \
+ BufferedStackTrace stack; \
+ if (max_size <= 2) { \
+ stack.size = max_size; \
+ if (max_size > 0) { \
+ stack.top_frame_bp = GET_CURRENT_FRAME(); \
+ stack.trace_buffer[0] = StackTrace::GetCurrentPc(); \
+ if (max_size > 1) \
+ stack.trace_buffer[1] = GET_CALLER_PC(); \
+ } \
+ } else { \
+ stack.Unwind(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), nullptr, \
+ fast, max_size); \
+ }
+
+#define GET_STACK_TRACE_FATAL_HERE \
+ GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal)
+
+#define GET_STACK_TRACE_CHECK_HERE \
+ GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_check)
+
+#define GET_STACK_TRACE_THREAD GET_STACK_TRACE(kStackTraceMax, true)
+
+#define GET_STACK_TRACE_MALLOC \
+ GET_STACK_TRACE(GetMallocContextSize(), common_flags()->fast_unwind_on_malloc)
+
+#define GET_STACK_TRACE_FREE GET_STACK_TRACE_MALLOC
+
+#define PRINT_CURRENT_STACK() \
+ { \
+ GET_STACK_TRACE_FATAL_HERE; \
+ stack.Print(); \
+ }
+
+#define PRINT_CURRENT_STACK_CHECK() \
+ { \
+ GET_STACK_TRACE_CHECK_HERE; \
+ stack.Print(); \
+ }
+
+#endif // MEMPROF_STACK_H
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_stats.cpp b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_stats.cpp
new file mode 100644
index 000000000000..8a50d270dc6a
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_stats.cpp
@@ -0,0 +1,157 @@
+//===-- memprof_stats.cpp ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemProfiler, a memory profiler.
+//
+// Code related to statistics collected by MemProfiler.
+//===----------------------------------------------------------------------===//
+#include "memprof_stats.h"
+#include "memprof_interceptors.h"
+#include "memprof_internal.h"
+#include "memprof_thread.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_mutex.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+
+namespace __memprof {
+
+MemprofStats::MemprofStats() { Clear(); }
+
+void MemprofStats::Clear() {
+ if (REAL(memset))
+ return (void)REAL(memset)(this, 0, sizeof(MemprofStats));
+ internal_memset(this, 0, sizeof(MemprofStats));
+}
+
+static void PrintMallocStatsArray(const char *prefix,
+ uptr (&array)[kNumberOfSizeClasses]) {
+ Printf("%s", prefix);
+ for (uptr i = 0; i < kNumberOfSizeClasses; i++) {
+ if (!array[i])
+ continue;
+ Printf("%zu:%zu; ", i, array[i]);
+ }
+ Printf("\n");
+}
+
+void MemprofStats::Print() {
+ Printf("Stats: %zuM malloced (%zuM for overhead) by %zu calls\n",
+ malloced >> 20, malloced_overhead >> 20, mallocs);
+ Printf("Stats: %zuM realloced by %zu calls\n", realloced >> 20, reallocs);
+ Printf("Stats: %zuM freed by %zu calls\n", freed >> 20, frees);
+ Printf("Stats: %zuM really freed by %zu calls\n", really_freed >> 20,
+ real_frees);
+ Printf("Stats: %zuM (%zuM-%zuM) mmaped; %zu maps, %zu unmaps\n",
+ (mmaped - munmaped) >> 20, mmaped >> 20, munmaped >> 20, mmaps,
+ munmaps);
+
+ PrintMallocStatsArray(" mallocs by size class: ", malloced_by_size);
+ Printf("Stats: malloc large: %zu\n", malloc_large);
+}
+
+void MemprofStats::MergeFrom(const MemprofStats *stats) {
+ uptr *dst_ptr = reinterpret_cast<uptr *>(this);
+ const uptr *src_ptr = reinterpret_cast<const uptr *>(stats);
+ uptr num_fields = sizeof(*this) / sizeof(uptr);
+ for (uptr i = 0; i < num_fields; i++)
+ dst_ptr[i] += src_ptr[i];
+}
+
+static BlockingMutex print_lock(LINKER_INITIALIZED);
+
+static MemprofStats unknown_thread_stats(LINKER_INITIALIZED);
+static MemprofStats dead_threads_stats(LINKER_INITIALIZED);
+static BlockingMutex dead_threads_stats_lock(LINKER_INITIALIZED);
+// Required for malloc_zone_statistics() on OS X. This can't be stored in
+// per-thread MemprofStats.
+static uptr max_malloced_memory;
+
+static void MergeThreadStats(ThreadContextBase *tctx_base, void *arg) {
+ MemprofStats *accumulated_stats = reinterpret_cast<MemprofStats *>(arg);
+ MemprofThreadContext *tctx = static_cast<MemprofThreadContext *>(tctx_base);
+ if (MemprofThread *t = tctx->thread)
+ accumulated_stats->MergeFrom(&t->stats());
+}
+
+static void GetAccumulatedStats(MemprofStats *stats) {
+ stats->Clear();
+ {
+ ThreadRegistryLock l(&memprofThreadRegistry());
+ memprofThreadRegistry().RunCallbackForEachThreadLocked(MergeThreadStats,
+ stats);
+ }
+ stats->MergeFrom(&unknown_thread_stats);
+ {
+ BlockingMutexLock lock(&dead_threads_stats_lock);
+ stats->MergeFrom(&dead_threads_stats);
+ }
+ // This is not very accurate: we may miss allocation peaks that happen
+ // between two updates of accumulated_stats_. For more accurate bookkeeping
+ // the maximum should be updated on every malloc(), which is unacceptable.
+ if (max_malloced_memory < stats->malloced) {
+ max_malloced_memory = stats->malloced;
+ }
+}
+
+void FlushToDeadThreadStats(MemprofStats *stats) {
+ BlockingMutexLock lock(&dead_threads_stats_lock);
+ dead_threads_stats.MergeFrom(stats);
+ stats->Clear();
+}
+
+MemprofStats &GetCurrentThreadStats() {
+ MemprofThread *t = GetCurrentThread();
+ return (t) ? t->stats() : unknown_thread_stats;
+}
+
+static void PrintAccumulatedStats() {
+ MemprofStats stats;
+ GetAccumulatedStats(&stats);
+ // Use lock to keep reports from mixing up.
+ BlockingMutexLock lock(&print_lock);
+ stats.Print();
+ StackDepotStats *stack_depot_stats = StackDepotGetStats();
+ Printf("Stats: StackDepot: %zd ids; %zdM allocated\n",
+ stack_depot_stats->n_uniq_ids, stack_depot_stats->allocated >> 20);
+ PrintInternalAllocatorStats();
+}
+
+} // namespace __memprof
+
+// ---------------------- Interface ---------------- {{{1
+using namespace __memprof;
+
+uptr __sanitizer_get_current_allocated_bytes() {
+ MemprofStats stats;
+ GetAccumulatedStats(&stats);
+ uptr malloced = stats.malloced;
+ uptr freed = stats.freed;
+ // Return sane value if malloced < freed due to racy
+ // way we update accumulated stats.
+ return (malloced > freed) ? malloced - freed : 1;
+}
+
+uptr __sanitizer_get_heap_size() {
+ MemprofStats stats;
+ GetAccumulatedStats(&stats);
+ return stats.mmaped - stats.munmaped;
+}
+
+uptr __sanitizer_get_free_bytes() {
+ MemprofStats stats;
+ GetAccumulatedStats(&stats);
+ uptr total_free = stats.mmaped - stats.munmaped + stats.really_freed;
+ uptr total_used = stats.malloced;
+ // Return sane value if total_free < total_used due to racy
+ // way we update accumulated stats.
+ return (total_free > total_used) ? total_free - total_used : 1;
+}
+
+uptr __sanitizer_get_unmapped_bytes() { return 0; }
+
+void __memprof_print_accumulated_stats() { PrintAccumulatedStats(); }
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_stats.h b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_stats.h
new file mode 100644
index 000000000000..ebdaa1909817
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_stats.h
@@ -0,0 +1,61 @@
+//===-- memprof_stats.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemProfiler, a memory profiler.
+//
+// MemProf-private header for statistics.
+//===----------------------------------------------------------------------===//
+#ifndef MEMPROF_STATS_H
+#define MEMPROF_STATS_H
+
+#include "memprof_allocator.h"
+#include "memprof_internal.h"
+
+namespace __memprof {
+
+// MemprofStats struct is NOT thread-safe.
+// Each MemprofThread has its own MemprofStats, which are sometimes flushed
+// to the accumulated MemprofStats.
+struct MemprofStats {
+ // MemprofStats must be a struct consisting of uptr fields only.
+ // When merging two MemprofStats structs, we treat them as arrays of uptr.
+ uptr mallocs;
+ uptr malloced;
+ uptr malloced_overhead;
+ uptr frees;
+ uptr freed;
+ uptr real_frees;
+ uptr really_freed;
+ uptr reallocs;
+ uptr realloced;
+ uptr mmaps;
+ uptr mmaped;
+ uptr munmaps;
+ uptr munmaped;
+ uptr malloc_large;
+ uptr malloced_by_size[kNumberOfSizeClasses];
+
+ // Ctor for global MemprofStats (accumulated stats for dead threads).
+ explicit MemprofStats(LinkerInitialized) {}
+ // Creates empty stats.
+ MemprofStats();
+
+ void Print(); // Prints formatted stats to stderr.
+ void Clear();
+ void MergeFrom(const MemprofStats *stats);
+};
+
+// Returns stats for GetCurrentThread(), or stats for fake "unknown thread"
+// if GetCurrentThread() returns 0.
+MemprofStats &GetCurrentThreadStats();
+// Flushes a given stats into accumulated stats of dead threads.
+void FlushToDeadThreadStats(MemprofStats *stats);
+
+} // namespace __memprof
+
+#endif // MEMPROF_STATS_H
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_thread.cpp b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_thread.cpp
new file mode 100644
index 000000000000..1bfff69bf1be
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_thread.cpp
@@ -0,0 +1,220 @@
+//===-- memprof_thread.cpp -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemProfiler, a memory profiler.
+//
+// Thread-related code.
+//===----------------------------------------------------------------------===//
+#include "memprof_thread.h"
+#include "memprof_allocator.h"
+#include "memprof_interceptors.h"
+#include "memprof_mapping.h"
+#include "memprof_stack.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
+
+namespace __memprof {
+
+// MemprofThreadContext implementation.
+
+void MemprofThreadContext::OnCreated(void *arg) {
+ CreateThreadContextArgs *args = static_cast<CreateThreadContextArgs *>(arg);
+ if (args->stack)
+ stack_id = StackDepotPut(*args->stack);
+ thread = args->thread;
+ thread->set_context(this);
+}
+
+void MemprofThreadContext::OnFinished() {
+ // Drop the link to the MemprofThread object.
+ thread = nullptr;
+}
+
+static ALIGNED(16) char thread_registry_placeholder[sizeof(ThreadRegistry)];
+static ThreadRegistry *memprof_thread_registry;
+
+static BlockingMutex mu_for_thread_context(LINKER_INITIALIZED);
+static LowLevelAllocator allocator_for_thread_context;
+
+static ThreadContextBase *GetMemprofThreadContext(u32 tid) {
+ BlockingMutexLock lock(&mu_for_thread_context);
+ return new (allocator_for_thread_context) MemprofThreadContext(tid);
+}
+
+ThreadRegistry &memprofThreadRegistry() {
+ static bool initialized;
+ // Don't worry about thread_safety - this should be called when there is
+ // a single thread.
+ if (!initialized) {
+ // Never reuse MemProf threads: we store pointer to MemprofThreadContext
+ // in TSD and can't reliably tell when no more TSD destructors will
+ // be called. It would be wrong to reuse MemprofThreadContext for another
+ // thread before all TSD destructors will be called for it.
+ memprof_thread_registry = new (thread_registry_placeholder) ThreadRegistry(
+ GetMemprofThreadContext, kMaxNumberOfThreads, kMaxNumberOfThreads);
+ initialized = true;
+ }
+ return *memprof_thread_registry;
+}
+
+MemprofThreadContext *GetThreadContextByTidLocked(u32 tid) {
+ return static_cast<MemprofThreadContext *>(
+ memprofThreadRegistry().GetThreadLocked(tid));
+}
+
+// MemprofThread implementation.
+
+MemprofThread *MemprofThread::Create(thread_callback_t start_routine, void *arg,
+ u32 parent_tid, StackTrace *stack,
+ bool detached) {
+ uptr PageSize = GetPageSizeCached();
+ uptr size = RoundUpTo(sizeof(MemprofThread), PageSize);
+ MemprofThread *thread = (MemprofThread *)MmapOrDie(size, __func__);
+ thread->start_routine_ = start_routine;
+ thread->arg_ = arg;
+ MemprofThreadContext::CreateThreadContextArgs args = {thread, stack};
+ memprofThreadRegistry().CreateThread(*reinterpret_cast<uptr *>(thread),
+ detached, parent_tid, &args);
+
+ return thread;
+}
+
+void MemprofThread::TSDDtor(void *tsd) {
+ MemprofThreadContext *context = (MemprofThreadContext *)tsd;
+ VReport(1, "T%d TSDDtor\n", context->tid);
+ if (context->thread)
+ context->thread->Destroy();
+}
+
+void MemprofThread::Destroy() {
+ int tid = this->tid();
+ VReport(1, "T%d exited\n", tid);
+
+ malloc_storage().CommitBack();
+ memprofThreadRegistry().FinishThread(tid);
+ FlushToDeadThreadStats(&stats_);
+ uptr size = RoundUpTo(sizeof(MemprofThread), GetPageSizeCached());
+ UnmapOrDie(this, size);
+ DTLS_Destroy();
+}
+
+inline MemprofThread::StackBounds MemprofThread::GetStackBounds() const {
+ if (stack_bottom_ >= stack_top_)
+ return {0, 0};
+ return {stack_bottom_, stack_top_};
+}
+
+uptr MemprofThread::stack_top() { return GetStackBounds().top; }
+
+uptr MemprofThread::stack_bottom() { return GetStackBounds().bottom; }
+
+uptr MemprofThread::stack_size() {
+ const auto bounds = GetStackBounds();
+ return bounds.top - bounds.bottom;
+}
+
+void MemprofThread::Init(const InitOptions *options) {
+ CHECK_EQ(this->stack_size(), 0U);
+ SetThreadStackAndTls(options);
+ if (stack_top_ != stack_bottom_) {
+ CHECK_GT(this->stack_size(), 0U);
+ CHECK(AddrIsInMem(stack_bottom_));
+ CHECK(AddrIsInMem(stack_top_ - 1));
+ }
+ int local = 0;
+ VReport(1, "T%d: stack [%p,%p) size 0x%zx; local=%p\n", tid(),
+ (void *)stack_bottom_, (void *)stack_top_, stack_top_ - stack_bottom_,
+ &local);
+}
+
+thread_return_t
+MemprofThread::ThreadStart(tid_t os_id,
+ atomic_uintptr_t *signal_thread_is_registered) {
+ Init();
+ memprofThreadRegistry().StartThread(tid(), os_id, ThreadType::Regular,
+ nullptr);
+ if (signal_thread_is_registered)
+ atomic_store(signal_thread_is_registered, 1, memory_order_release);
+
+ if (!start_routine_) {
+ // start_routine_ == 0 if we're on the main thread or on one of the
+ // OS X libdispatch worker threads. But nobody is supposed to call
+ // ThreadStart() for the worker threads.
+ CHECK_EQ(tid(), 0);
+ return 0;
+ }
+
+ return start_routine_(arg_);
+}
+
+MemprofThread *CreateMainThread() {
+ MemprofThread *main_thread = MemprofThread::Create(
+ /* start_routine */ nullptr, /* arg */ nullptr, /* parent_tid */ 0,
+ /* stack */ nullptr, /* detached */ true);
+ SetCurrentThread(main_thread);
+ main_thread->ThreadStart(internal_getpid(),
+ /* signal_thread_is_registered */ nullptr);
+ return main_thread;
+}
+
+// This implementation doesn't use the argument, which is just passed down
+// from the caller of Init (which see, above). It's only there to support
+// OS-specific implementations that need more information passed through.
+void MemprofThread::SetThreadStackAndTls(const InitOptions *options) {
+ DCHECK_EQ(options, nullptr);
+ uptr tls_size = 0;
+ uptr stack_size = 0;
+ GetThreadStackAndTls(tid() == 0, &stack_bottom_, &stack_size, &tls_begin_,
+ &tls_size);
+ stack_top_ = stack_bottom_ + stack_size;
+ tls_end_ = tls_begin_ + tls_size;
+ dtls_ = DTLS_Get();
+
+ if (stack_top_ != stack_bottom_) {
+ int local;
+ CHECK(AddrIsInStack((uptr)&local));
+ }
+}
+
+bool MemprofThread::AddrIsInStack(uptr addr) {
+ const auto bounds = GetStackBounds();
+ return addr >= bounds.bottom && addr < bounds.top;
+}
+
+MemprofThread *GetCurrentThread() {
+ MemprofThreadContext *context =
+ reinterpret_cast<MemprofThreadContext *>(TSDGet());
+ if (!context)
+ return nullptr;
+ return context->thread;
+}
+
+void SetCurrentThread(MemprofThread *t) {
+ CHECK(t->context());
+ VReport(2, "SetCurrentThread: %p for thread %p\n", t->context(),
+ (void *)GetThreadSelf());
+ // Make sure we do not reset the current MemprofThread.
+ CHECK_EQ(0, TSDGet());
+ TSDSet(t->context());
+ CHECK_EQ(t->context(), TSDGet());
+}
+
+u32 GetCurrentTidOrInvalid() {
+ MemprofThread *t = GetCurrentThread();
+ return t ? t->tid() : kInvalidTid;
+}
+
+void EnsureMainThreadIDIsCorrect() {
+ MemprofThreadContext *context =
+ reinterpret_cast<MemprofThreadContext *>(TSDGet());
+ if (context && (context->tid == 0))
+ context->os_id = GetTid();
+}
+} // namespace __memprof
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_thread.h b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_thread.h
new file mode 100644
index 000000000000..2e1a8bb43b82
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_thread.h
@@ -0,0 +1,138 @@
+//===-- memprof_thread.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemProfiler, a memory profiler.
+//
+// MemProf-private header for memprof_thread.cpp.
+//===----------------------------------------------------------------------===//
+
+#ifndef MEMPROF_THREAD_H
+#define MEMPROF_THREAD_H
+
+#include "memprof_allocator.h"
+#include "memprof_internal.h"
+#include "memprof_stats.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_thread_registry.h"
+
+namespace __sanitizer {
+struct DTLS;
+} // namespace __sanitizer
+
+namespace __memprof {
+
+const u32 kInvalidTid = 0xffffff; // Must fit into 24 bits.
+const u32 kMaxNumberOfThreads = (1 << 22); // 4M
+
+class MemprofThread;
+
+// These objects are created for every thread and are never deleted,
+// so we can find them by tid even if the thread is long dead.
+struct MemprofThreadContext final : public ThreadContextBase {
+ explicit MemprofThreadContext(int tid)
+ : ThreadContextBase(tid), announced(false),
+ destructor_iterations(GetPthreadDestructorIterations()), stack_id(0),
+ thread(nullptr) {}
+ bool announced;
+ u8 destructor_iterations;
+ u32 stack_id;
+ MemprofThread *thread;
+
+ void OnCreated(void *arg) override;
+ void OnFinished() override;
+
+ struct CreateThreadContextArgs {
+ MemprofThread *thread;
+ StackTrace *stack;
+ };
+};
+
+// MemprofThreadContext objects are never freed, so we need many of them.
+COMPILER_CHECK(sizeof(MemprofThreadContext) <= 256);
+
+// MemprofThread are stored in TSD and destroyed when the thread dies.
+class MemprofThread {
+public:
+ static MemprofThread *Create(thread_callback_t start_routine, void *arg,
+ u32 parent_tid, StackTrace *stack,
+ bool detached);
+ static void TSDDtor(void *tsd);
+ void Destroy();
+
+ struct InitOptions;
+ void Init(const InitOptions *options = nullptr);
+
+ thread_return_t ThreadStart(tid_t os_id,
+ atomic_uintptr_t *signal_thread_is_registered);
+
+ uptr stack_top();
+ uptr stack_bottom();
+ uptr stack_size();
+ uptr tls_begin() { return tls_begin_; }
+ uptr tls_end() { return tls_end_; }
+ DTLS *dtls() { return dtls_; }
+ u32 tid() { return context_->tid; }
+ MemprofThreadContext *context() { return context_; }
+ void set_context(MemprofThreadContext *context) { context_ = context; }
+
+ bool AddrIsInStack(uptr addr);
+
+ // True is this thread is currently unwinding stack (i.e. collecting a stack
+ // trace). Used to prevent deadlocks on platforms where libc unwinder calls
+ // malloc internally. See PR17116 for more details.
+ bool isUnwinding() const { return unwinding_; }
+ void setUnwinding(bool b) { unwinding_ = b; }
+
+ MemprofThreadLocalMallocStorage &malloc_storage() { return malloc_storage_; }
+ MemprofStats &stats() { return stats_; }
+
+private:
+ // NOTE: There is no MemprofThread constructor. It is allocated
+ // via mmap() and *must* be valid in zero-initialized state.
+
+ void SetThreadStackAndTls(const InitOptions *options);
+
+ struct StackBounds {
+ uptr bottom;
+ uptr top;
+ };
+ StackBounds GetStackBounds() const;
+
+ MemprofThreadContext *context_;
+ thread_callback_t start_routine_;
+ void *arg_;
+
+ uptr stack_top_;
+ uptr stack_bottom_;
+
+ uptr tls_begin_;
+ uptr tls_end_;
+ DTLS *dtls_;
+
+ MemprofThreadLocalMallocStorage malloc_storage_;
+ MemprofStats stats_;
+ bool unwinding_;
+};
+
+// Returns a single instance of registry.
+ThreadRegistry &memprofThreadRegistry();
+
+// Must be called under ThreadRegistryLock.
+MemprofThreadContext *GetThreadContextByTidLocked(u32 tid);
+
+// Get the current thread. May return 0.
+MemprofThread *GetCurrentThread();
+void SetCurrentThread(MemprofThread *t);
+u32 GetCurrentTidOrInvalid();
+
+// Used to handle fork().
+void EnsureMainThreadIDIsCorrect();
+} // namespace __memprof
+
+#endif // MEMPROF_THREAD_H
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/weak_symbols.txt b/contrib/llvm-project/compiler-rt/lib/memprof/weak_symbols.txt
new file mode 100644
index 000000000000..271813612ab6
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/weak_symbols.txt
@@ -0,0 +1 @@
+___memprof_default_options __memprof_profile_filename
diff --git a/contrib/llvm-project/compiler-rt/lib/msan/msan.cpp b/contrib/llvm-project/compiler-rt/lib/msan/msan.cpp
index 9afc7b026a8e..4be1630cd302 100644
--- a/contrib/llvm-project/compiler-rt/lib/msan/msan.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/msan/msan.cpp
@@ -109,7 +109,7 @@ void Flags::SetDefaults() {
// keep_going is an old name for halt_on_error,
// and it has inverse meaning.
-class FlagHandlerKeepGoing : public FlagHandlerBase {
+class FlagHandlerKeepGoing final : public FlagHandlerBase {
bool *halt_on_error_;
public:
@@ -151,7 +151,6 @@ static void InitializeFlags() {
// FIXME: test and enable.
cf.check_printf = false;
cf.intercept_tls_get_addr = true;
- cf.exitcode = 77;
OverrideCommonFlags(cf);
}
@@ -172,10 +171,9 @@ static void InitializeFlags() {
#endif
// Override from user-specified string.
- if (__msan_default_options)
- parser.ParseString(__msan_default_options());
+ parser.ParseString(__msan_default_options());
#if MSAN_CONTAINS_UBSAN
- const char *ubsan_default_options = __ubsan::MaybeCallUbsanDefaultOptions();
+ const char *ubsan_default_options = __ubsan_default_options();
ubsan_parser.ParseString(ubsan_default_options);
#endif
@@ -527,6 +525,9 @@ void __msan_dump_shadow(const void *x, uptr size) {
sptr __msan_test_shadow(const void *x, uptr size) {
if (!MEM_IS_APP(x)) return -1;
unsigned char *s = (unsigned char *)MEM_TO_SHADOW((uptr)x);
+ if (__sanitizer::mem_is_zero((const char *)s, size))
+ return -1;
+ // Slow path: loop through again to find the location.
for (uptr i = 0; i < size; ++i)
if (s[i])
return i;
@@ -692,12 +693,40 @@ void __msan_set_death_callback(void (*callback)(void)) {
SetUserDieCallback(callback);
}
-#if !SANITIZER_SUPPORTS_WEAK_HOOKS
-extern "C" {
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-const char* __msan_default_options() { return ""; }
-} // extern "C"
-#endif
+void __msan_start_switch_fiber(const void *bottom, uptr size) {
+ MsanThread *t = GetCurrentThread();
+ if (!t) {
+ VReport(1, "__msan_start_switch_fiber called from unknown thread\n");
+ return;
+ }
+ t->StartSwitchFiber((uptr)bottom, size);
+}
+
+void __msan_finish_switch_fiber(const void **bottom_old, uptr *size_old) {
+ MsanThread *t = GetCurrentThread();
+ if (!t) {
+ VReport(1, "__msan_finish_switch_fiber called from unknown thread\n");
+ return;
+ }
+ t->FinishSwitchFiber((uptr *)bottom_old, (uptr *)size_old);
+
+ internal_memset(__msan_param_tls, 0, sizeof(__msan_param_tls));
+ internal_memset(__msan_retval_tls, 0, sizeof(__msan_retval_tls));
+ internal_memset(__msan_va_arg_tls, 0, sizeof(__msan_va_arg_tls));
+
+ if (__msan_get_track_origins()) {
+ internal_memset(__msan_param_origin_tls, 0,
+ sizeof(__msan_param_origin_tls));
+ internal_memset(&__msan_retval_origin_tls, 0,
+ sizeof(__msan_retval_origin_tls));
+ internal_memset(__msan_va_arg_origin_tls, 0,
+ sizeof(__msan_va_arg_origin_tls));
+ }
+}
+
+SANITIZER_INTERFACE_WEAK_DEF(const char *, __msan_default_options, void) {
+ return "";
+}
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
diff --git a/contrib/llvm-project/compiler-rt/lib/msan/msan_interceptors.cpp b/contrib/llvm-project/compiler-rt/lib/msan/msan_interceptors.cpp
index 6459c7a593eb..4eea94f1f969 100644
--- a/contrib/llvm-project/compiler-rt/lib/msan/msan_interceptors.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/msan/msan_interceptors.cpp
@@ -827,7 +827,7 @@ INTERCEPTOR(int, prlimit64, int pid, int resource, void *new_rlimit,
INTERCEPTOR(int, gethostname, char *name, SIZE_T len) {
ENSURE_MSAN_INITED();
int res = REAL(gethostname)(name, len);
- if (!res) {
+ if (!res || (res == -1 && errno == errno_ENAMETOOLONG)) {
SIZE_T real_len = REAL(strnlen)(name, len);
if (real_len < len)
++real_len;
@@ -1245,10 +1245,10 @@ int OnExit() {
CHECK_UNPOISONED_0(x, n); \
} while (0)
-#define MSAN_INTERCEPT_FUNC(name) \
- do { \
- if (!INTERCEPT_FUNCTION(name)) \
- VReport(1, "MemorySanitizer: failed to intercept '%s'\n'", #name); \
+#define MSAN_INTERCEPT_FUNC(name) \
+ do { \
+ if (!INTERCEPT_FUNCTION(name)) \
+ VReport(1, "MemorySanitizer: failed to intercept '%s'\n", #name); \
} while (0)
#define MSAN_INTERCEPT_FUNC_VER(name, ver) \
diff --git a/contrib/llvm-project/compiler-rt/lib/msan/msan_interface_internal.h b/contrib/llvm-project/compiler-rt/lib/msan/msan_interface_internal.h
index 9e3db06bd64d..1edacbc7504f 100644
--- a/contrib/llvm-project/compiler-rt/lib/msan/msan_interface_internal.h
+++ b/contrib/llvm-project/compiler-rt/lib/msan/msan_interface_internal.h
@@ -129,8 +129,8 @@ void __msan_set_keep_going(int keep_going);
SANITIZER_INTERFACE_ATTRIBUTE
int __msan_set_poison_in_malloc(int do_poison);
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-/* OPTIONAL */ const char* __msan_default_options();
+SANITIZER_INTERFACE_ATTRIBUTE
+const char *__msan_default_options();
// For testing.
SANITIZER_INTERFACE_ATTRIBUTE
@@ -187,6 +187,12 @@ void __msan_scoped_disable_interceptor_checks();
SANITIZER_INTERFACE_ATTRIBUTE
void __msan_scoped_enable_interceptor_checks();
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __msan_start_switch_fiber(const void *bottom, uptr size);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __msan_finish_switch_fiber(const void **bottom_old, uptr *size_old);
} // extern "C"
#endif // MSAN_INTERFACE_INTERNAL_H
diff --git a/contrib/llvm-project/compiler-rt/lib/msan/msan_linux.cpp b/contrib/llvm-project/compiler-rt/lib/msan/msan_linux.cpp
index d61e9dee3065..d5baee38e710 100644
--- a/contrib/llvm-project/compiler-rt/lib/msan/msan_linux.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/msan/msan_linux.cpp
@@ -26,7 +26,6 @@
#include <signal.h>
#include <unistd.h>
#include <unwind.h>
-#include <execinfo.h>
#include <sys/time.h>
#include <sys/resource.h>
@@ -142,7 +141,7 @@ bool InitShadow(bool init_origins) {
if (map) {
if (!CheckMemoryRangeAvailability(start, size))
return false;
- if (!MmapFixedNoReserve(start, size, kMemoryLayout[i].name))
+ if (!MmapFixedSuperNoReserve(start, size, kMemoryLayout[i].name))
return false;
if (common_flags()->use_madv_dontdump)
DontDumpShadowMemory(start, size);
diff --git a/contrib/llvm-project/compiler-rt/lib/msan/msan_poisoning.cpp b/contrib/llvm-project/compiler-rt/lib/msan/msan_poisoning.cpp
index ef3c74e0a35a..a92b0565cfa8 100644
--- a/contrib/llvm-project/compiler-rt/lib/msan/msan_poisoning.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/msan/msan_poisoning.cpp
@@ -47,7 +47,7 @@ void CopyOrigin(const void *dst, const void *src, uptr size,
uptr beg = d & ~3UL;
// Copy left unaligned origin if that memory is poisoned.
if (beg < d) {
- u32 o = GetOriginIfPoisoned((uptr)src, d - beg);
+ u32 o = GetOriginIfPoisoned((uptr)src, beg + 4 - d);
if (o) {
if (__msan_get_track_origins() > 1) o = ChainOrigin(o, stack);
*(u32 *)MEM_TO_ORIGIN(beg) = o;
@@ -94,23 +94,98 @@ void CopyOrigin(const void *dst, const void *src, uptr size,
}
}
+void ReverseCopyOrigin(const void *dst, const void *src, uptr size,
+ StackTrace *stack) {
+ if (!MEM_IS_APP(dst) || !MEM_IS_APP(src))
+ return;
+
+ uptr d = (uptr)dst;
+ uptr end = (d + size) & ~3UL;
+
+ // Copy right unaligned origin if that memory is poisoned.
+ if (end < d + size) {
+ u32 o = GetOriginIfPoisoned((uptr)src + (end - d), (d + size) - end);
+ if (o) {
+ if (__msan_get_track_origins() > 1)
+ o = ChainOrigin(o, stack);
+ *(u32 *)MEM_TO_ORIGIN(end) = o;
+ }
+ }
+
+ uptr beg = d & ~3UL;
+
+ if (beg + 4 < end) {
+ // Align src up.
+ uptr s = ((uptr)src + 3) & ~3UL;
+ if (__msan_get_track_origins() > 1) {
+ u32 *src = (u32 *)MEM_TO_ORIGIN(s + end - beg - 4);
+ u32 *src_s = (u32 *)MEM_TO_SHADOW(s + end - beg - 4);
+ u32 *src_begin = (u32 *)MEM_TO_ORIGIN(s);
+ u32 *dst = (u32 *)MEM_TO_ORIGIN(end - 4);
+ u32 src_o = 0;
+ u32 dst_o = 0;
+ for (; src >= src_begin; --src, --src_s, --dst) {
+ if (!*src_s)
+ continue;
+ if (*src != src_o) {
+ src_o = *src;
+ dst_o = ChainOrigin(src_o, stack);
+ }
+ *dst = dst_o;
+ }
+ } else {
+ REAL(memmove)
+ ((void *)MEM_TO_ORIGIN(beg), (void *)MEM_TO_ORIGIN(s), end - beg - 4);
+ }
+ }
+
+ // Copy left unaligned origin if that memory is poisoned.
+ if (beg < d) {
+ u32 o = GetOriginIfPoisoned((uptr)src, beg + 4 - d);
+ if (o) {
+ if (__msan_get_track_origins() > 1)
+ o = ChainOrigin(o, stack);
+ *(u32 *)MEM_TO_ORIGIN(beg) = o;
+ }
+ }
+}
+
+void MoveOrigin(const void *dst, const void *src, uptr size,
+ StackTrace *stack) {
+ // If destination origin range overlaps with source origin range, move
+ // origins by coping origins in a reverse order; otherwise, copy origins in
+ // a normal order.
+ uptr src_aligned_beg = reinterpret_cast<uptr>(src) & ~3UL;
+ uptr src_aligned_end = (reinterpret_cast<uptr>(src) + size) & ~3UL;
+ uptr dst_aligned_beg = reinterpret_cast<uptr>(dst) & ~3UL;
+ if (dst_aligned_beg < src_aligned_end && dst_aligned_beg >= src_aligned_beg)
+ return ReverseCopyOrigin(dst, src, size, stack);
+ return CopyOrigin(dst, src, size, stack);
+}
+
void MoveShadowAndOrigin(const void *dst, const void *src, uptr size,
StackTrace *stack) {
if (!MEM_IS_APP(dst)) return;
if (!MEM_IS_APP(src)) return;
if (src == dst) return;
+ // MoveOrigin transfers origins by refering to their shadows. So we
+ // need to move origins before moving shadows.
+ if (__msan_get_track_origins())
+ MoveOrigin(dst, src, size, stack);
REAL(memmove)((void *)MEM_TO_SHADOW((uptr)dst),
(void *)MEM_TO_SHADOW((uptr)src), size);
- if (__msan_get_track_origins()) CopyOrigin(dst, src, size, stack);
}
void CopyShadowAndOrigin(const void *dst, const void *src, uptr size,
StackTrace *stack) {
if (!MEM_IS_APP(dst)) return;
if (!MEM_IS_APP(src)) return;
+ // Because origin's range is slightly larger than app range, memcpy may also
+ // cause overlapped origin ranges.
REAL(memcpy)((void *)MEM_TO_SHADOW((uptr)dst),
(void *)MEM_TO_SHADOW((uptr)src), size);
- if (__msan_get_track_origins()) CopyOrigin(dst, src, size, stack);
+ if (__msan_get_track_origins())
+ MoveOrigin(dst, src, size, stack);
}
void CopyMemory(void *dst, const void *src, uptr size, StackTrace *stack) {
diff --git a/contrib/llvm-project/compiler-rt/lib/msan/msan_thread.cpp b/contrib/llvm-project/compiler-rt/lib/msan/msan_thread.cpp
index 0ba499350064..6ae012acd9a2 100644
--- a/contrib/llvm-project/compiler-rt/lib/msan/msan_thread.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/msan/msan_thread.cpp
@@ -22,9 +22,9 @@ MsanThread *MsanThread::Create(thread_callback_t start_routine,
void MsanThread::SetThreadStackAndTls() {
uptr tls_size = 0;
uptr stack_size = 0;
- GetThreadStackAndTls(IsMainThread(), &stack_bottom_, &stack_size,
- &tls_begin_, &tls_size);
- stack_top_ = stack_bottom_ + stack_size;
+ GetThreadStackAndTls(IsMainThread(), &stack_.bottom, &stack_size, &tls_begin_,
+ &tls_size);
+ stack_.top = stack_.bottom + stack_size;
tls_end_ = tls_begin_ + tls_size;
int local;
@@ -32,19 +32,20 @@ void MsanThread::SetThreadStackAndTls() {
}
void MsanThread::ClearShadowForThreadStackAndTLS() {
- __msan_unpoison((void *)stack_bottom_, stack_top_ - stack_bottom_);
+ __msan_unpoison((void *)stack_.bottom, stack_.top - stack_.bottom);
if (tls_begin_ != tls_end_)
__msan_unpoison((void *)tls_begin_, tls_end_ - tls_begin_);
DTLS *dtls = DTLS_Get();
CHECK_NE(dtls, 0);
- for (uptr i = 0; i < dtls->dtv_size; ++i)
- __msan_unpoison((void *)(dtls->dtv[i].beg), dtls->dtv[i].size);
+ ForEachDVT(dtls, [](const DTLS::DTV &dtv, int id) {
+ __msan_unpoison((void *)(dtv.beg), dtv.size);
+ });
}
void MsanThread::Init() {
SetThreadStackAndTls();
- CHECK(MEM_IS_APP(stack_bottom_));
- CHECK(MEM_IS_APP(stack_top_ - 1));
+ CHECK(MEM_IS_APP(stack_.bottom));
+ CHECK(MEM_IS_APP(stack_.top - 1));
ClearShadowForThreadStackAndTLS();
}
@@ -79,4 +80,45 @@ thread_return_t MsanThread::ThreadStart() {
return res;
}
+MsanThread::StackBounds MsanThread::GetStackBounds() const {
+ if (!stack_switching_)
+ return {stack_.bottom, stack_.top};
+ const uptr cur_stack = GET_CURRENT_FRAME();
+ // Note: need to check next stack first, because FinishSwitchFiber
+ // may be in process of overwriting stack_.top/bottom_. But in such case
+ // we are already on the next stack.
+ if (cur_stack >= next_stack_.bottom && cur_stack < next_stack_.top)
+ return {next_stack_.bottom, next_stack_.top};
+ return {stack_.bottom, stack_.top};
+}
+
+uptr MsanThread::stack_top() { return GetStackBounds().top; }
+
+uptr MsanThread::stack_bottom() { return GetStackBounds().bottom; }
+
+bool MsanThread::AddrIsInStack(uptr addr) {
+ const auto bounds = GetStackBounds();
+ return addr >= bounds.bottom && addr < bounds.top;
+}
+
+void MsanThread::StartSwitchFiber(uptr bottom, uptr size) {
+ CHECK(!stack_switching_);
+ next_stack_.bottom = bottom;
+ next_stack_.top = bottom + size;
+ stack_switching_ = true;
+}
+
+void MsanThread::FinishSwitchFiber(uptr *bottom_old, uptr *size_old) {
+ CHECK(stack_switching_);
+ if (bottom_old)
+ *bottom_old = stack_.bottom;
+ if (size_old)
+ *size_old = stack_.top - stack_.bottom;
+ stack_.bottom = next_stack_.bottom;
+ stack_.top = next_stack_.top;
+ stack_switching_ = false;
+ next_stack_.top = 0;
+ next_stack_.bottom = 0;
+}
+
} // namespace __msan
diff --git a/contrib/llvm-project/compiler-rt/lib/msan/msan_thread.h b/contrib/llvm-project/compiler-rt/lib/msan/msan_thread.h
index 808780cd57b9..fe795e3a547a 100644
--- a/contrib/llvm-project/compiler-rt/lib/msan/msan_thread.h
+++ b/contrib/llvm-project/compiler-rt/lib/msan/msan_thread.h
@@ -27,20 +27,21 @@ class MsanThread {
void Init(); // Should be called from the thread itself.
thread_return_t ThreadStart();
- uptr stack_top() { return stack_top_; }
- uptr stack_bottom() { return stack_bottom_; }
+ uptr stack_top();
+ uptr stack_bottom();
uptr tls_begin() { return tls_begin_; }
uptr tls_end() { return tls_end_; }
bool IsMainThread() { return start_routine_ == nullptr; }
- bool AddrIsInStack(uptr addr) {
- return addr >= stack_bottom_ && addr < stack_top_;
- }
+ bool AddrIsInStack(uptr addr);
bool InSignalHandler() { return in_signal_handler_; }
void EnterSignalHandler() { in_signal_handler_++; }
void LeaveSignalHandler() { in_signal_handler_--; }
+ void StartSwitchFiber(uptr bottom, uptr size);
+ void FinishSwitchFiber(uptr *bottom_old, uptr *size_old);
+
MsanThreadLocalMallocStorage &malloc_storage() { return malloc_storage_; }
int destructor_iterations_;
@@ -50,10 +51,19 @@ class MsanThread {
// via mmap() and *must* be valid in zero-initialized state.
void SetThreadStackAndTls();
void ClearShadowForThreadStackAndTLS();
+ struct StackBounds {
+ uptr bottom;
+ uptr top;
+ };
+ StackBounds GetStackBounds() const;
thread_callback_t start_routine_;
void *arg_;
- uptr stack_top_;
- uptr stack_bottom_;
+
+ bool stack_switching_;
+
+ StackBounds stack_;
+ StackBounds next_stack_;
+
uptr tls_begin_;
uptr tls_end_;
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/GCDAProfiling.c b/contrib/llvm-project/compiler-rt/lib/profile/GCDAProfiling.c
index 82369357e986..4293e8f7b5bf 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/GCDAProfiling.c
+++ b/contrib/llvm-project/compiler-rt/lib/profile/GCDAProfiling.c
@@ -128,11 +128,6 @@ struct fn_list {
struct fn_list writeout_fn_list;
/*
- * A list of flush functions that our __gcov_flush() function should call, shared between all dynamic objects.
- */
-struct fn_list flush_fn_list;
-
-/*
* A list of reset functions, shared between all dynamic objects.
*/
struct fn_list reset_fn_list;
@@ -308,16 +303,11 @@ static void unmap_file() {
mmap_handle = NULL;
#else
- if (msync(write_buffer, file_size, MS_SYNC) == -1) {
+ if (munmap(write_buffer, file_size) == -1) {
int errnum = errno;
- fprintf(stderr, "profiling: %s: cannot msync: %s\n", filename,
+ fprintf(stderr, "profiling: %s: cannot munmap: %s\n", filename,
strerror(errnum));
}
-
- /* We explicitly ignore errors from unmapping because at this point the data
- * is written and we don't care.
- */
- (void)munmap(write_buffer, file_size);
#endif
write_buffer = NULL;
@@ -406,32 +396,6 @@ void llvm_gcda_start_file(const char *orig_filename, uint32_t version,
#endif
}
-/* Given an array of pointers to counters (counters), increment the n-th one,
- * where we're also given a pointer to n (predecessor).
- */
-COMPILER_RT_VISIBILITY
-void llvm_gcda_increment_indirect_counter(uint32_t *predecessor,
- uint64_t **counters) {
- uint64_t *counter;
- uint32_t pred;
-
- pred = *predecessor;
- if (pred == 0xffffffff)
- return;
- counter = counters[pred];
-
- /* Don't crash if the pred# is out of sync. This can happen due to threads,
- or because of a TODO in GCOVProfiling.cpp buildEdgeLookupTable(). */
- if (counter)
- ++*counter;
-#ifdef DEBUG_GCDAPROFILING
- else
- fprintf(stderr,
- "llvmgcda: increment_indirect_counter counters=%08llx, pred=%u\n",
- *counter, *predecessor);
-#endif
-}
-
COMPILER_RT_VISIBILITY
void llvm_gcda_emit_function(uint32_t ident, uint32_t func_checksum,
uint32_t cfg_checksum) {
@@ -627,25 +591,6 @@ static void llvm_writeout_and_clear(void) {
}
COMPILER_RT_VISIBILITY
-void llvm_register_flush_function(fn_ptr fn) {
- fn_list_insert(&flush_fn_list, fn);
-}
-
-void __gcov_flush() {
- struct fn_node* curr = flush_fn_list.head;
-
- while (curr) {
- curr->fn();
- curr = curr->next;
- }
-}
-
-COMPILER_RT_VISIBILITY
-void llvm_delete_flush_function_list(void) {
- fn_list_remove(&flush_fn_list);
-}
-
-COMPILER_RT_VISIBILITY
void llvm_register_reset_function(fn_ptr fn) {
fn_list_insert(&reset_fn_list, fn);
}
@@ -685,15 +630,12 @@ pid_t __gcov_fork() {
#endif
COMPILER_RT_VISIBILITY
-void llvm_gcov_init(fn_ptr wfn, fn_ptr ffn, fn_ptr rfn) {
+void llvm_gcov_init(fn_ptr wfn, fn_ptr rfn) {
static int atexit_ran = 0;
if (wfn)
llvm_register_writeout_function(wfn);
- if (ffn)
- llvm_register_flush_function(ffn);
-
if (rfn)
llvm_register_reset_function(rfn);
@@ -702,11 +644,20 @@ void llvm_gcov_init(fn_ptr wfn, fn_ptr ffn, fn_ptr rfn) {
/* Make sure we write out the data and delete the data structures. */
atexit(llvm_delete_reset_function_list);
- atexit(llvm_delete_flush_function_list);
#ifdef _WIN32
atexit(llvm_writeout_and_clear);
#endif
}
}
+void __gcov_dump(void) {
+ for (struct fn_node *f = writeout_fn_list.head; f; f = f->next)
+ f->fn();
+}
+
+void __gcov_reset(void) {
+ for (struct fn_node *f = reset_fn_list.head; f; f = f->next)
+ f->fn();
+}
+
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfiling.c b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfiling.c
index 31a9fe996293..6df65f66df73 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfiling.c
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfiling.c
@@ -6,6 +6,9 @@
|*
\*===----------------------------------------------------------------------===*/
+// Note: This is linked into the Darwin kernel, and must remain compatible
+// with freestanding compilation. See `darwin_add_builtin_libraries`.
+
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
@@ -17,9 +20,6 @@
#define INSTR_PROF_VALUE_PROF_DATA
#include "profile/InstrProfData.inc"
-
-COMPILER_RT_WEAK uint64_t INSTR_PROF_RAW_VERSION_VAR = INSTR_PROF_RAW_VERSION;
-
COMPILER_RT_VISIBILITY uint64_t __llvm_profile_get_magic(void) {
return sizeof(void *) == sizeof(uint64_t) ? (INSTR_PROF_RAW_MAGIC_64)
: (INSTR_PROF_RAW_MAGIC_32);
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfiling.h b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfiling.h
index d7a7c32332c1..7d1c77a3fab3 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfiling.h
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfiling.h
@@ -55,6 +55,15 @@ int __llvm_profile_is_continuous_mode_enabled(void);
void __llvm_profile_enable_continuous_mode(void);
/*!
+ * \brief Set the page size.
+ *
+ * This is a pre-requisite for enabling continuous mode. The buffer size
+ * calculation code inside of libprofile cannot simply call getpagesize(), as
+ * it is not allowed to depend on libc.
+ */
+void __llvm_profile_set_page_size(unsigned PageSize);
+
+/*!
* \brief Get number of bytes necessary to pad the argument to eight
* byte boundary.
*/
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingBuffer.c b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingBuffer.c
index 5ee44785a7ab..07bb4d4e4f1b 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingBuffer.c
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingBuffer.c
@@ -6,6 +6,9 @@
|*
\*===----------------------------------------------------------------------===*/
+// Note: This is linked into the Darwin kernel, and must remain compatible
+// with freestanding compilation. See `darwin_add_builtin_libraries`.
+
#include "InstrProfiling.h"
#include "InstrProfilingInternal.h"
#include "InstrProfilingPort.h"
@@ -18,14 +21,22 @@
* layering is violated. */
static int ContinuouslySyncProfile = 0;
+/* The system page size. Only valid when non-zero. If 0, the page size is
+ * unavailable. */
+static unsigned PageSize = 0;
+
COMPILER_RT_VISIBILITY int __llvm_profile_is_continuous_mode_enabled(void) {
- return ContinuouslySyncProfile;
+ return ContinuouslySyncProfile && PageSize;
}
COMPILER_RT_VISIBILITY void __llvm_profile_enable_continuous_mode(void) {
ContinuouslySyncProfile = 1;
}
+COMPILER_RT_VISIBILITY void __llvm_profile_set_page_size(unsigned PS) {
+ PageSize = PS;
+}
+
COMPILER_RT_VISIBILITY
uint64_t __llvm_profile_get_size_for_buffer(void) {
const __llvm_profile_data *DataBegin = __llvm_profile_begin_data();
@@ -49,8 +60,7 @@ uint64_t __llvm_profile_get_data_size(const __llvm_profile_data *Begin,
/// Calculate the number of padding bytes needed to add to \p Offset in order
/// for (\p Offset + Padding) to be page-aligned.
-static uint64_t calculateBytesNeededToPageAlign(uint64_t Offset,
- unsigned PageSize) {
+static uint64_t calculateBytesNeededToPageAlign(uint64_t Offset) {
uint64_t OffsetModPage = Offset % PageSize;
if (OffsetModPage > 0)
return PageSize - OffsetModPage;
@@ -72,15 +82,13 @@ void __llvm_profile_get_padding_sizes_for_counters(
// In continuous mode, the file offsets for headers and for the start of
// counter sections need to be page-aligned.
- unsigned PageSize = getpagesize();
uint64_t DataSizeInBytes = DataSize * sizeof(__llvm_profile_data);
uint64_t CountersSizeInBytes = CountersSize * sizeof(uint64_t);
*PaddingBytesBeforeCounters = calculateBytesNeededToPageAlign(
- sizeof(__llvm_profile_header) + DataSizeInBytes, PageSize);
+ sizeof(__llvm_profile_header) + DataSizeInBytes);
*PaddingBytesAfterCounters =
- calculateBytesNeededToPageAlign(CountersSizeInBytes, PageSize);
- *PaddingBytesAfterNames =
- calculateBytesNeededToPageAlign(NamesSize, PageSize);
+ calculateBytesNeededToPageAlign(CountersSizeInBytes);
+ *PaddingBytesAfterNames = calculateBytesNeededToPageAlign(NamesSize);
}
COMPILER_RT_VISIBILITY
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingFile.c b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingFile.c
index 9e1a54a0c373..42ffdae82622 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingFile.c
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingFile.c
@@ -72,6 +72,7 @@ typedef struct lprofFilename {
unsigned OwnsFilenamePat;
const char *ProfilePathPrefix;
char PidChars[MAX_PID_SIZE];
+ char *TmpDir;
char Hostname[COMPILER_RT_MAX_HOSTLEN];
unsigned NumPids;
unsigned NumHosts;
@@ -86,8 +87,8 @@ typedef struct lprofFilename {
ProfileNameSpecifier PNS;
} lprofFilename;
-static lprofFilename lprofCurFilename = {0, 0, 0, {0}, {0},
- 0, 0, 0, PNS_unknown};
+static lprofFilename lprofCurFilename = {0, 0, 0, {0}, NULL,
+ {0}, 0, 0, 0, PNS_unknown};
static int ProfileMergeRequested = 0;
static int isProfileMergeRequested() { return ProfileMergeRequested; }
@@ -419,14 +420,12 @@ static void truncateCurrentFile(void) {
fclose(File);
}
-#ifndef _MSC_VER
+#if !defined(__Fuchsia__) && !defined(_WIN32)
static void assertIsZero(int *i) {
if (*i)
PROF_WARN("Expected flag to be 0, but got: %d\n", *i);
}
-#endif
-#if !defined(__Fuchsia__) && !defined(_WIN32)
/* Write a partial profile to \p Filename, which is required to be backed by
* the open file object \p File. */
static int writeProfileWithFileObject(const char *Filename, FILE *File) {
@@ -667,7 +666,8 @@ static void initializeProfileForContinuousMode(void) {
FileOffsetToCounters);
}
- unlockProfile(&ProfileRequiresUnlock, File);
+ if (ProfileRequiresUnlock)
+ unlockProfile(&ProfileRequiresUnlock, File);
#endif // defined(__Fuchsia__) || defined(_WIN32)
}
@@ -744,6 +744,14 @@ static int parseFilenamePattern(const char *FilenamePat,
FilenamePat);
return -1;
}
+ } else if (FilenamePat[I] == 't') {
+ lprofCurFilename.TmpDir = getenv("TMPDIR");
+ if (!lprofCurFilename.TmpDir) {
+ PROF_WARN("Unable to get the TMPDIR environment variable, referenced "
+ "in %s. Using the default path.",
+ FilenamePat);
+ return -1;
+ }
} else if (FilenamePat[I] == 'c') {
if (__llvm_profile_is_continuous_mode_enabled()) {
PROF_WARN("%%c specifier can only be specified once in %s.\n",
@@ -751,6 +759,7 @@ static int parseFilenamePattern(const char *FilenamePat,
return -1;
}
+ __llvm_profile_set_page_size(getpagesize());
__llvm_profile_enable_continuous_mode();
I++; /* advance to 'c' */
} else {
@@ -826,12 +835,13 @@ static int getCurFilenameLength() {
return 0;
if (!(lprofCurFilename.NumPids || lprofCurFilename.NumHosts ||
- lprofCurFilename.MergePoolSize))
+ lprofCurFilename.TmpDir || lprofCurFilename.MergePoolSize))
return strlen(lprofCurFilename.FilenamePat);
Len = strlen(lprofCurFilename.FilenamePat) +
lprofCurFilename.NumPids * (strlen(lprofCurFilename.PidChars) - 2) +
- lprofCurFilename.NumHosts * (strlen(lprofCurFilename.Hostname) - 2);
+ lprofCurFilename.NumHosts * (strlen(lprofCurFilename.Hostname) - 2) +
+ (lprofCurFilename.TmpDir ? (strlen(lprofCurFilename.TmpDir) - 1) : 0);
if (lprofCurFilename.MergePoolSize)
Len += SIGLEN;
return Len;
@@ -843,14 +853,14 @@ static int getCurFilenameLength() {
* current filename pattern string is directly returned, unless ForceUseBuf
* is enabled. */
static const char *getCurFilename(char *FilenameBuf, int ForceUseBuf) {
- int I, J, PidLength, HostNameLength, FilenamePatLength;
+ int I, J, PidLength, HostNameLength, TmpDirLength, FilenamePatLength;
const char *FilenamePat = lprofCurFilename.FilenamePat;
if (!lprofCurFilename.FilenamePat || !lprofCurFilename.FilenamePat[0])
return 0;
if (!(lprofCurFilename.NumPids || lprofCurFilename.NumHosts ||
- lprofCurFilename.MergePoolSize ||
+ lprofCurFilename.TmpDir || lprofCurFilename.MergePoolSize ||
__llvm_profile_is_continuous_mode_enabled())) {
if (!ForceUseBuf)
return lprofCurFilename.FilenamePat;
@@ -863,6 +873,7 @@ static const char *getCurFilename(char *FilenameBuf, int ForceUseBuf) {
PidLength = strlen(lprofCurFilename.PidChars);
HostNameLength = strlen(lprofCurFilename.Hostname);
+ TmpDirLength = lprofCurFilename.TmpDir ? strlen(lprofCurFilename.TmpDir) : 0;
/* Construct the new filename. */
for (I = 0, J = 0; FilenamePat[I]; ++I)
if (FilenamePat[I] == '%') {
@@ -872,6 +883,10 @@ static const char *getCurFilename(char *FilenameBuf, int ForceUseBuf) {
} else if (FilenamePat[I] == 'h') {
memcpy(FilenameBuf + J, lprofCurFilename.Hostname, HostNameLength);
J += HostNameLength;
+ } else if (FilenamePat[I] == 't') {
+ memcpy(FilenameBuf + J, lprofCurFilename.TmpDir, TmpDirLength);
+ FilenameBuf[J + TmpDirLength] = DIR_SEPARATOR;
+ J += TmpDirLength + 1;
} else {
if (!getMergePoolSize(FilenamePat, &I))
continue;
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingInternal.c b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingInternal.c
index d58bc19ad11e..6a54697df7f0 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingInternal.c
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingInternal.c
@@ -6,6 +6,9 @@
|*
\*===----------------------------------------------------------------------===*/
+// Note: This is linked into the Darwin kernel, and must remain compatible
+// with freestanding compilation. See `darwin_add_builtin_libraries`.
+
#if !defined(__Fuchsia__)
#include "InstrProfilingInternal.h"
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformDarwin.c b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformDarwin.c
index 23bdb7f37179..29541c74d5a6 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformDarwin.c
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformDarwin.c
@@ -6,6 +6,9 @@
|*
\*===----------------------------------------------------------------------===*/
+// Note: This is linked into the Darwin kernel, and must remain compatible
+// with freestanding compilation. See `darwin_add_builtin_libraries`.
+
#include "InstrProfiling.h"
#if defined(__APPLE__)
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformLinux.c b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformLinux.c
index becfe1fd9f5a..c9fb481f8e90 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformLinux.c
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformLinux.c
@@ -43,7 +43,7 @@ uint64_t
__prof_cnts_sect_data[0] COMPILER_RT_SECTION(INSTR_PROF_CNTS_SECT_NAME);
uint32_t
__prof_orderfile_sect_data[0] COMPILER_RT_SECTION(INSTR_PROF_ORDERFILE_SECT_NAME);
-char __prof_nms_sect_data[0] COMPILER_RT_SECTION(INSTR_PROF_NAME_SECT_NAME);
+const char __prof_nms_sect_data[0] COMPILER_RT_SECTION(INSTR_PROF_NAME_SECT_NAME);
ValueProfNode __prof_vnodes_sect_data[0] COMPILER_RT_SECTION(INSTR_PROF_VNODES_SECT_NAME);
COMPILER_RT_VISIBILITY const __llvm_profile_data *
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPort.h b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPort.h
index 4493dd512ff0..cb66c5964ad1 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPort.h
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPort.h
@@ -24,11 +24,17 @@
#define COMPILER_RT_ALWAYS_INLINE __forceinline
#define COMPILER_RT_CLEANUP(x)
#elif __GNUC__
-#define COMPILER_RT_ALIGNAS(x) __attribute__((aligned(x)))
+#ifdef _WIN32
+#define COMPILER_RT_FTRUNCATE(f, l) _chsize(fileno(f), l)
+#define COMPILER_RT_VISIBILITY
+#define COMPILER_RT_WEAK __attribute__((selectany))
+#else
+#define COMPILER_RT_FTRUNCATE(f, l) ftruncate(fileno(f), l)
#define COMPILER_RT_VISIBILITY __attribute__((visibility("hidden")))
#define COMPILER_RT_WEAK __attribute__((weak))
+#endif
+#define COMPILER_RT_ALIGNAS(x) __attribute__((aligned(x)))
#define COMPILER_RT_ALLOCA __builtin_alloca
-#define COMPILER_RT_FTRUNCATE(f,l) ftruncate(fileno(f),l)
#define COMPILER_RT_ALWAYS_INLINE inline __attribute((always_inline))
#define COMPILER_RT_CLEANUP(x) __attribute__((cleanup(x)))
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingValue.c b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingValue.c
index fd53cac3dff3..7f368b9f8d4e 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingValue.c
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingValue.c
@@ -6,6 +6,7 @@
|*
\*===----------------------------------------------------------------------===*/
+#include <assert.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
@@ -17,13 +18,14 @@
#define INSTR_PROF_VALUE_PROF_DATA
#define INSTR_PROF_COMMON_API_IMPL
+#define INSTR_PROF_VALUE_PROF_MEMOP_API
#include "profile/InstrProfData.inc"
static int hasStaticCounters = 1;
static int OutOfNodesWarnings = 0;
static int hasNonDefaultValsPerSite = 0;
#define INSTR_PROF_MAX_VP_WARNS 10
-#define INSTR_PROF_DEFAULT_NUM_VAL_PER_SITE 16
+#define INSTR_PROF_DEFAULT_NUM_VAL_PER_SITE 24
#define INSTR_PROF_VNODE_POOL_SIZE 1024
#ifndef _MSC_VER
@@ -93,6 +95,8 @@ static int allocateValueProfileCounters(__llvm_profile_data *Data) {
for (VKI = IPVK_First; VKI <= IPVK_Last; ++VKI)
NumVSites += Data->NumValueSites[VKI];
+ // If NumVSites = 0, calloc is allowed to return a non-null pointer.
+ assert(NumVSites > 0 && "NumVSites can't be zero");
ValueProfNode **Mem =
(ValueProfNode **)calloc(NumVSites, sizeof(ValueProfNode *));
if (!Mem)
@@ -235,32 +239,15 @@ __llvm_profile_instrument_target_value(uint64_t TargetValue, void *Data,
}
/*
- * The target values are partitioned into multiple regions/ranges. There is one
- * contiguous region which is precise -- every value in the range is tracked
- * individually. A value outside the precise region will be collapsed into one
- * value depending on the region it falls in.
- *
- * There are three regions:
- * 1. (-inf, PreciseRangeStart) and (PreciseRangeLast, LargeRangeValue) belong
- * to one region -- all values here should be mapped to one value of
- * "PreciseRangeLast + 1".
- * 2. [PreciseRangeStart, PreciseRangeLast]
- * 3. Large values: [LargeValue, +inf) maps to one value of LargeValue.
- *
- * The range for large values is optional. The default value of INT64_MIN
- * indicates it is not specified.
+ * The target values are partitioned into multiple ranges. The range spec is
+ * defined in InstrProfData.inc.
*/
-COMPILER_RT_VISIBILITY void __llvm_profile_instrument_range(
- uint64_t TargetValue, void *Data, uint32_t CounterIndex,
- int64_t PreciseRangeStart, int64_t PreciseRangeLast, int64_t LargeValue) {
-
- if (LargeValue != INT64_MIN && (int64_t)TargetValue >= LargeValue)
- TargetValue = LargeValue;
- else if ((int64_t)TargetValue < PreciseRangeStart ||
- (int64_t)TargetValue > PreciseRangeLast)
- TargetValue = PreciseRangeLast + 1;
-
- __llvm_profile_instrument_target(TargetValue, Data, CounterIndex);
+COMPILER_RT_VISIBILITY void
+__llvm_profile_instrument_memop(uint64_t TargetValue, void *Data,
+ uint32_t CounterIndex) {
+ // Map the target value to the representative value of its range.
+ uint64_t RepValue = InstrProfGetRangeRepValue(TargetValue);
+ __llvm_profile_instrument_target(RepValue, Data, CounterIndex);
}
/*
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingVersionVar.c b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingVersionVar.c
new file mode 100644
index 000000000000..a6f222150794
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingVersionVar.c
@@ -0,0 +1,17 @@
+/*===- InstrProfilingVersionVar.c - profile version variable setup -------===*\
+|*
+|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+|* See https://llvm.org/LICENSE.txt for license information.
+|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+|*
+\*===----------------------------------------------------------------------===*/
+
+#include "InstrProfiling.h"
+
+/* uint64 __llvm_profile_raw_version
+ *
+ * The runtime should only provide its own definition of this symbol when the
+ * user has not specified one. Set this up by moving the runtime's copy of this
+ * symbol to an object file within the archive.
+ */
+COMPILER_RT_WEAK uint64_t INSTR_PROF_RAW_VERSION_VAR = INSTR_PROF_RAW_VERSION;
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingWriter.c b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingWriter.c
index c34e110a6959..16ad965ff608 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingWriter.c
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingWriter.c
@@ -6,6 +6,9 @@
|*
\*===----------------------------------------------------------------------===*/
+// Note: This is linked into the Darwin kernel, and must remain compatible
+// with freestanding compilation. See `darwin_add_builtin_libraries`.
+
#ifdef _MSC_VER
/* For _alloca */
#include <malloc.h>
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp
index ec77b9cbfee8..3157b35ffaf8 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp
@@ -137,8 +137,14 @@ static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
#endif // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
+namespace {
const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;
+struct BlockHeader {
+ u64 magic;
+};
+} // namespace
+
static void NORETURN ReportInternalAllocatorOutOfMemory(uptr requested_size) {
SetAllocatorOutOfMemory();
Report("FATAL: %s: internal allocator is out of memory trying to allocate "
@@ -147,27 +153,28 @@ static void NORETURN ReportInternalAllocatorOutOfMemory(uptr requested_size) {
}
void *InternalAlloc(uptr size, InternalAllocatorCache *cache, uptr alignment) {
- if (size + sizeof(u64) < size)
+ uptr s = size + sizeof(BlockHeader);
+ if (s < size)
return nullptr;
- void *p = RawInternalAlloc(size + sizeof(u64), cache, alignment);
+ BlockHeader *p = (BlockHeader *)RawInternalAlloc(s, cache, alignment);
if (UNLIKELY(!p))
- ReportInternalAllocatorOutOfMemory(size + sizeof(u64));
- ((u64*)p)[0] = kBlockMagic;
- return (char*)p + sizeof(u64);
+ ReportInternalAllocatorOutOfMemory(s);
+ p->magic = kBlockMagic;
+ return p + 1;
}
void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
if (!addr)
return InternalAlloc(size, cache);
- if (size + sizeof(u64) < size)
+ uptr s = size + sizeof(BlockHeader);
+ if (s < size)
return nullptr;
- addr = (char*)addr - sizeof(u64);
- size = size + sizeof(u64);
- CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
- void *p = RawInternalRealloc(addr, size, cache);
+ BlockHeader *p = (BlockHeader *)addr - 1;
+ CHECK_EQ(kBlockMagic, p->magic);
+ p = (BlockHeader *)RawInternalRealloc(p, s, cache);
if (UNLIKELY(!p))
- ReportInternalAllocatorOutOfMemory(size);
- return (char*)p + sizeof(u64);
+ ReportInternalAllocatorOutOfMemory(s);
+ return p + 1;
}
void *InternalReallocArray(void *addr, uptr count, uptr size,
@@ -198,10 +205,10 @@ void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
void InternalFree(void *addr, InternalAllocatorCache *cache) {
if (!addr)
return;
- addr = (char*)addr - sizeof(u64);
- CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
- ((u64*)addr)[0] = 0;
- RawInternalFree(addr, cache);
+ BlockHeader *p = (BlockHeader *)addr - 1;
+ CHECK_EQ(kBlockMagic, p->magic);
+ p->magic = 0;
+ RawInternalFree(p, cache);
}
// LowLevelAllocator
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h
index 23d589888d3b..5ec47416fe0c 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h
@@ -52,14 +52,14 @@ struct NoOpMapUnmapCallback {
// Callback type for iterating over chunks.
typedef void (*ForEachChunkCallback)(uptr chunk, void *arg);
-INLINE u32 Rand(u32 *state) { // ANSI C linear congruential PRNG.
+inline u32 Rand(u32 *state) { // ANSI C linear congruential PRNG.
return (*state = *state * 1103515245 + 12345) >> 16;
}
-INLINE u32 RandN(u32 *state, u32 n) { return Rand(state) % n; } // [0, n)
+inline u32 RandN(u32 *state, u32 n) { return Rand(state) % n; } // [0, n)
template<typename T>
-INLINE void RandomShuffle(T *a, u32 n, u32 *rand_state) {
+inline void RandomShuffle(T *a, u32 n, u32 *rand_state) {
if (n <= 1) return;
u32 state = *rand_state;
for (u32 i = n - 1; i > 0; i--)
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_checks.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_checks.h
index fc426f0e74f4..1cc3992c4c9f 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_checks.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_checks.h
@@ -27,7 +27,7 @@ namespace __sanitizer {
void SetErrnoToENOMEM();
// A common errno setting logic shared by almost all sanitizer allocator APIs.
-INLINE void *SetErrnoOnNull(void *ptr) {
+inline void *SetErrnoOnNull(void *ptr) {
if (UNLIKELY(!ptr))
SetErrnoToENOMEM();
return ptr;
@@ -41,7 +41,7 @@ INLINE void *SetErrnoOnNull(void *ptr) {
// two and that the size is a multiple of alignment for POSIX implementation,
// and a bit relaxed requirement for non-POSIX ones, that the size is a multiple
// of alignment.
-INLINE bool CheckAlignedAllocAlignmentAndSize(uptr alignment, uptr size) {
+inline bool CheckAlignedAllocAlignmentAndSize(uptr alignment, uptr size) {
#if SANITIZER_POSIX
return alignment != 0 && IsPowerOfTwo(alignment) &&
(size & (alignment - 1)) == 0;
@@ -52,13 +52,13 @@ INLINE bool CheckAlignedAllocAlignmentAndSize(uptr alignment, uptr size) {
// Checks posix_memalign() parameters, verifies that alignment is a power of two
// and a multiple of sizeof(void *).
-INLINE bool CheckPosixMemalignAlignment(uptr alignment) {
+inline bool CheckPosixMemalignAlignment(uptr alignment) {
return alignment != 0 && IsPowerOfTwo(alignment) &&
(alignment % sizeof(void *)) == 0;
}
// Returns true if calloc(size, n) call overflows on size*n calculation.
-INLINE bool CheckForCallocOverflow(uptr size, uptr n) {
+inline bool CheckForCallocOverflow(uptr size, uptr n) {
if (!size)
return false;
uptr max = (uptr)-1L;
@@ -67,7 +67,7 @@ INLINE bool CheckForCallocOverflow(uptr size, uptr n) {
// Returns true if the size passed to pvalloc overflows when rounded to the next
// multiple of page_size.
-INLINE bool CheckForPvallocOverflow(uptr size, uptr page_size) {
+inline bool CheckForPvallocOverflow(uptr size, uptr page_size) {
return RoundUpTo(size, page_size) < size;
}
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h
index 3b1838b3985a..b90dabbf7769 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h
@@ -153,6 +153,7 @@ class SizeClassAllocator32 {
}
void *GetMetaData(const void *p) {
+ CHECK(kMetadataSize);
CHECK(PointerIsMine(p));
uptr mem = reinterpret_cast<uptr>(p);
uptr beg = ComputeRegionBeg(mem);
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
index 1d9a29c70f30..0a18b0c58ef7 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
@@ -186,13 +186,13 @@ class SizeClassAllocator64 {
void *GetBlockBegin(const void *p) {
uptr class_id = GetSizeClass(p);
+ if (class_id >= kNumClasses) return nullptr;
uptr size = ClassIdToSize(class_id);
if (!size) return nullptr;
uptr chunk_idx = GetChunkIdx((uptr)p, size);
uptr reg_beg = GetRegionBegin(p);
uptr beg = chunk_idx * size;
uptr next_beg = beg + size;
- if (class_id >= kNumClasses) return nullptr;
const RegionInfo *region = AddressSpaceView::Load(GetRegionInfo(class_id));
if (region->mapped_user >= next_beg)
return reinterpret_cast<void*>(reg_beg + beg);
@@ -207,6 +207,7 @@ class SizeClassAllocator64 {
static uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
void *GetMetaData(const void *p) {
+ CHECK(kMetadataSize);
uptr class_id = GetSizeClass(p);
uptr size = ClassIdToSize(class_id);
uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_report.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_report.cpp
index d74e08010d5d..1c6520819ef9 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_report.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_report.cpp
@@ -134,4 +134,12 @@ void NORETURN ReportOutOfMemory(uptr requested_size, const StackTrace *stack) {
Die();
}
+void NORETURN ReportRssLimitExceeded(const StackTrace *stack) {
+ {
+ ScopedAllocatorErrorReport report("rss-limit-exceeded", stack);
+ Report("ERROR: %s: allocator exceeded the RSS limit\n", SanitizerToolName);
+ }
+ Die();
+}
+
} // namespace __sanitizer
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_report.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_report.h
index 0653c365c1cd..6e4e6b135491 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_report.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_report.h
@@ -33,6 +33,7 @@ void NORETURN ReportInvalidPosixMemalignAlignment(uptr alignment,
void NORETURN ReportAllocationSizeTooBig(uptr user_size, uptr max_size,
const StackTrace *stack);
void NORETURN ReportOutOfMemory(uptr requested_size, const StackTrace *stack);
+void NORETURN ReportRssLimitExceeded(const StackTrace *stack);
} // namespace __sanitizer
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h
index 1d128f55de05..61fb98742373 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h
@@ -18,8 +18,8 @@
// (currently, 32 bits and internal allocator).
class LargeMmapAllocatorPtrArrayStatic {
public:
- INLINE void *Init() { return &p_[0]; }
- INLINE void EnsureSpace(uptr n) { CHECK_LT(n, kMaxNumChunks); }
+ inline void *Init() { return &p_[0]; }
+ inline void EnsureSpace(uptr n) { CHECK_LT(n, kMaxNumChunks); }
private:
static const int kMaxNumChunks = 1 << 15;
uptr p_[kMaxNumChunks];
@@ -31,14 +31,14 @@ class LargeMmapAllocatorPtrArrayStatic {
// same functionality in Fuchsia case, which does not support MAP_NORESERVE.
class LargeMmapAllocatorPtrArrayDynamic {
public:
- INLINE void *Init() {
+ inline void *Init() {
uptr p = address_range_.Init(kMaxNumChunks * sizeof(uptr),
SecondaryAllocatorName);
CHECK(p);
return reinterpret_cast<void*>(p);
}
- INLINE void EnsureSpace(uptr n) {
+ inline void EnsureSpace(uptr n) {
CHECK_LT(n, kMaxNumChunks);
DCHECK(n <= n_reserved_);
if (UNLIKELY(n == n_reserved_)) {
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_atomic.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_atomic.h
index a798a0cf25d9..46f06957228c 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_atomic.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_atomic.h
@@ -72,12 +72,12 @@ namespace __sanitizer {
// Clutter-reducing helpers.
template<typename T>
-INLINE typename T::Type atomic_load_relaxed(const volatile T *a) {
+inline typename T::Type atomic_load_relaxed(const volatile T *a) {
return atomic_load(a, memory_order_relaxed);
}
template<typename T>
-INLINE void atomic_store_relaxed(volatile T *a, typename T::Type v) {
+inline void atomic_store_relaxed(volatile T *a, typename T::Type v) {
atomic_store(a, v, memory_order_relaxed);
}
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang.h
index c40461ebc3bf..fc13ca52dda7 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang.h
@@ -34,16 +34,16 @@ namespace __sanitizer {
// See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
// for mappings of the memory model to different processors.
-INLINE void atomic_signal_fence(memory_order) {
+inline void atomic_signal_fence(memory_order) {
__asm__ __volatile__("" ::: "memory");
}
-INLINE void atomic_thread_fence(memory_order) {
+inline void atomic_thread_fence(memory_order) {
__sync_synchronize();
}
template<typename T>
-INLINE typename T::Type atomic_fetch_add(volatile T *a,
+inline typename T::Type atomic_fetch_add(volatile T *a,
typename T::Type v, memory_order mo) {
(void)mo;
DCHECK(!((uptr)a % sizeof(*a)));
@@ -51,7 +51,7 @@ INLINE typename T::Type atomic_fetch_add(volatile T *a,
}
template<typename T>
-INLINE typename T::Type atomic_fetch_sub(volatile T *a,
+inline typename T::Type atomic_fetch_sub(volatile T *a,
typename T::Type v, memory_order mo) {
(void)mo;
DCHECK(!((uptr)a % sizeof(*a)));
@@ -59,7 +59,7 @@ INLINE typename T::Type atomic_fetch_sub(volatile T *a,
}
template<typename T>
-INLINE typename T::Type atomic_exchange(volatile T *a,
+inline typename T::Type atomic_exchange(volatile T *a,
typename T::Type v, memory_order mo) {
DCHECK(!((uptr)a % sizeof(*a)));
if (mo & (memory_order_release | memory_order_acq_rel | memory_order_seq_cst))
@@ -71,7 +71,7 @@ INLINE typename T::Type atomic_exchange(volatile T *a,
}
template <typename T>
-INLINE bool atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp,
+inline bool atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp,
typename T::Type xchg,
memory_order mo) {
typedef typename T::Type Type;
@@ -84,7 +84,7 @@ INLINE bool atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp,
}
template<typename T>
-INLINE bool atomic_compare_exchange_weak(volatile T *a,
+inline bool atomic_compare_exchange_weak(volatile T *a,
typename T::Type *cmp,
typename T::Type xchg,
memory_order mo) {
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_mips.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_mips.h
index d369aeb9935c..59155e9883eb 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_mips.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_mips.h
@@ -37,7 +37,7 @@ static struct {
} __attribute__((aligned(32))) lock = {0, {0}};
template <>
-INLINE atomic_uint64_t::Type atomic_fetch_add(volatile atomic_uint64_t *ptr,
+inline atomic_uint64_t::Type atomic_fetch_add(volatile atomic_uint64_t *ptr,
atomic_uint64_t::Type val,
memory_order mo) {
DCHECK(mo &
@@ -55,14 +55,14 @@ INLINE atomic_uint64_t::Type atomic_fetch_add(volatile atomic_uint64_t *ptr,
}
template <>
-INLINE atomic_uint64_t::Type atomic_fetch_sub(volatile atomic_uint64_t *ptr,
+inline atomic_uint64_t::Type atomic_fetch_sub(volatile atomic_uint64_t *ptr,
atomic_uint64_t::Type val,
memory_order mo) {
return atomic_fetch_add(ptr, -val, mo);
}
template <>
-INLINE bool atomic_compare_exchange_strong(volatile atomic_uint64_t *ptr,
+inline bool atomic_compare_exchange_strong(volatile atomic_uint64_t *ptr,
atomic_uint64_t::Type *cmp,
atomic_uint64_t::Type xchg,
memory_order mo) {
@@ -87,7 +87,7 @@ INLINE bool atomic_compare_exchange_strong(volatile atomic_uint64_t *ptr,
}
template <>
-INLINE atomic_uint64_t::Type atomic_load(const volatile atomic_uint64_t *ptr,
+inline atomic_uint64_t::Type atomic_load(const volatile atomic_uint64_t *ptr,
memory_order mo) {
DCHECK(mo &
(memory_order_relaxed | memory_order_releasae | memory_order_seq_cst));
@@ -100,7 +100,7 @@ INLINE atomic_uint64_t::Type atomic_load(const volatile atomic_uint64_t *ptr,
}
template <>
-INLINE void atomic_store(volatile atomic_uint64_t *ptr, atomic_uint64_t::Type v,
+inline void atomic_store(volatile atomic_uint64_t *ptr, atomic_uint64_t::Type v,
memory_order mo) {
DCHECK(mo &
(memory_order_relaxed | memory_order_releasae | memory_order_seq_cst));
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_other.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_other.h
index b8685a854267..4a39889e534a 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_other.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_other.h
@@ -17,12 +17,12 @@
namespace __sanitizer {
-INLINE void proc_yield(int cnt) {
+inline void proc_yield(int cnt) {
__asm__ __volatile__("" ::: "memory");
}
template<typename T>
-INLINE typename T::Type atomic_load(
+inline typename T::Type atomic_load(
const volatile T *a, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_consume
| memory_order_acquire | memory_order_seq_cst));
@@ -50,17 +50,14 @@ INLINE typename T::Type atomic_load(
__sync_synchronize();
}
} else {
- // 64-bit load on 32-bit platform.
- // Gross, but simple and reliable.
- // Assume that it is not in read-only memory.
- v = __sync_fetch_and_add(
- const_cast<typename T::Type volatile *>(&a->val_dont_use), 0);
+ __atomic_load(const_cast<typename T::Type volatile *>(&a->val_dont_use), &v,
+ __ATOMIC_SEQ_CST);
}
return v;
}
template<typename T>
-INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
+inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_release
| memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a)));
@@ -79,16 +76,7 @@ INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
__sync_synchronize();
}
} else {
- // 64-bit store on 32-bit platform.
- // Gross, but simple and reliable.
- typename T::Type cmp = a->val_dont_use;
- typename T::Type cur;
- for (;;) {
- cur = __sync_val_compare_and_swap(&a->val_dont_use, cmp, v);
- if (cur == cmp || cur == v)
- break;
- cmp = cur;
- }
+ __atomic_store(&a->val_dont_use, &v, __ATOMIC_SEQ_CST);
}
}
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_x86.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_x86.h
index f2ce553baa7a..51597b492741 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_x86.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_x86.h
@@ -16,7 +16,7 @@
namespace __sanitizer {
-INLINE void proc_yield(int cnt) {
+inline void proc_yield(int cnt) {
__asm__ __volatile__("" ::: "memory");
for (int i = 0; i < cnt; i++)
__asm__ __volatile__("pause");
@@ -24,7 +24,7 @@ INLINE void proc_yield(int cnt) {
}
template<typename T>
-INLINE typename T::Type atomic_load(
+inline typename T::Type atomic_load(
const volatile T *a, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_consume
| memory_order_acquire | memory_order_seq_cst));
@@ -70,7 +70,7 @@ INLINE typename T::Type atomic_load(
}
template<typename T>
-INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
+inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_release
| memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a)));
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_atomic_msvc.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_atomic_msvc.h
index 6a7c5465dcbb..31317adcdfc9 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_atomic_msvc.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_atomic_msvc.h
@@ -54,21 +54,21 @@ extern "C" long long _InterlockedExchangeAdd64(long long volatile *Addend,
namespace __sanitizer {
-INLINE void atomic_signal_fence(memory_order) {
+inline void atomic_signal_fence(memory_order) {
_ReadWriteBarrier();
}
-INLINE void atomic_thread_fence(memory_order) {
+inline void atomic_thread_fence(memory_order) {
_mm_mfence();
}
-INLINE void proc_yield(int cnt) {
+inline void proc_yield(int cnt) {
for (int i = 0; i < cnt; i++)
_mm_pause();
}
template<typename T>
-INLINE typename T::Type atomic_load(
+inline typename T::Type atomic_load(
const volatile T *a, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_consume
| memory_order_acquire | memory_order_seq_cst));
@@ -86,7 +86,7 @@ INLINE typename T::Type atomic_load(
}
template<typename T>
-INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
+inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_release
| memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a)));
@@ -102,7 +102,7 @@ INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
atomic_thread_fence(memory_order_seq_cst);
}
-INLINE u32 atomic_fetch_add(volatile atomic_uint32_t *a,
+inline u32 atomic_fetch_add(volatile atomic_uint32_t *a,
u32 v, memory_order mo) {
(void)mo;
DCHECK(!((uptr)a % sizeof(*a)));
@@ -110,7 +110,7 @@ INLINE u32 atomic_fetch_add(volatile atomic_uint32_t *a,
(long)v);
}
-INLINE uptr atomic_fetch_add(volatile atomic_uintptr_t *a,
+inline uptr atomic_fetch_add(volatile atomic_uintptr_t *a,
uptr v, memory_order mo) {
(void)mo;
DCHECK(!((uptr)a % sizeof(*a)));
@@ -123,7 +123,7 @@ INLINE uptr atomic_fetch_add(volatile atomic_uintptr_t *a,
#endif
}
-INLINE u32 atomic_fetch_sub(volatile atomic_uint32_t *a,
+inline u32 atomic_fetch_sub(volatile atomic_uint32_t *a,
u32 v, memory_order mo) {
(void)mo;
DCHECK(!((uptr)a % sizeof(*a)));
@@ -131,7 +131,7 @@ INLINE u32 atomic_fetch_sub(volatile atomic_uint32_t *a,
-(long)v);
}
-INLINE uptr atomic_fetch_sub(volatile atomic_uintptr_t *a,
+inline uptr atomic_fetch_sub(volatile atomic_uintptr_t *a,
uptr v, memory_order mo) {
(void)mo;
DCHECK(!((uptr)a % sizeof(*a)));
@@ -144,28 +144,28 @@ INLINE uptr atomic_fetch_sub(volatile atomic_uintptr_t *a,
#endif
}
-INLINE u8 atomic_exchange(volatile atomic_uint8_t *a,
+inline u8 atomic_exchange(volatile atomic_uint8_t *a,
u8 v, memory_order mo) {
(void)mo;
DCHECK(!((uptr)a % sizeof(*a)));
return (u8)_InterlockedExchange8((volatile char*)&a->val_dont_use, v);
}
-INLINE u16 atomic_exchange(volatile atomic_uint16_t *a,
+inline u16 atomic_exchange(volatile atomic_uint16_t *a,
u16 v, memory_order mo) {
(void)mo;
DCHECK(!((uptr)a % sizeof(*a)));
return (u16)_InterlockedExchange16((volatile short*)&a->val_dont_use, v);
}
-INLINE u32 atomic_exchange(volatile atomic_uint32_t *a,
+inline u32 atomic_exchange(volatile atomic_uint32_t *a,
u32 v, memory_order mo) {
(void)mo;
DCHECK(!((uptr)a % sizeof(*a)));
return (u32)_InterlockedExchange((volatile long*)&a->val_dont_use, v);
}
-INLINE bool atomic_compare_exchange_strong(volatile atomic_uint8_t *a,
+inline bool atomic_compare_exchange_strong(volatile atomic_uint8_t *a,
u8 *cmp,
u8 xchgv,
memory_order mo) {
@@ -191,7 +191,7 @@ INLINE bool atomic_compare_exchange_strong(volatile atomic_uint8_t *a,
return false;
}
-INLINE bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a,
+inline bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a,
uptr *cmp,
uptr xchg,
memory_order mo) {
@@ -204,7 +204,7 @@ INLINE bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a,
return false;
}
-INLINE bool atomic_compare_exchange_strong(volatile atomic_uint16_t *a,
+inline bool atomic_compare_exchange_strong(volatile atomic_uint16_t *a,
u16 *cmp,
u16 xchg,
memory_order mo) {
@@ -217,7 +217,7 @@ INLINE bool atomic_compare_exchange_strong(volatile atomic_uint16_t *a,
return false;
}
-INLINE bool atomic_compare_exchange_strong(volatile atomic_uint32_t *a,
+inline bool atomic_compare_exchange_strong(volatile atomic_uint32_t *a,
u32 *cmp,
u32 xchg,
memory_order mo) {
@@ -230,7 +230,7 @@ INLINE bool atomic_compare_exchange_strong(volatile atomic_uint32_t *a,
return false;
}
-INLINE bool atomic_compare_exchange_strong(volatile atomic_uint64_t *a,
+inline bool atomic_compare_exchange_strong(volatile atomic_uint64_t *a,
u64 *cmp,
u64 xchg,
memory_order mo) {
@@ -244,7 +244,7 @@ INLINE bool atomic_compare_exchange_strong(volatile atomic_uint64_t *a,
}
template<typename T>
-INLINE bool atomic_compare_exchange_weak(volatile T *a,
+inline bool atomic_compare_exchange_weak(volatile T *a,
typename T::Type *cmp,
typename T::Type xchg,
memory_order mo) {
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common.h
index 07b307a602c9..a6532eee164d 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common.h
@@ -53,25 +53,25 @@ const u64 kExternalPCBit = 1ULL << 60;
extern const char *SanitizerToolName; // Can be changed by the tool.
extern atomic_uint32_t current_verbosity;
-INLINE void SetVerbosity(int verbosity) {
+inline void SetVerbosity(int verbosity) {
atomic_store(&current_verbosity, verbosity, memory_order_relaxed);
}
-INLINE int Verbosity() {
+inline int Verbosity() {
return atomic_load(&current_verbosity, memory_order_relaxed);
}
#if SANITIZER_ANDROID
-INLINE uptr GetPageSize() {
+inline uptr GetPageSize() {
// Android post-M sysconf(_SC_PAGESIZE) crashes if called from .preinit_array.
return 4096;
}
-INLINE uptr GetPageSizeCached() {
+inline uptr GetPageSizeCached() {
return 4096;
}
#else
uptr GetPageSize();
extern uptr PageSizeCached;
-INLINE uptr GetPageSizeCached() {
+inline uptr GetPageSizeCached() {
if (!PageSizeCached)
PageSizeCached = GetPageSize();
return PageSizeCached;
@@ -91,7 +91,7 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
// Memory management
void *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false);
-INLINE void *MmapOrDieQuietly(uptr size, const char *mem_type) {
+inline void *MmapOrDieQuietly(uptr size, const char *mem_type) {
return MmapOrDie(size, mem_type, /*raw_report*/ true);
}
void UnmapOrDie(void *addr, uptr size);
@@ -121,6 +121,31 @@ bool MprotectReadOnly(uptr addr, uptr size);
void MprotectMallocZones(void *addr, int prot);
+#if SANITIZER_LINUX
+// Unmap memory. Currently only used on Linux.
+void UnmapFromTo(uptr from, uptr to);
+#endif
+
+// Maps shadow_size_bytes of shadow memory and returns shadow address. It will
+// be aligned to the mmap granularity * 2^shadow_scale, or to
+// 2^min_shadow_base_alignment if that is larger. The returned address will
+// have max(2^min_shadow_base_alignment, mmap granularity) on the left, and
+// shadow_size_bytes bytes on the right, which on linux is mapped no access.
+// The high_mem_end may be updated if the original shadow size doesn't fit.
+uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
+ uptr min_shadow_base_alignment, uptr &high_mem_end);
+
+// Reserve memory range [beg, end]. If madvise_shadow is true then apply
+// madvise (e.g. hugepages, core dumping) requested by options.
+void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name,
+ bool madvise_shadow = true);
+
+// Protect size bytes of memory starting at addr. Also try to protect
+// several pages at the start of the address space as specified by
+// zero_base_shadow_start, at most up to the size or zero_base_max_shadow_start.
+void ProtectGap(uptr addr, uptr size, uptr zero_base_shadow_start,
+ uptr zero_base_max_shadow_start);
+
// Find an available address space.
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
uptr *largest_gap_found, uptr *max_occupied_addr);
@@ -229,7 +254,6 @@ void UpdateProcessName();
void CacheBinaryName();
void DisableCoreDumperIfNecessary();
void DumpProcessMap();
-void PrintModuleMap();
const char *GetEnv(const char *name);
bool SetEnv(const char *name, const char *value);
@@ -349,7 +373,7 @@ unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask);
}
#endif
-INLINE uptr MostSignificantSetBitIndex(uptr x) {
+inline uptr MostSignificantSetBitIndex(uptr x) {
CHECK_NE(x, 0U);
unsigned long up;
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
@@ -366,7 +390,7 @@ INLINE uptr MostSignificantSetBitIndex(uptr x) {
return up;
}
-INLINE uptr LeastSignificantSetBitIndex(uptr x) {
+inline uptr LeastSignificantSetBitIndex(uptr x) {
CHECK_NE(x, 0U);
unsigned long up;
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
@@ -383,11 +407,11 @@ INLINE uptr LeastSignificantSetBitIndex(uptr x) {
return up;
}
-INLINE bool IsPowerOfTwo(uptr x) {
+inline bool IsPowerOfTwo(uptr x) {
return (x & (x - 1)) == 0;
}
-INLINE uptr RoundUpToPowerOfTwo(uptr size) {
+inline uptr RoundUpToPowerOfTwo(uptr size) {
CHECK(size);
if (IsPowerOfTwo(size)) return size;
@@ -397,20 +421,20 @@ INLINE uptr RoundUpToPowerOfTwo(uptr size) {
return 1ULL << (up + 1);
}
-INLINE uptr RoundUpTo(uptr size, uptr boundary) {
+inline uptr RoundUpTo(uptr size, uptr boundary) {
RAW_CHECK(IsPowerOfTwo(boundary));
return (size + boundary - 1) & ~(boundary - 1);
}
-INLINE uptr RoundDownTo(uptr x, uptr boundary) {
+inline uptr RoundDownTo(uptr x, uptr boundary) {
return x & ~(boundary - 1);
}
-INLINE bool IsAligned(uptr a, uptr alignment) {
+inline bool IsAligned(uptr a, uptr alignment) {
return (a & (alignment - 1)) == 0;
}
-INLINE uptr Log2(uptr x) {
+inline uptr Log2(uptr x) {
CHECK(IsPowerOfTwo(x));
return LeastSignificantSetBitIndex(x);
}
@@ -426,14 +450,14 @@ template<class T> void Swap(T& a, T& b) {
}
// Char handling
-INLINE bool IsSpace(int c) {
+inline bool IsSpace(int c) {
return (c == ' ') || (c == '\n') || (c == '\t') ||
(c == '\f') || (c == '\r') || (c == '\v');
}
-INLINE bool IsDigit(int c) {
+inline bool IsDigit(int c) {
return (c >= '0') && (c <= '9');
}
-INLINE int ToLower(int c) {
+inline int ToLower(int c) {
return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
}
@@ -443,6 +467,7 @@ INLINE int ToLower(int c) {
template<typename T>
class InternalMmapVectorNoCtor {
public:
+ using value_type = T;
void Initialize(uptr initial_capacity) {
capacity_bytes_ = 0;
size_ = 0;
@@ -627,9 +652,13 @@ void Sort(T *v, uptr size, Compare comp = {}) {
// Works like std::lower_bound: finds the first element that is not less
// than the val.
-template <class Container, class Value, class Compare>
-uptr InternalLowerBound(const Container &v, uptr first, uptr last,
- const Value &val, Compare comp) {
+template <class Container,
+ class Compare = CompareLess<typename Container::value_type>>
+uptr InternalLowerBound(const Container &v,
+ const typename Container::value_type &val,
+ Compare comp = {}) {
+ uptr first = 0;
+ uptr last = v.size();
while (last > first) {
uptr mid = (first + last) / 2;
if (comp(v[mid], val))
@@ -649,9 +678,31 @@ enum ModuleArch {
kModuleArchARMV7,
kModuleArchARMV7S,
kModuleArchARMV7K,
- kModuleArchARM64
+ kModuleArchARM64,
+ kModuleArchRISCV64
};
+// Sorts and removes duplicates from the container.
+template <class Container,
+ class Compare = CompareLess<typename Container::value_type>>
+void SortAndDedup(Container &v, Compare comp = {}) {
+ Sort(v.data(), v.size(), comp);
+ uptr size = v.size();
+ if (size < 2)
+ return;
+ uptr last = 0;
+ for (uptr i = 1; i < size; ++i) {
+ if (comp(v[last], v[i])) {
+ ++last;
+ if (last != i)
+ v[last] = v[i];
+ } else {
+ CHECK(!comp(v[i], v[last]));
+ }
+ }
+ v.resize(last + 1);
+}
+
// Opens the file 'file_name" and reads up to 'max_len' bytes.
// The resulting buffer is mmaped and stored in '*buff'.
// Returns true if file was successfully opened and read.
@@ -693,6 +744,8 @@ inline const char *ModuleArchToString(ModuleArch arch) {
return "armv7k";
case kModuleArchARM64:
return "arm64";
+ case kModuleArchRISCV64:
+ return "riscv64";
}
CHECK(0 && "Invalid module arch");
return "";
@@ -815,15 +868,15 @@ void WriteToSyslog(const char *buffer);
#if SANITIZER_MAC || SANITIZER_WIN_TRACE
void LogFullErrorReport(const char *buffer);
#else
-INLINE void LogFullErrorReport(const char *buffer) {}
+inline void LogFullErrorReport(const char *buffer) {}
#endif
#if SANITIZER_LINUX || SANITIZER_MAC
void WriteOneLineToSyslog(const char *s);
void LogMessageOnPrintf(const char *str);
#else
-INLINE void WriteOneLineToSyslog(const char *s) {}
-INLINE void LogMessageOnPrintf(const char *str) {}
+inline void WriteOneLineToSyslog(const char *s) {}
+inline void LogMessageOnPrintf(const char *str) {}
#endif
#if SANITIZER_LINUX || SANITIZER_WIN_TRACE
@@ -831,21 +884,21 @@ INLINE void LogMessageOnPrintf(const char *str) {}
void AndroidLogInit();
void SetAbortMessage(const char *);
#else
-INLINE void AndroidLogInit() {}
+inline void AndroidLogInit() {}
// FIXME: MacOS implementation could use CRSetCrashLogMessage.
-INLINE void SetAbortMessage(const char *) {}
+inline void SetAbortMessage(const char *) {}
#endif
#if SANITIZER_ANDROID
void SanitizerInitializeUnwinder();
AndroidApiLevel AndroidGetApiLevel();
#else
-INLINE void AndroidLogWrite(const char *buffer_unused) {}
-INLINE void SanitizerInitializeUnwinder() {}
-INLINE AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
+inline void AndroidLogWrite(const char *buffer_unused) {}
+inline void SanitizerInitializeUnwinder() {}
+inline AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
#endif
-INLINE uptr GetPthreadDestructorIterations() {
+inline uptr GetPthreadDestructorIterations() {
#if SANITIZER_ANDROID
return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1) ? 8 : 4;
#elif SANITIZER_POSIX
@@ -951,7 +1004,7 @@ RunOnDestruction<Fn> at_scope_exit(Fn fn) {
#if SANITIZER_LINUX && SANITIZER_S390_64
void AvoidCVE_2016_2143();
#else
-INLINE void AvoidCVE_2016_2143() {}
+inline void AvoidCVE_2016_2143() {}
#endif
struct StackDepotStats {
@@ -972,7 +1025,7 @@ bool GetRandom(void *buffer, uptr length, bool blocking = true);
// Returns the number of logical processors on the system.
u32 GetNumberOfCPUs();
extern u32 NumberOfCPUsCached;
-INLINE u32 GetNumberOfCPUsCached() {
+inline u32 GetNumberOfCPUsCached() {
if (!NumberOfCPUsCached)
NumberOfCPUsCached = GetNumberOfCPUs();
return NumberOfCPUsCached;
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc
index d7e0bba76294..d4b9ea5f7f06 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc
@@ -445,8 +445,10 @@ INTERCEPTOR(int, strcmp, const char *s1, const char *s2) {
c2 = (unsigned char)s2[i];
if (c1 != c2 || c1 == '\0') break;
}
- COMMON_INTERCEPTOR_READ_STRING(ctx, s1, i + 1);
- COMMON_INTERCEPTOR_READ_STRING(ctx, s2, i + 1);
+ if (common_flags()->intercept_strcmp) {
+ COMMON_INTERCEPTOR_READ_STRING(ctx, s1, i + 1);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, s2, i + 1);
+ }
int result = CharCmpX(c1, c2);
CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcmp, GET_CALLER_PC(), s1,
s2, result);
@@ -1862,7 +1864,7 @@ UNUSED static void unpoison_passwd(void *ctx, __sanitizer_passwd *pwd) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd->pw_gecos,
REAL(strlen)(pwd->pw_gecos) + 1);
#endif
-#if SANITIZER_MAC || SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_OPENBSD
+#if SANITIZER_MAC || SANITIZER_FREEBSD || SANITIZER_NETBSD
if (pwd->pw_class)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd->pw_class,
REAL(strlen)(pwd->pw_class) + 1);
@@ -3748,7 +3750,7 @@ INTERCEPTOR(char *, strerror, int errnum) {
// static storage.
#if ((_POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600) && !_GNU_SOURCE) || \
SANITIZER_MAC || SANITIZER_ANDROID || SANITIZER_NETBSD || \
- SANITIZER_FREEBSD || SANITIZER_OPENBSD
+ SANITIZER_FREEBSD
// POSIX version. Spec is not clear on whether buf is NULL-terminated.
// At least on OSX, buf contents are valid even when the call fails.
INTERCEPTOR(int, strerror_r, int errnum, char *buf, SIZE_T buflen) {
@@ -4085,6 +4087,41 @@ INTERCEPTOR(int, sigfillset, __sanitizer_sigset_t *set) {
#define INIT_SIGSETOPS
#endif
+#if SANITIZER_INTERCEPT_SIGSET_LOGICOPS
+INTERCEPTOR(int, sigandset, __sanitizer_sigset_t *dst,
+ __sanitizer_sigset_t *src1, __sanitizer_sigset_t *src2) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sigandset, dst, src1, src2);
+ if (src1)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src1, sizeof(*src1));
+ if (src2)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src2, sizeof(*src2));
+ int res = REAL(sigandset)(dst, src1, src2);
+ if (!res && dst)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, sizeof(*dst));
+ return res;
+}
+
+INTERCEPTOR(int, sigorset, __sanitizer_sigset_t *dst,
+ __sanitizer_sigset_t *src1, __sanitizer_sigset_t *src2) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sigorset, dst, src1, src2);
+ if (src1)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src1, sizeof(*src1));
+ if (src2)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src2, sizeof(*src2));
+ int res = REAL(sigorset)(dst, src1, src2);
+ if (!res && dst)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, sizeof(*dst));
+ return res;
+}
+#define INIT_SIGSET_LOGICOPS \
+ COMMON_INTERCEPT_FUNCTION(sigandset); \
+ COMMON_INTERCEPT_FUNCTION(sigorset);
+#else
+#define INIT_SIGSET_LOGICOPS
+#endif
+
#if SANITIZER_INTERCEPT_SIGPENDING
INTERCEPTOR(int, sigpending, __sanitizer_sigset_t *set) {
void *ctx;
@@ -4838,6 +4875,34 @@ INTERCEPTOR(char *, tmpnam_r, char *s) {
#define INIT_TMPNAM_R
#endif
+#if SANITIZER_INTERCEPT_PTSNAME
+INTERCEPTOR(char *, ptsname, int fd) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ptsname, fd);
+ char *res = REAL(ptsname)(fd);
+ if (res != nullptr)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+ return res;
+}
+#define INIT_PTSNAME COMMON_INTERCEPT_FUNCTION(ptsname);
+#else
+#define INIT_PTSNAME
+#endif
+
+#if SANITIZER_INTERCEPT_PTSNAME_R
+INTERCEPTOR(int, ptsname_r, int fd, char *name, SIZE_T namesize) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ptsname_r, fd, name, namesize);
+ int res = REAL(ptsname_r)(fd, name, namesize);
+ if (res == 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ return res;
+}
+#define INIT_PTSNAME_R COMMON_INTERCEPT_FUNCTION(ptsname_r);
+#else
+#define INIT_PTSNAME_R
+#endif
+
#if SANITIZER_INTERCEPT_TTYNAME
INTERCEPTOR(char *, ttyname, int fd) {
void *ctx;
@@ -5809,6 +5874,79 @@ INTERCEPTOR(int, xdr_string, __sanitizer_XDR *xdrs, char **p,
#define INIT_XDR
#endif // SANITIZER_INTERCEPT_XDR
+#if SANITIZER_INTERCEPT_XDRREC
+typedef int (*xdrrec_cb)(char*, char*, int);
+struct XdrRecWrapper {
+ char *handle;
+ xdrrec_cb rd, wr;
+};
+typedef AddrHashMap<XdrRecWrapper *, 11> XdrRecWrapMap;
+static XdrRecWrapMap *xdrrec_wrap_map;
+
+static int xdrrec_wr_wrap(char *handle, char *buf, int count) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(3);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(buf, count);
+ XdrRecWrapper *wrap = (XdrRecWrapper *)handle;
+ return wrap->wr(wrap->handle, buf, count);
+}
+
+static int xdrrec_rd_wrap(char *handle, char *buf, int count) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(3);
+ XdrRecWrapper *wrap = (XdrRecWrapper *)handle;
+ return wrap->rd(wrap->handle, buf, count);
+}
+
+// This doesn't apply to the solaris version as it has a different function
+// signature.
+INTERCEPTOR(void, xdrrec_create, __sanitizer_XDR *xdr, unsigned sndsize,
+ unsigned rcvsize, char *handle, int (*rd)(char*, char*, int),
+ int (*wr)(char*, char*, int)) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, xdrrec_create, xdr, sndsize, rcvsize,
+ handle, rd, wr);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &xdr->x_op, sizeof xdr->x_op);
+
+ // We can't allocate a wrapper on the stack, as the handle is used outside
+ // this stack frame. So we put it on the heap, and keep track of it with
+ // the HashMap (keyed by x_private). When we later need to xdr_destroy,
+ // we can index the map, free the wrapper, and then clean the map entry.
+ XdrRecWrapper *wrap_data =
+ (XdrRecWrapper *)InternalAlloc(sizeof(XdrRecWrapper));
+ wrap_data->handle = handle;
+ wrap_data->rd = rd;
+ wrap_data->wr = wr;
+ if (wr)
+ wr = xdrrec_wr_wrap;
+ if (rd)
+ rd = xdrrec_rd_wrap;
+ handle = (char *)wrap_data;
+
+ REAL(xdrrec_create)(xdr, sndsize, rcvsize, handle, rd, wr);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, xdr, sizeof *xdr);
+
+ XdrRecWrapMap::Handle wrap(xdrrec_wrap_map, xdr->x_private, false, true);
+ *wrap = wrap_data;
+}
+
+// We have to intercept this to be able to free wrapper memory;
+// otherwise it's not necessary.
+INTERCEPTOR(void, xdr_destroy, __sanitizer_XDR *xdr) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, xdr_destroy, xdr);
+
+ XdrRecWrapMap::Handle wrap(xdrrec_wrap_map, xdr->x_private, true);
+ InternalFree(*wrap);
+ REAL(xdr_destroy)(xdr);
+}
+#define INIT_XDRREC_LINUX \
+ static u64 xdrrec_wrap_mem[sizeof(XdrRecWrapMap) / sizeof(u64) + 1]; \
+ xdrrec_wrap_map = new ((void *)&xdrrec_wrap_mem) XdrRecWrapMap(); \
+ COMMON_INTERCEPT_FUNCTION(xdrrec_create); \
+ COMMON_INTERCEPT_FUNCTION(xdr_destroy);
+#else
+#define INIT_XDRREC_LINUX
+#endif
+
#if SANITIZER_INTERCEPT_TSEARCH
INTERCEPTOR(void *, tsearch, void *key, void **rootp,
int (*compar)(const void *, const void *)) {
@@ -5840,6 +5978,9 @@ void unpoison_file(__sanitizer_FILE *fp) {
if (fp->_IO_read_base && fp->_IO_read_base < fp->_IO_read_end)
COMMON_INTERCEPTOR_INITIALIZE_RANGE(fp->_IO_read_base,
fp->_IO_read_end - fp->_IO_read_base);
+ if (fp->_IO_write_base && fp->_IO_write_base < fp->_IO_write_end)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(fp->_IO_write_base,
+ fp->_IO_write_end - fp->_IO_write_base);
#endif
#endif // SANITIZER_HAS_STRUCT_FILE
}
@@ -6066,6 +6207,8 @@ INTERCEPTOR(void, _obstack_newchunk, __sanitizer_obstack *obstack, int length) {
INTERCEPTOR(int, fflush, __sanitizer_FILE *fp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, fflush, fp);
+ if (fp)
+ unpoison_file(fp);
int res = REAL(fflush)(fp);
// FIXME: handle fp == NULL
if (fp) {
@@ -6085,6 +6228,8 @@ INTERCEPTOR(int, fclose, __sanitizer_FILE *fp) {
COMMON_INTERCEPTOR_ENTER(ctx, fclose, fp);
COMMON_INTERCEPTOR_FILE_CLOSE(ctx, fp);
const FileMetadata *m = GetInterceptorMetadata(fp);
+ if (fp)
+ unpoison_file(fp);
int res = REAL(fclose)(fp);
if (m) {
COMMON_INTERCEPTOR_INITIALIZE_RANGE(*m->addr, *m->size);
@@ -9755,12 +9900,25 @@ INTERCEPTOR(void, qsort, void *base, SIZE_T nmemb, SIZE_T size,
}
}
qsort_compar_f old_compar = qsort_compar;
- qsort_compar = compar;
SIZE_T old_size = qsort_size;
- qsort_size = size;
+ // Handle qsort() implementations that recurse using an
+ // interposable function call:
+ bool already_wrapped = compar == wrapped_qsort_compar;
+ if (already_wrapped) {
+ // This case should only happen if the qsort() implementation calls itself
+ // using a preemptible function call (e.g. the FreeBSD libc version).
+ // Check that the size and comparator arguments are as expected.
+ CHECK_NE(compar, qsort_compar);
+ CHECK_EQ(qsort_size, size);
+ } else {
+ qsort_compar = compar;
+ qsort_size = size;
+ }
REAL(qsort)(base, nmemb, size, wrapped_qsort_compar);
- qsort_compar = old_compar;
- qsort_size = old_size;
+ if (!already_wrapped) {
+ qsort_compar = old_compar;
+ qsort_size = old_size;
+ }
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, base, nmemb * size);
}
#define INIT_QSORT COMMON_INTERCEPT_FUNCTION(qsort)
@@ -9793,12 +9951,25 @@ INTERCEPTOR(void, qsort_r, void *base, SIZE_T nmemb, SIZE_T size,
}
}
qsort_r_compar_f old_compar = qsort_r_compar;
- qsort_r_compar = compar;
SIZE_T old_size = qsort_r_size;
- qsort_r_size = size;
+ // Handle qsort_r() implementations that recurse using an
+ // interposable function call:
+ bool already_wrapped = compar == wrapped_qsort_r_compar;
+ if (already_wrapped) {
+ // This case should only happen if the qsort() implementation calls itself
+ // using a preemptible function call (e.g. the FreeBSD libc version).
+ // Check that the size and comparator arguments are as expected.
+ CHECK_NE(compar, qsort_r_compar);
+ CHECK_EQ(qsort_r_size, size);
+ } else {
+ qsort_r_compar = compar;
+ qsort_r_size = size;
+ }
REAL(qsort_r)(base, nmemb, size, wrapped_qsort_r_compar, arg);
- qsort_r_compar = old_compar;
- qsort_r_size = old_size;
+ if (!already_wrapped) {
+ qsort_r_compar = old_compar;
+ qsort_r_size = old_size;
+ }
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, base, nmemb * size);
}
#define INIT_QSORT_R COMMON_INTERCEPT_FUNCTION(qsort_r)
@@ -9996,6 +10167,7 @@ static void InitializeCommonInterceptors() {
INIT_SIGWAITINFO;
INIT_SIGTIMEDWAIT;
INIT_SIGSETOPS;
+ INIT_SIGSET_LOGICOPS;
INIT_SIGPENDING;
INIT_SIGPROCMASK;
INIT_PTHREAD_SIGMASK;
@@ -10037,6 +10209,8 @@ static void InitializeCommonInterceptors() {
INIT_PTHREAD_BARRIERATTR_GETPSHARED;
INIT_TMPNAM;
INIT_TMPNAM_R;
+ INIT_PTSNAME;
+ INIT_PTSNAME_R;
INIT_TTYNAME;
INIT_TTYNAME_R;
INIT_TEMPNAM;
@@ -10066,6 +10240,7 @@ static void InitializeCommonInterceptors() {
INIT_BZERO;
INIT_FTIME;
INIT_XDR;
+ INIT_XDRREC_LINUX;
INIT_TSEARCH;
INIT_LIBIO_INTERNALS;
INIT_FOPEN;
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_format.inc b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_format.inc
index bbbedda8fbe2..082398ba960a 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_format.inc
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_format.inc
@@ -340,6 +340,12 @@ static void scanf_common(void *ctx, int n_inputs, bool allowGnuMalloc,
size = 0;
}
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, argp, size);
+ // For %ms/%mc, write the allocated output buffer as well.
+ if (dir.allocate) {
+ char *buf = *(char **)argp;
+ if (buf)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, internal_strlen(buf) + 1);
+ }
}
}
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc
index 490a04b2181b..b7da65987557 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc
@@ -330,13 +330,17 @@ static void ioctl_table_fill() {
_(SOUND_PCM_WRITE_CHANNELS, WRITE, sizeof(int));
_(SOUND_PCM_WRITE_FILTER, WRITE, sizeof(int));
_(TCFLSH, NONE, 0);
+#if SANITIZER_GLIBC
_(TCGETA, WRITE, struct_termio_sz);
+#endif
_(TCGETS, WRITE, struct_termios_sz);
_(TCSBRK, NONE, 0);
_(TCSBRKP, NONE, 0);
+#if SANITIZER_GLIBC
_(TCSETA, READ, struct_termio_sz);
_(TCSETAF, READ, struct_termio_sz);
_(TCSETAW, READ, struct_termio_sz);
+#endif
_(TCSETS, READ, struct_termios_sz);
_(TCSETSF, READ, struct_termios_sz);
_(TCSETSW, READ, struct_termios_sz);
@@ -364,17 +368,8 @@ static void ioctl_table_fill() {
_(VT_WAITACTIVE, NONE, 0);
#endif
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#if SANITIZER_GLIBC
// _(SIOCDEVPLIP, WRITE, struct_ifreq_sz); // the same as EQL_ENSLAVE
- _(CYGETDEFTHRESH, WRITE, sizeof(int));
- _(CYGETDEFTIMEOUT, WRITE, sizeof(int));
- _(CYGETMON, WRITE, struct_cyclades_monitor_sz);
- _(CYGETTHRESH, WRITE, sizeof(int));
- _(CYGETTIMEOUT, WRITE, sizeof(int));
- _(CYSETDEFTHRESH, NONE, 0);
- _(CYSETDEFTIMEOUT, NONE, 0);
- _(CYSETTHRESH, NONE, 0);
- _(CYSETTIMEOUT, NONE, 0);
_(EQL_EMANCIPATE, WRITE, struct_ifreq_sz);
_(EQL_ENSLAVE, WRITE, struct_ifreq_sz);
_(EQL_GETMASTRCFG, WRITE, struct_ifreq_sz);
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_riscv64.inc.S b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_riscv64.inc.S
new file mode 100644
index 000000000000..b7ec27859b8a
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_riscv64.inc.S
@@ -0,0 +1,56 @@
+#if (defined(__riscv) && (__riscv_xlen == 64)) && defined(__linux__)
+
+#include "sanitizer_common/sanitizer_asm.h"
+
+ASM_HIDDEN(COMMON_INTERCEPTOR_SPILL_AREA)
+
+.comm _ZN14__interception10real_vforkE,8,8
+.globl ASM_WRAPPER_NAME(vfork)
+ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(vfork))
+ASM_WRAPPER_NAME(vfork):
+ // Save ra in the off-stack spill area.
+ // allocate space on stack
+ addi sp, sp, -16
+ // store ra value
+ sd ra, 8(sp)
+ call COMMON_INTERCEPTOR_SPILL_AREA
+ // restore previous values from stack
+ ld ra, 8(sp)
+ // adjust stack
+ addi sp, sp, 16
+ // store ra by x10
+ sd ra, 0(x10)
+
+ // Call real vfork. This may return twice. User code that runs between the first and the second return
+ // may clobber the stack frame of the interceptor; that's why it does not have a frame.
+ la x10, _ZN14__interception10real_vforkE
+ ld x10, 0(x10)
+ jalr x10
+
+ // adjust stack
+ addi sp, sp, -16
+ // store x10 by adjusted stack
+ sd x10, 8(sp)
+ // jump to exit label if x10 is 0
+ beqz x10, .L_exit
+
+ // x0 != 0 => parent process. Clear stack shadow.
+ // put old sp to x10
+ addi x10, sp, 16
+ call COMMON_INTERCEPTOR_HANDLE_VFORK
+
+.L_exit:
+ // Restore ra
+ call COMMON_INTERCEPTOR_SPILL_AREA
+ ld ra, 0(x10)
+ // load value by stack
+ ld x10, 8(sp)
+ // adjust stack
+ addi sp, sp, 16
+ ret
+ASM_SIZE(vfork)
+
+.weak vfork
+.set vfork, ASM_WRAPPER_NAME(vfork)
+
+#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interface.inc b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interface.inc
index c78b6e10b689..932e5478616d 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interface.inc
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interface.inc
@@ -13,6 +13,7 @@ INTERFACE_FUNCTION(__sanitizer_contiguous_container_find_bad_address)
INTERFACE_FUNCTION(__sanitizer_set_death_callback)
INTERFACE_FUNCTION(__sanitizer_set_report_path)
INTERFACE_FUNCTION(__sanitizer_set_report_fd)
+INTERFACE_FUNCTION(__sanitizer_get_report_path)
INTERFACE_FUNCTION(__sanitizer_verify_contiguous_container)
INTERFACE_WEAK_FUNCTION(__sanitizer_on_print)
INTERFACE_WEAK_FUNCTION(__sanitizer_report_error_summary)
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp
index 0c918ebb4a9d..047c5a17ea6e 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp
@@ -139,6 +139,59 @@ uptr ReservedAddressRange::InitAligned(uptr size, uptr align,
return start;
}
+#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
+
+// Reserve memory range [beg, end].
+// We need to use inclusive range because end+1 may not be representable.
+void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name,
+ bool madvise_shadow) {
+ CHECK_EQ((beg % GetMmapGranularity()), 0);
+ CHECK_EQ(((end + 1) % GetMmapGranularity()), 0);
+ uptr size = end - beg + 1;
+ DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb.
+ if (madvise_shadow ? !MmapFixedSuperNoReserve(beg, size, name)
+ : !MmapFixedNoReserve(beg, size, name)) {
+ Report(
+ "ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. "
+ "Perhaps you're using ulimit -v\n",
+ size);
+ Abort();
+ }
+ if (madvise_shadow && common_flags()->use_madv_dontdump)
+ DontDumpShadowMemory(beg, size);
+}
+
+void ProtectGap(uptr addr, uptr size, uptr zero_base_shadow_start,
+ uptr zero_base_max_shadow_start) {
+ if (!size)
+ return;
+ void *res = MmapFixedNoAccess(addr, size, "shadow gap");
+ if (addr == (uptr)res)
+ return;
+ // A few pages at the start of the address space can not be protected.
+ // But we really want to protect as much as possible, to prevent this memory
+ // being returned as a result of a non-FIXED mmap().
+ if (addr == zero_base_shadow_start) {
+ uptr step = GetMmapGranularity();
+ while (size > step && addr < zero_base_max_shadow_start) {
+ addr += step;
+ size -= step;
+ void *res = MmapFixedNoAccess(addr, size, "shadow gap");
+ if (addr == (uptr)res)
+ return;
+ }
+ }
+
+ Report(
+ "ERROR: Failed to protect the shadow gap. "
+ "%s cannot proceed correctly. ABORTING.\n",
+ SanitizerToolName);
+ DumpProcessMap();
+ Die();
+}
+
+#endif // !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
+
} // namespace __sanitizer
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_sandbox_on_notify,
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_nolibc.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_nolibc.cpp
index 3b278e017eb7..487a634a1652 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_nolibc.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_nolibc.cpp
@@ -10,9 +10,10 @@
// libc in no-libcdep sources.
//===----------------------------------------------------------------------===//
-#include "sanitizer_platform.h"
#include "sanitizer_common.h"
+#include "sanitizer_flags.h"
#include "sanitizer_libc.h"
+#include "sanitizer_platform.h"
namespace __sanitizer {
@@ -29,6 +30,7 @@ void SleepForSeconds(int seconds) { internal_sleep(seconds); }
#if !SANITIZER_WINDOWS && !SANITIZER_MAC
void ListOfModules::init() {}
+void InitializePlatformCommonFlags(CommonFlags *cf) {}
#endif
} // namespace __sanitizer
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_syscalls.inc b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_syscalls.inc
index 532ac9ead349..1b89d6e17684 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_syscalls.inc
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_syscalls.inc
@@ -2294,9 +2294,10 @@ PRE_SYSCALL(ni_syscall)() {}
POST_SYSCALL(ni_syscall)(long res) {}
PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) {
-#if !SANITIZER_ANDROID && \
- (defined(__i386) || defined(__x86_64) || defined(__mips64) || \
- defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__))
+#if !SANITIZER_ANDROID && \
+ (defined(__i386) || defined(__x86_64) || defined(__mips64) || \
+ defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__) || \
+ SANITIZER_RISCV64)
if (data) {
if (request == ptrace_setregs) {
PRE_READ((void *)data, struct_user_regs_struct_sz);
@@ -2315,9 +2316,10 @@ PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) {
}
POST_SYSCALL(ptrace)(long res, long request, long pid, long addr, long data) {
-#if !SANITIZER_ANDROID && \
- (defined(__i386) || defined(__x86_64) || defined(__mips64) || \
- defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__))
+#if !SANITIZER_ANDROID && \
+ (defined(__i386) || defined(__x86_64) || defined(__mips64) || \
+ defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__) || \
+ SANITIZER_RISCV64)
if (res >= 0 && data) {
// Note that this is different from the interceptor in
// sanitizer_common_interceptors.inc.
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_deadlock_detector1.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_deadlock_detector1.cpp
index d4a325bea4b2..2c924f5d3963 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_deadlock_detector1.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_deadlock_detector1.cpp
@@ -32,7 +32,7 @@ struct DDLogicalThread {
bool report_pending;
};
-struct DD : public DDetector {
+struct DD final : public DDetector {
SpinMutex mtx;
DeadlockDetector<DDBV> dd;
DDFlags flags;
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_deadlock_detector2.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_deadlock_detector2.cpp
index 4026739d4e51..e3f8e1b12762 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_deadlock_detector2.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_deadlock_detector2.cpp
@@ -80,7 +80,7 @@ struct Mutex {
Link link[kMaxLink];
};
-struct DD : public DDetector {
+struct DD final : public DDetector {
explicit DD(const DDFlags *flags);
DDPhysicalThread* CreatePhysicalThread();
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_deadlock_detector_interface.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_deadlock_detector_interface.h
index a4722b080ebd..7f461c98bade 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_deadlock_detector_interface.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_deadlock_detector_interface.h
@@ -66,6 +66,9 @@ struct DDCallback {
virtual u32 Unwind() { return 0; }
virtual int UniqueTid() { return 0; }
+
+ protected:
+ ~DDCallback() {}
};
struct DDetector {
@@ -85,6 +88,9 @@ struct DDetector {
virtual void MutexDestroy(DDCallback *cb, DDMutex *m) {}
virtual DDReport *GetReport(DDCallback *cb) { return nullptr; }
+
+ protected:
+ ~DDetector() {}
};
} // namespace __sanitizer
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_errno.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_errno.h
index 584e66e4a861..94f16b6e8735 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_errno.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_errno.h
@@ -23,7 +23,7 @@
#if SANITIZER_FREEBSD || SANITIZER_MAC
# define __errno_location __error
-#elif SANITIZER_ANDROID || SANITIZER_NETBSD || SANITIZER_OPENBSD || \
+#elif SANITIZER_ANDROID || SANITIZER_NETBSD || \
SANITIZER_RTEMS
# define __errno_location __errno
#elif SANITIZER_SOLARIS
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_errno_codes.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_errno_codes.h
index f388d0d36463..192e9392d494 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_errno_codes.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_errno_codes.h
@@ -24,6 +24,7 @@ namespace __sanitizer {
#define errno_ENOMEM 12
#define errno_EBUSY 16
#define errno_EINVAL 22
+#define errno_ENAMETOOLONG 36
// Those might not present or their value differ on different platforms.
extern const int errno_EOWNERDEAD;
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_file.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_file.cpp
index 79930d794250..7c64b53e9b11 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_file.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_file.cpp
@@ -58,40 +58,49 @@ void ReportFile::ReopenIfNecessary() {
} else {
internal_snprintf(full_path, kMaxPathLength, "%s.%zu", path_prefix, pid);
}
- fd = OpenFile(full_path, WrOnly);
+ error_t err;
+ fd = OpenFile(full_path, WrOnly, &err);
if (fd == kInvalidFd) {
const char *ErrorMsgPrefix = "ERROR: Can't open file: ";
WriteToFile(kStderrFd, ErrorMsgPrefix, internal_strlen(ErrorMsgPrefix));
WriteToFile(kStderrFd, full_path, internal_strlen(full_path));
+ char errmsg[100];
+ internal_snprintf(errmsg, sizeof(errmsg), " (reason: %d)", err);
+ WriteToFile(kStderrFd, errmsg, internal_strlen(errmsg));
Die();
}
fd_pid = pid;
}
void ReportFile::SetReportPath(const char *path) {
- if (!path)
- return;
- uptr len = internal_strlen(path);
- if (len > sizeof(path_prefix) - 100) {
- Report("ERROR: Path is too long: %c%c%c%c%c%c%c%c...\n",
- path[0], path[1], path[2], path[3],
- path[4], path[5], path[6], path[7]);
- Die();
+ if (path) {
+ uptr len = internal_strlen(path);
+ if (len > sizeof(path_prefix) - 100) {
+ Report("ERROR: Path is too long: %c%c%c%c%c%c%c%c...\n", path[0], path[1],
+ path[2], path[3], path[4], path[5], path[6], path[7]);
+ Die();
+ }
}
SpinMutexLock l(mu);
if (fd != kStdoutFd && fd != kStderrFd && fd != kInvalidFd)
CloseFile(fd);
fd = kInvalidFd;
- if (internal_strcmp(path, "stdout") == 0) {
- fd = kStdoutFd;
- } else if (internal_strcmp(path, "stderr") == 0) {
+ if (!path || internal_strcmp(path, "stderr") == 0) {
fd = kStderrFd;
+ } else if (internal_strcmp(path, "stdout") == 0) {
+ fd = kStdoutFd;
} else {
internal_snprintf(path_prefix, kMaxPathLength, "%s", path);
}
}
+const char *ReportFile::GetReportPath() {
+ SpinMutexLock l(mu);
+ ReopenIfNecessary();
+ return full_path;
+}
+
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
uptr *read_len, uptr max_len, error_t *errno_p) {
*buff = nullptr;
@@ -210,6 +219,10 @@ void __sanitizer_set_report_fd(void *fd) {
report_file.fd = (fd_t)reinterpret_cast<uptr>(fd);
report_file.fd_pid = internal_getpid();
}
+
+const char *__sanitizer_get_report_path() {
+ return report_file.GetReportPath();
+}
} // extern "C"
#endif // !SANITIZER_FUCHSIA
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_file.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_file.h
index 26681f0493d7..08671ab67d0f 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_file.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_file.h
@@ -26,6 +26,7 @@ struct ReportFile {
void Write(const char *buffer, uptr length);
bool SupportsColors();
void SetReportPath(const char *path);
+ const char *GetReportPath();
// Don't use fields directly. They are only declared public to allow
// aggregate initialization.
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.h
index fac5dff34633..acc71ccd89ee 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.h
@@ -42,7 +42,7 @@ class FlagHandlerBase {
};
template <typename T>
-class FlagHandler : public FlagHandlerBase {
+class FlagHandler final : public FlagHandlerBase {
T *t_;
public:
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flags.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flags.cpp
index 684ee1e0b999..21048be73041 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flags.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flags.cpp
@@ -13,9 +13,10 @@
#include "sanitizer_flags.h"
#include "sanitizer_common.h"
+#include "sanitizer_flag_parser.h"
#include "sanitizer_libc.h"
+#include "sanitizer_linux.h"
#include "sanitizer_list.h"
-#include "sanitizer_flag_parser.h"
namespace __sanitizer {
@@ -72,7 +73,7 @@ void SubstituteForFlagValue(const char *s, char *out, uptr out_size) {
*out = '\0';
}
-class FlagHandlerInclude : public FlagHandlerBase {
+class FlagHandlerInclude final : public FlagHandlerBase {
FlagParser *parser_;
bool ignore_missing_;
const char *original_path_;
@@ -91,7 +92,7 @@ class FlagHandlerInclude : public FlagHandlerBase {
}
return parser_->ParseFile(value, ignore_missing_);
}
- bool Format(char *buffer, uptr size) {
+ bool Format(char *buffer, uptr size) override {
// Note `original_path_` isn't actually what's parsed due to `%`
// substitutions. Printing the substituted path would require holding onto
// mmap'ed memory.
@@ -124,6 +125,8 @@ void InitializeCommonFlags(CommonFlags *cf) {
// need to record coverage to generate coverage report.
cf->coverage |= cf->html_cov_report;
SetVerbosity(cf->verbosity);
+
+ InitializePlatformCommonFlags(cf);
}
} // namespace __sanitizer
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flags.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flags.h
index 8f5e987da3ff..5b59e5801bf9 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flags.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flags.h
@@ -62,6 +62,10 @@ void RegisterIncludeFlags(FlagParser *parser, CommonFlags *cf);
// and perform initializations common to all sanitizers (e.g. setting
// verbosity).
void InitializeCommonFlags(CommonFlags *cf = &common_flags_dont_use);
+
+// Platform specific flags initialization.
+void InitializePlatformCommonFlags(CommonFlags *cf);
+
} // namespace __sanitizer
#endif // SANITIZER_FLAGS_H
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flags.inc b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flags.inc
index 065258a5a6e1..cfb5822645f1 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flags.inc
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flags.inc
@@ -40,16 +40,21 @@ COMMON_FLAG(bool, fast_unwind_on_check, false,
COMMON_FLAG(bool, fast_unwind_on_fatal, false,
"If available, use the fast frame-pointer-based unwinder on fatal "
"errors.")
-COMMON_FLAG(bool, fast_unwind_on_malloc, true,
+// ARM thumb/thumb2 frame pointer is inconsistent on GCC and Clang [1]
+// and fast-unwinder is also unreliable with mixing arm and thumb code [2].
+// [1] https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92172
+// [2] https://bugs.llvm.org/show_bug.cgi?id=44158
+COMMON_FLAG(bool, fast_unwind_on_malloc,
+ !(SANITIZER_LINUX && !SANITIZER_ANDROID && SANITIZER_ARM),
"If available, use the fast frame-pointer-based unwinder on "
"malloc/free.")
COMMON_FLAG(bool, handle_ioctl, false, "Intercept and handle ioctl requests.")
COMMON_FLAG(int, malloc_context_size, 1,
"Max number of stack frames kept for each allocation/deallocation.")
COMMON_FLAG(
- const char *, log_path, "stderr",
+ const char *, log_path, nullptr,
"Write logs to \"log_path.pid\". The special values are \"stdout\" and "
- "\"stderr\". The default is \"stderr\".")
+ "\"stderr\". If unspecified, defaults to \"stderr\".")
COMMON_FLAG(
bool, log_exe_name, false,
"Mention name of executable when reporting error and "
@@ -77,8 +82,9 @@ COMMON_FLAG(bool, print_summary, true,
"If false, disable printing error summaries in addition to error "
"reports.")
COMMON_FLAG(int, print_module_map, 0,
- "OS X only (0 - don't print, 1 - print only once before process "
- "exits, 2 - print after each report).")
+ "Print the process module map where supported (0 - don't print, "
+ "1 - print only once before process exits, 2 - print after each "
+ "report).")
COMMON_FLAG(bool, check_printf, true, "Check printf arguments.")
#define COMMON_FLAG_HANDLE_SIGNAL_HELP(signal) \
"Controls custom tool's " #signal " handler (0 - do not registers the " \
@@ -195,6 +201,9 @@ COMMON_FLAG(bool, intercept_strtok, true,
COMMON_FLAG(bool, intercept_strpbrk, true,
"If set, uses custom wrappers for strpbrk function "
"to find more errors.")
+COMMON_FLAG(
+ bool, intercept_strcmp, true,
+ "If set, uses custom wrappers for strcmp functions to find more errors.")
COMMON_FLAG(bool, intercept_strlen, true,
"If set, uses custom wrappers for strlen and strnlen functions "
"to find more errors.")
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp
index 6d1ad7946770..5ad20d0d7da6 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp
@@ -14,10 +14,6 @@
#include "sanitizer_fuchsia.h"
#if SANITIZER_FUCHSIA
-#include "sanitizer_common.h"
-#include "sanitizer_libc.h"
-#include "sanitizer_mutex.h"
-
#include <limits.h>
#include <pthread.h>
#include <stdlib.h>
@@ -25,6 +21,11 @@
#include <zircon/errors.h>
#include <zircon/process.h>
#include <zircon/syscalls.h>
+#include <zircon/utc.h>
+
+#include "sanitizer_common.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_mutex.h"
namespace __sanitizer {
@@ -47,8 +48,10 @@ unsigned int internal_sleep(unsigned int seconds) {
}
u64 NanoTime() {
+ zx_handle_t utc_clock = _zx_utc_reference_get();
+ CHECK_NE(utc_clock, ZX_HANDLE_INVALID);
zx_time_t time;
- zx_status_t status = _zx_clock_get(ZX_CLOCK_UTC, &time);
+ zx_status_t status = _zx_clock_read(utc_clock, &time);
CHECK_EQ(status, ZX_OK);
return time;
}
@@ -105,8 +108,6 @@ void SetAlternateSignalStack() {}
void UnsetAlternateSignalStack() {}
void InitTlsSize() {}
-void PrintModuleMap() {}
-
bool SignalContext::IsStackOverflow() const { return false; }
void SignalContext::DumpAllRegisters(void *context) { UNIMPLEMENTED(); }
const char *SignalContext::Describe() const { UNIMPLEMENTED(); }
@@ -504,6 +505,8 @@ u32 GetNumberOfCPUs() {
uptr GetRSS() { UNIMPLEMENTED(); }
+void InitializePlatformCommonFlags(CommonFlags *cf) {}
+
} // namespace __sanitizer
using namespace __sanitizer;
@@ -526,6 +529,10 @@ void __sanitizer_set_report_path(const char *path) {
void __sanitizer_set_report_fd(void *fd) {
UNREACHABLE("not available on Fuchsia");
}
+
+const char *__sanitizer_get_report_path() {
+ UNREACHABLE("not available on Fuchsia");
+}
} // extern "C"
#endif // SANITIZER_FUCHSIA
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_getauxval.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_getauxval.h
index 86ad3a5e2c2a..38439e44f611 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_getauxval.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_getauxval.h
@@ -21,8 +21,9 @@
#if SANITIZER_LINUX || SANITIZER_FUCHSIA
-# if __GLIBC_PREREQ(2, 16) || (SANITIZER_ANDROID && __ANDROID_API__ >= 21) || \
- SANITIZER_FUCHSIA
+# if (__GLIBC_PREREQ(2, 16) || (SANITIZER_ANDROID && __ANDROID_API__ >= 21) || \
+ SANITIZER_FUCHSIA) && \
+ !SANITIZER_GO
# define SANITIZER_USE_GETAUXVAL 1
# else
# define SANITIZER_USE_GETAUXVAL 0
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_interface_internal.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_interface_internal.h
index be8023e9e16c..0b001c1c4830 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_interface_internal.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_interface_internal.h
@@ -28,6 +28,10 @@ extern "C" {
// (casted to void *).
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_set_report_fd(void *fd);
+ // Get the current full report file path, if a path was specified by
+ // an earlier call to __sanitizer_set_report_path. Returns null otherwise.
+ SANITIZER_INTERFACE_ATTRIBUTE
+ const char *__sanitizer_get_report_path();
typedef struct {
int coverage_sandboxed;
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_internal_defs.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_internal_defs.h
index d0ffc79b0610..d8f0540037d2 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_internal_defs.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_internal_defs.h
@@ -39,7 +39,7 @@
// TLS is handled differently on different platforms
#if SANITIZER_LINUX || SANITIZER_NETBSD || \
- SANITIZER_FREEBSD || SANITIZER_OPENBSD
+ SANITIZER_FREEBSD
# define SANITIZER_TLS_INITIAL_EXEC_ATTRIBUTE \
__attribute__((tls_model("initial-exec"))) thread_local
#else
@@ -104,8 +104,7 @@
//
// FIXME: do we have anything like this on Mac?
#ifndef SANITIZER_CAN_USE_PREINIT_ARRAY
-#if ((SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_OPENBSD || \
- SANITIZER_FUCHSIA || SANITIZER_NETBSD) && !defined(PIC)
+#if (SANITIZER_LINUX || SANITIZER_FUCHSIA || SANITIZER_NETBSD) && !defined(PIC)
#define SANITIZER_CAN_USE_PREINIT_ARRAY 1
// Before Solaris 11.4, .preinit_array is fully supported only with GNU ld.
// FIXME: Check for those conditions.
@@ -170,7 +169,7 @@ typedef int pid_t;
#endif
#if SANITIZER_FREEBSD || SANITIZER_NETBSD || \
- SANITIZER_OPENBSD || SANITIZER_MAC || \
+ SANITIZER_MAC || \
(SANITIZER_SOLARIS && (defined(_LP64) || _FILE_OFFSET_BITS == 64)) || \
(SANITIZER_LINUX && defined(__x86_64__))
typedef u64 OFF_T;
@@ -182,7 +181,7 @@ typedef u64 OFF64_T;
#if (SANITIZER_WORDSIZE == 64) || SANITIZER_MAC
typedef uptr operator_new_size_type;
#else
-# if SANITIZER_OPENBSD || defined(__s390__) && !defined(__s390x__)
+# if defined(__s390__) && !defined(__s390x__)
// Special case: 31-bit s390 has unsigned long as size_t.
typedef unsigned long operator_new_size_type;
# else
@@ -196,9 +195,6 @@ typedef u64 tid_t;
// This header should NOT include any other headers to avoid portability issues.
// Common defs.
-#ifndef INLINE
-#define INLINE inline
-#endif
#define INTERFACE_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
#define SANITIZER_WEAK_DEFAULT_IMPL \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE NOINLINE
@@ -333,14 +329,10 @@ void NORETURN CheckFailed(const char *file, int line, const char *cond,
#define UNIMPLEMENTED() UNREACHABLE("unimplemented")
-#define COMPILER_CHECK(pred) IMPL_COMPILER_ASSERT(pred, __LINE__)
+#define COMPILER_CHECK(pred) static_assert(pred, "")
#define ARRAY_SIZE(a) (sizeof(a)/sizeof((a)[0]))
-#define IMPL_PASTE(a, b) a##b
-#define IMPL_COMPILER_ASSERT(pred, line) \
- typedef char IMPL_PASTE(assertion_failed_##_, line)[2*(int)(pred)-1]
-
// Limits for integral types. We have to redefine it in case we don't
// have stdint.h (like in Visual Studio 9).
#undef __INT64_C
@@ -455,5 +447,8 @@ using namespace __sanitizer;
namespace __hwasan {
using namespace __sanitizer;
}
+namespace __memprof {
+using namespace __sanitizer;
+}
#endif // SANITIZER_DEFS_H
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_libignore.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_libignore.cpp
index eb9bb765013d..9ea19bc21fa3 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_libignore.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_libignore.cpp
@@ -9,7 +9,7 @@
#include "sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC || \
- SANITIZER_NETBSD || SANITIZER_OPENBSD
+ SANITIZER_NETBSD
#include "sanitizer_libignore.h"
#include "sanitizer_flags.h"
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
index 470f4b70f059..379f6d9e294b 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
@@ -14,7 +14,7 @@
#include "sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
- SANITIZER_OPENBSD || SANITIZER_SOLARIS
+ SANITIZER_SOLARIS
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
@@ -38,6 +38,14 @@
#include <asm/unistd.h>
#include <sys/types.h>
#define stat kernel_stat
+#if SANITIZER_GO
+#undef st_atime
+#undef st_mtime
+#undef st_ctime
+#define st_atime st_atim
+#define st_mtime st_mtim
+#define st_ctime st_ctim
+#endif
#include <asm/stat.h>
#undef stat
#endif
@@ -59,13 +67,7 @@
#include <sys/syscall.h>
#include <sys/time.h>
#include <sys/types.h>
-#if !SANITIZER_OPENBSD
#include <ucontext.h>
-#endif
-#if SANITIZER_OPENBSD
-#include <sys/futex.h>
-#include <sys/sysctl.h>
-#endif
#include <unistd.h>
#if SANITIZER_LINUX
@@ -129,7 +131,7 @@ const int FUTEX_WAKE_PRIVATE = FUTEX_WAKE | FUTEX_PRIVATE_FLAG;
#endif
// Note : FreeBSD had implemented both
-// Linux and OpenBSD apis, available from
+// Linux apis, available from
// future 12.x version most likely
#if SANITIZER_LINUX && defined(__NR_getrandom)
# if !defined(GRND_NONBLOCK)
@@ -140,20 +142,18 @@ const int FUTEX_WAKE_PRIVATE = FUTEX_WAKE | FUTEX_PRIVATE_FLAG;
# define SANITIZER_USE_GETRANDOM 0
#endif // SANITIZER_LINUX && defined(__NR_getrandom)
-#if SANITIZER_OPENBSD
-# define SANITIZER_USE_GETENTROPY 1
+#if SANITIZER_FREEBSD && __FreeBSD_version >= 1200000
+# define SANITIZER_USE_GETENTROPY 1
#else
-# if SANITIZER_FREEBSD && __FreeBSD_version >= 1200000
-# define SANITIZER_USE_GETENTROPY 1
-# else
-# define SANITIZER_USE_GETENTROPY 0
-# endif
-#endif // SANITIZER_USE_GETENTROPY
+# define SANITIZER_USE_GETENTROPY 0
+#endif
namespace __sanitizer {
#if SANITIZER_LINUX && defined(__x86_64__)
#include "sanitizer_syscall_linux_x86_64.inc"
+#elif SANITIZER_LINUX && SANITIZER_RISCV64
+#include "sanitizer_syscall_linux_riscv64.inc"
#elif SANITIZER_LINUX && defined(__aarch64__)
#include "sanitizer_syscall_linux_aarch64.inc"
#elif SANITIZER_LINUX && defined(__arm__)
@@ -164,7 +164,7 @@ namespace __sanitizer {
// --------------- sanitizer_libc.h
#if !SANITIZER_SOLARIS && !SANITIZER_NETBSD
-#if !SANITIZER_S390 && !SANITIZER_OPENBSD
+#if !SANITIZER_S390
uptr internal_mmap(void *addr, uptr length, int prot, int flags, int fd,
u64 offset) {
#if SANITIZER_FREEBSD || SANITIZER_LINUX_USES_64BIT_SYSCALLS
@@ -177,9 +177,8 @@ uptr internal_mmap(void *addr, uptr length, int prot, int flags, int fd,
offset / 4096);
#endif
}
-#endif // !SANITIZER_S390 && !SANITIZER_OPENBSD
+#endif // !SANITIZER_S390
-#if !SANITIZER_OPENBSD
uptr internal_munmap(void *addr, uptr length) {
return internal_syscall(SYSCALL(munmap), (uptr)addr, length);
}
@@ -187,7 +186,10 @@ uptr internal_munmap(void *addr, uptr length) {
int internal_mprotect(void *addr, uptr length, int prot) {
return internal_syscall(SYSCALL(mprotect), (uptr)addr, length, prot);
}
-#endif
+
+int internal_madvise(uptr addr, uptr length, int advice) {
+ return internal_syscall(SYSCALL(madvise), addr, length, advice);
+}
uptr internal_close(fd_t fd) {
return internal_syscall(SYSCALL(close), fd);
@@ -254,9 +256,11 @@ static void stat64_to_stat(struct stat64 *in, struct stat *out) {
// Undefine compatibility macros from <sys/stat.h>
// so that they would not clash with the kernel_stat
// st_[a|m|c]time fields
+#if !SANITIZER_GO
#undef st_atime
#undef st_mtime
#undef st_ctime
+#endif
#if defined(SANITIZER_ANDROID)
// Bionic sys/stat.h defines additional macros
// for compatibility with the old NDKs and
@@ -299,7 +303,7 @@ static void kernel_stat_to_stat(struct kernel_stat *in, struct stat *out) {
#endif
uptr internal_stat(const char *path, void *buf) {
-#if SANITIZER_FREEBSD || SANITIZER_OPENBSD
+#if SANITIZER_FREEBSD
return internal_syscall(SYSCALL(fstatat), AT_FDCWD, (uptr)path, (uptr)buf, 0);
#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path, (uptr)buf,
@@ -323,7 +327,7 @@ uptr internal_stat(const char *path, void *buf) {
}
uptr internal_lstat(const char *path, void *buf) {
-#if SANITIZER_FREEBSD || SANITIZER_OPENBSD
+#if SANITIZER_FREEBSD
return internal_syscall(SYSCALL(fstatat), AT_FDCWD, (uptr)path, (uptr)buf,
AT_SYMLINK_NOFOLLOW);
#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
@@ -348,9 +352,8 @@ uptr internal_lstat(const char *path, void *buf) {
}
uptr internal_fstat(fd_t fd, void *buf) {
-#if SANITIZER_FREEBSD || SANITIZER_OPENBSD || \
- SANITIZER_LINUX_USES_64BIT_SYSCALLS
-#if SANITIZER_MIPS64 && !SANITIZER_OPENBSD
+#if SANITIZER_FREEBSD || SANITIZER_LINUX_USES_64BIT_SYSCALLS
+#if SANITIZER_MIPS64
// For mips64, fstat syscall fills buffer in the format of kernel_stat
struct kernel_stat kbuf;
int res = internal_syscall(SYSCALL(fstat), fd, &kbuf);
@@ -390,16 +393,13 @@ uptr internal_readlink(const char *path, char *buf, uptr bufsize) {
#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
return internal_syscall(SYSCALL(readlinkat), AT_FDCWD, (uptr)path, (uptr)buf,
bufsize);
-#elif SANITIZER_OPENBSD
- return internal_syscall(SYSCALL(readlinkat), AT_FDCWD, (uptr)path, (uptr)buf,
- bufsize);
#else
return internal_syscall(SYSCALL(readlink), (uptr)path, (uptr)buf, bufsize);
#endif
}
uptr internal_unlink(const char *path) {
-#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS || SANITIZER_OPENBSD
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
return internal_syscall(SYSCALL(unlinkat), AT_FDCWD, (uptr)path, 0);
#else
return internal_syscall(SYSCALL(unlink), (uptr)path);
@@ -410,7 +410,7 @@ uptr internal_rename(const char *oldpath, const char *newpath) {
#if defined(__riscv)
return internal_syscall(SYSCALL(renameat2), AT_FDCWD, (uptr)oldpath, AT_FDCWD,
(uptr)newpath, 0);
-#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS || SANITIZER_OPENBSD
+#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
return internal_syscall(SYSCALL(renameat), AT_FDCWD, (uptr)oldpath, AT_FDCWD,
(uptr)newpath);
#else
@@ -422,15 +422,6 @@ uptr internal_sched_yield() {
return internal_syscall(SYSCALL(sched_yield));
}
-void internal__exit(int exitcode) {
-#if SANITIZER_FREEBSD || SANITIZER_OPENBSD
- internal_syscall(SYSCALL(exit), exitcode);
-#else
- internal_syscall(SYSCALL(exit_group), exitcode);
-#endif
- Die(); // Unreachable.
-}
-
unsigned int internal_sleep(unsigned int seconds) {
struct timespec ts;
ts.tv_sec = seconds;
@@ -447,6 +438,17 @@ uptr internal_execve(const char *filename, char *const argv[],
}
#endif // !SANITIZER_SOLARIS && !SANITIZER_NETBSD
+#if !SANITIZER_NETBSD
+void internal__exit(int exitcode) {
+#if SANITIZER_FREEBSD || SANITIZER_SOLARIS
+ internal_syscall(SYSCALL(exit), exitcode);
+#else
+ internal_syscall(SYSCALL(exit_group), exitcode);
+#endif
+ Die(); // Unreachable.
+}
+#endif // !SANITIZER_NETBSD
+
// ----------------- sanitizer_common.h
bool FileExists(const char *filename) {
if (ShouldMockFailureToOpen(filename))
@@ -468,8 +470,6 @@ tid_t GetTid() {
long Tid;
thr_self(&Tid);
return Tid;
-#elif SANITIZER_OPENBSD
- return internal_syscall(SYSCALL(getthrid));
#elif SANITIZER_SOLARIS
return thr_self();
#else
@@ -482,9 +482,6 @@ int TgKill(pid_t pid, tid_t tid, int sig) {
return internal_syscall(SYSCALL(tgkill), pid, tid, sig);
#elif SANITIZER_FREEBSD
return internal_syscall(SYSCALL(thr_kill2), pid, tid, sig);
-#elif SANITIZER_OPENBSD
- (void)pid;
- return internal_syscall(SYSCALL(thrkill), tid, sig, nullptr);
#elif SANITIZER_SOLARIS
(void)pid;
return thr_kill(tid, sig);
@@ -494,7 +491,7 @@ int TgKill(pid_t pid, tid_t tid, int sig) {
#if !SANITIZER_SOLARIS && !SANITIZER_NETBSD
u64 NanoTime() {
-#if SANITIZER_FREEBSD || SANITIZER_OPENBSD
+#if SANITIZER_FREEBSD
timeval tv;
#else
kernel_timeval tv;
@@ -513,8 +510,7 @@ uptr internal_clock_gettime(__sanitizer_clockid_t clk_id, void *tp) {
// 'environ' array (on some others) and does not use libc. This function
// should be called first inside __asan_init.
const char *GetEnv(const char *name) {
-#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_OPENBSD || \
- SANITIZER_SOLARIS
+#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_SOLARIS
if (::environ != 0) {
uptr NameLen = internal_strlen(name);
for (char **Env = ::environ; *Env != 0; Env++) {
@@ -552,15 +548,13 @@ const char *GetEnv(const char *name) {
#endif
}
-#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD && !SANITIZER_OPENBSD && \
- !SANITIZER_GO
+#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD && !SANITIZER_GO
extern "C" {
SANITIZER_WEAK_ATTRIBUTE extern void *__libc_stack_end;
}
#endif
-#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD && \
- !SANITIZER_OPENBSD
+#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
static void ReadNullSepFileToArray(const char *path, char ***arr,
int arr_size) {
char *buff;
@@ -585,7 +579,6 @@ static void ReadNullSepFileToArray(const char *path, char ***arr,
}
#endif
-#if !SANITIZER_OPENBSD
static void GetArgsAndEnv(char ***argv, char ***envp) {
#if SANITIZER_FREEBSD
// On FreeBSD, retrieving the argument and environment arrays is done via the
@@ -637,8 +630,6 @@ char **GetEnviron() {
return envp;
}
-#endif // !SANITIZER_OPENBSD
-
#if !SANITIZER_SOLARIS
enum MutexState {
MtxUnlocked = 0,
@@ -694,19 +685,9 @@ void BlockingMutex::CheckLocked() {
// 32-bit syscall here.
#if SANITIZER_NETBSD
// Not used
-#elif SANITIZER_OPENBSD
-// struct dirent is different for Linux and us. At this moment, we use only
-// d_fileno (Linux call this d_ino), d_reclen, and d_name.
-struct linux_dirent {
- u64 d_ino; // d_fileno
- u16 d_reclen;
- u16 d_namlen; // not used
- u8 d_type; // not used
- char d_name[NAME_MAX + 1];
-};
#else
struct linux_dirent {
-#if SANITIZER_X32 || defined(__aarch64__)
+#if SANITIZER_X32 || defined(__aarch64__) || SANITIZER_RISCV64
u64 d_ino;
u64 d_off;
#else
@@ -714,7 +695,7 @@ struct linux_dirent {
unsigned long d_off;
#endif
unsigned short d_reclen;
-#ifdef __aarch64__
+#if defined(__aarch64__) || SANITIZER_RISCV64
unsigned char d_type;
#endif
char d_name[256];
@@ -781,28 +762,39 @@ int internal_fork() {
#endif
}
-#if SANITIZER_FREEBSD || SANITIZER_OPENBSD
+#if SANITIZER_FREEBSD
int internal_sysctl(const int *name, unsigned int namelen, void *oldp,
uptr *oldlenp, const void *newp, uptr newlen) {
-#if SANITIZER_OPENBSD
- return sysctl(name, namelen, oldp, (size_t *)oldlenp, (void *)newp,
- (size_t)newlen);
-#else
return internal_syscall(SYSCALL(__sysctl), name, namelen, oldp,
(size_t *)oldlenp, newp, (size_t)newlen);
-#endif
}
-#if SANITIZER_FREEBSD
int internal_sysctlbyname(const char *sname, void *oldp, uptr *oldlenp,
const void *newp, uptr newlen) {
- static decltype(sysctlbyname) *real = nullptr;
- if (!real)
- real = (decltype(sysctlbyname) *)dlsym(RTLD_NEXT, "sysctlbyname");
- CHECK(real);
- return real(sname, oldp, (size_t *)oldlenp, newp, (size_t)newlen);
-}
+ // Note: this function can be called during startup, so we need to avoid
+ // calling any interceptable functions. On FreeBSD >= 1300045 sysctlbyname()
+ // is a real syscall, but for older versions it calls sysctlnametomib()
+ // followed by sysctl(). To avoid calling the intercepted version and
+ // asserting if this happens during startup, call the real sysctlnametomib()
+ // followed by internal_sysctl() if the syscall is not available.
+#ifdef SYS___sysctlbyname
+ return internal_syscall(SYSCALL(__sysctlbyname), sname,
+ internal_strlen(sname), oldp, (size_t *)oldlenp, newp,
+ (size_t)newlen);
+#else
+ static decltype(sysctlnametomib) *real_sysctlnametomib = nullptr;
+ if (!real_sysctlnametomib)
+ real_sysctlnametomib =
+ (decltype(sysctlnametomib) *)dlsym(RTLD_NEXT, "sysctlnametomib");
+ CHECK(real_sysctlnametomib);
+
+ int oid[CTL_MAXNAME];
+ size_t len = CTL_MAXNAME;
+ if (real_sysctlnametomib(sname, oid, &len) == -1)
+ return (-1);
+ return internal_sysctl(oid, len, oldp, oldlenp, newp, newlen);
#endif
+}
#endif
#if SANITIZER_LINUX
@@ -856,7 +848,7 @@ int internal_sigaction_norestorer(int signum, const void *act, void *oldact) {
uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
__sanitizer_sigset_t *oldset) {
-#if SANITIZER_FREEBSD || SANITIZER_OPENBSD
+#if SANITIZER_FREEBSD
return internal_syscall(SYSCALL(sigprocmask), how, set, oldset);
#else
__sanitizer_kernel_sigset_t *k_set = (__sanitizer_kernel_sigset_t *)set;
@@ -1033,7 +1025,7 @@ static uptr GetKernelAreaSize() {
#endif // SANITIZER_WORDSIZE == 32
uptr GetMaxVirtualAddress() {
-#if (SANITIZER_NETBSD || SANITIZER_OPENBSD) && defined(__x86_64__)
+#if SANITIZER_NETBSD && defined(__x86_64__)
return 0x7f7ffffff000ULL; // (0x00007f8000000000 - PAGE_SIZE)
#elif SANITIZER_WORDSIZE == 64
# if defined(__powerpc64__) || defined(__aarch64__)
@@ -1045,6 +1037,8 @@ uptr GetMaxVirtualAddress() {
// This should (does) work for both PowerPC64 Endian modes.
// Similarly, aarch64 has multiple address space layouts: 39, 42 and 47-bit.
return (1ULL << (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1)) - 1;
+#elif SANITIZER_RISCV64
+ return (1ULL << 38) - 1;
# elif defined(__mips64)
return (1ULL << 40) - 1; // 0x000000ffffffffffUL;
# elif defined(__s390x__)
@@ -1094,7 +1088,6 @@ uptr GetPageSize() {
}
#endif // !SANITIZER_ANDROID
-#if !SANITIZER_OPENBSD
uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
#if SANITIZER_SOLARIS
const char *default_module_name = getexecname();
@@ -1131,7 +1124,6 @@ uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
return module_name_len;
#endif
}
-#endif // !SANITIZER_OPENBSD
uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len) {
#if SANITIZER_LINUX
@@ -1164,10 +1156,10 @@ bool LibraryNameIs(const char *full_name, const char *base_name) {
// Call cb for each region mapped by map.
void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr)) {
CHECK_NE(map, nullptr);
-#if !SANITIZER_FREEBSD && !SANITIZER_OPENBSD
+#if !SANITIZER_FREEBSD
typedef ElfW(Phdr) Elf_Phdr;
typedef ElfW(Ehdr) Elf_Ehdr;
-#endif // !SANITIZER_FREEBSD && !SANITIZER_OPENBSD
+#endif // !SANITIZER_FREEBSD
char *base = (char *)map->l_addr;
Elf_Ehdr *ehdr = (Elf_Ehdr *)base;
char *phdrs = base + ehdr->e_phoff;
@@ -1339,6 +1331,55 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
: "memory", "$29" );
return res;
}
+#elif SANITIZER_RISCV64
+uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
+ int *parent_tidptr, void *newtls, int *child_tidptr) {
+ long long res;
+ if (!fn || !child_stack)
+ return -EINVAL;
+ CHECK_EQ(0, (uptr)child_stack % 16);
+ child_stack = (char *)child_stack - 2 * sizeof(unsigned long long);
+ ((unsigned long long *)child_stack)[0] = (uptr)fn;
+ ((unsigned long long *)child_stack)[1] = (uptr)arg;
+
+ register int (*__fn)(void *) __asm__("a0") = fn;
+ register void *__stack __asm__("a1") = child_stack;
+ register int __flags __asm__("a2") = flags;
+ register void *__arg __asm__("a3") = arg;
+ register int *__ptid __asm__("a4") = parent_tidptr;
+ register void *__tls __asm__("a5") = newtls;
+ register int *__ctid __asm__("a6") = child_tidptr;
+
+ __asm__ __volatile__(
+ "mv a0,a2\n" /* flags */
+ "mv a2,a4\n" /* ptid */
+ "mv a3,a5\n" /* tls */
+ "mv a4,a6\n" /* ctid */
+ "addi a7, zero, %9\n" /* clone */
+
+ "ecall\n"
+
+ /* if (%r0 != 0)
+ * return %r0;
+ */
+ "bnez a0, 1f\n"
+
+ /* In the child, now. Call "fn(arg)". */
+ "ld a0, 8(sp)\n"
+ "ld a1, 16(sp)\n"
+ "jalr a1\n"
+
+ /* Call _exit(%r0). */
+ "addi a7, zero, %10\n"
+ "ecall\n"
+ "1:\n"
+
+ : "=r"(res)
+ : "i"(-EINVAL), "r"(__fn), "r"(__stack), "r"(__flags), "r"(__arg),
+ "r"(__ptid), "r"(__tls), "r"(__ctid), "i"(__NR_clone), "i"(__NR_exit)
+ : "ra", "memory");
+ return res;
+}
#elif defined(__aarch64__)
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr) {
@@ -1768,11 +1809,7 @@ static bool Aarch64GetESR(ucontext_t *ucontext, u64 *esr) {
}
#endif
-#if SANITIZER_OPENBSD
-using Context = sigcontext;
-#else
using Context = ucontext_t;
-#endif
SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
Context *ucontext = (Context *)context;
@@ -1782,8 +1819,6 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
uptr err = ucontext->uc_mcontext.mc_err;
#elif SANITIZER_NETBSD
uptr err = ucontext->uc_mcontext.__gregs[_REG_ERR];
-#elif SANITIZER_OPENBSD
- uptr err = ucontext->sc_err;
#elif SANITIZER_SOLARIS && defined(__i386__)
const int Err = 13;
uptr err = ucontext->uc_mcontext.gregs[Err];
@@ -2009,11 +2044,6 @@ static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
*pc = ucontext->uc_mcontext.mc_rip;
*bp = ucontext->uc_mcontext.mc_rbp;
*sp = ucontext->uc_mcontext.mc_rsp;
-#elif SANITIZER_OPENBSD
- sigcontext *ucontext = (sigcontext *)context;
- *pc = ucontext->sc_rip;
- *bp = ucontext->sc_rbp;
- *sp = ucontext->sc_rsp;
# else
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.gregs[REG_RIP];
@@ -2026,11 +2056,6 @@ static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
*pc = ucontext->uc_mcontext.mc_eip;
*bp = ucontext->uc_mcontext.mc_ebp;
*sp = ucontext->uc_mcontext.mc_esp;
-#elif SANITIZER_OPENBSD
- sigcontext *ucontext = (sigcontext *)context;
- *pc = ucontext->sc_eip;
- *bp = ucontext->sc_ebp;
- *sp = ucontext->sc_esp;
# else
ucontext_t *ucontext = (ucontext_t*)context;
# if SANITIZER_SOLARIS
@@ -2203,8 +2228,6 @@ void CheckMPROTECT() {
#endif
}
-void PrintModuleMap() { }
-
void CheckNoDeepBind(const char *filename, int flag) {
#ifdef RTLD_DEEPBIND
if (flag & RTLD_DEEPBIND) {
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux.h
index c162d1ca5d28..24902d1b6bce 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux.h
@@ -14,12 +14,11 @@
#include "sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
- SANITIZER_OPENBSD || SANITIZER_SOLARIS
+ SANITIZER_SOLARIS
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_platform_limits_freebsd.h"
#include "sanitizer_platform_limits_netbsd.h"
-#include "sanitizer_platform_limits_openbsd.h"
#include "sanitizer_platform_limits_posix.h"
#include "sanitizer_platform_limits_solaris.h"
#include "sanitizer_posix.h"
@@ -60,9 +59,9 @@ uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5);
// internal_sigaction instead.
int internal_sigaction_norestorer(int signum, const void *act, void *oldact);
void internal_sigdelset(__sanitizer_sigset_t *set, int signum);
-#if defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) \
- || defined(__powerpc64__) || defined(__s390__) || defined(__i386__) \
- || defined(__arm__)
+#if defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) || \
+ defined(__powerpc64__) || defined(__s390__) || defined(__i386__) || \
+ defined(__arm__) || SANITIZER_RISCV64
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr);
#endif
@@ -109,7 +108,7 @@ void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr));
// Releases memory pages entirely within the [beg, end] address range.
// The pages no longer count toward RSS; reads are guaranteed to return 0.
// Requires (but does not verify!) that pages are MAP_PRIVATE.
-INLINE void ReleaseMemoryPagesToOSAndZeroFill(uptr beg, uptr end) {
+inline void ReleaseMemoryPagesToOSAndZeroFill(uptr beg, uptr end) {
// man madvise on Linux promises zero-fill for anonymous private pages.
// Testing shows the same behaviour for private (but not anonymous) mappings
// of shm_open() files, as long as the underlying file is untouched.
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp
index 4d17c9686e4e..f20b9001c2c2 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp
@@ -13,8 +13,8 @@
#include "sanitizer_platform.h"
-#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
- SANITIZER_OPENBSD || SANITIZER_SOLARIS
+#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
+ SANITIZER_SOLARIS
#include "sanitizer_allocator_internal.h"
#include "sanitizer_atomic.h"
@@ -28,6 +28,10 @@
#include "sanitizer_placement_new.h"
#include "sanitizer_procmaps.h"
+#if SANITIZER_NETBSD
+#define _RTLD_SOURCE // for __lwp_gettcb_fast() / __lwp_getprivate_fast()
+#endif
+
#include <dlfcn.h> // for dlsym()
#include <link.h>
#include <pthread.h>
@@ -46,11 +50,6 @@
#define pthread_getattr_np pthread_attr_get_np
#endif
-#if SANITIZER_OPENBSD
-#include <pthread_np.h>
-#include <sys/sysctl.h>
-#endif
-
#if SANITIZER_NETBSD
#include <sys/sysctl.h>
#include <sys/tls.h>
@@ -138,18 +137,13 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
CHECK_EQ(thr_stksegment(&ss), 0);
stacksize = ss.ss_size;
stackaddr = (char *)ss.ss_sp - stacksize;
-#elif SANITIZER_OPENBSD
- stack_t sattr;
- CHECK_EQ(pthread_stackseg_np(pthread_self(), &sattr), 0);
- stackaddr = sattr.ss_sp;
- stacksize = sattr.ss_size;
#else // !SANITIZER_SOLARIS
pthread_attr_t attr;
pthread_attr_init(&attr);
CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
my_pthread_attr_getstack(&attr, &stackaddr, &stacksize);
pthread_attr_destroy(&attr);
-#endif // SANITIZER_SOLARIS
+#endif // SANITIZER_SOLARIS
*stack_top = (uptr)stackaddr + stacksize;
*stack_bottom = (uptr)stackaddr;
@@ -189,20 +183,19 @@ __attribute__((unused)) static bool GetLibcVersion(int *major, int *minor,
#endif
}
-#if !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_GO && \
- !SANITIZER_NETBSD && !SANITIZER_OPENBSD && !SANITIZER_SOLARIS
+#if SANITIZER_GLIBC && !SANITIZER_GO
static uptr g_tls_size;
#ifdef __i386__
-# define CHECK_GET_TLS_STATIC_INFO_VERSION (!__GLIBC_PREREQ(2, 27))
+#define CHECK_GET_TLS_STATIC_INFO_VERSION (!__GLIBC_PREREQ(2, 27))
#else
-# define CHECK_GET_TLS_STATIC_INFO_VERSION 0
+#define CHECK_GET_TLS_STATIC_INFO_VERSION 0
#endif
#if CHECK_GET_TLS_STATIC_INFO_VERSION
-# define DL_INTERNAL_FUNCTION __attribute__((regparm(3), stdcall))
+#define DL_INTERNAL_FUNCTION __attribute__((regparm(3), stdcall))
#else
-# define DL_INTERNAL_FUNCTION
+#define DL_INTERNAL_FUNCTION
#endif
namespace {
@@ -262,12 +255,11 @@ void InitTlsSize() {
}
#else
void InitTlsSize() { }
-#endif // !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_GO &&
- // !SANITIZER_NETBSD && !SANITIZER_SOLARIS
+#endif // SANITIZER_GLIBC && !SANITIZER_GO
-#if (defined(__x86_64__) || defined(__i386__) || defined(__mips__) || \
- defined(__aarch64__) || defined(__powerpc64__) || defined(__s390__) || \
- defined(__arm__)) && \
+#if (defined(__x86_64__) || defined(__i386__) || defined(__mips__) || \
+ defined(__aarch64__) || defined(__powerpc64__) || defined(__s390__) || \
+ defined(__arm__) || SANITIZER_RISCV64) && \
SANITIZER_LINUX && !SANITIZER_ANDROID
// sizeof(struct pthread) from glibc.
static atomic_uintptr_t thread_descriptor_size;
@@ -301,12 +293,29 @@ uptr ThreadDescriptorSize() {
val = FIRST_32_SECOND_64(1168, 2288);
else if (minor <= 14)
val = FIRST_32_SECOND_64(1168, 2304);
- else
+ else if (minor < 32) // Unknown version
val = FIRST_32_SECOND_64(1216, 2304);
+ else // minor == 32
+ val = FIRST_32_SECOND_64(1344, 2496);
}
#elif defined(__mips__)
// TODO(sagarthakur): add more values as per different glibc versions.
val = FIRST_32_SECOND_64(1152, 1776);
+#elif SANITIZER_RISCV64
+ int major;
+ int minor;
+ int patch;
+ if (GetLibcVersion(&major, &minor, &patch) && major == 2) {
+ // TODO: consider adding an optional runtime check for an unknown (untested)
+ // glibc version
+ if (minor <= 28) // WARNING: the highest tested version is 2.29
+ val = 1772; // no guarantees for this one
+ else if (minor <= 31)
+ val = 1772; // tested against glibc 2.29, 2.31
+ else
+ val = 1936; // tested against glibc 2.32
+ }
+
#elif defined(__aarch64__)
// The sizeof (struct pthread) is the same from GLIBC 2.17 to 2.22.
val = 1776;
@@ -327,15 +336,17 @@ uptr ThreadSelfOffset() {
return kThreadSelfOffset;
}
-#if defined(__mips__) || defined(__powerpc64__)
+#if defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64
// TlsPreTcbSize includes size of struct pthread_descr and size of tcb
// head structure. It lies before the static tls blocks.
static uptr TlsPreTcbSize() {
-# if defined(__mips__)
+#if defined(__mips__)
const uptr kTcbHead = 16; // sizeof (tcbhead_t)
-# elif defined(__powerpc64__)
+#elif defined(__powerpc64__)
const uptr kTcbHead = 88; // sizeof (tcbhead_t)
-# endif
+#elif SANITIZER_RISCV64
+ const uptr kTcbHead = 16; // sizeof (tcbhead_t)
+#endif
const uptr kTlsAlign = 16;
const uptr kTlsPreTcbSize =
RoundUpTo(ThreadDescriptorSize() + kTcbHead, kTlsAlign);
@@ -345,11 +356,11 @@ static uptr TlsPreTcbSize() {
uptr ThreadSelf() {
uptr descr_addr;
-# if defined(__i386__)
+#if defined(__i386__)
asm("mov %%gs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset));
-# elif defined(__x86_64__)
+#elif defined(__x86_64__)
asm("mov %%fs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset));
-# elif defined(__mips__)
+#elif defined(__mips__)
// MIPS uses TLS variant I. The thread pointer (in hardware register $29)
// points to the end of the TCB + 0x7000. The pthread_descr structure is
// immediately in front of the TCB. TlsPreTcbSize() includes the size of the
@@ -361,12 +372,16 @@ uptr ThreadSelf() {
rdhwr %0,$29;\
.set pop" : "=r" (thread_pointer));
descr_addr = thread_pointer - kTlsTcbOffset - TlsPreTcbSize();
-# elif defined(__aarch64__) || defined(__arm__)
+#elif defined(__aarch64__) || defined(__arm__)
descr_addr = reinterpret_cast<uptr>(__builtin_thread_pointer()) -
ThreadDescriptorSize();
-# elif defined(__s390__)
+#elif SANITIZER_RISCV64
+ // https://github.com/riscv/riscv-elf-psabi-doc/issues/53
+ uptr thread_pointer = reinterpret_cast<uptr>(__builtin_thread_pointer());
+ descr_addr = thread_pointer - TlsPreTcbSize();
+#elif defined(__s390__)
descr_addr = reinterpret_cast<uptr>(__builtin_thread_pointer());
-# elif defined(__powerpc64__)
+#elif defined(__powerpc64__)
// PPC64LE uses TLS variant I. The thread pointer (in GPR 13)
// points to the end of the TCB + 0x7000. The pthread_descr structure is
// immediately in front of the TCB. TlsPreTcbSize() includes the size of the
@@ -375,9 +390,9 @@ uptr ThreadSelf() {
uptr thread_pointer;
asm("addi %0,13,%1" : "=r"(thread_pointer) : "I"(-kTlsTcbOffset));
descr_addr = thread_pointer - TlsPreTcbSize();
-# else
-# error "unsupported CPU arch"
-# endif
+#else
+#error "unsupported CPU arch"
+#endif
return descr_addr;
}
#endif // (x86_64 || i386 || MIPS) && SANITIZER_LINUX
@@ -385,15 +400,15 @@ uptr ThreadSelf() {
#if SANITIZER_FREEBSD
static void **ThreadSelfSegbase() {
void **segbase = 0;
-# if defined(__i386__)
+#if defined(__i386__)
// sysarch(I386_GET_GSBASE, segbase);
__asm __volatile("mov %%gs:0, %0" : "=r" (segbase));
-# elif defined(__x86_64__)
+#elif defined(__x86_64__)
// sysarch(AMD64_GET_FSBASE, segbase);
__asm __volatile("movq %%fs:0, %0" : "=r" (segbase));
-# else
-# error "unsupported CPU arch"
-# endif
+#else
+#error "unsupported CPU arch"
+#endif
return segbase;
}
@@ -404,7 +419,13 @@ uptr ThreadSelf() {
#if SANITIZER_NETBSD
static struct tls_tcb * ThreadSelfTlsTcb() {
- return (struct tls_tcb *)_lwp_getprivate();
+ struct tls_tcb *tcb = nullptr;
+#ifdef __HAVE___LWP_GETTCB_FAST
+ tcb = (struct tls_tcb *)__lwp_gettcb_fast();
+#elif defined(__HAVE___LWP_GETPRIVATE_FAST)
+ tcb = (struct tls_tcb *)__lwp_getprivate_fast();
+#endif
+ return tcb;
}
uptr ThreadSelf() {
@@ -425,22 +446,40 @@ int GetSizeFromHdr(struct dl_phdr_info *info, size_t size, void *data) {
}
#endif // SANITIZER_NETBSD
+#if SANITIZER_ANDROID
+// Bionic provides this API since S.
+extern "C" SANITIZER_WEAK_ATTRIBUTE void __libc_get_static_tls_bounds(void **,
+ void **);
+#endif
+
#if !SANITIZER_GO
static void GetTls(uptr *addr, uptr *size) {
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
-# if defined(__x86_64__) || defined(__i386__) || defined(__s390__)
+#if SANITIZER_ANDROID
+ if (&__libc_get_static_tls_bounds) {
+ void *start_addr;
+ void *end_addr;
+ __libc_get_static_tls_bounds(&start_addr, &end_addr);
+ *addr = reinterpret_cast<uptr>(start_addr);
+ *size =
+ reinterpret_cast<uptr>(end_addr) - reinterpret_cast<uptr>(start_addr);
+ } else {
+ *addr = 0;
+ *size = 0;
+ }
+#elif SANITIZER_LINUX
+#if defined(__x86_64__) || defined(__i386__) || defined(__s390__)
*addr = ThreadSelf();
*size = GetTlsSize();
*addr -= *size;
*addr += ThreadDescriptorSize();
-# elif defined(__mips__) || defined(__aarch64__) || defined(__powerpc64__) \
- || defined(__arm__)
+#elif defined(__mips__) || defined(__aarch64__) || defined(__powerpc64__) || \
+ defined(__arm__) || SANITIZER_RISCV64
*addr = ThreadSelf();
*size = GetTlsSize();
-# else
+#else
*addr = 0;
*size = 0;
-# endif
+#endif
#elif SANITIZER_FREEBSD
void** segbase = ThreadSelfSegbase();
*addr = 0;
@@ -468,34 +507,32 @@ static void GetTls(uptr *addr, uptr *size) {
*addr = (uptr)tcb->tcb_dtv[1];
}
}
-#elif SANITIZER_OPENBSD
- *addr = 0;
- *size = 0;
-#elif SANITIZER_ANDROID
- *addr = 0;
- *size = 0;
#elif SANITIZER_SOLARIS
// FIXME
*addr = 0;
*size = 0;
#else
-# error "Unknown OS"
+#error "Unknown OS"
#endif
}
#endif
#if !SANITIZER_GO
uptr GetTlsSize() {
-#if SANITIZER_FREEBSD || SANITIZER_ANDROID || SANITIZER_NETBSD || \
- SANITIZER_OPENBSD || SANITIZER_SOLARIS
+#if SANITIZER_FREEBSD || SANITIZER_ANDROID || SANITIZER_NETBSD || \
+ SANITIZER_SOLARIS
uptr addr, size;
GetTls(&addr, &size);
return size;
-#elif defined(__mips__) || defined(__powerpc64__)
+#elif SANITIZER_GLIBC
+#if defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64
return RoundUpTo(g_tls_size + TlsPreTcbSize(), 16);
#else
return g_tls_size;
#endif
+#else
+ return 0;
+#endif
}
#endif
@@ -524,13 +561,13 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
#endif
}
-#if !SANITIZER_FREEBSD && !SANITIZER_OPENBSD
+#if !SANITIZER_FREEBSD
typedef ElfW(Phdr) Elf_Phdr;
-#elif SANITIZER_WORDSIZE == 32 && __FreeBSD_version <= 902001 // v9.2
+#elif SANITIZER_WORDSIZE == 32 && __FreeBSD_version <= 902001 // v9.2
#define Elf_Phdr XElf32_Phdr
#define dl_phdr_info xdl_phdr_info
#define dl_iterate_phdr(c, b) xdl_iterate_phdr((c), (b))
-#endif // !SANITIZER_FREEBSD && !SANITIZER_OPENBSD
+#endif // !SANITIZER_FREEBSD
struct DlIteratePhdrData {
InternalMmapVectorNoCtor<LoadedModule> *modules;
@@ -650,7 +687,7 @@ uptr GetRSS() {
// sysconf(_SC_NPROCESSORS_{CONF,ONLN}) cannot be used on most platforms as
// they allocate memory.
u32 GetNumberOfCPUs() {
-#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_OPENBSD
+#if SANITIZER_FREEBSD || SANITIZER_NETBSD
u32 ncpu;
int req[2];
uptr len = sizeof(ncpu);
@@ -705,7 +742,7 @@ u32 GetNumberOfCPUs() {
#if SANITIZER_LINUX
-# if SANITIZER_ANDROID
+#if SANITIZER_ANDROID
static atomic_uint8_t android_log_initialized;
void AndroidLogInit() {
@@ -749,7 +786,7 @@ void SetAbortMessage(const char *str) {
if (&android_set_abort_message)
android_set_abort_message(str);
}
-# else
+#else
void AndroidLogInit() {}
static bool ShouldLogAfterPrintf() { return true; }
@@ -757,7 +794,7 @@ static bool ShouldLogAfterPrintf() { return true; }
void WriteOneLineToSyslog(const char *s) { syslog(LOG_INFO, "%s", s); }
void SetAbortMessage(const char *str) {}
-# endif // SANITIZER_ANDROID
+#endif // SANITIZER_ANDROID
void LogMessageOnPrintf(const char *str) {
if (common_flags()->log_to_syslog && ShouldLogAfterPrintf())
@@ -772,7 +809,7 @@ void LogMessageOnPrintf(const char *str) {
// initialized after the vDSO function pointers, so if it exists, is not null
// and is not empty, we can use clock_gettime.
extern "C" SANITIZER_WEAK_ATTRIBUTE char *__progname;
-INLINE bool CanUseVDSO() {
+inline bool CanUseVDSO() {
// Bionic is safe, it checks for the vDSO function pointers to be initialized.
if (SANITIZER_ANDROID)
return true;
@@ -807,7 +844,6 @@ u64 MonotonicNanoTime() {
}
#endif // SANITIZER_LINUX && !SANITIZER_GO
-#if !SANITIZER_OPENBSD
void ReExec() {
const char *pathname = "/proc/self/exe";
@@ -839,7 +875,48 @@ void ReExec() {
Printf("execve failed, errno %d\n", rverrno);
Die();
}
-#endif // !SANITIZER_OPENBSD
+
+void UnmapFromTo(uptr from, uptr to) {
+ if (to == from)
+ return;
+ CHECK(to >= from);
+ uptr res = internal_munmap(reinterpret_cast<void *>(from), to - from);
+ if (UNLIKELY(internal_iserror(res))) {
+ Report("ERROR: %s failed to unmap 0x%zx (%zd) bytes at address %p\n",
+ SanitizerToolName, to - from, to - from, (void *)from);
+ CHECK("unable to unmap" && 0);
+ }
+}
+
+uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
+ uptr min_shadow_base_alignment,
+ UNUSED uptr &high_mem_end) {
+ const uptr granularity = GetMmapGranularity();
+ const uptr alignment =
+ Max<uptr>(granularity << shadow_scale, 1ULL << min_shadow_base_alignment);
+ const uptr left_padding =
+ Max<uptr>(granularity, 1ULL << min_shadow_base_alignment);
+
+ const uptr shadow_size = RoundUpTo(shadow_size_bytes, granularity);
+ const uptr map_size = shadow_size + left_padding + alignment;
+
+ const uptr map_start = (uptr)MmapNoAccess(map_size);
+ CHECK_NE(map_start, ~(uptr)0);
+
+ const uptr shadow_start = RoundUpTo(map_start + left_padding, alignment);
+
+ UnmapFromTo(map_start, shadow_start - left_padding);
+ UnmapFromTo(shadow_start + shadow_size, map_start + map_size);
+
+ return shadow_start;
+}
+
+void InitializePlatformCommonFlags(CommonFlags *cf) {
+#if SANITIZER_ANDROID
+ if (&__libc_get_static_tls_bounds == nullptr)
+ cf->detect_leaks = false;
+#endif
+}
} // namespace __sanitizer
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp
index 7a3dfbcc2760..2b53d7d730d7 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp
@@ -137,6 +137,10 @@ int internal_mprotect(void *addr, uptr length, int prot) {
return mprotect(addr, length, prot);
}
+int internal_madvise(uptr addr, uptr length, int advice) {
+ return madvise((void *)addr, length, advice);
+}
+
uptr internal_close(fd_t fd) {
return close(fd);
}
@@ -606,21 +610,103 @@ HandleSignalMode GetHandleSignalMode(int signum) {
return result;
}
-// This corresponds to Triple::getMacOSXVersion() in the Clang driver.
-static MacosVersion GetMacosAlignedVersionInternal() {
+// Offset example:
+// XNU 17 -- macOS 10.13 -- iOS 11 -- tvOS 11 -- watchOS 4
+constexpr u16 GetOSMajorKernelOffset() {
+ if (TARGET_OS_OSX) return 4;
+ if (TARGET_OS_IOS || TARGET_OS_TV) return 6;
+ if (TARGET_OS_WATCH) return 13;
+}
+
+using VersStr = char[64];
+
+static uptr ApproximateOSVersionViaKernelVersion(VersStr vers) {
u16 kernel_major = GetDarwinKernelVersion().major;
- // Darwin 0-3 -> unsupported
- // Darwin 4-19 -> macOS 10.x
- // Darwin 20+ -> macOS 11+
- CHECK_GE(kernel_major, 4);
- u16 major, minor;
- if (kernel_major < 20) {
- major = 10;
- minor = kernel_major - 4;
+ u16 offset = GetOSMajorKernelOffset();
+ CHECK_GE(kernel_major, offset);
+ u16 os_major = kernel_major - offset;
+
+ const char *format = "%d.0";
+ if (TARGET_OS_OSX) {
+ if (os_major >= 16) { // macOS 11+
+ os_major -= 5;
+ } else { // macOS 10.15 and below
+ format = "10.%d";
+ }
+ }
+ return internal_snprintf(vers, sizeof(VersStr), format, os_major);
+}
+
+static void GetOSVersion(VersStr vers) {
+ uptr len = sizeof(VersStr);
+ if (SANITIZER_IOSSIM) {
+ const char *vers_env = GetEnv("SIMULATOR_RUNTIME_VERSION");
+ if (!vers_env) {
+ Report("ERROR: Running in simulator but SIMULATOR_RUNTIME_VERSION env "
+ "var is not set.\n");
+ Die();
+ }
+ len = internal_strlcpy(vers, vers_env, len);
} else {
- major = 11 + kernel_major - 20;
- minor = 0;
+ int res =
+ internal_sysctlbyname("kern.osproductversion", vers, &len, nullptr, 0);
+
+ // XNU 17 (macOS 10.13) and below do not provide the sysctl
+ // `kern.osproductversion` entry (res != 0).
+ bool no_os_version = res != 0;
+
+ // For launchd, sanitizer initialization runs before sysctl is setup
+ // (res == 0 && len != strlen(vers), vers is not a valid version). However,
+ // the kernel version `kern.osrelease` is available.
+ bool launchd = (res == 0 && internal_strlen(vers) < 3);
+ if (launchd) CHECK_EQ(internal_getpid(), 1);
+
+ if (no_os_version || launchd) {
+ len = ApproximateOSVersionViaKernelVersion(vers);
+ }
+ }
+ CHECK_LT(len, sizeof(VersStr));
+}
+
+void ParseVersion(const char *vers, u16 *major, u16 *minor) {
+ // Format: <major>.<minor>[.<patch>]\0
+ CHECK_GE(internal_strlen(vers), 3);
+ const char *p = vers;
+ *major = internal_simple_strtoll(p, &p, /*base=*/10);
+ CHECK_EQ(*p, '.');
+ p += 1;
+ *minor = internal_simple_strtoll(p, &p, /*base=*/10);
+}
+
+// Aligned versions example:
+// macOS 10.15 -- iOS 13 -- tvOS 13 -- watchOS 6
+static void MapToMacos(u16 *major, u16 *minor) {
+ if (TARGET_OS_OSX)
+ return;
+
+ if (TARGET_OS_IOS || TARGET_OS_TV)
+ *major += 2;
+ else if (TARGET_OS_WATCH)
+ *major += 9;
+ else
+ UNREACHABLE("unsupported platform");
+
+ if (*major >= 16) { // macOS 11+
+ *major -= 5;
+ } else { // macOS 10.15 and below
+ *minor = *major;
+ *major = 10;
}
+}
+
+static MacosVersion GetMacosAlignedVersionInternal() {
+ VersStr vers = {};
+ GetOSVersion(vers);
+
+ u16 major, minor;
+ ParseVersion(vers, &major, &minor);
+ MapToMacos(&major, &minor);
+
return MacosVersion(major, minor);
}
@@ -639,24 +725,15 @@ MacosVersion GetMacosAlignedVersion() {
return *reinterpret_cast<MacosVersion *>(&result);
}
-void ParseVersion(const char *vers, u16 *major, u16 *minor) {
- // Format: <major>.<minor>.<patch>\0
- CHECK_GE(internal_strlen(vers), 5);
- const char *p = vers;
- *major = internal_simple_strtoll(p, &p, /*base=*/10);
- CHECK_EQ(*p, '.');
- p += 1;
- *minor = internal_simple_strtoll(p, &p, /*base=*/10);
-}
-
DarwinKernelVersion GetDarwinKernelVersion() {
- char buf[100];
- size_t len = sizeof(buf);
- int res = internal_sysctlbyname("kern.osrelease", buf, &len, nullptr, 0);
+ VersStr vers = {};
+ uptr len = sizeof(VersStr);
+ int res = internal_sysctlbyname("kern.osrelease", vers, &len, nullptr, 0);
CHECK_EQ(res, 0);
+ CHECK_LT(len, sizeof(VersStr));
u16 major, minor;
- ParseVersion(buf, &major, &minor);
+ ParseVersion(vers, &major, &minor);
return DarwinKernelVersion(major, minor);
}
@@ -796,6 +873,19 @@ void SignalContext::InitPcSpBp() {
GetPcSpBp(context, &pc, &sp, &bp);
}
+// ASan/TSan use mmap in a way that creates “deallocation gaps” which triggers
+// EXC_GUARD exceptions on macOS 10.15+ (XNU 19.0+).
+static void DisableMmapExcGuardExceptions() {
+ using task_exc_guard_behavior_t = uint32_t;
+ using task_set_exc_guard_behavior_t =
+ kern_return_t(task_t task, task_exc_guard_behavior_t behavior);
+ auto *set_behavior = (task_set_exc_guard_behavior_t *)dlsym(
+ RTLD_DEFAULT, "task_set_exc_guard_behavior");
+ if (set_behavior == nullptr) return;
+ const task_exc_guard_behavior_t task_exc_guard_none = 0;
+ set_behavior(mach_task_self(), task_exc_guard_none);
+}
+
void InitializePlatformEarly() {
// Only use xnu_fast_mmap when on x86_64 and the kernel supports it.
use_xnu_fast_mmap =
@@ -804,6 +894,8 @@ void InitializePlatformEarly() {
#else
false;
#endif
+ if (GetDarwinKernelVersion() >= DarwinKernelVersion(19, 0))
+ DisableMmapExcGuardExceptions();
}
#if !SANITIZER_GO
@@ -844,20 +936,10 @@ bool ReexecDisabled() {
return false;
}
-extern "C" SANITIZER_WEAK_ATTRIBUTE double dyldVersionNumber;
-static const double kMinDyldVersionWithAutoInterposition = 360.0;
-
-bool DyldNeedsEnvVariable() {
- // Although sanitizer support was added to LLVM on OS X 10.7+, GCC users
- // still may want use them on older systems. On older Darwin platforms, dyld
- // doesn't export dyldVersionNumber symbol and we simply return true.
- if (!&dyldVersionNumber) return true;
+static bool DyldNeedsEnvVariable() {
// If running on OS X 10.11+ or iOS 9.0+, dyld will interpose even if
- // DYLD_INSERT_LIBRARIES is not set. However, checking OS version via
- // GetMacosAlignedVersion() doesn't work for the simulator. Let's instead
- // check `dyldVersionNumber`, which is exported by dyld, against a known
- // version number from the first OS release where this appeared.
- return dyldVersionNumber < kMinDyldVersionWithAutoInterposition;
+ // DYLD_INSERT_LIBRARIES is not set.
+ return GetMacosAlignedVersion() < MacosVersion(10, 11);
}
void MaybeReexec() {
@@ -1003,7 +1085,7 @@ char **GetArgv() {
return *_NSGetArgv();
}
-#if SANITIZER_IOS
+#if SANITIZER_IOS && !SANITIZER_IOSSIM
// The task_vm_info struct is normally provided by the macOS SDK, but we need
// fields only available in 10.12+. Declare the struct manually to be able to
// build against older SDKs.
@@ -1070,6 +1152,53 @@ uptr GetMaxVirtualAddress() {
return GetMaxUserVirtualAddress();
}
+uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
+ uptr min_shadow_base_alignment, uptr &high_mem_end) {
+ const uptr granularity = GetMmapGranularity();
+ const uptr alignment =
+ Max<uptr>(granularity << shadow_scale, 1ULL << min_shadow_base_alignment);
+ const uptr left_padding =
+ Max<uptr>(granularity, 1ULL << min_shadow_base_alignment);
+
+ uptr space_size = shadow_size_bytes + left_padding;
+
+ uptr largest_gap_found = 0;
+ uptr max_occupied_addr = 0;
+ VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size);
+ uptr shadow_start =
+ FindAvailableMemoryRange(space_size, alignment, granularity,
+ &largest_gap_found, &max_occupied_addr);
+ // If the shadow doesn't fit, restrict the address space to make it fit.
+ if (shadow_start == 0) {
+ VReport(
+ 2,
+ "Shadow doesn't fit, largest_gap_found = %p, max_occupied_addr = %p\n",
+ largest_gap_found, max_occupied_addr);
+ uptr new_max_vm = RoundDownTo(largest_gap_found << shadow_scale, alignment);
+ if (new_max_vm < max_occupied_addr) {
+ Report("Unable to find a memory range for dynamic shadow.\n");
+ Report(
+ "space_size = %p, largest_gap_found = %p, max_occupied_addr = %p, "
+ "new_max_vm = %p\n",
+ space_size, largest_gap_found, max_occupied_addr, new_max_vm);
+ CHECK(0 && "cannot place shadow");
+ }
+ RestrictMemoryToMaxAddress(new_max_vm);
+ high_mem_end = new_max_vm - 1;
+ space_size = (high_mem_end >> shadow_scale) + left_padding;
+ VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size);
+ shadow_start = FindAvailableMemoryRange(space_size, alignment, granularity,
+ nullptr, nullptr);
+ if (shadow_start == 0) {
+ Report("Unable to find a memory range after restricting VM.\n");
+ CHECK(0 && "cannot place shadow after restricting vm");
+ }
+ }
+ CHECK_NE((uptr)0, shadow_start);
+ CHECK(IsAligned(shadow_start, alignment));
+ return shadow_start;
+}
+
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
uptr *largest_gap_found,
uptr *max_occupied_addr) {
@@ -1190,7 +1319,7 @@ void FormatUUID(char *out, uptr size, const u8 *uuid) {
uuid[12], uuid[13], uuid[14], uuid[15]);
}
-void PrintModuleMap() {
+void DumpProcessMap() {
Printf("Process module map:\n");
MemoryMappingLayout memory_mapping(false);
InternalMmapVector<LoadedModule> modules;
@@ -1223,6 +1352,8 @@ u32 GetNumberOfCPUs() {
return (u32)sysconf(_SC_NPROCESSORS_ONLN);
}
+void InitializePlatformCommonFlags(CommonFlags *cf) {}
+
} // namespace __sanitizer
#endif // SANITIZER_MAC
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mac.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mac.h
index 90ecff4815c2..023071e4f11d 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mac.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mac.h
@@ -44,6 +44,7 @@ struct VersionBase {
return major > other.major ||
(major == other.major && minor >= other.minor);
}
+ bool operator<(const VersionType &other) const { return !(*this >= other); }
};
struct MacosVersion : VersionBase<MacosVersion> {
@@ -74,7 +75,7 @@ asm(".desc ___crashreporter_info__, 0x10");
namespace __sanitizer {
static BlockingMutex crashreporter_info_mutex(LINKER_INITIALIZED);
-INLINE void CRAppendCrashLogMessage(const char *msg) {
+inline void CRAppendCrashLogMessage(const char *msg) {
BlockingMutexLock l(&crashreporter_info_mutex);
internal_strlcat(__crashreporter_info_buff__, msg,
sizeof(__crashreporter_info_buff__)); }
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_netbsd.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_netbsd.cpp
index d9aff51d8ae7..98ac7365da05 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_netbsd.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_netbsd.cpp
@@ -110,6 +110,11 @@ int internal_mprotect(void *addr, uptr length, int prot) {
return _REAL(mprotect, addr, length, prot);
}
+int internal_madvise(uptr addr, uptr length, int advice) {
+ DEFINE__REAL(int, madvise, void *a, uptr b, int c);
+ return _REAL(madvise, (void *)addr, length, advice);
+}
+
uptr internal_close(fd_t fd) {
CHECK(&_sys_close);
return _sys_close(fd);
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_openbsd.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_openbsd.cpp
index ed2d8edeb7a2..e69de29bb2d1 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_openbsd.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_openbsd.cpp
@@ -1,115 +0,0 @@
-//===-- sanitizer_openbsd.cpp ---------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is shared between various sanitizers' runtime libraries and
-// implements Solaris-specific functions.
-//===----------------------------------------------------------------------===//
-
-#include "sanitizer_platform.h"
-#if SANITIZER_OPENBSD
-
-#include <stdio.h>
-
-#include "sanitizer_common.h"
-#include "sanitizer_flags.h"
-#include "sanitizer_internal_defs.h"
-#include "sanitizer_libc.h"
-#include "sanitizer_placement_new.h"
-#include "sanitizer_platform_limits_posix.h"
-#include "sanitizer_procmaps.h"
-
-#include <errno.h>
-#include <fcntl.h>
-#include <limits.h>
-#include <pthread.h>
-#include <sched.h>
-#include <signal.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <sys/mman.h>
-#include <sys/shm.h>
-#include <sys/sysctl.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-extern char **environ;
-
-namespace __sanitizer {
-
-uptr internal_mmap(void *addr, size_t length, int prot, int flags, int fd,
- u64 offset) {
- return (uptr)mmap(addr, length, prot, flags, fd, offset);
-}
-
-uptr internal_munmap(void *addr, uptr length) { return munmap(addr, length); }
-
-int internal_mprotect(void *addr, uptr length, int prot) {
- return mprotect(addr, length, prot);
-}
-
-int internal_sysctlbyname(const char *sname, void *oldp, uptr *oldlenp,
- const void *newp, uptr newlen) {
- Printf("internal_sysctlbyname not implemented for OpenBSD");
- Die();
- return 0;
-}
-
-uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
- // On OpenBSD we cannot get the full path
- struct kinfo_proc kp;
- uptr kl;
- const int Mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_PID, getpid()};
- if (internal_sysctl(Mib, ARRAY_SIZE(Mib), &kp, &kl, NULL, 0) != -1)
- return internal_snprintf(buf,
- (KI_MAXCOMLEN < buf_len ? KI_MAXCOMLEN : buf_len),
- "%s", kp.p_comm);
- return (uptr)0;
-}
-
-static void GetArgsAndEnv(char ***argv, char ***envp) {
- uptr nargv;
- uptr nenv;
- int argvmib[4] = {CTL_KERN, KERN_PROC_ARGS, getpid(), KERN_PROC_ARGV};
- int envmib[4] = {CTL_KERN, KERN_PROC_ARGS, getpid(), KERN_PROC_ENV};
- if (internal_sysctl(argvmib, 4, NULL, &nargv, NULL, 0) == -1) {
- Printf("sysctl KERN_PROC_NARGV failed\n");
- Die();
- }
- if (internal_sysctl(envmib, 4, NULL, &nenv, NULL, 0) == -1) {
- Printf("sysctl KERN_PROC_NENV failed\n");
- Die();
- }
- if (internal_sysctl(argvmib, 4, &argv, &nargv, NULL, 0) == -1) {
- Printf("sysctl KERN_PROC_ARGV failed\n");
- Die();
- }
- if (internal_sysctl(envmib, 4, &envp, &nenv, NULL, 0) == -1) {
- Printf("sysctl KERN_PROC_ENV failed\n");
- Die();
- }
-}
-
-char **GetArgv() {
- char **argv, **envp;
- GetArgsAndEnv(&argv, &envp);
- return argv;
-}
-
-char **GetEnviron() {
- char **argv, **envp;
- GetArgsAndEnv(&argv, &envp);
- return envp;
-}
-
-void ReExec() {
- UNIMPLEMENTED();
-}
-
-} // namespace __sanitizer
-
-#endif // SANITIZER_OPENBSD
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform.h
index f0b1e04d1dd6..96c01bad870d 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform.h
@@ -13,18 +13,31 @@
#define SANITIZER_PLATFORM_H
#if !defined(__linux__) && !defined(__FreeBSD__) && !defined(__NetBSD__) && \
- !defined(__OpenBSD__) && !defined(__APPLE__) && !defined(_WIN32) && \
+ !defined(__APPLE__) && !defined(_WIN32) && \
!defined(__Fuchsia__) && !defined(__rtems__) && \
!(defined(__sun__) && defined(__svr4__))
# error "This operating system is not supported"
#endif
+// Get __GLIBC__ on a glibc platform. Exclude Android: features.h includes C
+// function declarations into a .S file which doesn't compile.
+// https://crbug.com/1162741
+#if __has_include(<features.h>) && !defined(__ANDROID__)
+#include <features.h>
+#endif
+
#if defined(__linux__)
# define SANITIZER_LINUX 1
#else
# define SANITIZER_LINUX 0
#endif
+#if defined(__GLIBC__)
+# define SANITIZER_GLIBC 1
+#else
+# define SANITIZER_GLIBC 0
+#endif
+
#if defined(__FreeBSD__)
# define SANITIZER_FREEBSD 1
#else
@@ -37,12 +50,6 @@
# define SANITIZER_NETBSD 0
#endif
-#if defined(__OpenBSD__)
-# define SANITIZER_OPENBSD 1
-#else
-# define SANITIZER_OPENBSD 0
-#endif
-
#if defined(__sun__) && defined(__svr4__)
# define SANITIZER_SOLARIS 1
#else
@@ -112,7 +119,7 @@
#define SANITIZER_POSIX \
(SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC || \
- SANITIZER_NETBSD || SANITIZER_OPENBSD || SANITIZER_SOLARIS)
+ SANITIZER_NETBSD || SANITIZER_SOLARIS)
#if __LP64__ || defined(_WIN64)
# define SANITIZER_WORDSIZE 64
@@ -219,6 +226,12 @@
# define SANITIZER_MYRIAD2 0
#endif
+#if defined(__riscv) && (__riscv_xlen == 64)
+#define SANITIZER_RISCV64 1
+#else
+#define SANITIZER_RISCV64 0
+#endif
+
// By default we allow to use SizeClassAllocator64 on 64-bit platform.
// But in some cases (e.g. AArch64's 39-bit address space) SizeClassAllocator64
// does not work well and we need to fallback to SizeClassAllocator32.
@@ -238,7 +251,13 @@
// FIXME: this value should be different on different platforms. Larger values
// will still work but will consume more memory for TwoLevelByteMap.
#if defined(__mips__)
+#if SANITIZER_GO && defined(__mips64)
+#define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
+#else
# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 40)
+#endif
+#elif SANITIZER_RISCV64
+#define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 38)
#elif defined(__aarch64__)
# if SANITIZER_MAC
// Darwin iOS/ARM64 has a 36-bit VMA, 64GiB VM
@@ -331,7 +350,7 @@
#endif
#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD || \
- SANITIZER_OPENBSD || SANITIZER_SOLARIS
+ SANITIZER_SOLARIS
# define SANITIZER_MADVISE_DONTNEED MADV_FREE
#else
# define SANITIZER_MADVISE_DONTNEED MADV_DONTNEED
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h
index e28bb937ae83..068fc9829e57 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h
@@ -15,133 +15,133 @@
#include "sanitizer_glibc_version.h"
#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform.h"
#if SANITIZER_POSIX
-# define SI_POSIX 1
+#define SI_POSIX 1
#else
-# define SI_POSIX 0
+#define SI_POSIX 0
#endif
#if !SANITIZER_WINDOWS
-# define SI_WINDOWS 0
+#define SI_WINDOWS 0
#else
-# define SI_WINDOWS 1
+#define SI_WINDOWS 1
#endif
#if SI_WINDOWS && SI_POSIX
-# error "Windows is not POSIX!"
+#error "Windows is not POSIX!"
#endif
#if SI_POSIX
-# include "sanitizer_platform_limits_freebsd.h"
-# include "sanitizer_platform_limits_netbsd.h"
-# include "sanitizer_platform_limits_openbsd.h"
-# include "sanitizer_platform_limits_posix.h"
-# include "sanitizer_platform_limits_solaris.h"
+#include "sanitizer_platform_limits_freebsd.h"
+#include "sanitizer_platform_limits_netbsd.h"
+#include "sanitizer_platform_limits_posix.h"
+#include "sanitizer_platform_limits_solaris.h"
#endif
#if SANITIZER_LINUX && !SANITIZER_ANDROID
-# define SI_LINUX_NOT_ANDROID 1
+#define SI_LINUX_NOT_ANDROID 1
#else
-# define SI_LINUX_NOT_ANDROID 0
+#define SI_LINUX_NOT_ANDROID 0
#endif
-#if SANITIZER_ANDROID
-# define SI_ANDROID 1
+#if SANITIZER_GLIBC
+#define SI_GLIBC 1
#else
-# define SI_ANDROID 0
+#define SI_GLIBC 0
#endif
-#if SANITIZER_FREEBSD
-# define SI_FREEBSD 1
+#if SANITIZER_ANDROID
+#define SI_ANDROID 1
#else
-# define SI_FREEBSD 0
+#define SI_ANDROID 0
#endif
-#if SANITIZER_NETBSD
-# define SI_NETBSD 1
+#if SANITIZER_FREEBSD
+#define SI_FREEBSD 1
#else
-# define SI_NETBSD 0
+#define SI_FREEBSD 0
#endif
-#if SANITIZER_OPENBSD
-#define SI_OPENBSD 1
+#if SANITIZER_NETBSD
+#define SI_NETBSD 1
#else
-#define SI_OPENBSD 0
+#define SI_NETBSD 0
#endif
#if SANITIZER_LINUX
-# define SI_LINUX 1
+#define SI_LINUX 1
#else
-# define SI_LINUX 0
+#define SI_LINUX 0
#endif
#if SANITIZER_MAC
-# define SI_MAC 1
-# define SI_NOT_MAC 0
+#define SI_MAC 1
+#define SI_NOT_MAC 0
#else
-# define SI_MAC 0
-# define SI_NOT_MAC 1
+#define SI_MAC 0
+#define SI_NOT_MAC 1
#endif
#if SANITIZER_IOS
-# define SI_IOS 1
+#define SI_IOS 1
#else
-# define SI_IOS 0
+#define SI_IOS 0
#endif
#if SANITIZER_IOSSIM
-# define SI_IOSSIM 1
+#define SI_IOSSIM 1
#else
-# define SI_IOSSIM 0
+#define SI_IOSSIM 0
#endif
#if SANITIZER_WATCHOS
-# define SI_WATCHOS 1
+#define SI_WATCHOS 1
#else
-# define SI_WATCHOS 0
+#define SI_WATCHOS 0
#endif
#if SANITIZER_TVOS
-# define SI_TVOS 1
+#define SI_TVOS 1
#else
-# define SI_TVOS 0
+#define SI_TVOS 0
#endif
#if SANITIZER_FUCHSIA
-# define SI_NOT_FUCHSIA 0
+#define SI_NOT_FUCHSIA 0
#else
-# define SI_NOT_FUCHSIA 1
+#define SI_NOT_FUCHSIA 1
#endif
#if SANITIZER_RTEMS
-# define SI_NOT_RTEMS 0
+#define SI_NOT_RTEMS 0
#else
-# define SI_NOT_RTEMS 1
+#define SI_NOT_RTEMS 1
#endif
#if SANITIZER_SOLARIS
-# define SI_SOLARIS 1
+#define SI_SOLARIS 1
#else
-# define SI_SOLARIS 0
+#define SI_SOLARIS 0
#endif
#if SANITIZER_SOLARIS32
-# define SI_SOLARIS32 1
+#define SI_SOLARIS32 1
#else
-# define SI_SOLARIS32 0
+#define SI_SOLARIS32 0
#endif
#if SANITIZER_POSIX && !SANITIZER_MAC
-# define SI_POSIX_NOT_MAC 1
+#define SI_POSIX_NOT_MAC 1
#else
-# define SI_POSIX_NOT_MAC 0
+#define SI_POSIX_NOT_MAC 0
#endif
#if SANITIZER_LINUX && !SANITIZER_FREEBSD
-# define SI_LINUX_NOT_FREEBSD 1
-# else
-# define SI_LINUX_NOT_FREEBSD 0
+#define SI_LINUX_NOT_FREEBSD 1
+#else
+#define SI_LINUX_NOT_FREEBSD 0
#endif
#define SANITIZER_INTERCEPT_STRLEN SI_NOT_FUCHSIA
@@ -163,21 +163,20 @@
#define SANITIZER_INTERCEPT_MEMCMP SI_NOT_FUCHSIA
#define SANITIZER_INTERCEPT_BCMP \
SANITIZER_INTERCEPT_MEMCMP && \
- ((SI_POSIX && _GNU_SOURCE) || SI_NETBSD || SI_OPENBSD || SI_FREEBSD)
+ ((SI_POSIX && _GNU_SOURCE) || SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_STRNDUP SI_POSIX
-#define SANITIZER_INTERCEPT___STRNDUP SI_LINUX_NOT_FREEBSD
+#define SANITIZER_INTERCEPT___STRNDUP SI_GLIBC
#if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \
__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 1070
-# define SI_MAC_DEPLOYMENT_BELOW_10_7 1
+#define SI_MAC_DEPLOYMENT_BELOW_10_7 1
#else
-# define SI_MAC_DEPLOYMENT_BELOW_10_7 0
+#define SI_MAC_DEPLOYMENT_BELOW_10_7 0
#endif
// memmem on Darwin doesn't exist on 10.6
// FIXME: enable memmem on Windows.
#define SANITIZER_INTERCEPT_MEMMEM (SI_POSIX && !SI_MAC_DEPLOYMENT_BELOW_10_7)
#define SANITIZER_INTERCEPT_MEMCHR SI_NOT_FUCHSIA
-#define SANITIZER_INTERCEPT_MEMRCHR \
- (SI_FREEBSD || SI_LINUX || SI_NETBSD || SI_OPENBSD)
+#define SANITIZER_INTERCEPT_MEMRCHR (SI_FREEBSD || SI_LINUX || SI_NETBSD)
#define SANITIZER_INTERCEPT_READ SI_POSIX
#define SANITIZER_INTERCEPT_PREAD SI_POSIX
@@ -190,64 +189,60 @@
#define SANITIZER_INTERCEPT_FPUTS SI_POSIX
#define SANITIZER_INTERCEPT_PUTS SI_POSIX
-#define SANITIZER_INTERCEPT_PREAD64 SI_LINUX_NOT_ANDROID || SI_SOLARIS32
-#define SANITIZER_INTERCEPT_PWRITE64 SI_LINUX_NOT_ANDROID || SI_SOLARIS32
+#define SANITIZER_INTERCEPT_PREAD64 (SI_GLIBC || SI_SOLARIS32)
+#define SANITIZER_INTERCEPT_PWRITE64 (SI_GLIBC || SI_SOLARIS32)
#define SANITIZER_INTERCEPT_READV SI_POSIX
#define SANITIZER_INTERCEPT_WRITEV SI_POSIX
#define SANITIZER_INTERCEPT_PREADV \
- (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID)
+ (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_PWRITEV SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_PREADV64 SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_PWRITEV64 SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PREADV64 SI_GLIBC
+#define SANITIZER_INTERCEPT_PWRITEV64 SI_GLIBC
-#define SANITIZER_INTERCEPT_PRCTL SI_LINUX
+#define SANITIZER_INTERCEPT_PRCTL SI_LINUX
#define SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS SI_POSIX
#define SANITIZER_INTERCEPT_STRPTIME SI_POSIX
#define SANITIZER_INTERCEPT_SCANF SI_POSIX
-#define SANITIZER_INTERCEPT_ISOC99_SCANF SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_ISOC99_SCANF SI_GLIBC
#ifndef SANITIZER_INTERCEPT_PRINTF
-# define SANITIZER_INTERCEPT_PRINTF SI_POSIX
-# define SANITIZER_INTERCEPT_PRINTF_L (SI_FREEBSD || SI_NETBSD)
-# define SANITIZER_INTERCEPT_ISOC99_PRINTF SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PRINTF SI_POSIX
+#define SANITIZER_INTERCEPT_PRINTF_L (SI_FREEBSD || SI_NETBSD)
+#define SANITIZER_INTERCEPT_ISOC99_PRINTF SI_GLIBC
#endif
#define SANITIZER_INTERCEPT___PRINTF_CHK \
- (SANITIZER_INTERCEPT_PRINTF && SI_LINUX_NOT_ANDROID)
+ (SANITIZER_INTERCEPT_PRINTF && SI_GLIBC)
#define SANITIZER_INTERCEPT_FREXP SI_NOT_FUCHSIA
#define SANITIZER_INTERCEPT_FREXPF_FREXPL SI_POSIX
#define SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS SI_POSIX
-#define SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS \
- (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_MAC || SI_LINUX_NOT_ANDROID || \
- SI_SOLARIS)
-#define SANITIZER_INTERCEPT_GETPWENT \
- (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_MAC || SI_LINUX_NOT_ANDROID || \
- SI_SOLARIS)
-#define SANITIZER_INTERCEPT_FGETGRENT_R \
- (SI_FREEBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS \
+ (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_GETPWENT \
+ (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_FGETGRENT_R (SI_GLIBC || SI_SOLARIS)
#define SANITIZER_INTERCEPT_FGETPWENT SI_LINUX_NOT_ANDROID || SI_SOLARIS
#define SANITIZER_INTERCEPT_GETPWENT_R \
- (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
-#define SANITIZER_INTERCEPT_FGETPWENT_R \
- (SI_FREEBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+ (SI_FREEBSD || SI_NETBSD || SI_GLIBC || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_FGETPWENT_R (SI_FREEBSD || SI_GLIBC || SI_SOLARIS)
#define SANITIZER_INTERCEPT_SETPWENT \
(SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_CLOCK_GETTIME \
- (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX || SI_SOLARIS)
+ (SI_FREEBSD || SI_NETBSD || SI_LINUX || SI_SOLARIS)
#define SANITIZER_INTERCEPT_CLOCK_GETCPUCLOCKID SI_LINUX
#define SANITIZER_INTERCEPT_GETITIMER SI_POSIX
#define SANITIZER_INTERCEPT_TIME SI_POSIX
-#define SANITIZER_INTERCEPT_GLOB SI_LINUX_NOT_ANDROID || SI_SOLARIS
-#define SANITIZER_INTERCEPT_GLOB64 SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_GLOB (SI_GLIBC || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_GLOB64 SI_GLIBC
#define SANITIZER_INTERCEPT_WAIT SI_POSIX
#define SANITIZER_INTERCEPT_INET SI_POSIX
-#define SANITIZER_INTERCEPT_PTHREAD_GETSCHEDPARAM (SI_POSIX && !SI_OPENBSD)
+#define SANITIZER_INTERCEPT_PTHREAD_GETSCHEDPARAM SI_POSIX
#define SANITIZER_INTERCEPT_GETADDRINFO SI_POSIX
#define SANITIZER_INTERCEPT_GETNAMEINFO SI_POSIX
#define SANITIZER_INTERCEPT_GETSOCKNAME SI_POSIX
@@ -259,12 +254,10 @@
(SI_FREEBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_GETHOSTBYADDR_R \
(SI_FREEBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
-#define SANITIZER_INTERCEPT_GETHOSTENT_R \
- (SI_FREEBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_GETHOSTENT_R (SI_FREEBSD || SI_GLIBC || SI_SOLARIS)
#define SANITIZER_INTERCEPT_GETSOCKOPT SI_POSIX
#define SANITIZER_INTERCEPT_ACCEPT SI_POSIX
-#define SANITIZER_INTERCEPT_ACCEPT4 \
- (SI_LINUX_NOT_ANDROID || SI_NETBSD || SI_OPENBSD)
+#define SANITIZER_INTERCEPT_ACCEPT4 (SI_LINUX_NOT_ANDROID || SI_NETBSD)
#define SANITIZER_INTERCEPT_PACCEPT SI_NETBSD
#define SANITIZER_INTERCEPT_MODF SI_POSIX
#define SANITIZER_INTERCEPT_RECVMSG SI_POSIX
@@ -278,10 +271,10 @@
#define SANITIZER_INTERCEPT_SYSINFO SI_LINUX
#define SANITIZER_INTERCEPT_READDIR SI_POSIX
#define SANITIZER_INTERCEPT_READDIR64 SI_LINUX_NOT_ANDROID || SI_SOLARIS32
-#if SI_LINUX_NOT_ANDROID && \
- (defined(__i386) || defined(__x86_64) || defined(__mips64) || \
- defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \
- defined(__s390__))
+#if SI_LINUX_NOT_ANDROID && \
+ (defined(__i386) || defined(__x86_64) || defined(__mips64) || \
+ defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \
+ defined(__s390__) || SANITIZER_RISCV64)
#define SANITIZER_INTERCEPT_PTRACE 1
#else
#define SANITIZER_INTERCEPT_PTRACE 0
@@ -298,46 +291,42 @@
#define SANITIZER_INTERCEPT___STRXFRM_L SI_LINUX
#define SANITIZER_INTERCEPT_WCSXFRM SI_POSIX
#define SANITIZER_INTERCEPT___WCSXFRM_L SI_LINUX
-#define SANITIZER_INTERCEPT_WCSNRTOMBS \
- (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_MAC || SI_LINUX_NOT_ANDROID || \
- SI_SOLARIS)
-#define SANITIZER_INTERCEPT_WCRTOMB \
- (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_MAC || SI_LINUX_NOT_ANDROID || \
- SI_SOLARIS)
-#define SANITIZER_INTERCEPT_WCTOMB \
- (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_MAC || SI_LINUX_NOT_ANDROID || \
- SI_SOLARIS)
+#define SANITIZER_INTERCEPT_WCSNRTOMBS \
+ (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_WCRTOMB \
+ (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_WCTOMB \
+ (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_TCGETATTR SI_LINUX_NOT_ANDROID || SI_SOLARIS
#define SANITIZER_INTERCEPT_REALPATH SI_POSIX
-#define SANITIZER_INTERCEPT_CANONICALIZE_FILE_NAME \
- (SI_LINUX_NOT_ANDROID || SI_SOLARIS)
-#define SANITIZER_INTERCEPT_CONFSTR \
- (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_MAC || SI_LINUX_NOT_ANDROID || \
- SI_SOLARIS)
+#define SANITIZER_INTERCEPT_CANONICALIZE_FILE_NAME (SI_GLIBC || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_CONFSTR \
+ (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_SCHED_GETAFFINITY SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SCHED_GETPARAM SI_LINUX_NOT_ANDROID || SI_SOLARIS
#define SANITIZER_INTERCEPT_STRERROR SI_POSIX
#define SANITIZER_INTERCEPT_STRERROR_R SI_POSIX
#define SANITIZER_INTERCEPT_XPG_STRERROR_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SCANDIR \
- (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+ (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_SCANDIR64 SI_LINUX_NOT_ANDROID || SI_SOLARIS32
#define SANITIZER_INTERCEPT_GETGROUPS SI_POSIX
#define SANITIZER_INTERCEPT_POLL SI_POSIX
#define SANITIZER_INTERCEPT_PPOLL SI_LINUX_NOT_ANDROID || SI_SOLARIS
-#define SANITIZER_INTERCEPT_WORDEXP \
+#define SANITIZER_INTERCEPT_WORDEXP \
(SI_FREEBSD || SI_NETBSD || (SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID || \
- SI_SOLARIS)
+ SI_SOLARIS) // NOLINT
#define SANITIZER_INTERCEPT_SIGWAIT SI_POSIX
#define SANITIZER_INTERCEPT_SIGWAITINFO SI_LINUX_NOT_ANDROID || SI_SOLARIS
#define SANITIZER_INTERCEPT_SIGTIMEDWAIT SI_LINUX_NOT_ANDROID || SI_SOLARIS
#define SANITIZER_INTERCEPT_SIGSETOPS \
(SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_SIGSET_LOGICOPS SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SIGPENDING SI_POSIX
#define SANITIZER_INTERCEPT_SIGPROCMASK SI_POSIX
#define SANITIZER_INTERCEPT_PTHREAD_SIGMASK SI_POSIX
#define SANITIZER_INTERCEPT_BACKTRACE \
- (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+ (SI_FREEBSD || SI_NETBSD || SI_GLIBC || SI_SOLARIS)
#define SANITIZER_INTERCEPT_GETMNTENT SI_LINUX
#define SANITIZER_INTERCEPT_GETMNTENT_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_STATFS \
@@ -345,25 +334,25 @@
#define SANITIZER_INTERCEPT_STATFS64 \
(((SI_MAC && !TARGET_CPU_ARM64) && !SI_IOS) || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_STATVFS \
- (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID)
+ (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_STATVFS64 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_INITGROUPS SI_POSIX
-#define SANITIZER_INTERCEPT_ETHER_NTOA_ATON (SI_POSIX && !SI_OPENBSD)
+#define SANITIZER_INTERCEPT_ETHER_NTOA_ATON SI_POSIX
#define SANITIZER_INTERCEPT_ETHER_HOST \
(SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_ETHER_R (SI_FREEBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_SHMCTL \
(((SI_FREEBSD || SI_LINUX_NOT_ANDROID) && SANITIZER_WORDSIZE == 64) || \
- SI_NETBSD || SI_OPENBSD || SI_SOLARIS) // NOLINT
-#define SANITIZER_INTERCEPT_RANDOM_R SI_LINUX_NOT_ANDROID
+ SI_NETBSD || SI_SOLARIS) // NOLINT
+#define SANITIZER_INTERCEPT_RANDOM_R SI_GLIBC
#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET SI_POSIX
#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED \
(SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
-#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETAFFINITY_NP SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET_SCHED (SI_POSIX && !SI_OPENBSD)
+#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETAFFINITY_NP SI_GLIBC
+#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET_SCHED SI_POSIX
#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPSHARED \
- (SI_POSIX && !SI_NETBSD && !SI_OPENBSD)
-#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETTYPE (SI_POSIX && !SI_OPENBSD)
+ (SI_POSIX && !SI_NETBSD)
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETTYPE SI_POSIX
#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPROTOCOL \
(SI_MAC || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPRIOCEILING \
@@ -372,17 +361,18 @@
(SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST_NP SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETPSHARED \
- (SI_POSIX && !SI_NETBSD && !SI_OPENBSD)
-#define SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETKIND_NP SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETPSHARED \
- (SI_POSIX && !SI_NETBSD && !SI_OPENBSD)
+ (SI_POSIX && !SI_NETBSD)
+#define SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETKIND_NP SI_GLIBC
+#define SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETPSHARED (SI_POSIX && !SI_NETBSD)
#define SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETCLOCK \
(SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_PTHREAD_BARRIERATTR_GETPSHARED \
- (SI_LINUX_NOT_ANDROID && !SI_NETBSD && !SI_OPENBSD)
+ (SI_LINUX_NOT_ANDROID && !SI_NETBSD)
#define SANITIZER_INTERCEPT_THR_EXIT SI_FREEBSD
#define SANITIZER_INTERCEPT_TMPNAM SI_POSIX
-#define SANITIZER_INTERCEPT_TMPNAM_R SI_LINUX_NOT_ANDROID || SI_SOLARIS
+#define SANITIZER_INTERCEPT_TMPNAM_R (SI_GLIBC || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_PTSNAME SI_LINUX
+#define SANITIZER_INTERCEPT_PTSNAME_R SI_LINUX
#define SANITIZER_INTERCEPT_TTYNAME SI_POSIX
#define SANITIZER_INTERCEPT_TTYNAME_R SI_POSIX
#define SANITIZER_INTERCEPT_TEMPNAM SI_POSIX
@@ -393,71 +383,67 @@
#define SANITIZER_INTERCEPT_LGAMMAL (SI_POSIX && !SI_NETBSD)
#define SANITIZER_INTERCEPT_LGAMMA_R (SI_FREEBSD || SI_LINUX || SI_SOLARIS)
#define SANITIZER_INTERCEPT_LGAMMAL_R SI_LINUX_NOT_ANDROID || SI_SOLARIS
-#define SANITIZER_INTERCEPT_DRAND48_R SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_RAND_R \
- (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_MAC || SI_LINUX_NOT_ANDROID || \
- SI_SOLARIS)
+#define SANITIZER_INTERCEPT_DRAND48_R SI_GLIBC
+#define SANITIZER_INTERCEPT_RAND_R \
+ (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_ICONV \
- (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+ (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_TIMES SI_POSIX
// FIXME: getline seems to be available on OSX 10.7
#define SANITIZER_INTERCEPT_GETLINE \
- (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+ (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT__EXIT \
- (SI_LINUX || SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_MAC || SI_SOLARIS)
+ (SI_LINUX || SI_FREEBSD || SI_NETBSD || SI_MAC || SI_SOLARIS)
#define SANITIZER_INTERCEPT_PTHREAD_MUTEX SI_POSIX
-#define SANITIZER_INTERCEPT___PTHREAD_MUTEX SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT___PTHREAD_MUTEX SI_GLIBC
#define SANITIZER_INTERCEPT___LIBC_MUTEX SI_NETBSD
#define SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP \
- (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+ (SI_FREEBSD || SI_NETBSD || SI_GLIBC || SI_SOLARIS)
#define SANITIZER_INTERCEPT_PTHREAD_GETNAME_NP \
- (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+ (SI_FREEBSD || SI_NETBSD || SI_GLIBC || SI_SOLARIS)
#define SANITIZER_INTERCEPT_TLS_GET_ADDR \
- (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+ (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_LISTXATTR SI_LINUX
#define SANITIZER_INTERCEPT_GETXATTR SI_LINUX
#define SANITIZER_INTERCEPT_GETRESID SI_LINUX
-#define SANITIZER_INTERCEPT_GETIFADDRS \
- (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_MAC || \
- SI_SOLARIS)
-#define SANITIZER_INTERCEPT_IF_INDEXTONAME \
- (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_MAC || \
- SI_SOLARIS)
+#define SANITIZER_INTERCEPT_GETIFADDRS \
+ (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_MAC || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_IF_INDEXTONAME \
+ (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_MAC || SI_SOLARIS)
#define SANITIZER_INTERCEPT_CAPGET SI_LINUX_NOT_ANDROID
#if SI_LINUX && defined(__arm__)
#define SANITIZER_INTERCEPT_AEABI_MEM 1
#else
#define SANITIZER_INTERCEPT_AEABI_MEM 0
#endif
-#define SANITIZER_INTERCEPT___BZERO SI_MAC || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT___BZERO SI_MAC || SI_GLIBC
#define SANITIZER_INTERCEPT_BZERO SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_FTIME \
- (!SI_FREEBSD && !SI_NETBSD && !SI_OPENBSD && SI_POSIX)
-#define SANITIZER_INTERCEPT_XDR SI_LINUX_NOT_ANDROID || SI_SOLARIS
+#define SANITIZER_INTERCEPT_FTIME (!SI_FREEBSD && !SI_NETBSD && SI_POSIX)
+#define SANITIZER_INTERCEPT_XDR (SI_GLIBC || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_XDRREC SI_GLIBC
#define SANITIZER_INTERCEPT_TSEARCH \
- (SI_LINUX_NOT_ANDROID || SI_MAC || SI_NETBSD || SI_OPENBSD || SI_SOLARIS)
-#define SANITIZER_INTERCEPT_LIBIO_INTERNALS SI_LINUX_NOT_ANDROID
+ (SI_LINUX_NOT_ANDROID || SI_MAC || SI_NETBSD || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_LIBIO_INTERNALS SI_GLIBC
#define SANITIZER_INTERCEPT_FOPEN SI_POSIX
-#define SANITIZER_INTERCEPT_FOPEN64 SI_LINUX_NOT_ANDROID || SI_SOLARIS32
+#define SANITIZER_INTERCEPT_FOPEN64 (SI_GLIBC || SI_SOLARIS32)
#define SANITIZER_INTERCEPT_OPEN_MEMSTREAM \
- (SI_LINUX_NOT_ANDROID || SI_NETBSD || SI_OPENBSD || SI_SOLARIS)
-#define SANITIZER_INTERCEPT_OBSTACK SI_LINUX_NOT_ANDROID
+ (SI_LINUX_NOT_ANDROID || SI_NETBSD || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_OBSTACK SI_GLIBC
#define SANITIZER_INTERCEPT_FFLUSH SI_POSIX
#define SANITIZER_INTERCEPT_FCLOSE SI_POSIX
#ifndef SANITIZER_INTERCEPT_DLOPEN_DLCLOSE
-#define SANITIZER_INTERCEPT_DLOPEN_DLCLOSE \
- (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_MAC || \
- SI_SOLARIS)
+#define SANITIZER_INTERCEPT_DLOPEN_DLCLOSE \
+ (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_MAC || SI_SOLARIS)
#endif
#define SANITIZER_INTERCEPT_GETPASS \
- (SI_LINUX_NOT_ANDROID || SI_MAC || SI_NETBSD || SI_OPENBSD)
+ (SI_LINUX_NOT_ANDROID || SI_MAC || SI_NETBSD)
#define SANITIZER_INTERCEPT_TIMERFD SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_MLOCKX SI_POSIX
@@ -465,21 +451,20 @@
#define SANITIZER_INTERCEPT_SEM \
(SI_LINUX || SI_FREEBSD || SI_NETBSD || SI_SOLARIS)
#define SANITIZER_INTERCEPT_PTHREAD_SETCANCEL SI_POSIX
-#define SANITIZER_INTERCEPT_MINCORE \
- (SI_LINUX || SI_NETBSD || SI_OPENBSD || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_MINCORE (SI_LINUX || SI_NETBSD || SI_SOLARIS)
#define SANITIZER_INTERCEPT_PROCESS_VM_READV SI_LINUX
#define SANITIZER_INTERCEPT_CTERMID \
- (SI_LINUX || SI_MAC || SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_SOLARIS)
+ (SI_LINUX || SI_MAC || SI_FREEBSD || SI_NETBSD || SI_SOLARIS)
#define SANITIZER_INTERCEPT_CTERMID_R (SI_MAC || SI_FREEBSD || SI_SOLARIS)
#define SANITIZER_INTERCEPTOR_HOOKS \
- (SI_LINUX || SI_MAC || SI_WINDOWS || SI_NETBSD)
+ (SI_LINUX || SI_MAC || SI_WINDOWS || SI_NETBSD || SI_SOLARIS)
#define SANITIZER_INTERCEPT_RECV_RECVFROM SI_POSIX
#define SANITIZER_INTERCEPT_SEND_SENDTO SI_POSIX
#define SANITIZER_INTERCEPT_EVENTFD_READ_WRITE SI_LINUX
#define SANITIZER_INTERCEPT_STAT \
- (SI_FREEBSD || SI_MAC || SI_ANDROID || SI_NETBSD || SI_OPENBSD || SI_SOLARIS)
+ (SI_FREEBSD || SI_MAC || SI_ANDROID || SI_NETBSD || SI_SOLARIS)
#define SANITIZER_INTERCEPT_LSTAT (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT___XSTAT (!SANITIZER_INTERCEPT_STAT && SI_POSIX)
#define SANITIZER_INTERCEPT___XSTAT64 SI_LINUX_NOT_ANDROID
@@ -492,41 +477,35 @@
(SI_LINUX_NOT_ANDROID || SI_MAC || SI_FREEBSD || SI_NETBSD)
#define SANITIZER_INTERCEPT_GETLOADAVG \
- (SI_LINUX_NOT_ANDROID || SI_MAC || SI_FREEBSD || SI_NETBSD || SI_OPENBSD)
+ (SI_LINUX_NOT_ANDROID || SI_MAC || SI_FREEBSD || SI_NETBSD)
#define SANITIZER_INTERCEPT_MMAP SI_POSIX
#define SANITIZER_INTERCEPT_MMAP64 SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO \
- (!SI_FREEBSD && !SI_MAC && !SI_NETBSD && !SI_OPENBSD && SI_NOT_FUCHSIA && \
- SI_NOT_RTEMS)
+#define SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO (SI_GLIBC || SI_ANDROID)
#define SANITIZER_INTERCEPT_MEMALIGN \
- (!SI_FREEBSD && !SI_MAC && !SI_NETBSD && !SI_OPENBSD && SI_NOT_RTEMS)
-#define SANITIZER_INTERCEPT_PVALLOC \
- (!SI_FREEBSD && !SI_MAC && !SI_NETBSD && !SI_OPENBSD && SI_NOT_FUCHSIA && \
- SI_NOT_RTEMS)
-#define SANITIZER_INTERCEPT_CFREE \
- (!SI_FREEBSD && !SI_MAC && !SI_NETBSD && !SI_OPENBSD && SI_NOT_FUCHSIA && \
- SI_NOT_RTEMS)
+ (!SI_FREEBSD && !SI_MAC && !SI_NETBSD && SI_NOT_RTEMS)
+#define SANITIZER_INTERCEPT___LIBC_MEMALIGN SI_GLIBC
+#define SANITIZER_INTERCEPT_PVALLOC (SI_GLIBC || SI_ANDROID)
+#define SANITIZER_INTERCEPT_CFREE SI_GLIBC
#define SANITIZER_INTERCEPT_REALLOCARRAY SI_POSIX
#define SANITIZER_INTERCEPT_ALIGNED_ALLOC (!SI_MAC && SI_NOT_RTEMS)
-#define SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE \
- (!SI_MAC && !SI_OPENBSD && !SI_NETBSD)
+#define SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE (!SI_MAC && !SI_NETBSD)
#define SANITIZER_INTERCEPT_MCHECK_MPROBE SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_WCSCAT SI_POSIX
#define SANITIZER_INTERCEPT_WCSDUP SI_POSIX
#define SANITIZER_INTERCEPT_SIGNAL_AND_SIGACTION (!SI_WINDOWS && SI_NOT_FUCHSIA)
#define SANITIZER_INTERCEPT_BSD_SIGNAL SI_ANDROID
-#define SANITIZER_INTERCEPT_ACCT (SI_NETBSD || SI_OPENBSD || SI_FREEBSD)
+#define SANITIZER_INTERCEPT_ACCT (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_USER_FROM_UID SI_NETBSD
#define SANITIZER_INTERCEPT_UID_FROM_USER SI_NETBSD
#define SANITIZER_INTERCEPT_GROUP_FROM_GID SI_NETBSD
#define SANITIZER_INTERCEPT_GID_FROM_GROUP SI_NETBSD
-#define SANITIZER_INTERCEPT_ACCESS (SI_NETBSD || SI_OPENBSD || SI_FREEBSD)
-#define SANITIZER_INTERCEPT_FACCESSAT (SI_NETBSD || SI_OPENBSD || SI_FREEBSD)
-#define SANITIZER_INTERCEPT_GETGROUPLIST (SI_NETBSD || SI_OPENBSD)
-#define SANITIZER_INTERCEPT_STRLCPY \
- (SI_NETBSD || SI_FREEBSD || SI_OPENBSD || SI_MAC || SI_ANDROID)
+#define SANITIZER_INTERCEPT_ACCESS (SI_NETBSD || SI_FREEBSD)
+#define SANITIZER_INTERCEPT_FACCESSAT (SI_NETBSD || SI_FREEBSD)
+#define SANITIZER_INTERCEPT_GETGROUPLIST SI_NETBSD
+#define SANITIZER_INTERCEPT_STRLCPY \
+ (SI_NETBSD || SI_FREEBSD || SI_MAC || SI_ANDROID)
#define SANITIZER_INTERCEPT_NAME_TO_HANDLE_AT SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_OPEN_BY_HANDLE_AT SI_LINUX_NOT_ANDROID
@@ -534,23 +513,23 @@
#define SANITIZER_INTERCEPT_READLINK SI_POSIX
#if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \
__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101000
-# define SI_MAC_DEPLOYMENT_BELOW_10_10 1
+#define SI_MAC_DEPLOYMENT_BELOW_10_10 1
#else
-# define SI_MAC_DEPLOYMENT_BELOW_10_10 0
+#define SI_MAC_DEPLOYMENT_BELOW_10_10 0
#endif
#define SANITIZER_INTERCEPT_READLINKAT \
(SI_POSIX && !SI_MAC_DEPLOYMENT_BELOW_10_10)
-#define SANITIZER_INTERCEPT_DEVNAME (SI_NETBSD || SI_OPENBSD || SI_FREEBSD)
+#define SANITIZER_INTERCEPT_DEVNAME (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_DEVNAME_R (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_FGETLN (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_STRMODE (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_TTYENT SI_NETBSD
#define SANITIZER_INTERCEPT_PROTOENT (SI_NETBSD || SI_LINUX)
-#define SANITIZER_INTERCEPT_PROTOENT_R (SI_LINUX_NOT_ANDROID)
+#define SANITIZER_INTERCEPT_PROTOENT_R SI_GLIBC
#define SANITIZER_INTERCEPT_NETENT SI_NETBSD
-#define SANITIZER_INTERCEPT_SETVBUF (SI_NETBSD || SI_FREEBSD || \
- SI_LINUX || SI_MAC)
+#define SANITIZER_INTERCEPT_SETVBUF \
+ (SI_NETBSD || SI_FREEBSD || SI_LINUX || SI_MAC)
#define SANITIZER_INTERCEPT_GETMNTINFO (SI_NETBSD || SI_FREEBSD || SI_MAC)
#define SANITIZER_INTERCEPT_MI_VECTOR_HASH SI_NETBSD
#define SANITIZER_INTERCEPT_GETVFSSTAT SI_NETBSD
@@ -598,7 +577,7 @@
#define SANITIZER_INTERCEPT_GETENTROPY SI_FREEBSD
#define SANITIZER_INTERCEPT_QSORT \
(SI_POSIX && !SI_IOSSIM && !SI_WATCHOS && !SI_TVOS && !SI_ANDROID)
-#define SANITIZER_INTERCEPT_QSORT_R (SI_LINUX && !SI_ANDROID)
+#define SANITIZER_INTERCEPT_QSORT_R SI_GLIBC
// sigaltstack on i386 macOS cannot be intercepted due to setjmp()
// calling it and assuming that it does not clobber registers.
#define SANITIZER_INTERCEPT_SIGALTSTACK \
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cpp
index dcc6c71c07d8..b1c15be58dea 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cpp
@@ -81,8 +81,6 @@
#include <sys/shm.h>
#undef _KERNEL
-#undef INLINE // to avoid clashes with sanitizers' definitions
-
#undef IOC_DIRMASK
// Include these after system headers to avoid name clashes and ambiguities.
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.cpp
index 25da334b63f0..c8f2aa5dba4a 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.cpp
@@ -34,6 +34,7 @@
#include <sys/chio.h>
#include <sys/clockctl.h>
#include <sys/cpuio.h>
+#include <sys/dkbad.h>
#include <sys/dkio.h>
#include <sys/drvctlio.h>
#include <sys/dvdio.h>
@@ -83,6 +84,7 @@
#include <sys/resource.h>
#include <sys/sem.h>
+#include <sys/scsiio.h>
#include <sys/sha1.h>
#include <sys/sha2.h>
#include <sys/shm.h>
@@ -139,7 +141,158 @@
#include <dev/ir/irdaio.h>
#include <dev/isa/isvio.h>
#include <dev/isa/wtreg.h>
+#if __has_include(<dev/iscsi/iscsi_ioctl.h>)
#include <dev/iscsi/iscsi_ioctl.h>
+#else
+/* Fallback for MKISCSI=no */
+
+typedef struct {
+ uint32_t status;
+ uint32_t session_id;
+ uint32_t connection_id;
+} iscsi_conn_status_parameters_t;
+
+typedef struct {
+ uint32_t status;
+ uint16_t interface_version;
+ uint16_t major;
+ uint16_t minor;
+ uint8_t version_string[224];
+} iscsi_get_version_parameters_t;
+
+typedef struct {
+ uint32_t status;
+ uint32_t session_id;
+ uint32_t connection_id;
+ struct {
+ unsigned int immediate : 1;
+ } options;
+ uint64_t lun;
+ scsireq_t req; /* from <sys/scsiio.h> */
+} iscsi_iocommand_parameters_t;
+
+typedef enum {
+ ISCSI_AUTH_None = 0,
+ ISCSI_AUTH_CHAP = 1,
+ ISCSI_AUTH_KRB5 = 2,
+ ISCSI_AUTH_SRP = 3
+} iscsi_auth_types_t;
+
+typedef enum {
+ ISCSI_LOGINTYPE_DISCOVERY = 0,
+ ISCSI_LOGINTYPE_NOMAP = 1,
+ ISCSI_LOGINTYPE_MAP = 2
+} iscsi_login_session_type_t;
+
+typedef enum { ISCSI_DIGEST_None = 0, ISCSI_DIGEST_CRC32C = 1 } iscsi_digest_t;
+
+typedef enum {
+ ISCSI_SESSION_TERMINATED = 1,
+ ISCSI_CONNECTION_TERMINATED,
+ ISCSI_RECOVER_CONNECTION,
+ ISCSI_DRIVER_TERMINATING
+} iscsi_event_t;
+
+typedef struct {
+ unsigned int mutual_auth : 1;
+ unsigned int is_secure : 1;
+ unsigned int auth_number : 4;
+ iscsi_auth_types_t auth_type[4];
+} iscsi_auth_info_t;
+
+typedef struct {
+ uint32_t status;
+ int socket;
+ struct {
+ unsigned int HeaderDigest : 1;
+ unsigned int DataDigest : 1;
+ unsigned int MaxConnections : 1;
+ unsigned int DefaultTime2Wait : 1;
+ unsigned int DefaultTime2Retain : 1;
+ unsigned int MaxRecvDataSegmentLength : 1;
+ unsigned int auth_info : 1;
+ unsigned int user_name : 1;
+ unsigned int password : 1;
+ unsigned int target_password : 1;
+ unsigned int TargetName : 1;
+ unsigned int TargetAlias : 1;
+ unsigned int ErrorRecoveryLevel : 1;
+ } is_present;
+ iscsi_auth_info_t auth_info;
+ iscsi_login_session_type_t login_type;
+ iscsi_digest_t HeaderDigest;
+ iscsi_digest_t DataDigest;
+ uint32_t session_id;
+ uint32_t connection_id;
+ uint32_t MaxRecvDataSegmentLength;
+ uint16_t MaxConnections;
+ uint16_t DefaultTime2Wait;
+ uint16_t DefaultTime2Retain;
+ uint16_t ErrorRecoveryLevel;
+ void *user_name;
+ void *password;
+ void *target_password;
+ void *TargetName;
+ void *TargetAlias;
+} iscsi_login_parameters_t;
+
+typedef struct {
+ uint32_t status;
+ uint32_t session_id;
+} iscsi_logout_parameters_t;
+
+typedef struct {
+ uint32_t status;
+ uint32_t event_id;
+} iscsi_register_event_parameters_t;
+
+typedef struct {
+ uint32_t status;
+ uint32_t session_id;
+ uint32_t connection_id;
+} iscsi_remove_parameters_t;
+
+typedef struct {
+ uint32_t status;
+ uint32_t session_id;
+ void *response_buffer;
+ uint32_t response_size;
+ uint32_t response_used;
+ uint32_t response_total;
+ uint8_t key[224];
+} iscsi_send_targets_parameters_t;
+
+typedef struct {
+ uint32_t status;
+ uint8_t InitiatorName[224];
+ uint8_t InitiatorAlias[224];
+ uint8_t ISID[6];
+} iscsi_set_node_name_parameters_t;
+
+typedef struct {
+ uint32_t status;
+ uint32_t event_id;
+ iscsi_event_t event_kind;
+ uint32_t session_id;
+ uint32_t connection_id;
+ uint32_t reason;
+} iscsi_wait_event_parameters_t;
+
+#define ISCSI_GET_VERSION _IOWR(0, 1, iscsi_get_version_parameters_t)
+#define ISCSI_LOGIN _IOWR(0, 2, iscsi_login_parameters_t)
+#define ISCSI_LOGOUT _IOWR(0, 3, iscsi_logout_parameters_t)
+#define ISCSI_ADD_CONNECTION _IOWR(0, 4, iscsi_login_parameters_t)
+#define ISCSI_RESTORE_CONNECTION _IOWR(0, 5, iscsi_login_parameters_t)
+#define ISCSI_REMOVE_CONNECTION _IOWR(0, 6, iscsi_remove_parameters_t)
+#define ISCSI_CONNECTION_STATUS _IOWR(0, 7, iscsi_conn_status_parameters_t)
+#define ISCSI_SEND_TARGETS _IOWR(0, 8, iscsi_send_targets_parameters_t)
+#define ISCSI_SET_NODE_NAME _IOWR(0, 9, iscsi_set_node_name_parameters_t)
+#define ISCSI_IO_COMMAND _IOWR(0, 10, iscsi_iocommand_parameters_t)
+#define ISCSI_REGISTER_EVENT _IOWR(0, 11, iscsi_register_event_parameters_t)
+#define ISCSI_DEREGISTER_EVENT _IOWR(0, 12, iscsi_register_event_parameters_t)
+#define ISCSI_WAIT_EVENT _IOWR(0, 13, iscsi_wait_event_parameters_t)
+#define ISCSI_POLL_EVENT _IOWR(0, 14, iscsi_wait_event_parameters_t)
+#endif
#include <dev/ofw/openfirmio.h>
#include <dev/pci/amrio.h>
#include <dev/pci/mlyreg.h>
@@ -372,7 +525,7 @@ struct urio_command {
#include "sanitizer_platform_limits_netbsd.h"
namespace __sanitizer {
-void *__sanitizer_get_link_map_by_dlopen_handle(void* handle) {
+void *__sanitizer_get_link_map_by_dlopen_handle(void *handle) {
void *p = nullptr;
return internal_dlinfo(handle, RTLD_DI_LINKMAP, &p) == 0 ? p : nullptr;
}
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.h
index d80280d9bf8c..9e28dcfef041 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.h
@@ -21,8 +21,8 @@
namespace __sanitizer {
void *__sanitizer_get_link_map_by_dlopen_handle(void *handle);
-# define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \
- (link_map *)__sanitizer_get_link_map_by_dlopen_handle(handle)
+#define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \
+ (link_map *)__sanitizer_get_link_map_by_dlopen_handle(handle)
extern unsigned struct_utsname_sz;
extern unsigned struct_stat_sz;
@@ -1024,12 +1024,10 @@ extern unsigned struct_RF_ProgressInfo_sz;
extern unsigned struct_nvlist_ref_sz;
extern unsigned struct_StringList_sz;
-
// A special value to mark ioctls that are not present on the target platform,
// when it can not be determined without including any system headers.
extern const unsigned IOCTL_NOT_PRESENT;
-
extern unsigned IOCTL_AFM_ADDFMAP;
extern unsigned IOCTL_AFM_DELFMAP;
extern unsigned IOCTL_AFM_CLEANFMAP;
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_openbsd.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_openbsd.cpp
index 1420ecbfa568..e69de29bb2d1 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_openbsd.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_openbsd.cpp
@@ -1,279 +0,0 @@
-//===-- sanitizer_platform_limits_openbsd.cpp -----------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of Sanitizer common code.
-//
-// Sizes and layouts of platform-specific NetBSD data structures.
-//===----------------------------------------------------------------------===//
-
-#include "sanitizer_platform.h"
-
-#if SANITIZER_OPENBSD
-#include <arpa/inet.h>
-#include <dirent.h>
-#include <glob.h>
-#include <grp.h>
-#include <ifaddrs.h>
-#include <limits.h>
-#include <link_elf.h>
-#include <sys/socket.h>
-#include <net/if.h>
-#include <net/ppp_defs.h>
-#include <net/route.h>
-#include <netdb.h>
-#include <netinet/in.h>
-#include <netinet/ip_mroute.h>
-#include <poll.h>
-#include <pthread.h>
-#include <pwd.h>
-#include <semaphore.h>
-#include <signal.h>
-#include <soundcard.h>
-#include <stddef.h>
-#include <stdint.h>
-#include <sys/filio.h>
-#include <sys/ipc.h>
-#include <sys/mman.h>
-#include <sys/mount.h>
-#include <sys/msg.h>
-#include <sys/mtio.h>
-#include <sys/ptrace.h>
-#include <sys/resource.h>
-#include <sys/shm.h>
-#include <sys/signal.h>
-#include <sys/sockio.h>
-#include <sys/stat.h>
-#include <sys/statvfs.h>
-#include <sys/time.h>
-#include <sys/times.h>
-#include <sys/types.h>
-#include <sys/utsname.h>
-#include <term.h>
-#include <time.h>
-#include <utime.h>
-#include <utmp.h>
-#include <wchar.h>
-
-// Include these after system headers to avoid name clashes and ambiguities.
-#include "sanitizer_internal_defs.h"
-#include "sanitizer_platform_limits_openbsd.h"
-
-namespace __sanitizer {
-unsigned struct_utsname_sz = sizeof(struct utsname);
-unsigned struct_stat_sz = sizeof(struct stat);
-unsigned struct_rusage_sz = sizeof(struct rusage);
-unsigned struct_tm_sz = sizeof(struct tm);
-unsigned struct_passwd_sz = sizeof(struct passwd);
-unsigned struct_group_sz = sizeof(struct group);
-unsigned siginfo_t_sz = sizeof(siginfo_t);
-unsigned struct_sigaction_sz = sizeof(struct sigaction);
-unsigned struct_stack_t_sz = sizeof(stack_t);
-unsigned struct_itimerval_sz = sizeof(struct itimerval);
-unsigned pthread_t_sz = sizeof(pthread_t);
-unsigned pthread_mutex_t_sz = sizeof(pthread_mutex_t);
-unsigned pthread_cond_t_sz = sizeof(pthread_cond_t);
-unsigned pid_t_sz = sizeof(pid_t);
-unsigned timeval_sz = sizeof(timeval);
-unsigned uid_t_sz = sizeof(uid_t);
-unsigned gid_t_sz = sizeof(gid_t);
-unsigned mbstate_t_sz = sizeof(mbstate_t);
-unsigned sigset_t_sz = sizeof(sigset_t);
-unsigned struct_timezone_sz = sizeof(struct timezone);
-unsigned struct_tms_sz = sizeof(struct tms);
-unsigned struct_sched_param_sz = sizeof(struct sched_param);
-unsigned struct_sockaddr_sz = sizeof(struct sockaddr);
-unsigned struct_rlimit_sz = sizeof(struct rlimit);
-unsigned struct_timespec_sz = sizeof(struct timespec);
-unsigned struct_utimbuf_sz = sizeof(struct utimbuf);
-unsigned struct_itimerspec_sz = sizeof(struct itimerspec);
-unsigned struct_msqid_ds_sz = sizeof(struct msqid_ds);
-unsigned struct_statvfs_sz = sizeof(struct statvfs);
-
-const uptr sig_ign = (uptr)SIG_IGN;
-const uptr sig_dfl = (uptr)SIG_DFL;
-const uptr sig_err = (uptr)SIG_ERR;
-const uptr sa_siginfo = (uptr)SA_SIGINFO;
-
-int shmctl_ipc_stat = (int)IPC_STAT;
-
-unsigned struct_utmp_sz = sizeof(struct utmp);
-
-int map_fixed = MAP_FIXED;
-
-int af_inet = (int)AF_INET;
-int af_inet6 = (int)AF_INET6;
-
-uptr __sanitizer_in_addr_sz(int af) {
- if (af == AF_INET)
- return sizeof(struct in_addr);
- else if (af == AF_INET6)
- return sizeof(struct in6_addr);
- else
- return 0;
-}
-
-unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
-
-int glob_nomatch = GLOB_NOMATCH;
-int glob_altdirfunc = GLOB_ALTDIRFUNC;
-
-unsigned path_max = PATH_MAX;
-
-const int si_SEGV_MAPERR = SEGV_MAPERR;
-const int si_SEGV_ACCERR = SEGV_ACCERR;
-} // namespace __sanitizer
-
-using namespace __sanitizer;
-
-COMPILER_CHECK(sizeof(__sanitizer_pthread_attr_t) >= sizeof(pthread_attr_t));
-
-COMPILER_CHECK(sizeof(socklen_t) == sizeof(unsigned));
-CHECK_TYPE_SIZE(pthread_key_t);
-
-CHECK_TYPE_SIZE(dl_phdr_info);
-CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_addr);
-CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_name);
-CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phdr);
-CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phnum);
-
-CHECK_TYPE_SIZE(glob_t);
-CHECK_SIZE_AND_OFFSET(glob_t, gl_pathc);
-CHECK_SIZE_AND_OFFSET(glob_t, gl_pathv);
-CHECK_SIZE_AND_OFFSET(glob_t, gl_offs);
-CHECK_SIZE_AND_OFFSET(glob_t, gl_flags);
-CHECK_SIZE_AND_OFFSET(glob_t, gl_closedir);
-CHECK_SIZE_AND_OFFSET(glob_t, gl_readdir);
-CHECK_SIZE_AND_OFFSET(glob_t, gl_opendir);
-CHECK_SIZE_AND_OFFSET(glob_t, gl_lstat);
-CHECK_SIZE_AND_OFFSET(glob_t, gl_stat);
-
-CHECK_TYPE_SIZE(addrinfo);
-CHECK_SIZE_AND_OFFSET(addrinfo, ai_flags);
-CHECK_SIZE_AND_OFFSET(addrinfo, ai_family);
-CHECK_SIZE_AND_OFFSET(addrinfo, ai_socktype);
-CHECK_SIZE_AND_OFFSET(addrinfo, ai_protocol);
-CHECK_SIZE_AND_OFFSET(addrinfo, ai_addrlen);
-CHECK_SIZE_AND_OFFSET(addrinfo, ai_addr);
-CHECK_SIZE_AND_OFFSET(addrinfo, ai_canonname);
-CHECK_SIZE_AND_OFFSET(addrinfo, ai_next);
-
-CHECK_TYPE_SIZE(hostent);
-CHECK_SIZE_AND_OFFSET(hostent, h_name);
-CHECK_SIZE_AND_OFFSET(hostent, h_aliases);
-CHECK_SIZE_AND_OFFSET(hostent, h_addrtype);
-CHECK_SIZE_AND_OFFSET(hostent, h_length);
-CHECK_SIZE_AND_OFFSET(hostent, h_addr_list);
-
-CHECK_TYPE_SIZE(iovec);
-CHECK_SIZE_AND_OFFSET(iovec, iov_base);
-CHECK_SIZE_AND_OFFSET(iovec, iov_len);
-
-CHECK_TYPE_SIZE(msghdr);
-CHECK_SIZE_AND_OFFSET(msghdr, msg_name);
-CHECK_SIZE_AND_OFFSET(msghdr, msg_namelen);
-CHECK_SIZE_AND_OFFSET(msghdr, msg_iov);
-CHECK_SIZE_AND_OFFSET(msghdr, msg_iovlen);
-CHECK_SIZE_AND_OFFSET(msghdr, msg_control);
-CHECK_SIZE_AND_OFFSET(msghdr, msg_controllen);
-CHECK_SIZE_AND_OFFSET(msghdr, msg_flags);
-
-CHECK_TYPE_SIZE(cmsghdr);
-CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_len);
-CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_level);
-CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_type);
-
-COMPILER_CHECK(sizeof(__sanitizer_dirent) <= sizeof(dirent));
-CHECK_SIZE_AND_OFFSET(dirent, d_fileno);
-CHECK_SIZE_AND_OFFSET(dirent, d_off);
-CHECK_SIZE_AND_OFFSET(dirent, d_reclen);
-
-CHECK_TYPE_SIZE(ifconf);
-CHECK_SIZE_AND_OFFSET(ifconf, ifc_len);
-CHECK_SIZE_AND_OFFSET(ifconf, ifc_ifcu);
-
-CHECK_TYPE_SIZE(pollfd);
-CHECK_SIZE_AND_OFFSET(pollfd, fd);
-CHECK_SIZE_AND_OFFSET(pollfd, events);
-CHECK_SIZE_AND_OFFSET(pollfd, revents);
-
-CHECK_TYPE_SIZE(nfds_t);
-
-CHECK_TYPE_SIZE(sigset_t);
-
-COMPILER_CHECK(sizeof(__sanitizer_sigaction) == sizeof(struct sigaction));
-// Can't write checks for sa_handler and sa_sigaction due to them being
-// preprocessor macros.
-CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_mask);
-
-CHECK_TYPE_SIZE(tm);
-CHECK_SIZE_AND_OFFSET(tm, tm_sec);
-CHECK_SIZE_AND_OFFSET(tm, tm_min);
-CHECK_SIZE_AND_OFFSET(tm, tm_hour);
-CHECK_SIZE_AND_OFFSET(tm, tm_mday);
-CHECK_SIZE_AND_OFFSET(tm, tm_mon);
-CHECK_SIZE_AND_OFFSET(tm, tm_year);
-CHECK_SIZE_AND_OFFSET(tm, tm_wday);
-CHECK_SIZE_AND_OFFSET(tm, tm_yday);
-CHECK_SIZE_AND_OFFSET(tm, tm_isdst);
-CHECK_SIZE_AND_OFFSET(tm, tm_gmtoff);
-CHECK_SIZE_AND_OFFSET(tm, tm_zone);
-
-CHECK_TYPE_SIZE(ipc_perm);
-CHECK_SIZE_AND_OFFSET(ipc_perm, cuid);
-CHECK_SIZE_AND_OFFSET(ipc_perm, cgid);
-CHECK_SIZE_AND_OFFSET(ipc_perm, uid);
-CHECK_SIZE_AND_OFFSET(ipc_perm, gid);
-CHECK_SIZE_AND_OFFSET(ipc_perm, mode);
-CHECK_SIZE_AND_OFFSET(ipc_perm, seq);
-CHECK_SIZE_AND_OFFSET(ipc_perm, key);
-
-CHECK_TYPE_SIZE(shmid_ds);
-CHECK_SIZE_AND_OFFSET(shmid_ds, shm_perm);
-CHECK_SIZE_AND_OFFSET(shmid_ds, shm_segsz);
-CHECK_SIZE_AND_OFFSET(shmid_ds, shm_atime);
-CHECK_SIZE_AND_OFFSET(shmid_ds, __shm_atimensec);
-CHECK_SIZE_AND_OFFSET(shmid_ds, shm_dtime);
-CHECK_SIZE_AND_OFFSET(shmid_ds, __shm_dtimensec);
-CHECK_SIZE_AND_OFFSET(shmid_ds, shm_ctime);
-CHECK_SIZE_AND_OFFSET(shmid_ds, __shm_ctimensec);
-CHECK_SIZE_AND_OFFSET(shmid_ds, shm_cpid);
-CHECK_SIZE_AND_OFFSET(shmid_ds, shm_lpid);
-CHECK_SIZE_AND_OFFSET(shmid_ds, shm_nattch);
-
-CHECK_TYPE_SIZE(clock_t);
-
-CHECK_TYPE_SIZE(ifaddrs);
-CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_next);
-CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_name);
-CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_addr);
-CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_netmask);
-// Compare against the union, because we can't reach into the union in a
-// compliant way.
-#ifdef ifa_dstaddr
-#undef ifa_dstaddr
-#endif
-CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_dstaddr);
-CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_data);
-
-CHECK_TYPE_SIZE(passwd);
-CHECK_SIZE_AND_OFFSET(passwd, pw_name);
-CHECK_SIZE_AND_OFFSET(passwd, pw_passwd);
-CHECK_SIZE_AND_OFFSET(passwd, pw_uid);
-CHECK_SIZE_AND_OFFSET(passwd, pw_gid);
-CHECK_SIZE_AND_OFFSET(passwd, pw_dir);
-CHECK_SIZE_AND_OFFSET(passwd, pw_shell);
-
-CHECK_SIZE_AND_OFFSET(passwd, pw_gecos);
-
-CHECK_TYPE_SIZE(group);
-CHECK_SIZE_AND_OFFSET(group, gr_name);
-CHECK_SIZE_AND_OFFSET(group, gr_passwd);
-CHECK_SIZE_AND_OFFSET(group, gr_gid);
-CHECK_SIZE_AND_OFFSET(group, gr_mem);
-
-#endif // SANITIZER_OPENBSD
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_openbsd.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_openbsd.h
index 8a1948723605..e69de29bb2d1 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_openbsd.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_openbsd.h
@@ -1,382 +0,0 @@
-//===-- sanitizer_platform_limits_openbsd.h -------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of Sanitizer common code.
-//
-// Sizes and layouts of platform-specific OpenBSD data structures.
-//===----------------------------------------------------------------------===//
-
-#ifndef SANITIZER_PLATFORM_LIMITS_OPENBSD_H
-#define SANITIZER_PLATFORM_LIMITS_OPENBSD_H
-
-#if SANITIZER_OPENBSD
-
-#include "sanitizer_internal_defs.h"
-#include "sanitizer_platform.h"
-
-#define _GET_LINK_MAP_BY_DLOPEN_HANDLE(handle, shift) \
- ((link_map *)((handle) == nullptr ? nullptr : ((char *)(handle) + (shift))))
-
-#if defined(__x86_64__)
-#define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \
- _GET_LINK_MAP_BY_DLOPEN_HANDLE(handle, 312)
-#elif defined(__i386__)
-#define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \
- _GET_LINK_MAP_BY_DLOPEN_HANDLE(handle, 164)
-#endif
-
-#define RLIMIT_AS RLIMIT_DATA
-
-namespace __sanitizer {
-extern unsigned struct_utsname_sz;
-extern unsigned struct_stat_sz;
-extern unsigned struct_rusage_sz;
-extern unsigned siginfo_t_sz;
-extern unsigned struct_itimerval_sz;
-extern unsigned pthread_t_sz;
-extern unsigned pthread_mutex_t_sz;
-extern unsigned pthread_cond_t_sz;
-extern unsigned pid_t_sz;
-extern unsigned timeval_sz;
-extern unsigned uid_t_sz;
-extern unsigned gid_t_sz;
-extern unsigned mbstate_t_sz;
-extern unsigned struct_timezone_sz;
-extern unsigned struct_tms_sz;
-extern unsigned struct_itimerspec_sz;
-extern unsigned struct_sigevent_sz;
-extern unsigned struct_stack_t_sz;
-extern unsigned struct_statfs_sz;
-extern unsigned struct_sockaddr_sz;
-
-extern unsigned struct_rlimit_sz;
-extern unsigned struct_utimbuf_sz;
-extern unsigned struct_timespec_sz;
-
-struct __sanitizer_iocb {
- u64 aio_offset;
- uptr aio_buf;
- long aio_nbytes;
- u32 aio_fildes;
- u32 aio_lio_opcode;
- long aio_reqprio;
-#if SANITIZER_WORDSIZE == 64
- u8 aio_sigevent[32];
-#else
- u8 aio_sigevent[20];
-#endif
- u32 _state;
- u32 _errno;
- long _retval;
-};
-
-struct __sanitizer___sysctl_args {
- int *name;
- int nlen;
- void *oldval;
- uptr *oldlenp;
- void *newval;
- uptr newlen;
-};
-
-struct __sanitizer_sem_t {
- uptr data[5];
-};
-
-struct __sanitizer_ipc_perm {
- u32 cuid;
- u32 cgid;
- u32 uid;
- u32 gid;
- u32 mode;
- unsigned short seq;
- long key;
-};
-
-struct __sanitizer_shmid_ds {
- __sanitizer_ipc_perm shm_perm;
- int shm_segsz;
- u32 shm_lpid;
- u32 shm_cpid;
- short shm_nattch;
- u64 shm_atime;
- long __shm_atimensec;
- u64 shm_dtime;
- long __shm_dtimensec;
- u64 shm_ctime;
- long __shm_ctimensec;
- void *_shm_internal;
-};
-
-extern unsigned struct_msqid_ds_sz;
-extern unsigned struct_mq_attr_sz;
-extern unsigned struct_timex_sz;
-extern unsigned struct_statvfs_sz;
-
-struct __sanitizer_iovec {
- void *iov_base;
- uptr iov_len;
-};
-
-struct __sanitizer_ifaddrs {
- struct __sanitizer_ifaddrs *ifa_next;
- char *ifa_name;
- unsigned int ifa_flags;
- struct __sanitizer_sockaddr *ifa_addr; // (struct sockaddr *)
- struct __sanitizer_sockaddr *ifa_netmask; // (struct sockaddr *)
- struct __sanitizer_sockaddr *ifa_dstaddr; // (struct sockaddr *)
- void *ifa_data;
-};
-
-typedef unsigned __sanitizer_pthread_key_t;
-
-typedef long long __sanitizer_time_t;
-typedef int __sanitizer_suseconds_t;
-
-struct __sanitizer_timeval {
- __sanitizer_time_t tv_sec;
- __sanitizer_suseconds_t tv_usec;
-};
-
-struct __sanitizer_itimerval {
- struct __sanitizer_timeval it_interval;
- struct __sanitizer_timeval it_value;
-};
-
-struct __sanitizer_passwd {
- char *pw_name;
- char *pw_passwd;
- int pw_uid;
- int pw_gid;
- __sanitizer_time_t pw_change;
- char *pw_class;
- char *pw_gecos;
- char *pw_dir;
- char *pw_shell;
- __sanitizer_time_t pw_expire;
-};
-
-struct __sanitizer_group {
- char *gr_name;
- char *gr_passwd;
- int gr_gid;
- char **gr_mem;
-};
-
-struct __sanitizer_ether_addr {
- u8 octet[6];
-};
-
-struct __sanitizer_tm {
- int tm_sec;
- int tm_min;
- int tm_hour;
- int tm_mday;
- int tm_mon;
- int tm_year;
- int tm_wday;
- int tm_yday;
- int tm_isdst;
- long int tm_gmtoff;
- const char *tm_zone;
-};
-
-struct __sanitizer_msghdr {
- void *msg_name;
- unsigned msg_namelen;
- struct __sanitizer_iovec *msg_iov;
- unsigned msg_iovlen;
- void *msg_control;
- unsigned msg_controllen;
- int msg_flags;
-};
-struct __sanitizer_cmsghdr {
- unsigned cmsg_len;
- int cmsg_level;
- int cmsg_type;
-};
-
-struct __sanitizer_dirent {
- u64 d_fileno;
- u64 d_off;
- u16 d_reclen;
-};
-
-typedef u64 __sanitizer_clock_t;
-typedef u32 __sanitizer_clockid_t;
-
-typedef u32 __sanitizer___kernel_uid_t;
-typedef u32 __sanitizer___kernel_gid_t;
-typedef u64 __sanitizer___kernel_off_t;
-typedef struct {
- u32 fds_bits[8];
-} __sanitizer___kernel_fd_set;
-
-typedef struct {
- unsigned int pta_magic;
- int pta_flags;
- void *pta_private;
-} __sanitizer_pthread_attr_t;
-
-typedef unsigned int __sanitizer_sigset_t;
-
-struct __sanitizer_siginfo {
- // The size is determined by looking at sizeof of real siginfo_t on linux.
- u64 opaque[128 / sizeof(u64)];
-};
-
-using __sanitizer_sighandler_ptr = void (*)(int sig);
-using __sanitizer_sigactionhandler_ptr = void (*)(int sig,
- __sanitizer_siginfo *siginfo,
- void *uctx);
-
-struct __sanitizer_sigaction {
- union {
- __sanitizer_sighandler_ptr handler;
- __sanitizer_sigactionhandler_ptr sigaction;
- };
- __sanitizer_sigset_t sa_mask;
- int sa_flags;
-};
-
-typedef __sanitizer_sigset_t __sanitizer_kernel_sigset_t;
-
-struct __sanitizer_kernel_sigaction_t {
- union {
- void (*handler)(int signo);
- void (*sigaction)(int signo, void *info, void *ctx);
- };
- unsigned long sa_flags;
- void (*sa_restorer)(void);
- __sanitizer_kernel_sigset_t sa_mask;
-};
-
-extern const uptr sig_ign;
-extern const uptr sig_dfl;
-extern const uptr sig_err;
-extern const uptr sa_siginfo;
-
-extern int af_inet;
-extern int af_inet6;
-uptr __sanitizer_in_addr_sz(int af);
-
-struct __sanitizer_dl_phdr_info {
-#if SANITIZER_WORDSIZE == 64
- u64 dlpi_addr;
-#else
- u32 dlpi_addr;
-#endif
- const char *dlpi_name;
- const void *dlpi_phdr;
-#if SANITIZER_WORDSIZE == 64
- u32 dlpi_phnum;
-#else
- u16 dlpi_phnum;
-#endif
-};
-
-extern unsigned struct_ElfW_Phdr_sz;
-
-struct __sanitizer_addrinfo {
- int ai_flags;
- int ai_family;
- int ai_socktype;
- int ai_protocol;
- unsigned ai_addrlen;
- struct __sanitizer_sockaddr *ai_addr;
- char *ai_canonname;
- struct __sanitizer_addrinfo *ai_next;
-};
-
-struct __sanitizer_hostent {
- char *h_name;
- char **h_aliases;
- int h_addrtype;
- int h_length;
- char **h_addr_list;
-};
-
-struct __sanitizer_pollfd {
- int fd;
- short events;
- short revents;
-};
-
-typedef unsigned __sanitizer_nfds_t;
-
-struct __sanitizer_glob_t {
- int gl_pathc;
- int gl_matchc;
- int gl_offs;
- int gl_flags;
- char **gl_pathv;
- void **gl_statv;
- int (*gl_errfunc)(const char *, int);
- void (*gl_closedir)(void *dirp);
- struct dirent *(*gl_readdir)(void *dirp);
- void *(*gl_opendir)(const char *);
- int (*gl_lstat)(const char *, void * /* struct stat* */);
- int (*gl_stat)(const char *, void * /* struct stat* */);
-};
-
-extern int glob_nomatch;
-extern int glob_altdirfunc;
-
-extern unsigned path_max;
-
-typedef char __sanitizer_FILE;
-#define SANITIZER_HAS_STRUCT_FILE 0
-
-extern int shmctl_ipc_stat;
-
-// This simplifies generic code
-#define struct_shminfo_sz -1
-#define struct_shm_info_sz -1
-#define shmctl_shm_stat -1
-#define shmctl_ipc_info -1
-#define shmctl_shm_info -1
-
-extern unsigned struct_utmp_sz;
-extern unsigned struct_utmpx_sz;
-
-extern int map_fixed;
-
-// ioctl arguments
-struct __sanitizer_ifconf {
- int ifc_len;
- union {
- void *ifcu_req;
- } ifc_ifcu;
-};
-
-extern const int si_SEGV_MAPERR;
-extern const int si_SEGV_ACCERR;
-} // namespace __sanitizer
-
-#define CHECK_TYPE_SIZE(TYPE) \
- COMPILER_CHECK(sizeof(__sanitizer_##TYPE) == sizeof(TYPE))
-
-#define CHECK_SIZE_AND_OFFSET(CLASS, MEMBER) \
- COMPILER_CHECK(sizeof(((__sanitizer_##CLASS *)NULL)->MEMBER) == \
- sizeof(((CLASS *)NULL)->MEMBER)); \
- COMPILER_CHECK(offsetof(__sanitizer_##CLASS, MEMBER) == \
- offsetof(CLASS, MEMBER))
-
-// For sigaction, which is a function and struct at the same time,
-// and thus requires explicit "struct" in sizeof() expression.
-#define CHECK_STRUCT_SIZE_AND_OFFSET(CLASS, MEMBER) \
- COMPILER_CHECK(sizeof(((struct __sanitizer_##CLASS *)NULL)->MEMBER) == \
- sizeof(((struct CLASS *)NULL)->MEMBER)); \
- COMPILER_CHECK(offsetof(struct __sanitizer_##CLASS, MEMBER) == \
- offsetof(struct CLASS, MEMBER))
-
-#define SIGACTION_SYMNAME __sigaction14
-
-#endif // SANITIZER_OPENBSD
-
-#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp
index c052aa2bc950..7abaeb880bf3 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp
@@ -11,19 +11,20 @@
// Sizes and layouts of platform-specific POSIX data structures.
//===----------------------------------------------------------------------===//
-#include "sanitizer_platform.h"
-
-#if SANITIZER_LINUX || SANITIZER_MAC
+#if defined(__linux__) || defined(__APPLE__)
// Tests in this file assume that off_t-dependent data structures match the
// libc ABI. For example, struct dirent here is what readdir() function (as
// exported from libc) returns, and not the user-facing "dirent", which
// depends on _FILE_OFFSET_BITS setting.
// To get this "true" dirent definition, we undefine _FILE_OFFSET_BITS below.
-#ifdef _FILE_OFFSET_BITS
#undef _FILE_OFFSET_BITS
#endif
// Must go after undef _FILE_OFFSET_BITS.
+#include "sanitizer_platform.h"
+
+#if SANITIZER_LINUX || SANITIZER_MAC
+// Must go after undef _FILE_OFFSET_BITS.
#include "sanitizer_glibc_version.h"
#include <arpa/inet.h>
@@ -37,6 +38,7 @@
#include <pwd.h>
#include <signal.h>
#include <stddef.h>
+#include <stdio.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/socket.h>
@@ -58,7 +60,6 @@
#endif
#if !SANITIZER_ANDROID
-#include <fstab.h>
#include <sys/mount.h>
#include <sys/timeb.h>
#include <utmpx.h>
@@ -90,7 +91,8 @@
#if SANITIZER_LINUX
# include <utime.h>
# include <sys/ptrace.h>
-# if defined(__mips64) || defined(__aarch64__) || defined(__arm__)
+#if defined(__mips64) || defined(__aarch64__) || defined(__arm__) || \
+ SANITIZER_RISCV64
# include <asm/ptrace.h>
# ifdef __arm__
typedef struct user_fpregs elf_fpregset_t;
@@ -109,20 +111,31 @@ typedef struct user_fpregs elf_fpregset_t;
#include <wordexp.h>
#endif
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
-#include <glob.h>
-#include <obstack.h>
-#include <mqueue.h>
+#if SANITIZER_LINUX
+#if SANITIZER_GLIBC
+#include <fstab.h>
#include <net/if_ppp.h>
#include <netax25/ax25.h>
#include <netipx/ipx.h>
#include <netrom/netrom.h>
+#include <obstack.h>
#if HAVE_RPC_XDR_H
# include <rpc/xdr.h>
#endif
#include <scsi/scsi.h>
-#include <sys/mtio.h>
+#else
+#include <linux/if_ppp.h>
+#include <linux/kd.h>
+#include <linux/ppp_defs.h>
+#endif // SANITIZER_GLIBC
+
+#if SANITIZER_ANDROID
+#include <linux/mtio.h>
+#else
+#include <glob.h>
+#include <mqueue.h>
#include <sys/kd.h>
+#include <sys/mtio.h>
#include <sys/shm.h>
#include <sys/statvfs.h>
#include <sys/timex.h>
@@ -130,7 +143,6 @@ typedef struct user_fpregs elf_fpregset_t;
# include <sys/procfs.h>
#endif
#include <sys/user.h>
-#include <linux/cyclades.h>
#include <linux/if_eql.h>
#include <linux/if_plip.h>
#include <linux/lp.h>
@@ -141,20 +153,14 @@ typedef struct user_fpregs elf_fpregset_t;
#include <sys/msg.h>
#include <sys/ipc.h>
#include <crypt.h>
-#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+#endif // SANITIZER_ANDROID
-#if SANITIZER_ANDROID
-#include <linux/kd.h>
-#include <linux/mtio.h>
-#include <linux/ppp_defs.h>
-#include <linux/if_ppp.h>
-#endif
-
-#if SANITIZER_LINUX
#include <link.h>
#include <sys/vfs.h>
#include <sys/epoll.h>
#include <linux/capability.h>
+#else
+#include <fstab.h>
#endif // SANITIZER_LINUX
#if SANITIZER_MAC
@@ -201,8 +207,10 @@ namespace __sanitizer {
unsigned struct_statfs64_sz = sizeof(struct statfs64);
#endif // (SANITIZER_MAC && !TARGET_CPU_ARM64) && !SANITIZER_IOS
-#if !SANITIZER_ANDROID
+#if SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_MAC
unsigned struct_fstab_sz = sizeof(struct fstab);
+#endif // SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_MAC
+#if !SANITIZER_ANDROID
unsigned struct_statfs_sz = sizeof(struct statfs);
unsigned struct_sockaddr_sz = sizeof(struct sockaddr);
unsigned ucontext_t_sz = sizeof(ucontext_t);
@@ -229,9 +237,9 @@ namespace __sanitizer {
#if SANITIZER_LINUX && !SANITIZER_ANDROID
// Use pre-computed size of struct ustat to avoid <sys/ustat.h> which
// has been removed from glibc 2.28.
-#if defined(__aarch64__) || defined(__s390x__) || defined (__mips64) \
- || defined(__powerpc64__) || defined(__arch64__) || defined(__sparcv9) \
- || defined(__x86_64__) || (defined(__riscv) && __riscv_xlen == 64)
+#if defined(__aarch64__) || defined(__s390x__) || defined(__mips64) || \
+ defined(__powerpc64__) || defined(__arch64__) || defined(__sparcv9) || \
+ defined(__x86_64__) || SANITIZER_RISCV64
#define SIZEOF_STRUCT_USTAT 32
#elif defined(__arm__) || defined(__i386__) || defined(__mips__) \
|| defined(__powerpc__) || defined(__s390__) || defined(__sparc__)
@@ -298,18 +306,21 @@ unsigned struct_ElfW_Phdr_sz = sizeof(ElfW(Phdr));
unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
#endif
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#if SANITIZER_GLIBC
int glob_nomatch = GLOB_NOMATCH;
int glob_altdirfunc = GLOB_ALTDIRFUNC;
#endif
-#if SANITIZER_LINUX && !SANITIZER_ANDROID && \
- (defined(__i386) || defined(__x86_64) || defined(__mips64) || \
- defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \
- defined(__s390__))
+#if SANITIZER_LINUX && !SANITIZER_ANDROID && \
+ (defined(__i386) || defined(__x86_64) || defined(__mips64) || \
+ defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \
+ defined(__s390__) || SANITIZER_RISCV64)
#if defined(__mips64) || defined(__powerpc64__) || defined(__arm__)
unsigned struct_user_regs_struct_sz = sizeof(struct pt_regs);
unsigned struct_user_fpregs_struct_sz = sizeof(elf_fpregset_t);
+#elif SANITIZER_RISCV64
+ unsigned struct_user_regs_struct_sz = sizeof(struct user_regs_struct);
+ unsigned struct_user_fpregs_struct_sz = sizeof(struct __riscv_q_ext_state);
#elif defined(__aarch64__)
unsigned struct_user_regs_struct_sz = sizeof(struct user_pt_regs);
unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fpsimd_state);
@@ -321,7 +332,8 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fpregs_struct);
#endif // __mips64 || __powerpc64__ || __aarch64__
#if defined(__x86_64) || defined(__mips64) || defined(__powerpc64__) || \
- defined(__aarch64__) || defined(__arm__) || defined(__s390__)
+ defined(__aarch64__) || defined(__arm__) || defined(__s390__) || \
+ SANITIZER_RISCV64
unsigned struct_user_fpxregs_struct_sz = 0;
#else
unsigned struct_user_fpxregs_struct_sz = sizeof(struct user_fpxregs_struct);
@@ -417,7 +429,9 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
unsigned struct_input_id_sz = sizeof(struct input_id);
unsigned struct_mtpos_sz = sizeof(struct mtpos);
unsigned struct_rtentry_sz = sizeof(struct rtentry);
+#if SANITIZER_GLIBC || SANITIZER_ANDROID
unsigned struct_termio_sz = sizeof(struct termio);
+#endif
unsigned struct_vt_consize_sz = sizeof(struct vt_consize);
unsigned struct_vt_sizes_sz = sizeof(struct vt_sizes);
unsigned struct_vt_stat_sz = sizeof(struct vt_stat);
@@ -442,9 +456,8 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
unsigned struct_vt_mode_sz = sizeof(struct vt_mode);
#endif // SANITIZER_LINUX
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#if SANITIZER_GLIBC
unsigned struct_ax25_parms_struct_sz = sizeof(struct ax25_parms_struct);
- unsigned struct_cyclades_monitor_sz = sizeof(struct cyclades_monitor);
#if EV_VERSION > (0x010000)
unsigned struct_input_keymap_entry_sz = sizeof(struct input_keymap_entry);
#else
@@ -465,12 +478,10 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
unsigned struct_sockaddr_ax25_sz = sizeof(struct sockaddr_ax25);
unsigned struct_unimapdesc_sz = sizeof(struct unimapdesc);
unsigned struct_unimapinit_sz = sizeof(struct unimapinit);
-#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
unsigned struct_audio_buf_info_sz = sizeof(struct audio_buf_info);
unsigned struct_ppp_stats_sz = sizeof(struct ppp_stats);
-#endif // (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
+#endif // SANITIZER_GLIBC
#if !SANITIZER_ANDROID && !SANITIZER_MAC
unsigned struct_sioc_sg_req_sz = sizeof(struct sioc_sg_req);
@@ -810,15 +821,6 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
#endif // SANITIZER_LINUX
#if SANITIZER_LINUX && !SANITIZER_ANDROID
- unsigned IOCTL_CYGETDEFTHRESH = CYGETDEFTHRESH;
- unsigned IOCTL_CYGETDEFTIMEOUT = CYGETDEFTIMEOUT;
- unsigned IOCTL_CYGETMON = CYGETMON;
- unsigned IOCTL_CYGETTHRESH = CYGETTHRESH;
- unsigned IOCTL_CYGETTIMEOUT = CYGETTIMEOUT;
- unsigned IOCTL_CYSETDEFTHRESH = CYSETDEFTHRESH;
- unsigned IOCTL_CYSETDEFTIMEOUT = CYSETDEFTIMEOUT;
- unsigned IOCTL_CYSETTHRESH = CYSETTHRESH;
- unsigned IOCTL_CYSETTIMEOUT = CYSETTIMEOUT;
unsigned IOCTL_EQL_EMANCIPATE = EQL_EMANCIPATE;
unsigned IOCTL_EQL_ENSLAVE = EQL_ENSLAVE;
unsigned IOCTL_EQL_GETMASTRCFG = EQL_GETMASTRCFG;
@@ -876,6 +878,7 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
unsigned IOCTL_PIO_UNIMAP = PIO_UNIMAP;
unsigned IOCTL_PIO_UNIMAPCLR = PIO_UNIMAPCLR;
unsigned IOCTL_PIO_UNISCRNMAP = PIO_UNISCRNMAP;
+#if SANITIZER_GLIBC
unsigned IOCTL_SCSI_IOCTL_GET_IDLUN = SCSI_IOCTL_GET_IDLUN;
unsigned IOCTL_SCSI_IOCTL_PROBE_HOST = SCSI_IOCTL_PROBE_HOST;
unsigned IOCTL_SCSI_IOCTL_TAGGED_DISABLE = SCSI_IOCTL_TAGGED_DISABLE;
@@ -894,6 +897,7 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
unsigned IOCTL_SIOCNRGETPARMS = SIOCNRGETPARMS;
unsigned IOCTL_SIOCNRRTCTL = SIOCNRRTCTL;
unsigned IOCTL_SIOCNRSETPARMS = SIOCNRSETPARMS;
+#endif
unsigned IOCTL_TIOCGSERIAL = TIOCGSERIAL;
unsigned IOCTL_TIOCSERGETMULTI = TIOCSERGETMULTI;
unsigned IOCTL_TIOCSERSETMULTI = TIOCSERSETMULTI;
@@ -964,7 +968,7 @@ CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phdr);
CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phnum);
#endif // SANITIZER_LINUX || SANITIZER_FREEBSD
-#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
+#if SANITIZER_GLIBC || SANITIZER_FREEBSD
CHECK_TYPE_SIZE(glob_t);
CHECK_SIZE_AND_OFFSET(glob_t, gl_pathc);
CHECK_SIZE_AND_OFFSET(glob_t, gl_pathv);
@@ -975,7 +979,7 @@ CHECK_SIZE_AND_OFFSET(glob_t, gl_readdir);
CHECK_SIZE_AND_OFFSET(glob_t, gl_opendir);
CHECK_SIZE_AND_OFFSET(glob_t, gl_lstat);
CHECK_SIZE_AND_OFFSET(glob_t, gl_stat);
-#endif
+#endif // SANITIZER_GLIBC || SANITIZER_FREEBSD
CHECK_TYPE_SIZE(addrinfo);
CHECK_SIZE_AND_OFFSET(addrinfo, ai_flags);
@@ -998,17 +1002,27 @@ CHECK_TYPE_SIZE(iovec);
CHECK_SIZE_AND_OFFSET(iovec, iov_base);
CHECK_SIZE_AND_OFFSET(iovec, iov_len);
+// In POSIX, int msg_iovlen; socklen_t msg_controllen; socklen_t cmsg_len; but
+// many implementations don't conform to the standard. Since we pick the
+// non-conforming glibc definition, exclude the checks for musl (incompatible
+// sizes but compatible offsets).
CHECK_TYPE_SIZE(msghdr);
CHECK_SIZE_AND_OFFSET(msghdr, msg_name);
CHECK_SIZE_AND_OFFSET(msghdr, msg_namelen);
CHECK_SIZE_AND_OFFSET(msghdr, msg_iov);
+#if SANITIZER_GLIBC || SANITIZER_ANDROID
CHECK_SIZE_AND_OFFSET(msghdr, msg_iovlen);
+#endif
CHECK_SIZE_AND_OFFSET(msghdr, msg_control);
+#if SANITIZER_GLIBC || SANITIZER_ANDROID
CHECK_SIZE_AND_OFFSET(msghdr, msg_controllen);
+#endif
CHECK_SIZE_AND_OFFSET(msghdr, msg_flags);
CHECK_TYPE_SIZE(cmsghdr);
+#if SANITIZER_GLIBC || SANITIZER_ANDROID
CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_len);
+#endif
CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_level);
CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_type);
@@ -1116,7 +1130,7 @@ CHECK_SIZE_AND_OFFSET(mntent, mnt_passno);
CHECK_TYPE_SIZE(ether_addr);
-#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
+#if SANITIZER_GLIBC || SANITIZER_FREEBSD
CHECK_TYPE_SIZE(ipc_perm);
# if SANITIZER_FREEBSD
CHECK_SIZE_AND_OFFSET(ipc_perm, key);
@@ -1178,7 +1192,7 @@ CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_dstaddr);
CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_data);
#endif
-#if SANITIZER_LINUX
+#if SANITIZER_GLIBC || SANITIZER_ANDROID
COMPILER_CHECK(sizeof(__sanitizer_struct_mallinfo) == sizeof(struct mallinfo));
#endif
@@ -1228,7 +1242,7 @@ COMPILER_CHECK(__sanitizer_XDR_DECODE == XDR_DECODE);
COMPILER_CHECK(__sanitizer_XDR_FREE == XDR_FREE);
#endif
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#if SANITIZER_GLIBC
COMPILER_CHECK(sizeof(__sanitizer_FILE) <= sizeof(FILE));
CHECK_SIZE_AND_OFFSET(FILE, _flags);
CHECK_SIZE_AND_OFFSET(FILE, _IO_read_ptr);
@@ -1245,9 +1259,7 @@ CHECK_SIZE_AND_OFFSET(FILE, _IO_save_end);
CHECK_SIZE_AND_OFFSET(FILE, _markers);
CHECK_SIZE_AND_OFFSET(FILE, _chain);
CHECK_SIZE_AND_OFFSET(FILE, _fileno);
-#endif
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
COMPILER_CHECK(sizeof(__sanitizer__obstack_chunk) <= sizeof(_obstack_chunk));
CHECK_SIZE_AND_OFFSET(_obstack_chunk, limit);
CHECK_SIZE_AND_OFFSET(_obstack_chunk, prev);
@@ -1262,7 +1274,7 @@ CHECK_SIZE_AND_OFFSET(cookie_io_functions_t, read);
CHECK_SIZE_AND_OFFSET(cookie_io_functions_t, write);
CHECK_SIZE_AND_OFFSET(cookie_io_functions_t, seek);
CHECK_SIZE_AND_OFFSET(cookie_io_functions_t, close);
-#endif
+#endif // SANITIZER_GLIBC
#if SANITIZER_LINUX || SANITIZER_FREEBSD
CHECK_TYPE_SIZE(sem_t);
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h
index c3f9c1f0986d..d3e9a6353909 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h
@@ -99,9 +99,9 @@ const unsigned struct_kernel_stat64_sz = 144;
const unsigned struct___old_kernel_stat_sz = 0;
const unsigned struct_kernel_stat_sz = 64;
const unsigned struct_kernel_stat64_sz = 104;
-#elif defined(__riscv) && __riscv_xlen == 64
+#elif SANITIZER_RISCV64
const unsigned struct_kernel_stat_sz = 128;
-const unsigned struct_kernel_stat64_sz = 104;
+const unsigned struct_kernel_stat64_sz = 0; // RISCV64 does not use stat64
#endif
struct __sanitizer_perf_event_attr {
unsigned type;
@@ -443,6 +443,8 @@ struct __sanitizer_cmsghdr {
int cmsg_type;
};
#else
+// In POSIX, int msg_iovlen; socklen_t msg_controllen; socklen_t cmsg_len; but
+// many implementations don't conform to the standard.
struct __sanitizer_msghdr {
void *msg_name;
unsigned msg_namelen;
@@ -804,7 +806,7 @@ typedef void __sanitizer_FILE;
#if SANITIZER_LINUX && !SANITIZER_ANDROID && \
(defined(__i386) || defined(__x86_64) || defined(__mips64) || \
defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \
- defined(__s390__))
+ defined(__s390__) || SANITIZER_RISCV64)
extern unsigned struct_user_regs_struct_sz;
extern unsigned struct_user_fpregs_struct_sz;
extern unsigned struct_user_fpxregs_struct_sz;
@@ -981,7 +983,6 @@ extern unsigned struct_vt_mode_sz;
#if SANITIZER_LINUX && !SANITIZER_ANDROID
extern unsigned struct_ax25_parms_struct_sz;
-extern unsigned struct_cyclades_monitor_sz;
extern unsigned struct_input_keymap_entry_sz;
extern unsigned struct_ipx_config_data_sz;
extern unsigned struct_kbdiacrs_sz;
@@ -1326,15 +1327,6 @@ extern unsigned IOCTL_VT_WAITACTIVE;
#endif // SANITIZER_LINUX
#if SANITIZER_LINUX && !SANITIZER_ANDROID
-extern unsigned IOCTL_CYGETDEFTHRESH;
-extern unsigned IOCTL_CYGETDEFTIMEOUT;
-extern unsigned IOCTL_CYGETMON;
-extern unsigned IOCTL_CYGETTHRESH;
-extern unsigned IOCTL_CYGETTIMEOUT;
-extern unsigned IOCTL_CYSETDEFTHRESH;
-extern unsigned IOCTL_CYSETDEFTIMEOUT;
-extern unsigned IOCTL_CYSETTHRESH;
-extern unsigned IOCTL_CYSETTIMEOUT;
extern unsigned IOCTL_EQL_EMANCIPATE;
extern unsigned IOCTL_EQL_ENSLAVE;
extern unsigned IOCTL_EQL_GETMASTRCFG;
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_solaris.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_solaris.cpp
index 6ec1a1bdd114..565b31f68aae 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_solaris.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_solaris.cpp
@@ -202,7 +202,8 @@ CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_name);
CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phdr);
CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phnum);
-CHECK_TYPE_SIZE(glob_t);
+// There are additional fields we are not interested in.
+COMPILER_CHECK(sizeof(__sanitizer_glob_t) <= sizeof(glob_t));
CHECK_SIZE_AND_OFFSET(glob_t, gl_pathc);
CHECK_SIZE_AND_OFFSET(glob_t, gl_pathv);
CHECK_SIZE_AND_OFFSET(glob_t, gl_offs);
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix.cpp
index e21661b42f8d..2e080098283f 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix.cpp
@@ -239,6 +239,7 @@ bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
return true;
}
+#if !SANITIZER_MAC
void DumpProcessMap() {
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
const sptr kBufSize = 4095;
@@ -252,6 +253,7 @@ void DumpProcessMap() {
Report("End of process memory map.\n");
UnmapOrDie(filename, kBufSize);
}
+#endif
const char *GetPwd() {
return GetEnv("PWD");
@@ -293,7 +295,7 @@ uptr SignalContext::GetAddress() const {
bool SignalContext::IsMemoryAccess() const {
auto si = static_cast<const siginfo_t *>(siginfo);
- return si->si_signo == SIGSEGV;
+ return si->si_signo == SIGSEGV || si->si_signo == SIGBUS;
}
int SignalContext::GetType() const {
@@ -354,11 +356,11 @@ int GetNamedMappingFd(const char *name, uptr size, int *flags) {
int fd = ReserveStandardFds(
internal_open(shmname, O_RDWR | O_CREAT | O_TRUNC | o_cloexec, S_IRWXU));
CHECK_GE(fd, 0);
- if (!o_cloexec) {
- int res = fcntl(fd, F_SETFD, FD_CLOEXEC);
- CHECK_EQ(0, res);
- }
int res = internal_ftruncate(fd, size);
+#if !defined(O_CLOEXEC)
+ res = fcntl(fd, F_SETFD, FD_CLOEXEC);
+ CHECK_EQ(0, res);
+#endif
CHECK_EQ(0, res);
res = internal_unlink(shmname);
CHECK_EQ(0, res);
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix.h
index a1b49702da23..e1a2b48e5cd8 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix.h
@@ -17,7 +17,6 @@
#include "sanitizer_internal_defs.h"
#include "sanitizer_platform_limits_freebsd.h"
#include "sanitizer_platform_limits_netbsd.h"
-#include "sanitizer_platform_limits_openbsd.h"
#include "sanitizer_platform_limits_posix.h"
#include "sanitizer_platform_limits_solaris.h"
@@ -42,6 +41,7 @@ uptr internal_mmap(void *addr, uptr length, int prot, int flags,
int fd, u64 offset);
uptr internal_munmap(void *addr, uptr length);
int internal_mprotect(void *addr, uptr length, int prot);
+int internal_madvise(uptr addr, uptr length, int advice);
// OS
uptr internal_filesize(fd_t fd); // -1 on error.
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix_libcdep.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix_libcdep.cpp
index f920172c06d6..12603da1750d 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix_libcdep.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix_libcdep.cpp
@@ -18,7 +18,6 @@
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
#include "sanitizer_platform_limits_netbsd.h"
-#include "sanitizer_platform_limits_openbsd.h"
#include "sanitizer_platform_limits_posix.h"
#include "sanitizer_platform_limits_solaris.h"
#include "sanitizer_posix.h"
@@ -61,27 +60,24 @@ void ReleaseMemoryPagesToOS(uptr beg, uptr end) {
uptr beg_aligned = RoundUpTo(beg, page_size);
uptr end_aligned = RoundDownTo(end, page_size);
if (beg_aligned < end_aligned)
- // In the default Solaris compilation environment, madvise() is declared
- // to take a caddr_t arg; casting it to void * results in an invalid
- // conversion error, so use char * instead.
- madvise((char *)beg_aligned, end_aligned - beg_aligned,
- SANITIZER_MADVISE_DONTNEED);
+ internal_madvise(beg_aligned, end_aligned - beg_aligned,
+ SANITIZER_MADVISE_DONTNEED);
}
void SetShadowRegionHugePageMode(uptr addr, uptr size) {
#ifdef MADV_NOHUGEPAGE // May not be defined on old systems.
if (common_flags()->no_huge_pages_for_shadow)
- madvise((char *)addr, size, MADV_NOHUGEPAGE);
+ internal_madvise(addr, size, MADV_NOHUGEPAGE);
else
- madvise((char *)addr, size, MADV_HUGEPAGE);
+ internal_madvise(addr, size, MADV_HUGEPAGE);
#endif // MADV_NOHUGEPAGE
}
bool DontDumpShadowMemory(uptr addr, uptr length) {
#if defined(MADV_DONTDUMP)
- return madvise((char *)addr, length, MADV_DONTDUMP) == 0;
+ return internal_madvise(addr, length, MADV_DONTDUMP) == 0;
#elif defined(MADV_NOCORE)
- return madvise((char *)addr, length, MADV_NOCORE) == 0;
+ return internal_madvise(addr, length, MADV_NOCORE) == 0;
#else
return true;
#endif // MADV_DONTDUMP
@@ -169,7 +165,12 @@ bool SupportsColoredOutput(fd_t fd) {
#if !SANITIZER_GO
// TODO(glider): different tools may require different altstack size.
-static const uptr kAltStackSize = SIGSTKSZ * 4; // SIGSTKSZ is not enough.
+static uptr GetAltStackSize() {
+ // Note: since GLIBC_2.31, SIGSTKSZ may be a function call, so this may be
+ // more costly that you think. However GetAltStackSize is only call 2-3 times
+ // per thread so don't cache the evaluation.
+ return SIGSTKSZ * 4;
+}
void SetAlternateSignalStack() {
stack_t altstack, oldstack;
@@ -180,10 +181,10 @@ void SetAlternateSignalStack() {
// TODO(glider): the mapped stack should have the MAP_STACK flag in the
// future. It is not required by man 2 sigaltstack now (they're using
// malloc()).
- void* base = MmapOrDie(kAltStackSize, __func__);
+ void *base = MmapOrDie(GetAltStackSize(), __func__);
altstack.ss_sp = (char*) base;
altstack.ss_flags = 0;
- altstack.ss_size = kAltStackSize;
+ altstack.ss_size = GetAltStackSize();
CHECK_EQ(0, sigaltstack(&altstack, nullptr));
}
@@ -191,7 +192,7 @@ void UnsetAlternateSignalStack() {
stack_t altstack, oldstack;
altstack.ss_sp = nullptr;
altstack.ss_flags = SS_DISABLE;
- altstack.ss_size = kAltStackSize; // Some sane value required on Darwin.
+ altstack.ss_size = GetAltStackSize(); // Some sane value required on Darwin.
CHECK_EQ(0, sigaltstack(&altstack, &oldstack));
UnmapOrDie(oldstack.ss_sp, oldstack.ss_size);
}
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps.h
index 665ed45fa93e..a56640db43e8 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps.h
@@ -16,7 +16,7 @@
#include "sanitizer_platform.h"
#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \
- SANITIZER_OPENBSD || SANITIZER_MAC || SANITIZER_SOLARIS || \
+ SANITIZER_MAC || SANITIZER_SOLARIS || \
SANITIZER_FUCHSIA
#include "sanitizer_common.h"
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_bsd.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_bsd.cpp
index 02ff7c0e91a8..1f489b71ad99 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_bsd.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_bsd.cpp
@@ -7,11 +7,11 @@
//===----------------------------------------------------------------------===//
//
// Information about the process mappings
-// (FreeBSD, OpenBSD and NetBSD-specific parts).
+// (FreeBSD and NetBSD-specific parts).
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
-#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_OPENBSD
+#if SANITIZER_FREEBSD || SANITIZER_NETBSD
#include "sanitizer_common.h"
#if SANITIZER_FREEBSD
#include "sanitizer_freebsd.h"
@@ -28,11 +28,6 @@
#endif
#include <limits.h>
-#if SANITIZER_OPENBSD
-#define KVME_PROT_READ KVE_PROT_READ
-#define KVME_PROT_WRITE KVE_PROT_WRITE
-#define KVME_PROT_EXEC KVE_PROT_EXEC
-#endif
// Fix 'kinfo_vmentry' definition on FreeBSD prior v9.2 in 32-bit mode.
#if SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32)
@@ -51,10 +46,6 @@ void ReadProcMaps(ProcSelfMapsBuff *proc_maps) {
KERN_PROC,
KERN_PROC_VMMAP,
getpid()
-#elif SANITIZER_OPENBSD
- CTL_KERN,
- KERN_PROC_VMMAP,
- getpid()
#elif SANITIZER_NETBSD
CTL_VM,
VM_PROC,
@@ -71,28 +62,12 @@ void ReadProcMaps(ProcSelfMapsBuff *proc_maps) {
CHECK_EQ(Err, 0);
CHECK_GT(Size, 0);
-#if !SANITIZER_OPENBSD
size_t MmapedSize = Size * 4 / 3;
void *VmMap = MmapOrDie(MmapedSize, "ReadProcMaps()");
Size = MmapedSize;
Err = internal_sysctl(Mib, ARRAY_SIZE(Mib), VmMap, &Size, NULL, 0);
CHECK_EQ(Err, 0);
proc_maps->data = (char *)VmMap;
-#else
- size_t PageSize = GetPageSize();
- size_t MmapedSize = Size;
- MmapedSize = ((MmapedSize - 1) / PageSize + 1) * PageSize;
- char *Mem = (char *)MmapOrDie(MmapedSize, "ReadProcMaps()");
- Size = 2 * Size + 10 * sizeof(struct kinfo_vmentry);
- if (Size > 0x10000)
- Size = 0x10000;
- Size = (Size / sizeof(struct kinfo_vmentry)) * sizeof(struct kinfo_vmentry);
- Err = internal_sysctl(Mib, ARRAY_SIZE(Mib), Mem, &Size, NULL, 0);
- CHECK_EQ(Err, 0);
- MmapedSize = Size;
- proc_maps->data = Mem;
-#endif
-
proc_maps->mmaped_size = MmapedSize;
proc_maps->len = Size;
}
@@ -117,13 +92,11 @@ bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
if ((VmEntry->kve_protection & KVME_PROT_EXEC) != 0)
segment->protection |= kProtectionExecute;
-#if !SANITIZER_OPENBSD
if (segment->filename != NULL && segment->filename_size > 0) {
internal_snprintf(segment->filename,
Min(segment->filename_size, (uptr)PATH_MAX), "%s",
VmEntry->kve_path);
}
-#endif
#if SANITIZER_FREEBSD
data_.current += VmEntry->kve_structsize;
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_common.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_common.cpp
index e0cb47f8ca9a..f2cfcffaf476 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_common.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_common.cpp
@@ -12,7 +12,7 @@
#include "sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
- SANITIZER_OPENBSD || SANITIZER_SOLARIS
+ SANITIZER_SOLARIS
#include "sanitizer_common.h"
#include "sanitizer_placement_new.h"
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_solaris.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_solaris.cpp
index 8793423a6017..bf813f235bb7 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_solaris.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_solaris.cpp
@@ -9,13 +9,13 @@
// Information about the process mappings (Solaris-specific parts).
//===----------------------------------------------------------------------===//
+// Before Solaris 11.4, <procfs.h> doesn't work in a largefile environment.
+#undef _FILE_OFFSET_BITS
#include "sanitizer_platform.h"
#if SANITIZER_SOLARIS
#include "sanitizer_common.h"
#include "sanitizer_procmaps.h"
-// Before Solaris 11.4, <procfs.h> doesn't work in a largefile environment.
-#undef _FILE_OFFSET_BITS
#include <procfs.h>
#include <limits.h>
@@ -35,7 +35,8 @@ bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
char *last = data_.proc_self_maps.data + data_.proc_self_maps.len;
if (data_.current >= last) return false;
- prxmap_t *xmapentry = (prxmap_t*)data_.current;
+ prxmap_t *xmapentry =
+ const_cast<prxmap_t *>(reinterpret_cast<const prxmap_t *>(data_.current));
segment->start = (uptr)xmapentry->pr_vaddr;
segment->end = (uptr)(xmapentry->pr_vaddr + xmapentry->pr_size);
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_ptrauth.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_ptrauth.h
index 4d0d96a64f62..a288068bf943 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_ptrauth.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_ptrauth.h
@@ -18,4 +18,6 @@
#define ptrauth_string_discriminator(__string) ((int)0)
#endif
+#define STRIP_PC(pc) ((uptr)ptrauth_strip(pc, 0))
+
#endif // SANITIZER_PTRAUTH_H
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_rtems.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_rtems.cpp
index 29bcfcfa6f15..d58bd08fb1a8 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_rtems.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_rtems.cpp
@@ -108,8 +108,6 @@ void SetAlternateSignalStack() {}
void UnsetAlternateSignalStack() {}
void InitTlsSize() {}
-void PrintModuleMap() {}
-
void SignalContext::DumpAllRegisters(void *context) {}
const char *DescribeSignalOrException(int signo) { UNIMPLEMENTED(); }
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_signal_interceptors.inc b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_signal_interceptors.inc
index 68d9eb65968d..cefb870f7e25 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_signal_interceptors.inc
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_signal_interceptors.inc
@@ -53,7 +53,10 @@ INTERCEPTOR(uptr, signal, int signum, uptr handler) {
INTERCEPTOR(int, sigaction_symname, int signum,
const __sanitizer_sigaction *act, __sanitizer_sigaction *oldact) {
- if (GetHandleSignalMode(signum) == kHandleSignalExclusive) return 0;
+ if (GetHandleSignalMode(signum) == kHandleSignalExclusive) {
+ if (!oldact) return 0;
+ act = nullptr;
+ }
SIGNAL_INTERCEPTOR_SIGACTION_IMPL(signum, act, oldact);
}
#define INIT_SIGACTION COMMON_INTERCEPT_FUNCTION(sigaction_symname)
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_solaris.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_solaris.cpp
index 035f2d0ca292..8789dcd10a95 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_solaris.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_solaris.cpp
@@ -74,6 +74,20 @@ DECLARE__REAL_AND_INTERNAL(int, mprotect, void *addr, uptr length, int prot) {
return _REAL(mprotect)(addr, length, prot);
}
+// Illumos' declaration of madvise cannot be made visible if _XOPEN_SOURCE
+// is defined as g++ does on Solaris.
+//
+// This declaration is consistent with Solaris 11.4. Both Illumos and Solaris
+// versions older than 11.4 declared madvise with a caddr_t as the first
+// argument, but we don't currently support Solaris versions older than 11.4,
+// and as mentioned above the declaration is not visible on Illumos so we can
+// use any declaration we like on Illumos.
+extern "C" int madvise(void *, size_t, int);
+
+int internal_madvise(uptr addr, uptr length, int advice) {
+ return madvise((void *)addr, length, advice);
+}
+
DECLARE__REAL_AND_INTERNAL(uptr, close, fd_t fd) {
return _REAL(close)(fd);
}
@@ -146,10 +160,6 @@ DECLARE__REAL_AND_INTERNAL(uptr, sched_yield, void) {
return sched_yield();
}
-DECLARE__REAL_AND_INTERNAL(void, _exit, int exitcode) {
- _exit(exitcode);
-}
-
DECLARE__REAL_AND_INTERNAL(uptr, execve, const char *filename,
char *const argv[], char *const envp[]) {
return _REAL(execve)(filename, argv, envp);
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
index 30073a96ceeb..44a95214e38b 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
@@ -115,6 +115,12 @@ void StackDepotUnlockAll() {
theDepot.UnlockAll();
}
+void StackDepotPrintAll() {
+#if !SANITIZER_GO
+ theDepot.PrintAll();
+#endif
+}
+
bool StackDepotReverseMap::IdDescPair::IdComparator(
const StackDepotReverseMap::IdDescPair &a,
const StackDepotReverseMap::IdDescPair &b) {
@@ -139,8 +145,7 @@ StackTrace StackDepotReverseMap::Get(u32 id) {
if (!map_.size())
return StackTrace();
IdDescPair pair = {id, nullptr};
- uptr idx =
- InternalLowerBound(map_, 0, map_.size(), pair, IdDescPair::IdComparator);
+ uptr idx = InternalLowerBound(map_, pair, IdDescPair::IdComparator);
if (idx > map_.size() || map_[idx].id != id)
return StackTrace();
return map_[idx].desc->load();
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.h
index bf29cb9a006e..0e26c1fc37c4 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.h
@@ -41,6 +41,7 @@ StackTrace StackDepotGet(u32 id);
void StackDepotLockAll();
void StackDepotUnlockAll();
+void StackDepotPrintAll();
// Instantiating this class creates a snapshot of StackDepot which can be
// efficiently queried with StackDepotGet(). You can use it concurrently with
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stackdepotbase.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stackdepotbase.h
index ef1b4f7f7055..1af2c1892eff 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stackdepotbase.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stackdepotbase.h
@@ -13,9 +13,11 @@
#ifndef SANITIZER_STACKDEPOTBASE_H
#define SANITIZER_STACKDEPOTBASE_H
+#include <stdio.h>
+
+#include "sanitizer_atomic.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
-#include "sanitizer_atomic.h"
#include "sanitizer_persistent_allocator.h"
namespace __sanitizer {
@@ -34,6 +36,7 @@ class StackDepotBase {
void LockAll();
void UnlockAll();
+ void PrintAll();
private:
static Node *find(Node *s, args_type args, u32 hash);
@@ -172,6 +175,21 @@ void StackDepotBase<Node, kReservedBits, kTabSizeLog>::UnlockAll() {
}
}
+template <class Node, int kReservedBits, int kTabSizeLog>
+void StackDepotBase<Node, kReservedBits, kTabSizeLog>::PrintAll() {
+ for (int i = 0; i < kTabSize; ++i) {
+ atomic_uintptr_t *p = &tab[i];
+ lock(p);
+ uptr v = atomic_load(p, memory_order_relaxed);
+ Node *s = (Node *)(v & ~1UL);
+ for (; s; s = s->link) {
+ Printf("Stack for id %u:\n", s->id);
+ s->load().Print();
+ }
+ unlock(p, s);
+ }
+}
+
} // namespace __sanitizer
#endif // SANITIZER_STACKDEPOTBASE_H
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.cpp
index ef14fb704eed..b0487d8987db 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.cpp
@@ -10,9 +10,11 @@
// run-time libraries.
//===----------------------------------------------------------------------===//
+#include "sanitizer_stacktrace.h"
+
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
-#include "sanitizer_stacktrace.h"
+#include "sanitizer_platform.h"
namespace __sanitizer {
@@ -21,6 +23,28 @@ uptr StackTrace::GetNextInstructionPc(uptr pc) {
return pc + 8;
#elif defined(__powerpc__) || defined(__arm__) || defined(__aarch64__)
return pc + 4;
+#elif SANITIZER_RISCV64
+ // Current check order is 4 -> 2 -> 6 -> 8
+ u8 InsnByte = *(u8 *)(pc);
+ if (((InsnByte & 0x3) == 0x3) && ((InsnByte & 0x1c) != 0x1c)) {
+ // xxxxxxxxxxxbbb11 | 32 bit | bbb != 111
+ return pc + 4;
+ }
+ if ((InsnByte & 0x3) != 0x3) {
+ // xxxxxxxxxxxxxxaa | 16 bit | aa != 11
+ return pc + 2;
+ }
+ // RISC-V encoding allows instructions to be up to 8 bytes long
+ if ((InsnByte & 0x3f) == 0x1f) {
+ // xxxxxxxxxx011111 | 48 bit |
+ return pc + 6;
+ }
+ if ((InsnByte & 0x7f) == 0x3f) {
+ // xxxxxxxxx0111111 | 64 bit |
+ return pc + 8;
+ }
+ // Bail out if we could not figure out the instruction size.
+ return 0;
#else
return pc + 1;
#endif
@@ -94,6 +118,9 @@ void BufferedStackTrace::UnwindFast(uptr pc, uptr bp, uptr stack_top,
uhwptr pc1 = caller_frame[2];
#elif defined(__s390__)
uhwptr pc1 = frame[14];
+#elif defined(__riscv)
+ // frame[-1] contains the return address
+ uhwptr pc1 = frame[-1];
#else
uhwptr pc1 = frame[1];
#endif
@@ -106,7 +133,13 @@ void BufferedStackTrace::UnwindFast(uptr pc, uptr bp, uptr stack_top,
trace_buffer[size++] = (uptr) pc1;
}
bottom = (uptr)frame;
- frame = GetCanonicFrame((uptr)frame[0], stack_top, bottom);
+#if defined(__riscv)
+ // frame[-2] contains the fp of the previous frame
+ uptr new_bp = (uptr)frame[-2];
+#else
+ uptr new_bp = (uptr)frame[0];
+#endif
+ frame = GetCanonicFrame(new_bp, stack_top, bottom);
}
}
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h
index f1f29e9f32ee..15616f899d01 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h
@@ -13,6 +13,7 @@
#define SANITIZER_STACKTRACE_H
#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform.h"
namespace __sanitizer {
@@ -24,8 +25,6 @@ static const u32 kStackTraceMax = 256;
# define SANITIZER_CAN_FAST_UNWIND 0
#elif SANITIZER_WINDOWS
# define SANITIZER_CAN_FAST_UNWIND 0
-#elif SANITIZER_OPENBSD
-# define SANITIZER_CAN_FAST_UNWIND 0
#else
# define SANITIZER_CAN_FAST_UNWIND 1
#endif
@@ -33,7 +32,7 @@ static const u32 kStackTraceMax = 256;
// Fast unwind is the only option on Mac for now; we will need to
// revisit this macro when slow unwind works on Mac, see
// https://github.com/google/sanitizers/issues/137
-#if SANITIZER_MAC || SANITIZER_OPENBSD || SANITIZER_RTEMS
+#if SANITIZER_MAC || SANITIZER_RTEMS
# define SANITIZER_CAN_SLOW_UNWIND 0
#else
# define SANITIZER_CAN_SLOW_UNWIND 1
@@ -68,8 +67,6 @@ struct StackTrace {
static uptr GetCurrentPc();
static inline uptr GetPreviousInstructionPc(uptr pc);
static uptr GetNextInstructionPc(uptr pc);
- typedef bool (*SymbolizeCallback)(const void *pc, char *out_buffer,
- int out_size);
};
// Performance-critical, must be in the header.
@@ -85,6 +82,14 @@ uptr StackTrace::GetPreviousInstructionPc(uptr pc) {
return pc - 4;
#elif defined(__sparc__) || defined(__mips__)
return pc - 8;
+#elif SANITIZER_RISCV64
+ // RV-64 has variable instruction length...
+ // C extensions give us 2-byte instructions
+ // RV-64 has 4-byte instructions
+ // + RISCV architecture allows instructions up to 8 bytes
+ // It seems difficult to figure out the exact instruction length -
+ // pc - 2 seems like a safe option for the purposes of stack tracing
+ return pc - 2;
#else
return pc - 1;
#endif
@@ -143,9 +148,17 @@ struct BufferedStackTrace : public StackTrace {
friend class FastUnwindTest;
};
+#if defined(__s390x__)
+static const uptr kFrameSize = 160;
+#elif defined(__s390__)
+static const uptr kFrameSize = 96;
+#else
+static const uptr kFrameSize = 2 * sizeof(uhwptr);
+#endif
+
// Check if given pointer points into allocated stack area.
static inline bool IsValidFrame(uptr frame, uptr stack_top, uptr stack_bottom) {
- return frame > stack_bottom && frame < stack_top - 2 * sizeof (uhwptr);
+ return frame > stack_bottom && frame < stack_top - kFrameSize;
}
} // namespace __sanitizer
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_libcdep.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_libcdep.cpp
index 4ef305cf1799..7808ba9b0f57 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_libcdep.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_libcdep.cpp
@@ -26,17 +26,23 @@ void StackTrace::Print() const {
InternalScopedString frame_desc(GetPageSizeCached() * 2);
InternalScopedString dedup_token(GetPageSizeCached());
int dedup_frames = common_flags()->dedup_token_length;
+ bool symbolize = RenderNeedsSymbolization(common_flags()->stack_trace_format);
uptr frame_num = 0;
for (uptr i = 0; i < size && trace[i]; i++) {
// PCs in stack traces are actually the return addresses, that is,
// addresses of the next instructions after the call.
uptr pc = GetPreviousInstructionPc(trace[i]);
- SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(pc);
+ SymbolizedStack *frames;
+ if (symbolize)
+ frames = Symbolizer::GetOrInit()->SymbolizePC(pc);
+ else
+ frames = SymbolizedStack::New(pc);
CHECK(frames);
for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
frame_desc.clear();
RenderFrame(&frame_desc, common_flags()->stack_trace_format, frame_num++,
- cur->info, common_flags()->symbolize_vs_style,
+ cur->info.address, symbolize ? &cur->info : nullptr,
+ common_flags()->symbolize_vs_style,
common_flags()->strip_path_prefix);
Printf("%s\n", frame_desc.data());
if (dedup_frames-- > 0) {
@@ -108,7 +114,12 @@ void __sanitizer_symbolize_pc(uptr pc, const char *fmt, char *out_buf,
uptr out_buf_size) {
if (!out_buf_size) return;
pc = StackTrace::GetPreviousInstructionPc(pc);
- SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc);
+ SymbolizedStack *frame;
+ bool symbolize = RenderNeedsSymbolization(fmt);
+ if (symbolize)
+ frame = Symbolizer::GetOrInit()->SymbolizePC(pc);
+ else
+ frame = SymbolizedStack::New(pc);
if (!frame) {
internal_strncpy(out_buf, "<can't symbolize>", out_buf_size);
out_buf[out_buf_size - 1] = 0;
@@ -121,7 +132,8 @@ void __sanitizer_symbolize_pc(uptr pc, const char *fmt, char *out_buf,
for (SymbolizedStack *cur = frame; cur && out_buf < out_end;
cur = cur->next) {
frame_desc.clear();
- RenderFrame(&frame_desc, fmt, frame_num++, cur->info,
+ RenderFrame(&frame_desc, fmt, frame_num++, cur->info.address,
+ symbolize ? &cur->info : nullptr,
common_flags()->symbolize_vs_style,
common_flags()->strip_path_prefix);
if (!frame_desc.length())
@@ -134,6 +146,7 @@ void __sanitizer_symbolize_pc(uptr pc, const char *fmt, char *out_buf,
}
CHECK(out_buf <= out_end);
*out_buf = 0;
+ frame->ClearAll();
}
SANITIZER_INTERFACE_ATTRIBUTE
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.cpp
index 150ff475316b..c998322d3944 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.cpp
@@ -107,8 +107,14 @@ static const char *DemangleFunctionName(const char *function) {
static const char kDefaultFormat[] = " #%n %p %F %L";
void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
- const AddressInfo &info, bool vs_style,
+ uptr address, const AddressInfo *info, bool vs_style,
const char *strip_path_prefix, const char *strip_func_prefix) {
+ // info will be null in the case where symbolization is not needed for the
+ // given format. This ensures that the code below will get a hard failure
+ // rather than print incorrect information in case RenderNeedsSymbolization
+ // ever ends up out of sync with this function. If non-null, the addresses
+ // should match.
+ CHECK(!info || address == info->address);
if (0 == internal_strcmp(format, "DEFAULT"))
format = kDefaultFormat;
for (const char *p = format; *p != '\0'; p++) {
@@ -126,71 +132,70 @@ void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
buffer->append("%zu", frame_no);
break;
case 'p':
- buffer->append("0x%zx", info.address);
+ buffer->append("0x%zx", address);
break;
case 'm':
- buffer->append("%s", StripPathPrefix(info.module, strip_path_prefix));
+ buffer->append("%s", StripPathPrefix(info->module, strip_path_prefix));
break;
case 'o':
- buffer->append("0x%zx", info.module_offset);
+ buffer->append("0x%zx", info->module_offset);
break;
case 'f':
- buffer->append("%s",
- DemangleFunctionName(
- StripFunctionName(info.function, strip_func_prefix)));
+ buffer->append("%s", DemangleFunctionName(StripFunctionName(
+ info->function, strip_func_prefix)));
break;
case 'q':
- buffer->append("0x%zx", info.function_offset != AddressInfo::kUnknown
- ? info.function_offset
+ buffer->append("0x%zx", info->function_offset != AddressInfo::kUnknown
+ ? info->function_offset
: 0x0);
break;
case 's':
- buffer->append("%s", StripPathPrefix(info.file, strip_path_prefix));
+ buffer->append("%s", StripPathPrefix(info->file, strip_path_prefix));
break;
case 'l':
- buffer->append("%d", info.line);
+ buffer->append("%d", info->line);
break;
case 'c':
- buffer->append("%d", info.column);
+ buffer->append("%d", info->column);
break;
// Smarter special cases.
case 'F':
// Function name and offset, if file is unknown.
- if (info.function) {
- buffer->append("in %s",
- DemangleFunctionName(
- StripFunctionName(info.function, strip_func_prefix)));
- if (!info.file && info.function_offset != AddressInfo::kUnknown)
- buffer->append("+0x%zx", info.function_offset);
+ if (info->function) {
+ buffer->append("in %s", DemangleFunctionName(StripFunctionName(
+ info->function, strip_func_prefix)));
+ if (!info->file && info->function_offset != AddressInfo::kUnknown)
+ buffer->append("+0x%zx", info->function_offset);
}
break;
case 'S':
// File/line information.
- RenderSourceLocation(buffer, info.file, info.line, info.column, vs_style,
- strip_path_prefix);
+ RenderSourceLocation(buffer, info->file, info->line, info->column,
+ vs_style, strip_path_prefix);
break;
case 'L':
// Source location, or module location.
- if (info.file) {
- RenderSourceLocation(buffer, info.file, info.line, info.column,
+ if (info->file) {
+ RenderSourceLocation(buffer, info->file, info->line, info->column,
vs_style, strip_path_prefix);
- } else if (info.module) {
- RenderModuleLocation(buffer, info.module, info.module_offset,
- info.module_arch, strip_path_prefix);
+ } else if (info->module) {
+ RenderModuleLocation(buffer, info->module, info->module_offset,
+ info->module_arch, strip_path_prefix);
} else {
buffer->append("(<unknown module>)");
}
break;
case 'M':
// Module basename and offset, or PC.
- if (info.address & kExternalPCBit)
- {} // There PCs are not meaningful.
- else if (info.module)
+ if (address & kExternalPCBit) {
+ // These PCs are not meaningful.
+ } else if (info->module) {
// Always strip the module name for %M.
- RenderModuleLocation(buffer, StripModuleName(info.module),
- info.module_offset, info.module_arch, "");
- else
- buffer->append("(%p)", (void *)info.address);
+ RenderModuleLocation(buffer, StripModuleName(info->module),
+ info->module_offset, info->module_arch, "");
+ } else {
+ buffer->append("(%p)", (void *)address);
+ }
break;
default:
Report("Unsupported specifier in stack frame format: %c (0x%zx)!\n", *p,
@@ -200,6 +205,29 @@ void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
}
}
+bool RenderNeedsSymbolization(const char *format) {
+ if (0 == internal_strcmp(format, "DEFAULT"))
+ format = kDefaultFormat;
+ for (const char *p = format; *p != '\0'; p++) {
+ if (*p != '%')
+ continue;
+ p++;
+ switch (*p) {
+ case '%':
+ break;
+ case 'n':
+ // frame_no
+ break;
+ case 'p':
+ // address
+ break;
+ default:
+ return true;
+ }
+ }
+ return false;
+}
+
void RenderData(InternalScopedString *buffer, const char *format,
const DataInfo *DI, const char *strip_path_prefix) {
for (const char *p = format; *p != '\0'; p++) {
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.h
index f7f7629f773f..96119b2ee9e9 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.h
@@ -47,10 +47,12 @@ namespace __sanitizer {
// module+offset if it is known, or (<unknown module>) string.
// %M - prints module basename and offset, if it is known, or PC.
void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
- const AddressInfo &info, bool vs_style,
+ uptr address, const AddressInfo *info, bool vs_style,
const char *strip_path_prefix = "",
const char *strip_func_prefix = "");
+bool RenderNeedsSymbolization(const char *format);
+
void RenderSourceLocation(InternalScopedString *buffer, const char *file,
int line, int column, bool vs_style,
const char *strip_path_prefix);
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld.h
index 4e4240057142..7891c1081fe7 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld.h
@@ -32,20 +32,21 @@ class SuspendedThreadsList {
// Can't declare pure virtual functions in sanitizer runtimes:
// __cxa_pure_virtual might be unavailable. Use UNIMPLEMENTED() instead.
- virtual PtraceRegistersStatus GetRegistersAndSP(uptr index, uptr *buffer,
- uptr *sp) const {
+ virtual PtraceRegistersStatus GetRegistersAndSP(
+ uptr index, InternalMmapVector<uptr> *buffer, uptr *sp) const {
UNIMPLEMENTED();
}
- // The buffer in GetRegistersAndSP should be at least this big.
- virtual uptr RegisterCount() const { UNIMPLEMENTED(); }
virtual uptr ThreadCount() const { UNIMPLEMENTED(); }
virtual tid_t GetThreadID(uptr index) const { UNIMPLEMENTED(); }
+ protected:
+ ~SuspendedThreadsList() {}
+
private:
// Prohibit copy and assign.
- SuspendedThreadsList(const SuspendedThreadsList&);
- void operator=(const SuspendedThreadsList&);
+ SuspendedThreadsList(const SuspendedThreadsList &) = delete;
+ void operator=(const SuspendedThreadsList &) = delete;
};
typedef void (*StopTheWorldCallback)(
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_fuchsia.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_fuchsia.cpp
index 3a246443ed99..91bf19e40dad 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_fuchsia.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_fuchsia.cpp
@@ -17,6 +17,7 @@
#include <zircon/sanitizer.h>
#include "sanitizer_stoptheworld.h"
+#include "sanitizer_stoptheworld_fuchsia.h"
namespace __sanitizer {
@@ -32,7 +33,7 @@ void StopTheWorld(StopTheWorldCallback callback, void *argument) {
nullptr, nullptr, nullptr, nullptr,
[](zx_status_t, void *data) {
auto params = reinterpret_cast<Params *>(data);
- params->callback({}, params->argument);
+ params->callback(SuspendedThreadsListFuchsia(), params->argument);
},
&params);
}
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_fuchsia.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_fuchsia.h
new file mode 100644
index 000000000000..6d9ead605086
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_fuchsia.h
@@ -0,0 +1,20 @@
+//===-- sanitizer_stoptheworld_fuchsia.h ------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_STOPTHEWORLD_FUCHSIA_H
+#define SANITIZER_STOPTHEWORLD_FUCHSIA_H
+
+#include "sanitizer_stoptheworld.h"
+
+namespace __sanitizer {
+
+class SuspendedThreadsListFuchsia final : public SuspendedThreadsList {};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_STOPTHEWORLD_FUCHSIA_H
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp
index 651d5056dd9d..53cfddcfbe0b 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp
@@ -13,10 +13,10 @@
#include "sanitizer_platform.h"
-#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__) || \
- defined(__aarch64__) || defined(__powerpc64__) || \
- defined(__s390__) || defined(__i386__) || \
- defined(__arm__))
+#if SANITIZER_LINUX && \
+ (defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) || \
+ defined(__powerpc64__) || defined(__s390__) || defined(__i386__) || \
+ defined(__arm__) || SANITIZER_RISCV64)
#include "sanitizer_stoptheworld.h"
@@ -31,7 +31,7 @@
#include <sys/types.h> // for pid_t
#include <sys/uio.h> // for iovec
#include <elf.h> // for NT_PRSTATUS
-#if defined(__aarch64__) && !SANITIZER_ANDROID
+#if (defined(__aarch64__) || SANITIZER_RISCV64) && !SANITIZER_ANDROID
// GLIBC 2.20+ sys/user does not include asm/ptrace.h
# include <asm/ptrace.h>
#endif
@@ -85,18 +85,18 @@
namespace __sanitizer {
-class SuspendedThreadsListLinux : public SuspendedThreadsList {
+class SuspendedThreadsListLinux final : public SuspendedThreadsList {
public:
SuspendedThreadsListLinux() { thread_ids_.reserve(1024); }
- tid_t GetThreadID(uptr index) const;
- uptr ThreadCount() const;
+ tid_t GetThreadID(uptr index) const override;
+ uptr ThreadCount() const override;
bool ContainsTid(tid_t thread_id) const;
void Append(tid_t tid);
- PtraceRegistersStatus GetRegistersAndSP(uptr index, uptr *buffer,
- uptr *sp) const;
- uptr RegisterCount() const;
+ PtraceRegistersStatus GetRegistersAndSP(uptr index,
+ InternalMmapVector<uptr> *buffer,
+ uptr *sp) const override;
private:
InternalMmapVector<tid_t> thread_ids_;
@@ -485,6 +485,16 @@ typedef user_regs_struct regs_struct;
#else
#define REG_SP rsp
#endif
+#define ARCH_IOVEC_FOR_GETREGSET
+// Support ptrace extensions even when compiled without required kernel support
+#ifndef NT_X86_XSTATE
+#define NT_X86_XSTATE 0x202
+#endif
+#ifndef PTRACE_GETREGSET
+#define PTRACE_GETREGSET 0x4204
+#endif
+// Compiler may use FP registers to store pointers.
+static constexpr uptr kExtraRegs[] = {NT_X86_XSTATE, NT_FPREGSET};
#elif defined(__powerpc__) || defined(__powerpc64__)
typedef pt_regs regs_struct;
@@ -501,11 +511,21 @@ typedef struct user regs_struct;
#elif defined(__aarch64__)
typedef struct user_pt_regs regs_struct;
#define REG_SP sp
+static constexpr uptr kExtraRegs[] = {0};
+#define ARCH_IOVEC_FOR_GETREGSET
+
+#elif SANITIZER_RISCV64
+typedef struct user_regs_struct regs_struct;
+// sys/ucontext.h already defines REG_SP as 2. Undefine it first.
+#undef REG_SP
+#define REG_SP sp
+static constexpr uptr kExtraRegs[] = {0};
#define ARCH_IOVEC_FOR_GETREGSET
#elif defined(__s390__)
typedef _user_regs_struct regs_struct;
#define REG_SP gprs[15]
+static constexpr uptr kExtraRegs[] = {0};
#define ARCH_IOVEC_FOR_GETREGSET
#else
@@ -533,24 +553,58 @@ void SuspendedThreadsListLinux::Append(tid_t tid) {
}
PtraceRegistersStatus SuspendedThreadsListLinux::GetRegistersAndSP(
- uptr index, uptr *buffer, uptr *sp) const {
+ uptr index, InternalMmapVector<uptr> *buffer, uptr *sp) const {
pid_t tid = GetThreadID(index);
- regs_struct regs;
+ constexpr uptr uptr_sz = sizeof(uptr);
int pterrno;
#ifdef ARCH_IOVEC_FOR_GETREGSET
- struct iovec regset_io;
- regset_io.iov_base = &regs;
- regset_io.iov_len = sizeof(regs_struct);
- bool isErr = internal_iserror(internal_ptrace(PTRACE_GETREGSET, tid,
- (void*)NT_PRSTATUS, (void*)&regset_io),
- &pterrno);
+ auto append = [&](uptr regset) {
+ uptr size = buffer->size();
+ // NT_X86_XSTATE requires 64bit alignment.
+ uptr size_up = RoundUpTo(size, 8 / uptr_sz);
+ buffer->reserve(Max<uptr>(1024, size_up));
+ struct iovec regset_io;
+ for (;; buffer->resize(buffer->capacity() * 2)) {
+ buffer->resize(buffer->capacity());
+ uptr available_bytes = (buffer->size() - size_up) * uptr_sz;
+ regset_io.iov_base = buffer->data() + size_up;
+ regset_io.iov_len = available_bytes;
+ bool fail =
+ internal_iserror(internal_ptrace(PTRACE_GETREGSET, tid,
+ (void *)regset, (void *)&regset_io),
+ &pterrno);
+ if (fail) {
+ VReport(1, "Could not get regset %p from thread %d (errno %d).\n",
+ (void *)regset, tid, pterrno);
+ buffer->resize(size);
+ return false;
+ }
+
+ // Far enough from the buffer size, no need to resize and repeat.
+ if (regset_io.iov_len + 64 < available_bytes)
+ break;
+ }
+ buffer->resize(size_up + RoundUpTo(regset_io.iov_len, uptr_sz) / uptr_sz);
+ return true;
+ };
+
+ buffer->clear();
+ bool fail = !append(NT_PRSTATUS);
+ if (!fail) {
+ // Accept the first available and do not report errors.
+ for (uptr regs : kExtraRegs)
+ if (regs && append(regs))
+ break;
+ }
#else
- bool isErr = internal_iserror(internal_ptrace(PTRACE_GETREGS, tid, nullptr,
- &regs), &pterrno);
-#endif
- if (isErr) {
+ buffer->resize(RoundUpTo(sizeof(regs_struct), uptr_sz) / uptr_sz);
+ bool fail = internal_iserror(
+ internal_ptrace(PTRACE_GETREGS, tid, nullptr, buffer->data()), &pterrno);
+ if (fail)
VReport(1, "Could not get registers from thread %d (errno %d).\n", tid,
pterrno);
+#endif
+ if (fail) {
// ESRCH means that the given thread is not suspended or already dead.
// Therefore it's unsafe to inspect its data (e.g. walk through stack) and
// we should notify caller about this.
@@ -558,14 +612,10 @@ PtraceRegistersStatus SuspendedThreadsListLinux::GetRegistersAndSP(
: REGISTERS_UNAVAILABLE;
}
- *sp = regs.REG_SP;
- internal_memcpy(buffer, &regs, sizeof(regs));
+ *sp = reinterpret_cast<regs_struct *>(buffer->data())[0].REG_SP;
return REGISTERS_AVAILABLE;
}
-uptr SuspendedThreadsListLinux::RegisterCount() const {
- return sizeof(regs_struct) / sizeof(uptr);
-}
} // namespace __sanitizer
#endif // SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__)
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_mac.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_mac.cpp
index 6c577426ad56..5ec30803b7ad 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_mac.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_mac.cpp
@@ -27,19 +27,19 @@ typedef struct {
thread_t thread;
} SuspendedThreadInfo;
-class SuspendedThreadsListMac : public SuspendedThreadsList {
+class SuspendedThreadsListMac final : public SuspendedThreadsList {
public:
SuspendedThreadsListMac() : threads_(1024) {}
- tid_t GetThreadID(uptr index) const;
+ tid_t GetThreadID(uptr index) const override;
thread_t GetThread(uptr index) const;
- uptr ThreadCount() const;
+ uptr ThreadCount() const override;
bool ContainsThread(thread_t thread) const;
void Append(thread_t thread);
- PtraceRegistersStatus GetRegistersAndSP(uptr index, uptr *buffer,
- uptr *sp) const;
- uptr RegisterCount() const;
+ PtraceRegistersStatus GetRegistersAndSP(uptr index,
+ InternalMmapVector<uptr> *buffer,
+ uptr *sp) const override;
private:
InternalMmapVector<SuspendedThreadInfo> threads_;
@@ -142,7 +142,7 @@ void SuspendedThreadsListMac::Append(thread_t thread) {
}
PtraceRegistersStatus SuspendedThreadsListMac::GetRegistersAndSP(
- uptr index, uptr *buffer, uptr *sp) const {
+ uptr index, InternalMmapVector<uptr> *buffer, uptr *sp) const {
thread_t thread = GetThread(index);
regs_struct regs;
int err;
@@ -159,7 +159,8 @@ PtraceRegistersStatus SuspendedThreadsListMac::GetRegistersAndSP(
: REGISTERS_UNAVAILABLE;
}
- internal_memcpy(buffer, &regs, sizeof(regs));
+ buffer->resize(RoundUpTo(sizeof(regs), sizeof(uptr)) / sizeof(uptr));
+ internal_memcpy(buffer->data(), &regs, sizeof(regs));
#if defined(__aarch64__) && defined(arm_thread_state64_get_sp)
*sp = arm_thread_state64_get_sp(regs);
#else
@@ -173,9 +174,6 @@ PtraceRegistersStatus SuspendedThreadsListMac::GetRegistersAndSP(
return REGISTERS_AVAILABLE;
}
-uptr SuspendedThreadsListMac::RegisterCount() const {
- return MACHINE_THREAD_STATE_COUNT;
-}
} // namespace __sanitizer
#endif // SANITIZER_MAC && (defined(__x86_64__) || defined(__aarch64__)) ||
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_netbsd_libcdep.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_netbsd_libcdep.cpp
index 1ed21343254d..9c7cd64255e5 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_netbsd_libcdep.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_netbsd_libcdep.cpp
@@ -48,7 +48,7 @@
namespace __sanitizer {
-class SuspendedThreadsListNetBSD : public SuspendedThreadsList {
+class SuspendedThreadsListNetBSD final : public SuspendedThreadsList {
public:
SuspendedThreadsListNetBSD() { thread_ids_.reserve(1024); }
@@ -57,9 +57,9 @@ class SuspendedThreadsListNetBSD : public SuspendedThreadsList {
bool ContainsTid(tid_t thread_id) const;
void Append(tid_t tid);
- PtraceRegistersStatus GetRegistersAndSP(uptr index, uptr *buffer,
+ PtraceRegistersStatus GetRegistersAndSP(uptr index,
+ InternalMmapVector<uptr> *buffer,
uptr *sp) const;
- uptr RegisterCount() const;
private:
InternalMmapVector<tid_t> thread_ids_;
@@ -131,7 +131,7 @@ bool ThreadSuspender::SuspendAllThreads() {
pl.pl_lwpid = 0;
int val;
- while ((val = ptrace(op, pid_, (void *)&pl, sizeof(pl))) != -1 &&
+ while ((val = internal_ptrace(op, pid_, (void *)&pl, sizeof(pl))) != -1 &&
pl.pl_lwpid != 0) {
suspended_threads_list_.Append(pl.pl_lwpid);
VReport(2, "Appended thread %d in process %d.\n", pl.pl_lwpid, pid_);
@@ -335,7 +335,7 @@ void SuspendedThreadsListNetBSD::Append(tid_t tid) {
}
PtraceRegistersStatus SuspendedThreadsListNetBSD::GetRegistersAndSP(
- uptr index, uptr *buffer, uptr *sp) const {
+ uptr index, InternalMmapVector<uptr> *buffer, uptr *sp) const {
lwpid_t tid = GetThreadID(index);
pid_t ppid = internal_getppid();
struct reg regs;
@@ -351,14 +351,12 @@ PtraceRegistersStatus SuspendedThreadsListNetBSD::GetRegistersAndSP(
}
*sp = PTRACE_REG_SP(&regs);
- internal_memcpy(buffer, &regs, sizeof(regs));
+ buffer->resize(RoundUpTo(sizeof(regs), sizeof(uptr)) / sizeof(uptr));
+ internal_memcpy(buffer->data(), &regs, sizeof(regs));
return REGISTERS_AVAILABLE;
}
-uptr SuspendedThreadsListNetBSD::RegisterCount() const {
- return sizeof(struct reg) / sizeof(uptr);
-}
} // namespace __sanitizer
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_internal.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_internal.h
index e4c351e667b4..71de1758b3e9 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_internal.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_internal.h
@@ -74,6 +74,9 @@ class SymbolizerTool {
// Usually this is a safe place to call code that might need to use user
// memory allocators.
virtual void LateInitialize() {}
+
+ protected:
+ ~SymbolizerTool() {}
};
// SymbolizerProcess encapsulates communication between the tool and
@@ -85,6 +88,8 @@ class SymbolizerProcess {
const char *SendCommand(const char *command);
protected:
+ ~SymbolizerProcess() {}
+
/// The maximum number of arguments required to invoke a tool process.
static const unsigned kArgVMax = 6;
@@ -128,7 +133,7 @@ class LLVMSymbolizerProcess;
// This tool invokes llvm-symbolizer in a subprocess. It should be as portable
// as the llvm-symbolizer tool is.
-class LLVMSymbolizer : public SymbolizerTool {
+class LLVMSymbolizer final : public SymbolizerTool {
public:
explicit LLVMSymbolizer(const char *path, LowLevelAllocator *allocator);
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libbacktrace.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libbacktrace.h
index e2a0f71420f0..7b039b894b3b 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libbacktrace.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libbacktrace.h
@@ -28,7 +28,7 @@
namespace __sanitizer {
-class LibbacktraceSymbolizer : public SymbolizerTool {
+class LibbacktraceSymbolizer final : public SymbolizerTool {
public:
static LibbacktraceSymbolizer *get(LowLevelAllocator *alloc);
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cpp
index 490c6fe89beb..710da4c1cecd 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cpp
@@ -12,6 +12,7 @@
#include "sanitizer_allocator_internal.h"
#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform.h"
#include "sanitizer_symbolizer_internal.h"
namespace __sanitizer {
@@ -236,7 +237,7 @@ const LoadedModule *Symbolizer::FindModuleForAddress(uptr address) {
// <file_name>:<line_number>:<column_number>
// ...
// <empty line>
-class LLVMSymbolizerProcess : public SymbolizerProcess {
+class LLVMSymbolizerProcess final : public SymbolizerProcess {
public:
explicit LLVMSymbolizerProcess(const char *path)
: SymbolizerProcess(path, /*use_posix_spawn=*/SANITIZER_MAC) {}
@@ -258,6 +259,8 @@ class LLVMSymbolizerProcess : public SymbolizerProcess {
const char* const kSymbolizerArch = "--default-arch=x86_64";
#elif defined(__i386__)
const char* const kSymbolizerArch = "--default-arch=i386";
+#elif SANITIZER_RISCV64
+ const char *const kSymbolizerArch = "--default-arch=riscv64";
#elif defined(__aarch64__)
const char* const kSymbolizerArch = "--default-arch=arm64";
#elif defined(__arm__)
@@ -275,8 +278,8 @@ class LLVMSymbolizerProcess : public SymbolizerProcess {
#endif
const char *const inline_flag = common_flags()->symbolize_inline_frames
- ? "--inlining=true"
- : "--inlining=false";
+ ? "--inlines"
+ : "--no-inlines";
int i = 0;
argv[i++] = path_to_binary;
argv[i++] = inline_flag;
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.cpp
index 29cbf62acd5c..5c25b28b5dc9 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.cpp
@@ -33,8 +33,15 @@ bool DlAddrSymbolizer::SymbolizePC(uptr addr, SymbolizedStack *stack) {
int result = dladdr((const void *)addr, &info);
if (!result) return false;
- CHECK(addr >= reinterpret_cast<uptr>(info.dli_saddr));
- stack->info.function_offset = addr - reinterpret_cast<uptr>(info.dli_saddr);
+ // Compute offset if possible. `dladdr()` doesn't always ensure that `addr >=
+ // sym_addr` so only compute the offset when this holds. Failure to find the
+ // function offset is not treated as a failure because it might still be
+ // possible to get the symbol name.
+ uptr sym_addr = reinterpret_cast<uptr>(info.dli_saddr);
+ if (addr >= sym_addr) {
+ stack->info.function_offset = addr - sym_addr;
+ }
+
const char *demangled = DemangleSwiftAndCXX(info.dli_sname);
if (!demangled) return false;
stack->info.function = internal_strdup(demangled);
@@ -58,7 +65,7 @@ bool DlAddrSymbolizer::SymbolizeData(uptr addr, DataInfo *datainfo) {
// kAsanInternalHeapMagic.
static char kAtosMachPortEnvEntry[] = K_ATOS_ENV_VAR "=000000000000000";
-class AtosSymbolizerProcess : public SymbolizerProcess {
+class AtosSymbolizerProcess final : public SymbolizerProcess {
public:
explicit AtosSymbolizerProcess(const char *path)
: SymbolizerProcess(path, /*use_posix_spawn*/ true) {
@@ -219,10 +226,10 @@ bool AtosSymbolizer::SymbolizePC(uptr addr, SymbolizedStack *stack) {
start_address = reinterpret_cast<uptr>(info.dli_saddr);
}
- // Only assig to `function_offset` if we were able to get the function's
- // start address.
- if (start_address != AddressInfo::kUnknown) {
- CHECK(addr >= start_address);
+ // Only assign to `function_offset` if we were able to get the function's
+ // start address and we got a sensible `start_address` (dladdr doesn't always
+ // ensure that `addr >= sym_addr`).
+ if (start_address != AddressInfo::kUnknown && addr >= start_address) {
stack->info.function_offset = addr - start_address;
}
return true;
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.h
index 8996131fc138..401d30fa5033 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.h
@@ -21,7 +21,7 @@
namespace __sanitizer {
-class DlAddrSymbolizer : public SymbolizerTool {
+class DlAddrSymbolizer final : public SymbolizerTool {
public:
bool SymbolizePC(uptr addr, SymbolizedStack *stack) override;
bool SymbolizeData(uptr addr, DataInfo *info) override;
@@ -29,7 +29,7 @@ class DlAddrSymbolizer : public SymbolizerTool {
class AtosSymbolizerProcess;
-class AtosSymbolizer : public SymbolizerTool {
+class AtosSymbolizer final : public SymbolizerTool {
public:
explicit AtosSymbolizer(const char *path, LowLevelAllocator *allocator);
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup.cpp
index 2963af953609..30cba08ed539 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup.cpp
@@ -83,11 +83,14 @@ void RenderData(InternalScopedString *buffer, const char *format,
buffer->append(kFormatData, DI->start);
}
+bool RenderNeedsSymbolization(const char *format) { return false; }
+
// We don't support the stack_trace_format flag at all.
void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
- const AddressInfo &info, bool vs_style,
+ uptr address, const AddressInfo *info, bool vs_style,
const char *strip_path_prefix, const char *strip_func_prefix) {
- buffer->append(kFormatFrame, frame_no, info.address);
+ CHECK(!RenderNeedsSymbolization(format));
+ buffer->append(kFormatFrame, frame_no, address);
}
Symbolizer *Symbolizer::PlatformInit() {
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp
index 3c379a848025..4dd5cc3ad7cb 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp
@@ -201,7 +201,7 @@ bool SymbolizerProcess::StartSymbolizerSubprocess() {
return true;
}
-class Addr2LineProcess : public SymbolizerProcess {
+class Addr2LineProcess final : public SymbolizerProcess {
public:
Addr2LineProcess(const char *path, const char *module_name)
: SymbolizerProcess(path), module_name_(internal_strdup(module_name)) {}
@@ -261,7 +261,7 @@ bool Addr2LineProcess::ReachedEndOfOutput(const char *buffer,
output_terminator_, kTerminatorLen);
}
-class Addr2LinePool : public SymbolizerTool {
+class Addr2LinePool final : public SymbolizerTool {
public:
explicit Addr2LinePool(const char *addr2line_path,
LowLevelAllocator *allocator)
@@ -328,7 +328,7 @@ int __sanitizer_symbolize_demangle(const char *Name, char *Buffer,
int MaxLength);
} // extern "C"
-class InternalSymbolizer : public SymbolizerTool {
+class InternalSymbolizer final : public SymbolizerTool {
public:
static InternalSymbolizer *get(LowLevelAllocator *alloc) {
if (__sanitizer_symbolize_code != 0 &&
@@ -387,7 +387,7 @@ class InternalSymbolizer : public SymbolizerTool {
};
#else // SANITIZER_SUPPORTS_WEAK_HOOKS
-class InternalSymbolizer : public SymbolizerTool {
+class InternalSymbolizer final : public SymbolizerTool {
public:
static InternalSymbolizer *get(LowLevelAllocator *alloc) { return 0; }
};
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_report.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_report.cpp
index c26724ceb7a7..c99a6ceaa562 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_report.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_report.cpp
@@ -33,7 +33,8 @@ void ReportErrorSummary(const char *error_type, const AddressInfo &info,
if (!common_flags()->print_summary) return;
InternalScopedString buff(kMaxSummaryLength);
buff.append("%s ", error_type);
- RenderFrame(&buff, "%L %F", 0, info, common_flags()->symbolize_vs_style,
+ RenderFrame(&buff, "%L %F", 0, info.address, &info,
+ common_flags()->symbolize_vs_style,
common_flags()->strip_path_prefix);
ReportErrorSummary(buff.data(), alt_tool_name);
}
@@ -47,14 +48,14 @@ bool ReportFile::SupportsColors() {
return SupportsColoredOutput(fd);
}
-static INLINE bool ReportSupportsColors() {
+static inline bool ReportSupportsColors() {
return report_file.SupportsColors();
}
#else // SANITIZER_FUCHSIA
// Fuchsia's logs always go through post-processing that handles colorization.
-static INLINE bool ReportSupportsColors() { return true; }
+static inline bool ReportSupportsColors() { return true; }
#endif // !SANITIZER_FUCHSIA
@@ -210,7 +211,7 @@ static void ReportDeadlySignalImpl(const SignalContext &sig, u32 tid,
Report("The signal is caused by a %s memory access.\n", access_type);
if (!sig.is_true_faulting_addr)
Report("Hint: this fault was caused by a dereference of a high value "
- "address (see register values below). Dissassemble the provided "
+ "address (see register values below). Disassemble the provided "
"pc to learn which register was used.\n");
else if (sig.addr < GetPageSizeCached())
Report("Hint: address points to the zero page.\n");
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_win.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_win.cpp
index 373437e7ee2a..7db7d3b0eb9d 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_win.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_win.cpp
@@ -33,7 +33,7 @@ decltype(::UnDecorateSymbolName) *UnDecorateSymbolName;
namespace {
-class WinSymbolizerTool : public SymbolizerTool {
+class WinSymbolizerTool final : public SymbolizerTool {
public:
// The constructor is provided to avoid synthesized memsets.
WinSymbolizerTool() {}
@@ -133,10 +133,14 @@ void InitializeDbgHelpIfNeeded() {
}
}
+#ifdef __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wframe-larger-than="
+#endif
bool WinSymbolizerTool::SymbolizePC(uptr addr, SymbolizedStack *frame) {
InitializeDbgHelpIfNeeded();
- // See http://msdn.microsoft.com/en-us/library/ms680578(VS.85).aspx
+ // See https://docs.microsoft.com/en-us/windows/win32/debug/retrieving-symbol-information-by-address
char buffer[sizeof(SYMBOL_INFO) + MAX_SYM_NAME * sizeof(CHAR)];
PSYMBOL_INFO symbol = (PSYMBOL_INFO)buffer;
symbol->SizeOfStruct = sizeof(SYMBOL_INFO);
@@ -162,6 +166,9 @@ bool WinSymbolizerTool::SymbolizePC(uptr addr, SymbolizedStack *frame) {
// Otherwise, try llvm-symbolizer.
return got_fileline;
}
+#ifdef __clang__
+#pragma clang diagnostic pop
+#endif
const char *WinSymbolizerTool::Demangle(const char *name) {
CHECK(is_dbghelp_initialized);
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_syscall_generic.inc b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_syscall_generic.inc
index a43ce3efab12..8829985b5b07 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_syscall_generic.inc
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_syscall_generic.inc
@@ -13,7 +13,7 @@
// NetBSD uses libc calls directly
#if !SANITIZER_NETBSD
-#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_OPENBSD || SANITIZER_SOLARIS
+#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_SOLARIS
# define SYSCALL(name) SYS_ ## name
#else
# define SYSCALL(name) __NR_ ## name
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_syscall_linux_riscv64.inc b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_syscall_linux_riscv64.inc
new file mode 100644
index 000000000000..89c12602057c
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_syscall_linux_riscv64.inc
@@ -0,0 +1,174 @@
+//===-- sanitizer_syscall_linux_riscv64.inc ---------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementations of internal_syscall and internal_iserror for Linux/riscv64.
+//
+//===----------------------------------------------------------------------===//
+
+// About local register variables:
+// https://gcc.gnu.org/onlinedocs/gcc/Local-Register-Variables.html#Local-Register-Variables
+//
+// Kernel ABI...
+// To my surprise I haven't found much information regarding it.
+// Kernel source and internet browsing shows that:
+// syscall number is passed in a7
+// (http://man7.org/linux/man-pages/man2/syscall.2.html) results are returned in
+// a0 and a1 (http://man7.org/linux/man-pages/man2/syscall.2.html) arguments
+// are passed in: a0-a7 (see below)
+//
+// Regarding the arguments. The only "documentation" I could find is
+// this comment (!!!) by Bruce Hold on google forums (!!!):
+// https://groups.google.com/a/groups.riscv.org/forum/#!topic/sw-dev/exbrzM3GZDQ
+// Confirmed by inspecting glibc sources.
+// Great way to document things.
+#define SYSCALL(name) __NR_##name
+
+#define INTERNAL_SYSCALL_CLOBBERS "memory"
+
+static uptr __internal_syscall(u64 nr) {
+ register u64 a7 asm("a7") = nr;
+ register u64 a0 asm("a0");
+ __asm__ volatile("ecall\n\t"
+ : "=r"(a0)
+ : "r"(a7)
+ : INTERNAL_SYSCALL_CLOBBERS);
+ return a0;
+}
+#define __internal_syscall0(n) (__internal_syscall)(n)
+
+static uptr __internal_syscall(u64 nr, u64 arg1) {
+ register u64 a7 asm("a7") = nr;
+ register u64 a0 asm("a0") = arg1;
+ __asm__ volatile("ecall\n\t"
+ : "+r"(a0)
+ : "r"(a7)
+ : INTERNAL_SYSCALL_CLOBBERS);
+ return a0;
+}
+#define __internal_syscall1(n, a1) (__internal_syscall)(n, (u64)(a1))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2) {
+ register u64 a7 asm("a7") = nr;
+ register u64 a0 asm("a0") = arg1;
+ register u64 a1 asm("a1") = arg2;
+ __asm__ volatile("ecall\n\t"
+ : "+r"(a0)
+ : "r"(a7), "r"(a1)
+ : INTERNAL_SYSCALL_CLOBBERS);
+ return a0;
+}
+#define __internal_syscall2(n, a1, a2) \
+ (__internal_syscall)(n, (u64)(a1), (long)(a2))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3) {
+ register u64 a7 asm("a7") = nr;
+ register u64 a0 asm("a0") = arg1;
+ register u64 a1 asm("a1") = arg2;
+ register u64 a2 asm("a2") = arg3;
+ __asm__ volatile("ecall\n\t"
+ : "+r"(a0)
+ : "r"(a7), "r"(a1), "r"(a2)
+ : INTERNAL_SYSCALL_CLOBBERS);
+ return a0;
+}
+#define __internal_syscall3(n, a1, a2, a3) \
+ (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3,
+ u64 arg4) {
+ register u64 a7 asm("a7") = nr;
+ register u64 a0 asm("a0") = arg1;
+ register u64 a1 asm("a1") = arg2;
+ register u64 a2 asm("a2") = arg3;
+ register u64 a3 asm("a3") = arg4;
+ __asm__ volatile("ecall\n\t"
+ : "+r"(a0)
+ : "r"(a7), "r"(a1), "r"(a2), "r"(a3)
+ : INTERNAL_SYSCALL_CLOBBERS);
+ return a0;
+}
+#define __internal_syscall4(n, a1, a2, a3, a4) \
+ (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
+ long arg5) {
+ register u64 a7 asm("a7") = nr;
+ register u64 a0 asm("a0") = arg1;
+ register u64 a1 asm("a1") = arg2;
+ register u64 a2 asm("a2") = arg3;
+ register u64 a3 asm("a3") = arg4;
+ register u64 a4 asm("a4") = arg5;
+ __asm__ volatile("ecall\n\t"
+ : "+r"(a0)
+ : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4)
+ : INTERNAL_SYSCALL_CLOBBERS);
+ return a0;
+}
+#define __internal_syscall5(n, a1, a2, a3, a4, a5) \
+ (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
+ (u64)(a5))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
+ long arg5, long arg6) {
+ register u64 a7 asm("a7") = nr;
+ register u64 a0 asm("a0") = arg1;
+ register u64 a1 asm("a1") = arg2;
+ register u64 a2 asm("a2") = arg3;
+ register u64 a3 asm("a3") = arg4;
+ register u64 a4 asm("a4") = arg5;
+ register u64 a5 asm("a5") = arg6;
+ __asm__ volatile("ecall\n\t"
+ : "+r"(a0)
+ : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5)
+ : INTERNAL_SYSCALL_CLOBBERS);
+ return a0;
+}
+#define __internal_syscall6(n, a1, a2, a3, a4, a5, a6) \
+ (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
+ (u64)(a5), (long)(a6))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
+ long arg5, long arg6, long arg7) {
+ register u64 a7 asm("a7") = nr;
+ register u64 a0 asm("a0") = arg1;
+ register u64 a1 asm("a1") = arg2;
+ register u64 a2 asm("a2") = arg3;
+ register u64 a3 asm("a3") = arg4;
+ register u64 a4 asm("a4") = arg5;
+ register u64 a5 asm("a5") = arg6;
+ register u64 a6 asm("a6") = arg7;
+ __asm__ volatile("ecall\n\t"
+ : "+r"(a0)
+ : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5),
+ "r"(a6)
+ : INTERNAL_SYSCALL_CLOBBERS);
+ return a0;
+}
+#define __internal_syscall7(n, a1, a2, a3, a4, a5, a6, a7) \
+ (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
+ (u64)(a5), (long)(a6), (long)(a7))
+
+#define __SYSCALL_NARGS_X(a1, a2, a3, a4, a5, a6, a7, a8, n, ...) n
+#define __SYSCALL_NARGS(...) \
+ __SYSCALL_NARGS_X(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0, )
+#define __SYSCALL_CONCAT_X(a, b) a##b
+#define __SYSCALL_CONCAT(a, b) __SYSCALL_CONCAT_X(a, b)
+#define __SYSCALL_DISP(b, ...) \
+ __SYSCALL_CONCAT(b, __SYSCALL_NARGS(__VA_ARGS__))(__VA_ARGS__)
+
+#define internal_syscall(...) __SYSCALL_DISP(__internal_syscall, __VA_ARGS__)
+
+// Helper function used to avoid clobbering of errno.
+bool internal_iserror(uptr retval, int *rverrno) {
+ if (retval >= (uptr)-4095) {
+ if (rverrno)
+ *rverrno = -retval;
+ return true;
+ }
+ return false;
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_syscalls_netbsd.inc b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_syscalls_netbsd.inc
index 02b7e11b1677..c4a9d99fe2f0 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_syscalls_netbsd.inc
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_syscalls_netbsd.inc
@@ -42,8 +42,8 @@
// DO NOT EDIT! THIS FILE HAS BEEN GENERATED!
//
// Generated with: generate_netbsd_syscalls.awk
-// Generated date: 2019-12-24
-// Generated from: syscalls.master,v 1.296 2019/09/22 22:59:39 christos Exp
+// Generated date: 2020-09-10
+// Generated from: syscalls.master,v 1.306 2020/08/14 00:53:16 riastradh Exp
//
//===----------------------------------------------------------------------===//
@@ -872,7 +872,13 @@ PRE_SYSCALL(dup2)(long long from_, long long to_) { /* Nothing to do */ }
POST_SYSCALL(dup2)(long long res, long long from_, long long to_) {
/* Nothing to do */
}
-/* syscall 91 has been skipped */
+PRE_SYSCALL(getrandom)(void *buf_, long long buflen_, long long flags_) {
+ /* TODO */
+}
+POST_SYSCALL(getrandom)
+(long long res, void *buf_, long long buflen_, long long flags_) {
+ /* TODO */
+}
PRE_SYSCALL(fcntl)(long long fd_, long long cmd_, void *arg_) {
/* Nothing to do */
}
@@ -1332,9 +1338,29 @@ PRE_SYSCALL(compat_09_ouname)(void *name_) { /* TODO */ }
POST_SYSCALL(compat_09_ouname)(long long res, void *name_) { /* TODO */ }
PRE_SYSCALL(sysarch)(long long op_, void *parms_) { /* TODO */ }
POST_SYSCALL(sysarch)(long long res, long long op_, void *parms_) { /* TODO */ }
-/* syscall 166 has been skipped */
-/* syscall 167 has been skipped */
-/* syscall 168 has been skipped */
+PRE_SYSCALL(__futex)
+(void *uaddr_, long long op_, long long val_, void *timeout_, void *uaddr2_,
+ long long val2_, long long val3_) {
+ /* TODO */
+}
+POST_SYSCALL(__futex)
+(long long res, void *uaddr_, long long op_, long long val_, void *timeout_,
+ void *uaddr2_, long long val2_, long long val3_) {
+ /* TODO */
+}
+PRE_SYSCALL(__futex_set_robust_list)(void *head_, long long len_) { /* TODO */ }
+POST_SYSCALL(__futex_set_robust_list)
+(long long res, void *head_, long long len_) {
+ /* TODO */
+}
+PRE_SYSCALL(__futex_get_robust_list)
+(long long lwpid_, void **headp_, void *lenp_) {
+ /* TODO */
+}
+POST_SYSCALL(__futex_get_robust_list)
+(long long res, long long lwpid_, void **headp_, void *lenp_) {
+ /* TODO */
+}
#if !defined(_LP64)
PRE_SYSCALL(compat_10_osemsys)
(long long which_, long long a2_, long long a3_, long long a4_, long long a5_) {
@@ -3824,6 +3850,87 @@ PRE_SYSCALL(__fhstatvfs190)
}
POST_SYSCALL(__fhstatvfs190)
(long long res, void *fhp_, long long fh_size_, void *buf_, long long flags_) {}
+PRE_SYSCALL(__acl_get_link)(void *path_, long long type_, void *aclp_) {
+ /* TODO */
+}
+POST_SYSCALL(__acl_get_link)
+(long long res, void *path_, long long type_, void *aclp_) {
+ /* TODO */
+}
+PRE_SYSCALL(__acl_set_link)(void *path_, long long type_, void *aclp_) {
+ /* TODO */
+}
+POST_SYSCALL(__acl_set_link)
+(long long res, void *path_, long long type_, void *aclp_) {
+ /* TODO */
+}
+PRE_SYSCALL(__acl_delete_link)(void *path_, long long type_) { /* TODO */ }
+POST_SYSCALL(__acl_delete_link)(long long res, void *path_, long long type_) {
+ /* TODO */
+}
+PRE_SYSCALL(__acl_aclcheck_link)(void *path_, long long type_, void *aclp_) {
+ /* TODO */
+}
+POST_SYSCALL(__acl_aclcheck_link)
+(long long res, void *path_, long long type_, void *aclp_) {
+ /* TODO */
+}
+PRE_SYSCALL(__acl_get_file)(void *path_, long long type_, void *aclp_) {
+ /* TODO */
+}
+POST_SYSCALL(__acl_get_file)
+(long long res, void *path_, long long type_, void *aclp_) {
+ /* TODO */
+}
+PRE_SYSCALL(__acl_set_file)(void *path_, long long type_, void *aclp_) {
+ /* TODO */
+}
+POST_SYSCALL(__acl_set_file)
+(long long res, void *path_, long long type_, void *aclp_) {
+ /* TODO */
+}
+PRE_SYSCALL(__acl_get_fd)(long long filedes_, long long type_, void *aclp_) {
+ /* TODO */
+}
+POST_SYSCALL(__acl_get_fd)
+(long long res, long long filedes_, long long type_, void *aclp_) {
+ /* TODO */
+}
+PRE_SYSCALL(__acl_set_fd)(long long filedes_, long long type_, void *aclp_) {
+ /* TODO */
+}
+POST_SYSCALL(__acl_set_fd)
+(long long res, long long filedes_, long long type_, void *aclp_) {
+ /* TODO */
+}
+PRE_SYSCALL(__acl_delete_file)(void *path_, long long type_) { /* TODO */ }
+POST_SYSCALL(__acl_delete_file)(long long res, void *path_, long long type_) {
+ /* TODO */
+}
+PRE_SYSCALL(__acl_delete_fd)(long long filedes_, long long type_) { /* TODO */ }
+POST_SYSCALL(__acl_delete_fd)
+(long long res, long long filedes_, long long type_) {
+ /* TODO */
+}
+PRE_SYSCALL(__acl_aclcheck_file)(void *path_, long long type_, void *aclp_) {
+ /* TODO */
+}
+POST_SYSCALL(__acl_aclcheck_file)
+(long long res, void *path_, long long type_, void *aclp_) {
+ /* TODO */
+}
+PRE_SYSCALL(__acl_aclcheck_fd)
+(long long filedes_, long long type_, void *aclp_) {
+ /* TODO */
+}
+POST_SYSCALL(__acl_aclcheck_fd)
+(long long res, long long filedes_, long long type_, void *aclp_) {
+ /* TODO */
+}
+PRE_SYSCALL(lpathconf)(void *path_, long long name_) { /* TODO */ }
+POST_SYSCALL(lpathconf)(long long res, void *path_, long long name_) {
+ /* TODO */
+}
#undef SYS_MAXSYSARGS
} // extern "C"
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h
index 493aa988f7e6..85c522a31cac 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h
@@ -39,8 +39,6 @@ enum class ThreadType {
class ThreadContextBase {
public:
explicit ThreadContextBase(u32 tid);
- ~ThreadContextBase(); // Should never be called.
-
const u32 tid; // Thread ID. Main thread should have tid = 0.
u64 unique_id; // Unique thread ID.
u32 reuse_count; // Number of times this tid was reused.
@@ -80,6 +78,9 @@ class ThreadContextBase {
virtual void OnCreated(void *arg) {}
virtual void OnReset() {}
virtual void OnDetached(void *arg) {}
+
+ protected:
+ ~ThreadContextBase();
};
typedef ThreadContextBase* (*ThreadContextFactory)(u32 tid);
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.cpp
index 9ca898a306a8..1f664b6cf5b8 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.cpp
@@ -12,6 +12,7 @@
#include "sanitizer_tls_get_addr.h"
+#include "sanitizer_atomic.h"
#include "sanitizer_flags.h"
#include "sanitizer_platform_interceptors.h"
@@ -42,46 +43,66 @@ static atomic_uintptr_t number_of_live_dtls;
static const uptr kDestroyedThread = -1;
-static inline void DTLS_Deallocate(DTLS::DTV *dtv, uptr size) {
- if (!size) return;
- VReport(2, "__tls_get_addr: DTLS_Deallocate %p %zd\n", dtv, size);
- UnmapOrDie(dtv, size * sizeof(DTLS::DTV));
+static void DTLS_Deallocate(DTLS::DTVBlock *block) {
+  VReport(2, "__tls_get_addr: DTLS_Deallocate %p\n", block);
+ UnmapOrDie(block, sizeof(DTLS::DTVBlock));
atomic_fetch_sub(&number_of_live_dtls, 1, memory_order_relaxed);
}
-static inline void DTLS_Resize(uptr new_size) {
- if (dtls.dtv_size >= new_size) return;
- new_size = RoundUpToPowerOfTwo(new_size);
- new_size = Max(new_size, 4096UL / sizeof(DTLS::DTV));
- DTLS::DTV *new_dtv =
- (DTLS::DTV *)MmapOrDie(new_size * sizeof(DTLS::DTV), "DTLS_Resize");
+static DTLS::DTVBlock *DTLS_NextBlock(atomic_uintptr_t *cur) {
+ uptr v = atomic_load(cur, memory_order_acquire);
+ if (v == kDestroyedThread)
+ return nullptr;
+ DTLS::DTVBlock *next = (DTLS::DTVBlock *)v;
+ if (next)
+ return next;
+ DTLS::DTVBlock *new_dtv =
+ (DTLS::DTVBlock *)MmapOrDie(sizeof(DTLS::DTVBlock), "DTLS_NextBlock");
+ uptr prev = 0;
+ if (!atomic_compare_exchange_strong(cur, &prev, (uptr)new_dtv,
+ memory_order_seq_cst)) {
+ UnmapOrDie(new_dtv, sizeof(DTLS::DTVBlock));
+ return (DTLS::DTVBlock *)prev;
+ }
uptr num_live_dtls =
atomic_fetch_add(&number_of_live_dtls, 1, memory_order_relaxed);
- VReport(2, "__tls_get_addr: DTLS_Resize %p %zd\n", &dtls, num_live_dtls);
- CHECK_LT(num_live_dtls, 1 << 20);
- uptr old_dtv_size = dtls.dtv_size;
- DTLS::DTV *old_dtv = dtls.dtv;
- if (old_dtv_size)
- internal_memcpy(new_dtv, dtls.dtv, dtls.dtv_size * sizeof(DTLS::DTV));
- dtls.dtv = new_dtv;
- dtls.dtv_size = new_size;
- if (old_dtv_size)
- DTLS_Deallocate(old_dtv, old_dtv_size);
+ VReport(2, "__tls_get_addr: DTLS_NextBlock %p %zd\n", &dtls, num_live_dtls);
+ return new_dtv;
+}
+
+static DTLS::DTV *DTLS_Find(uptr id) {
+ VReport(2, "__tls_get_addr: DTLS_Find %p %zd\n", &dtls, id);
+ static constexpr uptr kPerBlock = ARRAY_SIZE(DTLS::DTVBlock::dtvs);
+ DTLS::DTVBlock *cur = DTLS_NextBlock(&dtls.dtv_block);
+ if (!cur)
+ return nullptr;
+ for (; id >= kPerBlock; id -= kPerBlock) cur = DTLS_NextBlock(&cur->next);
+ return cur->dtvs + id;
}
void DTLS_Destroy() {
if (!common_flags()->intercept_tls_get_addr) return;
- VReport(2, "__tls_get_addr: DTLS_Destroy %p %zd\n", &dtls, dtls.dtv_size);
- uptr s = dtls.dtv_size;
- dtls.dtv_size = kDestroyedThread; // Do this before unmap for AS-safety.
- DTLS_Deallocate(dtls.dtv, s);
+ VReport(2, "__tls_get_addr: DTLS_Destroy %p\n", &dtls);
+ DTLS::DTVBlock *block = (DTLS::DTVBlock *)atomic_exchange(
+ &dtls.dtv_block, kDestroyedThread, memory_order_release);
+ while (block) {
+ DTLS::DTVBlock *next =
+ (DTLS::DTVBlock *)atomic_load(&block->next, memory_order_acquire);
+ DTLS_Deallocate(block);
+ block = next;
+ }
}
#if defined(__powerpc64__) || defined(__mips__)
// This is glibc's TLS_DTV_OFFSET:
// "Dynamic thread vector pointers point 0x8000 past the start of each
-// TLS block."
+// TLS block." (sysdeps/<arch>/dl-tls.h)
static const uptr kDtvOffset = 0x8000;
+#elif defined(__riscv)
+// This is glibc's TLS_DTV_OFFSET:
+// "Dynamic thread vector pointers point 0x800 past the start of each
+// TLS block." (sysdeps/riscv/dl-tls.h)
+static const uptr kDtvOffset = 0x800;
#else
static const uptr kDtvOffset = 0;
#endif
@@ -91,9 +112,9 @@ DTLS::DTV *DTLS_on_tls_get_addr(void *arg_void, void *res,
if (!common_flags()->intercept_tls_get_addr) return 0;
TlsGetAddrParam *arg = reinterpret_cast<TlsGetAddrParam *>(arg_void);
uptr dso_id = arg->dso_id;
- if (dtls.dtv_size == kDestroyedThread) return 0;
- DTLS_Resize(dso_id + 1);
- if (dtls.dtv[dso_id].beg) return 0;
+ DTLS::DTV *dtv = DTLS_Find(dso_id);
+ if (!dtv || dtv->beg)
+ return 0;
uptr tls_size = 0;
uptr tls_beg = reinterpret_cast<uptr>(res) - arg->offset - kDtvOffset;
VReport(2, "__tls_get_addr: %p {%p,%p} => %p; tls_beg: %p; sp: %p "
@@ -121,9 +142,9 @@ DTLS::DTV *DTLS_on_tls_get_addr(void *arg_void, void *res,
// This may happen inside the DTOR of main thread, so just ignore it.
tls_size = 0;
}
- dtls.dtv[dso_id].beg = tls_beg;
- dtls.dtv[dso_id].size = tls_size;
- return dtls.dtv + dso_id;
+ dtv->beg = tls_beg;
+ dtv->size = tls_size;
+ return dtv;
}
void DTLS_on_libc_memalign(void *ptr, uptr size) {
@@ -136,7 +157,8 @@ void DTLS_on_libc_memalign(void *ptr, uptr size) {
DTLS *DTLS_Get() { return &dtls; }
bool DTLSInDestruction(DTLS *dtls) {
- return dtls->dtv_size == kDestroyedThread;
+ return atomic_load(&dtls->dtv_block, memory_order_relaxed) ==
+ kDestroyedThread;
}
#else
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.h
index c7cd5a8bffcf..a599c0bbc75c 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.h
@@ -28,6 +28,7 @@
#ifndef SANITIZER_TLS_GET_ADDR_H
#define SANITIZER_TLS_GET_ADDR_H
+#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
namespace __sanitizer {
@@ -38,15 +39,31 @@ struct DTLS {
struct DTV {
uptr beg, size;
};
+ struct DTVBlock {
+ atomic_uintptr_t next;
+ DTV dtvs[(4096UL - sizeof(next)) / sizeof(DTLS::DTV)];
+ };
+
+ static_assert(sizeof(DTVBlock) <= 4096UL, "Unexpected block size");
- uptr dtv_size;
- DTV *dtv; // dtv_size elements, allocated by MmapOrDie.
+ atomic_uintptr_t dtv_block;
// Auxiliary fields, don't access them outside sanitizer_tls_get_addr.cpp
uptr last_memalign_size;
uptr last_memalign_ptr;
};
+template <typename Fn>
+void ForEachDVT(DTLS *dtls, const Fn &fn) {
+ DTLS::DTVBlock *block =
+ (DTLS::DTVBlock *)atomic_load(&dtls->dtv_block, memory_order_acquire);
+ while (block) {
+ int id = 0;
+ for (auto &d : block->dtvs) fn(d, id++);
+ block = (DTLS::DTVBlock *)atomic_load(&block->next, memory_order_acquire);
+ }
+}
+
// Returns pointer and size of a linker-allocated TLS block.
// Each block is returned exactly once.
DTLS::DTV *DTLS_on_tls_get_addr(void *arg, void *res, uptr static_tls_begin,
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_unwind_win.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_unwind_win.cpp
index 8e06940685dc..7e01c81d0422 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_unwind_win.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_unwind_win.cpp
@@ -37,8 +37,16 @@ void BufferedStackTrace::UnwindSlow(uptr pc, u32 max_depth) {
// Skip the RTL frames by searching for the PC in the stacktrace.
uptr pc_location = LocatePcInTrace(pc);
PopStackFrames(pc_location);
+
+ // Replace the first frame with the PC because the frame in the
+ // stacktrace might be incorrect.
+ trace_buffer[0] = pc;
}
+#ifdef __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wframe-larger-than="
+#endif
void BufferedStackTrace::UnwindSlow(uptr pc, void *context, u32 max_depth) {
CHECK(context);
CHECK_GE(max_depth, 2);
@@ -70,6 +78,9 @@ void BufferedStackTrace::UnwindSlow(uptr pc, void *context, u32 max_depth) {
trace_buffer[size++] = (uptr)stack_frame.AddrPC.Offset;
}
}
+#ifdef __clang__
+#pragma clang diagnostic pop
+#endif
#endif // #if !SANITIZER_GO
#endif // SANITIZER_WINDOWS
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp
index fca15beb6161..63c90785f270 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp
@@ -348,6 +348,22 @@ bool DontDumpShadowMemory(uptr addr, uptr length) {
return true;
}
+uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
+ uptr min_shadow_base_alignment,
+ UNUSED uptr &high_mem_end) {
+ const uptr granularity = GetMmapGranularity();
+ const uptr alignment =
+ Max<uptr>(granularity << shadow_scale, 1ULL << min_shadow_base_alignment);
+ const uptr left_padding =
+ Max<uptr>(granularity, 1ULL << min_shadow_base_alignment);
+ uptr space_size = shadow_size_bytes + left_padding;
+ uptr shadow_start = FindAvailableMemoryRange(space_size, alignment,
+ granularity, nullptr, nullptr);
+ CHECK_NE((uptr)0, shadow_start);
+ CHECK(IsAligned(shadow_start, alignment));
+ return shadow_start;
+}
+
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
uptr *largest_gap_found,
uptr *max_occupied_addr) {
@@ -475,8 +491,6 @@ void DumpProcessMap() {
}
#endif
-void PrintModuleMap() { }
-
void DisableCoreDumperIfNecessary() {
// Do nothing.
}
@@ -597,6 +611,10 @@ static uptr GetPreferredBase(const char *modname) {
return (uptr)pe_header->ImageBase;
}
+#ifdef __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wframe-larger-than="
+#endif
void ListOfModules::init() {
clearOrInit();
HANDLE cur_process = GetCurrentProcess();
@@ -658,6 +676,9 @@ void ListOfModules::init() {
}
UnmapOrDie(hmodules, modules_buffer_size);
}
+#ifdef __clang__
+#pragma clang diagnostic pop
+#endif
void ListOfModules::fallbackInit() { clear(); }
@@ -942,22 +963,27 @@ void SignalContext::InitPcSpBp() {
uptr SignalContext::GetAddress() const {
EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;
- return exception_record->ExceptionInformation[1];
+ if (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION)
+ return exception_record->ExceptionInformation[1];
+ return (uptr)exception_record->ExceptionAddress;
}
bool SignalContext::IsMemoryAccess() const {
- return GetWriteFlag() != SignalContext::UNKNOWN;
+ return ((EXCEPTION_RECORD *)siginfo)->ExceptionCode ==
+ EXCEPTION_ACCESS_VIOLATION;
}
-bool SignalContext::IsTrueFaultingAddress() const {
- // FIXME: Provide real implementation for this. See Linux and Mac variants.
- return IsMemoryAccess();
-}
+bool SignalContext::IsTrueFaultingAddress() const { return true; }
SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;
+
+ // The write flag is only available for access violation exceptions.
+ if (exception_record->ExceptionCode != EXCEPTION_ACCESS_VIOLATION)
+ return SignalContext::UNKNOWN;
+
// The contents of this array are documented at
- // https://msdn.microsoft.com/en-us/library/windows/desktop/aa363082(v=vs.85).aspx
+ // https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-exception_record
// The first element indicates read as 0, write as 1, or execute as 8. The
// second element is the faulting address.
switch (exception_record->ExceptionInformation[0]) {
@@ -1124,6 +1150,8 @@ void LogFullErrorReport(const char *buffer) {
}
#endif // SANITIZER_WIN_TRACE
+void InitializePlatformCommonFlags(CommonFlags *cf) {}
+
} // namespace __sanitizer
#endif // _WIN32
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh
index a0aa79ee54bb..5b6433011a09 100755
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh
@@ -98,7 +98,7 @@ make -j${J} libz.a
if [[ ! -d ${LIBCXX_BUILD} ]]; then
mkdir -p ${LIBCXX_BUILD}
cd ${LIBCXX_BUILD}
- LIBCXX_FLAGS="${FLAGS} -Wno-macro-redefined -I${LIBCXX_SRC}/include"
+ LIBCXX_FLAGS="${FLAGS} -Wno-macro-redefined"
PROJECTS=
if [[ ! -d $LLVM_SRC/projects/libcxxabi ]] ; then
PROJECTS="-DLLVM_ENABLE_PROJECTS='libcxx;libcxxabi'"
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt
index c3f41f19c365..29b2960e11fe 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt
@@ -31,6 +31,8 @@ __interceptor_pthread_setspecific w
__interceptor_read w
__interceptor_realpath w
__isinf U
+__isoc99_sscanf U
+__isoc99_vsscanf U
__moddi3 U
__sanitizer_symbolize_code T
__sanitizer_symbolize_data T
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_allocator.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/scudo_allocator.cpp
index d9023c2f7ab6..82864405dfb0 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_allocator.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/scudo_allocator.cpp
@@ -29,6 +29,7 @@
# include "gwp_asan/guarded_pool_allocator.h"
# include "gwp_asan/optional/backtrace.h"
# include "gwp_asan/optional/options_parser.h"
+#include "gwp_asan/optional/segv_handler.h"
#endif // GWP_ASAN_HOOKS
#include <errno.h>
@@ -43,7 +44,7 @@ static u32 Cookie;
// at compilation or at runtime.
static atomic_uint8_t HashAlgorithm = { CRC32Software };
-INLINE u32 computeCRC32(u32 Crc, uptr Value, uptr *Array, uptr ArraySize) {
+inline u32 computeCRC32(u32 Crc, uptr Value, uptr *Array, uptr ArraySize) {
// If the hardware CRC32 feature is defined here, it was enabled everywhere,
// as opposed to only for scudo_crc32.cpp. This means that other hardware
// specific instructions were likely emitted at other places, and as a
@@ -70,31 +71,31 @@ INLINE u32 computeCRC32(u32 Crc, uptr Value, uptr *Array, uptr ArraySize) {
static BackendT &getBackend();
namespace Chunk {
- static INLINE AtomicPackedHeader *getAtomicHeader(void *Ptr) {
+ static inline AtomicPackedHeader *getAtomicHeader(void *Ptr) {
return reinterpret_cast<AtomicPackedHeader *>(reinterpret_cast<uptr>(Ptr) -
getHeaderSize());
}
- static INLINE
+ static inline
const AtomicPackedHeader *getConstAtomicHeader(const void *Ptr) {
return reinterpret_cast<const AtomicPackedHeader *>(
reinterpret_cast<uptr>(Ptr) - getHeaderSize());
}
- static INLINE bool isAligned(const void *Ptr) {
+ static inline bool isAligned(const void *Ptr) {
return IsAligned(reinterpret_cast<uptr>(Ptr), MinAlignment);
}
// We can't use the offset member of the chunk itself, as we would double
// fetch it without any warranty that it wouldn't have been tampered. To
// prevent this, we work with a local copy of the header.
- static INLINE void *getBackendPtr(const void *Ptr, UnpackedHeader *Header) {
+ static inline void *getBackendPtr(const void *Ptr, UnpackedHeader *Header) {
return reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
getHeaderSize() - (Header->Offset << MinAlignmentLog));
}
// Returns the usable size for a chunk, meaning the amount of bytes from the
// beginning of the user data to the end of the backend allocated chunk.
- static INLINE uptr getUsableSize(const void *Ptr, UnpackedHeader *Header) {
+ static inline uptr getUsableSize(const void *Ptr, UnpackedHeader *Header) {
const uptr ClassId = Header->ClassId;
if (ClassId)
return PrimaryT::ClassIdToSize(ClassId) - getHeaderSize() -
@@ -104,7 +105,7 @@ namespace Chunk {
}
// Returns the size the user requested when allocating the chunk.
- static INLINE uptr getSize(const void *Ptr, UnpackedHeader *Header) {
+ static inline uptr getSize(const void *Ptr, UnpackedHeader *Header) {
const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
if (Header->ClassId)
return SizeOrUnusedBytes;
@@ -113,7 +114,7 @@ namespace Chunk {
}
// Compute the checksum of the chunk pointer and its header.
- static INLINE u16 computeChecksum(const void *Ptr, UnpackedHeader *Header) {
+ static inline u16 computeChecksum(const void *Ptr, UnpackedHeader *Header) {
UnpackedHeader ZeroChecksumHeader = *Header;
ZeroChecksumHeader.Checksum = 0;
uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)];
@@ -125,7 +126,7 @@ namespace Chunk {
// Checks the validity of a chunk by verifying its checksum. It doesn't
// incur termination in the event of an invalid chunk.
- static INLINE bool isValid(const void *Ptr) {
+ static inline bool isValid(const void *Ptr) {
PackedHeader NewPackedHeader =
atomic_load_relaxed(getConstAtomicHeader(Ptr));
UnpackedHeader NewUnpackedHeader =
@@ -139,7 +140,7 @@ namespace Chunk {
COMPILER_CHECK(ChunkAvailable == 0);
// Loads and unpacks the header, verifying the checksum in the process.
- static INLINE
+ static inline
void loadHeader(const void *Ptr, UnpackedHeader *NewUnpackedHeader) {
PackedHeader NewPackedHeader =
atomic_load_relaxed(getConstAtomicHeader(Ptr));
@@ -150,7 +151,7 @@ namespace Chunk {
}
// Packs and stores the header, computing the checksum in the process.
- static INLINE void storeHeader(void *Ptr, UnpackedHeader *NewUnpackedHeader) {
+ static inline void storeHeader(void *Ptr, UnpackedHeader *NewUnpackedHeader) {
NewUnpackedHeader->Checksum = computeChecksum(Ptr, NewUnpackedHeader);
PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
atomic_store_relaxed(getAtomicHeader(Ptr), NewPackedHeader);
@@ -159,7 +160,7 @@ namespace Chunk {
// Packs and stores the header, computing the checksum in the process. We
// compare the current header with the expected provided one to ensure that
// we are not being raced by a corruption occurring in another thread.
- static INLINE void compareExchangeHeader(void *Ptr,
+ static inline void compareExchangeHeader(void *Ptr,
UnpackedHeader *NewUnpackedHeader,
UnpackedHeader *OldUnpackedHeader) {
NewUnpackedHeader->Checksum = computeChecksum(Ptr, NewUnpackedHeader);
@@ -671,15 +672,17 @@ static BackendT &getBackend() {
void initScudo() {
Instance.init();
#ifdef GWP_ASAN_HOOKS
- gwp_asan::options::initOptions();
+ gwp_asan::options::initOptions(__sanitizer::GetEnv("GWP_ASAN_OPTIONS"),
+ Printf);
gwp_asan::options::Options &Opts = gwp_asan::options::getOptions();
- Opts.Backtrace = gwp_asan::options::getBacktraceFunction();
+ Opts.Backtrace = gwp_asan::backtrace::getBacktraceFunction();
GuardedAlloc.init(Opts);
if (Opts.InstallSignalHandlers)
- gwp_asan::crash_handler::installSignalHandlers(
+ gwp_asan::segv_handler::installSignalHandlers(
&GuardedAlloc, __sanitizer::Printf,
- gwp_asan::options::getPrintBacktraceFunction(), Opts.Backtrace);
+ gwp_asan::backtrace::getPrintBacktraceFunction(),
+ gwp_asan::backtrace::getSegvBacktraceFunction());
#endif // GWP_ASAN_HOOKS
}
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_crc32.h b/contrib/llvm-project/compiler-rt/lib/scudo/scudo_crc32.h
index bad15a929a3e..ef40595a56d1 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_crc32.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/scudo_crc32.h
@@ -85,7 +85,7 @@ static const u32 CRC32Table[] = {
0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
};
-INLINE u32 computeSoftwareCRC32(u32 Crc, uptr Data) {
+inline u32 computeSoftwareCRC32(u32 Crc, uptr Data) {
for (uptr i = 0; i < sizeof(Data); i++) {
Crc = CRC32Table[(Crc ^ Data) & 0xff] ^ (Crc >> 8);
Data >>= 8;
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd.h b/contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd.h
index 1d4e4e6f126e..ec8dabc1f8a7 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd.h
@@ -29,7 +29,7 @@ struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) ScudoTSD {
void init();
void commitBack();
- INLINE bool tryLock() {
+ inline bool tryLock() {
if (Mutex.TryLock()) {
atomic_store_relaxed(&Precedence, 0);
return true;
@@ -40,14 +40,14 @@ struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) ScudoTSD {
return false;
}
- INLINE void lock() {
+ inline void lock() {
atomic_store_relaxed(&Precedence, 0);
Mutex.Lock();
}
- INLINE void unlock() { Mutex.Unlock(); }
+ inline void unlock() { Mutex.Unlock(); }
- INLINE uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }
+ inline uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }
private:
StaticSpinMutex Mutex;
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_utils.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/scudo_utils.cpp
index f31d68058acb..b7ce8f915817 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_utils.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/scudo_utils.cpp
@@ -121,7 +121,7 @@ bool hasHardwareCRC32ARMPosix() { return false; }
// initialized after the other globals, so we can check its value to know if
// calling getauxval is safe.
extern "C" SANITIZER_WEAK_ATTRIBUTE char *__progname;
-INLINE bool areBionicGlobalsInitialized() {
+inline bool areBionicGlobalsInitialized() {
return !SANITIZER_ANDROID || (&__progname && __progname);
}
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_utils.h b/contrib/llvm-project/compiler-rt/lib/scudo/scudo_utils.h
index a8dfbdeb3b70..b657c69d9baf 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_utils.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/scudo_utils.h
@@ -20,7 +20,7 @@
namespace __scudo {
template <class Dest, class Source>
-INLINE Dest bit_cast(const Source& source) {
+inline Dest bit_cast(const Source& source) {
static_assert(sizeof(Dest) == sizeof(Source), "Sizes are not equal!");
Dest dest;
memcpy(&dest, &source, sizeof(dest));
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/allocator_config.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/allocator_config.h
index ad2a17ef7014..12daaa2f6b44 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/allocator_config.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/allocator_config.h
@@ -25,55 +25,91 @@ namespace scudo {
struct DefaultConfig {
using SizeClassMap = DefaultSizeClassMap;
+ static const bool MaySupportMemoryTagging = false;
+
#if SCUDO_CAN_USE_PRIMARY64
- // 1GB Regions
- typedef SizeClassAllocator64<SizeClassMap, 30U> Primary;
+ typedef SizeClassAllocator64<DefaultConfig> Primary;
+ static const uptr PrimaryRegionSizeLog = 30U;
#else
- // 512KB regions
- typedef SizeClassAllocator32<SizeClassMap, 19U> Primary;
+ typedef SizeClassAllocator32<DefaultConfig> Primary;
+ static const uptr PrimaryRegionSizeLog = 19U;
#endif
- typedef MapAllocator<MapAllocatorCache<>> Secondary;
+ static const s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
+ static const s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
+
+ typedef MapAllocatorCache<DefaultConfig> SecondaryCache;
+ static const u32 SecondaryCacheEntriesArraySize = 32U;
+ static const u32 SecondaryCacheDefaultMaxEntriesCount = 32U;
+ static const uptr SecondaryCacheDefaultMaxEntrySize = 1UL << 19;
+ static const s32 SecondaryCacheMinReleaseToOsIntervalMs = INT32_MIN;
+ static const s32 SecondaryCacheMaxReleaseToOsIntervalMs = INT32_MAX;
+
template <class A> using TSDRegistryT = TSDRegistryExT<A>; // Exclusive
};
struct AndroidConfig {
using SizeClassMap = AndroidSizeClassMap;
+ static const bool MaySupportMemoryTagging = true;
+
#if SCUDO_CAN_USE_PRIMARY64
- // 256MB regions
- typedef SizeClassAllocator64<SizeClassMap, 28U, 1000, 1000,
- /*MaySupportMemoryTagging=*/true>
- Primary;
+ typedef SizeClassAllocator64<AndroidConfig> Primary;
+ static const uptr PrimaryRegionSizeLog = 28U;
#else
- // 256KB regions
- typedef SizeClassAllocator32<SizeClassMap, 18U, 1000, 1000> Primary;
+ typedef SizeClassAllocator32<AndroidConfig> Primary;
+ static const uptr PrimaryRegionSizeLog = 18U;
#endif
- // Cache blocks up to 2MB
- typedef MapAllocator<MapAllocatorCache<32U, 2UL << 20, 0, 1000>> Secondary;
+ static const s32 PrimaryMinReleaseToOsIntervalMs = 1000;
+ static const s32 PrimaryMaxReleaseToOsIntervalMs = 1000;
+
+ typedef MapAllocatorCache<AndroidConfig> SecondaryCache;
+ static const u32 SecondaryCacheEntriesArraySize = 256U;
+ static const u32 SecondaryCacheDefaultMaxEntriesCount = 32U;
+ static const uptr SecondaryCacheDefaultMaxEntrySize = 2UL << 20;
+ static const s32 SecondaryCacheMinReleaseToOsIntervalMs = 0;
+ static const s32 SecondaryCacheMaxReleaseToOsIntervalMs = 1000;
+
template <class A>
- using TSDRegistryT = TSDRegistrySharedT<A, 2U>; // Shared, max 2 TSDs.
+ using TSDRegistryT = TSDRegistrySharedT<A, 8U, 2U>; // Shared, max 8 TSDs.
};
struct AndroidSvelteConfig {
using SizeClassMap = SvelteSizeClassMap;
+ static const bool MaySupportMemoryTagging = false;
+
#if SCUDO_CAN_USE_PRIMARY64
- // 128MB regions
- typedef SizeClassAllocator64<SizeClassMap, 27U, 1000, 1000> Primary;
+ typedef SizeClassAllocator64<AndroidSvelteConfig> Primary;
+ static const uptr PrimaryRegionSizeLog = 27U;
#else
- // 64KB regions
- typedef SizeClassAllocator32<SizeClassMap, 16U, 1000, 1000> Primary;
+ typedef SizeClassAllocator32<AndroidSvelteConfig> Primary;
+ static const uptr PrimaryRegionSizeLog = 16U;
#endif
- typedef MapAllocator<MapAllocatorCache<4U, 1UL << 18, 0, 0>> Secondary;
+ static const s32 PrimaryMinReleaseToOsIntervalMs = 1000;
+ static const s32 PrimaryMaxReleaseToOsIntervalMs = 1000;
+
+ typedef MapAllocatorCache<AndroidSvelteConfig> SecondaryCache;
+ static const u32 SecondaryCacheEntriesArraySize = 16U;
+ static const u32 SecondaryCacheDefaultMaxEntriesCount = 4U;
+ static const uptr SecondaryCacheDefaultMaxEntrySize = 1UL << 18;
+ static const s32 SecondaryCacheMinReleaseToOsIntervalMs = 0;
+ static const s32 SecondaryCacheMaxReleaseToOsIntervalMs = 0;
+
template <class A>
- using TSDRegistryT = TSDRegistrySharedT<A, 1U>; // Shared, only 1 TSD.
+ using TSDRegistryT = TSDRegistrySharedT<A, 2U, 1U>; // Shared, max 2 TSDs.
};
#if SCUDO_CAN_USE_PRIMARY64
struct FuchsiaConfig {
- // 1GB Regions
- typedef SizeClassAllocator64<DefaultSizeClassMap, 30U> Primary;
- typedef MapAllocator<MapAllocatorNoCache> Secondary;
+ using SizeClassMap = DefaultSizeClassMap;
+ static const bool MaySupportMemoryTagging = false;
+
+ typedef SizeClassAllocator64<FuchsiaConfig> Primary;
+ static const uptr PrimaryRegionSizeLog = 30U;
+ static const s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
+ static const s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
+
+ typedef MapAllocatorNoCache SecondaryCache;
template <class A>
- using TSDRegistryT = TSDRegistrySharedT<A, 8U>; // Shared, max 8 TSDs.
+ using TSDRegistryT = TSDRegistrySharedT<A, 8U, 4U>; // Shared, max 8 TSDs.
};
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/atomic_helpers.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/atomic_helpers.h
index 1ea1a86ae506..d88f5d7be642 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/atomic_helpers.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/atomic_helpers.h
@@ -90,6 +90,20 @@ inline typename T::Type atomic_fetch_sub(volatile T *A, typename T::Type V,
}
template <typename T>
+inline typename T::Type atomic_fetch_and(volatile T *A, typename T::Type V,
+ memory_order MO) {
+ DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+ return __atomic_fetch_and(&A->ValDoNotUse, V, MO);
+}
+
+template <typename T>
+inline typename T::Type atomic_fetch_or(volatile T *A, typename T::Type V,
+ memory_order MO) {
+ DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+ return __atomic_fetch_or(&A->ValDoNotUse, V, MO);
+}
+
+template <typename T>
inline typename T::Type atomic_exchange(volatile T *A, typename T::Type V,
memory_order MO) {
DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
@@ -106,14 +120,6 @@ inline bool atomic_compare_exchange_strong(volatile T *A, typename T::Type *Cmp,
__ATOMIC_RELAXED);
}
-template <typename T>
-inline bool atomic_compare_exchange_weak(volatile T *A, typename T::Type *Cmp,
- typename T::Type Xchg,
- memory_order MO) {
- return __atomic_compare_exchange(&A->ValDoNotUse, Cmp, &Xchg, true, MO,
- __ATOMIC_RELAXED);
-}
-
// Clutter-reducing helpers.
template <typename T>
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/chunk.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/chunk.h
index f4d68b3ac6c4..69b8e1b12a91 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/chunk.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/chunk.h
@@ -65,7 +65,8 @@ typedef u64 PackedHeader;
struct UnpackedHeader {
uptr ClassId : 8;
u8 State : 2;
- u8 Origin : 2;
+ // Origin if State == Allocated, or WasZeroed otherwise.
+ u8 OriginOrWasZeroed : 2;
uptr SizeOrUnusedBytes : 20;
uptr Offset : 16;
uptr Checksum : 16;
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/combined.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/combined.h
index 3bb41eca88f7..0df7a652ffa5 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/combined.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/combined.h
@@ -15,6 +15,7 @@
#include "flags_parser.h"
#include "local_cache.h"
#include "memtag.h"
+#include "options.h"
#include "quarantine.h"
#include "report.h"
#include "secondary.h"
@@ -27,6 +28,7 @@
#ifdef GWP_ASAN_HOOKS
#include "gwp_asan/guarded_pool_allocator.h"
#include "gwp_asan/optional/backtrace.h"
+#include "gwp_asan/optional/options_parser.h"
#include "gwp_asan/optional/segv_handler.h"
#endif // GWP_ASAN_HOOKS
@@ -41,8 +43,6 @@ extern "C" size_t android_unsafe_frame_pointer_chase(scudo::uptr *buf,
namespace scudo {
-enum class Option { ReleaseInterval };
-
template <class Params, void (*PostInitCallback)(void) = EmptyCallback>
class Allocator {
public:
@@ -99,6 +99,12 @@ public:
Header.State = Chunk::State::Allocated;
Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);
+ // Reset tag to 0 as this chunk may have been previously used for a tagged
+ // user allocation.
+ if (UNLIKELY(useMemoryTagging<Params>(Allocator.Primary.Options.load())))
+ storeTags(reinterpret_cast<uptr>(Ptr),
+ reinterpret_cast<uptr>(Ptr) + sizeof(QuarantineBatch));
+
return Ptr;
}
@@ -146,15 +152,22 @@ public:
reportUnrecognizedFlags();
// Store some flags locally.
- Options.MayReturnNull = getFlags()->may_return_null;
- Options.FillContents =
- getFlags()->zero_contents
- ? ZeroFill
- : (getFlags()->pattern_fill_contents ? PatternOrZeroFill : NoFill);
- Options.DeallocTypeMismatch = getFlags()->dealloc_type_mismatch;
- Options.DeleteSizeMismatch = getFlags()->delete_size_mismatch;
- Options.TrackAllocationStacks = false;
- Options.QuarantineMaxChunkSize =
+ if (getFlags()->may_return_null)
+ Primary.Options.set(OptionBit::MayReturnNull);
+ if (getFlags()->zero_contents)
+ Primary.Options.setFillContentsMode(ZeroFill);
+ else if (getFlags()->pattern_fill_contents)
+ Primary.Options.setFillContentsMode(PatternOrZeroFill);
+ if (getFlags()->dealloc_type_mismatch)
+ Primary.Options.set(OptionBit::DeallocTypeMismatch);
+ if (getFlags()->delete_size_mismatch)
+ Primary.Options.set(OptionBit::DeleteSizeMismatch);
+ if (allocatorSupportsMemoryTagging<Params>() &&
+ systemSupportsMemoryTagging())
+ Primary.Options.set(OptionBit::UseMemoryTagging);
+ Primary.Options.set(OptionBit::UseOddEvenTags);
+
+ QuarantineMaxChunkSize =
static_cast<u32>(getFlags()->quarantine_max_chunk_size);
Stats.initLinkerInitialized();
@@ -171,31 +184,31 @@ public:
// be functional, best called from PostInitCallback.
void initGwpAsan() {
#ifdef GWP_ASAN_HOOKS
- gwp_asan::options::Options Opt;
- Opt.Enabled = getFlags()->GWP_ASAN_Enabled;
// Bear in mind - Scudo has its own alignment guarantees that are strictly
// enforced. Scudo exposes the same allocation function for everything from
// malloc() to posix_memalign, so in general this flag goes unused, as Scudo
// will always ask GWP-ASan for an aligned amount of bytes.
- Opt.PerfectlyRightAlign = getFlags()->GWP_ASAN_PerfectlyRightAlign;
- Opt.MaxSimultaneousAllocations =
- getFlags()->GWP_ASAN_MaxSimultaneousAllocations;
- Opt.SampleRate = getFlags()->GWP_ASAN_SampleRate;
- Opt.InstallSignalHandlers = getFlags()->GWP_ASAN_InstallSignalHandlers;
+ gwp_asan::options::initOptions(getEnv("GWP_ASAN_OPTIONS"), Printf);
+ gwp_asan::options::Options Opt = gwp_asan::options::getOptions();
// Embedded GWP-ASan is locked through the Scudo atfork handler (via
// Allocator::disable calling GWPASan.disable). Disable GWP-ASan's atfork
// handler.
Opt.InstallForkHandlers = false;
- Opt.Backtrace = gwp_asan::options::getBacktraceFunction();
+ Opt.Backtrace = gwp_asan::backtrace::getBacktraceFunction();
GuardedAlloc.init(Opt);
if (Opt.InstallSignalHandlers)
- gwp_asan::crash_handler::installSignalHandlers(
- &GuardedAlloc, Printf, gwp_asan::options::getPrintBacktraceFunction(),
- Opt.Backtrace);
+ gwp_asan::segv_handler::installSignalHandlers(
+ &GuardedAlloc, Printf,
+ gwp_asan::backtrace::getPrintBacktraceFunction(),
+ gwp_asan::backtrace::getSegvBacktraceFunction());
#endif // GWP_ASAN_HOOKS
}
+ ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
+ TSDRegistry.initThreadMaybe(this, MinimalInit);
+ }
+
void reset() { memset(this, 0, sizeof(*this)); }
void unmapTestOnly() {
@@ -203,7 +216,7 @@ public:
Primary.unmapTestOnly();
#ifdef GWP_ASAN_HOOKS
if (getFlags()->GWP_ASAN_InstallSignalHandlers)
- gwp_asan::crash_handler::uninstallSignalHandlers();
+ gwp_asan::segv_handler::uninstallSignalHandlers();
GuardedAlloc.uninitTestOnly();
#endif // GWP_ASAN_HOOKS
}
@@ -227,7 +240,7 @@ public:
}
ALWAYS_INLINE void *untagPointerMaybe(void *Ptr) {
- if (Primary.SupportsMemoryTagging)
+ if (allocatorSupportsMemoryTagging<Params>())
return reinterpret_cast<void *>(
untagPointer(reinterpret_cast<uptr>(Ptr)));
return Ptr;
@@ -247,6 +260,19 @@ public:
#endif
}
+ uptr computeOddEvenMaskForPointerMaybe(Options Options, uptr Ptr, uptr Size) {
+ if (!Options.get(OptionBit::UseOddEvenTags))
+ return 0;
+
+ // If a chunk's tag is odd, we want the tags of the surrounding blocks to be
+ // even, and vice versa. Blocks are laid out Size bytes apart, and adding
+ // Size to Ptr will flip the least significant set bit of Size in Ptr, so
+ // that bit will have the pattern 010101... for consecutive blocks, which we
+ // can use to determine which tag mask to use.
+ return (Ptr & (1ULL << getLeastSignificantSetBitIndex(Size))) ? 0xaaaa
+ : 0x5555;
+ }
+
NOINLINE void *allocate(uptr Size, Chunk::Origin Origin,
uptr Alignment = MinAlignment,
bool ZeroContents = false) {
@@ -259,11 +285,14 @@ public:
}
#endif // GWP_ASAN_HOOKS
- FillContentsMode FillContents =
- ZeroContents ? ZeroFill : Options.FillContents;
+ const Options Options = Primary.Options.load();
+ const FillContentsMode FillContents = ZeroContents ? ZeroFill
+ : TSDRegistry.getDisableMemInit()
+ ? NoFill
+ : Options.getFillContentsMode();
if (UNLIKELY(Alignment > MaxAlignment)) {
- if (Options.MayReturnNull)
+ if (Options.get(OptionBit::MayReturnNull))
return nullptr;
reportAlignmentTooBig(Alignment, MaxAlignment);
}
@@ -282,7 +311,7 @@ public:
// Takes care of extravagantly large sizes as well as integer overflows.
static_assert(MaxAllowedMallocSize < UINTPTR_MAX - MaxAlignment, "");
if (UNLIKELY(Size >= MaxAllowedMallocSize)) {
- if (Options.MayReturnNull)
+ if (Options.get(OptionBit::MayReturnNull))
return nullptr;
reportAllocationSizeTooBig(Size, NeededSize, MaxAllowedMallocSize);
}
@@ -290,7 +319,7 @@ public:
void *Block = nullptr;
uptr ClassId = 0;
- uptr SecondaryBlockEnd;
+ uptr SecondaryBlockEnd = 0;
if (LIKELY(PrimaryT::canAllocate(NeededSize))) {
ClassId = SizeClassMap::getClassIdBySize(NeededSize);
DCHECK_NE(ClassId, 0U);
@@ -302,15 +331,10 @@ public:
// larger class until it fits. If it fails to fit in the largest class,
// fallback to the Secondary.
if (UNLIKELY(!Block)) {
- while (ClassId < SizeClassMap::LargestClassId) {
+ while (ClassId < SizeClassMap::LargestClassId && !Block)
Block = TSD->Cache.allocate(++ClassId);
- if (LIKELY(Block)) {
- break;
- }
- }
- if (UNLIKELY(!Block)) {
+ if (!Block)
ClassId = 0;
- }
}
if (UnlockRequired)
TSD->unlock();
@@ -320,7 +344,7 @@ public:
FillContents);
if (UNLIKELY(!Block)) {
- if (Options.MayReturnNull)
+ if (Options.get(OptionBit::MayReturnNull))
return nullptr;
reportOutOfMemory(NeededSize);
}
@@ -331,7 +355,7 @@ public:
void *Ptr = reinterpret_cast<void *>(UserPtr);
void *TaggedPtr = Ptr;
- if (ClassId) {
+ if (LIKELY(ClassId)) {
// We only need to zero or tag the contents for Primary backed
// allocations. We only set tags for primary allocations in order to avoid
// faulting potentially large numbers of pages for large secondary
@@ -343,10 +367,11 @@ public:
//
// When memory tagging is enabled, zeroing the contents is done as part of
// setting the tag.
- if (UNLIKELY(useMemoryTagging())) {
+ if (UNLIKELY(useMemoryTagging<Params>(Options))) {
uptr PrevUserPtr;
Chunk::UnpackedHeader Header;
- const uptr BlockEnd = BlockUptr + PrimaryT::getSizeByClassId(ClassId);
+ const uptr BlockSize = PrimaryT::getSizeByClassId(ClassId);
+ const uptr BlockEnd = BlockUptr + BlockSize;
// If possible, try to reuse the UAF tag that was set by deallocate().
// For simplicity, only reuse tags if we have the same start address as
// the previous allocation. This handles the majority of cases since
@@ -390,15 +415,27 @@ public:
PrevEnd = NextPage;
TaggedPtr = reinterpret_cast<void *>(TaggedUserPtr);
resizeTaggedChunk(PrevEnd, TaggedUserPtr + Size, BlockEnd);
- if (Size) {
+ if (UNLIKELY(FillContents != NoFill && !Header.OriginOrWasZeroed)) {
+ // If an allocation needs to be zeroed (i.e. calloc) we can normally
+ // avoid zeroing the memory now since we can rely on memory having
+ // been zeroed on free, as this is normally done while setting the
+ // UAF tag. But if tagging was disabled per-thread when the memory
+ // was freed, it would not have been retagged and thus zeroed, and
+ // therefore it needs to be zeroed now.
+ memset(TaggedPtr, 0,
+ Min(Size, roundUpTo(PrevEnd - TaggedUserPtr,
+ archMemoryTagGranuleSize())));
+ } else if (Size) {
// Clear any stack metadata that may have previously been stored in
// the chunk data.
memset(TaggedPtr, 0, archMemoryTagGranuleSize());
}
} else {
- TaggedPtr = prepareTaggedChunk(Ptr, Size, BlockEnd);
+ const uptr OddEvenMask =
+ computeOddEvenMaskForPointerMaybe(Options, BlockUptr, BlockSize);
+ TaggedPtr = prepareTaggedChunk(Ptr, Size, OddEvenMask, BlockEnd);
}
- storeAllocationStackMaybe(Ptr);
+ storeAllocationStackMaybe(Options, Ptr);
} else if (UNLIKELY(FillContents != NoFill)) {
// This condition is not necessarily unlikely, but since memset is
// costly, we might as well mark it as such.
@@ -421,13 +458,13 @@ public:
}
Header.ClassId = ClassId & Chunk::ClassIdMask;
Header.State = Chunk::State::Allocated;
- Header.Origin = Origin & Chunk::OriginMask;
+ Header.OriginOrWasZeroed = Origin & Chunk::OriginMask;
Header.SizeOrUnusedBytes =
(ClassId ? Size : SecondaryBlockEnd - (UserPtr + Size)) &
Chunk::SizeOrUnusedBytesMask;
Chunk::storeHeader(Cookie, Ptr, &Header);
- if (&__scudo_allocate_hook)
+ if (UNLIKELY(&__scudo_allocate_hook))
__scudo_allocate_hook(TaggedPtr, Size);
return TaggedPtr;
@@ -450,7 +487,7 @@ public:
}
#endif // GWP_ASAN_HOOKS
- if (&__scudo_deallocate_hook)
+ if (UNLIKELY(&__scudo_deallocate_hook))
__scudo_deallocate_hook(Ptr);
if (UNLIKELY(!Ptr))
@@ -465,30 +502,33 @@ public:
if (UNLIKELY(Header.State != Chunk::State::Allocated))
reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
- if (Options.DeallocTypeMismatch) {
- if (Header.Origin != Origin) {
+
+ const Options Options = Primary.Options.load();
+ if (Options.get(OptionBit::DeallocTypeMismatch)) {
+ if (UNLIKELY(Header.OriginOrWasZeroed != Origin)) {
// With the exception of memalign'd chunks, that can be still be free'd.
- if (UNLIKELY(Header.Origin != Chunk::Origin::Memalign ||
- Origin != Chunk::Origin::Malloc))
+ if (Header.OriginOrWasZeroed != Chunk::Origin::Memalign ||
+ Origin != Chunk::Origin::Malloc)
reportDeallocTypeMismatch(AllocatorAction::Deallocating, Ptr,
- Header.Origin, Origin);
+ Header.OriginOrWasZeroed, Origin);
}
}
const uptr Size = getSize(Ptr, &Header);
- if (DeleteSize && Options.DeleteSizeMismatch) {
+ if (DeleteSize && Options.get(OptionBit::DeleteSizeMismatch)) {
if (UNLIKELY(DeleteSize != Size))
reportDeleteSizeMismatch(Ptr, DeleteSize, Size);
}
- quarantineOrDeallocateChunk(Ptr, &Header, Size);
+ quarantineOrDeallocateChunk(Options, Ptr, &Header, Size);
}
void *reallocate(void *OldPtr, uptr NewSize, uptr Alignment = MinAlignment) {
initThreadMaybe();
+ const Options Options = Primary.Options.load();
if (UNLIKELY(NewSize >= MaxAllowedMallocSize)) {
- if (Options.MayReturnNull)
+ if (Options.get(OptionBit::MayReturnNull))
return nullptr;
reportAllocationSizeTooBig(NewSize, 0, MaxAllowedMallocSize);
}
@@ -523,10 +563,11 @@ public:
// Pointer has to be allocated with a malloc-type function. Some
// applications think that it is OK to realloc a memalign'ed pointer, which
// will trigger this check. It really isn't.
- if (Options.DeallocTypeMismatch) {
- if (UNLIKELY(OldHeader.Origin != Chunk::Origin::Malloc))
+ if (Options.get(OptionBit::DeallocTypeMismatch)) {
+ if (UNLIKELY(OldHeader.OriginOrWasZeroed != Chunk::Origin::Malloc))
reportDeallocTypeMismatch(AllocatorAction::Reallocating, OldPtr,
- OldHeader.Origin, Chunk::Origin::Malloc);
+ OldHeader.OriginOrWasZeroed,
+ Chunk::Origin::Malloc);
}
void *BlockBegin = getBlockBegin(OldPtr, &OldHeader);
@@ -553,11 +594,11 @@ public:
: BlockEnd - (reinterpret_cast<uptr>(OldPtr) + NewSize)) &
Chunk::SizeOrUnusedBytesMask;
Chunk::compareExchangeHeader(Cookie, OldPtr, &NewHeader, &OldHeader);
- if (UNLIKELY(ClassId && useMemoryTagging())) {
+ if (UNLIKELY(ClassId && useMemoryTagging<Params>(Options))) {
resizeTaggedChunk(reinterpret_cast<uptr>(OldTaggedPtr) + OldSize,
reinterpret_cast<uptr>(OldTaggedPtr) + NewSize,
BlockEnd);
- storeAllocationStackMaybe(OldPtr);
+ storeAllocationStackMaybe(Options, OldPtr);
}
return OldTaggedPtr;
}
@@ -568,10 +609,9 @@ public:
// allow for potential further in-place realloc. The gains of such a trick
// are currently unclear.
void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
- if (NewPtr) {
- const uptr OldSize = getSize(OldPtr, &OldHeader);
+ if (LIKELY(NewPtr)) {
memcpy(NewPtr, OldTaggedPtr, Min(NewSize, OldSize));
- quarantineOrDeallocateChunk(OldPtr, &OldHeader, OldSize);
+ quarantineOrDeallocateChunk(Options, OldPtr, &OldHeader, OldSize);
}
return NewPtr;
}
@@ -652,7 +692,7 @@ public:
if (getChunkFromBlock(Block, &Chunk, &Header) &&
Header.State == Chunk::State::Allocated) {
uptr TaggedChunk = Chunk;
- if (useMemoryTagging())
+ if (useMemoryTagging<Params>(Primary.Options.load()))
TaggedChunk = loadTag(Chunk);
Callback(TaggedChunk, getSize(reinterpret_cast<void *>(Chunk), &Header),
Arg);
@@ -667,14 +707,32 @@ public:
bool canReturnNull() {
initThreadMaybe();
- return Options.MayReturnNull;
+ return Primary.Options.load().get(OptionBit::MayReturnNull);
}
bool setOption(Option O, sptr Value) {
- if (O == Option::ReleaseInterval) {
- Primary.setReleaseToOsIntervalMs(static_cast<s32>(Value));
- Secondary.setReleaseToOsIntervalMs(static_cast<s32>(Value));
+ initThreadMaybe();
+ if (O == Option::MemtagTuning) {
+ // Enabling odd/even tags involves a tradeoff between use-after-free
+ // detection and buffer overflow detection. Odd/even tags make it more
+ // likely for buffer overflows to be detected by increasing the size of
+ // the guaranteed "red zone" around the allocation, but on the other hand
+ // use-after-free is less likely to be detected because the tag space for
+ // any particular chunk is cut in half. Therefore we use this tuning
+ // setting to control whether odd/even tags are enabled.
+ if (Value == M_MEMTAG_TUNING_BUFFER_OVERFLOW)
+ Primary.Options.set(OptionBit::UseOddEvenTags);
+ else if (Value == M_MEMTAG_TUNING_UAF)
+ Primary.Options.clear(OptionBit::UseOddEvenTags);
return true;
+ } else {
+ // We leave it to the various sub-components to decide whether or not they
+ // want to handle the option, but we do not want to short-circuit
+ // execution if one of the setOption was to return false.
+ const bool PrimaryResult = Primary.setOption(O, Value);
+ const bool SecondaryResult = Secondary.setOption(O, Value);
+ const bool RegistryResult = TSDRegistry.setOption(O, Value);
+ return PrimaryResult && SecondaryResult && RegistryResult;
}
return false;
}
@@ -725,18 +783,25 @@ public:
Header.State == Chunk::State::Allocated;
}
- bool useMemoryTagging() { return Primary.useMemoryTagging(); }
-
- void disableMemoryTagging() { Primary.disableMemoryTagging(); }
+ bool useMemoryTaggingTestOnly() const {
+ return useMemoryTagging<Params>(Primary.Options.load());
+ }
+ void disableMemoryTagging() {
+ if (allocatorSupportsMemoryTagging<Params>())
+ Primary.Options.clear(OptionBit::UseMemoryTagging);
+ }
void setTrackAllocationStacks(bool Track) {
initThreadMaybe();
- Options.TrackAllocationStacks = Track;
+ if (Track)
+ Primary.Options.set(OptionBit::TrackAllocationStacks);
+ else
+ Primary.Options.clear(OptionBit::TrackAllocationStacks);
}
void setFillContents(FillContentsMode FillContents) {
initThreadMaybe();
- Options.FillContents = FillContents;
+ Primary.Options.setFillContentsMode(FillContents);
}
const char *getStackDepotAddress() const {
@@ -757,7 +822,7 @@ public:
const char *MemoryTags, uintptr_t MemoryAddr,
size_t MemorySize) {
*ErrorInfo = {};
- if (!PrimaryT::SupportsMemoryTagging ||
+ if (!allocatorSupportsMemoryTagging<Params>() ||
MemoryAddr + MemorySize < MemoryAddr)
return;
@@ -767,8 +832,7 @@ public:
PrimaryT::findNearestBlock(RegionInfoPtr, UntaggedFaultAddr);
auto GetGranule = [&](uptr Addr, const char **Data, uint8_t *Tag) -> bool {
- if (Addr < MemoryAddr ||
- Addr + archMemoryTagGranuleSize() < Addr ||
+ if (Addr < MemoryAddr || Addr + archMemoryTagGranuleSize() < Addr ||
Addr + archMemoryTagGranuleSize() > MemoryAddr + MemorySize)
return false;
*Data = &Memory[Addr - MemoryAddr];
@@ -865,7 +929,7 @@ public:
}
private:
- using SecondaryT = typename Params::Secondary;
+ using SecondaryT = MapAllocator<Params>;
typedef typename PrimaryT::SizeClassMap SizeClassMap;
static const uptr MinAlignmentLog = SCUDO_MIN_ALIGNMENT_LOG;
@@ -877,7 +941,7 @@ private:
static_assert(MinAlignment >= sizeof(Chunk::PackedHeader),
"Minimal alignment must at least cover a chunk header.");
- static_assert(!PrimaryT::SupportsMemoryTagging ||
+ static_assert(!allocatorSupportsMemoryTagging<Params>() ||
MinAlignment >= archMemoryTagGranuleSize(),
"");
@@ -903,22 +967,14 @@ private:
static const uptr MaxTraceSize = 64;
+ u32 Cookie;
+ u32 QuarantineMaxChunkSize;
+
GlobalStats Stats;
- TSDRegistryT TSDRegistry;
PrimaryT Primary;
SecondaryT Secondary;
QuarantineT Quarantine;
-
- u32 Cookie;
-
- struct {
- u8 MayReturnNull : 1; // may_return_null
- FillContentsMode FillContents : 2; // zero_contents, pattern_fill_contents
- u8 DeallocTypeMismatch : 1; // dealloc_type_mismatch
- u8 DeleteSizeMismatch : 1; // delete_size_mismatch
- u8 TrackAllocationStacks : 1;
- u32 QuarantineMaxChunkSize; // quarantine_max_chunk_size
- } Options;
+ TSDRegistryT TSDRegistry;
#ifdef GWP_ASAN_HOOKS
gwp_asan::GuardedPoolAllocator GuardedAlloc;
@@ -977,27 +1033,29 @@ private:
reinterpret_cast<uptr>(Ptr) - SizeOrUnusedBytes;
}
- ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
- TSDRegistry.initThreadMaybe(this, MinimalInit);
- }
-
- void quarantineOrDeallocateChunk(void *Ptr, Chunk::UnpackedHeader *Header,
- uptr Size) {
+ void quarantineOrDeallocateChunk(Options Options, void *Ptr,
+ Chunk::UnpackedHeader *Header, uptr Size) {
Chunk::UnpackedHeader NewHeader = *Header;
- if (UNLIKELY(NewHeader.ClassId && useMemoryTagging())) {
+ if (UNLIKELY(NewHeader.ClassId && useMemoryTagging<Params>(Options))) {
u8 PrevTag = extractTag(loadTag(reinterpret_cast<uptr>(Ptr)));
- uptr TaggedBegin, TaggedEnd;
- // Exclude the previous tag so that immediate use after free is detected
- // 100% of the time.
- setRandomTag(Ptr, Size, 1UL << PrevTag, &TaggedBegin, &TaggedEnd);
- storeDeallocationStackMaybe(Ptr, PrevTag);
+ if (!TSDRegistry.getDisableMemInit()) {
+ uptr TaggedBegin, TaggedEnd;
+ const uptr OddEvenMask = computeOddEvenMaskForPointerMaybe(
+ Options, reinterpret_cast<uptr>(getBlockBegin(Ptr, &NewHeader)),
+ SizeClassMap::getSizeByClassId(NewHeader.ClassId));
+ // Exclude the previous tag so that immediate use after free is detected
+ // 100% of the time.
+ setRandomTag(Ptr, Size, OddEvenMask | (1UL << PrevTag), &TaggedBegin,
+ &TaggedEnd);
+ }
+ NewHeader.OriginOrWasZeroed = !TSDRegistry.getDisableMemInit();
+ storeDeallocationStackMaybe(Options, Ptr, PrevTag);
}
// If the quarantine is disabled, the actual size of a chunk is 0 or larger
// than the maximum allowed, we return a chunk directly to the backend.
- // Logical Or can be short-circuited, which introduces unnecessary
- // conditional jumps, so use bitwise Or and let the compiler be clever.
- const bool BypassQuarantine = !Quarantine.getCacheSize() | !Size |
- (Size > Options.QuarantineMaxChunkSize);
+ // This purposefully underflows for Size == 0.
+ const bool BypassQuarantine =
+ !Quarantine.getCacheSize() || ((Size - 1) >= QuarantineMaxChunkSize);
if (BypassQuarantine) {
NewHeader.State = Chunk::State::Available;
Chunk::compareExchangeHeader(Cookie, Ptr, &NewHeader, Header);
@@ -1038,16 +1096,17 @@ private:
return Offset + Chunk::getHeaderSize();
}
- void storeAllocationStackMaybe(void *Ptr) {
- if (!UNLIKELY(Options.TrackAllocationStacks))
+ void storeAllocationStackMaybe(Options Options, void *Ptr) {
+ if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
return;
auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
Ptr32[MemTagAllocationTraceIndex] = collectStackTrace();
Ptr32[MemTagAllocationTidIndex] = getThreadID();
}
- void storeDeallocationStackMaybe(void *Ptr, uint8_t PrevTag) {
- if (!UNLIKELY(Options.TrackAllocationStacks))
+ void storeDeallocationStackMaybe(Options Options, void *Ptr,
+ uint8_t PrevTag) {
+ if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
return;
// Disable tag checks here so that we don't need to worry about zero sized
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/common.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/common.h
index 9037f92b4976..662b733050bb 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/common.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/common.h
@@ -182,6 +182,16 @@ struct BlockInfo {
uptr RegionEnd;
};
+enum class Option : u8 {
+ ReleaseInterval, // Release to OS interval in milliseconds.
+ MemtagTuning, // Whether to tune tagging for UAF or overflow.
+ ThreadDisableMemInit, // Whether to disable automatic heap initialization and,
+ // where possible, memory tagging, on this thread.
+ MaxCacheEntriesCount, // Maximum number of blocks that can be cached.
+ MaxCacheEntrySize, // Maximum size of a block that can be cached.
+ MaxTSDsCount, // Number of usable TSDs for the shared registry.
+};
+
constexpr unsigned char PatternFillByte = 0xAB;
enum FillContentsMode {
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags.cpp
index de5153b288b1..285143a5d6bb 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags.cpp
@@ -23,13 +23,6 @@ void Flags::setDefaults() {
#define SCUDO_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "flags.inc"
#undef SCUDO_FLAG
-
-#ifdef GWP_ASAN_HOOKS
-#define GWP_ASAN_OPTION(Type, Name, DefaultValue, Description) \
- GWP_ASAN_##Name = DefaultValue;
-#include "gwp_asan/options.inc"
-#undef GWP_ASAN_OPTION
-#endif // GWP_ASAN_HOOKS
}
void registerFlags(FlagParser *Parser, Flags *F) {
@@ -38,14 +31,6 @@ void registerFlags(FlagParser *Parser, Flags *F) {
reinterpret_cast<void *>(&F->Name));
#include "flags.inc"
#undef SCUDO_FLAG
-
-#ifdef GWP_ASAN_HOOKS
-#define GWP_ASAN_OPTION(Type, Name, DefaultValue, Description) \
- Parser->registerFlag("GWP_ASAN_" #Name, Description, FlagType::FT_##Type, \
- reinterpret_cast<void *>(&F->GWP_ASAN_##Name));
-#include "gwp_asan/options.inc"
-#undef GWP_ASAN_OPTION
-#endif // GWP_ASAN_HOOKS
}
static const char *getCompileDefinitionScudoDefaultOptions() {
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp
index d29f515215e6..f20a8a84a010 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp
@@ -22,21 +22,25 @@ extern "C" int LLVMFuzzerTestOneInput(uint8_t *Data, size_t Size) {
uintptr_t FaultAddr = FDP.ConsumeIntegral<uintptr_t>();
uintptr_t MemoryAddr = FDP.ConsumeIntegral<uintptr_t>();
- std::string MemoryAndTags = FDP.ConsumeRandomLengthString(FDP.remaining_bytes());
+ std::string MemoryAndTags =
+ FDP.ConsumeRandomLengthString(FDP.remaining_bytes());
const char *Memory = MemoryAndTags.c_str();
// Assume 16-byte alignment.
size_t MemorySize = (MemoryAndTags.length() / 17) * 16;
const char *MemoryTags = Memory + MemorySize;
- std::string StackDepotBytes = FDP.ConsumeRandomLengthString(FDP.remaining_bytes());
+ std::string StackDepotBytes =
+ FDP.ConsumeRandomLengthString(FDP.remaining_bytes());
std::vector<char> StackDepot(sizeof(scudo::StackDepot), 0);
- for (size_t i = 0; i < StackDepotBytes.length() && i < StackDepot.size(); ++i) {
+ for (size_t i = 0; i < StackDepotBytes.length() && i < StackDepot.size();
+ ++i) {
StackDepot[i] = StackDepotBytes[i];
}
std::string RegionInfoBytes = FDP.ConsumeRemainingBytesAsString();
std::vector<char> RegionInfo(AllocatorT::getRegionInfoArraySize(), 0);
- for (size_t i = 0; i < RegionInfoBytes.length() && i < RegionInfo.size(); ++i) {
+ for (size_t i = 0; i < RegionInfoBytes.length() && i < RegionInfo.size();
+ ++i) {
RegionInfo[i] = RegionInfoBytes[i];
}
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/include/scudo/interface.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/include/scudo/interface.h
index d30fb6514a14..68029e4857a3 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/include/scudo/interface.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/include/scudo/interface.h
@@ -10,6 +10,7 @@
#define SCUDO_INTERFACE_H_
#include <stddef.h>
+#include <stdint.h>
extern "C" {
@@ -39,10 +40,11 @@ typedef void (*iterate_callback)(uintptr_t base, size_t size, void *arg);
// the version in the process that analyzes the crash.
//
// fault_addr is the fault address. On aarch64 this is available in the system
-// register FAR_ELx, or far_context.far in an upcoming release of the Linux
-// kernel. This address must include the pointer tag; note that the kernel
-// strips the tag from the fields siginfo.si_addr and sigcontext.fault_address,
-// so these addresses are not suitable to be passed as fault_addr.
+// register FAR_ELx, or siginfo.si_addr in Linux 5.11 or above. This address
+// must include the pointer tag; this is available if SA_EXPOSE_TAGBITS was set
+// in sigaction.sa_flags when the signal handler was registered. Note that the
+// kernel strips the tag from the field sigcontext.fault_address, so this
+// address is not suitable to be passed as fault_addr.
//
// stack_depot is a pointer to the stack depot data structure, which may be
// obtained by calling the function __scudo_get_stack_depot_addr() in the
@@ -105,6 +107,49 @@ size_t __scudo_get_stack_depot_size();
const char *__scudo_get_region_info_addr();
size_t __scudo_get_region_info_size();
+#ifndef M_DECAY_TIME
+#define M_DECAY_TIME -100
+#endif
+
+#ifndef M_PURGE
+#define M_PURGE -101
+#endif
+
+// Tune the allocator's choice of memory tags to make it more likely that
+// a certain class of memory errors will be detected. The value argument should
+// be one of the enumerators of the scudo_memtag_tuning enum below.
+#ifndef M_MEMTAG_TUNING
+#define M_MEMTAG_TUNING -102
+#endif
+
+// Per-thread memory initialization tuning. The value argument should be one of:
+// 1: Disable automatic heap initialization and, where possible, memory tagging,
+// on this thread.
+// 0: Normal behavior.
+#ifndef M_THREAD_DISABLE_MEM_INIT
+#define M_THREAD_DISABLE_MEM_INIT -103
+#endif
+
+#ifndef M_CACHE_COUNT_MAX
+#define M_CACHE_COUNT_MAX -200
+#endif
+
+#ifndef M_CACHE_SIZE_MAX
+#define M_CACHE_SIZE_MAX -201
+#endif
+
+#ifndef M_TSDS_COUNT_MAX
+#define M_TSDS_COUNT_MAX -202
+#endif
+
+enum scudo_memtag_tuning {
+ // Tune for buffer overflows.
+ M_MEMTAG_TUNING_BUFFER_OVERFLOW,
+
+ // Tune for use-after-free.
+ M_MEMTAG_TUNING_UAF,
+};
+
} // extern "C"
#endif // SCUDO_INTERFACE_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/internal_defs.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/internal_defs.h
index a884f1f3a40e..0babbbe3c11b 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/internal_defs.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/internal_defs.h
@@ -36,7 +36,6 @@
#define FORMAT(F, A) __attribute__((format(printf, F, A)))
#define NOINLINE __attribute__((noinline))
#define NORETURN __attribute__((noreturn))
-#define THREADLOCAL __thread
#define LIKELY(X) __builtin_expect(!!(X), 1)
#define UNLIKELY(X) __builtin_expect(!!(X), 0)
#if defined(__i386__) || defined(__x86_64__)
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/linux.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/linux.cpp
index 69ffdd9a165b..d2464677b279 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/linux.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/linux.cpp
@@ -35,10 +35,6 @@
#define ANDROID_PR_SET_VMA_ANON_NAME 0
#endif
-#ifdef ANDROID_EXPERIMENTAL_MTE
-#include <bionic/mte_kernel.h>
-#endif
-
namespace scudo {
uptr getPageSize() { return static_cast<uptr>(sysconf(_SC_PAGESIZE)); }
@@ -54,7 +50,10 @@ void *map(void *Addr, uptr Size, UNUSED const char *Name, uptr Flags,
MmapProt = PROT_NONE;
} else {
MmapProt = PROT_READ | PROT_WRITE;
-#if defined(__aarch64__) && defined(ANDROID_EXPERIMENTAL_MTE)
+#if defined(__aarch64__)
+#ifndef PROT_MTE
+#define PROT_MTE 0x20
+#endif
if (Flags & MAP_MEMTAG)
MmapProt |= PROT_MTE;
#endif
@@ -198,7 +197,7 @@ void outputRaw(const char *Buffer) {
}
async_safe_write_log(AndroidLogInfo, "scudo", Buffer);
} else {
- write(2, Buffer, strlen(Buffer));
+ (void)write(2, Buffer, strlen(Buffer));
}
}
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/linux.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/linux.h
index c8e41484c851..72acb6da83a7 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/linux.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/linux.h
@@ -18,51 +18,6 @@ namespace scudo {
// MapPlatformData is unused on Linux, define it as a minimally sized structure.
struct MapPlatformData {};
-#if SCUDO_ANDROID
-
-#if defined(__aarch64__)
-#define __get_tls() \
- ({ \
- void **__v; \
- __asm__("mrs %0, tpidr_el0" : "=r"(__v)); \
- __v; \
- })
-#elif defined(__arm__)
-#define __get_tls() \
- ({ \
- void **__v; \
- __asm__("mrc p15, 0, %0, c13, c0, 3" : "=r"(__v)); \
- __v; \
- })
-#elif defined(__i386__)
-#define __get_tls() \
- ({ \
- void **__v; \
- __asm__("movl %%gs:0, %0" : "=r"(__v)); \
- __v; \
- })
-#elif defined(__x86_64__)
-#define __get_tls() \
- ({ \
- void **__v; \
- __asm__("mov %%fs:0, %0" : "=r"(__v)); \
- __v; \
- })
-#else
-#error "Unsupported architecture."
-#endif
-
-// The Android Bionic team has allocated a TLS slot for sanitizers starting
-// with Q, given that Android currently doesn't support ELF TLS. It is used to
-// store sanitizer thread specific data.
-static const int TLS_SLOT_SANITIZER = 6;
-
-ALWAYS_INLINE uptr *getAndroidTlsPtr() {
- return reinterpret_cast<uptr *>(&__get_tls()[TLS_SLOT_SANITIZER]);
-}
-
-#endif // SCUDO_ANDROID
-
} // namespace scudo
#endif // SCUDO_LINUX
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/local_cache.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/local_cache.h
index a6425fc6d1ea..089aeb939627 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/local_cache.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/local_cache.h
@@ -159,6 +159,7 @@ private:
DCHECK_GT(B->getCount(), 0);
C->Count = B->getCount();
B->copyToArray(C->Chunks);
+ B->clear();
destroyBatch(ClassId, B);
return true;
}
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/memtag.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/memtag.h
index 6f347f4694e8..b1b62065ed72 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/memtag.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/memtag.h
@@ -14,13 +14,13 @@
#if SCUDO_LINUX
#include <sys/auxv.h>
#include <sys/prctl.h>
-#if defined(ANDROID_EXPERIMENTAL_MTE)
-#include <bionic/mte_kernel.h>
-#endif
#endif
namespace scudo {
+void setRandomTag(void *Ptr, uptr Size, uptr ExcludeMask, uptr *TaggedBegin,
+ uptr *TaggedEnd);
+
#if defined(__aarch64__) || defined(SCUDO_FUZZ)
inline constexpr bool archSupportsMemoryTagging() { return true; }
@@ -28,9 +28,7 @@ inline constexpr uptr archMemoryTagGranuleSize() { return 16; }
inline uptr untagPointer(uptr Ptr) { return Ptr & ((1ULL << 56) - 1); }
-inline uint8_t extractTag(uptr Ptr) {
- return (Ptr >> 56) & 0xf;
-}
+inline uint8_t extractTag(uptr Ptr) { return (Ptr >> 56) & 0xf; }
#else
@@ -54,23 +52,41 @@ inline uint8_t extractTag(uptr Ptr) {
#if defined(__aarch64__)
+#if SCUDO_LINUX
+
inline bool systemSupportsMemoryTagging() {
-#if defined(ANDROID_EXPERIMENTAL_MTE)
- return getauxval(AT_HWCAP2) & HWCAP2_MTE;
-#else
- return false;
+#ifndef HWCAP2_MTE
+#define HWCAP2_MTE (1 << 18)
#endif
+ return getauxval(AT_HWCAP2) & HWCAP2_MTE;
}
inline bool systemDetectsMemoryTagFaultsTestOnly() {
-#if defined(ANDROID_EXPERIMENTAL_MTE)
- return (prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0) & PR_MTE_TCF_MASK) !=
- PR_MTE_TCF_NONE;
-#else
- return false;
+#ifndef PR_GET_TAGGED_ADDR_CTRL
+#define PR_GET_TAGGED_ADDR_CTRL 56
+#endif
+#ifndef PR_MTE_TCF_SHIFT
+#define PR_MTE_TCF_SHIFT 1
+#endif
+#ifndef PR_MTE_TCF_NONE
+#define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
#endif
+#ifndef PR_MTE_TCF_MASK
+#define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT)
+#endif
+ return (static_cast<unsigned long>(
+ prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0)) &
+ PR_MTE_TCF_MASK) != PR_MTE_TCF_NONE;
}
+#else // !SCUDO_LINUX
+
+inline bool systemSupportsMemoryTagging() { return false; }
+
+inline bool systemDetectsMemoryTagFaultsTestOnly() { return false; }
+
+#endif // SCUDO_LINUX
+
inline void disableMemoryTagChecksTestOnly() {
__asm__ __volatile__(".arch_extension mte; msr tco, #1");
}
@@ -82,7 +98,7 @@ inline void enableMemoryTagChecksTestOnly() {
class ScopedDisableMemoryTagChecks {
size_t PrevTCO;
- public:
+public:
ScopedDisableMemoryTagChecks() {
__asm__ __volatile__(".arch_extension mte; mrs %0, tco; msr tco, #1"
: "=r"(PrevTCO));
@@ -93,40 +109,36 @@ class ScopedDisableMemoryTagChecks {
}
};
-inline void setRandomTag(void *Ptr, uptr Size, uptr ExcludeMask,
- uptr *TaggedBegin, uptr *TaggedEnd) {
- void *End;
+inline uptr selectRandomTag(uptr Ptr, uptr ExcludeMask) {
+ uptr TaggedPtr;
__asm__ __volatile__(
- R"(
- .arch_extension mte
-
- // Set a random tag for Ptr in TaggedPtr. This needs to happen even if
- // Size = 0 so that TaggedPtr ends up pointing at a valid address.
- irg %[TaggedPtr], %[Ptr], %[ExcludeMask]
- mov %[Cur], %[TaggedPtr]
-
- // Skip the loop if Size = 0. We don't want to do any tagging in this case.
- cbz %[Size], 2f
-
- // Set the memory tag of the region
- // [TaggedPtr, TaggedPtr + roundUpTo(Size, 16))
- // to the pointer tag stored in TaggedPtr.
- add %[End], %[TaggedPtr], %[Size]
-
- 1:
- stzg %[Cur], [%[Cur]], #16
- cmp %[Cur], %[End]
- b.lt 1b
+ ".arch_extension mte; irg %[TaggedPtr], %[Ptr], %[ExcludeMask]"
+ : [TaggedPtr] "=r"(TaggedPtr)
+ : [Ptr] "r"(Ptr), [ExcludeMask] "r"(ExcludeMask));
+ return TaggedPtr;
+}
- 2:
- )"
- :
- [TaggedPtr] "=&r"(*TaggedBegin), [Cur] "=&r"(*TaggedEnd), [End] "=&r"(End)
- : [Ptr] "r"(Ptr), [Size] "r"(Size), [ExcludeMask] "r"(ExcludeMask)
- : "memory");
+inline uptr storeTags(uptr Begin, uptr End) {
+ DCHECK(Begin % 16 == 0);
+ if (Begin != End) {
+ __asm__ __volatile__(
+ R"(
+ .arch_extension mte
+
+ 1:
+ stzg %[Cur], [%[Cur]], #16
+ cmp %[Cur], %[End]
+ b.lt 1b
+ )"
+ : [Cur] "+&r"(Begin)
+ : [End] "r"(End)
+ : "memory");
+ }
+ return Begin;
}
-inline void *prepareTaggedChunk(void *Ptr, uptr Size, uptr BlockEnd) {
+inline void *prepareTaggedChunk(void *Ptr, uptr Size, uptr ExcludeMask,
+ uptr BlockEnd) {
// Prepare the granule before the chunk to store the chunk header by setting
// its tag to 0. Normally its tag will already be 0, but in the case where a
// chunk holding a low alignment allocation is reused for a higher alignment
@@ -138,7 +150,7 @@ inline void *prepareTaggedChunk(void *Ptr, uptr Size, uptr BlockEnd) {
: "memory");
uptr TaggedBegin, TaggedEnd;
- setRandomTag(Ptr, Size, 0, &TaggedBegin, &TaggedEnd);
+ setRandomTag(Ptr, Size, ExcludeMask, &TaggedBegin, &TaggedEnd);
// Finally, set the tag of the granule past the end of the allocation to 0,
// to catch linear overflows even if a previous larger allocation used the
@@ -189,8 +201,8 @@ inline void resizeTaggedChunk(uptr OldPtr, uptr NewPtr, uptr BlockEnd) {
2:
)"
- : [ Cur ] "+&r"(RoundOldPtr), [ End ] "+&r"(NewPtr)
- : [ BlockEnd ] "r"(BlockEnd)
+ : [Cur] "+&r"(RoundOldPtr), [End] "+&r"(NewPtr)
+ : [BlockEnd] "r"(BlockEnd)
: "memory");
}
@@ -225,19 +237,23 @@ struct ScopedDisableMemoryTagChecks {
ScopedDisableMemoryTagChecks() {}
};
-inline void setRandomTag(void *Ptr, uptr Size, uptr ExcludeMask,
- uptr *TaggedBegin, uptr *TaggedEnd) {
+inline uptr selectRandomTag(uptr Ptr, uptr ExcludeMask) {
(void)Ptr;
- (void)Size;
(void)ExcludeMask;
- (void)TaggedBegin;
- (void)TaggedEnd;
UNREACHABLE("memory tagging not supported");
}
-inline void *prepareTaggedChunk(void *Ptr, uptr Size, uptr BlockEnd) {
+inline uptr storeTags(uptr Begin, uptr End) {
+ (void)Begin;
+ (void)End;
+ UNREACHABLE("memory tagging not supported");
+}
+
+inline void *prepareTaggedChunk(void *Ptr, uptr Size, uptr ExcludeMask,
+ uptr BlockEnd) {
(void)Ptr;
(void)Size;
+ (void)ExcludeMask;
(void)BlockEnd;
UNREACHABLE("memory tagging not supported");
}
@@ -256,6 +272,17 @@ inline uptr loadTag(uptr Ptr) {
#endif
+inline void setRandomTag(void *Ptr, uptr Size, uptr ExcludeMask,
+ uptr *TaggedBegin, uptr *TaggedEnd) {
+ *TaggedBegin = selectRandomTag(reinterpret_cast<uptr>(Ptr), ExcludeMask);
+ *TaggedEnd = storeTags(*TaggedBegin, *TaggedBegin + Size);
+}
+
+template <typename Config>
+inline constexpr bool allocatorSupportsMemoryTagging() {
+ return archSupportsMemoryTagging() && Config::MaySupportMemoryTagging;
+}
+
} // namespace scudo
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/options.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/options.h
new file mode 100644
index 000000000000..91301bf5ec9c
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/options.h
@@ -0,0 +1,74 @@
+//===-- options.h -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_OPTIONS_H_
+#define SCUDO_OPTIONS_H_
+
+#include "atomic_helpers.h"
+#include "common.h"
+#include "memtag.h"
+
+namespace scudo {
+
+enum class OptionBit {
+ MayReturnNull,
+ FillContents0of2,
+ FillContents1of2,
+ DeallocTypeMismatch,
+ DeleteSizeMismatch,
+ TrackAllocationStacks,
+ UseOddEvenTags,
+ UseMemoryTagging,
+};
+
+struct Options {
+ u32 Val;
+
+ bool get(OptionBit Opt) const { return Val & (1U << static_cast<u32>(Opt)); }
+
+ FillContentsMode getFillContentsMode() const {
+ return static_cast<FillContentsMode>(
+ (Val >> static_cast<u32>(OptionBit::FillContents0of2)) & 3);
+ }
+};
+
+template <typename Config> bool useMemoryTagging(Options Options) {
+ return allocatorSupportsMemoryTagging<Config>() &&
+ Options.get(OptionBit::UseMemoryTagging);
+}
+
+struct AtomicOptions {
+ atomic_u32 Val;
+
+public:
+ Options load() const { return Options{atomic_load_relaxed(&Val)}; }
+
+ void clear(OptionBit Opt) {
+ atomic_fetch_and(&Val, ~(1U << static_cast<u32>(Opt)),
+ memory_order_relaxed);
+ }
+
+ void set(OptionBit Opt) {
+ atomic_fetch_or(&Val, 1U << static_cast<u32>(Opt), memory_order_relaxed);
+ }
+
+ void setFillContentsMode(FillContentsMode FillContents) {
+ u32 Opts = atomic_load_relaxed(&Val), NewOpts;
+ do {
+ NewOpts = Opts;
+ NewOpts &= ~(3U << static_cast<u32>(OptionBit::FillContents0of2));
+ NewOpts |= static_cast<u32>(FillContents)
+ << static_cast<u32>(OptionBit::FillContents0of2);
+ } while (!atomic_compare_exchange_strong(&Val, &Opts, NewOpts,
+ memory_order_relaxed));
+ }
+};
+
+} // namespace scudo
+
+#endif // SCUDO_OPTIONS_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/primary32.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/primary32.h
index 29a268098185..a88a2a67e951 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/primary32.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/primary32.h
@@ -13,6 +13,7 @@
#include "common.h"
#include "list.h"
#include "local_cache.h"
+#include "options.h"
#include "release.h"
#include "report.h"
#include "stats.h"
@@ -38,23 +39,17 @@ namespace scudo {
// Memory used by this allocator is never unmapped but can be partially
// reclaimed if the platform allows for it.
-template <class SizeClassMapT, uptr RegionSizeLog,
- s32 MinReleaseToOsIntervalMs = INT32_MIN,
- s32 MaxReleaseToOsIntervalMs = INT32_MAX>
-class SizeClassAllocator32 {
+template <typename Config> class SizeClassAllocator32 {
public:
- typedef SizeClassMapT SizeClassMap;
+ typedef typename Config::SizeClassMap SizeClassMap;
// The bytemap can only track UINT8_MAX - 1 classes.
static_assert(SizeClassMap::LargestClassId <= (UINT8_MAX - 1), "");
// Regions should be large enough to hold the largest Block.
- static_assert((1UL << RegionSizeLog) >= SizeClassMap::MaxSize, "");
- typedef SizeClassAllocator32<SizeClassMapT, RegionSizeLog,
- MinReleaseToOsIntervalMs,
- MaxReleaseToOsIntervalMs>
- ThisT;
+ static_assert((1UL << Config::PrimaryRegionSizeLog) >= SizeClassMap::MaxSize,
+ "");
+ typedef SizeClassAllocator32<Config> ThisT;
typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
typedef typename CacheT::TransferBatch TransferBatch;
- static const bool SupportsMemoryTagging = false;
static uptr getSizeByClassId(uptr ClassId) {
return (ClassId == SizeClassMap::BatchClassId)
@@ -69,24 +64,20 @@ public:
reportError("SizeClassAllocator32 is not supported on Fuchsia");
PossibleRegions.initLinkerInitialized();
- MinRegionIndex = NumRegions; // MaxRegionIndex is already initialized to 0.
u32 Seed;
const u64 Time = getMonotonicTime();
if (UNLIKELY(!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed))))
Seed = static_cast<u32>(
Time ^ (reinterpret_cast<uptr>(SizeClassInfoArray) >> 6));
- const uptr PageSize = getPageSizeCached();
for (uptr I = 0; I < NumClasses; I++) {
SizeClassInfo *Sci = getSizeClassInfo(I);
Sci->RandState = getRandomU32(&Seed);
- // See comment in the 64-bit primary about releasing smaller size classes.
- Sci->CanRelease = (I != SizeClassMap::BatchClassId) &&
- (getSizeByClassId(I) >= (PageSize / 32));
- if (Sci->CanRelease)
- Sci->ReleaseInfo.LastReleaseAtNs = Time;
+ // Sci->MaxRegionIndex is already initialized to 0.
+ Sci->MinRegionIndex = NumRegions;
+ Sci->ReleaseInfo.LastReleaseAtNs = Time;
}
- setReleaseToOsIntervalMs(ReleaseToOsInterval);
+ setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
}
void init(s32 ReleaseToOsInterval) {
memset(this, 0, sizeof(*this));
@@ -97,7 +88,15 @@ public:
while (NumberOfStashedRegions > 0)
unmap(reinterpret_cast<void *>(RegionsStash[--NumberOfStashedRegions]),
RegionSize);
- for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
+ uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
+ for (uptr I = 0; I < NumClasses; I++) {
+ SizeClassInfo *Sci = getSizeClassInfo(I);
+ if (Sci->MinRegionIndex < MinRegionIndex)
+ MinRegionIndex = Sci->MinRegionIndex;
+ if (Sci->MaxRegionIndex > MaxRegionIndex)
+ MaxRegionIndex = Sci->MaxRegionIndex;
+ }
+ for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
if (PossibleRegions[I])
unmap(reinterpret_cast<void *>(I * RegionSize), RegionSize);
PossibleRegions.unmapTestOnly();
@@ -127,7 +126,7 @@ public:
ScopedLock L(Sci->Mutex);
Sci->FreeList.push_front(B);
Sci->Stats.PushedBlocks += B->getCount();
- if (Sci->CanRelease)
+ if (ClassId != SizeClassMap::BatchClassId)
releaseToOSMaybe(Sci, ClassId);
}
@@ -155,6 +154,14 @@ public:
}
template <typename F> void iterateOverBlocks(F Callback) {
+ uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
+ for (uptr I = 0; I < NumClasses; I++) {
+ SizeClassInfo *Sci = getSizeClassInfo(I);
+ if (Sci->MinRegionIndex < MinRegionIndex)
+ MinRegionIndex = Sci->MinRegionIndex;
+ if (Sci->MaxRegionIndex > MaxRegionIndex)
+ MaxRegionIndex = Sci->MaxRegionIndex;
+ }
for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
if (PossibleRegions[I] &&
(PossibleRegions[I] - 1U) != SizeClassMap::BatchClassId) {
@@ -184,18 +191,23 @@ public:
getStats(Str, I, 0);
}
- void setReleaseToOsIntervalMs(s32 Interval) {
- if (Interval >= MaxReleaseToOsIntervalMs) {
- Interval = MaxReleaseToOsIntervalMs;
- } else if (Interval <= MinReleaseToOsIntervalMs) {
- Interval = MinReleaseToOsIntervalMs;
+ bool setOption(Option O, sptr Value) {
+ if (O == Option::ReleaseInterval) {
+ const s32 Interval = Max(
+ Min(static_cast<s32>(Value), Config::PrimaryMaxReleaseToOsIntervalMs),
+ Config::PrimaryMinReleaseToOsIntervalMs);
+ atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
+ return true;
}
- atomic_store(&ReleaseToOsIntervalMs, Interval, memory_order_relaxed);
+ // Not supported by the Primary, but not an error either.
+ return true;
}
uptr releaseToOS() {
uptr TotalReleasedBytes = 0;
for (uptr I = 0; I < NumClasses; I++) {
+ if (I == SizeClassMap::BatchClassId)
+ continue;
SizeClassInfo *Sci = getSizeClassInfo(I);
ScopedLock L(Sci->Mutex);
TotalReleasedBytes += releaseToOSMaybe(Sci, I, /*Force=*/true);
@@ -203,22 +215,21 @@ public:
return TotalReleasedBytes;
}
- bool useMemoryTagging() { return false; }
- void disableMemoryTagging() {}
-
const char *getRegionInfoArrayAddress() const { return nullptr; }
static uptr getRegionInfoArraySize() { return 0; }
- static BlockInfo findNearestBlock(const char *RegionInfoData, uptr Ptr) {
- (void)RegionInfoData;
- (void)Ptr;
+ static BlockInfo findNearestBlock(UNUSED const char *RegionInfoData,
+ UNUSED uptr Ptr) {
return {};
}
+ AtomicOptions Options;
+
private:
static const uptr NumClasses = SizeClassMap::NumClasses;
- static const uptr RegionSize = 1UL << RegionSizeLog;
- static const uptr NumRegions = SCUDO_MMAP_RANGE_SIZE >> RegionSizeLog;
+ static const uptr RegionSize = 1UL << Config::PrimaryRegionSizeLog;
+ static const uptr NumRegions =
+ SCUDO_MMAP_RANGE_SIZE >> Config::PrimaryRegionSizeLog;
static const u32 MaxNumBatches = SCUDO_ANDROID ? 4U : 8U;
typedef FlatByteMap<NumRegions> ByteMap;
@@ -240,15 +251,18 @@ private:
uptr CurrentRegion;
uptr CurrentRegionAllocated;
SizeClassStats Stats;
- bool CanRelease;
u32 RandState;
uptr AllocatedUser;
+ // Lowest & highest region index allocated for this size class, to avoid
+ // looping through the whole NumRegions.
+ uptr MinRegionIndex;
+ uptr MaxRegionIndex;
ReleaseToOsInfo ReleaseInfo;
};
static_assert(sizeof(SizeClassInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");
uptr computeRegionId(uptr Mem) {
- const uptr Id = Mem >> RegionSizeLog;
+ const uptr Id = Mem >> Config::PrimaryRegionSizeLog;
CHECK_LT(Id, NumRegions);
return Id;
}
@@ -257,7 +271,7 @@ private:
uptr MapSize = 2 * RegionSize;
const uptr MapBase = reinterpret_cast<uptr>(
map(nullptr, MapSize, "scudo:primary", MAP_ALLOWNOMEM));
- if (UNLIKELY(!MapBase))
+ if (!MapBase)
return 0;
const uptr MapEnd = MapBase + MapSize;
uptr Region = MapBase;
@@ -278,7 +292,7 @@ private:
return Region;
}
- uptr allocateRegion(uptr ClassId) {
+ uptr allocateRegion(SizeClassInfo *Sci, uptr ClassId) {
DCHECK_LT(ClassId, NumClasses);
uptr Region = 0;
{
@@ -289,11 +303,12 @@ private:
if (!Region)
Region = allocateRegionSlow();
if (LIKELY(Region)) {
+ // Sci->Mutex is held by the caller, updating the Min/Max is safe.
const uptr RegionIndex = computeRegionId(Region);
- if (RegionIndex < MinRegionIndex)
- MinRegionIndex = RegionIndex;
- if (RegionIndex > MaxRegionIndex)
- MaxRegionIndex = RegionIndex;
+ if (RegionIndex < Sci->MinRegionIndex)
+ Sci->MinRegionIndex = RegionIndex;
+ if (RegionIndex > Sci->MaxRegionIndex)
+ Sci->MaxRegionIndex = RegionIndex;
PossibleRegions.set(RegionIndex, static_cast<u8>(ClassId + 1U));
}
return Region;
@@ -304,29 +319,6 @@ private:
return &SizeClassInfoArray[ClassId];
}
- bool populateBatches(CacheT *C, SizeClassInfo *Sci, uptr ClassId,
- TransferBatch **CurrentBatch, u32 MaxCount,
- void **PointersArray, u32 Count) {
- if (ClassId != SizeClassMap::BatchClassId)
- shuffle(PointersArray, Count, &Sci->RandState);
- TransferBatch *B = *CurrentBatch;
- for (uptr I = 0; I < Count; I++) {
- if (B && B->getCount() == MaxCount) {
- Sci->FreeList.push_back(B);
- B = nullptr;
- }
- if (!B) {
- B = C->createBatch(ClassId, PointersArray[I]);
- if (UNLIKELY(!B))
- return false;
- B->clear();
- }
- B->add(PointersArray[I]);
- }
- *CurrentBatch = B;
- return true;
- }
-
NOINLINE TransferBatch *populateFreeList(CacheT *C, uptr ClassId,
SizeClassInfo *Sci) {
uptr Region;
@@ -341,7 +333,7 @@ private:
Offset = Sci->CurrentRegionAllocated;
} else {
DCHECK_EQ(Sci->CurrentRegionAllocated, 0U);
- Region = allocateRegion(ClassId);
+ Region = allocateRegion(Sci, ClassId);
if (UNLIKELY(!Region))
return nullptr;
C->getStats().add(StatMapped, RegionSize);
@@ -362,38 +354,35 @@ private:
static_cast<u32>((RegionSize - Offset) / Size));
DCHECK_GT(NumberOfBlocks, 0U);
- TransferBatch *B = nullptr;
constexpr u32 ShuffleArraySize =
MaxNumBatches * TransferBatch::MaxNumCached;
// Fill the transfer batches and put them in the size-class freelist. We
// need to randomize the blocks for security purposes, so we first fill a
// local array that we then shuffle before populating the batches.
void *ShuffleArray[ShuffleArraySize];
- u32 Count = 0;
- const uptr AllocatedUser = Size * NumberOfBlocks;
- for (uptr I = Region + Offset; I < Region + Offset + AllocatedUser;
- I += Size) {
- ShuffleArray[Count++] = reinterpret_cast<void *>(I);
- if (Count == ShuffleArraySize) {
- if (UNLIKELY(!populateBatches(C, Sci, ClassId, &B, MaxCount,
- ShuffleArray, Count)))
- return nullptr;
- Count = 0;
- }
- }
- if (Count) {
- if (UNLIKELY(!populateBatches(C, Sci, ClassId, &B, MaxCount, ShuffleArray,
- Count)))
+ DCHECK_LE(NumberOfBlocks, ShuffleArraySize);
+
+ uptr P = Region + Offset;
+ for (u32 I = 0; I < NumberOfBlocks; I++, P += Size)
+ ShuffleArray[I] = reinterpret_cast<void *>(P);
+ // No need to shuffle the batches size class.
+ if (ClassId != SizeClassMap::BatchClassId)
+ shuffle(ShuffleArray, NumberOfBlocks, &Sci->RandState);
+ for (u32 I = 0; I < NumberOfBlocks;) {
+ TransferBatch *B = C->createBatch(ClassId, ShuffleArray[I]);
+ if (UNLIKELY(!B))
return nullptr;
- }
- DCHECK(B);
- if (!Sci->FreeList.empty()) {
+ const u32 N = Min(MaxCount, NumberOfBlocks - I);
+ B->setFromArray(&ShuffleArray[I], N);
Sci->FreeList.push_back(B);
- B = Sci->FreeList.front();
- Sci->FreeList.pop_front();
+ I += N;
}
+ TransferBatch *B = Sci->FreeList.front();
+ Sci->FreeList.pop_front();
+ DCHECK(B);
DCHECK_GT(B->getCount(), 0);
+ const uptr AllocatedUser = Size * NumberOfBlocks;
C->getStats().add(StatFree, AllocatedUser);
DCHECK_LE(Sci->CurrentRegionAllocated + AllocatedUser, RegionSize);
// If there is not enough room in the region currently associated to fit
@@ -423,16 +412,12 @@ private:
AvailableChunks, Rss >> 10, Sci->ReleaseInfo.RangesReleased);
}
- s32 getReleaseToOsIntervalMs() {
- return atomic_load(&ReleaseToOsIntervalMs, memory_order_relaxed);
- }
-
NOINLINE uptr releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId,
bool Force = false) {
const uptr BlockSize = getSizeByClassId(ClassId);
const uptr PageSize = getPageSizeCached();
- CHECK_GE(Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks);
+ DCHECK_GE(Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks);
const uptr BytesInFreeList =
Sci->AllocatedUser -
(Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks) * BlockSize;
@@ -444,8 +429,20 @@ private:
if (BytesPushed < PageSize)
return 0; // Nothing new to release.
+ // Releasing smaller blocks is expensive, so we want to make sure that a
+ // significant amount of bytes are free, and that there has been a good
+ // amount of batches pushed to the freelist before attempting to release.
+ if (BlockSize < PageSize / 16U) {
+ if (!Force && BytesPushed < Sci->AllocatedUser / 16U)
+ return 0;
+ // We want 8x% to 9x% free bytes (the larger the block, the lower the %).
+ if ((BytesInFreeList * 100U) / Sci->AllocatedUser <
+ (100U - 1U - BlockSize / 16U))
+ return 0;
+ }
+
if (!Force) {
- const s32 IntervalMs = getReleaseToOsIntervalMs();
+ const s32 IntervalMs = atomic_load_relaxed(&ReleaseToOsIntervalMs);
if (IntervalMs < 0)
return 0;
if (Sci->ReleaseInfo.LastReleaseAtNs +
@@ -455,31 +452,27 @@ private:
}
}
- // TODO(kostyak): currently not ideal as we loop over all regions and
- // iterate multiple times over the same freelist if a ClassId spans multiple
- // regions. But it will have to do for now.
+ const uptr First = Sci->MinRegionIndex;
+ const uptr Last = Sci->MaxRegionIndex;
+ DCHECK_NE(Last, 0U);
+ DCHECK_LE(First, Last);
uptr TotalReleasedBytes = 0;
- const uptr MaxSize = (RegionSize / BlockSize) * BlockSize;
- for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++) {
- if (PossibleRegions[I] - 1U == ClassId) {
- const uptr Region = I * RegionSize;
- // If the region is the one currently associated to the size-class, we
- // only need to release up to CurrentRegionAllocated, MaxSize otherwise.
- const uptr Size = (Region == Sci->CurrentRegion)
- ? Sci->CurrentRegionAllocated
- : MaxSize;
- ReleaseRecorder Recorder(Region);
- releaseFreeMemoryToOS(Sci->FreeList, Region, Size, BlockSize,
- &Recorder);
- if (Recorder.getReleasedRangesCount() > 0) {
- Sci->ReleaseInfo.PushedBlocksAtLastRelease = Sci->Stats.PushedBlocks;
- Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
- Sci->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
- TotalReleasedBytes += Sci->ReleaseInfo.LastReleasedBytes;
- }
- }
+ const uptr Base = First * RegionSize;
+ const uptr NumberOfRegions = Last - First + 1U;
+ ReleaseRecorder Recorder(Base);
+ auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
+ return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
+ };
+ releaseFreeMemoryToOS(Sci->FreeList, Base, RegionSize, NumberOfRegions,
+ BlockSize, &Recorder, SkipRegion);
+ if (Recorder.getReleasedRangesCount() > 0) {
+ Sci->ReleaseInfo.PushedBlocksAtLastRelease = Sci->Stats.PushedBlocks;
+ Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
+ Sci->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
+ TotalReleasedBytes += Sci->ReleaseInfo.LastReleasedBytes;
}
Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();
+
return TotalReleasedBytes;
}
@@ -487,10 +480,6 @@ private:
// Track the regions in use, 0 is unused, otherwise store ClassId + 1.
ByteMap PossibleRegions;
- // Keep track of the lowest & highest regions allocated to avoid looping
- // through the whole NumRegions.
- uptr MinRegionIndex;
- uptr MaxRegionIndex;
atomic_s32 ReleaseToOsIntervalMs;
// Unless several threads request regions simultaneously from different size
// classes, the stash rarely contains more than 1 entry.
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/primary64.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/primary64.h
index d4767882ba2c..2724a2529f75 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/primary64.h
@@ -14,6 +14,7 @@
#include "list.h"
#include "local_cache.h"
#include "memtag.h"
+#include "options.h"
#include "release.h"
#include "stats.h"
#include "string_utils.h"
@@ -39,21 +40,12 @@ namespace scudo {
// The memory used by this allocator is never unmapped, but can be partially
// released if the platform allows for it.
-template <class SizeClassMapT, uptr RegionSizeLog,
- s32 MinReleaseToOsIntervalMs = INT32_MIN,
- s32 MaxReleaseToOsIntervalMs = INT32_MAX,
- bool MaySupportMemoryTagging = false>
-class SizeClassAllocator64 {
+template <typename Config> class SizeClassAllocator64 {
public:
- typedef SizeClassMapT SizeClassMap;
- typedef SizeClassAllocator64<
- SizeClassMap, RegionSizeLog, MinReleaseToOsIntervalMs,
- MaxReleaseToOsIntervalMs, MaySupportMemoryTagging>
- ThisT;
+ typedef typename Config::SizeClassMap SizeClassMap;
+ typedef SizeClassAllocator64<Config> ThisT;
typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
typedef typename CacheT::TransferBatch TransferBatch;
- static const bool SupportsMemoryTagging =
- MaySupportMemoryTagging && archSupportsMemoryTagging();
static uptr getSizeByClassId(uptr ClassId) {
return (ClassId == SizeClassMap::BatchClassId)
@@ -79,22 +71,9 @@ public:
Region->RegionBeg =
getRegionBaseByClassId(I) + (getRandomModN(&Seed, 16) + 1) * PageSize;
Region->RandState = getRandomU32(&Seed);
- // Releasing smaller size classes doesn't necessarily yield to a
- // meaningful RSS impact: there are more blocks per page, they are
- // randomized around, and thus pages are less likely to be entirely empty.
- // On top of this, attempting to release those require more iterations and
- // memory accesses which ends up being fairly costly. The current lower
- // limit is mostly arbitrary and based on empirical observations.
- // TODO(kostyak): make the lower limit a runtime option
- Region->CanRelease = (I != SizeClassMap::BatchClassId) &&
- (getSizeByClassId(I) >= (PageSize / 32));
- if (Region->CanRelease)
- Region->ReleaseInfo.LastReleaseAtNs = Time;
+ Region->ReleaseInfo.LastReleaseAtNs = Time;
}
- setReleaseToOsIntervalMs(ReleaseToOsInterval);
-
- if (SupportsMemoryTagging)
- UseMemoryTagging = systemSupportsMemoryTagging();
+ setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
}
void init(s32 ReleaseToOsInterval) {
memset(this, 0, sizeof(*this));
@@ -128,7 +107,7 @@ public:
ScopedLock L(Region->Mutex);
Region->FreeList.push_front(B);
Region->Stats.PushedBlocks += B->getCount();
- if (Region->CanRelease)
+ if (ClassId != SizeClassMap::BatchClassId)
releaseToOSMaybe(Region, ClassId);
}
@@ -185,18 +164,23 @@ public:
getStats(Str, I, 0);
}
- void setReleaseToOsIntervalMs(s32 Interval) {
- if (Interval >= MaxReleaseToOsIntervalMs) {
- Interval = MaxReleaseToOsIntervalMs;
- } else if (Interval <= MinReleaseToOsIntervalMs) {
- Interval = MinReleaseToOsIntervalMs;
+ bool setOption(Option O, sptr Value) {
+ if (O == Option::ReleaseInterval) {
+ const s32 Interval = Max(
+ Min(static_cast<s32>(Value), Config::PrimaryMaxReleaseToOsIntervalMs),
+ Config::PrimaryMinReleaseToOsIntervalMs);
+ atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
+ return true;
}
- atomic_store(&ReleaseToOsIntervalMs, Interval, memory_order_relaxed);
+ // Not supported by the Primary, but not an error either.
+ return true;
}
uptr releaseToOS() {
uptr TotalReleasedBytes = 0;
for (uptr I = 0; I < NumClasses; I++) {
+ if (I == SizeClassMap::BatchClassId)
+ continue;
RegionInfo *Region = getRegionInfo(I);
ScopedLock L(Region->Mutex);
TotalReleasedBytes += releaseToOSMaybe(Region, I, /*Force=*/true);
@@ -204,18 +188,11 @@ public:
return TotalReleasedBytes;
}
- bool useMemoryTagging() const {
- return SupportsMemoryTagging && UseMemoryTagging;
- }
- void disableMemoryTagging() { UseMemoryTagging = false; }
-
const char *getRegionInfoArrayAddress() const {
return reinterpret_cast<const char *>(RegionInfoArray);
}
- static uptr getRegionInfoArraySize() {
- return sizeof(RegionInfoArray);
- }
+ static uptr getRegionInfoArraySize() { return sizeof(RegionInfoArray); }
static BlockInfo findNearestBlock(const char *RegionInfoData, uptr Ptr) {
const RegionInfo *RegionInfoArray =
@@ -261,8 +238,10 @@ public:
return B;
}
+ AtomicOptions Options;
+
private:
- static const uptr RegionSize = 1UL << RegionSizeLog;
+ static const uptr RegionSize = 1UL << Config::PrimaryRegionSizeLog;
static const uptr NumClasses = SizeClassMap::NumClasses;
static const uptr PrimarySize = RegionSize * NumClasses;
@@ -287,7 +266,6 @@ private:
HybridMutex Mutex;
SinglyLinkedList<TransferBatch> FreeList;
RegionStats Stats;
- bool CanRelease;
bool Exhausted;
u32 RandState;
uptr RegionBeg;
@@ -305,7 +283,6 @@ private:
uptr PrimaryBase;
MapPlatformData Data;
atomic_s32 ReleaseToOsIntervalMs;
- bool UseMemoryTagging;
alignas(SCUDO_CACHE_LINE_SIZE) RegionInfo RegionInfoArray[NumClasses];
RegionInfo *getRegionInfo(uptr ClassId) {
@@ -314,31 +291,7 @@ private:
}
uptr getRegionBaseByClassId(uptr ClassId) const {
- return PrimaryBase + (ClassId << RegionSizeLog);
- }
-
- bool populateBatches(CacheT *C, RegionInfo *Region, uptr ClassId,
- TransferBatch **CurrentBatch, u32 MaxCount,
- void **PointersArray, u32 Count) {
- // No need to shuffle the batches size class.
- if (ClassId != SizeClassMap::BatchClassId)
- shuffle(PointersArray, Count, &Region->RandState);
- TransferBatch *B = *CurrentBatch;
- for (uptr I = 0; I < Count; I++) {
- if (B && B->getCount() == MaxCount) {
- Region->FreeList.push_back(B);
- B = nullptr;
- }
- if (!B) {
- B = C->createBatch(ClassId, PointersArray[I]);
- if (UNLIKELY(!B))
- return false;
- B->clear();
- }
- B->add(PointersArray[I]);
- }
- *CurrentBatch = B;
- return true;
+ return PrimaryBase + (ClassId << Config::PrimaryRegionSizeLog);
}
NOINLINE TransferBatch *populateFreeList(CacheT *C, uptr ClassId,
@@ -350,30 +303,30 @@ private:
const uptr MappedUser = Region->MappedUser;
const uptr TotalUserBytes = Region->AllocatedUser + MaxCount * Size;
// Map more space for blocks, if necessary.
- if (TotalUserBytes > MappedUser) {
+ if (UNLIKELY(TotalUserBytes > MappedUser)) {
// Do the mmap for the user memory.
const uptr UserMapSize =
roundUpTo(TotalUserBytes - MappedUser, MapSizeIncrement);
const uptr RegionBase = RegionBeg - getRegionBaseByClassId(ClassId);
- if (UNLIKELY(RegionBase + MappedUser + UserMapSize > RegionSize)) {
+ if (RegionBase + MappedUser + UserMapSize > RegionSize) {
if (!Region->Exhausted) {
Region->Exhausted = true;
ScopedString Str(1024);
getStats(&Str);
Str.append(
- "Scudo OOM: The process has Exhausted %zuM for size class %zu.\n",
+ "Scudo OOM: The process has exhausted %zuM for size class %zu.\n",
RegionSize >> 20, Size);
Str.output();
}
return nullptr;
}
- if (UNLIKELY(MappedUser == 0))
+ if (MappedUser == 0)
Region->Data = Data;
- if (UNLIKELY(!map(reinterpret_cast<void *>(RegionBeg + MappedUser),
- UserMapSize, "scudo:primary",
- MAP_ALLOWNOMEM | MAP_RESIZABLE |
- (useMemoryTagging() ? MAP_MEMTAG : 0),
- &Region->Data)))
+ if (!map(reinterpret_cast<void *>(RegionBeg + MappedUser), UserMapSize,
+ "scudo:primary",
+ MAP_ALLOWNOMEM | MAP_RESIZABLE |
+ (useMemoryTagging<Config>(Options.load()) ? MAP_MEMTAG : 0),
+ &Region->Data))
return nullptr;
Region->MappedUser += UserMapSize;
C->getStats().add(StatMapped, UserMapSize);
@@ -384,38 +337,34 @@ private:
static_cast<u32>((Region->MappedUser - Region->AllocatedUser) / Size));
DCHECK_GT(NumberOfBlocks, 0);
- TransferBatch *B = nullptr;
constexpr u32 ShuffleArraySize =
MaxNumBatches * TransferBatch::MaxNumCached;
void *ShuffleArray[ShuffleArraySize];
- u32 Count = 0;
- const uptr P = RegionBeg + Region->AllocatedUser;
- const uptr AllocatedUser = Size * NumberOfBlocks;
- for (uptr I = P; I < P + AllocatedUser; I += Size) {
- ShuffleArray[Count++] = reinterpret_cast<void *>(I);
- if (Count == ShuffleArraySize) {
- if (UNLIKELY(!populateBatches(C, Region, ClassId, &B, MaxCount,
- ShuffleArray, Count)))
- return nullptr;
- Count = 0;
- }
- }
- if (Count) {
- if (UNLIKELY(!populateBatches(C, Region, ClassId, &B, MaxCount,
- ShuffleArray, Count)))
+ DCHECK_LE(NumberOfBlocks, ShuffleArraySize);
+
+ uptr P = RegionBeg + Region->AllocatedUser;
+ for (u32 I = 0; I < NumberOfBlocks; I++, P += Size)
+ ShuffleArray[I] = reinterpret_cast<void *>(P);
+ // No need to shuffle the batches size class.
+ if (ClassId != SizeClassMap::BatchClassId)
+ shuffle(ShuffleArray, NumberOfBlocks, &Region->RandState);
+ for (u32 I = 0; I < NumberOfBlocks;) {
+ TransferBatch *B = C->createBatch(ClassId, ShuffleArray[I]);
+ if (UNLIKELY(!B))
return nullptr;
- }
- DCHECK(B);
- if (!Region->FreeList.empty()) {
+ const u32 N = Min(MaxCount, NumberOfBlocks - I);
+ B->setFromArray(&ShuffleArray[I], N);
Region->FreeList.push_back(B);
- B = Region->FreeList.front();
- Region->FreeList.pop_front();
+ I += N;
}
+ TransferBatch *B = Region->FreeList.front();
+ Region->FreeList.pop_front();
+ DCHECK(B);
DCHECK_GT(B->getCount(), 0);
+ const uptr AllocatedUser = Size * NumberOfBlocks;
C->getStats().add(StatFree, AllocatedUser);
Region->AllocatedUser += AllocatedUser;
- Region->Exhausted = false;
return B;
}
@@ -437,16 +386,12 @@ private:
getRegionBaseByClassId(ClassId));
}
- s32 getReleaseToOsIntervalMs() {
- return atomic_load(&ReleaseToOsIntervalMs, memory_order_relaxed);
- }
-
NOINLINE uptr releaseToOSMaybe(RegionInfo *Region, uptr ClassId,
bool Force = false) {
const uptr BlockSize = getSizeByClassId(ClassId);
const uptr PageSize = getPageSizeCached();
- CHECK_GE(Region->Stats.PoppedBlocks, Region->Stats.PushedBlocks);
+ DCHECK_GE(Region->Stats.PoppedBlocks, Region->Stats.PushedBlocks);
const uptr BytesInFreeList =
Region->AllocatedUser -
(Region->Stats.PoppedBlocks - Region->Stats.PushedBlocks) * BlockSize;
@@ -458,8 +403,20 @@ private:
if (BytesPushed < PageSize)
return 0; // Nothing new to release.
+ // Releasing smaller blocks is expensive, so we want to make sure that a
+ // significant amount of bytes are free, and that there has been a good
+ // amount of batches pushed to the freelist before attempting to release.
+ if (BlockSize < PageSize / 16U) {
+ if (!Force && BytesPushed < Region->AllocatedUser / 16U)
+ return 0;
+ // We want 8x% to 9x% free bytes (the larger the block, the lower the %).
+ if ((BytesInFreeList * 100U) / Region->AllocatedUser <
+ (100U - 1U - BlockSize / 16U))
+ return 0;
+ }
+
if (!Force) {
- const s32 IntervalMs = getReleaseToOsIntervalMs();
+ const s32 IntervalMs = atomic_load_relaxed(&ReleaseToOsIntervalMs);
if (IntervalMs < 0)
return 0;
if (Region->ReleaseInfo.LastReleaseAtNs +
@@ -469,9 +426,11 @@ private:
}
}
+ auto SkipRegion = [](UNUSED uptr RegionIndex) { return false; };
ReleaseRecorder Recorder(Region->RegionBeg, &Region->Data);
releaseFreeMemoryToOS(Region->FreeList, Region->RegionBeg,
- Region->AllocatedUser, BlockSize, &Recorder);
+ Region->AllocatedUser, 1U, BlockSize, &Recorder,
+ SkipRegion);
if (Recorder.getReleasedRangesCount() > 0) {
Region->ReleaseInfo.PushedBlocksAtLastRelease =
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/release.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/release.cpp
index e144b354b258..5d7c6c5fc110 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/release.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/release.cpp
@@ -11,6 +11,6 @@
namespace scudo {
HybridMutex PackedCounterArray::Mutex = {};
-uptr PackedCounterArray::StaticBuffer[1024];
+uptr PackedCounterArray::StaticBuffer[PackedCounterArray::StaticBufferCount];
} // namespace scudo
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/release.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/release.h
index 323bf9db6dca..5c11da2200e9 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/release.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/release.h
@@ -49,27 +49,32 @@ private:
// incremented past MaxValue.
class PackedCounterArray {
public:
- PackedCounterArray(uptr NumCounters, uptr MaxValue) : N(NumCounters) {
- CHECK_GT(NumCounters, 0);
- CHECK_GT(MaxValue, 0);
+ PackedCounterArray(uptr NumberOfRegions, uptr CountersPerRegion,
+ uptr MaxValue)
+ : Regions(NumberOfRegions), NumCounters(CountersPerRegion) {
+ DCHECK_GT(Regions, 0);
+ DCHECK_GT(NumCounters, 0);
+ DCHECK_GT(MaxValue, 0);
constexpr uptr MaxCounterBits = sizeof(*Buffer) * 8UL;
// Rounding counter storage size up to the power of two allows for using
// bit shifts calculating particular counter's Index and offset.
const uptr CounterSizeBits =
roundUpToPowerOfTwo(getMostSignificantSetBitIndex(MaxValue) + 1);
- CHECK_LE(CounterSizeBits, MaxCounterBits);
+ DCHECK_LE(CounterSizeBits, MaxCounterBits);
CounterSizeBitsLog = getLog2(CounterSizeBits);
CounterMask = ~(static_cast<uptr>(0)) >> (MaxCounterBits - CounterSizeBits);
const uptr PackingRatio = MaxCounterBits >> CounterSizeBitsLog;
- CHECK_GT(PackingRatio, 0);
+ DCHECK_GT(PackingRatio, 0);
PackingRatioLog = getLog2(PackingRatio);
BitOffsetMask = PackingRatio - 1;
- BufferSize = (roundUpTo(N, static_cast<uptr>(1U) << PackingRatioLog) >>
- PackingRatioLog) *
- sizeof(*Buffer);
- if (BufferSize <= StaticBufferSize && Mutex.tryLock()) {
+ SizePerRegion =
+ roundUpTo(NumCounters, static_cast<uptr>(1U) << PackingRatioLog) >>
+ PackingRatioLog;
+ BufferSize = SizePerRegion * sizeof(*Buffer) * Regions;
+ if (BufferSize <= (StaticBufferCount * sizeof(Buffer[0])) &&
+ Mutex.tryLock()) {
Buffer = &StaticBuffer[0];
memset(Buffer, 0, BufferSize);
} else {
@@ -88,45 +93,50 @@ public:
bool isAllocated() const { return !!Buffer; }
- uptr getCount() const { return N; }
+ uptr getCount() const { return NumCounters; }
- uptr get(uptr I) const {
- DCHECK_LT(I, N);
+ uptr get(uptr Region, uptr I) const {
+ DCHECK_LT(Region, Regions);
+ DCHECK_LT(I, NumCounters);
const uptr Index = I >> PackingRatioLog;
const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
- return (Buffer[Index] >> BitOffset) & CounterMask;
+ return (Buffer[Region * SizePerRegion + Index] >> BitOffset) & CounterMask;
}
- void inc(uptr I) const {
- DCHECK_LT(get(I), CounterMask);
+ void inc(uptr Region, uptr I) const {
+ DCHECK_LT(get(Region, I), CounterMask);
const uptr Index = I >> PackingRatioLog;
const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
- Buffer[Index] += static_cast<uptr>(1U) << BitOffset;
+ Buffer[Region * SizePerRegion + Index] += static_cast<uptr>(1U)
+ << BitOffset;
}
- void incRange(uptr From, uptr To) const {
+ void incRange(uptr Region, uptr From, uptr To) const {
DCHECK_LE(From, To);
- const uptr Top = Min(To + 1, N);
+ const uptr Top = Min(To + 1, NumCounters);
for (uptr I = From; I < Top; I++)
- inc(I);
+ inc(Region, I);
}
uptr getBufferSize() const { return BufferSize; }
+ static const uptr StaticBufferCount = 2048U;
+
private:
- const uptr N;
+ const uptr Regions;
+ const uptr NumCounters;
uptr CounterSizeBitsLog;
uptr CounterMask;
uptr PackingRatioLog;
uptr BitOffsetMask;
+ uptr SizePerRegion;
uptr BufferSize;
uptr *Buffer;
static HybridMutex Mutex;
- static const uptr StaticBufferSize = 1024U;
- static uptr StaticBuffer[StaticBufferSize];
+ static uptr StaticBuffer[StaticBufferCount];
};
template <class ReleaseRecorderT> class FreePagesRangeTracker {
@@ -146,6 +156,11 @@ public:
CurrentPage++;
}
+ void skipPages(uptr N) {
+ closeOpenedRange();
+ CurrentPage += N;
+ }
+
void finish() { closeOpenedRange(); }
private:
@@ -164,10 +179,11 @@ private:
uptr CurrentRangeStatePage = 0;
};
-template <class TransferBatchT, class ReleaseRecorderT>
+template <class TransferBatchT, class ReleaseRecorderT, typename SkipRegionT>
NOINLINE void
releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> &FreeList, uptr Base,
- uptr Size, uptr BlockSize, ReleaseRecorderT *Recorder) {
+ uptr RegionSize, uptr NumberOfRegions, uptr BlockSize,
+ ReleaseRecorderT *Recorder, SkipRegionT SkipRegion) {
const uptr PageSize = getPageSizeCached();
// Figure out the number of chunks per page and whether we can take a fast
@@ -204,51 +220,57 @@ releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> &FreeList, uptr Base,
}
}
- const uptr PagesCount = roundUpTo(Size, PageSize) / PageSize;
- PackedCounterArray Counters(PagesCount, FullPagesBlockCountMax);
+ const uptr PagesCount = roundUpTo(RegionSize, PageSize) / PageSize;
+ PackedCounterArray Counters(NumberOfRegions, PagesCount,
+ FullPagesBlockCountMax);
if (!Counters.isAllocated())
return;
const uptr PageSizeLog = getLog2(PageSize);
- const uptr RoundedSize = PagesCount << PageSizeLog;
+ const uptr RoundedRegionSize = PagesCount << PageSizeLog;
+ const uptr RoundedSize = NumberOfRegions * RoundedRegionSize;
// Iterate over free chunks and count how many free chunks affect each
// allocated page.
if (BlockSize <= PageSize && PageSize % BlockSize == 0) {
// Each chunk affects one page only.
for (const auto &It : FreeList) {
- // If dealing with a TransferBatch, the first pointer of the batch will
- // point to the batch itself, we do not want to mark this for release as
- // the batch is in use, so skip the first entry.
- const bool IsTransferBatch =
- (It.getCount() != 0) &&
- (reinterpret_cast<uptr>(It.get(0)) == reinterpret_cast<uptr>(&It));
- for (u32 I = IsTransferBatch ? 1 : 0; I < It.getCount(); I++) {
+ for (u32 I = 0; I < It.getCount(); I++) {
const uptr P = reinterpret_cast<uptr>(It.get(I)) - Base;
// This takes care of P < Base and P >= Base + RoundedSize.
- if (P < RoundedSize)
- Counters.inc(P >> PageSizeLog);
+ if (UNLIKELY(P >= RoundedSize))
+ continue;
+ const uptr RegionIndex = NumberOfRegions == 1U ? 0 : P / RegionSize;
+ const uptr PInRegion = P - RegionIndex * RegionSize;
+ Counters.inc(RegionIndex, PInRegion >> PageSizeLog);
}
}
- for (uptr P = Size; P < RoundedSize; P += BlockSize)
- Counters.inc(P >> PageSizeLog);
} else {
// In all other cases chunks might affect more than one page.
+ DCHECK_GE(RegionSize, BlockSize);
+ const uptr LastBlockInRegion = ((RegionSize / BlockSize) - 1U) * BlockSize;
for (const auto &It : FreeList) {
- // See TransferBatch comment above.
- const bool IsTransferBatch =
- (It.getCount() != 0) &&
- (reinterpret_cast<uptr>(It.get(0)) == reinterpret_cast<uptr>(&It));
- for (u32 I = IsTransferBatch ? 1 : 0; I < It.getCount(); I++) {
+ for (u32 I = 0; I < It.getCount(); I++) {
const uptr P = reinterpret_cast<uptr>(It.get(I)) - Base;
// This takes care of P < Base and P >= Base + RoundedSize.
- if (P < RoundedSize)
- Counters.incRange(P >> PageSizeLog,
- (P + BlockSize - 1) >> PageSizeLog);
+ if (UNLIKELY(P >= RoundedSize))
+ continue;
+ const uptr RegionIndex = NumberOfRegions == 1U ? 0 : P / RegionSize;
+ uptr PInRegion = P - RegionIndex * RegionSize;
+ Counters.incRange(RegionIndex, PInRegion >> PageSizeLog,
+ (PInRegion + BlockSize - 1) >> PageSizeLog);
+ // The last block in a region might straddle a page, so if it's
+ // free, we mark the following "pretend" memory block(s) as free.
+ if (PInRegion == LastBlockInRegion) {
+ PInRegion += BlockSize;
+ while (PInRegion < RoundedRegionSize) {
+ Counters.incRange(RegionIndex, PInRegion >> PageSizeLog,
+ (PInRegion + BlockSize - 1) >> PageSizeLog);
+ PInRegion += BlockSize;
+ }
+ }
}
}
- for (uptr P = Size; P < RoundedSize; P += BlockSize)
- Counters.incRange(P >> PageSizeLog, (P + BlockSize - 1) >> PageSizeLog);
}
// Iterate over pages detecting ranges of pages with chunk Counters equal
@@ -256,8 +278,15 @@ releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> &FreeList, uptr Base,
FreePagesRangeTracker<ReleaseRecorderT> RangeTracker(Recorder);
if (SameBlockCountPerPage) {
// Fast path, every page has the same number of chunks affecting it.
- for (uptr I = 0; I < Counters.getCount(); I++)
- RangeTracker.processNextPage(Counters.get(I) == FullPagesBlockCountMax);
+ for (uptr I = 0; I < NumberOfRegions; I++) {
+ if (SkipRegion(I)) {
+ RangeTracker.skipPages(PagesCount);
+ continue;
+ }
+ for (uptr J = 0; J < PagesCount; J++)
+ RangeTracker.processNextPage(Counters.get(I, J) ==
+ FullPagesBlockCountMax);
+ }
} else {
// Slow path, go through the pages keeping count how many chunks affect
// each page.
@@ -268,23 +297,28 @@ releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> &FreeList, uptr Base,
// except the first and the last one) and then the last chunk size, adding
// up the number of chunks on the current page and checking on every step
// whether the page boundary was crossed.
- uptr PrevPageBoundary = 0;
- uptr CurrentBoundary = 0;
- for (uptr I = 0; I < Counters.getCount(); I++) {
- const uptr PageBoundary = PrevPageBoundary + PageSize;
- uptr BlocksPerPage = Pn;
- if (CurrentBoundary < PageBoundary) {
- if (CurrentBoundary > PrevPageBoundary)
- BlocksPerPage++;
- CurrentBoundary += Pnc;
+ for (uptr I = 0; I < NumberOfRegions; I++) {
+ if (SkipRegion(I)) {
+ RangeTracker.skipPages(PagesCount);
+ continue;
+ }
+ uptr PrevPageBoundary = 0;
+ uptr CurrentBoundary = 0;
+ for (uptr J = 0; J < PagesCount; J++) {
+ const uptr PageBoundary = PrevPageBoundary + PageSize;
+ uptr BlocksPerPage = Pn;
if (CurrentBoundary < PageBoundary) {
- BlocksPerPage++;
- CurrentBoundary += BlockSize;
+ if (CurrentBoundary > PrevPageBoundary)
+ BlocksPerPage++;
+ CurrentBoundary += Pnc;
+ if (CurrentBoundary < PageBoundary) {
+ BlocksPerPage++;
+ CurrentBoundary += BlockSize;
+ }
}
+ PrevPageBoundary = PageBoundary;
+ RangeTracker.processNextPage(Counters.get(I, J) == BlocksPerPage);
}
- PrevPageBoundary = PageBoundary;
-
- RangeTracker.processNextPage(Counters.get(I) == BlocksPerPage);
}
}
RangeTracker.finish();
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/secondary.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/secondary.h
index 84eaa5091b43..063640106abb 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/secondary.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/secondary.h
@@ -31,7 +31,7 @@ struct Header {
uptr BlockEnd;
uptr MapBase;
uptr MapSize;
- MapPlatformData Data;
+ [[no_unique_address]] MapPlatformData Data;
};
constexpr uptr getHeaderSize() {
@@ -52,29 +52,37 @@ class MapAllocatorNoCache {
public:
void initLinkerInitialized(UNUSED s32 ReleaseToOsInterval) {}
void init(UNUSED s32 ReleaseToOsInterval) {}
- bool retrieve(UNUSED uptr Size, UNUSED LargeBlock::Header **H) {
+ bool retrieve(UNUSED uptr Size, UNUSED LargeBlock::Header **H,
+ UNUSED bool *Zeroed) {
return false;
}
bool store(UNUSED LargeBlock::Header *H) { return false; }
- static bool canCache(UNUSED uptr Size) { return false; }
+ bool canCache(UNUSED uptr Size) { return false; }
void disable() {}
void enable() {}
void releaseToOS() {}
- void setReleaseToOsIntervalMs(UNUSED s32 Interval) {}
+ bool setOption(Option O, UNUSED sptr Value) {
+ if (O == Option::ReleaseInterval || O == Option::MaxCacheEntriesCount ||
+ O == Option::MaxCacheEntrySize)
+ return false;
+ // Not supported by the Secondary Cache, but not an error either.
+ return true;
+ }
};
-template <uptr MaxEntriesCount = 32U, uptr MaxEntrySize = 1UL << 19,
- s32 MinReleaseToOsIntervalMs = INT32_MIN,
- s32 MaxReleaseToOsIntervalMs = INT32_MAX>
-class MapAllocatorCache {
+template <typename Config> class MapAllocatorCache {
public:
- // Fuchsia doesn't allow releasing Secondary blocks yet. Note that 0 length
- // arrays are an extension for some compilers.
- // FIXME(kostyak): support (partially) the cache on Fuchsia.
- static_assert(!SCUDO_FUCHSIA || MaxEntriesCount == 0U, "");
+ // Ensure the default maximum specified fits the array.
+ static_assert(Config::SecondaryCacheDefaultMaxEntriesCount <=
+ Config::SecondaryCacheEntriesArraySize,
+ "");
void initLinkerInitialized(s32 ReleaseToOsInterval) {
- setReleaseToOsIntervalMs(ReleaseToOsInterval);
+ setOption(Option::MaxCacheEntriesCount,
+ static_cast<sptr>(Config::SecondaryCacheDefaultMaxEntriesCount));
+ setOption(Option::MaxCacheEntrySize,
+ static_cast<sptr>(Config::SecondaryCacheDefaultMaxEntrySize));
+ setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
}
void init(s32 ReleaseToOsInterval) {
memset(this, 0, sizeof(*this));
@@ -85,13 +93,14 @@ public:
bool EntryCached = false;
bool EmptyCache = false;
const u64 Time = getMonotonicTime();
+ const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
{
ScopedLock L(Mutex);
- if (EntriesCount == MaxEntriesCount) {
+ if (EntriesCount >= MaxCount) {
if (IsFullEvents++ == 4U)
EmptyCache = true;
} else {
- for (uptr I = 0; I < MaxEntriesCount; I++) {
+ for (u32 I = 0; I < MaxCount; I++) {
if (Entries[I].Block)
continue;
if (I != 0)
@@ -111,17 +120,18 @@ public:
s32 Interval;
if (EmptyCache)
empty();
- else if ((Interval = getReleaseToOsIntervalMs()) >= 0)
+ else if ((Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs)) >= 0)
releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
return EntryCached;
}
- bool retrieve(uptr Size, LargeBlock::Header **H) {
+ bool retrieve(uptr Size, LargeBlock::Header **H, bool *Zeroed) {
const uptr PageSize = getPageSizeCached();
+ const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
ScopedLock L(Mutex);
if (EntriesCount == 0)
return false;
- for (uptr I = 0; I < MaxEntriesCount; I++) {
+ for (u32 I = 0; I < MaxCount; I++) {
if (!Entries[I].Block)
continue;
const uptr BlockSize = Entries[I].BlockEnd - Entries[I].Block;
@@ -130,6 +140,7 @@ public:
if (Size < BlockSize - PageSize * 4U)
continue;
*H = reinterpret_cast<LargeBlock::Header *>(Entries[I].Block);
+ *Zeroed = Entries[I].Time == 0;
Entries[I].Block = 0;
(*H)->BlockEnd = Entries[I].BlockEnd;
(*H)->MapBase = Entries[I].MapBase;
@@ -141,17 +152,31 @@ public:
return false;
}
- static bool canCache(uptr Size) {
- return MaxEntriesCount != 0U && Size <= MaxEntrySize;
+ bool canCache(uptr Size) {
+ return atomic_load_relaxed(&MaxEntriesCount) != 0U &&
+ Size <= atomic_load_relaxed(&MaxEntrySize);
}
- void setReleaseToOsIntervalMs(s32 Interval) {
- if (Interval >= MaxReleaseToOsIntervalMs) {
- Interval = MaxReleaseToOsIntervalMs;
- } else if (Interval <= MinReleaseToOsIntervalMs) {
- Interval = MinReleaseToOsIntervalMs;
+ bool setOption(Option O, sptr Value) {
+ if (O == Option::ReleaseInterval) {
+ const s32 Interval =
+ Max(Min(static_cast<s32>(Value),
+ Config::SecondaryCacheMaxReleaseToOsIntervalMs),
+ Config::SecondaryCacheMinReleaseToOsIntervalMs);
+ atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
+ return true;
+ } else if (O == Option::MaxCacheEntriesCount) {
+ const u32 MaxCount = static_cast<u32>(Value);
+ if (MaxCount > Config::SecondaryCacheEntriesArraySize)
+ return false;
+ atomic_store_relaxed(&MaxEntriesCount, MaxCount);
+ return true;
+ } else if (O == Option::MaxCacheEntrySize) {
+ atomic_store_relaxed(&MaxEntrySize, static_cast<uptr>(Value));
+ return true;
}
- atomic_store(&ReleaseToOsIntervalMs, Interval, memory_order_relaxed);
+ // Not supported by the Secondary Cache, but not an error either.
+ return true;
}
void releaseToOS() { releaseOlderThan(UINT64_MAX); }
@@ -166,11 +191,11 @@ private:
void *MapBase;
uptr MapSize;
MapPlatformData Data;
- } MapInfo[MaxEntriesCount];
+ } MapInfo[Config::SecondaryCacheEntriesArraySize];
uptr N = 0;
{
ScopedLock L(Mutex);
- for (uptr I = 0; I < MaxEntriesCount; I++) {
+ for (uptr I = 0; I < Config::SecondaryCacheEntriesArraySize; I++) {
if (!Entries[I].Block)
continue;
MapInfo[N].MapBase = reinterpret_cast<void *>(Entries[I].MapBase);
@@ -191,7 +216,7 @@ private:
ScopedLock L(Mutex);
if (!EntriesCount)
return;
- for (uptr I = 0; I < MaxEntriesCount; I++) {
+ for (uptr I = 0; I < Config::SecondaryCacheEntriesArraySize; I++) {
if (!Entries[I].Block || !Entries[I].Time || Entries[I].Time > Time)
continue;
releasePagesToOS(Entries[I].Block, 0,
@@ -201,28 +226,26 @@ private:
}
}
- s32 getReleaseToOsIntervalMs() {
- return atomic_load(&ReleaseToOsIntervalMs, memory_order_relaxed);
- }
-
struct CachedBlock {
uptr Block;
uptr BlockEnd;
uptr MapBase;
uptr MapSize;
- MapPlatformData Data;
+ [[no_unique_address]] MapPlatformData Data;
u64 Time;
};
HybridMutex Mutex;
- CachedBlock Entries[MaxEntriesCount];
+ CachedBlock Entries[Config::SecondaryCacheEntriesArraySize];
u32 EntriesCount;
+ atomic_u32 MaxEntriesCount;
+ atomic_uptr MaxEntrySize;
uptr LargestSize;
u32 IsFullEvents;
atomic_s32 ReleaseToOsIntervalMs;
};
-template <class CacheT> class MapAllocator {
+template <typename Config> class MapAllocator {
public:
void initLinkerInitialized(GlobalStats *S, s32 ReleaseToOsInterval = -1) {
Cache.initLinkerInitialized(ReleaseToOsInterval);
@@ -265,16 +288,14 @@ public:
Callback(reinterpret_cast<uptr>(&H) + LargeBlock::getHeaderSize());
}
- static uptr canCache(uptr Size) { return CacheT::canCache(Size); }
+ uptr canCache(uptr Size) { return Cache.canCache(Size); }
- void setReleaseToOsIntervalMs(s32 Interval) {
- Cache.setReleaseToOsIntervalMs(Interval);
- }
+ bool setOption(Option O, sptr Value) { return Cache.setOption(O, Value); }
void releaseToOS() { Cache.releaseToOS(); }
private:
- CacheT Cache;
+ typename Config::SecondaryCache Cache;
HybridMutex Mutex;
DoublyLinkedList<LargeBlock::Header> InUseBlocks;
@@ -297,8 +318,8 @@ private:
// For allocations requested with an alignment greater than or equal to a page,
// the committed memory will amount to something close to Size - AlignmentHint
// (pending rounding and headers).
-template <class CacheT>
-void *MapAllocator<CacheT>::allocate(uptr Size, uptr AlignmentHint,
+template <typename Config>
+void *MapAllocator<Config>::allocate(uptr Size, uptr AlignmentHint,
uptr *BlockEnd,
FillContentsMode FillContents) {
DCHECK_GE(Size, AlignmentHint);
@@ -306,14 +327,15 @@ void *MapAllocator<CacheT>::allocate(uptr Size, uptr AlignmentHint,
const uptr RoundedSize =
roundUpTo(Size + LargeBlock::getHeaderSize(), PageSize);
- if (AlignmentHint < PageSize && CacheT::canCache(RoundedSize)) {
+ if (AlignmentHint < PageSize && Cache.canCache(RoundedSize)) {
LargeBlock::Header *H;
- if (Cache.retrieve(RoundedSize, &H)) {
+ bool Zeroed;
+ if (Cache.retrieve(RoundedSize, &H, &Zeroed)) {
if (BlockEnd)
*BlockEnd = H->BlockEnd;
void *Ptr = reinterpret_cast<void *>(reinterpret_cast<uptr>(H) +
LargeBlock::getHeaderSize());
- if (FillContents)
+ if (FillContents && !Zeroed)
memset(Ptr, FillContents == ZeroFill ? 0 : PatternFillByte,
H->BlockEnd - reinterpret_cast<uptr>(Ptr));
const uptr BlockSize = H->BlockEnd - reinterpret_cast<uptr>(H);
@@ -365,9 +387,9 @@ void *MapAllocator<CacheT>::allocate(uptr Size, uptr AlignmentHint,
}
const uptr CommitSize = MapEnd - PageSize - CommitBase;
- const uptr Ptr =
- reinterpret_cast<uptr>(map(reinterpret_cast<void *>(CommitBase),
- CommitSize, "scudo:secondary", 0, &Data));
+ const uptr Ptr = reinterpret_cast<uptr>(
+ map(reinterpret_cast<void *>(CommitBase), CommitSize, "scudo:secondary",
+ MAP_RESIZABLE, &Data));
LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(Ptr);
H->MapBase = MapBase;
H->MapSize = MapEnd - MapBase;
@@ -388,7 +410,7 @@ void *MapAllocator<CacheT>::allocate(uptr Size, uptr AlignmentHint,
return reinterpret_cast<void *>(Ptr + LargeBlock::getHeaderSize());
}
-template <class CacheT> void MapAllocator<CacheT>::deallocate(void *Ptr) {
+template <typename Config> void MapAllocator<Config>::deallocate(void *Ptr) {
LargeBlock::Header *H = LargeBlock::getHeader(Ptr);
const uptr Block = reinterpret_cast<uptr>(H);
const uptr CommitSize = H->BlockEnd - Block;
@@ -400,7 +422,7 @@ template <class CacheT> void MapAllocator<CacheT>::deallocate(void *Ptr) {
Stats.sub(StatAllocated, CommitSize);
Stats.sub(StatMapped, H->MapSize);
}
- if (CacheT::canCache(CommitSize) && Cache.store(H))
+ if (Cache.canCache(CommitSize) && Cache.store(H))
return;
void *Addr = reinterpret_cast<void *>(H->MapBase);
const uptr Size = H->MapSize;
@@ -408,8 +430,8 @@ template <class CacheT> void MapAllocator<CacheT>::deallocate(void *Ptr) {
unmap(Addr, Size, UNMAP_ALL, &Data);
}
-template <class CacheT>
-void MapAllocator<CacheT>::getStats(ScopedString *Str) const {
+template <typename Config>
+void MapAllocator<Config>::getStats(ScopedString *Str) const {
Str->append(
"Stats: MapAllocator: allocated %zu times (%zuK), freed %zu times "
"(%zuK), remains %zu (%zuK) max %zuM\n",
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/stack_depot.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/stack_depot.h
index f2f4d9597795..7968f7efff7c 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/stack_depot.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/stack_depot.h
@@ -20,7 +20,7 @@ class MurMur2HashBuilder {
static const u32 R = 24;
u32 H;
- public:
+public:
explicit MurMur2HashBuilder(u32 Init = 0) { H = Seed ^ Init; }
void add(u32 K) {
K *= M;
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/string_utils.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/string_utils.cpp
index 5de8b57bfcd1..f304491019b2 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/string_utils.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/string_utils.cpp
@@ -78,10 +78,11 @@ static int appendUnsigned(char **Buffer, const char *BufferEnd, u64 Num,
static int appendSignedDecimal(char **Buffer, const char *BufferEnd, s64 Num,
u8 MinNumberLength, bool PadWithZero) {
const bool Negative = (Num < 0);
- return appendNumber(Buffer, BufferEnd,
- static_cast<u64>(Negative ? -Num : Num), 10,
- MinNumberLength, PadWithZero, Negative,
- /*Upper=*/false);
+ const u64 UnsignedNum = (Num == INT64_MIN)
+ ? static_cast<u64>(INT64_MAX) + 1
+ : static_cast<u64>(Negative ? -Num : Num);
+ return appendNumber(Buffer, BufferEnd, UnsignedNum, 10, MinNumberLength,
+ PadWithZero, Negative, /*Upper=*/false);
}
// Use the fact that explicitly requesting 0 Width (%0s) results in UB and
@@ -158,16 +159,18 @@ int formatString(char *Buffer, uptr BufferLength, const char *Format,
CHECK(!((Precision >= 0 || LeftJustified) && *Cur != 's'));
switch (*Cur) {
case 'd': {
- DVal = HaveLL ? va_arg(Args, s64)
- : HaveZ ? va_arg(Args, sptr) : va_arg(Args, int);
+ DVal = HaveLL ? va_arg(Args, s64)
+ : HaveZ ? va_arg(Args, sptr)
+ : va_arg(Args, int);
Res += appendSignedDecimal(&Buffer, BufferEnd, DVal, Width, PadWithZero);
break;
}
case 'u':
case 'x':
case 'X': {
- UVal = HaveLL ? va_arg(Args, u64)
- : HaveZ ? va_arg(Args, uptr) : va_arg(Args, unsigned);
+ UVal = HaveLL ? va_arg(Args, u64)
+ : HaveZ ? va_arg(Args, uptr)
+ : va_arg(Args, unsigned);
const bool Upper = (*Cur == 'X');
Res += appendUnsigned(&Buffer, BufferEnd, UVal, (*Cur == 'u') ? 10 : 16,
Width, PadWithZero, Upper);
@@ -219,6 +222,7 @@ void ScopedString::append(const char *Format, va_list Args) {
static_cast<uptr>(formatString(C, sizeof(C), Format, Args)) + 1;
String.resize(Length + AdditionalLength);
formatString(String.data() + Length, AdditionalLength, Format, ArgsCopy);
+ va_end(ArgsCopy);
Length = strlen(String.data());
CHECK_LT(Length, String.size());
}
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd_exclusive.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd_exclusive.h
index 3492509b5a8e..1704c8cf80d8 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd_exclusive.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd_exclusive.h
@@ -13,10 +13,13 @@
namespace scudo {
-enum class ThreadState : u8 {
- NotInitialized = 0,
- Initialized,
- TornDown,
+struct ThreadState {
+ bool DisableMemInit : 1;
+ enum {
+ NotInitialized = 0,
+ Initialized,
+ TornDown,
+ } InitState : 2;
};
template <class Allocator> void teardownThread(void *Ptr);
@@ -36,13 +39,13 @@ template <class Allocator> struct TSDRegistryExT {
void unmapTestOnly() {}
ALWAYS_INLINE void initThreadMaybe(Allocator *Instance, bool MinimalInit) {
- if (LIKELY(State != ThreadState::NotInitialized))
+ if (LIKELY(State.InitState != ThreadState::NotInitialized))
return;
initThread(Instance, MinimalInit);
}
ALWAYS_INLINE TSD<Allocator> *getTSDAndLock(bool *UnlockRequired) {
- if (LIKELY(State == ThreadState::Initialized &&
+ if (LIKELY(State.InitState == ThreadState::Initialized &&
!atomic_load(&Disabled, memory_order_acquire))) {
*UnlockRequired = false;
return &ThreadTSD;
@@ -66,6 +69,16 @@ template <class Allocator> struct TSDRegistryExT {
Mutex.unlock();
}
+ bool setOption(Option O, UNUSED sptr Value) {
+ if (O == Option::ThreadDisableMemInit)
+ State.DisableMemInit = Value;
+ if (O == Option::MaxTSDsCount)
+ return false;
+ return true;
+ }
+
+ bool getDisableMemInit() { return State.DisableMemInit; }
+
private:
void initOnceMaybe(Allocator *Instance) {
ScopedLock L(Mutex);
@@ -84,7 +97,7 @@ private:
CHECK_EQ(
pthread_setspecific(PThreadKey, reinterpret_cast<void *>(Instance)), 0);
ThreadTSD.initLinkerInitialized(Instance);
- State = ThreadState::Initialized;
+ State.InitState = ThreadState::Initialized;
Instance->callPostInitCallback();
}
@@ -93,16 +106,16 @@ private:
atomic_u8 Disabled;
TSD<Allocator> FallbackTSD;
HybridMutex Mutex;
- static THREADLOCAL ThreadState State;
- static THREADLOCAL TSD<Allocator> ThreadTSD;
+ static thread_local ThreadState State;
+ static thread_local TSD<Allocator> ThreadTSD;
friend void teardownThread<Allocator>(void *Ptr);
};
template <class Allocator>
-THREADLOCAL TSD<Allocator> TSDRegistryExT<Allocator>::ThreadTSD;
+thread_local TSD<Allocator> TSDRegistryExT<Allocator>::ThreadTSD;
template <class Allocator>
-THREADLOCAL ThreadState TSDRegistryExT<Allocator>::State;
+thread_local ThreadState TSDRegistryExT<Allocator>::State;
template <class Allocator> void teardownThread(void *Ptr) {
typedef TSDRegistryExT<Allocator> TSDRegistryT;
@@ -120,7 +133,7 @@ template <class Allocator> void teardownThread(void *Ptr) {
return;
}
TSDRegistryT::ThreadTSD.commitBack(Instance);
- TSDRegistryT::State = ThreadState::TornDown;
+ TSDRegistryT::State.InitState = ThreadState::TornDown;
}
} // namespace scudo
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd_shared.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd_shared.h
index 038a5905ff48..6a68b3ef5453 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd_shared.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd_shared.h
@@ -9,36 +9,28 @@
#ifndef SCUDO_TSD_SHARED_H_
#define SCUDO_TSD_SHARED_H_
-#include "linux.h" // for getAndroidTlsPtr()
#include "tsd.h"
+#if SCUDO_HAS_PLATFORM_TLS_SLOT
+// This is a platform-provided header that needs to be on the include path when
+// Scudo is compiled. It must declare a function with the prototype:
+// uintptr_t *getPlatformAllocatorTlsSlot()
+// that returns the address of a thread-local word of storage reserved for
+// Scudo, that must be zero-initialized in newly created threads.
+#include "scudo_platform_tls_slot.h"
+#endif
+
namespace scudo {
-template <class Allocator, u32 MaxTSDCount> struct TSDRegistrySharedT {
+template <class Allocator, u32 TSDsArraySize, u32 DefaultTSDCount>
+struct TSDRegistrySharedT {
void initLinkerInitialized(Allocator *Instance) {
Instance->initLinkerInitialized();
- CHECK_EQ(pthread_key_create(&PThreadKey, nullptr), 0); // For non-TLS
- const u32 NumberOfCPUs = getNumberOfCPUs();
- NumberOfTSDs = (SCUDO_ANDROID || NumberOfCPUs == 0)
- ? MaxTSDCount
- : Min(NumberOfCPUs, MaxTSDCount);
- for (u32 I = 0; I < NumberOfTSDs; I++)
+ for (u32 I = 0; I < TSDsArraySize; I++)
TSDs[I].initLinkerInitialized(Instance);
- // Compute all the coprimes of NumberOfTSDs. This will be used to walk the
- // array of TSDs in a random order. For details, see:
- // https://lemire.me/blog/2017/09/18/visiting-all-values-in-an-array-exactly-once-in-random-order/
- for (u32 I = 0; I < NumberOfTSDs; I++) {
- u32 A = I + 1;
- u32 B = NumberOfTSDs;
- // Find the GCD between I + 1 and NumberOfTSDs. If 1, they are coprimes.
- while (B != 0) {
- const u32 T = A;
- A = B;
- B = T % B;
- }
- if (A == 1)
- CoPrimes[NumberOfCoPrimes++] = I + 1;
- }
+ const u32 NumberOfCPUs = getNumberOfCPUs();
+ setNumberOfTSDs((NumberOfCPUs == 0) ? DefaultTSDCount
+ : Min(NumberOfCPUs, DefaultTSDCount));
Initialized = true;
}
void init(Allocator *Instance) {
@@ -46,10 +38,7 @@ template <class Allocator, u32 MaxTSDCount> struct TSDRegistrySharedT {
initLinkerInitialized(Instance);
}
- void unmapTestOnly() {
- setCurrentTSD(nullptr);
- pthread_key_delete(PThreadKey);
- }
+ void unmapTestOnly() { setCurrentTSD(nullptr); }
ALWAYS_INLINE void initThreadMaybe(Allocator *Instance,
UNUSED bool MinimalInit) {
@@ -66,42 +55,88 @@ template <class Allocator, u32 MaxTSDCount> struct TSDRegistrySharedT {
if (TSD->tryLock())
return TSD;
// If that fails, go down the slow path.
+ if (TSDsArraySize == 1U) {
+ // Only 1 TSD, not need to go any further.
+ // The compiler will optimize this one way or the other.
+ TSD->lock();
+ return TSD;
+ }
return getTSDAndLockSlow(TSD);
}
void disable() {
Mutex.lock();
- for (u32 I = 0; I < NumberOfTSDs; I++)
+ for (u32 I = 0; I < TSDsArraySize; I++)
TSDs[I].lock();
}
void enable() {
- for (s32 I = static_cast<s32>(NumberOfTSDs - 1); I >= 0; I--)
+ for (s32 I = static_cast<s32>(TSDsArraySize - 1); I >= 0; I--)
TSDs[I].unlock();
Mutex.unlock();
}
+ bool setOption(Option O, sptr Value) {
+ if (O == Option::MaxTSDsCount)
+ return setNumberOfTSDs(static_cast<u32>(Value));
+ if (O == Option::ThreadDisableMemInit)
+ setDisableMemInit(Value);
+ // Not supported by the TSD Registry, but not an error either.
+ return true;
+ }
+
+ bool getDisableMemInit() const { return *getTlsPtr() & 1; }
+
private:
- ALWAYS_INLINE void setCurrentTSD(TSD<Allocator> *CurrentTSD) {
-#if _BIONIC
- *getAndroidTlsPtr() = reinterpret_cast<uptr>(CurrentTSD);
-#elif SCUDO_LINUX
- ThreadTSD = CurrentTSD;
+ ALWAYS_INLINE uptr *getTlsPtr() const {
+#if SCUDO_HAS_PLATFORM_TLS_SLOT
+ return reinterpret_cast<uptr *>(getPlatformAllocatorTlsSlot());
#else
- CHECK_EQ(
- pthread_setspecific(PThreadKey, reinterpret_cast<void *>(CurrentTSD)),
- 0);
+ static thread_local uptr ThreadTSD;
+ return &ThreadTSD;
#endif
}
+ static_assert(alignof(TSD<Allocator>) >= 2, "");
+
+ ALWAYS_INLINE void setCurrentTSD(TSD<Allocator> *CurrentTSD) {
+ *getTlsPtr() &= 1;
+ *getTlsPtr() |= reinterpret_cast<uptr>(CurrentTSD);
+ }
+
ALWAYS_INLINE TSD<Allocator> *getCurrentTSD() {
-#if _BIONIC
- return reinterpret_cast<TSD<Allocator> *>(*getAndroidTlsPtr());
-#elif SCUDO_LINUX
- return ThreadTSD;
-#else
- return reinterpret_cast<TSD<Allocator> *>(pthread_getspecific(PThreadKey));
-#endif
+ return reinterpret_cast<TSD<Allocator> *>(*getTlsPtr() & ~1ULL);
+ }
+
+ bool setNumberOfTSDs(u32 N) {
+ ScopedLock L(MutexTSDs);
+ if (N < NumberOfTSDs)
+ return false;
+ if (N > TSDsArraySize)
+ N = TSDsArraySize;
+ NumberOfTSDs = N;
+ NumberOfCoPrimes = 0;
+ // Compute all the coprimes of NumberOfTSDs. This will be used to walk the
+ // array of TSDs in a random order. For details, see:
+ // https://lemire.me/blog/2017/09/18/visiting-all-values-in-an-array-exactly-once-in-random-order/
+ for (u32 I = 0; I < N; I++) {
+ u32 A = I + 1;
+ u32 B = N;
+ // Find the GCD between I + 1 and N. If 1, they are coprimes.
+ while (B != 0) {
+ const u32 T = A;
+ A = B;
+ B = T % B;
+ }
+ if (A == 1)
+ CoPrimes[NumberOfCoPrimes++] = I + 1;
+ }
+ return true;
+ }
+
+ void setDisableMemInit(bool B) {
+ *getTlsPtr() &= ~1ULL;
+ *getTlsPtr() |= B;
}
void initOnceMaybe(Allocator *Instance) {
@@ -120,17 +155,23 @@ private:
}
NOINLINE TSD<Allocator> *getTSDAndLockSlow(TSD<Allocator> *CurrentTSD) {
- if (MaxTSDCount > 1U && NumberOfTSDs > 1U) {
- // Use the Precedence of the current TSD as our random seed. Since we are
- // in the slow path, it means that tryLock failed, and as a result it's
- // very likely that said Precedence is non-zero.
- const u32 R = static_cast<u32>(CurrentTSD->getPrecedence());
- const u32 Inc = CoPrimes[R % NumberOfCoPrimes];
- u32 Index = R % NumberOfTSDs;
+ // Use the Precedence of the current TSD as our random seed. Since we are
+ // in the slow path, it means that tryLock failed, and as a result it's
+ // very likely that said Precedence is non-zero.
+ const u32 R = static_cast<u32>(CurrentTSD->getPrecedence());
+ u32 N, Inc;
+ {
+ ScopedLock L(MutexTSDs);
+ N = NumberOfTSDs;
+ DCHECK_NE(NumberOfCoPrimes, 0U);
+ Inc = CoPrimes[R % NumberOfCoPrimes];
+ }
+ if (N > 1U) {
+ u32 Index = R % N;
uptr LowestPrecedence = UINTPTR_MAX;
TSD<Allocator> *CandidateTSD = nullptr;
// Go randomly through at most 4 contexts and find a candidate.
- for (u32 I = 0; I < Min(4U, NumberOfTSDs); I++) {
+ for (u32 I = 0; I < Min(4U, N); I++) {
if (TSDs[Index].tryLock()) {
setCurrentTSD(&TSDs[Index]);
return &TSDs[Index];
@@ -142,8 +183,8 @@ private:
LowestPrecedence = Precedence;
}
Index += Inc;
- if (Index >= NumberOfTSDs)
- Index -= NumberOfTSDs;
+ if (Index >= N)
+ Index -= N;
}
if (CandidateTSD) {
CandidateTSD->lock();
@@ -156,25 +197,16 @@ private:
return CurrentTSD;
}
- pthread_key_t PThreadKey;
atomic_u32 CurrentIndex;
u32 NumberOfTSDs;
u32 NumberOfCoPrimes;
- u32 CoPrimes[MaxTSDCount];
+ u32 CoPrimes[TSDsArraySize];
bool Initialized;
HybridMutex Mutex;
- TSD<Allocator> TSDs[MaxTSDCount];
-#if SCUDO_LINUX && !_BIONIC
- static THREADLOCAL TSD<Allocator> *ThreadTSD;
-#endif
+ HybridMutex MutexTSDs;
+ TSD<Allocator> TSDs[TSDsArraySize];
};
-#if SCUDO_LINUX && !_BIONIC
-template <class Allocator, u32 MaxTSDCount>
-THREADLOCAL TSD<Allocator>
- *TSDRegistrySharedT<Allocator, MaxTSDCount>::ThreadTSD;
-#endif
-
} // namespace scudo
#endif // SCUDO_TSD_SHARED_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.h
index 33a0c53cec03..6d0cecdc4b41 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.h
@@ -41,12 +41,4 @@ struct __scudo_mallinfo {
#define SCUDO_MALLINFO __scudo_mallinfo
#endif
-#ifndef M_DECAY_TIME
-#define M_DECAY_TIME -100
-#endif
-
-#ifndef M_PURGE
-#define M_PURGE -101
-#endif
-
#endif // SCUDO_WRAPPERS_C_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.inc b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.inc
index 4396dfc50d1d..9d640038d8e2 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.inc
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.inc
@@ -155,7 +155,7 @@ void SCUDO_PREFIX(malloc_postinit)() {
SCUDO_PREFIX(malloc_enable));
}
-INTERFACE WEAK int SCUDO_PREFIX(mallopt)(int param, UNUSED int value) {
+INTERFACE WEAK int SCUDO_PREFIX(mallopt)(int param, int value) {
if (param == M_DECAY_TIME) {
if (SCUDO_ANDROID) {
if (value == 0) {
@@ -173,8 +173,29 @@ INTERFACE WEAK int SCUDO_PREFIX(mallopt)(int param, UNUSED int value) {
} else if (param == M_PURGE) {
SCUDO_ALLOCATOR.releaseToOS();
return 1;
+ } else {
+ scudo::Option option;
+ switch (param) {
+ case M_MEMTAG_TUNING:
+ option = scudo::Option::MemtagTuning;
+ break;
+ case M_THREAD_DISABLE_MEM_INIT:
+ option = scudo::Option::ThreadDisableMemInit;
+ break;
+ case M_CACHE_COUNT_MAX:
+ option = scudo::Option::MaxCacheEntriesCount;
+ break;
+ case M_CACHE_SIZE_MAX:
+ option = scudo::Option::MaxCacheEntrySize;
+ break;
+ case M_TSDS_COUNT_MAX:
+ option = scudo::Option::MaxTSDsCount;
+ break;
+ default:
+ return 0;
+ }
+ return SCUDO_ALLOCATOR.setOption(option, static_cast<scudo::sptr>(value));
}
- return 0;
}
INTERFACE WEAK void *SCUDO_PREFIX(aligned_alloc)(size_t alignment,
@@ -213,30 +234,26 @@ INTERFACE WEAK int SCUDO_PREFIX(malloc_info)(UNUSED int options, FILE *stream) {
// Disable memory tagging for the heap. The caller must disable memory tag
// checks globally (e.g. by clearing TCF0 on aarch64) before calling this
-// function, and may not re-enable them after calling the function. The program
-// must be single threaded at the point when the function is called.
+// function, and may not re-enable them after calling the function.
INTERFACE WEAK void SCUDO_PREFIX(malloc_disable_memory_tagging)() {
SCUDO_ALLOCATOR.disableMemoryTagging();
}
// Sets whether scudo records stack traces and other metadata for allocations
// and deallocations. This function only has an effect if the allocator and
-// hardware support memory tagging. The program must be single threaded at the
-// point when the function is called.
+// hardware support memory tagging.
INTERFACE WEAK void
SCUDO_PREFIX(malloc_set_track_allocation_stacks)(int track) {
SCUDO_ALLOCATOR.setTrackAllocationStacks(track);
}
-// Sets whether scudo zero-initializes all allocated memory. The program must
-// be single threaded at the point when the function is called.
+// Sets whether scudo zero-initializes all allocated memory.
INTERFACE WEAK void SCUDO_PREFIX(malloc_set_zero_contents)(int zero_contents) {
SCUDO_ALLOCATOR.setFillContents(zero_contents ? scudo::ZeroFill
: scudo::NoFill);
}
-// Sets whether scudo pattern-initializes all allocated memory. The program must
-// be single threaded at the point when the function is called.
+// Sets whether scudo pattern-initializes all allocated memory.
INTERFACE WEAK void
SCUDO_PREFIX(malloc_set_pattern_fill_contents)(int pattern_fill_contents) {
SCUDO_ALLOCATOR.setFillContents(
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/dd/dd_interceptors.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/dd/dd_interceptors.cpp
index 35a0beb19196..f78ef2d44279 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/dd/dd_interceptors.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/dd/dd_interceptors.cpp
@@ -6,11 +6,12 @@
//
//===----------------------------------------------------------------------===//
+#include <pthread.h>
+
#include "dd_rtl.h"
#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_procmaps.h"
-#include <pthread.h>
-#include <stdlib.h>
using namespace __dsan;
@@ -163,12 +164,12 @@ static pthread_cond_t *init_cond(pthread_cond_t *c, bool force = false) {
uptr cond = atomic_load(p, memory_order_acquire);
if (!force && cond != 0)
return (pthread_cond_t*)cond;
- void *newcond = malloc(sizeof(pthread_cond_t));
+ void *newcond = InternalAlloc(sizeof(pthread_cond_t));
internal_memset(newcond, 0, sizeof(pthread_cond_t));
if (atomic_compare_exchange_strong(p, &cond, (uptr)newcond,
memory_order_acq_rel))
return (pthread_cond_t*)newcond;
- free(newcond);
+ InternalFree(newcond);
return (pthread_cond_t*)cond;
}
@@ -216,7 +217,7 @@ INTERCEPTOR(int, pthread_cond_destroy, pthread_cond_t *c) {
InitThread();
pthread_cond_t *cond = init_cond(c);
int res = REAL(pthread_cond_destroy)(cond);
- free(cond);
+ InternalFree(cond);
atomic_store((atomic_uintptr_t*)c, 0, memory_order_relaxed);
return res;
}
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/dd/dd_rtl.h b/contrib/llvm-project/compiler-rt/lib/tsan/dd/dd_rtl.h
index ffe0684306dc..b1e19be57d3f 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/dd/dd_rtl.h
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/dd/dd_rtl.h
@@ -30,7 +30,7 @@ struct Thread {
bool ignore_interceptors;
};
-struct Callback : DDCallback {
+struct Callback final : public DDCallback {
Thread *thr;
Callback(Thread *thr);
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_dispatch_defs.h b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_dispatch_defs.h
index 298297af31eb..94e0b50fed36 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_dispatch_defs.h
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_dispatch_defs.h
@@ -51,11 +51,18 @@ extern const dispatch_block_t _dispatch_data_destructor_munmap;
#define DISPATCH_DATA_DESTRUCTOR_MUNMAP _dispatch_data_destructor_munmap
#if __has_attribute(noescape)
- #define DISPATCH_NOESCAPE __attribute__((__noescape__))
+# define DISPATCH_NOESCAPE __attribute__((__noescape__))
#else
- #define DISPATCH_NOESCAPE
+# define DISPATCH_NOESCAPE
#endif
+#if SANITIZER_MAC
+# define SANITIZER_WEAK_IMPORT extern "C" __attribute((weak_import))
+#else
+# define SANITIZER_WEAK_IMPORT extern "C" __attribute((weak))
+#endif
+
+
// Data types used in dispatch APIs
typedef unsigned long size_t;
typedef unsigned long uintptr_t;
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_external.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_external.cpp
index 0faa1ee93a13..466b2bf0f66c 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_external.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_external.cpp
@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//
#include "tsan_rtl.h"
#include "tsan_interceptors.h"
+#include "sanitizer_common/sanitizer_ptrauth.h"
namespace __tsan {
@@ -57,13 +58,13 @@ uptr TagFromShadowStackFrame(uptr pc) {
#if !SANITIZER_GO
typedef void(*AccessFunc)(ThreadState *, uptr, uptr, int);
-void ExternalAccess(void *addr, void *caller_pc, void *tag, AccessFunc access) {
+void ExternalAccess(void *addr, uptr caller_pc, void *tag, AccessFunc access) {
CHECK_LT(tag, atomic_load(&used_tags, memory_order_relaxed));
ThreadState *thr = cur_thread();
- if (caller_pc) FuncEntry(thr, (uptr)caller_pc);
+ if (caller_pc) FuncEntry(thr, caller_pc);
InsertShadowStackFrameForTag(thr, (uptr)tag);
bool in_ignored_lib;
- if (!caller_pc || !libignore()->IsIgnored((uptr)caller_pc, &in_ignored_lib)) {
+ if (!caller_pc || !libignore()->IsIgnored(caller_pc, &in_ignored_lib)) {
access(thr, CALLERPC, (uptr)addr, kSizeLog1);
}
FuncExit(thr);
@@ -110,12 +111,12 @@ void __tsan_external_assign_tag(void *addr, void *tag) {
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_external_read(void *addr, void *caller_pc, void *tag) {
- ExternalAccess(addr, caller_pc, tag, MemoryRead);
+ ExternalAccess(addr, STRIP_PC(caller_pc), tag, MemoryRead);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_external_write(void *addr, void *caller_pc, void *tag) {
- ExternalAccess(addr, caller_pc, tag, MemoryWrite);
+ ExternalAccess(addr, STRIP_PC(caller_pc), tag, MemoryWrite);
}
} // extern "C"
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_flags.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_flags.cpp
index 44bf325cd35b..49e4a9c21da9 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_flags.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_flags.cpp
@@ -87,7 +87,7 @@ void InitializeFlags(Flags *f, const char *env, const char *env_option_name) {
// Let a frontend override.
parser.ParseString(__tsan_default_options());
#if TSAN_CONTAINS_UBSAN
- const char *ubsan_default_options = __ubsan::MaybeCallUbsanDefaultOptions();
+ const char *ubsan_default_options = __ubsan_default_options();
ubsan_parser.ParseString(ubsan_default_options);
#endif
// Override from command line.
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors.h b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors.h
index 88d1edd775d3..29576ea2d49a 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors.h
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors.h
@@ -22,7 +22,7 @@ class ScopedInterceptor {
LibIgnore *libignore();
#if !SANITIZER_GO
-INLINE bool in_symbolizer() {
+inline bool in_symbolizer() {
cur_thread_init();
return UNLIKELY(cur_thread()->in_symbolizer);
}
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_libdispatch.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_libdispatch.cpp
index 5dacd3256abc..cbbb7ecb2397 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_libdispatch.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_libdispatch.cpp
@@ -19,6 +19,10 @@
#include "BlocksRuntime/Block.h"
#include "tsan_dispatch_defs.h"
+#if SANITIZER_MAC
+# include <Availability.h>
+#endif
+
namespace __tsan {
typedef u16 uint16_t;
@@ -219,6 +223,30 @@ static void invoke_and_release_block(void *param) {
DISPATCH_INTERCEPT(dispatch, false)
DISPATCH_INTERCEPT(dispatch_barrier, true)
+// dispatch_async_and_wait() and friends were introduced in macOS 10.14.
+// Linking of these interceptors fails when using an older SDK.
+#if !SANITIZER_MAC || defined(__MAC_10_14)
+// macOS 10.14 is greater than our minimal deployment target. To ensure we
+// generate a weak reference so the TSan dylib continues to work on older
+// systems, we need to forward declare the intercepted functions as "weak
+// imports". Note that this file is multi-platform, so we cannot include the
+// actual header file (#include <dispatch/dispatch.h>).
+SANITIZER_WEAK_IMPORT void dispatch_async_and_wait(
+ dispatch_queue_t queue, DISPATCH_NOESCAPE dispatch_block_t block);
+SANITIZER_WEAK_IMPORT void dispatch_async_and_wait_f(
+ dispatch_queue_t queue, void *context, dispatch_function_t work);
+SANITIZER_WEAK_IMPORT void dispatch_barrier_async_and_wait(
+ dispatch_queue_t queue, DISPATCH_NOESCAPE dispatch_block_t block);
+SANITIZER_WEAK_IMPORT void dispatch_barrier_async_and_wait_f(
+ dispatch_queue_t queue, void *context, dispatch_function_t work);
+
+DISPATCH_INTERCEPT_SYNC_F(dispatch_async_and_wait_f, false)
+DISPATCH_INTERCEPT_SYNC_B(dispatch_async_and_wait, false)
+DISPATCH_INTERCEPT_SYNC_F(dispatch_barrier_async_and_wait_f, true)
+DISPATCH_INTERCEPT_SYNC_B(dispatch_barrier_async_and_wait, true)
+#endif
+
+
DECLARE_REAL(void, dispatch_after_f, dispatch_time_t when,
dispatch_queue_t queue, void *context, dispatch_function_t work)
@@ -746,6 +774,10 @@ void InitializeLibdispatchInterceptors() {
INTERCEPT_FUNCTION(dispatch_barrier_async_f);
INTERCEPT_FUNCTION(dispatch_barrier_sync);
INTERCEPT_FUNCTION(dispatch_barrier_sync_f);
+ INTERCEPT_FUNCTION(dispatch_async_and_wait);
+ INTERCEPT_FUNCTION(dispatch_async_and_wait_f);
+ INTERCEPT_FUNCTION(dispatch_barrier_async_and_wait);
+ INTERCEPT_FUNCTION(dispatch_barrier_async_and_wait_f);
INTERCEPT_FUNCTION(dispatch_after);
INTERCEPT_FUNCTION(dispatch_after_f);
INTERCEPT_FUNCTION(dispatch_once);
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp
index aa29536d8616..ed10fccc980a 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp
@@ -438,6 +438,7 @@ struct fake_shared_weak_count {
virtual void on_zero_shared() = 0;
virtual void _unused_0x18() = 0;
virtual void on_zero_shared_weak() = 0;
+ virtual ~fake_shared_weak_count() = 0; // suppress -Wnon-virtual-dtor
};
} // namespace
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_mach_vm.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_mach_vm.cpp
index cd318f8af93f..6d62ff6a8382 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_mach_vm.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_mach_vm.cpp
@@ -19,12 +19,11 @@
namespace __tsan {
-static bool intersects_with_shadow(mach_vm_address_t *address,
+static bool intersects_with_shadow(mach_vm_address_t address,
mach_vm_size_t size, int flags) {
// VM_FLAGS_FIXED is 0x0, so we have to test for VM_FLAGS_ANYWHERE.
if (flags & VM_FLAGS_ANYWHERE) return false;
- uptr ptr = *address;
- return !IsAppMem(ptr) || !IsAppMem(ptr + size - 1);
+ return !IsAppMem(address) || !IsAppMem(address + size - 1);
}
TSAN_INTERCEPTOR(kern_return_t, mach_vm_allocate, vm_map_t target,
@@ -32,12 +31,12 @@ TSAN_INTERCEPTOR(kern_return_t, mach_vm_allocate, vm_map_t target,
SCOPED_TSAN_INTERCEPTOR(mach_vm_allocate, target, address, size, flags);
if (target != mach_task_self())
return REAL(mach_vm_allocate)(target, address, size, flags);
- if (intersects_with_shadow(address, size, flags))
+ if (address && intersects_with_shadow(*address, size, flags))
return KERN_NO_SPACE;
- kern_return_t res = REAL(mach_vm_allocate)(target, address, size, flags);
- if (res == KERN_SUCCESS)
+ kern_return_t kr = REAL(mach_vm_allocate)(target, address, size, flags);
+ if (kr == KERN_SUCCESS)
MemoryRangeImitateWriteOrResetRange(thr, pc, *address, size);
- return res;
+ return kr;
}
TSAN_INTERCEPTOR(kern_return_t, mach_vm_deallocate, vm_map_t target,
@@ -45,8 +44,10 @@ TSAN_INTERCEPTOR(kern_return_t, mach_vm_deallocate, vm_map_t target,
SCOPED_TSAN_INTERCEPTOR(mach_vm_deallocate, target, address, size);
if (target != mach_task_self())
return REAL(mach_vm_deallocate)(target, address, size);
- UnmapShadow(thr, address, size);
- return REAL(mach_vm_deallocate)(target, address, size);
+ kern_return_t kr = REAL(mach_vm_deallocate)(target, address, size);
+ if (kr == KERN_SUCCESS && address)
+ UnmapShadow(thr, address, size);
+ return kr;
}
} // namespace __tsan
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
index 9c3e0369bc6c..6c49ccd6dd5b 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
@@ -31,6 +31,8 @@
#include "tsan_mman.h"
#include "tsan_fd.h"
+#include <stdarg.h>
+
using namespace __tsan;
#if SANITIZER_FREEBSD || SANITIZER_MAC
@@ -52,10 +54,6 @@ using namespace __tsan;
#define vfork __vfork14
#endif
-#if SANITIZER_ANDROID
-#define mallopt(a, b)
-#endif
-
#ifdef __mips__
const int kSigCount = 129;
#else
@@ -95,7 +93,7 @@ extern "C" void _exit(int status);
extern "C" int fileno_unlocked(void *stream);
extern "C" int dirfd(void *dirp);
#endif
-#if !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_NETBSD
+#if SANITIZER_GLIBC
extern "C" int mallopt(int param, int value);
#endif
#if SANITIZER_NETBSD
@@ -135,6 +133,7 @@ const int PTHREAD_BARRIER_SERIAL_THREAD = -1;
#endif
const int MAP_FIXED = 0x10;
typedef long long_t;
+typedef __sanitizer::u16 mode_t;
// From /usr/include/unistd.h
# define F_ULOCK 0 /* Unlock a previously locked region. */
@@ -1119,27 +1118,37 @@ static void *init_cond(void *c, bool force = false) {
return (void*)cond;
}
+namespace {
+
+template <class Fn>
struct CondMutexUnlockCtx {
ScopedInterceptor *si;
ThreadState *thr;
uptr pc;
void *m;
+ void *c;
+ const Fn &fn;
+
+ int Cancel() const { return fn(); }
+ void Unlock() const;
};
-static void cond_mutex_unlock(CondMutexUnlockCtx *arg) {
+template <class Fn>
+void CondMutexUnlockCtx<Fn>::Unlock() const {
// pthread_cond_wait interceptor has enabled async signal delivery
// (see BlockingCall below). Disable async signals since we are running
// tsan code. Also ScopedInterceptor and BlockingCall destructors won't run
// since the thread is cancelled, so we have to manually execute them
// (the thread still can run some user code due to pthread_cleanup_push).
- ThreadSignalContext *ctx = SigCtx(arg->thr);
+ ThreadSignalContext *ctx = SigCtx(thr);
CHECK_EQ(atomic_load(&ctx->in_blocking_func, memory_order_relaxed), 1);
atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
- MutexPostLock(arg->thr, arg->pc, (uptr)arg->m, MutexFlagDoPreLockOnPostLock);
+ MutexPostLock(thr, pc, (uptr)m, MutexFlagDoPreLockOnPostLock);
// Undo BlockingCall ctor effects.
- arg->thr->ignore_interceptors--;
- arg->si->~ScopedInterceptor();
+ thr->ignore_interceptors--;
+ si->~ScopedInterceptor();
}
+} // namespace
INTERCEPTOR(int, pthread_cond_init, void *c, void *a) {
void *cond = init_cond(c, true);
@@ -1148,20 +1157,24 @@ INTERCEPTOR(int, pthread_cond_init, void *c, void *a) {
return REAL(pthread_cond_init)(cond, a);
}
-static int cond_wait(ThreadState *thr, uptr pc, ScopedInterceptor *si,
- int (*fn)(void *c, void *m, void *abstime), void *c,
- void *m, void *t) {
+template <class Fn>
+int cond_wait(ThreadState *thr, uptr pc, ScopedInterceptor *si, const Fn &fn,
+ void *c, void *m) {
MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
MutexUnlock(thr, pc, (uptr)m);
- CondMutexUnlockCtx arg = {si, thr, pc, m};
int res = 0;
// This ensures that we handle mutex lock even in case of pthread_cancel.
// See test/tsan/cond_cancel.cpp.
{
// Enable signal delivery while the thread is blocked.
BlockingCall bc(thr);
+ CondMutexUnlockCtx<Fn> arg = {si, thr, pc, m, c, fn};
res = call_pthread_cancel_with_cleanup(
- fn, c, m, t, (void (*)(void *arg))cond_mutex_unlock, &arg);
+ [](void *arg) -> int {
+ return ((const CondMutexUnlockCtx<Fn> *)arg)->Cancel();
+ },
+ [](void *arg) { ((const CondMutexUnlockCtx<Fn> *)arg)->Unlock(); },
+ &arg);
}
if (res == errno_EOWNERDEAD) MutexRepair(thr, pc, (uptr)m);
MutexPostLock(thr, pc, (uptr)m, MutexFlagDoPreLockOnPostLock);
@@ -1171,25 +1184,46 @@ static int cond_wait(ThreadState *thr, uptr pc, ScopedInterceptor *si,
INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
void *cond = init_cond(c);
SCOPED_TSAN_INTERCEPTOR(pthread_cond_wait, cond, m);
- return cond_wait(thr, pc, &si, (int (*)(void *c, void *m, void *abstime))REAL(
- pthread_cond_wait),
- cond, m, 0);
+ return cond_wait(
+ thr, pc, &si, [=]() { return REAL(pthread_cond_wait)(cond, m); }, cond,
+ m);
}
INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m, void *abstime) {
void *cond = init_cond(c);
SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait, cond, m, abstime);
- return cond_wait(thr, pc, &si, REAL(pthread_cond_timedwait), cond, m,
- abstime);
+ return cond_wait(
+ thr, pc, &si,
+ [=]() { return REAL(pthread_cond_timedwait)(cond, m, abstime); }, cond,
+ m);
+}
+
+#if SANITIZER_LINUX
+INTERCEPTOR(int, pthread_cond_clockwait, void *c, void *m,
+ __sanitizer_clockid_t clock, void *abstime) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_clockwait, cond, m, clock, abstime);
+ return cond_wait(
+ thr, pc, &si,
+ [=]() { return REAL(pthread_cond_clockwait)(cond, m, clock, abstime); },
+ cond, m);
}
+#define TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT TSAN_INTERCEPT(pthread_cond_clockwait)
+#else
+#define TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT
+#endif
#if SANITIZER_MAC
INTERCEPTOR(int, pthread_cond_timedwait_relative_np, void *c, void *m,
void *reltime) {
void *cond = init_cond(c);
SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait_relative_np, cond, m, reltime);
- return cond_wait(thr, pc, &si, REAL(pthread_cond_timedwait_relative_np), cond,
- m, reltime);
+ return cond_wait(
+ thr, pc, &si,
+ [=]() {
+ return REAL(pthread_cond_timedwait_relative_np)(cond, m, reltime);
+ },
+ cond, m);
}
#endif
@@ -1508,20 +1542,28 @@ TSAN_INTERCEPTOR(int, fstat64, int fd, void *buf) {
#define TSAN_MAYBE_INTERCEPT_FSTAT64
#endif
-TSAN_INTERCEPTOR(int, open, const char *name, int flags, int mode) {
- SCOPED_TSAN_INTERCEPTOR(open, name, flags, mode);
+TSAN_INTERCEPTOR(int, open, const char *name, int oflag, ...) {
+ va_list ap;
+ va_start(ap, oflag);
+ mode_t mode = va_arg(ap, int);
+ va_end(ap);
+ SCOPED_TSAN_INTERCEPTOR(open, name, oflag, mode);
READ_STRING(thr, pc, name, 0);
- int fd = REAL(open)(name, flags, mode);
+ int fd = REAL(open)(name, oflag, mode);
if (fd >= 0)
FdFileCreate(thr, pc, fd);
return fd;
}
#if SANITIZER_LINUX
-TSAN_INTERCEPTOR(int, open64, const char *name, int flags, int mode) {
- SCOPED_TSAN_INTERCEPTOR(open64, name, flags, mode);
+TSAN_INTERCEPTOR(int, open64, const char *name, int oflag, ...) {
+ va_list ap;
+ va_start(ap, oflag);
+ mode_t mode = va_arg(ap, int);
+ va_end(ap);
+ SCOPED_TSAN_INTERCEPTOR(open64, name, oflag, mode);
READ_STRING(thr, pc, name, 0);
- int fd = REAL(open64)(name, flags, mode);
+ int fd = REAL(open64)(name, oflag, mode);
if (fd >= 0)
FdFileCreate(thr, pc, fd);
return fd;
@@ -2437,13 +2479,13 @@ static void syscall_access_range(uptr pc, uptr p, uptr s, bool write) {
MemoryAccessRange(thr, pc, p, s, write);
}
-static void syscall_acquire(uptr pc, uptr addr) {
+static USED void syscall_acquire(uptr pc, uptr addr) {
TSAN_SYSCALL();
Acquire(thr, pc, addr);
DPrintf("syscall_acquire(%p)\n", addr);
}
-static void syscall_release(uptr pc, uptr addr) {
+static USED void syscall_release(uptr pc, uptr addr) {
TSAN_SYSCALL();
DPrintf("syscall_release(%p)\n", addr);
Release(thr, pc, addr);
@@ -2622,7 +2664,7 @@ void InitializeInterceptors() {
#endif
// Instruct libc malloc to consume less memory.
-#if SANITIZER_LINUX
+#if SANITIZER_GLIBC
mallopt(1, 0); // M_MXFAST
mallopt(-3, 32*1024); // M_MMAP_THRESHOLD
#endif
@@ -2685,6 +2727,8 @@ void InitializeInterceptors() {
TSAN_INTERCEPT_VER(pthread_cond_timedwait, PTHREAD_ABI_BASE);
TSAN_INTERCEPT_VER(pthread_cond_destroy, PTHREAD_ABI_BASE);
+ TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT;
+
TSAN_INTERCEPT(pthread_mutex_init);
TSAN_INTERCEPT(pthread_mutex_destroy);
TSAN_INTERCEPT(pthread_mutex_trylock);
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface.cpp
index 2b3a0889b70a..55f1c9834f70 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface.cpp
@@ -14,15 +14,12 @@
#include "tsan_interface_ann.h"
#include "tsan_rtl.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_ptrauth.h"
#define CALLERPC ((uptr)__builtin_return_address(0))
using namespace __tsan;
-typedef u16 uint16_t;
-typedef u32 uint32_t;
-typedef u64 uint64_t;
-
void __tsan_init() {
cur_thread_init();
Initialize(cur_thread());
@@ -43,13 +40,13 @@ void __tsan_write16(void *addr) {
}
void __tsan_read16_pc(void *addr, void *pc) {
- MemoryRead(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog8);
- MemoryRead(cur_thread(), (uptr)pc, (uptr)addr + 8, kSizeLog8);
+ MemoryRead(cur_thread(), STRIP_PC(pc), (uptr)addr, kSizeLog8);
+ MemoryRead(cur_thread(), STRIP_PC(pc), (uptr)addr + 8, kSizeLog8);
}
void __tsan_write16_pc(void *addr, void *pc) {
- MemoryWrite(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog8);
- MemoryWrite(cur_thread(), (uptr)pc, (uptr)addr + 8, kSizeLog8);
+ MemoryWrite(cur_thread(), STRIP_PC(pc), (uptr)addr, kSizeLog8);
+ MemoryWrite(cur_thread(), STRIP_PC(pc), (uptr)addr + 8, kSizeLog8);
}
// __tsan_unaligned_read/write calls are emitted by compiler.
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface_inl.h b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface_inl.h
index f955ddf99247..f5d743c10772 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface_inl.h
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface_inl.h
@@ -12,6 +12,7 @@
#include "tsan_interface.h"
#include "tsan_rtl.h"
+#include "sanitizer_common/sanitizer_ptrauth.h"
#define CALLERPC ((uptr)__builtin_return_address(0))
@@ -50,35 +51,35 @@ void __tsan_write8(void *addr) {
}
void __tsan_read1_pc(void *addr, void *pc) {
- MemoryRead(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog1);
+ MemoryRead(cur_thread(), STRIP_PC(pc), (uptr)addr, kSizeLog1);
}
void __tsan_read2_pc(void *addr, void *pc) {
- MemoryRead(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog2);
+ MemoryRead(cur_thread(), STRIP_PC(pc), (uptr)addr, kSizeLog2);
}
void __tsan_read4_pc(void *addr, void *pc) {
- MemoryRead(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog4);
+ MemoryRead(cur_thread(), STRIP_PC(pc), (uptr)addr, kSizeLog4);
}
void __tsan_read8_pc(void *addr, void *pc) {
- MemoryRead(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog8);
+ MemoryRead(cur_thread(), STRIP_PC(pc), (uptr)addr, kSizeLog8);
}
void __tsan_write1_pc(void *addr, void *pc) {
- MemoryWrite(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog1);
+ MemoryWrite(cur_thread(), STRIP_PC(pc), (uptr)addr, kSizeLog1);
}
void __tsan_write2_pc(void *addr, void *pc) {
- MemoryWrite(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog2);
+ MemoryWrite(cur_thread(), STRIP_PC(pc), (uptr)addr, kSizeLog2);
}
void __tsan_write4_pc(void *addr, void *pc) {
- MemoryWrite(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog4);
+ MemoryWrite(cur_thread(), STRIP_PC(pc), (uptr)addr, kSizeLog4);
}
void __tsan_write8_pc(void *addr, void *pc) {
- MemoryWrite(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog8);
+ MemoryWrite(cur_thread(), STRIP_PC(pc), (uptr)addr, kSizeLog8);
}
void __tsan_vptr_update(void **vptr_p, void *new_val) {
@@ -100,7 +101,7 @@ void __tsan_vptr_read(void **vptr_p) {
}
void __tsan_func_entry(void *pc) {
- FuncEntry(cur_thread(), (uptr)pc);
+ FuncEntry(cur_thread(), STRIP_PC(pc));
}
void __tsan_func_exit() {
@@ -124,9 +125,9 @@ void __tsan_write_range(void *addr, uptr size) {
}
void __tsan_read_range_pc(void *addr, uptr size, void *pc) {
- MemoryAccessRange(cur_thread(), (uptr)pc, (uptr)addr, size, false);
+ MemoryAccessRange(cur_thread(), STRIP_PC(pc), (uptr)addr, size, false);
}
void __tsan_write_range_pc(void *addr, uptr size, void *pc) {
- MemoryAccessRange(cur_thread(), (uptr)pc, (uptr)addr, size, true);
+ MemoryAccessRange(cur_thread(), STRIP_PC(pc), (uptr)addr, size, true);
}
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform.h b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform.h
index 7256d64e5079..81d345dea756 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform.h
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform.h
@@ -461,7 +461,7 @@ struct Mapping47 {
#elif SANITIZER_GO && defined(__aarch64__)
-/* Go on linux/aarch64 (48-bit VMA)
+/* Go on linux/aarch64 (48-bit VMA) and darwin/aarch64 (47-bit VMA)
0000 0000 1000 - 0000 1000 0000: executable
0000 1000 0000 - 00c0 0000 0000: -
00c0 0000 0000 - 00e0 0000 0000: heap
@@ -488,6 +488,30 @@ struct Mapping {
// Indicates the runtime will define the memory regions at runtime.
#define TSAN_RUNTIME_VMA 1
+#elif SANITIZER_GO && defined(__mips64)
+/*
+Go on linux/mips64 (47-bit VMA)
+0000 0000 1000 - 0000 1000 0000: executable
+0000 1000 0000 - 00c0 0000 0000: -
+00c0 0000 0000 - 00e0 0000 0000: heap
+00e0 0000 0000 - 2000 0000 0000: -
+2000 0000 0000 - 3000 0000 0000: shadow
+3000 0000 0000 - 3000 0000 0000: -
+3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
+4000 0000 0000 - 6000 0000 0000: -
+6000 0000 0000 - 6200 0000 0000: traces
+6200 0000 0000 - 8000 0000 0000: -
+*/
+struct Mapping {
+ static const uptr kMetaShadowBeg = 0x300000000000ull;
+ static const uptr kMetaShadowEnd = 0x400000000000ull;
+ static const uptr kTraceMemBeg = 0x600000000000ull;
+ static const uptr kTraceMemEnd = 0x620000000000ull;
+ static const uptr kShadowBeg = 0x200000000000ull;
+ static const uptr kShadowEnd = 0x300000000000ull;
+ static const uptr kAppMemBeg = 0x000000001000ull;
+ static const uptr kAppMemEnd = 0x00e000000000ull;
+};
#else
# error "Unknown platform"
#endif
@@ -1016,9 +1040,8 @@ int ExtractRecvmsgFDs(void *msg, int *fds, int nfd);
uptr ExtractLongJmpSp(uptr *env);
void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size);
-int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
- void *abstime), void *c, void *m, void *abstime,
- void(*cleanup)(void *arg), void *arg);
+int call_pthread_cancel_with_cleanup(int (*fn)(void *arg),
+ void (*cleanup)(void *arg), void *arg);
void DestroyThreadState();
void PlatformCleanUpThreadState(ThreadState *thr);
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp
index 645152a06c39..5e8879de26a7 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp
@@ -12,14 +12,12 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
-#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \
- SANITIZER_OPENBSD
+#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
-#include "sanitizer_common/sanitizer_platform_limits_openbsd.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
#include "sanitizer_common/sanitizer_posix.h"
#include "sanitizer_common/sanitizer_procmaps.h"
@@ -384,12 +382,16 @@ static uptr UnmangleLongJmpSp(uptr mangled_sp) {
#endif
}
-#ifdef __powerpc__
+#if SANITIZER_NETBSD
+# ifdef __x86_64__
+# define LONG_JMP_SP_ENV_SLOT 6
+# else
+# error unsupported
+# endif
+#elif defined(__powerpc__)
# define LONG_JMP_SP_ENV_SLOT 0
#elif SANITIZER_FREEBSD
# define LONG_JMP_SP_ENV_SLOT 2
-#elif SANITIZER_NETBSD
-# define LONG_JMP_SP_ENV_SLOT 6
#elif SANITIZER_LINUX
# ifdef __aarch64__
# define LONG_JMP_SP_ENV_SLOT 13
@@ -441,14 +443,13 @@ void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size) {
// Note: this function runs with async signals enabled,
// so it must not touch any tsan state.
-int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
- void *abstime), void *c, void *m, void *abstime,
- void(*cleanup)(void *arg), void *arg) {
+int call_pthread_cancel_with_cleanup(int (*fn)(void *arg),
+ void (*cleanup)(void *arg), void *arg) {
// pthread_cleanup_push/pop are hardcore macros mess.
// We can't intercept nor call them w/o including pthread.h.
int res;
pthread_cleanup_push(cleanup, arg);
- res = fn(c, m, abstime);
+ res = fn(arg);
pthread_cleanup_pop(0);
return res;
}
@@ -513,5 +514,4 @@ void cur_thread_finalize() {
} // namespace __tsan
-#endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD ||
- // SANITIZER_OPENBSD
+#endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp
index eea52a34e97f..0740805822de 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp
@@ -234,7 +234,7 @@ static void my_pthread_introspection_hook(unsigned int event, pthread_t thread,
#endif
void InitializePlatformEarly() {
-#if defined(__aarch64__)
+#if !SANITIZER_GO && defined(__aarch64__)
uptr max_vm = GetMaxUserVirtualAddress() + 1;
if (max_vm != Mapping::kHiAppMemEnd) {
Printf("ThreadSanitizer: unsupported vm address limit %p, expected %p.\n",
@@ -306,14 +306,13 @@ void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size) {
#if !SANITIZER_GO
// Note: this function runs with async signals enabled,
// so it must not touch any tsan state.
-int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
- void *abstime), void *c, void *m, void *abstime,
- void(*cleanup)(void *arg), void *arg) {
+int call_pthread_cancel_with_cleanup(int (*fn)(void *arg),
+ void (*cleanup)(void *arg), void *arg) {
// pthread_cleanup_push/pop are hardcore macros mess.
// We can't intercept nor call them w/o including pthread.h.
int res;
pthread_cleanup_push(cleanup, arg);
- res = fn(c, m, abstime);
+ res = fn(arg);
pthread_cleanup_pop(0);
return res;
}
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform_posix.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform_posix.cpp
index 1a0faee0252e..d56b6c3b9c54 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform_posix.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform_posix.cpp
@@ -29,10 +29,6 @@ static const char kShadowMemoryMappingHint[] =
"HINT: if %s is not supported in your environment, you may set "
"TSAN_OPTIONS=%s=0\n";
-static void NoHugePagesInShadow(uptr addr, uptr size) {
- SetShadowRegionHugePageMode(addr, size);
-}
-
static void DontDumpShadow(uptr addr, uptr size) {
if (common_flags()->use_madv_dontdump)
if (!DontDumpShadowMemory(addr, size)) {
@@ -46,7 +42,8 @@ static void DontDumpShadow(uptr addr, uptr size) {
#if !SANITIZER_GO
void InitializeShadowMemory() {
// Map memory shadow.
- if (!MmapFixedNoReserve(ShadowBeg(), ShadowEnd() - ShadowBeg(), "shadow")) {
+ if (!MmapFixedSuperNoReserve(ShadowBeg(), ShadowEnd() - ShadowBeg(),
+ "shadow")) {
Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
Printf("FATAL: Make sure to compile with -fPIE and to link with -pie.\n");
Die();
@@ -55,43 +52,6 @@ void InitializeShadowMemory() {
// Frequently a thread uses only a small part of stack and similarly
// a program uses a small part of large mmap. On some programs
// we see 20% memory usage reduction without huge pages for this range.
- // FIXME: don't use constants here.
-#if defined(__x86_64__)
- const uptr kMadviseRangeBeg = 0x7f0000000000ull;
- const uptr kMadviseRangeSize = 0x010000000000ull;
-#elif defined(__mips64)
- const uptr kMadviseRangeBeg = 0xff00000000ull;
- const uptr kMadviseRangeSize = 0x0100000000ull;
-#elif defined(__aarch64__) && defined(__APPLE__)
- uptr kMadviseRangeBeg = LoAppMemBeg();
- uptr kMadviseRangeSize = LoAppMemEnd() - LoAppMemBeg();
-#elif defined(__aarch64__)
- uptr kMadviseRangeBeg = 0;
- uptr kMadviseRangeSize = 0;
- if (vmaSize == 39) {
- kMadviseRangeBeg = 0x7d00000000ull;
- kMadviseRangeSize = 0x0300000000ull;
- } else if (vmaSize == 42) {
- kMadviseRangeBeg = 0x3f000000000ull;
- kMadviseRangeSize = 0x01000000000ull;
- } else {
- DCHECK(0);
- }
-#elif defined(__powerpc64__)
- uptr kMadviseRangeBeg = 0;
- uptr kMadviseRangeSize = 0;
- if (vmaSize == 44) {
- kMadviseRangeBeg = 0x0f60000000ull;
- kMadviseRangeSize = 0x0010000000ull;
- } else if (vmaSize == 46) {
- kMadviseRangeBeg = 0x3f0000000000ull;
- kMadviseRangeSize = 0x010000000000ull;
- } else {
- DCHECK(0);
- }
-#endif
- NoHugePagesInShadow(MemToShadow(kMadviseRangeBeg),
- kMadviseRangeSize * kShadowMultiplier);
DontDumpShadow(ShadowBeg(), ShadowEnd() - ShadowBeg());
DPrintf("memory shadow: %zx-%zx (%zuGB)\n",
ShadowBeg(), ShadowEnd(),
@@ -100,12 +60,11 @@ void InitializeShadowMemory() {
// Map meta shadow.
const uptr meta = MetaShadowBeg();
const uptr meta_size = MetaShadowEnd() - meta;
- if (!MmapFixedNoReserve(meta, meta_size, "meta shadow")) {
+ if (!MmapFixedSuperNoReserve(meta, meta_size, "meta shadow")) {
Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
Printf("FATAL: Make sure to compile with -fPIE and to link with -pie.\n");
Die();
}
- NoHugePagesInShadow(meta, meta_size);
DontDumpShadow(meta, meta_size);
DPrintf("meta shadow: %zx-%zx (%zuGB)\n",
meta, meta + meta_size, meta_size >> 30);
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_report.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_report.cpp
index 368f1ca8adf2..968c7b97553c 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_report.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_report.cpp
@@ -128,7 +128,8 @@ void PrintStack(const ReportStack *ent) {
SymbolizedStack *frame = ent->frames;
for (int i = 0; frame && frame->info.address; frame = frame->next, i++) {
InternalScopedString res(2 * GetPageSizeCached());
- RenderFrame(&res, common_flags()->stack_trace_format, i, frame->info,
+ RenderFrame(&res, common_flags()->stack_trace_format, i,
+ frame->info.address, &frame->info,
common_flags()->symbolize_vs_style,
common_flags()->strip_path_prefix, kInterposedFunctionPrefix);
Printf("%s\n", res.data());
@@ -385,7 +386,8 @@ void PrintReport(const ReportDesc *rep) {
ReportErrorSummary(rep_typ_str, frame->info);
}
- if (common_flags()->print_module_map == 2) PrintModuleMap();
+ if (common_flags()->print_module_map == 2)
+ DumpProcessMap();
Printf("==================\n");
}
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
index 13c9b770f50a..3d721eb95a2c 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
@@ -256,7 +256,8 @@ void MapShadow(uptr addr, uptr size) {
const uptr kPageSize = GetPageSizeCached();
uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize);
uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize);
- if (!MmapFixedNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow"))
+ if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin,
+ "shadow"))
Die();
// Meta shadow is 2:1, so tread carefully.
@@ -269,7 +270,8 @@ void MapShadow(uptr addr, uptr size) {
if (!data_mapped) {
// First call maps data+bss.
data_mapped = true;
- if (!MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow"))
+ if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
+ "meta shadow"))
Die();
} else {
// Mapping continous heap.
@@ -280,7 +282,8 @@ void MapShadow(uptr addr, uptr size) {
return;
if (meta_begin < mapped_meta_end)
meta_begin = mapped_meta_end;
- if (!MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow"))
+ if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
+ "meta shadow"))
Die();
mapped_meta_end = meta_end;
}
@@ -293,7 +296,7 @@ void MapThreadTrace(uptr addr, uptr size, const char *name) {
CHECK_GE(addr, TraceMemBeg());
CHECK_LE(addr + size, TraceMemEnd());
CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment
- if (!MmapFixedNoReserve(addr, size, name)) {
+ if (!MmapFixedSuperNoReserve(addr, size, name)) {
Printf("FATAL: ThreadSanitizer can not mmap thread trace (%p/%p)\n",
addr, size);
Die();
@@ -443,7 +446,8 @@ void MaybeSpawnBackgroundThread() {
int Finalize(ThreadState *thr) {
bool failed = false;
- if (common_flags()->print_module_map == 1) PrintModuleMap();
+ if (common_flags()->print_module_map == 1)
+ DumpProcessMap();
if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
SleepForMillis(flags()->atexit_sleep_ms);
@@ -957,7 +961,7 @@ static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
u64 *p1 = p;
p = RoundDown(end, kPageSize);
UnmapOrDie((void*)p1, (uptr)p - (uptr)p1);
- if (!MmapFixedNoReserve((uptr)p1, (uptr)p - (uptr)p1))
+ if (!MmapFixedSuperNoReserve((uptr)p1, (uptr)p - (uptr)p1))
Die();
// Set the ending.
while (p < end) {
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl.h b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl.h
index d3bb61ff87d3..04d474e044e1 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl.h
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl.h
@@ -458,26 +458,26 @@ struct ThreadState {
ThreadState *cur_thread();
void set_cur_thread(ThreadState *thr);
void cur_thread_finalize();
-INLINE void cur_thread_init() { }
+inline void cur_thread_init() { }
#else
__attribute__((tls_model("initial-exec")))
extern THREADLOCAL char cur_thread_placeholder[];
-INLINE ThreadState *cur_thread() {
+inline ThreadState *cur_thread() {
return reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current;
}
-INLINE void cur_thread_init() {
+inline void cur_thread_init() {
ThreadState *thr = reinterpret_cast<ThreadState *>(cur_thread_placeholder);
if (UNLIKELY(!thr->current))
thr->current = thr;
}
-INLINE void set_cur_thread(ThreadState *thr) {
+inline void set_cur_thread(ThreadState *thr) {
reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current = thr;
}
-INLINE void cur_thread_finalize() { }
+inline void cur_thread_finalize() { }
#endif // SANITIZER_MAC || SANITIZER_ANDROID
#endif // SANITIZER_GO
-class ThreadContext : public ThreadContextBase {
+class ThreadContext final : public ThreadContextBase {
public:
explicit ThreadContext(int tid);
~ThreadContext();
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
index ebd0d7221818..27897f0592b0 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
@@ -24,7 +24,7 @@ namespace __tsan {
void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);
-struct Callback : DDCallback {
+struct Callback final : public DDCallback {
ThreadState *thr;
uptr pc;
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
index 3354546c2a10..208d0df44df7 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
@@ -706,9 +706,7 @@ void ReportRace(ThreadState *thr) {
}
#endif
- if (!OutputReport(thr, rep))
- return;
-
+ OutputReport(thr, rep);
}
void PrintCurrentStack(ThreadState *thr, uptr pc) {
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_sync.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_sync.cpp
index 7f686dc5fcdc..17ddd50f1284 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_sync.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_sync.cpp
@@ -175,7 +175,7 @@ void MetaMap::ResetRange(Processor *proc, uptr p, uptr sz) {
uptr metap = (uptr)MemToMeta(p0);
uptr metasz = sz0 / kMetaRatio;
UnmapOrDie((void*)metap, metasz);
- if (!MmapFixedNoReserve(metap, metasz))
+ if (!MmapFixedSuperNoReserve(metap, metasz))
Die();
}
diff --git a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_flags.cpp b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_flags.cpp
index 721c2273f133..25cefd46ce27 100644
--- a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_flags.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_flags.cpp
@@ -21,10 +21,6 @@
namespace __ubsan {
-const char *MaybeCallUbsanDefaultOptions() {
- return (&__ubsan_default_options) ? __ubsan_default_options() : "";
-}
-
static const char *GetFlag(const char *flag) {
// We cannot call getenv() from inside a preinit array initializer
if (SANITIZER_CAN_USE_PREINIT_ARRAY) {
@@ -66,7 +62,7 @@ void InitializeFlags() {
RegisterUbsanFlags(&parser, f);
// Override from user-specified string.
- parser.ParseString(MaybeCallUbsanDefaultOptions());
+ parser.ParseString(__ubsan_default_options());
// Override from environment variable.
parser.ParseStringFromEnv("UBSAN_OPTIONS");
InitializeCommonFlags();
diff --git a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_flags.h b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_flags.h
index daa0d7c701e0..c47009bafe53 100644
--- a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_flags.h
+++ b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_flags.h
@@ -34,8 +34,6 @@ inline Flags *flags() { return &ubsan_flags; }
void InitializeFlags();
void RegisterUbsanFlags(FlagParser *parser, Flags *f);
-const char *MaybeCallUbsanDefaultOptions();
-
} // namespace __ubsan
extern "C" {
diff --git a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_platform.h b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_platform.h
index 71d7fb18c9b3..32d949d75b9c 100644
--- a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_platform.h
+++ b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_platform.h
@@ -14,7 +14,7 @@
// Other platforms should be easy to add, and probably work as-is.
#if defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__) || \
- defined(__NetBSD__) || defined(__OpenBSD__) || \
+ defined(__NetBSD__) || \
(defined(__sun__) && defined(__svr4__)) || \
defined(_WIN32) || defined(__Fuchsia__) || defined(__rtems__)
# define CAN_SANITIZE_UB 1
diff --git a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_type_hash_itanium.cpp b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_type_hash_itanium.cpp
index 4f1708ba1901..d82b542a020e 100644
--- a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_type_hash_itanium.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_type_hash_itanium.cpp
@@ -12,7 +12,7 @@
#include "sanitizer_common/sanitizer_platform.h"
#include "ubsan_platform.h"
-#if CAN_SANITIZE_UB && !SANITIZER_WINDOWS
+#if CAN_SANITIZE_UB && !defined(_MSC_VER)
#include "ubsan_type_hash.h"
#include "sanitizer_common/sanitizer_common.h"
diff --git a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_type_hash_win.cpp b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_type_hash_win.cpp
index 45dcb758ec44..106fa1b85a55 100644
--- a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_type_hash_win.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_type_hash_win.cpp
@@ -12,7 +12,7 @@
#include "sanitizer_common/sanitizer_platform.h"
#include "ubsan_platform.h"
-#if CAN_SANITIZE_UB && SANITIZER_WINDOWS
+#if CAN_SANITIZE_UB && defined(_MSC_VER)
#include "ubsan_type_hash.h"
#include "sanitizer_common/sanitizer_common.h"
diff --git a/contrib/llvm-project/compiler-rt/lib/ubsan_minimal/ubsan_minimal_handlers.cpp b/contrib/llvm-project/compiler-rt/lib/ubsan_minimal/ubsan_minimal_handlers.cpp
index 8654c705cfbb..6a1903da62ce 100644
--- a/contrib/llvm-project/compiler-rt/lib/ubsan_minimal/ubsan_minimal_handlers.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/ubsan_minimal/ubsan_minimal_handlers.cpp
@@ -10,7 +10,7 @@ extern "C" void ubsan_message(const char *msg);
static void message(const char *msg) { ubsan_message(msg); }
#else
static void message(const char *msg) {
- write(2, msg, strlen(msg));
+ (void)write(2, msg, strlen(msg));
}
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/xray/xray_basic_logging.cpp b/contrib/llvm-project/compiler-rt/lib/xray/xray_basic_logging.cpp
index 6e8e93131451..a58ae9b5e267 100644
--- a/contrib/llvm-project/compiler-rt/lib/xray/xray_basic_logging.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/xray/xray_basic_logging.cpp
@@ -18,7 +18,7 @@
#include <fcntl.h>
#include <pthread.h>
#include <sys/stat.h>
-#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_OPENBSD || SANITIZER_MAC
+#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_MAC
#include <sys/syscall.h>
#endif
#include <sys/types.h>
diff --git a/contrib/llvm-project/compiler-rt/lib/xray/xray_mips.cpp b/contrib/llvm-project/compiler-rt/lib/xray/xray_mips.cpp
index 26fc50374471..dc9e837a555d 100644
--- a/contrib/llvm-project/compiler-rt/lib/xray/xray_mips.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/xray/xray_mips.cpp
@@ -93,6 +93,7 @@ inline static bool patchSled(const bool Enable, const uint32_t FuncId,
// When |Enable|==false, we set back the first instruction in the sled to be
// B #44
+ uint32_t *Address = reinterpret_cast<uint32_t *>(Sled.address());
if (Enable) {
uint32_t LoTracingHookAddr =
reinterpret_cast<int32_t>(TracingHook) & 0xffff;
@@ -100,34 +101,34 @@ inline static bool patchSled(const bool Enable, const uint32_t FuncId,
(reinterpret_cast<int32_t>(TracingHook) >> 16) & 0xffff;
uint32_t LoFunctionID = FuncId & 0xffff;
uint32_t HiFunctionID = (FuncId >> 16) & 0xffff;
- *reinterpret_cast<uint32_t *>(Sled.Address + 8) = encodeInstruction(
- PatchOpcodes::PO_SW, RegNum::RN_SP, RegNum::RN_RA, 0x4);
- *reinterpret_cast<uint32_t *>(Sled.Address + 12) = encodeInstruction(
- PatchOpcodes::PO_SW, RegNum::RN_SP, RegNum::RN_T9, 0x0);
- *reinterpret_cast<uint32_t *>(Sled.Address + 16) = encodeInstruction(
- PatchOpcodes::PO_LUI, 0x0, RegNum::RN_T9, HiTracingHookAddr);
- *reinterpret_cast<uint32_t *>(Sled.Address + 20) = encodeInstruction(
- PatchOpcodes::PO_ORI, RegNum::RN_T9, RegNum::RN_T9, LoTracingHookAddr);
- *reinterpret_cast<uint32_t *>(Sled.Address + 24) = encodeInstruction(
- PatchOpcodes::PO_LUI, 0x0, RegNum::RN_T0, HiFunctionID);
- *reinterpret_cast<uint32_t *>(Sled.Address + 28) = encodeSpecialInstruction(
- PatchOpcodes::PO_JALR, RegNum::RN_T9, 0x0, RegNum::RN_RA, 0X0);
- *reinterpret_cast<uint32_t *>(Sled.Address + 32) = encodeInstruction(
- PatchOpcodes::PO_ORI, RegNum::RN_T0, RegNum::RN_T0, LoFunctionID);
- *reinterpret_cast<uint32_t *>(Sled.Address + 36) = encodeInstruction(
- PatchOpcodes::PO_LW, RegNum::RN_SP, RegNum::RN_T9, 0x0);
- *reinterpret_cast<uint32_t *>(Sled.Address + 40) = encodeInstruction(
- PatchOpcodes::PO_LW, RegNum::RN_SP, RegNum::RN_RA, 0x4);
- *reinterpret_cast<uint32_t *>(Sled.Address + 44) = encodeInstruction(
- PatchOpcodes::PO_ADDIU, RegNum::RN_SP, RegNum::RN_SP, 0x8);
+ Address[2] = encodeInstruction(PatchOpcodes::PO_SW, RegNum::RN_SP,
+ RegNum::RN_RA, 0x4);
+ Address[3] = encodeInstruction(PatchOpcodes::PO_SW, RegNum::RN_SP,
+ RegNum::RN_T9, 0x0);
+ Address[4] = encodeInstruction(PatchOpcodes::PO_LUI, 0x0, RegNum::RN_T9,
+ HiTracingHookAddr);
+ Address[5] = encodeInstruction(PatchOpcodes::PO_ORI, RegNum::RN_T9,
+ RegNum::RN_T9, LoTracingHookAddr);
+ Address[6] = encodeInstruction(PatchOpcodes::PO_LUI, 0x0, RegNum::RN_T0,
+ HiFunctionID);
+ Address[7] = encodeSpecialInstruction(PatchOpcodes::PO_JALR, RegNum::RN_T9,
+ 0x0, RegNum::RN_RA, 0X0);
+ Address[8] = encodeInstruction(PatchOpcodes::PO_ORI, RegNum::RN_T0,
+ RegNum::RN_T0, LoFunctionID);
+ Address[9] = encodeInstruction(PatchOpcodes::PO_LW, RegNum::RN_SP,
+ RegNum::RN_T9, 0x0);
+ Address[10] = encodeInstruction(PatchOpcodes::PO_LW, RegNum::RN_SP,
+ RegNum::RN_RA, 0x4);
+ Address[11] = encodeInstruction(PatchOpcodes::PO_ADDIU, RegNum::RN_SP,
+ RegNum::RN_SP, 0x8);
uint32_t CreateStackSpaceInstr = encodeInstruction(
PatchOpcodes::PO_ADDIU, RegNum::RN_SP, RegNum::RN_SP, 0xFFF8);
std::atomic_store_explicit(
- reinterpret_cast<std::atomic<uint32_t> *>(Sled.Address),
+ reinterpret_cast<std::atomic<uint32_t> *>(Address),
uint32_t(CreateStackSpaceInstr), std::memory_order_release);
} else {
std::atomic_store_explicit(
- reinterpret_cast<std::atomic<uint32_t> *>(Sled.Address),
+ reinterpret_cast<std::atomic<uint32_t> *>(Address),
uint32_t(PatchOpcodes::PO_B44), std::memory_order_release);
}
return true;
diff --git a/contrib/llvm-project/compiler-rt/lib/xray/xray_mips64.cpp b/contrib/llvm-project/compiler-rt/lib/xray/xray_mips64.cpp
index 62c67ff7376d..5b221bb6ddc0 100644
--- a/contrib/llvm-project/compiler-rt/lib/xray/xray_mips64.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/xray/xray_mips64.cpp
@@ -89,6 +89,7 @@ inline static bool patchSled(const bool Enable, const uint32_t FuncId,
// When |Enable|==false, we set back the first instruction in the sled to be
// B #60
+ uint32_t *Address = reinterpret_cast<uint32_t *>(Sled.address());
if (Enable) {
uint32_t LoTracingHookAddr =
reinterpret_cast<int64_t>(TracingHook) & 0xffff;
@@ -100,43 +101,42 @@ inline static bool patchSled(const bool Enable, const uint32_t FuncId,
(reinterpret_cast<int64_t>(TracingHook) >> 48) & 0xffff;
uint32_t LoFunctionID = FuncId & 0xffff;
uint32_t HiFunctionID = (FuncId >> 16) & 0xffff;
- *reinterpret_cast<uint32_t *>(Sled.Address + 8) = encodeInstruction(
- PatchOpcodes::PO_SD, RegNum::RN_SP, RegNum::RN_RA, 0x8);
- *reinterpret_cast<uint32_t *>(Sled.Address + 12) = encodeInstruction(
- PatchOpcodes::PO_SD, RegNum::RN_SP, RegNum::RN_T9, 0x0);
- *reinterpret_cast<uint32_t *>(Sled.Address + 16) = encodeInstruction(
- PatchOpcodes::PO_LUI, 0x0, RegNum::RN_T9, HighestTracingHookAddr);
- *reinterpret_cast<uint32_t *>(Sled.Address + 20) =
- encodeInstruction(PatchOpcodes::PO_ORI, RegNum::RN_T9, RegNum::RN_T9,
- HigherTracingHookAddr);
- *reinterpret_cast<uint32_t *>(Sled.Address + 24) = encodeSpecialInstruction(
- PatchOpcodes::PO_DSLL, 0x0, RegNum::RN_T9, RegNum::RN_T9, 0x10);
- *reinterpret_cast<uint32_t *>(Sled.Address + 28) = encodeInstruction(
- PatchOpcodes::PO_ORI, RegNum::RN_T9, RegNum::RN_T9, HiTracingHookAddr);
- *reinterpret_cast<uint32_t *>(Sled.Address + 32) = encodeSpecialInstruction(
- PatchOpcodes::PO_DSLL, 0x0, RegNum::RN_T9, RegNum::RN_T9, 0x10);
- *reinterpret_cast<uint32_t *>(Sled.Address + 36) = encodeInstruction(
- PatchOpcodes::PO_ORI, RegNum::RN_T9, RegNum::RN_T9, LoTracingHookAddr);
- *reinterpret_cast<uint32_t *>(Sled.Address + 40) = encodeInstruction(
- PatchOpcodes::PO_LUI, 0x0, RegNum::RN_T0, HiFunctionID);
- *reinterpret_cast<uint32_t *>(Sled.Address + 44) = encodeSpecialInstruction(
- PatchOpcodes::PO_JALR, RegNum::RN_T9, 0x0, RegNum::RN_RA, 0X0);
- *reinterpret_cast<uint32_t *>(Sled.Address + 48) = encodeInstruction(
- PatchOpcodes::PO_ORI, RegNum::RN_T0, RegNum::RN_T0, LoFunctionID);
- *reinterpret_cast<uint32_t *>(Sled.Address + 52) = encodeInstruction(
- PatchOpcodes::PO_LD, RegNum::RN_SP, RegNum::RN_T9, 0x0);
- *reinterpret_cast<uint32_t *>(Sled.Address + 56) = encodeInstruction(
- PatchOpcodes::PO_LD, RegNum::RN_SP, RegNum::RN_RA, 0x8);
- *reinterpret_cast<uint32_t *>(Sled.Address + 60) = encodeInstruction(
- PatchOpcodes::PO_DADDIU, RegNum::RN_SP, RegNum::RN_SP, 0x10);
+ Address[2] = encodeInstruction(PatchOpcodes::PO_SD, RegNum::RN_SP,
+ RegNum::RN_RA, 0x8);
+ Address[3] = encodeInstruction(PatchOpcodes::PO_SD, RegNum::RN_SP,
+ RegNum::RN_T9, 0x0);
+ Address[4] = encodeInstruction(PatchOpcodes::PO_LUI, 0x0, RegNum::RN_T9,
+ HighestTracingHookAddr);
+ Address[5] = encodeInstruction(PatchOpcodes::PO_ORI, RegNum::RN_T9,
+ RegNum::RN_T9, HigherTracingHookAddr);
+ Address[6] = encodeSpecialInstruction(PatchOpcodes::PO_DSLL, 0x0,
+ RegNum::RN_T9, RegNum::RN_T9, 0x10);
+ Address[7] = encodeInstruction(PatchOpcodes::PO_ORI, RegNum::RN_T9,
+ RegNum::RN_T9, HiTracingHookAddr);
+ Address[8] = encodeSpecialInstruction(PatchOpcodes::PO_DSLL, 0x0,
+ RegNum::RN_T9, RegNum::RN_T9, 0x10);
+ Address[9] = encodeInstruction(PatchOpcodes::PO_ORI, RegNum::RN_T9,
+ RegNum::RN_T9, LoTracingHookAddr);
+ Address[10] = encodeInstruction(PatchOpcodes::PO_LUI, 0x0, RegNum::RN_T0,
+ HiFunctionID);
+ Address[11] = encodeSpecialInstruction(PatchOpcodes::PO_JALR, RegNum::RN_T9,
+ 0x0, RegNum::RN_RA, 0X0);
+ Address[12] = encodeInstruction(PatchOpcodes::PO_ORI, RegNum::RN_T0,
+ RegNum::RN_T0, LoFunctionID);
+ Address[13] = encodeInstruction(PatchOpcodes::PO_LD, RegNum::RN_SP,
+ RegNum::RN_T9, 0x0);
+ Address[14] = encodeInstruction(PatchOpcodes::PO_LD, RegNum::RN_SP,
+ RegNum::RN_RA, 0x8);
+ Address[15] = encodeInstruction(PatchOpcodes::PO_DADDIU, RegNum::RN_SP,
+ RegNum::RN_SP, 0x10);
uint32_t CreateStackSpace = encodeInstruction(
PatchOpcodes::PO_DADDIU, RegNum::RN_SP, RegNum::RN_SP, 0xfff0);
std::atomic_store_explicit(
- reinterpret_cast<std::atomic<uint32_t> *>(Sled.Address),
- CreateStackSpace, std::memory_order_release);
+ reinterpret_cast<std::atomic<uint32_t> *>(Address), CreateStackSpace,
+ std::memory_order_release);
} else {
std::atomic_store_explicit(
- reinterpret_cast<std::atomic<uint32_t> *>(Sled.Address),
+ reinterpret_cast<std::atomic<uint32_t> *>(Address),
uint32_t(PatchOpcodes::PO_B60), std::memory_order_release);
}
return true;
diff --git a/contrib/llvm-project/compiler-rt/lib/xray/xray_x86_64.cpp b/contrib/llvm-project/compiler-rt/lib/xray/xray_x86_64.cpp
index f3742ac71290..c58584b3a14b 100644
--- a/contrib/llvm-project/compiler-rt/lib/xray/xray_x86_64.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/xray/xray_x86_64.cpp
@@ -6,12 +6,8 @@
#include "xray_defs.h"
#include "xray_interface_internal.h"
-#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_OPENBSD || SANITIZER_MAC
+#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_MAC
#include <sys/types.h>
-#if SANITIZER_OPENBSD
-#include <sys/time.h>
-#include <machine/cpu.h>
-#endif
#include <sys/sysctl.h>
#elif SANITIZER_FUCHSIA
#include <zircon/syscalls.h>
@@ -86,14 +82,11 @@ uint64_t getTSCFrequency() XRAY_NEVER_INSTRUMENT {
}
return TSCFrequency == -1 ? 0 : static_cast<uint64_t>(TSCFrequency);
}
-#elif SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_OPENBSD || SANITIZER_MAC
+#elif SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_MAC
uint64_t getTSCFrequency() XRAY_NEVER_INSTRUMENT {
long long TSCFrequency = -1;
size_t tscfreqsz = sizeof(TSCFrequency);
-#if SANITIZER_OPENBSD
- int Mib[2] = { CTL_MACHDEP, CPU_TSCFREQ };
- if (internal_sysctl(Mib, 2, &TSCFrequency, &tscfreqsz, NULL, 0) != -1) {
-#elif SANITIZER_MAC
+#if SANITIZER_MAC
if (internal_sysctlbyname("machdep.tsc.frequency", &TSCFrequency,
&tscfreqsz, NULL, 0) != -1) {
diff --git a/contrib/llvm-project/compiler-rt/lib/xray/xray_x86_64.inc b/contrib/llvm-project/compiler-rt/lib/xray/xray_x86_64.inc
index 477900355cf4..dc71fb87f63d 100644
--- a/contrib/llvm-project/compiler-rt/lib/xray/xray_x86_64.inc
+++ b/contrib/llvm-project/compiler-rt/lib/xray/xray_x86_64.inc
@@ -11,7 +11,6 @@
//===----------------------------------------------------------------------===//
#include <cstdint>
-#include <x86intrin.h>
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "xray_defs.h"