Diffstat (limited to 'compiler-rt/lib')
-rw-r--r--compiler-rt/lib/asan/asan_allocator.cpp14
-rw-r--r--compiler-rt/lib/asan/asan_descriptions.cpp19
-rw-r--r--compiler-rt/lib/asan/asan_descriptions.h13
-rw-r--r--compiler-rt/lib/asan/asan_errors.cpp8
-rw-r--r--compiler-rt/lib/asan/asan_fake_stack.cpp52
-rw-r--r--compiler-rt/lib/asan/asan_flags.cpp4
-rw-r--r--compiler-rt/lib/asan/asan_flags.inc3
-rw-r--r--compiler-rt/lib/asan/asan_fuchsia.cpp2
-rw-r--r--compiler-rt/lib/asan/asan_ignorelist.txt (renamed from compiler-rt/lib/asan/asan_blacklist.txt)6
-rw-r--r--compiler-rt/lib/asan/asan_interceptors.cpp33
-rw-r--r--compiler-rt/lib/asan/asan_interceptors.h11
-rw-r--r--compiler-rt/lib/asan/asan_interceptors_memintrinsics.cpp6
-rw-r--r--compiler-rt/lib/asan/asan_interface.inc11
-rw-r--r--compiler-rt/lib/asan/asan_internal.h15
-rw-r--r--compiler-rt/lib/asan/asan_malloc_linux.cpp36
-rw-r--r--compiler-rt/lib/asan/asan_malloc_local.h52
-rw-r--r--compiler-rt/lib/asan/asan_mapping.h52
-rw-r--r--compiler-rt/lib/asan/asan_mapping_myriad.h85
-rw-r--r--compiler-rt/lib/asan/asan_new_delete.cpp22
-rw-r--r--compiler-rt/lib/asan/asan_poisoning.cpp21
-rw-r--r--compiler-rt/lib/asan/asan_poisoning.h3
-rw-r--r--compiler-rt/lib/asan/asan_posix.cpp2
-rw-r--r--compiler-rt/lib/asan/asan_rtems.cpp266
-rw-r--r--compiler-rt/lib/asan/asan_rtl.cpp75
-rw-r--r--compiler-rt/lib/asan/asan_shadow_setup.cpp11
-rw-r--r--compiler-rt/lib/asan/asan_stack.cpp3
-rw-r--r--compiler-rt/lib/asan/asan_stack.h9
-rw-r--r--compiler-rt/lib/asan/asan_thread.cpp88
-rw-r--r--compiler-rt/lib/asan/asan_thread.h18
-rw-r--r--compiler-rt/lib/builtins/aarch64/fp_mode.c13
-rw-r--r--compiler-rt/lib/builtins/arm/fp_mode.c15
-rw-r--r--compiler-rt/lib/builtins/assembly.h9
-rw-r--r--compiler-rt/lib/builtins/atomic.c8
-rw-r--r--compiler-rt/lib/builtins/clear_cache.c3
-rw-r--r--compiler-rt/lib/builtins/comparedf2.c84
-rw-r--r--compiler-rt/lib/builtins/comparesf2.c84
-rw-r--r--compiler-rt/lib/builtins/comparetf2.c81
-rw-r--r--compiler-rt/lib/builtins/cpu_model.c8
-rw-r--r--compiler-rt/lib/builtins/divdc3.c12
-rw-r--r--compiler-rt/lib/builtins/divsc3.c11
-rw-r--r--compiler-rt/lib/builtins/divtc3.c11
-rw-r--r--compiler-rt/lib/builtins/fixdfdi.c2
-rw-r--r--compiler-rt/lib/builtins/fixsfdi.c2
-rw-r--r--compiler-rt/lib/builtins/fixunsdfdi.c2
-rw-r--r--compiler-rt/lib/builtins/fixunssfdi.c2
-rw-r--r--compiler-rt/lib/builtins/floatdidf.c2
-rw-r--r--compiler-rt/lib/builtins/floatundidf.c2
-rw-r--r--compiler-rt/lib/builtins/fp_add_impl.inc8
-rw-r--r--compiler-rt/lib/builtins/fp_compare_impl.inc116
-rw-r--r--compiler-rt/lib/builtins/fp_lib.h95
-rw-r--r--compiler-rt/lib/builtins/fp_mode.c4
-rw-r--r--compiler-rt/lib/builtins/fp_mode.h12
-rw-r--r--compiler-rt/lib/builtins/gcc_personality_v0.c33
-rw-r--r--compiler-rt/lib/builtins/i386/fp_mode.c12
-rw-r--r--compiler-rt/lib/builtins/int_lib.h24
-rw-r--r--compiler-rt/lib/builtins/int_math.h8
-rw-r--r--compiler-rt/lib/builtins/int_types.h12
-rw-r--r--compiler-rt/lib/builtins/int_util.c22
-rw-r--r--compiler-rt/lib/builtins/ppc/atomic.exp41
-rw-r--r--compiler-rt/lib/builtins/ppc/divtc3.c19
-rw-r--r--compiler-rt/lib/builtins/riscv/restore.S166
-rw-r--r--compiler-rt/lib/builtins/riscv/save.S184
-rw-r--r--compiler-rt/lib/cfi/cfi.cpp4
-rw-r--r--compiler-rt/lib/cfi/cfi_ignorelist.txt (renamed from compiler-rt/lib/cfi/cfi_blacklist.txt)0
-rw-r--r--compiler-rt/lib/dfsan/dfsan.cpp1130
-rw-r--r--compiler-rt/lib/dfsan/dfsan.h70
-rw-r--r--compiler-rt/lib/dfsan/dfsan.syms.extra1
-rw-r--r--compiler-rt/lib/dfsan/dfsan_allocator.cpp287
-rw-r--r--compiler-rt/lib/dfsan/dfsan_allocator.h30
-rw-r--r--compiler-rt/lib/dfsan/dfsan_chained_origin_depot.cpp22
-rw-r--r--compiler-rt/lib/dfsan/dfsan_chained_origin_depot.h26
-rw-r--r--compiler-rt/lib/dfsan/dfsan_custom.cpp1537
-rw-r--r--compiler-rt/lib/dfsan/dfsan_flags.h32
-rw-r--r--compiler-rt/lib/dfsan/dfsan_flags.inc20
-rw-r--r--compiler-rt/lib/dfsan/dfsan_interceptors.cpp199
-rw-r--r--compiler-rt/lib/dfsan/dfsan_new_delete.cpp124
-rw-r--r--compiler-rt/lib/dfsan/dfsan_origin.h127
-rw-r--r--compiler-rt/lib/dfsan/dfsan_platform.h133
-rw-r--r--compiler-rt/lib/dfsan/dfsan_thread.cpp146
-rw-r--r--compiler-rt/lib/dfsan/dfsan_thread.h83
-rw-r--r--compiler-rt/lib/dfsan/done_abilist.txt68
-rw-r--r--compiler-rt/lib/fuzzer/FuzzerBuiltins.h1
-rw-r--r--compiler-rt/lib/fuzzer/FuzzerBuiltinsMsvc.h6
-rw-r--r--compiler-rt/lib/fuzzer/FuzzerCorpus.h38
-rw-r--r--compiler-rt/lib/fuzzer/FuzzerDataFlowTrace.cpp4
-rw-r--r--compiler-rt/lib/fuzzer/FuzzerDataFlowTrace.h14
-rw-r--r--compiler-rt/lib/fuzzer/FuzzerDictionary.h8
-rw-r--r--compiler-rt/lib/fuzzer/FuzzerDriver.cpp12
-rw-r--r--compiler-rt/lib/fuzzer/FuzzerFork.cpp11
-rw-r--r--compiler-rt/lib/fuzzer/FuzzerIO.cpp11
-rw-r--r--compiler-rt/lib/fuzzer/FuzzerIO.h5
-rw-r--r--compiler-rt/lib/fuzzer/FuzzerInterceptors.cpp1
-rw-r--r--compiler-rt/lib/fuzzer/FuzzerLoop.cpp30
-rw-r--r--compiler-rt/lib/fuzzer/FuzzerMerge.cpp8
-rw-r--r--compiler-rt/lib/fuzzer/FuzzerMutate.cpp48
-rw-r--r--compiler-rt/lib/fuzzer/FuzzerRandom.h23
-rw-r--r--compiler-rt/lib/fuzzer/FuzzerSHA1.cpp9
-rw-r--r--compiler-rt/lib/fuzzer/FuzzerTracePC.cpp11
-rw-r--r--compiler-rt/lib/fuzzer/FuzzerTracePC.h39
-rw-r--r--compiler-rt/lib/fuzzer/FuzzerUtil.cpp9
-rw-r--r--compiler-rt/lib/fuzzer/FuzzerUtil.h6
-rw-r--r--compiler-rt/lib/fuzzer/FuzzerUtilFuchsia.cpp2
-rw-r--r--compiler-rt/lib/fuzzer/FuzzerUtilPosix.cpp11
-rw-r--r--compiler-rt/lib/gwp_asan/common.cpp2
-rw-r--r--compiler-rt/lib/gwp_asan/common.h6
-rw-r--r--compiler-rt/lib/gwp_asan/crash_handler.cpp2
-rw-r--r--compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp118
-rw-r--r--compiler-rt/lib/gwp_asan/guarded_pool_allocator.h28
-rw-r--r--compiler-rt/lib/gwp_asan/optional/backtrace_fuchsia.cpp10
-rw-r--r--compiler-rt/lib/gwp_asan/options.inc10
-rw-r--r--compiler-rt/lib/gwp_asan/platform_specific/guarded_pool_allocator_fuchsia.cpp5
-rw-r--r--compiler-rt/lib/gwp_asan/platform_specific/utilities_posix.cpp3
-rw-r--r--compiler-rt/lib/gwp_asan/utilities.cpp63
-rw-r--r--compiler-rt/lib/gwp_asan/utilities.h13
-rw-r--r--compiler-rt/lib/hwasan/hwasan.cpp96
-rw-r--r--compiler-rt/lib/hwasan/hwasan.h79
-rw-r--r--compiler-rt/lib/hwasan/hwasan_allocation_functions.cpp172
-rw-r--r--compiler-rt/lib/hwasan/hwasan_allocator.cpp59
-rw-r--r--compiler-rt/lib/hwasan/hwasan_allocator.h20
-rw-r--r--compiler-rt/lib/hwasan/hwasan_blacklist.txt7
-rw-r--r--compiler-rt/lib/hwasan/hwasan_checks.h5
-rw-r--r--compiler-rt/lib/hwasan/hwasan_dynamic_shadow.cpp25
-rw-r--r--compiler-rt/lib/hwasan/hwasan_flags.h2
-rw-r--r--compiler-rt/lib/hwasan/hwasan_flags.inc9
-rw-r--r--compiler-rt/lib/hwasan/hwasan_fuchsia.cpp213
-rw-r--r--compiler-rt/lib/hwasan/hwasan_ignorelist.txt7
-rw-r--r--compiler-rt/lib/hwasan/hwasan_interceptors.cpp185
-rw-r--r--compiler-rt/lib/hwasan/hwasan_interceptors_vfork.S3
-rw-r--r--compiler-rt/lib/hwasan/hwasan_linux.cpp169
-rw-r--r--compiler-rt/lib/hwasan/hwasan_mapping.h15
-rw-r--r--compiler-rt/lib/hwasan/hwasan_memintrinsics.cpp4
-rw-r--r--compiler-rt/lib/hwasan/hwasan_new_delete.cpp72
-rw-r--r--compiler-rt/lib/hwasan/hwasan_poisoning.cpp24
-rw-r--r--compiler-rt/lib/hwasan/hwasan_report.cpp212
-rw-r--r--compiler-rt/lib/hwasan/hwasan_setjmp.S6
-rw-r--r--compiler-rt/lib/hwasan/hwasan_tag_mismatch_aarch64.S6
-rw-r--r--compiler-rt/lib/hwasan/hwasan_thread.cpp36
-rw-r--r--compiler-rt/lib/hwasan/hwasan_thread.h13
-rw-r--r--compiler-rt/lib/hwasan/hwasan_thread_list.cpp2
-rw-r--r--compiler-rt/lib/hwasan/hwasan_thread_list.h37
-rw-r--r--compiler-rt/lib/interception/interception.h33
-rw-r--r--compiler-rt/lib/lsan/lsan.cpp14
-rw-r--r--compiler-rt/lib/lsan/lsan_allocator.cpp16
-rw-r--r--compiler-rt/lib/lsan/lsan_allocator.h2
-rw-r--r--compiler-rt/lib/lsan/lsan_common.cpp2
-rw-r--r--compiler-rt/lib/lsan/lsan_common.h8
-rw-r--r--compiler-rt/lib/lsan/lsan_interceptors.cpp2
-rw-r--r--compiler-rt/lib/lsan/lsan_posix.cpp6
-rw-r--r--compiler-rt/lib/lsan/lsan_thread.cpp9
-rw-r--r--compiler-rt/lib/memprof/memprof_allocator.cpp4
-rw-r--r--compiler-rt/lib/memprof/memprof_allocator.h1
-rw-r--r--compiler-rt/lib/memprof/memprof_descriptions.cpp4
-rw-r--r--compiler-rt/lib/memprof/memprof_interceptors.cpp2
-rw-r--r--compiler-rt/lib/memprof/memprof_interceptors.h6
-rw-r--r--compiler-rt/lib/memprof/memprof_rtl.cpp18
-rw-r--r--compiler-rt/lib/memprof/memprof_stack.h9
-rw-r--r--compiler-rt/lib/memprof/memprof_thread.cpp12
-rw-r--r--compiler-rt/lib/memprof/memprof_thread.h3
-rw-r--r--compiler-rt/lib/msan/msan.cpp14
-rw-r--r--compiler-rt/lib/msan/msan.h10
-rw-r--r--compiler-rt/lib/msan/msan_allocator.cpp6
-rw-r--r--compiler-rt/lib/msan/msan_allocator.h1
-rw-r--r--compiler-rt/lib/msan/msan_blacklist.txt10
-rw-r--r--compiler-rt/lib/msan/msan_chained_origin_depot.cpp103
-rw-r--r--compiler-rt/lib/msan/msan_chained_origin_depot.h14
-rw-r--r--compiler-rt/lib/msan/msan_ignorelist.txt10
-rw-r--r--compiler-rt/lib/msan/msan_interceptors.cpp21
-rw-r--r--compiler-rt/lib/msan/msan_poisoning.cpp2
-rw-r--r--compiler-rt/lib/orc/adt.h113
-rw-r--r--compiler-rt/lib/orc/c_api.h208
-rw-r--r--compiler-rt/lib/orc/common.h48
-rw-r--r--compiler-rt/lib/orc/compiler.h65
-rw-r--r--compiler-rt/lib/orc/endianness.h143
-rw-r--r--compiler-rt/lib/orc/error.h428
-rw-r--r--compiler-rt/lib/orc/executor_address.h208
-rw-r--r--compiler-rt/lib/orc/extensible_rtti.cpp24
-rw-r--r--compiler-rt/lib/orc/extensible_rtti.h145
-rw-r--r--compiler-rt/lib/orc/log_error_to_stderr.cpp19
-rw-r--r--compiler-rt/lib/orc/macho_platform.cpp731
-rw-r--r--compiler-rt/lib/orc/macho_platform.h135
-rw-r--r--compiler-rt/lib/orc/macho_tlv.x86-64.S68
-rw-r--r--compiler-rt/lib/orc/run_program_wrapper.cpp51
-rw-r--r--compiler-rt/lib/orc/simple_packed_serialization.h579
-rw-r--r--compiler-rt/lib/orc/stl_extras.h46
-rw-r--r--compiler-rt/lib/orc/wrapper_function_utils.h367
-rw-r--r--compiler-rt/lib/profile/GCDAProfiling.c29
-rw-r--r--compiler-rt/lib/profile/InstrProfiling.h19
-rw-r--r--compiler-rt/lib/profile/InstrProfilingBiasVar.c15
-rw-r--r--compiler-rt/lib/profile/InstrProfilingBuffer.c11
-rw-r--r--compiler-rt/lib/profile/InstrProfilingFile.c339
-rw-r--r--compiler-rt/lib/profile/InstrProfilingInternal.c10
-rw-r--r--compiler-rt/lib/profile/InstrProfilingInternal.h10
-rw-r--r--compiler-rt/lib/profile/InstrProfilingMerge.c46
-rw-r--r--compiler-rt/lib/profile/InstrProfilingPlatformDarwin.c6
-rw-r--r--compiler-rt/lib/profile/InstrProfilingPlatformFuchsia.c33
-rw-r--r--compiler-rt/lib/profile/InstrProfilingPlatformLinux.c138
-rw-r--r--compiler-rt/lib/profile/InstrProfilingPlatformOther.c5
-rw-r--r--compiler-rt/lib/profile/InstrProfilingPlatformWindows.c5
-rw-r--r--compiler-rt/lib/profile/InstrProfilingPort.h2
-rw-r--r--compiler-rt/lib/profile/InstrProfilingUtil.c27
-rw-r--r--compiler-rt/lib/profile/InstrProfilingUtil.h11
-rw-r--r--compiler-rt/lib/profile/InstrProfilingWriter.c16
-rw-r--r--compiler-rt/lib/profile/WindowsMMap.c12
-rw-r--r--compiler-rt/lib/profile/WindowsMMap.h10
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_addrhashmap.h106
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp38
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h8
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_allocator_local_cache.h19
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h7
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h253
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h8
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_allocator_size_class_map.h2
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_mips.h8
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_chained_origin_depot.cpp108
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_chained_origin_depot.h88
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_common.cpp25
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_common.h73
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc113
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc9
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_aarch64.inc.S5
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp11
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_common_nolibc.cpp1
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_deadlock_detector1.cpp2
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_deadlock_detector2.cpp32
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_errno.h3
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_file.cpp3
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_flags.cpp7
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_flags.inc2
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp101
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.h2
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_internal_defs.h3
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_libc.h3
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_libignore.cpp2
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp118
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_linux.h3
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp459
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_local_address_space_view.h2
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp107
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_mac.h17
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_malloc_mac.inc6
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_mutex.cpp225
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_mutex.h368
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_netbsd.cpp17
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_platform.h39
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h39
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cpp3
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp14
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h20
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_posix.cpp4
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_posix.h4
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_posix_libcdep.cpp24
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_printf.cpp84
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_procmaps_common.cpp2
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_procmaps_mac.cpp4
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_ptrauth.h20
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_quarantine.h3
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_rtems.cpp281
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_rtems.h20
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_solaris.cpp18
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.cpp3
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h36
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_libcdep.cpp187
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_suppressions.cpp4
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cpp2
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup.cpp17
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp11
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_report.cpp26
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_rtems.h40
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_win.cpp23
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_termination.cpp33
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.cpp41
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h25
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_thread_safety.h42
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_win.cpp98
-rw-r--r--compiler-rt/lib/sanitizer_common/symbolizer/sanitizer_symbolize.cpp57
-rwxr-xr-xcompiler-rt/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh2
-rw-r--r--compiler-rt/lib/scudo/scudo_allocator.cpp21
-rw-r--r--compiler-rt/lib/scudo/scudo_termination.cpp2
-rw-r--r--compiler-rt/lib/scudo/scudo_tsd.h6
-rw-r--r--compiler-rt/lib/scudo/scudo_tsd_exclusive.inc16
-rw-r--r--compiler-rt/lib/scudo/scudo_tsd_shared.cpp2
-rw-r--r--compiler-rt/lib/scudo/scudo_tsd_shared.inc3
-rw-r--r--compiler-rt/lib/scudo/standalone/allocator_config.h86
-rw-r--r--compiler-rt/lib/scudo/standalone/bytemap.h7
-rw-r--r--compiler-rt/lib/scudo/standalone/combined.h759
-rw-r--r--compiler-rt/lib/scudo/standalone/common.cpp16
-rw-r--r--compiler-rt/lib/scudo/standalone/common.h9
-rw-r--r--compiler-rt/lib/scudo/standalone/flags.cpp15
-rw-r--r--compiler-rt/lib/scudo/standalone/flags.inc6
-rw-r--r--compiler-rt/lib/scudo/standalone/flags_parser.h2
-rw-r--r--compiler-rt/lib/scudo/standalone/fuchsia.cpp29
-rw-r--r--compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp14
-rw-r--r--compiler-rt/lib/scudo/standalone/include/scudo/interface.h25
-rw-r--r--compiler-rt/lib/scudo/standalone/internal_defs.h65
-rw-r--r--compiler-rt/lib/scudo/standalone/linux.cpp23
-rw-r--r--compiler-rt/lib/scudo/standalone/list.h6
-rw-r--r--compiler-rt/lib/scudo/standalone/local_cache.h85
-rw-r--r--compiler-rt/lib/scudo/standalone/memtag.h274
-rw-r--r--compiler-rt/lib/scudo/standalone/mutex.h5
-rw-r--r--compiler-rt/lib/scudo/standalone/options.h4
-rw-r--r--compiler-rt/lib/scudo/standalone/platform.h8
-rw-r--r--compiler-rt/lib/scudo/standalone/primary32.h49
-rw-r--r--compiler-rt/lib/scudo/standalone/primary64.h133
-rw-r--r--compiler-rt/lib/scudo/standalone/quarantine.h30
-rw-r--r--compiler-rt/lib/scudo/standalone/release.h34
-rw-r--r--compiler-rt/lib/scudo/standalone/report.cpp6
-rw-r--r--compiler-rt/lib/scudo/standalone/secondary.h440
-rw-r--r--compiler-rt/lib/scudo/standalone/size_class_map.h129
-rw-r--r--compiler-rt/lib/scudo/standalone/stack_depot.h6
-rw-r--r--compiler-rt/lib/scudo/standalone/stats.h27
-rw-r--r--compiler-rt/lib/scudo/standalone/string_utils.cpp23
-rw-r--r--compiler-rt/lib/scudo/standalone/string_utils.h12
-rw-r--r--compiler-rt/lib/scudo/standalone/trusty.cpp100
-rw-r--r--compiler-rt/lib/scudo/standalone/trusty.h24
-rw-r--r--compiler-rt/lib/scudo/standalone/tsd.h13
-rw-r--r--compiler-rt/lib/scudo/standalone/tsd_exclusive.h47
-rw-r--r--compiler-rt/lib/scudo/standalone/tsd_shared.h42
-rw-r--r--compiler-rt/lib/scudo/standalone/vector.h31
-rw-r--r--compiler-rt/lib/scudo/standalone/wrappers_c.cpp1
-rw-r--r--compiler-rt/lib/scudo/standalone/wrappers_c.inc8
-rw-r--r--compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp23
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_clock.cpp66
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_clock.h16
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_defs.h30
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_dense_alloc.h33
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_external.cpp4
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_interceptors.h16
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp9
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp132
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_interface.cpp8
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_interface.h12
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp9
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp74
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_interface_inl.h22
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_interface_java.cpp1
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_mman.cpp7
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_mutex.cpp289
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_mutex.h90
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_platform.h170
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp22
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp2
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_platform_posix.cpp22
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_report.cpp14
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_rtl.cpp198
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_rtl.h35
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp73
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp60
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_rtl_s390x.S47
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp25
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_stack_trace.cpp8
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_stat.cpp186
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_stat.h191
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_sync.cpp13
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_sync.h9
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_trace.h5
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_update_shadow_word_inl.h12
-rw-r--r--compiler-rt/lib/ubsan/ubsan_diag.cpp8
-rw-r--r--compiler-rt/lib/ubsan/ubsan_diag_standalone.cpp8
-rw-r--r--compiler-rt/lib/ubsan/ubsan_init.cpp9
-rw-r--r--compiler-rt/lib/ubsan/ubsan_monitor.cpp6
-rw-r--r--compiler-rt/lib/ubsan/ubsan_platform.h8
-rw-r--r--compiler-rt/lib/ubsan/ubsan_value.cpp2
-rw-r--r--compiler-rt/lib/xray/xray_fdr_logging.cpp11
-rw-r--r--compiler-rt/lib/xray/xray_trampoline_x86_64.S77
-rw-r--r--compiler-rt/lib/xray/xray_utils.cpp1
365 files changed, 14611 insertions, 6280 deletions
diff --git a/compiler-rt/lib/asan/asan_allocator.cpp b/compiler-rt/lib/asan/asan_allocator.cpp
index cd97b37652f8..414fba3b427d 100644
--- a/compiler-rt/lib/asan/asan_allocator.cpp
+++ b/compiler-rt/lib/asan/asan_allocator.cpp
@@ -476,7 +476,7 @@ struct Allocator {
return false;
if (m->Beg() != addr) return false;
AsanThread *t = GetCurrentThread();
- m->SetAllocContext(t ? t->tid() : 0, StackDepotPut(*stack));
+ m->SetAllocContext(t ? t->tid() : kMainTid, StackDepotPut(*stack));
return true;
}
@@ -570,7 +570,7 @@ struct Allocator {
m->SetUsedSize(size);
m->user_requested_alignment_log = user_requested_alignment_log;
- m->SetAllocContext(t ? t->tid() : 0, StackDepotPut(*stack));
+ m->SetAllocContext(t ? t->tid() : kMainTid, StackDepotPut(*stack));
uptr size_rounded_down_to_granularity =
RoundDownTo(size, SHADOW_GRANULARITY);
@@ -852,12 +852,12 @@ struct Allocator {
quarantine.PrintStats();
}
- void ForceLock() {
+ void ForceLock() ACQUIRE(fallback_mutex) {
allocator.ForceLock();
fallback_mutex.Lock();
}
- void ForceUnlock() {
+ void ForceUnlock() RELEASE(fallback_mutex) {
fallback_mutex.Unlock();
allocator.ForceUnlock();
}
@@ -1081,11 +1081,9 @@ uptr asan_mz_size(const void *ptr) {
return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
}
-void asan_mz_force_lock() {
- instance.ForceLock();
-}
+void asan_mz_force_lock() NO_THREAD_SAFETY_ANALYSIS { instance.ForceLock(); }
-void asan_mz_force_unlock() {
+void asan_mz_force_unlock() NO_THREAD_SAFETY_ANALYSIS {
instance.ForceUnlock();
}
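
The ACQUIRE/RELEASE and NO_THREAD_SAFETY_ANALYSIS markers introduced above come from Clang's -Wthread-safety analysis (the sanitizer wrappers live in the new sanitizer_thread_safety.h listed in the diffstat). A minimal standalone sketch of the same pattern, using the raw Clang attributes and hypothetical names rather than the sanitizer macros:

// Minimal illustration of the annotations behind ACQUIRE/RELEASE/
// NO_THREAD_SAFETY_ANALYSIS (hypothetical names; check with:
//   clang++ -fsyntax-only -Wthread-safety example.cpp).
class __attribute__((capability("mutex"))) Mutex {
 public:
  void Lock() __attribute__((acquire_capability()));    // acquires *this
  void Unlock() __attribute__((release_capability()));  // releases *this
};

struct Allocator {
  Mutex fallback_mutex;
  // Tells the analysis this function returns with fallback_mutex held ...
  void ForceLock() __attribute__((acquire_capability(fallback_mutex))) {
    fallback_mutex.Lock();
  }
  // ... and that this one releases it again.
  void ForceUnlock() __attribute__((release_capability(fallback_mutex))) {
    fallback_mutex.Unlock();
  }
};

// Wrappers such as asan_mz_force_lock intentionally leave the mutex held
// across the call, so they opt out of the analysis entirely.
Allocator instance;
void force_lock() __attribute__((no_thread_safety_analysis)) {
  instance.ForceLock();
}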
diff --git a/compiler-rt/lib/asan/asan_descriptions.cpp b/compiler-rt/lib/asan/asan_descriptions.cpp
index 153c874a4e77..2ba8a02f8410 100644
--- a/compiler-rt/lib/asan/asan_descriptions.cpp
+++ b/compiler-rt/lib/asan/asan_descriptions.cpp
@@ -44,11 +44,11 @@ void DescribeThread(AsanThreadContext *context) {
CHECK(context);
asanThreadRegistry().CheckLocked();
// No need to announce the main thread.
- if (context->tid == 0 || context->announced) {
+ if (context->tid == kMainTid || context->announced) {
return;
}
context->announced = true;
- InternalScopedString str(1024);
+ InternalScopedString str;
str.append("Thread %s", AsanThreadIdAndName(context).c_str());
if (context->parent_tid == kInvalidTid) {
str.append(" created by unknown thread\n");
@@ -77,7 +77,6 @@ static bool GetShadowKind(uptr addr, ShadowKind *shadow_kind) {
} else if (AddrIsInLowShadow(addr)) {
*shadow_kind = kShadowKindLow;
} else {
- CHECK(0 && "Address is not in memory and not in shadow?");
return false;
}
return true;
@@ -126,7 +125,7 @@ static void GetAccessToHeapChunkInformation(ChunkAccess *descr,
static void PrintHeapChunkAccess(uptr addr, const ChunkAccess &descr) {
Decorator d;
- InternalScopedString str(4096);
+ InternalScopedString str;
str.append("%s", d.Location());
switch (descr.access_type) {
case kAccessTypeLeft:
@@ -243,7 +242,7 @@ static void PrintAccessAndVarIntersection(const StackVarDescr &var, uptr addr,
else if (addr >= prev_var_end && addr - prev_var_end >= var.beg - addr_end)
pos_descr = "underflows";
}
- InternalScopedString str(1024);
+ InternalScopedString str;
str.append(" [%zd, %zd)", var.beg, var_end);
// Render variable name.
str.append(" '");
@@ -276,7 +275,7 @@ bool DescribeAddressIfStack(uptr addr, uptr access_size) {
// Global descriptions
static void DescribeAddressRelativeToGlobal(uptr addr, uptr access_size,
const __asan_global &g) {
- InternalScopedString str(4096);
+ InternalScopedString str;
Decorator d;
str.append("%s", d.Location());
if (addr < g.beg) {
@@ -464,7 +463,13 @@ AddressDescription::AddressDescription(uptr addr, uptr access_size,
return;
}
data.kind = kAddressKindWild;
- addr = 0;
+ data.wild.addr = addr;
+ data.wild.access_size = access_size;
+}
+
+void WildAddressDescription::Print() const {
+ Printf("Address %p is a wild pointer inside of access range of size %p.\n",
+ addr, access_size);
}
void PrintAddressDescription(uptr addr, uptr access_size,
diff --git a/compiler-rt/lib/asan/asan_descriptions.h b/compiler-rt/lib/asan/asan_descriptions.h
index ee0e2061559e..650e2eb9173a 100644
--- a/compiler-rt/lib/asan/asan_descriptions.h
+++ b/compiler-rt/lib/asan/asan_descriptions.h
@@ -146,6 +146,13 @@ struct StackAddressDescription {
bool GetStackAddressInformation(uptr addr, uptr access_size,
StackAddressDescription *descr);
+struct WildAddressDescription {
+ uptr addr;
+ uptr access_size;
+
+ void Print() const;
+};
+
struct GlobalAddressDescription {
uptr addr;
// Assume address is close to at most four globals.
@@ -193,7 +200,7 @@ class AddressDescription {
HeapAddressDescription heap;
StackAddressDescription stack;
GlobalAddressDescription global;
- uptr addr;
+ WildAddressDescription wild;
};
};
@@ -211,7 +218,7 @@ class AddressDescription {
uptr Address() const {
switch (data.kind) {
case kAddressKindWild:
- return data.addr;
+ return data.wild.addr;
case kAddressKindShadow:
return data.shadow.addr;
case kAddressKindHeap:
@@ -226,7 +233,7 @@ class AddressDescription {
void Print(const char *bug_descr = nullptr) const {
switch (data.kind) {
case kAddressKindWild:
- Printf("Address %p is a wild pointer.\n", data.addr);
+ data.wild.Print();
return;
case kAddressKindShadow:
return data.shadow.Print();
diff --git a/compiler-rt/lib/asan/asan_errors.cpp b/compiler-rt/lib/asan/asan_errors.cpp
index 541c6e0353b5..45166c064877 100644
--- a/compiler-rt/lib/asan/asan_errors.cpp
+++ b/compiler-rt/lib/asan/asan_errors.cpp
@@ -343,7 +343,8 @@ void ErrorODRViolation::Print() {
Report("ERROR: AddressSanitizer: %s (%p):\n", scariness.GetDescription(),
global1.beg);
Printf("%s", d.Default());
- InternalScopedString g1_loc(256), g2_loc(256);
+ InternalScopedString g1_loc;
+ InternalScopedString g2_loc;
PrintGlobalLocation(&g1_loc, global1);
PrintGlobalLocation(&g2_loc, global2);
Printf(" [1] size=%zd '%s' %s\n", global1.size,
@@ -360,7 +361,7 @@ void ErrorODRViolation::Print() {
Report(
"HINT: if you don't care about these errors you may set "
"ASAN_OPTIONS=detect_odr_violation=0\n");
- InternalScopedString error_msg(256);
+ InternalScopedString error_msg;
error_msg.append("%s: global '%s' at %s", scariness.GetDescription(),
MaybeDemangleGlobalName(global1.name), g1_loc.data());
ReportErrorSummary(error_msg.data());
@@ -532,7 +533,6 @@ static void PrintLegend(InternalScopedString *str) {
PrintShadowByte(str, " ASan internal: ", kAsanInternalHeapMagic);
PrintShadowByte(str, " Left alloca redzone: ", kAsanAllocaLeftMagic);
PrintShadowByte(str, " Right alloca redzone: ", kAsanAllocaRightMagic);
- PrintShadowByte(str, " Shadow gap: ", kAsanShadowGap);
}
static void PrintShadowBytes(InternalScopedString *str, const char *before,
@@ -554,7 +554,7 @@ static void PrintShadowMemoryForAddress(uptr addr) {
uptr shadow_addr = MemToShadow(addr);
const uptr n_bytes_per_row = 16;
uptr aligned_shadow = shadow_addr & ~(n_bytes_per_row - 1);
- InternalScopedString str(4096 * 8);
+ InternalScopedString str;
str.append("Shadow bytes around the buggy address:\n");
for (int i = -5; i <= 5; i++) {
uptr row_shadow_addr = aligned_shadow + i * n_bytes_per_row;
diff --git a/compiler-rt/lib/asan/asan_fake_stack.cpp b/compiler-rt/lib/asan/asan_fake_stack.cpp
index 295e6debc96c..bf5c342ee59d 100644
--- a/compiler-rt/lib/asan/asan_fake_stack.cpp
+++ b/compiler-rt/lib/asan/asan_fake_stack.cpp
@@ -65,7 +65,7 @@ FakeStack *FakeStack::Create(uptr stack_size_log) {
void FakeStack::Destroy(int tid) {
PoisonAll(0);
if (Verbosity() >= 2) {
- InternalScopedString str(kNumberOfSizeClasses * 50);
+ InternalScopedString str;
for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++)
str.append("%zd: %zd/%zd; ", class_id, hint_position_[class_id],
NumberOfFrames(stack_size_log(), class_id));
@@ -187,7 +187,7 @@ void SetTLSFakeStack(FakeStack *fs) { }
static FakeStack *GetFakeStack() {
AsanThread *t = GetCurrentThread();
if (!t) return nullptr;
- return t->fake_stack();
+ return t->get_or_create_fake_stack();
}
static FakeStack *GetFakeStackFast() {
@@ -198,7 +198,13 @@ static FakeStack *GetFakeStackFast() {
return GetFakeStack();
}
-ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size) {
+static FakeStack *GetFakeStackFastAlways() {
+ if (FakeStack *fs = GetTLSFakeStack())
+ return fs;
+ return GetFakeStack();
+}
+
+static ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size) {
FakeStack *fs = GetFakeStackFast();
if (!fs) return 0;
uptr local_stack;
@@ -210,7 +216,21 @@ ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size) {
return ptr;
}
-ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size) {
+static ALWAYS_INLINE uptr OnMallocAlways(uptr class_id, uptr size) {
+ FakeStack *fs = GetFakeStackFastAlways();
+ if (!fs)
+ return 0;
+ uptr local_stack;
+ uptr real_stack = reinterpret_cast<uptr>(&local_stack);
+ FakeFrame *ff = fs->Allocate(fs->stack_size_log(), class_id, real_stack);
+ if (!ff)
+ return 0; // Out of fake stack.
+ uptr ptr = reinterpret_cast<uptr>(ff);
+ SetShadow(ptr, size, class_id, 0);
+ return ptr;
+}
+
+static ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size) {
FakeStack::Deallocate(ptr, class_id);
SetShadow(ptr, size, class_id, kMagic8);
}
@@ -219,14 +239,18 @@ ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size) {
// ---------------------- Interface ---------------- {{{1
using namespace __asan;
-#define DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(class_id) \
- extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr \
- __asan_stack_malloc_##class_id(uptr size) { \
- return OnMalloc(class_id, size); \
- } \
- extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __asan_stack_free_##class_id( \
- uptr ptr, uptr size) { \
- OnFree(ptr, class_id, size); \
+#define DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(class_id) \
+ extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr \
+ __asan_stack_malloc_##class_id(uptr size) { \
+ return OnMalloc(class_id, size); \
+ } \
+ extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr \
+ __asan_stack_malloc_always_##class_id(uptr size) { \
+ return OnMallocAlways(class_id, size); \
+ } \
+ extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __asan_stack_free_##class_id( \
+ uptr ptr, uptr size) { \
+ OnFree(ptr, class_id, size); \
}
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(0)
@@ -240,7 +264,11 @@ DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(7)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(8)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(9)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(10)
+
extern "C" {
+// TODO: remove this method and fix tests that use it by setting
+// -asan-use-after-return=never, after modal UAR flag lands
+// (https://github.com/google/sanitizers/issues/1394)
SANITIZER_INTERFACE_ATTRIBUTE
void *__asan_get_current_fake_stack() { return GetFakeStackFast(); }
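
Both families of entry points hand out a fake frame for a given size class; the new _always_ variants differ only in that they create the calling thread's fake stack on demand (via get_or_create_fake_stack) instead of returning 0 when it does not exist yet. A rough, hypothetical illustration of how code might use them, assuming class 0 is the smallest (64-byte) size class:

#include <cstdint>
using uptr = uintptr_t;  // stand-in for the sanitizer's uptr

extern "C" uptr __asan_stack_malloc_0(uptr size);         // may return 0
extern "C" uptr __asan_stack_malloc_always_0(uptr size);  // creates the fake stack if needed
extern "C" void __asan_stack_free_0(uptr ptr, uptr size);

void frame_with_small_locals() {
  // Try to place the frame's locals on the fake stack so that
  // use-after-return can be detected after this function exits.
  uptr fake = __asan_stack_malloc_always_0(64);
  char fallback[64];
  char *locals = fake ? reinterpret_cast<char *>(fake) : fallback;
  // ... code operating on locals ...
  (void)locals;
  if (fake)
    __asan_stack_free_0(fake, 64);
}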
diff --git a/compiler-rt/lib/asan/asan_flags.cpp b/compiler-rt/lib/asan/asan_flags.cpp
index cb6a89fe32ce..c64e46470287 100644
--- a/compiler-rt/lib/asan/asan_flags.cpp
+++ b/compiler-rt/lib/asan/asan_flags.cpp
@@ -155,10 +155,6 @@ void InitializeFlags() {
CHECK_LE(f->max_redzone, 2048);
CHECK(IsPowerOfTwo(f->redzone));
CHECK(IsPowerOfTwo(f->max_redzone));
- if (SANITIZER_RTEMS) {
- CHECK(!f->unmap_shadow_on_exit);
- CHECK(!f->protect_shadow_gap);
- }
// quarantine_size is deprecated but we still honor it.
// quarantine_size can not be used together with quarantine_size_mb.
diff --git a/compiler-rt/lib/asan/asan_flags.inc b/compiler-rt/lib/asan/asan_flags.inc
index 43c70dbca56b..514b225c4073 100644
--- a/compiler-rt/lib/asan/asan_flags.inc
+++ b/compiler-rt/lib/asan/asan_flags.inc
@@ -87,8 +87,7 @@ ASAN_FLAG(bool, check_malloc_usable_size, true,
"295.*.")
ASAN_FLAG(bool, unmap_shadow_on_exit, false,
"If set, explicitly unmaps the (huge) shadow at exit.")
-ASAN_FLAG(bool, protect_shadow_gap, !SANITIZER_RTEMS,
- "If set, mprotect the shadow gap")
+ASAN_FLAG(bool, protect_shadow_gap, true, "If set, mprotect the shadow gap")
ASAN_FLAG(bool, print_stats, false,
"Print various statistics after printing an error message or if "
"atexit=1.")
diff --git a/compiler-rt/lib/asan/asan_fuchsia.cpp b/compiler-rt/lib/asan/asan_fuchsia.cpp
index 6c61344f87cf..b0c7255144ac 100644
--- a/compiler-rt/lib/asan/asan_fuchsia.cpp
+++ b/compiler-rt/lib/asan/asan_fuchsia.cpp
@@ -81,7 +81,7 @@ void AsanTSDInit(void (*destructor)(void *tsd)) {
void PlatformTSDDtor(void *tsd) { UNREACHABLE(__func__); }
static inline size_t AsanThreadMmapSize() {
- return RoundUpTo(sizeof(AsanThread), PAGE_SIZE);
+ return RoundUpTo(sizeof(AsanThread), _zx_system_get_page_size());
}
struct AsanThread::InitOptions {
diff --git a/compiler-rt/lib/asan/asan_blacklist.txt b/compiler-rt/lib/asan/asan_ignorelist.txt
index c25921fd5fe7..5ce59926ce2f 100644
--- a/compiler-rt/lib/asan/asan_blacklist.txt
+++ b/compiler-rt/lib/asan/asan_ignorelist.txt
@@ -1,6 +1,6 @@
-# Blacklist for AddressSanitizer. Turns off instrumentation of particular
-# functions or sources. Use with care. You may set location of blacklist
-# at compile-time using -fsanitize-blacklist=<path> flag.
+# Ignorelist for AddressSanitizer. Turns off instrumentation of particular
+# functions or sources. Use with care. You may set location of ignorelist
+# at compile-time using -fsanitize-ignorelist=<path> flag.
# Example usage:
# fun:*bad_function_name*
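
For example (illustrative file name and entries), a project-local ignorelist can be written and passed at compile time like this:

# my_asan_ignorelist.txt
fun:*bad_function_name*
src:third_party/legacy_code.cpp

clang++ -fsanitize=address -fsanitize-ignorelist=my_asan_ignorelist.txt test.cpp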
diff --git a/compiler-rt/lib/asan/asan_interceptors.cpp b/compiler-rt/lib/asan/asan_interceptors.cpp
index cd07d51878b1..d0a6dd48a748 100644
--- a/compiler-rt/lib/asan/asan_interceptors.cpp
+++ b/compiler-rt/lib/asan/asan_interceptors.cpp
@@ -23,25 +23,25 @@
#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_libc.h"
-// There is no general interception at all on Fuchsia and RTEMS.
+// There is no general interception at all on Fuchsia.
// Only the functions in asan_interceptors_memintrinsics.cpp are
// really defined to replace libc functions.
-#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
+#if !SANITIZER_FUCHSIA
-#if SANITIZER_POSIX
-#include "sanitizer_common/sanitizer_posix.h"
-#endif
+# if SANITIZER_POSIX
+# include "sanitizer_common/sanitizer_posix.h"
+# endif
-#if ASAN_INTERCEPT__UNWIND_RAISEEXCEPTION || \
- ASAN_INTERCEPT__SJLJ_UNWIND_RAISEEXCEPTION
-#include <unwind.h>
-#endif
+# if ASAN_INTERCEPT__UNWIND_RAISEEXCEPTION || \
+ ASAN_INTERCEPT__SJLJ_UNWIND_RAISEEXCEPTION
+# include <unwind.h>
+# endif
-#if defined(__i386) && SANITIZER_LINUX
-#define ASAN_PTHREAD_CREATE_VERSION "GLIBC_2.1"
-#elif defined(__mips__) && SANITIZER_LINUX
-#define ASAN_PTHREAD_CREATE_VERSION "GLIBC_2.2"
-#endif
+# if defined(__i386) && SANITIZER_LINUX
+# define ASAN_PTHREAD_CREATE_VERSION "GLIBC_2.1"
+# elif defined(__mips__) && SANITIZER_LINUX
+# define ASAN_PTHREAD_CREATE_VERSION "GLIBC_2.2"
+# endif
namespace __asan {
@@ -90,8 +90,10 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
(void) ctx; \
#define COMMON_INTERCEPT_FUNCTION(name) ASAN_INTERCEPT_FUNC(name)
-#define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \
+#define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \
ASAN_INTERCEPT_FUNC_VER(name, ver)
+#define COMMON_INTERCEPT_FUNCTION_VER_UNVERSIONED_FALLBACK(name, ver) \
+ ASAN_INTERCEPT_FUNC_VER_UNVERSIONED_FALLBACK(name, ver)
#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
ASAN_WRITE_RANGE(ctx, ptr, size)
#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
@@ -672,6 +674,7 @@ void InitializeAsanInterceptors() {
// Intercept threading-related functions
#if ASAN_INTERCEPT_PTHREAD_CREATE
+// TODO: this should probably have an unversioned fallback for newer arches?
#if defined(ASAN_PTHREAD_CREATE_VERSION)
ASAN_INTERCEPT_FUNC_VER(pthread_create, ASAN_PTHREAD_CREATE_VERSION);
#else
diff --git a/compiler-rt/lib/asan/asan_interceptors.h b/compiler-rt/lib/asan/asan_interceptors.h
index 45cdb80b1b64..a9249dea45b9 100644
--- a/compiler-rt/lib/asan/asan_interceptors.h
+++ b/compiler-rt/lib/asan/asan_interceptors.h
@@ -34,10 +34,10 @@ void InitializePlatformInterceptors();
} // namespace __asan
-// There is no general interception at all on Fuchsia and RTEMS.
+// There is no general interception at all on Fuchsia.
// Only the functions in asan_interceptors_memintrinsics.h are
// really defined to replace libc functions.
-#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
+#if !SANITIZER_FUCHSIA
// Use macro to describe if specific function should be
// intercepted on a given platform.
@@ -145,6 +145,13 @@ DECLARE_REAL(char*, strstr, const char *s1, const char *s2)
VReport(1, "AddressSanitizer: failed to intercept '%s@@%s'\n", #name, \
#ver); \
} while (0)
+#define ASAN_INTERCEPT_FUNC_VER_UNVERSIONED_FALLBACK(name, ver) \
+ do { \
+ if (!INTERCEPT_FUNCTION_VER(name, ver) && !INTERCEPT_FUNCTION(name)) \
+ VReport(1, "AddressSanitizer: failed to intercept '%s@@%s' or '%s'\n", \
+ #name, #ver, #name); \
+ } while (0)
+
#else
// OS X interceptors don't need to be initialized with INTERCEPT_FUNCTION.
#define ASAN_INTERCEPT_FUNC(name)
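
The fallback macro above captures a common pattern: try to hook the versioned libc symbol first, and fall back to the default (unversioned) symbol when that version was never exported, e.g. on newer architectures. A standalone sketch of the same idea using the glibc dynamic loader directly (illustrative names, simplified signature, and version string; not the interception machinery itself):

#ifndef _GNU_SOURCE
#define _GNU_SOURCE  // for RTLD_NEXT and dlvsym
#endif
#include <dlfcn.h>

using pthread_create_fn = int (*)(void *, const void *, void *(*)(void *), void *);

// Prefer the versioned symbol; fall back to the unversioned default when
// this libc never shipped that particular symbol version.
static pthread_create_fn resolve_real_pthread_create() {
  void *sym = dlvsym(RTLD_NEXT, "pthread_create", "GLIBC_2.2");
  if (!sym)
    sym = dlsym(RTLD_NEXT, "pthread_create");
  return reinterpret_cast<pthread_create_fn>(sym);
}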
diff --git a/compiler-rt/lib/asan/asan_interceptors_memintrinsics.cpp b/compiler-rt/lib/asan/asan_interceptors_memintrinsics.cpp
index ccdd5159042c..9c316bb95749 100644
--- a/compiler-rt/lib/asan/asan_interceptors_memintrinsics.cpp
+++ b/compiler-rt/lib/asan/asan_interceptors_memintrinsics.cpp
@@ -30,9 +30,9 @@ void *__asan_memmove(void *to, const void *from, uptr size) {
ASAN_MEMMOVE_IMPL(nullptr, to, from, size);
}
-#if SANITIZER_FUCHSIA || SANITIZER_RTEMS
+#if SANITIZER_FUCHSIA
-// Fuchsia and RTEMS don't use sanitizer_common_interceptors.inc, but
+// Fuchsia doesn't use sanitizer_common_interceptors.inc, but
// the only things there it wants are these three. Just define them
// as aliases here rather than repeating the contents.
@@ -40,4 +40,4 @@ extern "C" decltype(__asan_memcpy) memcpy[[gnu::alias("__asan_memcpy")]];
extern "C" decltype(__asan_memmove) memmove[[gnu::alias("__asan_memmove")]];
extern "C" decltype(__asan_memset) memset[[gnu::alias("__asan_memset")]];
-#endif // SANITIZER_FUCHSIA || SANITIZER_RTEMS
+#endif // SANITIZER_FUCHSIA
diff --git a/compiler-rt/lib/asan/asan_interface.inc b/compiler-rt/lib/asan/asan_interface.inc
index 948010439827..ea28fc8ae87c 100644
--- a/compiler-rt/lib/asan/asan_interface.inc
+++ b/compiler-rt/lib/asan/asan_interface.inc
@@ -134,6 +134,17 @@ INTERFACE_FUNCTION(__asan_stack_malloc_7)
INTERFACE_FUNCTION(__asan_stack_malloc_8)
INTERFACE_FUNCTION(__asan_stack_malloc_9)
INTERFACE_FUNCTION(__asan_stack_malloc_10)
+INTERFACE_FUNCTION(__asan_stack_malloc_always_0)
+INTERFACE_FUNCTION(__asan_stack_malloc_always_1)
+INTERFACE_FUNCTION(__asan_stack_malloc_always_2)
+INTERFACE_FUNCTION(__asan_stack_malloc_always_3)
+INTERFACE_FUNCTION(__asan_stack_malloc_always_4)
+INTERFACE_FUNCTION(__asan_stack_malloc_always_5)
+INTERFACE_FUNCTION(__asan_stack_malloc_always_6)
+INTERFACE_FUNCTION(__asan_stack_malloc_always_7)
+INTERFACE_FUNCTION(__asan_stack_malloc_always_8)
+INTERFACE_FUNCTION(__asan_stack_malloc_always_9)
+INTERFACE_FUNCTION(__asan_stack_malloc_always_10)
INTERFACE_FUNCTION(__asan_store1)
INTERFACE_FUNCTION(__asan_store2)
INTERFACE_FUNCTION(__asan_store4)
diff --git a/compiler-rt/lib/asan/asan_internal.h b/compiler-rt/lib/asan/asan_internal.h
index cfb54927c6cf..ad3320304d0d 100644
--- a/compiler-rt/lib/asan/asan_internal.h
+++ b/compiler-rt/lib/asan/asan_internal.h
@@ -35,11 +35,11 @@
// If set, values like allocator chunk size, as well as defaults for some flags
// will be changed towards less memory overhead.
#ifndef ASAN_LOW_MEMORY
-# if SANITIZER_IOS || SANITIZER_ANDROID || SANITIZER_RTEMS
-# define ASAN_LOW_MEMORY 1
-# else
-# define ASAN_LOW_MEMORY 0
-# endif
+# if SANITIZER_IOS || SANITIZER_ANDROID
+# define ASAN_LOW_MEMORY 1
+# else
+# define ASAN_LOW_MEMORY 0
+# endif
#endif
#ifndef ASAN_DYNAMIC
@@ -77,7 +77,7 @@ void InitializeShadowMemory();
// asan_malloc_linux.cpp / asan_malloc_mac.cpp
void ReplaceSystemMalloc();
-// asan_linux.cpp / asan_mac.cpp / asan_rtems.cpp / asan_win.cpp
+// asan_linux.cpp / asan_mac.cpp / asan_win.cpp
uptr FindDynamicShadowStart();
void *AsanDoesNotSupportStaticLinkage();
void AsanCheckDynamicRTPrereqs();
@@ -159,9 +159,6 @@ const int kAsanArrayCookieMagic = 0xac;
const int kAsanIntraObjectRedzone = 0xbb;
const int kAsanAllocaLeftMagic = 0xca;
const int kAsanAllocaRightMagic = 0xcb;
-// Used to populate the shadow gap for systems without memory
-// protection there (i.e. Myriad).
-const int kAsanShadowGap = 0xcc;
static const uptr kCurrentStackFrameMagic = 0x41B58AB3;
static const uptr kRetiredStackFrameMagic = 0x45E0360E;
diff --git a/compiler-rt/lib/asan/asan_malloc_linux.cpp b/compiler-rt/lib/asan/asan_malloc_linux.cpp
index 9c3f0a5338ee..c6bec8551bc5 100644
--- a/compiler-rt/lib/asan/asan_malloc_linux.cpp
+++ b/compiler-rt/lib/asan/asan_malloc_linux.cpp
@@ -15,23 +15,22 @@
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_FUCHSIA || SANITIZER_LINUX || \
- SANITIZER_NETBSD || SANITIZER_RTEMS || SANITIZER_SOLARIS
+ SANITIZER_NETBSD || SANITIZER_SOLARIS
-#include "sanitizer_common/sanitizer_allocator_checks.h"
-#include "sanitizer_common/sanitizer_errno.h"
-#include "sanitizer_common/sanitizer_tls_get_addr.h"
-#include "asan_allocator.h"
-#include "asan_interceptors.h"
-#include "asan_internal.h"
-#include "asan_malloc_local.h"
-#include "asan_stack.h"
+# include "asan_allocator.h"
+# include "asan_interceptors.h"
+# include "asan_internal.h"
+# include "asan_stack.h"
+# include "sanitizer_common/sanitizer_allocator_checks.h"
+# include "sanitizer_common/sanitizer_errno.h"
+# include "sanitizer_common/sanitizer_tls_get_addr.h"
// ---------------------- Replacement functions ---------------- {{{1
using namespace __asan;
static uptr allocated_for_dlsym;
static uptr last_dlsym_alloc_size_in_words;
-static const uptr kDlsymAllocPoolSize = SANITIZER_RTEMS ? 4096 : 1024;
+static const uptr kDlsymAllocPoolSize = 1024;
static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize];
static inline bool IsInDlsymAllocPool(const void *ptr) {
@@ -82,27 +81,12 @@ static int PosixMemalignFromLocalPool(void **memptr, uptr alignment,
return 0;
}
-#if SANITIZER_RTEMS
-void* MemalignFromLocalPool(uptr alignment, uptr size) {
- void *ptr = nullptr;
- alignment = Max(alignment, kWordSize);
- PosixMemalignFromLocalPool(&ptr, alignment, size);
- return ptr;
-}
-
-bool IsFromLocalPool(const void *ptr) {
- return IsInDlsymAllocPool(ptr);
-}
-#endif
-
static inline bool MaybeInDlsym() {
// Fuchsia doesn't use dlsym-based interceptors.
return !SANITIZER_FUCHSIA && asan_init_is_running;
}
-static inline bool UseLocalPool() {
- return EarlyMalloc() || MaybeInDlsym();
-}
+static inline bool UseLocalPool() { return MaybeInDlsym(); }
static void *ReallocFromLocalPool(void *ptr, uptr size) {
const uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
diff --git a/compiler-rt/lib/asan/asan_malloc_local.h b/compiler-rt/lib/asan/asan_malloc_local.h
deleted file mode 100644
index e2c9be0379f2..000000000000
--- a/compiler-rt/lib/asan/asan_malloc_local.h
+++ /dev/null
@@ -1,52 +0,0 @@
-//===-- asan_malloc_local.h -------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of AddressSanitizer, an address sanity checker.
-//
-// Provide interfaces to check for and handle local pool memory allocation.
-//===----------------------------------------------------------------------===//
-
-#ifndef ASAN_MALLOC_LOCAL_H
-#define ASAN_MALLOC_LOCAL_H
-
-#include "sanitizer_common/sanitizer_platform.h"
-#include "asan_internal.h"
-
-static inline bool EarlyMalloc() {
- return SANITIZER_RTEMS &&
- (!__asan::asan_inited || __asan::asan_init_is_running);
-}
-
-#if SANITIZER_RTEMS
-
-bool IsFromLocalPool(const void *ptr);
-void *MemalignFromLocalPool(uptr alignment, uptr size);
-
-// On RTEMS, we use the local pool to handle memory allocation when the ASan
-// run-time is not up. This macro is expanded in the context of the operator new
-// implementation.
-#define MAYBE_ALLOCATE_FROM_LOCAL_POOL(nothrow) \
- do { \
- if (UNLIKELY(EarlyMalloc())) { \
- void *res = MemalignFromLocalPool(SHADOW_GRANULARITY, size); \
- if (!nothrow) \
- CHECK(res); \
- return res; \
- } \
- } while (0)
-
-#define IS_FROM_LOCAL_POOL(ptr) UNLIKELY(IsFromLocalPool(ptr))
-
-#else // SANITIZER_RTEMS
-
-#define MAYBE_ALLOCATE_FROM_LOCAL_POOL(nothrow)
-#define IS_FROM_LOCAL_POOL(ptr) 0
-
-#endif // SANITIZER_RTEMS
-
-#endif // ASAN_MALLOC_LOCAL_H
diff --git a/compiler-rt/lib/asan/asan_mapping.h b/compiler-rt/lib/asan/asan_mapping.h
index f239c3ee2ff9..e5a7f2007aea 100644
--- a/compiler-rt/lib/asan/asan_mapping.h
+++ b/compiler-rt/lib/asan/asan_mapping.h
@@ -72,6 +72,13 @@
// || `[0x2000000000, 0x23ffffffff]` || LowShadow ||
// || `[0x0000000000, 0x1fffffffff]` || LowMem ||
//
+// Default Linux/RISCV64 Sv39 mapping:
+// || `[0x1555550000, 0x3fffffffff]` || HighMem ||
+// || `[0x0fffffa000, 0x1555555fff]` || HighShadow ||
+// || `[0x0effffa000, 0x0fffff9fff]` || ShadowGap ||
+// || `[0x0d55550000, 0x0effff9fff]` || LowShadow ||
+// || `[0x0000000000, 0x0d5554ffff]` || LowMem ||
+//
// Default Linux/AArch64 (39-bit VMA) mapping:
// || `[0x2000000000, 0x7fffffffff]` || highmem ||
// || `[0x1400000000, 0x1fffffffff]` || highshadow ||
@@ -79,20 +86,6 @@
// || `[0x1000000000, 0x11ffffffff]` || lowshadow ||
// || `[0x0000000000, 0x0fffffffff]` || lowmem ||
//
-// RISC-V has only 38 bits for task size
-// Low mem size is set with kRiscv64_ShadowOffset64 in
-// compiler-rt/lib/asan/asan_allocator.h and in
-// llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp with
-// kRiscv64_ShadowOffset64, High mem top border is set with
-// GetMaxVirtualAddress() in
-// compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
-// Default Linux/RISCV64 Sv39/Sv48 mapping:
-// || `[0x000820000000, 0x003fffffffff]` || HighMem ||
-// || `[0x000124000000, 0x00081fffffff]` || HighShadow ||
-// || `[0x000024000000, 0x000123ffffff]` || ShadowGap ||
-// || `[0x000020000000, 0x000023ffffff]` || LowShadow ||
-// || `[0x000000000000, 0x00001fffffff]` || LowMem ||
-//
// Default Linux/AArch64 (42-bit VMA) mapping:
// || `[0x10000000000, 0x3ffffffffff]` || highmem ||
// || `[0x0a000000000, 0x0ffffffffff]` || highshadow ||
@@ -157,17 +150,11 @@
// || `[0x36000000, 0x39ffffff]` || ShadowGap ||
// || `[0x30000000, 0x35ffffff]` || LowShadow ||
// || `[0x00000000, 0x2fffffff]` || LowMem ||
-//
-// Shadow mapping on Myriad2 (for shadow scale 5):
-// || `[0x9ff80000, 0x9fffffff]` || ShadowGap ||
-// || `[0x9f000000, 0x9ff7ffff]` || LowShadow ||
-// || `[0x80000000, 0x9effffff]` || LowMem ||
-// || `[0x00000000, 0x7fffffff]` || Ignored ||
#if defined(ASAN_SHADOW_SCALE)
static const u64 kDefaultShadowScale = ASAN_SHADOW_SCALE;
#else
-static const u64 kDefaultShadowScale = SANITIZER_MYRIAD2 ? 5 : 3;
+static const u64 kDefaultShadowScale = 3;
#endif
static const u64 kDefaultShadowSentinel = ~(uptr)0;
static const u64 kDefaultShadowOffset32 = 1ULL << 29; // 0x20000000
@@ -175,7 +162,7 @@ static const u64 kDefaultShadowOffset64 = 1ULL << 44;
static const u64 kDefaultShort64bitShadowOffset =
0x7FFFFFFF & (~0xFFFULL << kDefaultShadowScale); // < 2G.
static const u64 kAArch64_ShadowOffset64 = 1ULL << 36;
-static const u64 kRiscv64_ShadowOffset64 = 0x20000000;
+static const u64 kRiscv64_ShadowOffset64 = 0xd55550000;
static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000;
static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37;
static const u64 kPPC64_ShadowOffset64 = 1ULL << 44;
@@ -187,15 +174,6 @@ static const u64 kNetBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000
static const u64 kNetBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000
static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000
-static const u64 kMyriadMemoryOffset32 = 0x80000000ULL;
-static const u64 kMyriadMemorySize32 = 0x20000000ULL;
-static const u64 kMyriadMemoryEnd32 =
- kMyriadMemoryOffset32 + kMyriadMemorySize32 - 1;
-static const u64 kMyriadShadowOffset32 =
- (kMyriadMemoryOffset32 + kMyriadMemorySize32 -
- (kMyriadMemorySize32 >> kDefaultShadowScale));
-static const u64 kMyriadCacheBitMask32 = 0x40000000ULL;
-
#define SHADOW_SCALE kDefaultShadowScale
#if SANITIZER_FUCHSIA
@@ -213,8 +191,6 @@ static const u64 kMyriadCacheBitMask32 = 0x40000000ULL;
# define SHADOW_OFFSET kWindowsShadowOffset32
# elif SANITIZER_IOS
# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
-# elif SANITIZER_MYRIAD2
-# define SHADOW_OFFSET kMyriadShadowOffset32
# else
# define SHADOW_OFFSET kDefaultShadowOffset32
# endif
@@ -285,10 +261,8 @@ extern uptr kHighMemEnd, kMidMemBeg, kMidMemEnd; // Initialized in __asan_init.
} // namespace __asan
-#if SANITIZER_MYRIAD2
-#include "asan_mapping_myriad.h"
-#elif defined(__sparc__) && SANITIZER_WORDSIZE == 64
-#include "asan_mapping_sparc64.h"
+#if defined(__sparc__) && SANITIZER_WORDSIZE == 64
+# include "asan_mapping_sparc64.h"
#else
#define MEM_TO_SHADOW(mem) (((mem) >> SHADOW_SCALE) + (SHADOW_OFFSET))
@@ -370,7 +344,7 @@ static inline bool AddrIsInShadowGap(uptr a) {
} // namespace __asan
-#endif // SANITIZER_MYRIAD2
+#endif
namespace __asan {
@@ -400,8 +374,6 @@ static inline bool AddrIsAlignedByGranularity(uptr a) {
static inline bool AddressIsPoisoned(uptr a) {
PROFILE_ASAN_MAPPING();
- if (SANITIZER_MYRIAD2 && !AddrIsInMem(a) && !AddrIsInShadow(a))
- return false;
const uptr kAccessSize = 1;
u8 *shadow_address = (u8*)MEM_TO_SHADOW(a);
s8 shadow_value = *shadow_address;
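
As a worked check of the MEM_TO_SHADOW formula above against the new Linux/RISCV64 Sv39 layout (SHADOW_SCALE == 3, SHADOW_OFFSET == kRiscv64_ShadowOffset64 == 0xd55550000):

#include <cstdint>

constexpr uint64_t kShadowScale = 3;
constexpr uint64_t kShadowOffset = 0xd55550000ULL;  // kRiscv64_ShadowOffset64
constexpr uint64_t MemToShadow(uint64_t mem) {
  return (mem >> kShadowScale) + kShadowOffset;
}

// Endpoints taken from the address-space table earlier in this file's diff.
static_assert(MemToShadow(0x0000000000ULL) == 0x0d55550000ULL,
              "LowMem start maps to LowShadow start");
static_assert(MemToShadow(0x0d5554ffffULL) == 0x0effff9fffULL,
              "LowMem end maps to LowShadow end");
static_assert(MemToShadow(0x1555550000ULL) == 0x0fffffa000ULL,
              "HighMem start maps to HighShadow start");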
diff --git a/compiler-rt/lib/asan/asan_mapping_myriad.h b/compiler-rt/lib/asan/asan_mapping_myriad.h
deleted file mode 100644
index 6969e3a49310..000000000000
--- a/compiler-rt/lib/asan/asan_mapping_myriad.h
+++ /dev/null
@@ -1,85 +0,0 @@
-//===-- asan_mapping_myriad.h -----------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of AddressSanitizer, an address sanity checker.
-//
-// Myriad-specific definitions for ASan memory mapping.
-//===----------------------------------------------------------------------===//
-#ifndef ASAN_MAPPING_MYRIAD_H
-#define ASAN_MAPPING_MYRIAD_H
-
-#define RAW_ADDR(mem) ((mem) & ~kMyriadCacheBitMask32)
-#define MEM_TO_SHADOW(mem) \
- (((RAW_ADDR(mem) - kLowMemBeg) >> SHADOW_SCALE) + (SHADOW_OFFSET))
-
-#define kLowMemBeg kMyriadMemoryOffset32
-#define kLowMemEnd (SHADOW_OFFSET - 1)
-
-#define kLowShadowBeg SHADOW_OFFSET
-#define kLowShadowEnd MEM_TO_SHADOW(kLowMemEnd)
-
-#define kHighMemBeg 0
-
-#define kHighShadowBeg 0
-#define kHighShadowEnd 0
-
-#define kMidShadowBeg 0
-#define kMidShadowEnd 0
-
-#define kShadowGapBeg (kLowShadowEnd + 1)
-#define kShadowGapEnd kMyriadMemoryEnd32
-
-#define kShadowGap2Beg 0
-#define kShadowGap2End 0
-
-#define kShadowGap3Beg 0
-#define kShadowGap3End 0
-
-namespace __asan {
-
-static inline bool AddrIsInLowMem(uptr a) {
- PROFILE_ASAN_MAPPING();
- a = RAW_ADDR(a);
- return a >= kLowMemBeg && a <= kLowMemEnd;
-}
-
-static inline bool AddrIsInLowShadow(uptr a) {
- PROFILE_ASAN_MAPPING();
- a = RAW_ADDR(a);
- return a >= kLowShadowBeg && a <= kLowShadowEnd;
-}
-
-static inline bool AddrIsInMidMem(uptr a) {
- PROFILE_ASAN_MAPPING();
- return false;
-}
-
-static inline bool AddrIsInMidShadow(uptr a) {
- PROFILE_ASAN_MAPPING();
- return false;
-}
-
-static inline bool AddrIsInHighMem(uptr a) {
- PROFILE_ASAN_MAPPING();
- return false;
-}
-
-static inline bool AddrIsInHighShadow(uptr a) {
- PROFILE_ASAN_MAPPING();
- return false;
-}
-
-static inline bool AddrIsInShadowGap(uptr a) {
- PROFILE_ASAN_MAPPING();
- a = RAW_ADDR(a);
- return a >= kShadowGapBeg && a <= kShadowGapEnd;
-}
-
-} // namespace __asan
-
-#endif // ASAN_MAPPING_MYRIAD_H
diff --git a/compiler-rt/lib/asan/asan_new_delete.cpp b/compiler-rt/lib/asan/asan_new_delete.cpp
index 5dfcc00fd5d1..da446072de18 100644
--- a/compiler-rt/lib/asan/asan_new_delete.cpp
+++ b/compiler-rt/lib/asan/asan_new_delete.cpp
@@ -11,16 +11,14 @@
// Interceptors for operators new and delete.
//===----------------------------------------------------------------------===//
+#include <stddef.h>
+
#include "asan_allocator.h"
#include "asan_internal.h"
-#include "asan_malloc_local.h"
#include "asan_report.h"
#include "asan_stack.h"
-
#include "interception/interception.h"
-#include <stddef.h>
-
// C++ operators can't have dllexport attributes on Windows. We export them
// anyway by passing extra -export flags to the linker, which is exactly that
// dllexport would normally do. We need to export them in order to make the
@@ -45,7 +43,7 @@ COMMENT_EXPORT("??_V@YAXPAX@Z") // operator delete[]
#endif
#undef COMMENT_EXPORT
#else
-#define CXX_OPERATOR_ATTRIBUTE INTERCEPTOR_ATTRIBUTE
+#define CXX_OPERATOR_ATTRIBUTE INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
#endif
using namespace __asan;
@@ -72,14 +70,12 @@ enum class align_val_t: size_t {};
// For local pool allocation, align to SHADOW_GRANULARITY to match asan
// allocator behavior.
#define OPERATOR_NEW_BODY(type, nothrow) \
- MAYBE_ALLOCATE_FROM_LOCAL_POOL(nothrow); \
GET_STACK_TRACE_MALLOC; \
void *res = asan_memalign(0, size, &stack, type); \
if (!nothrow && UNLIKELY(!res)) \
ReportOutOfMemory(size, &stack); \
return res;
#define OPERATOR_NEW_BODY_ALIGN(type, nothrow) \
- MAYBE_ALLOCATE_FROM_LOCAL_POOL(nothrow); \
GET_STACK_TRACE_MALLOC; \
void *res = asan_memalign((uptr)align, size, &stack, type); \
if (!nothrow && UNLIKELY(!res)) \
@@ -135,23 +131,19 @@ INTERCEPTOR(void *, _ZnamRKSt9nothrow_t, size_t size, std::nothrow_t const&) {
#endif // !SANITIZER_MAC
#define OPERATOR_DELETE_BODY(type) \
- if (IS_FROM_LOCAL_POOL(ptr)) return;\
- GET_STACK_TRACE_FREE;\
+ GET_STACK_TRACE_FREE; \
asan_delete(ptr, 0, 0, &stack, type);
#define OPERATOR_DELETE_BODY_SIZE(type) \
- if (IS_FROM_LOCAL_POOL(ptr)) return;\
- GET_STACK_TRACE_FREE;\
+ GET_STACK_TRACE_FREE; \
asan_delete(ptr, size, 0, &stack, type);
#define OPERATOR_DELETE_BODY_ALIGN(type) \
- if (IS_FROM_LOCAL_POOL(ptr)) return;\
- GET_STACK_TRACE_FREE;\
+ GET_STACK_TRACE_FREE; \
asan_delete(ptr, 0, static_cast<uptr>(align), &stack, type);
#define OPERATOR_DELETE_BODY_SIZE_ALIGN(type) \
- if (IS_FROM_LOCAL_POOL(ptr)) return;\
- GET_STACK_TRACE_FREE;\
+ GET_STACK_TRACE_FREE; \
asan_delete(ptr, size, static_cast<uptr>(align), &stack, type);
#if !SANITIZER_MAC
diff --git a/compiler-rt/lib/asan/asan_poisoning.cpp b/compiler-rt/lib/asan/asan_poisoning.cpp
index 44f872ef6190..5f215fe0f9bb 100644
--- a/compiler-rt/lib/asan/asan_poisoning.cpp
+++ b/compiler-rt/lib/asan/asan_poisoning.cpp
@@ -173,17 +173,13 @@ int __asan_address_is_poisoned(void const volatile *addr) {
}
uptr __asan_region_is_poisoned(uptr beg, uptr size) {
- if (!size) return 0;
+ if (!size)
+ return 0;
uptr end = beg + size;
- if (SANITIZER_MYRIAD2) {
- // On Myriad, address not in DRAM range need to be treated as
- // unpoisoned.
- if (!AddrIsInMem(beg) && !AddrIsInShadow(beg)) return 0;
- if (!AddrIsInMem(end) && !AddrIsInShadow(end)) return 0;
- } else {
- if (!AddrIsInMem(beg)) return beg;
- if (!AddrIsInMem(end)) return end;
- }
+ if (!AddrIsInMem(beg))
+ return beg;
+ if (!AddrIsInMem(end))
+ return end;
CHECK_LT(beg, end);
uptr aligned_b = RoundUpTo(beg, SHADOW_GRANULARITY);
uptr aligned_e = RoundDownTo(end, SHADOW_GRANULARITY);
@@ -192,8 +188,7 @@ uptr __asan_region_is_poisoned(uptr beg, uptr size) {
// First check the first and the last application bytes,
// then check the SHADOW_GRANULARITY-aligned region by calling
// mem_is_zero on the corresponding shadow.
- if (!__asan::AddressIsPoisoned(beg) &&
- !__asan::AddressIsPoisoned(end - 1) &&
+ if (!__asan::AddressIsPoisoned(beg) && !__asan::AddressIsPoisoned(end - 1) &&
(shadow_end <= shadow_beg ||
__sanitizer::mem_is_zero((const char *)shadow_beg,
shadow_end - shadow_beg)))
@@ -364,7 +359,7 @@ void __sanitizer_annotate_contiguous_container(const void *beg_p,
&stack);
}
CHECK_LE(end - beg,
- FIRST_32_SECOND_64(1UL << 30, 1ULL << 34)); // Sanity check.
+ FIRST_32_SECOND_64(1UL << 30, 1ULL << 40)); // Sanity check.
uptr a = RoundDownTo(Min(old_mid, new_mid), granularity);
uptr c = RoundUpTo(Max(old_mid, new_mid), granularity);
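The region check above first probes the first and last application bytes, then scans the SHADOW_GRANULARITY-aligned middle through the shadow with mem_is_zero. A minimal standalone sketch of the same idea in C, assuming an 8-byte granularity and a flat toy shadow[] array rather than ASan's real MEM_TO_SHADOW mapping:

#include <stdint.h>

enum { kGranularity = 8, kToyMemSize = 1 << 16 };
static uint8_t shadow[kToyMemSize / kGranularity];

// Simplified shadow semantics: 0 = all 8 bytes of the granule addressable,
// k in 1..7 = only the first k bytes addressable, anything else = poisoned.
static int byte_is_poisoned(uintptr_t addr) {
  int8_t s = (int8_t)shadow[addr / kGranularity];
  return s != 0 && (int8_t)(addr % kGranularity) >= s;
}

// Returns 0 if [beg, beg+size) is addressable, otherwise an address inside
// the first poisoned granule (mirroring __asan_region_is_poisoned).
static uintptr_t region_is_poisoned(uintptr_t beg, uintptr_t size) {
  if (!size)
    return 0;
  uintptr_t end = beg + size;
  if (byte_is_poisoned(beg))
    return beg;
  if (byte_is_poisoned(end - 1))
    return end - 1;
  // The granules fully inside [beg, end) are checked via their shadow bytes.
  for (uintptr_t g = (beg + kGranularity - 1) / kGranularity;
       g < end / kGranularity; g++)
    if (shadow[g])
      return g * kGranularity;
  return 0;
}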
diff --git a/compiler-rt/lib/asan/asan_poisoning.h b/compiler-rt/lib/asan/asan_poisoning.h
index 62dd9bd0edd3..3d536f2d3097 100644
--- a/compiler-rt/lib/asan/asan_poisoning.h
+++ b/compiler-rt/lib/asan/asan_poisoning.h
@@ -51,9 +51,6 @@ ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size,
// probably provide higher-level interface for these operations.
// For now, just memset on Windows.
if (value || SANITIZER_WINDOWS == 1 ||
- // RTEMS doesn't have have pages, let alone a fast way to zero
- // them, so default to memset.
- SANITIZER_RTEMS == 1 ||
shadow_end - shadow_beg < common_flags()->clear_shadow_mmap_threshold) {
REAL(memset)((void*)shadow_beg, value, shadow_end - shadow_beg);
} else {
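The branch above trades a plain memset against returning whole shadow pages to the OS: nonzero values and short ranges are cheapest to write directly, while large zero ranges can be dropped and re-faulted as zero pages. A small sketch of that decision, assuming a 64 KiB threshold and a hypothetical release_pages_to_os() helper in place of the real mmap path:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define CLEAR_SHADOW_MMAP_THRESHOLD (64u * 1024u)

static void poison_shadow_range(uint8_t *shadow_beg, uint8_t *shadow_end,
                                uint8_t value) {
  size_t len = (size_t)(shadow_end - shadow_beg);
  if (value != 0 || len < CLEAR_SHADOW_MMAP_THRESHOLD) {
    memset(shadow_beg, value, len);   // small or nonzero: just write it
  } else {
    // Large zero range: hand the pages back to the OS so they come back as
    // zero pages on the next touch (e.g. madvise(MADV_DONTNEED) on Linux).
    // release_pages_to_os(shadow_beg, shadow_end);   // hypothetical helper
  }
}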
diff --git a/compiler-rt/lib/asan/asan_posix.cpp b/compiler-rt/lib/asan/asan_posix.cpp
index d7f19d846544..63ad735f8bba 100644
--- a/compiler-rt/lib/asan/asan_posix.cpp
+++ b/compiler-rt/lib/asan/asan_posix.cpp
@@ -56,7 +56,7 @@ bool PlatformUnpoisonStacks() {
if (signal_stack.ss_flags != SS_ONSTACK)
return false;
- // Since we're on the signal altnerate stack, we cannot find the DEFAULT
+ // Since we're on the signal alternate stack, we cannot find the DEFAULT
// stack bottom using a local variable.
uptr default_bottom, tls_addr, tls_size, stack_size;
GetThreadStackAndTls(/*main=*/false, &default_bottom, &stack_size, &tls_addr,
diff --git a/compiler-rt/lib/asan/asan_rtems.cpp b/compiler-rt/lib/asan/asan_rtems.cpp
deleted file mode 100644
index ea0b4ad9db68..000000000000
--- a/compiler-rt/lib/asan/asan_rtems.cpp
+++ /dev/null
@@ -1,266 +0,0 @@
-//===-- asan_rtems.cpp ----------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of AddressSanitizer, an address sanity checker.
-//
-// RTEMS-specific details.
-//===----------------------------------------------------------------------===//
-
-#include "sanitizer_common/sanitizer_rtems.h"
-#if SANITIZER_RTEMS
-
-#include "asan_internal.h"
-#include "asan_interceptors.h"
-#include "asan_mapping.h"
-#include "asan_poisoning.h"
-#include "asan_report.h"
-#include "asan_stack.h"
-#include "sanitizer_common/sanitizer_common.h"
-#include "sanitizer_common/sanitizer_libc.h"
-
-#include <pthread.h>
-#include <stdlib.h>
-
-namespace __asan {
-
-static void ResetShadowMemory() {
- uptr shadow_start = SHADOW_OFFSET;
- uptr shadow_end = MEM_TO_SHADOW(kMyriadMemoryEnd32);
- uptr gap_start = MEM_TO_SHADOW(shadow_start);
- uptr gap_end = MEM_TO_SHADOW(shadow_end);
-
- REAL(memset)((void *)shadow_start, 0, shadow_end - shadow_start);
- REAL(memset)((void *)gap_start, kAsanShadowGap, gap_end - gap_start);
-}
-
-void InitializeShadowMemory() {
- kHighMemEnd = 0;
- kMidMemBeg = 0;
- kMidMemEnd = 0;
-
- ResetShadowMemory();
-}
-
-void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
- UNIMPLEMENTED();
-}
-
-void FlushUnneededASanShadowMemory(uptr p, uptr size) {
- // Since asan's mapping is compacting, the shadow chunk may be
- // not page-aligned, so we only flush the page-aligned portion.
- ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
-}
-
-void AsanCheckDynamicRTPrereqs() {}
-void AsanCheckIncompatibleRT() {}
-void InitializeAsanInterceptors() {}
-void InitializePlatformInterceptors() {}
-void InitializePlatformExceptionHandlers() {}
-
-// RTEMS only support static linking; it sufficies to return with no
-// error.
-void *AsanDoesNotSupportStaticLinkage() { return nullptr; }
-
-void AsanOnDeadlySignal(int signo, void *siginfo, void *context) {
- UNIMPLEMENTED();
-}
-
-bool PlatformUnpoisonStacks() { return false; }
-
-void EarlyInit() {
- // Provide early initialization of shadow memory so that
- // instrumented code running before full initialzation will not
- // report spurious errors.
- ResetShadowMemory();
-}
-
-// We can use a plain thread_local variable for TSD.
-static thread_local void *per_thread;
-
-void *AsanTSDGet() { return per_thread; }
-
-void AsanTSDSet(void *tsd) { per_thread = tsd; }
-
-// There's no initialization needed, and the passed-in destructor
-// will never be called. Instead, our own thread destruction hook
-// (below) will call AsanThread::TSDDtor directly.
-void AsanTSDInit(void (*destructor)(void *tsd)) {
- DCHECK(destructor == &PlatformTSDDtor);
-}
-
-void PlatformTSDDtor(void *tsd) { UNREACHABLE(__func__); }
-
-//
-// Thread registration. We provide an API similar to the Fushia port.
-//
-
-struct AsanThread::InitOptions {
- uptr stack_bottom, stack_size, tls_bottom, tls_size;
-};
-
-// Shared setup between thread creation and startup for the initial thread.
-static AsanThread *CreateAsanThread(StackTrace *stack, u32 parent_tid,
- uptr user_id, bool detached,
- uptr stack_bottom, uptr stack_size,
- uptr tls_bottom, uptr tls_size) {
- // In lieu of AsanThread::Create.
- AsanThread *thread = (AsanThread *)MmapOrDie(sizeof(AsanThread), __func__);
- AsanThreadContext::CreateThreadContextArgs args = {thread, stack};
- asanThreadRegistry().CreateThread(user_id, detached, parent_tid, &args);
-
- // On other systems, AsanThread::Init() is called from the new
- // thread itself. But on RTEMS we already know the stack address
- // range beforehand, so we can do most of the setup right now.
- const AsanThread::InitOptions options = {stack_bottom, stack_size,
- tls_bottom, tls_size};
- thread->Init(&options);
- return thread;
-}
-
-// This gets the same arguments passed to Init by CreateAsanThread, above.
-// We're in the creator thread before the new thread is actually started, but
-// its stack and tls address range are already known.
-void AsanThread::SetThreadStackAndTls(const AsanThread::InitOptions *options) {
- DCHECK_NE(GetCurrentThread(), this);
- DCHECK_NE(GetCurrentThread(), nullptr);
- CHECK_NE(options->stack_bottom, 0);
- CHECK_NE(options->stack_size, 0);
- stack_bottom_ = options->stack_bottom;
- stack_top_ = options->stack_bottom + options->stack_size;
- tls_begin_ = options->tls_bottom;
- tls_end_ = options->tls_bottom + options->tls_size;
-}
-
-// Called by __asan::AsanInitInternal (asan_rtl.c). Unlike other ports, the
-// main thread on RTEMS does not require special treatment; its AsanThread is
-// already created by the provided hooks. This function simply looks up and
-// returns the created thread.
-AsanThread *CreateMainThread() {
- return GetThreadContextByTidLocked(0)->thread;
-}
-
-// This is called before each thread creation is attempted. So, in
-// its first call, the calling thread is the initial and sole thread.
-static void *BeforeThreadCreateHook(uptr user_id, bool detached,
- uptr stack_bottom, uptr stack_size,
- uptr tls_bottom, uptr tls_size) {
- EnsureMainThreadIDIsCorrect();
- // Strict init-order checking is thread-hostile.
- if (flags()->strict_init_order) StopInitOrderChecking();
-
- GET_STACK_TRACE_THREAD;
- u32 parent_tid = GetCurrentTidOrInvalid();
-
- return CreateAsanThread(&stack, parent_tid, user_id, detached,
- stack_bottom, stack_size, tls_bottom, tls_size);
-}
-
-// This is called after creating a new thread (in the creating thread),
-// with the pointer returned by BeforeThreadCreateHook (above).
-static void ThreadCreateHook(void *hook, bool aborted) {
- AsanThread *thread = static_cast<AsanThread *>(hook);
- if (!aborted) {
- // The thread was created successfully.
- // ThreadStartHook is already running in the new thread.
- } else {
- // The thread wasn't created after all.
- // Clean up everything we set up in BeforeThreadCreateHook.
- asanThreadRegistry().FinishThread(thread->tid());
- UnmapOrDie(thread, sizeof(AsanThread));
- }
-}
-
-// This is called (1) in the newly-created thread before it runs anything else,
-// with the pointer returned by BeforeThreadCreateHook (above). (2) before a
-// thread restart.
-static void ThreadStartHook(void *hook, uptr os_id) {
- if (!hook)
- return;
-
- AsanThread *thread = static_cast<AsanThread *>(hook);
- SetCurrentThread(thread);
-
- ThreadStatus status =
- asanThreadRegistry().GetThreadLocked(thread->tid())->status;
- DCHECK(status == ThreadStatusCreated || status == ThreadStatusRunning);
- // Determine whether we are starting or restarting the thread.
- if (status == ThreadStatusCreated) {
- // In lieu of AsanThread::ThreadStart.
- asanThreadRegistry().StartThread(thread->tid(), os_id, ThreadType::Regular,
- nullptr);
- } else {
- // In a thread restart, a thread may resume execution at an
- // arbitrary function entry point, with its stack and TLS state
- // reset. We unpoison the stack in that case.
- PoisonShadow(thread->stack_bottom(), thread->stack_size(), 0);
- }
-}
-
-// Each thread runs this just before it exits,
-// with the pointer returned by BeforeThreadCreateHook (above).
-// All per-thread destructors have already been called.
-static void ThreadExitHook(void *hook, uptr os_id) {
- AsanThread *thread = static_cast<AsanThread *>(hook);
- if (thread)
- AsanThread::TSDDtor(thread->context());
-}
-
-static void HandleExit() {
- // Disable ASan by setting it to uninitialized. Also reset the
- // shadow memory to avoid reporting errors after the run-time has
- // been desroyed.
- if (asan_inited) {
- asan_inited = false;
- ResetShadowMemory();
- }
-}
-
-bool HandleDlopenInit() {
- // Not supported on this platform.
- static_assert(!SANITIZER_SUPPORTS_INIT_FOR_DLOPEN,
- "Expected SANITIZER_SUPPORTS_INIT_FOR_DLOPEN to be false");
- return false;
-}
-} // namespace __asan
-
-// These are declared (in extern "C") by <some_path/sanitizer.h>.
-// The system runtime will call our definitions directly.
-
-extern "C" {
-void __sanitizer_early_init() {
- __asan::EarlyInit();
-}
-
-void *__sanitizer_before_thread_create_hook(uptr thread, bool detached,
- const char *name,
- void *stack_base, size_t stack_size,
- void *tls_base, size_t tls_size) {
- return __asan::BeforeThreadCreateHook(
- thread, detached,
- reinterpret_cast<uptr>(stack_base), stack_size,
- reinterpret_cast<uptr>(tls_base), tls_size);
-}
-
-void __sanitizer_thread_create_hook(void *handle, uptr thread, int status) {
- __asan::ThreadCreateHook(handle, status != 0);
-}
-
-void __sanitizer_thread_start_hook(void *handle, uptr self) {
- __asan::ThreadStartHook(handle, self);
-}
-
-void __sanitizer_thread_exit_hook(void *handle, uptr self) {
- __asan::ThreadExitHook(handle, self);
-}
-
-void __sanitizer_exit() {
- __asan::HandleExit();
-}
-} // "C"
-
-#endif // SANITIZER_RTEMS
diff --git a/compiler-rt/lib/asan/asan_rtl.cpp b/compiler-rt/lib/asan/asan_rtl.cpp
index 7b5a929963c6..bfaa3bc27027 100644
--- a/compiler-rt/lib/asan/asan_rtl.cpp
+++ b/compiler-rt/lib/asan/asan_rtl.cpp
@@ -13,6 +13,7 @@
#include "asan_activation.h"
#include "asan_allocator.h"
+#include "asan_fake_stack.h"
#include "asan_interceptors.h"
#include "asan_interface_internal.h"
#include "asan_internal.h"
@@ -23,11 +24,11 @@
#include "asan_stats.h"
#include "asan_suppressions.h"
#include "asan_thread.h"
+#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
-#include "lsan/lsan_common.h"
#include "ubsan/ubsan_init.h"
#include "ubsan/ubsan_platform.h"
@@ -62,19 +63,9 @@ static void AsanDie() {
}
}
-static void AsanCheckFailed(const char *file, int line, const char *cond,
- u64 v1, u64 v2) {
- Report("AddressSanitizer CHECK failed: %s:%d \"%s\" (0x%zx, 0x%zx)\n", file,
- line, cond, (uptr)v1, (uptr)v2);
-
- // Print a stack trace the first time we come here. Otherwise, we probably
- // failed a CHECK during symbolization.
- static atomic_uint32_t num_calls;
- if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) == 0) {
- PRINT_CURRENT_STACK_CHECK();
- }
-
- Die();
+static void CheckUnwind() {
+ GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_check);
+ stack.Print();
}
// -------------------------- Globals --------------------- {{{1
@@ -91,6 +82,17 @@ void ShowStatsAndAbort() {
Die();
}
+NOINLINE
+static void ReportGenericErrorWrapper(uptr addr, bool is_write, int size,
+ int exp_arg, bool fatal) {
+ if (__asan_test_only_reported_buggy_pointer) {
+ *__asan_test_only_reported_buggy_pointer = addr;
+ } else {
+ GET_CALLER_PC_BP_SP;
+ ReportGenericError(pc, bp, sp, addr, is_write, size, exp_arg, fatal);
+ }
+}
+
// --------------- LowLevelAllocateCallbac ---------- {{{1
static void OnLowLevelAllocate(uptr ptr, uptr size) {
PoisonShadow(ptr, size, kAsanInternalHeapMagic);
@@ -147,24 +149,16 @@ ASAN_REPORT_ERROR_N(load, false)
ASAN_REPORT_ERROR_N(store, true)
#define ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, exp_arg, fatal) \
- if (SANITIZER_MYRIAD2 && !AddrIsInMem(addr) && !AddrIsInShadow(addr)) \
- return; \
- uptr sp = MEM_TO_SHADOW(addr); \
- uptr s = size <= SHADOW_GRANULARITY ? *reinterpret_cast<u8 *>(sp) \
- : *reinterpret_cast<u16 *>(sp); \
- if (UNLIKELY(s)) { \
- if (UNLIKELY(size >= SHADOW_GRANULARITY || \
- ((s8)((addr & (SHADOW_GRANULARITY - 1)) + size - 1)) >= \
- (s8)s)) { \
- if (__asan_test_only_reported_buggy_pointer) { \
- *__asan_test_only_reported_buggy_pointer = addr; \
- } else { \
- GET_CALLER_PC_BP_SP; \
- ReportGenericError(pc, bp, sp, addr, is_write, size, exp_arg, \
- fatal); \
- } \
- } \
- }
+ uptr sp = MEM_TO_SHADOW(addr); \
+ uptr s = size <= SHADOW_GRANULARITY ? *reinterpret_cast<u8 *>(sp) \
+ : *reinterpret_cast<u16 *>(sp); \
+ if (UNLIKELY(s)) { \
+ if (UNLIKELY(size >= SHADOW_GRANULARITY || \
+ ((s8)((addr & (SHADOW_GRANULARITY - 1)) + size - 1)) >= \
+ (s8)s)) { \
+ ReportGenericErrorWrapper(addr, is_write, size, exp_arg, fatal); \
+ } \
+ }
#define ASAN_MEMORY_ACCESS_CALLBACK(type, is_write, size) \
extern "C" NOINLINE INTERFACE_ATTRIBUTE \
@@ -315,7 +309,6 @@ static void asan_atexit() {
}
static void InitializeHighMemEnd() {
-#if !SANITIZER_MYRIAD2
#if !ASAN_FIXED_MAPPING
kHighMemEnd = GetMaxUserVirtualAddress();
// Increase kHighMemEnd to make sure it's properly
@@ -323,7 +316,6 @@ static void InitializeHighMemEnd() {
kHighMemEnd |= (GetMmapGranularity() << SHADOW_SCALE) - 1;
#endif // !ASAN_FIXED_MAPPING
CHECK_EQ((kHighMemBeg % GetMmapGranularity()), 0);
-#endif // !SANITIZER_MYRIAD2
}
void PrintAddressSpaceLayout() {
@@ -432,7 +424,7 @@ static void AsanInitInternal() {
// Install tool-specific callbacks in sanitizer_common.
AddDieCallback(AsanDie);
- SetCheckFailedCallback(AsanCheckFailed);
+ SetCheckUnwindCallback(CheckUnwind);
SetPrintfAndReportCallback(AppendToErrorMessageBuffer);
__sanitizer_set_report_path(common_flags()->log_path);
@@ -568,7 +560,7 @@ void UnpoisonStack(uptr bottom, uptr top, const char *type) {
type, top, bottom, top - bottom, top - bottom);
return;
}
- PoisonShadow(bottom, top - bottom, 0);
+ PoisonShadow(bottom, RoundUpTo(top - bottom, SHADOW_GRANULARITY), 0);
}
static void UnpoisonDefaultStack() {
@@ -579,9 +571,6 @@ static void UnpoisonDefaultStack() {
const uptr page_size = GetPageSizeCached();
top = curr_thread->stack_top();
bottom = ((uptr)&local_stack - page_size) & ~(page_size - 1);
- } else if (SANITIZER_RTEMS) {
- // Give up On RTEMS.
- return;
} else {
CHECK(!SANITIZER_FUCHSIA);
// If we haven't seen this thread, try asking the OS for stack bounds.
@@ -596,8 +585,12 @@ static void UnpoisonDefaultStack() {
static void UnpoisonFakeStack() {
AsanThread *curr_thread = GetCurrentThread();
- if (curr_thread && curr_thread->has_fake_stack())
- curr_thread->fake_stack()->HandleNoReturn();
+ if (!curr_thread)
+ return;
+ FakeStack *stack = curr_thread->get_fake_stack();
+ if (!stack)
+ return;
+ stack->HandleNoReturn();
}
} // namespace __asan
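The InitializeHighMemEnd hunk relies on a small alignment trick: or-ing kHighMemEnd with (mmap granularity << SHADOW_SCALE) - 1 makes the shadow of the very top of memory land exactly on an mmap-granularity boundary. A toy illustration with made-up numbers, assuming a 4 KiB granularity and the usual shadow scale of 3:

#include <stdint.h>
#include <stdio.h>

#define SHADOW_SCALE 3
#define MMAP_GRANULARITY 4096ull

int main(void) {
  uint64_t high_mem_end = 0x7ffffffdf000ull;   // example value from the OS
  high_mem_end |= (MMAP_GRANULARITY << SHADOW_SCALE) - 1;
  // (high_mem_end + 1) >> SHADOW_SCALE is now a multiple of the granularity,
  // so the shadow region can be mapped exactly up to its end.
  printf("high_mem_end = %llx, shadow bytes end at %llx\n",
         (unsigned long long)high_mem_end,
         (unsigned long long)((high_mem_end + 1) >> SHADOW_SCALE));
  return 0;
}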
diff --git a/compiler-rt/lib/asan/asan_shadow_setup.cpp b/compiler-rt/lib/asan/asan_shadow_setup.cpp
index 2ead4425add8..6e6260d3413f 100644
--- a/compiler-rt/lib/asan/asan_shadow_setup.cpp
+++ b/compiler-rt/lib/asan/asan_shadow_setup.cpp
@@ -13,12 +13,11 @@
#include "sanitizer_common/sanitizer_platform.h"
-// asan_fuchsia.cpp and asan_rtems.cpp have their own
-// InitializeShadowMemory implementation.
-#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
+// asan_fuchsia.cpp has its own InitializeShadowMemory implementation.
+#if !SANITIZER_FUCHSIA
-#include "asan_internal.h"
-#include "asan_mapping.h"
+# include "asan_internal.h"
+# include "asan_mapping.h"
namespace __asan {
@@ -123,4 +122,4 @@ void InitializeShadowMemory() {
} // namespace __asan
-#endif // !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
+#endif // !SANITIZER_FUCHSIA
diff --git a/compiler-rt/lib/asan/asan_stack.cpp b/compiler-rt/lib/asan/asan_stack.cpp
index b7f4e6aeeab0..048295d6928a 100644
--- a/compiler-rt/lib/asan/asan_stack.cpp
+++ b/compiler-rt/lib/asan/asan_stack.cpp
@@ -74,7 +74,8 @@ void __sanitizer::BufferedStackTrace::UnwindImpl(
if (SANITIZER_MIPS && t &&
!IsValidFrame(bp, t->stack_top(), t->stack_bottom()))
return;
- Unwind(max_depth, pc, bp, context, 0, 0, false);
+ Unwind(max_depth, pc, bp, context, t ? t->stack_top() : 0,
+ t ? t->stack_bottom() : 0, false);
}
// ------------------ Interface -------------- {{{1
diff --git a/compiler-rt/lib/asan/asan_stack.h b/compiler-rt/lib/asan/asan_stack.h
index 47ca85a16443..b9575d2f427e 100644
--- a/compiler-rt/lib/asan/asan_stack.h
+++ b/compiler-rt/lib/asan/asan_stack.h
@@ -54,9 +54,6 @@ u32 GetMallocContextSize();
#define GET_STACK_TRACE_FATAL_HERE \
GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal)
-#define GET_STACK_TRACE_CHECK_HERE \
- GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_check)
-
#define GET_STACK_TRACE_THREAD \
GET_STACK_TRACE(kStackTraceMax, true)
@@ -71,10 +68,4 @@ u32 GetMallocContextSize();
stack.Print(); \
}
-#define PRINT_CURRENT_STACK_CHECK() \
- { \
- GET_STACK_TRACE_CHECK_HERE; \
- stack.Print(); \
- }
-
#endif // ASAN_STACK_H
diff --git a/compiler-rt/lib/asan/asan_thread.cpp b/compiler-rt/lib/asan/asan_thread.cpp
index 19ac6c1627ca..35d4467e7b53 100644
--- a/compiler-rt/lib/asan/asan_thread.cpp
+++ b/compiler-rt/lib/asan/asan_thread.cpp
@@ -60,8 +60,8 @@ ThreadRegistry &asanThreadRegistry() {
// in TSD and can't reliably tell when no more TSD destructors will
// be called. It would be wrong to reuse AsanThreadContext for another
// thread before all TSD destructors will be called for it.
- asan_thread_registry = new(thread_registry_placeholder) ThreadRegistry(
- GetAsanThreadContext, kMaxNumberOfThreads, kMaxNumberOfThreads);
+ asan_thread_registry =
+ new (thread_registry_placeholder) ThreadRegistry(GetAsanThreadContext);
initialized = true;
}
return *asan_thread_registry;
@@ -100,18 +100,27 @@ void AsanThread::Destroy() {
int tid = this->tid();
VReport(1, "T%d exited\n", tid);
- malloc_storage().CommitBack();
- if (common_flags()->use_sigaltstack) UnsetAlternateSignalStack();
- asanThreadRegistry().FinishThread(tid);
- FlushToDeadThreadStats(&stats_);
- // We also clear the shadow on thread destruction because
- // some code may still be executing in later TSD destructors
- // and we don't want it to have any poisoned stack.
- ClearShadowForThreadStackAndTLS();
- DeleteFakeStack(tid);
+ bool was_running =
+ (asanThreadRegistry().FinishThread(tid) == ThreadStatusRunning);
+ if (was_running) {
+ if (AsanThread *thread = GetCurrentThread())
+ CHECK_EQ(this, thread);
+ malloc_storage().CommitBack();
+ if (common_flags()->use_sigaltstack)
+ UnsetAlternateSignalStack();
+ FlushToDeadThreadStats(&stats_);
+ // We also clear the shadow on thread destruction because
+ // some code may still be executing in later TSD destructors
+ // and we don't want it to have any poisoned stack.
+ ClearShadowForThreadStackAndTLS();
+ DeleteFakeStack(tid);
+ } else {
+ CHECK_NE(this, GetCurrentThread());
+ }
uptr size = RoundUpTo(sizeof(AsanThread), GetPageSizeCached());
UnmapOrDie(this, size);
- DTLS_Destroy();
+ if (was_running)
+ DTLS_Destroy();
}
void AsanThread::StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom,
@@ -219,7 +228,7 @@ FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
}
void AsanThread::Init(const InitOptions *options) {
- DCHECK_NE(tid(), ThreadRegistry::kUnknownTid);
+ DCHECK_NE(tid(), kInvalidTid);
next_stack_top_ = next_stack_bottom_ = 0;
atomic_store(&stack_switching_, false, memory_order_release);
CHECK_EQ(this->stack_size(), 0U);
@@ -248,10 +257,9 @@ void AsanThread::Init(const InitOptions *options) {
&local);
}
-// Fuchsia and RTEMS don't use ThreadStart.
-// asan_fuchsia.c/asan_rtems.c define CreateMainThread and
-// SetThreadStackAndTls.
-#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
+// Fuchsia doesn't use ThreadStart.
+// asan_fuchsia.cpp defines CreateMainThread and SetThreadStackAndTls.
+#if !SANITIZER_FUCHSIA
thread_return_t AsanThread::ThreadStart(tid_t os_id) {
Init();
@@ -282,7 +290,7 @@ thread_return_t AsanThread::ThreadStart(tid_t os_id) {
AsanThread *CreateMainThread() {
AsanThread *main_thread = AsanThread::Create(
- /* start_routine */ nullptr, /* arg */ nullptr, /* parent_tid */ 0,
+ /* start_routine */ nullptr, /* arg */ nullptr, /* parent_tid */ kMainTid,
/* stack */ nullptr, /* detached */ true);
SetCurrentThread(main_thread);
main_thread->ThreadStart(internal_getpid());
@@ -296,9 +304,9 @@ void AsanThread::SetThreadStackAndTls(const InitOptions *options) {
DCHECK_EQ(options, nullptr);
uptr tls_size = 0;
uptr stack_size = 0;
- GetThreadStackAndTls(tid() == 0, &stack_bottom_, &stack_size, &tls_begin_,
- &tls_size);
- stack_top_ = stack_bottom_ + stack_size;
+ GetThreadStackAndTls(tid() == kMainTid, &stack_bottom_, &stack_size,
+ &tls_begin_, &tls_size);
+ stack_top_ = RoundDownTo(stack_bottom_ + stack_size, SHADOW_GRANULARITY);
tls_end_ = tls_begin_ + tls_size;
dtls_ = DTLS_Get();
@@ -308,7 +316,7 @@ void AsanThread::SetThreadStackAndTls(const InitOptions *options) {
}
}
-#endif // !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
+#endif // !SANITIZER_FUCHSIA
void AsanThread::ClearShadowForThreadStackAndTLS() {
if (stack_top_ != stack_bottom_)
@@ -330,8 +338,8 @@ bool AsanThread::GetStackFrameAccessByAddr(uptr addr,
uptr bottom = 0;
if (AddrIsInStack(addr)) {
bottom = stack_bottom();
- } else if (has_fake_stack()) {
- bottom = fake_stack()->AddrIsInFakeStack(addr);
+ } else if (FakeStack *fake_stack = get_fake_stack()) {
+ bottom = fake_stack->AddrIsInFakeStack(addr);
CHECK(bottom);
access->offset = addr - bottom;
access->frame_pc = ((uptr*)bottom)[2];
@@ -371,8 +379,8 @@ uptr AsanThread::GetStackVariableShadowStart(uptr addr) {
uptr bottom = 0;
if (AddrIsInStack(addr)) {
bottom = stack_bottom();
- } else if (has_fake_stack()) {
- bottom = fake_stack()->AddrIsInFakeStack(addr);
+ } else if (FakeStack *fake_stack = get_fake_stack()) {
+ bottom = fake_stack->AddrIsInFakeStack(addr);
if (bottom == 0) {
return 0;
}
@@ -400,19 +408,19 @@ bool AsanThread::AddrIsInStack(uptr addr) {
static bool ThreadStackContainsAddress(ThreadContextBase *tctx_base,
void *addr) {
- AsanThreadContext *tctx = static_cast<AsanThreadContext*>(tctx_base);
+ AsanThreadContext *tctx = static_cast<AsanThreadContext *>(tctx_base);
AsanThread *t = tctx->thread;
- if (!t) return false;
- if (t->AddrIsInStack((uptr)addr)) return true;
- if (t->has_fake_stack() && t->fake_stack()->AddrIsInFakeStack((uptr)addr))
+ if (!t)
+ return false;
+ if (t->AddrIsInStack((uptr)addr))
return true;
- return false;
+ FakeStack *fake_stack = t->get_fake_stack();
+ if (!fake_stack)
+ return false;
+ return fake_stack->AddrIsInFakeStack((uptr)addr);
}
AsanThread *GetCurrentThread() {
- if (SANITIZER_RTEMS && !asan_inited)
- return nullptr;
-
AsanThreadContext *context =
reinterpret_cast<AsanThreadContext *>(AsanTSDGet());
if (!context) {
@@ -422,7 +430,7 @@ AsanThread *GetCurrentThread() {
// address. We are not entirely sure that we have correct main thread
// limits, so only do this magic on Android, and only if the found thread
// is the main thread.
- AsanThreadContext *tctx = GetThreadContextByTidLocked(0);
+ AsanThreadContext *tctx = GetThreadContextByTidLocked(kMainTid);
if (tctx && ThreadStackContainsAddress(tctx, &context)) {
SetCurrentThread(tctx->thread);
return tctx->thread;
@@ -459,7 +467,7 @@ AsanThread *FindThreadByStackAddress(uptr addr) {
void EnsureMainThreadIDIsCorrect() {
AsanThreadContext *context =
reinterpret_cast<AsanThreadContext *>(AsanTSDGet());
- if (context && (context->tid == 0))
+ if (context && (context->tid == kMainTid))
context->os_id = GetTid();
}
@@ -494,8 +502,12 @@ void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches) {}
void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
void *arg) {
__asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
- if (t && t->has_fake_stack())
- t->fake_stack()->ForEachFakeFrame(callback, arg);
+ if (!t)
+ return;
+ __asan::FakeStack *fake_stack = t->get_fake_stack();
+ if (!fake_stack)
+ return;
+ fake_stack->ForEachFakeFrame(callback, arg);
}
void LockThreadRegistry() {
diff --git a/compiler-rt/lib/asan/asan_thread.h b/compiler-rt/lib/asan/asan_thread.h
index c33955eee367..801a3960ec6c 100644
--- a/compiler-rt/lib/asan/asan_thread.h
+++ b/compiler-rt/lib/asan/asan_thread.h
@@ -28,9 +28,6 @@ struct DTLS;
namespace __asan {
-const u32 kInvalidTid = 0xffffff; // Must fit into 24 bits.
-const u32 kMaxNumberOfThreads = (1 << 22); // 4M
-
class AsanThread;
// These objects are created for every thread and are never deleted,
@@ -105,17 +102,18 @@ class AsanThread {
void FinishSwitchFiber(FakeStack *fake_stack_save, uptr *bottom_old,
uptr *size_old);
- bool has_fake_stack() {
- return !atomic_load(&stack_switching_, memory_order_relaxed) &&
- (reinterpret_cast<uptr>(fake_stack_) > 1);
+ FakeStack *get_fake_stack() {
+ if (atomic_load(&stack_switching_, memory_order_relaxed))
+ return nullptr;
+ if (reinterpret_cast<uptr>(fake_stack_) <= 1)
+ return nullptr;
+ return fake_stack_;
}
- FakeStack *fake_stack() {
- if (!__asan_option_detect_stack_use_after_return)
- return nullptr;
+ FakeStack *get_or_create_fake_stack() {
if (atomic_load(&stack_switching_, memory_order_relaxed))
return nullptr;
- if (!has_fake_stack())
+ if (reinterpret_cast<uptr>(fake_stack_) <= 1)
return AsyncSignalSafeLazyInitFakeStack();
return fake_stack_;
}
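The has_fake_stack()/fake_stack() pair is replaced above by nullable getters so callers take one snapshot and branch on it instead of re-reading racy state twice. A minimal C rendering of the caller-side pattern, with a stand-in thread struct rather than AsanThread:

#include <stddef.h>
#include <stdint.h>

struct fake_stack;                       // opaque in this sketch
struct thread {
  int stack_switching;                   // stand-in for the atomic flag
  struct fake_stack *fake_stack_;        // 0/1 mean "none yet"/"initializing"
};

static struct fake_stack *get_fake_stack(struct thread *t) {
  if (t->stack_switching)
    return NULL;                         // state is in flux: report "none"
  if ((uintptr_t)t->fake_stack_ <= 1)
    return NULL;                         // not allocated yet
  return t->fake_stack_;
}

// Caller side: a single test-and-use, with no gap between a separate
// has_fake_stack() check and a later fake_stack() dereference.
static void handle_no_return(struct thread *t) {
  struct fake_stack *fs = get_fake_stack(t);
  if (fs) {
    // fs->HandleNoReturn() in the real code
  }
}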
diff --git a/compiler-rt/lib/builtins/aarch64/fp_mode.c b/compiler-rt/lib/builtins/aarch64/fp_mode.c
index 5a413689d2c8..94c2ff3bb26d 100644
--- a/compiler-rt/lib/builtins/aarch64/fp_mode.c
+++ b/compiler-rt/lib/builtins/aarch64/fp_mode.c
@@ -23,24 +23,25 @@
#ifndef __ARM_FP
// For soft float targets, allow changing rounding mode by overriding the weak
// __aarch64_fe_default_rmode symbol.
-FE_ROUND_MODE __attribute__((weak)) __aarch64_fe_default_rmode = FE_TONEAREST;
+CRT_FE_ROUND_MODE __attribute__((weak)) __aarch64_fe_default_rmode =
+ CRT_FE_TONEAREST;
#endif
-FE_ROUND_MODE __fe_getround() {
+CRT_FE_ROUND_MODE __fe_getround() {
#ifdef __ARM_FP
uint64_t fpcr;
__asm__ __volatile__("mrs %0, fpcr" : "=r" (fpcr));
fpcr = fpcr >> AARCH64_RMODE_SHIFT & AARCH64_RMODE_MASK;
switch (fpcr) {
case AARCH64_UPWARD:
- return FE_UPWARD;
+ return CRT_FE_UPWARD;
case AARCH64_DOWNWARD:
- return FE_DOWNWARD;
+ return CRT_FE_DOWNWARD;
case AARCH64_TOWARDZERO:
- return FE_TOWARDZERO;
+ return CRT_FE_TOWARDZERO;
case AARCH64_TONEAREST:
default:
- return FE_TONEAREST;
+ return CRT_FE_TONEAREST;
}
#else
return __aarch64_fe_default_rmode;
diff --git a/compiler-rt/lib/builtins/arm/fp_mode.c b/compiler-rt/lib/builtins/arm/fp_mode.c
index 300b71935ad4..f356e0b1316b 100644
--- a/compiler-rt/lib/builtins/arm/fp_mode.c
+++ b/compiler-rt/lib/builtins/arm/fp_mode.c
@@ -18,29 +18,30 @@
ARM_DOWNWARD | ARM_TOWARDZERO)
#define ARM_RMODE_SHIFT 22
-#define ARM_INEXACT 0x1000
+#define ARM_INEXACT 0x10
#ifndef __ARM_FP
// For soft float targets, allow changing rounding mode by overriding the weak
// __arm_fe_default_rmode symbol.
-FE_ROUND_MODE __attribute__((weak)) __arm_fe_default_rmode = FE_TONEAREST;
+CRT_FE_ROUND_MODE __attribute__((weak)) __arm_fe_default_rmode =
+ CRT_FE_TONEAREST;
#endif
-FE_ROUND_MODE __fe_getround() {
+CRT_FE_ROUND_MODE __fe_getround() {
#ifdef __ARM_FP
uint32_t fpscr;
__asm__ __volatile__("vmrs %0, fpscr" : "=r" (fpscr));
fpscr = fpscr >> ARM_RMODE_SHIFT & ARM_RMODE_MASK;
switch (fpscr) {
case ARM_UPWARD:
- return FE_UPWARD;
+ return CRT_FE_UPWARD;
case ARM_DOWNWARD:
- return FE_DOWNWARD;
+ return CRT_FE_DOWNWARD;
case ARM_TOWARDZERO:
- return FE_TOWARDZERO;
+ return CRT_FE_TOWARDZERO;
case ARM_TONEAREST:
default:
- return FE_TONEAREST;
+ return CRT_FE_TONEAREST;
}
#else
return __arm_fe_default_rmode;
diff --git a/compiler-rt/lib/builtins/assembly.h b/compiler-rt/lib/builtins/assembly.h
index f6ce6a9fccff..9c015059af5a 100644
--- a/compiler-rt/lib/builtins/assembly.h
+++ b/compiler-rt/lib/builtins/assembly.h
@@ -105,9 +105,11 @@
.popsection
#if BTI_FLAG != 0
-#define BTI_C bti c
+#define BTI_C hint #34
+#define BTI_J hint #36
#else
#define BTI_C
+#define BTI_J
#endif
#if (BTI_FLAG | PAC_FLAG) != 0
@@ -204,8 +206,11 @@
#ifdef VISIBILITY_HIDDEN
#define DECLARE_SYMBOL_VISIBILITY(name) \
HIDDEN(SYMBOL_NAME(name)) SEPARATOR
+#define DECLARE_SYMBOL_VISIBILITY_UNMANGLED(name) \
+ HIDDEN(name) SEPARATOR
#else
#define DECLARE_SYMBOL_VISIBILITY(name)
+#define DECLARE_SYMBOL_VISIBILITY_UNMANGLED(name)
#endif
#define DEFINE_COMPILERRT_FUNCTION(name) \
@@ -248,7 +253,7 @@
FUNC_ALIGN \
.globl name SEPARATOR \
SYMBOL_IS_FUNC(name) SEPARATOR \
- DECLARE_SYMBOL_VISIBILITY(name) SEPARATOR \
+ DECLARE_SYMBOL_VISIBILITY_UNMANGLED(name) SEPARATOR \
CFI_START SEPARATOR \
DECLARE_FUNC_ENCODING \
name: SEPARATOR BTI_C
diff --git a/compiler-rt/lib/builtins/atomic.c b/compiler-rt/lib/builtins/atomic.c
index f48cdc10ccf7..64bf72dfa345 100644
--- a/compiler-rt/lib/builtins/atomic.c
+++ b/compiler-rt/lib/builtins/atomic.c
@@ -24,11 +24,15 @@
//===----------------------------------------------------------------------===//
#include <stdbool.h>
+#include <stddef.h>
#include <stdint.h>
-#include <string.h>
#include "assembly.h"
+// We use __builtin_mem* here to avoid dependencies on libc-provided headers.
+#define memcpy __builtin_memcpy
+#define memcmp __builtin_memcmp
+
// Clang objects if you redefine a builtin. This little hack allows us to
// define a function with the same name as an intrinsic.
#pragma redefine_extname __atomic_load_c SYMBOL_NAME(__atomic_load)
@@ -52,7 +56,7 @@ static const long SPINLOCK_MASK = SPINLOCK_COUNT - 1;
// defined. Each platform should define the Lock type, and corresponding
// lock() and unlock() functions.
////////////////////////////////////////////////////////////////////////////////
-#ifdef __FreeBSD__
+#if defined(__FreeBSD__) || defined(__DragonFly__)
#include <errno.h>
// clang-format off
#include <sys/types.h>
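The locking machinery referenced above works by hashing the object's address into a fixed table of spinlocks, so non-lock-free atomics on unrelated objects rarely contend. A compact sketch of that scheme using C11 atomics; the real builtin plugs in platform-specific lock types instead:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define SPINLOCK_COUNT 128

static _Atomic bool locks[SPINLOCK_COUNT];

static _Atomic bool *lock_for_pointer(void *ptr) {
  uintptr_t hash = (uintptr_t)ptr;
  hash ^= hash >> 4;                     // mix out common alignment bits
  return &locks[hash & (SPINLOCK_COUNT - 1)];
}

static void lock(_Atomic bool *l) {
  while (atomic_exchange_explicit(l, true, memory_order_acquire))
    ;                                    // spin until the old value was false
}

static void unlock(_Atomic bool *l) {
  atomic_store_explicit(l, false, memory_order_release);
}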
diff --git a/compiler-rt/lib/builtins/clear_cache.c b/compiler-rt/lib/builtins/clear_cache.c
index 5a443ddd4b03..3c12b74e8fa6 100644
--- a/compiler-rt/lib/builtins/clear_cache.c
+++ b/compiler-rt/lib/builtins/clear_cache.c
@@ -7,7 +7,9 @@
//===----------------------------------------------------------------------===//
#include "int_lib.h"
+#if defined(__linux__)
#include <assert.h>
+#endif
#include <stddef.h>
#if __APPLE__
@@ -125,6 +127,7 @@ void __clear_cache(void *start, void *end) {
for (addr = xstart & ~(icache_line_size - 1); addr < xend;
addr += icache_line_size)
__asm __volatile("ic ivau, %0" ::"r"(addr));
+ __asm __volatile("dsb ish");
}
__asm __volatile("isb sy");
#elif defined(__powerpc64__)
diff --git a/compiler-rt/lib/builtins/comparedf2.c b/compiler-rt/lib/builtins/comparedf2.c
index 58290d87de65..e1fc12c54d48 100644
--- a/compiler-rt/lib/builtins/comparedf2.c
+++ b/compiler-rt/lib/builtins/comparedf2.c
@@ -39,47 +39,9 @@
#define DOUBLE_PRECISION
#include "fp_lib.h"
-enum LE_RESULT { LE_LESS = -1, LE_EQUAL = 0, LE_GREATER = 1, LE_UNORDERED = 1 };
+#include "fp_compare_impl.inc"
-COMPILER_RT_ABI enum LE_RESULT __ledf2(fp_t a, fp_t b) {
-
- const srep_t aInt = toRep(a);
- const srep_t bInt = toRep(b);
- const rep_t aAbs = aInt & absMask;
- const rep_t bAbs = bInt & absMask;
-
- // If either a or b is NaN, they are unordered.
- if (aAbs > infRep || bAbs > infRep)
- return LE_UNORDERED;
-
- // If a and b are both zeros, they are equal.
- if ((aAbs | bAbs) == 0)
- return LE_EQUAL;
-
- // If at least one of a and b is positive, we get the same result comparing
- // a and b as signed integers as we would with a floating-point compare.
- if ((aInt & bInt) >= 0) {
- if (aInt < bInt)
- return LE_LESS;
- else if (aInt == bInt)
- return LE_EQUAL;
- else
- return LE_GREATER;
- }
-
- // Otherwise, both are negative, so we need to flip the sense of the
- // comparison to get the correct result. (This assumes a twos- or ones-
- // complement integer representation; if integers are represented in a
- // sign-magnitude representation, then this flip is incorrect).
- else {
- if (aInt > bInt)
- return LE_LESS;
- else if (aInt == bInt)
- return LE_EQUAL;
- else
- return LE_GREATER;
- }
-}
+COMPILER_RT_ABI CMP_RESULT __ledf2(fp_t a, fp_t b) { return __leXf2__(a, b); }
#if defined(__ELF__)
// Alias for libgcc compatibility
@@ -89,48 +51,12 @@ COMPILER_RT_ALIAS(__ledf2, __eqdf2)
COMPILER_RT_ALIAS(__ledf2, __ltdf2)
COMPILER_RT_ALIAS(__ledf2, __nedf2)
-enum GE_RESULT {
- GE_LESS = -1,
- GE_EQUAL = 0,
- GE_GREATER = 1,
- GE_UNORDERED = -1 // Note: different from LE_UNORDERED
-};
-
-COMPILER_RT_ABI enum GE_RESULT __gedf2(fp_t a, fp_t b) {
-
- const srep_t aInt = toRep(a);
- const srep_t bInt = toRep(b);
- const rep_t aAbs = aInt & absMask;
- const rep_t bAbs = bInt & absMask;
-
- if (aAbs > infRep || bAbs > infRep)
- return GE_UNORDERED;
- if ((aAbs | bAbs) == 0)
- return GE_EQUAL;
- if ((aInt & bInt) >= 0) {
- if (aInt < bInt)
- return GE_LESS;
- else if (aInt == bInt)
- return GE_EQUAL;
- else
- return GE_GREATER;
- } else {
- if (aInt > bInt)
- return GE_LESS;
- else if (aInt == bInt)
- return GE_EQUAL;
- else
- return GE_GREATER;
- }
-}
+COMPILER_RT_ABI CMP_RESULT __gedf2(fp_t a, fp_t b) { return __geXf2__(a, b); }
COMPILER_RT_ALIAS(__gedf2, __gtdf2)
-COMPILER_RT_ABI int
-__unorddf2(fp_t a, fp_t b) {
- const rep_t aAbs = toRep(a) & absMask;
- const rep_t bAbs = toRep(b) & absMask;
- return aAbs > infRep || bAbs > infRep;
+COMPILER_RT_ABI CMP_RESULT __unorddf2(fp_t a, fp_t b) {
+ return __unordXf2__(a, b);
}
#if defined(__ARM_EABI__)
diff --git a/compiler-rt/lib/builtins/comparesf2.c b/compiler-rt/lib/builtins/comparesf2.c
index 1cb99e468c18..b8a955448f57 100644
--- a/compiler-rt/lib/builtins/comparesf2.c
+++ b/compiler-rt/lib/builtins/comparesf2.c
@@ -39,47 +39,9 @@
#define SINGLE_PRECISION
#include "fp_lib.h"
-enum LE_RESULT { LE_LESS = -1, LE_EQUAL = 0, LE_GREATER = 1, LE_UNORDERED = 1 };
+#include "fp_compare_impl.inc"
-COMPILER_RT_ABI enum LE_RESULT __lesf2(fp_t a, fp_t b) {
-
- const srep_t aInt = toRep(a);
- const srep_t bInt = toRep(b);
- const rep_t aAbs = aInt & absMask;
- const rep_t bAbs = bInt & absMask;
-
- // If either a or b is NaN, they are unordered.
- if (aAbs > infRep || bAbs > infRep)
- return LE_UNORDERED;
-
- // If a and b are both zeros, they are equal.
- if ((aAbs | bAbs) == 0)
- return LE_EQUAL;
-
- // If at least one of a and b is positive, we get the same result comparing
- // a and b as signed integers as we would with a fp_ting-point compare.
- if ((aInt & bInt) >= 0) {
- if (aInt < bInt)
- return LE_LESS;
- else if (aInt == bInt)
- return LE_EQUAL;
- else
- return LE_GREATER;
- }
-
- // Otherwise, both are negative, so we need to flip the sense of the
- // comparison to get the correct result. (This assumes a twos- or ones-
- // complement integer representation; if integers are represented in a
- // sign-magnitude representation, then this flip is incorrect).
- else {
- if (aInt > bInt)
- return LE_LESS;
- else if (aInt == bInt)
- return LE_EQUAL;
- else
- return LE_GREATER;
- }
-}
+COMPILER_RT_ABI CMP_RESULT __lesf2(fp_t a, fp_t b) { return __leXf2__(a, b); }
#if defined(__ELF__)
// Alias for libgcc compatibility
@@ -89,48 +51,12 @@ COMPILER_RT_ALIAS(__lesf2, __eqsf2)
COMPILER_RT_ALIAS(__lesf2, __ltsf2)
COMPILER_RT_ALIAS(__lesf2, __nesf2)
-enum GE_RESULT {
- GE_LESS = -1,
- GE_EQUAL = 0,
- GE_GREATER = 1,
- GE_UNORDERED = -1 // Note: different from LE_UNORDERED
-};
-
-COMPILER_RT_ABI enum GE_RESULT __gesf2(fp_t a, fp_t b) {
-
- const srep_t aInt = toRep(a);
- const srep_t bInt = toRep(b);
- const rep_t aAbs = aInt & absMask;
- const rep_t bAbs = bInt & absMask;
-
- if (aAbs > infRep || bAbs > infRep)
- return GE_UNORDERED;
- if ((aAbs | bAbs) == 0)
- return GE_EQUAL;
- if ((aInt & bInt) >= 0) {
- if (aInt < bInt)
- return GE_LESS;
- else if (aInt == bInt)
- return GE_EQUAL;
- else
- return GE_GREATER;
- } else {
- if (aInt > bInt)
- return GE_LESS;
- else if (aInt == bInt)
- return GE_EQUAL;
- else
- return GE_GREATER;
- }
-}
+COMPILER_RT_ABI CMP_RESULT __gesf2(fp_t a, fp_t b) { return __geXf2__(a, b); }
COMPILER_RT_ALIAS(__gesf2, __gtsf2)
-COMPILER_RT_ABI int
-__unordsf2(fp_t a, fp_t b) {
- const rep_t aAbs = toRep(a) & absMask;
- const rep_t bAbs = toRep(b) & absMask;
- return aAbs > infRep || bAbs > infRep;
+COMPILER_RT_ABI CMP_RESULT __unordsf2(fp_t a, fp_t b) {
+ return __unordXf2__(a, b);
}
#if defined(__ARM_EABI__)
diff --git a/compiler-rt/lib/builtins/comparetf2.c b/compiler-rt/lib/builtins/comparetf2.c
index 2eb34cf37fbc..f1592454138c 100644
--- a/compiler-rt/lib/builtins/comparetf2.c
+++ b/compiler-rt/lib/builtins/comparetf2.c
@@ -40,45 +40,9 @@
#include "fp_lib.h"
#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
-enum LE_RESULT { LE_LESS = -1, LE_EQUAL = 0, LE_GREATER = 1, LE_UNORDERED = 1 };
+#include "fp_compare_impl.inc"
-COMPILER_RT_ABI enum LE_RESULT __letf2(fp_t a, fp_t b) {
-
- const srep_t aInt = toRep(a);
- const srep_t bInt = toRep(b);
- const rep_t aAbs = aInt & absMask;
- const rep_t bAbs = bInt & absMask;
-
- // If either a or b is NaN, they are unordered.
- if (aAbs > infRep || bAbs > infRep)
- return LE_UNORDERED;
-
- // If a and b are both zeros, they are equal.
- if ((aAbs | bAbs) == 0)
- return LE_EQUAL;
-
- // If at least one of a and b is positive, we get the same result comparing
- // a and b as signed integers as we would with a floating-point compare.
- if ((aInt & bInt) >= 0) {
- if (aInt < bInt)
- return LE_LESS;
- else if (aInt == bInt)
- return LE_EQUAL;
- else
- return LE_GREATER;
- } else {
- // Otherwise, both are negative, so we need to flip the sense of the
- // comparison to get the correct result. (This assumes a twos- or ones-
- // complement integer representation; if integers are represented in a
- // sign-magnitude representation, then this flip is incorrect).
- if (aInt > bInt)
- return LE_LESS;
- else if (aInt == bInt)
- return LE_EQUAL;
- else
- return LE_GREATER;
- }
-}
+COMPILER_RT_ABI CMP_RESULT __letf2(fp_t a, fp_t b) { return __leXf2__(a, b); }
#if defined(__ELF__)
// Alias for libgcc compatibility
@@ -88,47 +52,12 @@ COMPILER_RT_ALIAS(__letf2, __eqtf2)
COMPILER_RT_ALIAS(__letf2, __lttf2)
COMPILER_RT_ALIAS(__letf2, __netf2)
-enum GE_RESULT {
- GE_LESS = -1,
- GE_EQUAL = 0,
- GE_GREATER = 1,
- GE_UNORDERED = -1 // Note: different from LE_UNORDERED
-};
-
-COMPILER_RT_ABI enum GE_RESULT __getf2(fp_t a, fp_t b) {
-
- const srep_t aInt = toRep(a);
- const srep_t bInt = toRep(b);
- const rep_t aAbs = aInt & absMask;
- const rep_t bAbs = bInt & absMask;
-
- if (aAbs > infRep || bAbs > infRep)
- return GE_UNORDERED;
- if ((aAbs | bAbs) == 0)
- return GE_EQUAL;
- if ((aInt & bInt) >= 0) {
- if (aInt < bInt)
- return GE_LESS;
- else if (aInt == bInt)
- return GE_EQUAL;
- else
- return GE_GREATER;
- } else {
- if (aInt > bInt)
- return GE_LESS;
- else if (aInt == bInt)
- return GE_EQUAL;
- else
- return GE_GREATER;
- }
-}
+COMPILER_RT_ABI CMP_RESULT __getf2(fp_t a, fp_t b) { return __geXf2__(a, b); }
COMPILER_RT_ALIAS(__getf2, __gttf2)
-COMPILER_RT_ABI int __unordtf2(fp_t a, fp_t b) {
- const rep_t aAbs = toRep(a) & absMask;
- const rep_t bAbs = toRep(b) & absMask;
- return aAbs > infRep || bAbs > infRep;
+COMPILER_RT_ABI CMP_RESULT __unordtf2(fp_t a, fp_t b) {
+ return __unordXf2__(a, b);
}
#endif
diff --git a/compiler-rt/lib/builtins/cpu_model.c b/compiler-rt/lib/builtins/cpu_model.c
index 51bedd98c3d3..6ee42911b204 100644
--- a/compiler-rt/lib/builtins/cpu_model.c
+++ b/compiler-rt/lib/builtins/cpu_model.c
@@ -99,6 +99,7 @@ enum ProcessorSubtypes {
INTEL_COREI7_SAPPHIRERAPIDS,
INTEL_COREI7_ALDERLAKE,
AMDFAM19H_ZNVER3,
+ INTEL_COREI7_ROCKETLAKE,
CPU_SUBTYPE_MAX
};
@@ -384,6 +385,13 @@ getIntelProcessorTypeAndSubtype(unsigned Family, unsigned Model,
*Subtype = INTEL_COREI7_SKYLAKE;
break;
+ // Rocketlake:
+ case 0xa7:
+ CPU = "rocketlake";
+ *Type = INTEL_COREI7;
+ *Subtype = INTEL_COREI7_ROCKETLAKE;
+ break;
+
// Skylake Xeon:
case 0x55:
*Type = INTEL_COREI7;
diff --git a/compiler-rt/lib/builtins/divdc3.c b/compiler-rt/lib/builtins/divdc3.c
index c2cf62874603..5581182f3bfd 100644
--- a/compiler-rt/lib/builtins/divdc3.c
+++ b/compiler-rt/lib/builtins/divdc3.c
@@ -20,17 +20,19 @@
COMPILER_RT_ABI Dcomplex __divdc3(double __a, double __b, double __c,
double __d) {
int __ilogbw = 0;
- double __logbw = __compiler_rt_logb(crt_fmax(crt_fabs(__c), crt_fabs(__d)));
+ double __logbw = __compiler_rt_logb(__compiler_rt_fmax(crt_fabs(__c),
+ crt_fabs(__d)));
if (crt_isfinite(__logbw)) {
__ilogbw = (int)__logbw;
- __c = crt_scalbn(__c, -__ilogbw);
- __d = crt_scalbn(__d, -__ilogbw);
+ __c = __compiler_rt_scalbn(__c, -__ilogbw);
+ __d = __compiler_rt_scalbn(__d, -__ilogbw);
}
double __denom = __c * __c + __d * __d;
Dcomplex z;
- COMPLEX_REAL(z) = crt_scalbn((__a * __c + __b * __d) / __denom, -__ilogbw);
+ COMPLEX_REAL(z) =
+ __compiler_rt_scalbn((__a * __c + __b * __d) / __denom, -__ilogbw);
COMPLEX_IMAGINARY(z) =
- crt_scalbn((__b * __c - __a * __d) / __denom, -__ilogbw);
+ __compiler_rt_scalbn((__b * __c - __a * __d) / __denom, -__ilogbw);
if (crt_isnan(COMPLEX_REAL(z)) && crt_isnan(COMPLEX_IMAGINARY(z))) {
if ((__denom == 0.0) && (!crt_isnan(__a) || !crt_isnan(__b))) {
COMPLEX_REAL(z) = crt_copysign(CRT_INFINITY, __c) * __a;
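The logb/scalbn scaling above is what keeps the Smith-style complex division from overflowing when c and d are huge: the denominator is computed on rescaled operands and the power of two is folded back in afterwards. A quick illustration with the host libm (the builtin itself deliberately avoids libm calls):

#include <math.h>
#include <stdio.h>

int main(void) {
  double c = 1e308, d = 1e308;
  printf("naive denominator  = %g\n", c * c + d * d);         // inf: overflowed
  int ilogbw = ilogb(fmax(fabs(c), fabs(d)));                  // 1023 here
  double cs = scalbn(c, -ilogbw), ds = scalbn(d, -ilogbw);
  printf("scaled denominator = %g (scale 2^-%d)\n", cs * cs + ds * ds, ilogbw);
  return 0;
}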
diff --git a/compiler-rt/lib/builtins/divsc3.c b/compiler-rt/lib/builtins/divsc3.c
index 1a63634dde21..aa4fd8e79e0c 100644
--- a/compiler-rt/lib/builtins/divsc3.c
+++ b/compiler-rt/lib/builtins/divsc3.c
@@ -20,17 +20,18 @@
COMPILER_RT_ABI Fcomplex __divsc3(float __a, float __b, float __c, float __d) {
int __ilogbw = 0;
float __logbw =
- __compiler_rt_logbf(crt_fmaxf(crt_fabsf(__c), crt_fabsf(__d)));
+ __compiler_rt_logbf(__compiler_rt_fmaxf(crt_fabsf(__c), crt_fabsf(__d)));
if (crt_isfinite(__logbw)) {
__ilogbw = (int)__logbw;
- __c = crt_scalbnf(__c, -__ilogbw);
- __d = crt_scalbnf(__d, -__ilogbw);
+ __c = __compiler_rt_scalbnf(__c, -__ilogbw);
+ __d = __compiler_rt_scalbnf(__d, -__ilogbw);
}
float __denom = __c * __c + __d * __d;
Fcomplex z;
- COMPLEX_REAL(z) = crt_scalbnf((__a * __c + __b * __d) / __denom, -__ilogbw);
+ COMPLEX_REAL(z) =
+ __compiler_rt_scalbnf((__a * __c + __b * __d) / __denom, -__ilogbw);
COMPLEX_IMAGINARY(z) =
- crt_scalbnf((__b * __c - __a * __d) / __denom, -__ilogbw);
+ __compiler_rt_scalbnf((__b * __c - __a * __d) / __denom, -__ilogbw);
if (crt_isnan(COMPLEX_REAL(z)) && crt_isnan(COMPLEX_IMAGINARY(z))) {
if ((__denom == 0) && (!crt_isnan(__a) || !crt_isnan(__b))) {
COMPLEX_REAL(z) = crt_copysignf(CRT_INFINITY, __c) * __a;
diff --git a/compiler-rt/lib/builtins/divtc3.c b/compiler-rt/lib/builtins/divtc3.c
index 37c71400e370..0e4799295f32 100644
--- a/compiler-rt/lib/builtins/divtc3.c
+++ b/compiler-rt/lib/builtins/divtc3.c
@@ -21,17 +21,18 @@ COMPILER_RT_ABI Lcomplex __divtc3(long double __a, long double __b,
long double __c, long double __d) {
int __ilogbw = 0;
long double __logbw =
- __compiler_rt_logbl(crt_fmaxl(crt_fabsl(__c), crt_fabsl(__d)));
+ __compiler_rt_logbl(__compiler_rt_fmaxl(crt_fabsl(__c), crt_fabsl(__d)));
if (crt_isfinite(__logbw)) {
__ilogbw = (int)__logbw;
- __c = crt_scalbnl(__c, -__ilogbw);
- __d = crt_scalbnl(__d, -__ilogbw);
+ __c = __compiler_rt_scalbnl(__c, -__ilogbw);
+ __d = __compiler_rt_scalbnl(__d, -__ilogbw);
}
long double __denom = __c * __c + __d * __d;
Lcomplex z;
- COMPLEX_REAL(z) = crt_scalbnl((__a * __c + __b * __d) / __denom, -__ilogbw);
+ COMPLEX_REAL(z) =
+ __compiler_rt_scalbnl((__a * __c + __b * __d) / __denom, -__ilogbw);
COMPLEX_IMAGINARY(z) =
- crt_scalbnl((__b * __c - __a * __d) / __denom, -__ilogbw);
+ __compiler_rt_scalbnl((__b * __c - __a * __d) / __denom, -__ilogbw);
if (crt_isnan(COMPLEX_REAL(z)) && crt_isnan(COMPLEX_IMAGINARY(z))) {
if ((__denom == 0.0) && (!crt_isnan(__a) || !crt_isnan(__b))) {
COMPLEX_REAL(z) = crt_copysignl(CRT_INFINITY, __c) * __a;
diff --git a/compiler-rt/lib/builtins/fixdfdi.c b/compiler-rt/lib/builtins/fixdfdi.c
index 2ed5261c51b7..511568fc12fd 100644
--- a/compiler-rt/lib/builtins/fixdfdi.c
+++ b/compiler-rt/lib/builtins/fixdfdi.c
@@ -9,7 +9,7 @@
#define DOUBLE_PRECISION
#include "fp_lib.h"
-#ifndef __SOFT_FP__
+#ifndef __SOFTFP__
// Support for systems that have hardware floating-point; can set the invalid
// flag as a side-effect of computation.
diff --git a/compiler-rt/lib/builtins/fixsfdi.c b/compiler-rt/lib/builtins/fixsfdi.c
index 615e93d4f8d9..0cf71c30311a 100644
--- a/compiler-rt/lib/builtins/fixsfdi.c
+++ b/compiler-rt/lib/builtins/fixsfdi.c
@@ -9,7 +9,7 @@
#define SINGLE_PRECISION
#include "fp_lib.h"
-#ifndef __SOFT_FP__
+#ifndef __SOFTFP__
// Support for systems that have hardware floating-point; can set the invalid
// flag as a side-effect of computation.
diff --git a/compiler-rt/lib/builtins/fixunsdfdi.c b/compiler-rt/lib/builtins/fixunsdfdi.c
index d2ba73825cfe..ccb256d2c7e0 100644
--- a/compiler-rt/lib/builtins/fixunsdfdi.c
+++ b/compiler-rt/lib/builtins/fixunsdfdi.c
@@ -9,7 +9,7 @@
#define DOUBLE_PRECISION
#include "fp_lib.h"
-#ifndef __SOFT_FP__
+#ifndef __SOFTFP__
// Support for systems that have hardware floating-point; can set the invalid
// flag as a side-effect of computation.
diff --git a/compiler-rt/lib/builtins/fixunssfdi.c b/compiler-rt/lib/builtins/fixunssfdi.c
index 2b90dafad8c2..647185fbabf1 100644
--- a/compiler-rt/lib/builtins/fixunssfdi.c
+++ b/compiler-rt/lib/builtins/fixunssfdi.c
@@ -9,7 +9,7 @@
#define SINGLE_PRECISION
#include "fp_lib.h"
-#ifndef __SOFT_FP__
+#ifndef __SOFTFP__
// Support for systems that have hardware floating-point; can set the invalid
// flag as a side-effect of computation.
diff --git a/compiler-rt/lib/builtins/floatdidf.c b/compiler-rt/lib/builtins/floatdidf.c
index b2d8f2b44b6d..7ecb30bca71e 100644
--- a/compiler-rt/lib/builtins/floatdidf.c
+++ b/compiler-rt/lib/builtins/floatdidf.c
@@ -20,7 +20,7 @@
// seee eeee eeee mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm
// mmmm
-#ifndef __SOFT_FP__
+#ifndef __SOFTFP__
// Support for systems that have hardware floating-point; we'll set the inexact
// flag as a side-effect of this computation.
diff --git a/compiler-rt/lib/builtins/floatundidf.c b/compiler-rt/lib/builtins/floatundidf.c
index 4c445b118080..e5e533042a34 100644
--- a/compiler-rt/lib/builtins/floatundidf.c
+++ b/compiler-rt/lib/builtins/floatundidf.c
@@ -20,7 +20,7 @@
#include "int_lib.h"
-#ifndef __SOFT_FP__
+#ifndef __SOFTFP__
// Support for systems that have hardware floating-point; we'll set the inexact
// flag as a side-effect of this computation.
diff --git a/compiler-rt/lib/builtins/fp_add_impl.inc b/compiler-rt/lib/builtins/fp_add_impl.inc
index ab6321349032..7133358df9bd 100644
--- a/compiler-rt/lib/builtins/fp_add_impl.inc
+++ b/compiler-rt/lib/builtins/fp_add_impl.inc
@@ -151,19 +151,19 @@ static __inline fp_t __addXf3__(fp_t a, fp_t b) {
// Perform the final rounding. The result may overflow to infinity, but
// that is the correct result in that case.
switch (__fe_getround()) {
- case FE_TONEAREST:
+ case CRT_FE_TONEAREST:
if (roundGuardSticky > 0x4)
result++;
if (roundGuardSticky == 0x4)
result += result & 1;
break;
- case FE_DOWNWARD:
+ case CRT_FE_DOWNWARD:
if (resultSign && roundGuardSticky) result++;
break;
- case FE_UPWARD:
+ case CRT_FE_UPWARD:
if (!resultSign && roundGuardSticky) result++;
break;
- case FE_TOWARDZERO:
+ case CRT_FE_TOWARDZERO:
break;
}
if (roundGuardSticky)
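The switch above is where the environment's rounding mode decides whether the guard and sticky bits bump the result. The effect is easy to see with the host fenv.h, provided floating-point constant folding is disabled so the mode is honored (e.g. -frounding-math):

#include <fenv.h>
#include <stdio.h>

int main(void) {
  volatile double one = 1.0, tiny = 0x1p-60;   // tiny sits below the guard bit
  fesetround(FE_TONEAREST);
  printf("%a\n", one + tiny);                  // 0x1p+0: sticky bits rounded away
  fesetround(FE_UPWARD);
  printf("%a\n", one + tiny);                  // 0x1.0000000000001p+0: rounded up
  fesetround(FE_TONEAREST);                    // restore the default mode
  return 0;
}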
diff --git a/compiler-rt/lib/builtins/fp_compare_impl.inc b/compiler-rt/lib/builtins/fp_compare_impl.inc
new file mode 100644
index 000000000000..40fc7df4c679
--- /dev/null
+++ b/compiler-rt/lib/builtins/fp_compare_impl.inc
@@ -0,0 +1,116 @@
+//===-- lib/fp_compare_impl.inc - Floating-point comparison -------*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "fp_lib.h"
+
+// GCC uses long (at least for x86_64) as the return type of the comparison
+// functions. We need to ensure that the return value is sign-extended in the
+// same way as GCC expects (since otherwise GCC-generated __builtin_isinf
+// returns true for finite 128-bit floating-point numbers).
+#ifdef __aarch64__
+// AArch64 GCC overrides libgcc_cmp_return to use int instead of long.
+typedef int CMP_RESULT;
+#elif __SIZEOF_POINTER__ == 8 && __SIZEOF_LONG__ == 4
+// LLP64 ABIs use long long instead of long.
+typedef long long CMP_RESULT;
+#else
+// Otherwise the comparison functions return long.
+typedef long CMP_RESULT;
+#endif
+
+#if !defined(__clang__) && defined(__GNUC__)
+// GCC uses a special __libgcc_cmp_return__ mode to define the return type, so
+// check that we are ABI-compatible when compiling the builtins with GCC.
+typedef int GCC_CMP_RESULT __attribute__((__mode__(__libgcc_cmp_return__)));
+_Static_assert(sizeof(GCC_CMP_RESULT) == sizeof(CMP_RESULT),
+ "SOFTFP ABI not compatible with GCC");
+#endif
+
+enum {
+ LE_LESS = -1,
+ LE_EQUAL = 0,
+ LE_GREATER = 1,
+ LE_UNORDERED = 1,
+};
+
+static inline CMP_RESULT __leXf2__(fp_t a, fp_t b) {
+ const srep_t aInt = toRep(a);
+ const srep_t bInt = toRep(b);
+ const rep_t aAbs = aInt & absMask;
+ const rep_t bAbs = bInt & absMask;
+
+ // If either a or b is NaN, they are unordered.
+ if (aAbs > infRep || bAbs > infRep)
+ return LE_UNORDERED;
+
+ // If a and b are both zeros, they are equal.
+ if ((aAbs | bAbs) == 0)
+ return LE_EQUAL;
+
+ // If at least one of a and b is positive, we get the same result comparing
+ // a and b as signed integers as we would with a floating-point compare.
+ if ((aInt & bInt) >= 0) {
+ if (aInt < bInt)
+ return LE_LESS;
+ else if (aInt == bInt)
+ return LE_EQUAL;
+ else
+ return LE_GREATER;
+ } else {
+ // Otherwise, both are negative, so we need to flip the sense of the
+ // comparison to get the correct result. (This assumes a twos- or ones-
+ // complement integer representation; if integers are represented in a
+ // sign-magnitude representation, then this flip is incorrect).
+ if (aInt > bInt)
+ return LE_LESS;
+ else if (aInt == bInt)
+ return LE_EQUAL;
+ else
+ return LE_GREATER;
+ }
+}
+
+enum {
+ GE_LESS = -1,
+ GE_EQUAL = 0,
+ GE_GREATER = 1,
+ GE_UNORDERED = -1 // Note: different from LE_UNORDERED
+};
+
+static inline CMP_RESULT __geXf2__(fp_t a, fp_t b) {
+ const srep_t aInt = toRep(a);
+ const srep_t bInt = toRep(b);
+ const rep_t aAbs = aInt & absMask;
+ const rep_t bAbs = bInt & absMask;
+
+ if (aAbs > infRep || bAbs > infRep)
+ return GE_UNORDERED;
+ if ((aAbs | bAbs) == 0)
+ return GE_EQUAL;
+ if ((aInt & bInt) >= 0) {
+ if (aInt < bInt)
+ return GE_LESS;
+ else if (aInt == bInt)
+ return GE_EQUAL;
+ else
+ return GE_GREATER;
+ } else {
+ if (aInt > bInt)
+ return GE_LESS;
+ else if (aInt == bInt)
+ return GE_EQUAL;
+ else
+ return GE_GREATER;
+ }
+}
+
+static inline CMP_RESULT __unordXf2__(fp_t a, fp_t b) {
+ const rep_t aAbs = toRep(a) & absMask;
+ const rep_t bAbs = toRep(b) & absMask;
+ return aAbs > infRep || bAbs > infRep;
+}
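The LE/GE encodings above are chosen so a compiler can lower every soft-float comparison to a call plus an integer compare against zero, with NaNs automatically falling on the false side. A sketch of the caller's view, assuming an LP64 target where CMP_RESULT is long and linking against the compiler-rt builtins:

// Hypothetical prototypes for illustration; the real return type is the
// CMP_RESULT typedef defined above and varies by target.
long __ledf2(double a, double b);   // -1 less, 0 equal, 1 greater or unordered
long __gedf2(double a, double b);   // -1 less or unordered, 0 equal, 1 greater

static int soft_le(double a, double b) { return __ledf2(a, b) <= 0; }
static int soft_ge(double a, double b) { return __gedf2(a, b) >= 0; }
// With a NaN argument __ledf2 returns 1 and __gedf2 returns -1, so both
// soft_le and soft_ge correctly evaluate to false.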
diff --git a/compiler-rt/lib/builtins/fp_lib.h b/compiler-rt/lib/builtins/fp_lib.h
index f22feafa4e69..3fb13a033a14 100644
--- a/compiler-rt/lib/builtins/fp_lib.h
+++ b/compiler-rt/lib/builtins/fp_lib.h
@@ -299,28 +299,119 @@ static __inline fp_t __compiler_rt_logbX(fp_t x) {
return exp - exponentBias - shift; // Unbias exponent
}
}
+
+// Avoid using scalbn from libm. Unlike libc/libm scalbn, this function never
+// sets errno on underflow/overflow.
+static __inline fp_t __compiler_rt_scalbnX(fp_t x, int y) {
+ const rep_t rep = toRep(x);
+ int exp = (rep & exponentMask) >> significandBits;
+
+ if (x == 0.0 || exp == maxExponent)
+ return x; // +/- 0.0, NaN, or inf: return x
+
+ // Normalize subnormal input.
+ rep_t sig = rep & significandMask;
+ if (exp == 0) {
+ exp += normalize(&sig);
+ sig &= ~implicitBit; // clear the implicit bit again
+ }
+
+ if (__builtin_sadd_overflow(exp, y, &exp)) {
+ // Saturate the exponent, which will guarantee an underflow/overflow below.
+ exp = (y >= 0) ? INT_MAX : INT_MIN;
+ }
+
+ // Return this value: [+/-] 1.sig * 2 ** (exp - exponentBias).
+ const rep_t sign = rep & signBit;
+ if (exp >= maxExponent) {
+ // Overflow, which could produce infinity or the largest-magnitude value,
+ // depending on the rounding mode.
+ return fromRep(sign | ((rep_t)(maxExponent - 1) << significandBits)) * 2.0f;
+ } else if (exp <= 0) {
+ // Subnormal or underflow. Use floating-point multiply to handle truncation
+ // correctly.
+ fp_t tmp = fromRep(sign | (REP_C(1) << significandBits) | sig);
+ exp += exponentBias - 1;
+ if (exp < 1)
+ exp = 1;
+ tmp *= fromRep((rep_t)exp << significandBits);
+ return tmp;
+ } else
+ return fromRep(sign | ((rep_t)exp << significandBits) | sig);
+}
+
+// Avoid using fmax from libm.
+static __inline fp_t __compiler_rt_fmaxX(fp_t x, fp_t y) {
+ // If either argument is NaN, return the other argument. If both are NaN,
+ // arbitrarily return the second one. Otherwise, if both arguments are +/-0,
+ // arbitrarily return the first one.
+ return (crt_isnan(x) || x < y) ? y : x;
+}
+
#endif
#if defined(SINGLE_PRECISION)
+
static __inline fp_t __compiler_rt_logbf(fp_t x) {
return __compiler_rt_logbX(x);
}
+static __inline fp_t __compiler_rt_scalbnf(fp_t x, int y) {
+ return __compiler_rt_scalbnX(x, y);
+}
+static __inline fp_t __compiler_rt_fmaxf(fp_t x, fp_t y) {
+#if defined(__aarch64__)
+ // Use __builtin_fmaxf which turns into an fmaxnm instruction on AArch64.
+ return __builtin_fmaxf(x, y);
+#else
+ // __builtin_fmaxf frequently turns into a libm call, so inline the function.
+ return __compiler_rt_fmaxX(x, y);
+#endif
+}
+
#elif defined(DOUBLE_PRECISION)
+
static __inline fp_t __compiler_rt_logb(fp_t x) {
return __compiler_rt_logbX(x);
}
+static __inline fp_t __compiler_rt_scalbn(fp_t x, int y) {
+ return __compiler_rt_scalbnX(x, y);
+}
+static __inline fp_t __compiler_rt_fmax(fp_t x, fp_t y) {
+#if defined(__aarch64__)
+ // Use __builtin_fmax which turns into an fmaxnm instruction on AArch64.
+ return __builtin_fmax(x, y);
+#else
+ // __builtin_fmax frequently turns into a libm call, so inline the function.
+ return __compiler_rt_fmaxX(x, y);
+#endif
+}
+
#elif defined(QUAD_PRECISION)
+
#if defined(CRT_LDBL_128BIT)
static __inline fp_t __compiler_rt_logbl(fp_t x) {
return __compiler_rt_logbX(x);
}
+static __inline fp_t __compiler_rt_scalbnl(fp_t x, int y) {
+ return __compiler_rt_scalbnX(x, y);
+}
+static __inline fp_t __compiler_rt_fmaxl(fp_t x, fp_t y) {
+ return __compiler_rt_fmaxX(x, y);
+}
#else
// The generic implementation only works for ieee754 floating point. For other
// floating point types, continue to rely on the libm implementation for now.
static __inline long double __compiler_rt_logbl(long double x) {
return crt_logbl(x);
}
-#endif
-#endif
+static __inline long double __compiler_rt_scalbnl(long double x, int y) {
+ return crt_scalbnl(x, y);
+}
+static __inline long double __compiler_rt_fmaxl(long double x, long double y) {
+ return crt_fmaxl(x, y);
+}
+#endif // CRT_LDBL_128BIT
+
+#endif // *_PRECISION
#endif // FP_LIB_HEADER
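For reference, the behavior the libm-free __compiler_rt_scalbnX above has to reproduce, shown here with the host libm: exact power-of-two scaling, gradual underflow into subnormals (the multiply in the exp <= 0 branch), and saturation to infinity on overflow:

#include <math.h>
#include <stdio.h>

int main(void) {
  printf("%g\n", scalbn(1.5, 4));       // 24: 1.5 * 2^4, exact
  printf("%a\n", scalbn(1.0, -1074));   // 0x1p-1074: smallest double subnormal
  printf("%g\n", scalbn(1.0, 2000));    // inf: exponent saturates on overflow
  return 0;
}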
diff --git a/compiler-rt/lib/builtins/fp_mode.c b/compiler-rt/lib/builtins/fp_mode.c
index c1b6c1f6b8a3..b84df8abb27d 100644
--- a/compiler-rt/lib/builtins/fp_mode.c
+++ b/compiler-rt/lib/builtins/fp_mode.c
@@ -15,9 +15,7 @@
#include "fp_mode.h"
// IEEE-754 default rounding (to nearest, ties to even).
-FE_ROUND_MODE __fe_getround() {
- return FE_TONEAREST;
-}
+CRT_FE_ROUND_MODE __fe_getround() { return CRT_FE_TONEAREST; }
int __fe_raise_inexact() {
return 0;
diff --git a/compiler-rt/lib/builtins/fp_mode.h b/compiler-rt/lib/builtins/fp_mode.h
index 4ba682c384f2..26a3f4d10942 100644
--- a/compiler-rt/lib/builtins/fp_mode.h
+++ b/compiler-rt/lib/builtins/fp_mode.h
@@ -17,13 +17,13 @@
#define FP_MODE
typedef enum {
- FE_TONEAREST,
- FE_DOWNWARD,
- FE_UPWARD,
- FE_TOWARDZERO
-} FE_ROUND_MODE;
+ CRT_FE_TONEAREST,
+ CRT_FE_DOWNWARD,
+ CRT_FE_UPWARD,
+ CRT_FE_TOWARDZERO
+} CRT_FE_ROUND_MODE;
-FE_ROUND_MODE __fe_getround(void);
+CRT_FE_ROUND_MODE __fe_getround(void);
int __fe_raise_inexact(void);
#endif // FP_MODE_H
diff --git a/compiler-rt/lib/builtins/gcc_personality_v0.c b/compiler-rt/lib/builtins/gcc_personality_v0.c
index d12ee03c49fe..afb9e2e113de 100644
--- a/compiler-rt/lib/builtins/gcc_personality_v0.c
+++ b/compiler-rt/lib/builtins/gcc_personality_v0.c
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "int_lib.h"
+#include <stddef.h>
#include <unwind.h>
#if defined(__arm__) && !defined(__ARM_DWARF_EH__) && \
@@ -20,6 +21,15 @@
#include "unwind-ehabi-helpers.h"
#endif
+#if defined(__SEH__) && !defined(__USING_SJLJ_EXCEPTIONS__)
+#include <windows.h>
+#include <winnt.h>
+
+EXCEPTION_DISPOSITION _GCC_specific_handler(PEXCEPTION_RECORD, void *, PCONTEXT,
+ PDISPATCHER_CONTEXT,
+ _Unwind_Personality_Fn);
+#endif
+
// Pointer encodings documented at:
// http://refspecs.freestandards.org/LSB_1.3.0/gLSB/gLSB/ehframehdr.html
@@ -43,9 +53,9 @@
#define DW_EH_PE_indirect 0x80 // gcc extension
// read a uleb128 encoded value and advance pointer
-static uintptr_t readULEB128(const uint8_t **data) {
- uintptr_t result = 0;
- uintptr_t shift = 0;
+static size_t readULEB128(const uint8_t **data) {
+ size_t result = 0;
+ size_t shift = 0;
unsigned char byte;
const uint8_t *p = *data;
do {
@@ -168,6 +178,10 @@ COMPILER_RT_ABI _Unwind_Reason_Code __gcc_personality_sj0(
COMPILER_RT_ABI _Unwind_Reason_Code __gcc_personality_v0(
_Unwind_State state, struct _Unwind_Exception *exceptionObject,
struct _Unwind_Context *context)
+#elif defined(__SEH__)
+static _Unwind_Reason_Code __gcc_personality_imp(
+ int version, _Unwind_Action actions, uint64_t exceptionClass,
+ struct _Unwind_Exception *exceptionObject, struct _Unwind_Context *context)
#else
COMPILER_RT_ABI _Unwind_Reason_Code __gcc_personality_v0(
int version, _Unwind_Action actions, uint64_t exceptionClass,
@@ -211,8 +225,8 @@ COMPILER_RT_ABI _Unwind_Reason_Code __gcc_personality_v0(
const uint8_t *p = callSiteTableStart;
while (p < callSiteTableEnd) {
uintptr_t start = readEncodedPointer(&p, callSiteEncoding);
- uintptr_t length = readEncodedPointer(&p, callSiteEncoding);
- uintptr_t landingPad = readEncodedPointer(&p, callSiteEncoding);
+ size_t length = readEncodedPointer(&p, callSiteEncoding);
+ size_t landingPad = readEncodedPointer(&p, callSiteEncoding);
readULEB128(&p); // action value not used for C code
if (landingPad == 0)
continue; // no landing pad for this entry
@@ -232,3 +246,12 @@ COMPILER_RT_ABI _Unwind_Reason_Code __gcc_personality_v0(
// No landing pad found, continue unwinding.
return continueUnwind(exceptionObject, context);
}
+
+#if defined(__SEH__) && !defined(__USING_SJLJ_EXCEPTIONS__)
+COMPILER_RT_ABI EXCEPTION_DISPOSITION
+__gcc_personality_seh0(PEXCEPTION_RECORD ms_exc, void *this_frame,
+ PCONTEXT ms_orig_context, PDISPATCHER_CONTEXT ms_disp) {
+ return _GCC_specific_handler(ms_exc, this_frame, ms_orig_context, ms_disp,
+ __gcc_personality_imp);
+}
+#endif
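The readULEB128 change above only widens the result type from uintptr_t to
size_t; the decode loop itself is unchanged. For readers unfamiliar with the
encoding, here is a standalone sketch of the same loop with a worked DWARF
example (decode_uleb128 is an illustrative name, not compiler-rt code):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    static size_t decode_uleb128(const uint8_t **data) {
      size_t result = 0;
      size_t shift = 0;
      const uint8_t *p = *data;
      uint8_t byte;
      do {
        byte = *p++;
        result |= (size_t)(byte & 0x7f) << shift;  // low 7 bits are payload
        shift += 7;
      } while (byte & 0x80);  // high bit set means another byte follows
      *data = p;
      return result;
    }

    int main(void) {
      // 0xE5 0x8E 0x26 encodes 624485 = 0x65 | (0x0E << 7) | (0x26 << 14).
      const uint8_t buf[] = {0xE5, 0x8E, 0x26};
      const uint8_t *p = buf;
      assert(decode_uleb128(&p) == 624485);
      assert(p == buf + 3);  // pointer advanced past the encoded value
      return 0;
    }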
diff --git a/compiler-rt/lib/builtins/i386/fp_mode.c b/compiler-rt/lib/builtins/i386/fp_mode.c
index 62ab771222c0..80e272e4c9a3 100644
--- a/compiler-rt/lib/builtins/i386/fp_mode.c
+++ b/compiler-rt/lib/builtins/i386/fp_mode.c
@@ -14,22 +14,22 @@
#define X87_TOWARDZERO 0x0c00
#define X87_RMODE_MASK (X87_TONEAREST | X87_UPWARD | X87_DOWNWARD | X87_TOWARDZERO)
-FE_ROUND_MODE __fe_getround() {
+CRT_FE_ROUND_MODE __fe_getround() {
// Assume that the rounding mode state for the fpu agrees with the SSE unit.
unsigned short cw;
__asm__ __volatile__ ("fnstcw %0" : "=m" (cw));
switch (cw & X87_RMODE_MASK) {
case X87_TONEAREST:
- return FE_TONEAREST;
+ return CRT_FE_TONEAREST;
case X87_DOWNWARD:
- return FE_DOWNWARD;
+ return CRT_FE_DOWNWARD;
case X87_UPWARD:
- return FE_UPWARD;
+ return CRT_FE_UPWARD;
case X87_TOWARDZERO:
- return FE_TOWARDZERO;
+ return CRT_FE_TOWARDZERO;
}
- return FE_TONEAREST;
+ return CRT_FE_TONEAREST;
}
int __fe_raise_inexact() {
diff --git a/compiler-rt/lib/builtins/int_lib.h b/compiler-rt/lib/builtins/int_lib.h
index 991c4a99ea6e..fb791ebc42eb 100644
--- a/compiler-rt/lib/builtins/int_lib.h
+++ b/compiler-rt/lib/builtins/int_lib.h
@@ -70,12 +70,21 @@
#error Unsupported target
#endif
-#if defined(__NetBSD__) && (defined(_KERNEL) || defined(_STANDALONE))
+#if (defined(__FreeBSD__) || defined(__NetBSD__)) && \
+ (defined(_KERNEL) || defined(_STANDALONE))
//
// Kernel and boot environment can't use normal headers,
// so use the equivalent system headers.
+// NB: FreeBSD (and OpenBSD) deprecate machine/limits.h in
+// favour of sys/limits.h, so prefer sys/limits.h when it is
+// available, but fall back on machine/limits.h since NetBSD
+// only provides the latter.
//
+#if defined(__has_include) && __has_include(<sys/limits.h>)
+#include <sys/limits.h>
+#else
#include <machine/limits.h>
+#endif
#include <sys/stdint.h>
#include <sys/types.h>
#else
@@ -144,6 +153,19 @@ int __inline __builtin_clzll(uint64_t value) {
#endif
#define __builtin_clzl __builtin_clzll
+
+bool __inline __builtin_sadd_overflow(int x, int y, int *result) {
+ if ((x < 0) != (y < 0)) {
+ *result = x + y;
+ return false;
+ }
+ int tmp = (unsigned int)x + (unsigned int)y;
+ if ((tmp < 0) != (x < 0))
+ return true;
+ *result = tmp;
+ return false;
+}
+
#endif // defined(_MSC_VER) && !defined(__clang__)
#endif // INT_LIB_H
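The __builtin_sadd_overflow fallback added for MSVC (and used by the new
__compiler_rt_scalbnX when saturating the exponent) relies on two facts:
additions of opposite-sign operands can never overflow, and a same-sign
addition overflows exactly when the wrapped result has the opposite sign.
A standalone sketch with a few checks (sadd_overflow_sketch is an
illustrative name; the unsigned-to-int conversion is implementation-defined
but behaves as two's complement on the targets this path serves):

    #include <assert.h>
    #include <limits.h>
    #include <stdbool.h>

    static bool sadd_overflow_sketch(int x, int y, int *result) {
      if ((x < 0) != (y < 0)) {  // opposite signs: cannot overflow
        *result = x + y;
        return false;
      }
      int tmp = (int)((unsigned int)x + (unsigned int)y);  // wrapped sum
      if ((tmp < 0) != (x < 0))  // sign flipped: overflow happened
        return true;
      *result = tmp;
      return false;
    }

    int main(void) {
      int r;
      assert(!sadd_overflow_sketch(1, 2, &r) && r == 3);
      assert(sadd_overflow_sketch(INT_MAX, 1, &r));         // positive overflow
      assert(sadd_overflow_sketch(INT_MIN, -1, &r));        // negative overflow
      assert(!sadd_overflow_sketch(INT_MAX, INT_MIN, &r));  // mixed signs: fine
      return 0;
    }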
diff --git a/compiler-rt/lib/builtins/int_math.h b/compiler-rt/lib/builtins/int_math.h
index 58d8990f31b1..48b9580f5961 100644
--- a/compiler-rt/lib/builtins/int_math.h
+++ b/compiler-rt/lib/builtins/int_math.h
@@ -78,12 +78,8 @@
#endif
#if defined(_MSC_VER) && !defined(__clang__)
-#define crt_fmax(x, y) __max((x), (y))
-#define crt_fmaxf(x, y) __max((x), (y))
#define crt_fmaxl(x, y) __max((x), (y))
#else
-#define crt_fmax(x, y) __builtin_fmax((x), (y))
-#define crt_fmaxf(x, y) __builtin_fmaxf((x), (y))
#define crt_fmaxl(x, y) __builtin_fmaxl((x), (y))
#endif
@@ -94,12 +90,8 @@
#endif
#if defined(_MSC_VER) && !defined(__clang__)
-#define crt_scalbn(x, y) scalbn((x), (y))
-#define crt_scalbnf(x, y) scalbnf((x), (y))
#define crt_scalbnl(x, y) scalbnl((x), (y))
#else
-#define crt_scalbn(x, y) __builtin_scalbn((x), (y))
-#define crt_scalbnf(x, y) __builtin_scalbnf((x), (y))
#define crt_scalbnl(x, y) __builtin_scalbnl((x), (y))
#endif
diff --git a/compiler-rt/lib/builtins/int_types.h b/compiler-rt/lib/builtins/int_types.h
index 705355a4840d..7a72de480676 100644
--- a/compiler-rt/lib/builtins/int_types.h
+++ b/compiler-rt/lib/builtins/int_types.h
@@ -121,6 +121,15 @@ static __inline tu_int make_tu(du_int h, du_int l) {
#endif // CRT_HAS_128BIT
+// FreeBSD's boot environment does not support using floating-point and poisons
+// the float and double keywords.
+#if defined(__FreeBSD__) && defined(_STANDALONE)
+#define CRT_HAS_FLOATING_POINT 0
+#else
+#define CRT_HAS_FLOATING_POINT 1
+#endif
+
+#if CRT_HAS_FLOATING_POINT
typedef union {
su_int u;
float f;
@@ -130,6 +139,7 @@ typedef union {
udwords u;
double f;
} double_bits;
+#endif
typedef struct {
#if _YUGA_LITTLE_ENDIAN
@@ -155,6 +165,7 @@ typedef struct {
#define HAS_80_BIT_LONG_DOUBLE 0
#endif
+#if CRT_HAS_FLOATING_POINT
typedef union {
uqwords u;
long double f;
@@ -183,4 +194,5 @@ typedef struct {
#define COMPLEX_REAL(x) (x).real
#define COMPLEX_IMAGINARY(x) (x).imaginary
#endif
+#endif
#endif // INT_TYPES_H
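The unions now guarded by CRT_HAS_FLOATING_POINT exist so the builtins can
inspect and construct IEEE-754 values through their bit patterns instead of
calling libm. A small sketch of that kind of type punning (my_float_bits and
the constants are illustrative, not the actual compiler-rt definitions):

    #include <assert.h>
    #include <stdint.h>

    typedef union {
      uint32_t u;
      float f;
    } my_float_bits;

    int main(void) {
      my_float_bits b;
      b.f = 1.0f;
      assert(b.u == 0x3f800000u);  // sign 0, biased exponent 127, significand 0
      b.u |= 0x80000000u;          // set the sign bit in the representation
      assert(b.f == -1.0f);
      return 0;
    }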
diff --git a/compiler-rt/lib/builtins/int_util.c b/compiler-rt/lib/builtins/int_util.c
index 226a6e93440d..bbb735ccf279 100644
--- a/compiler-rt/lib/builtins/int_util.c
+++ b/compiler-rt/lib/builtins/int_util.c
@@ -33,35 +33,31 @@ void __compilerrt_abort_impl(const char *file, int line, const char *function) {
NORETURN extern void __assert_rtn(const char *func, const char *file, int line,
const char *message);
-#ifndef _WIN32
__attribute__((weak))
__attribute__((visibility("hidden")))
-#endif
void __compilerrt_abort_impl(const char *file, int line, const char *function) {
__assert_rtn(function, file, line, "libcompiler_rt abort");
}
-#elif __Fuchsia__
-
-#ifndef _WIN32
-__attribute__((weak))
-__attribute__((visibility("hidden")))
-#endif
-void __compilerrt_abort_impl(const char *file, int line, const char *function) {
- __builtin_trap();
-}
-
#else
-// Get the system definition of abort()
+#ifdef _WIN32
#include <stdlib.h>
+#endif
#ifndef _WIN32
__attribute__((weak))
__attribute__((visibility("hidden")))
#endif
void __compilerrt_abort_impl(const char *file, int line, const char *function) {
+#if !__STDC_HOSTED__
+ // Avoid depending on libc when compiling with -ffreestanding.
+ __builtin_trap();
+#elif defined(_WIN32)
abort();
+#else
+ __builtin_abort();
+#endif
}
#endif
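The reworked fallback keeps __compilerrt_abort_impl usable in freestanding
builds by trapping instead of calling into libc. A standalone sketch of the
same dispatch under a hypothetical name (die_sketch is illustrative only):

    #include <stdio.h>
    #include <stdlib.h>

    static void die_sketch(void) {
    #if !__STDC_HOSTED__
      __builtin_trap();      // freestanding: no libc to call
    #elif defined(_WIN32)
      abort();               // hosted Windows: use <stdlib.h> abort
    #else
      __builtin_abort();     // hosted elsewhere: let the compiler expand abort
    #endif
    }

    int main(void) {
      fputs("calling die_sketch()\n", stderr);
      die_sketch();  // never returns
      return 0;
    }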
diff --git a/compiler-rt/lib/builtins/ppc/atomic.exp b/compiler-rt/lib/builtins/ppc/atomic.exp
new file mode 100644
index 000000000000..98f759de982f
--- /dev/null
+++ b/compiler-rt/lib/builtins/ppc/atomic.exp
@@ -0,0 +1,41 @@
+__atomic_compare_exchange
+__atomic_compare_exchange_1
+__atomic_compare_exchange_2
+__atomic_compare_exchange_4
+__atomic_compare_exchange_8
+__atomic_exchange
+__atomic_exchange_1
+__atomic_exchange_2
+__atomic_exchange_4
+__atomic_exchange_8
+__atomic_fetch_add_1
+__atomic_fetch_add_2
+__atomic_fetch_add_4
+__atomic_fetch_add_8
+__atomic_fetch_and_1
+__atomic_fetch_and_2
+__atomic_fetch_and_4
+__atomic_fetch_and_8
+__atomic_fetch_or_1
+__atomic_fetch_or_2
+__atomic_fetch_or_4
+__atomic_fetch_or_8
+__atomic_fetch_sub_1
+__atomic_fetch_sub_2
+__atomic_fetch_sub_4
+__atomic_fetch_sub_8
+__atomic_fetch_xor_1
+__atomic_fetch_xor_2
+__atomic_fetch_xor_4
+__atomic_fetch_xor_8
+__atomic_is_lock_free
+__atomic_load
+__atomic_load_1
+__atomic_load_2
+__atomic_load_4
+__atomic_load_8
+__atomic_store
+__atomic_store_1
+__atomic_store_2
+__atomic_store_4
+__atomic_store_8
diff --git a/compiler-rt/lib/builtins/ppc/divtc3.c b/compiler-rt/lib/builtins/ppc/divtc3.c
index afaccf5a8fd6..671bd4ddbbd7 100644
--- a/compiler-rt/lib/builtins/ppc/divtc3.c
+++ b/compiler-rt/lib/builtins/ppc/divtc3.c
@@ -27,15 +27,16 @@ long double _Complex __divtc3(long double a, long double b, long double c,
int ilogbw = 0;
const double logbw =
- __compiler_rt_logb(crt_fmax(crt_fabs(cDD.s.hi), crt_fabs(dDD.s.hi)));
+ __compiler_rt_logb(__compiler_rt_fmax(crt_fabs(cDD.s.hi),
+ crt_fabs(dDD.s.hi)));
if (crt_isfinite(logbw)) {
ilogbw = (int)logbw;
- cDD.s.hi = crt_scalbn(cDD.s.hi, -ilogbw);
- cDD.s.lo = crt_scalbn(cDD.s.lo, -ilogbw);
- dDD.s.hi = crt_scalbn(dDD.s.hi, -ilogbw);
- dDD.s.lo = crt_scalbn(dDD.s.lo, -ilogbw);
+ cDD.s.hi = __compiler_rt_scalbn(cDD.s.hi, -ilogbw);
+ cDD.s.lo = __compiler_rt_scalbn(cDD.s.lo, -ilogbw);
+ dDD.s.hi = __compiler_rt_scalbn(dDD.s.hi, -ilogbw);
+ dDD.s.lo = __compiler_rt_scalbn(dDD.s.lo, -ilogbw);
}
const long double denom =
@@ -48,10 +49,10 @@ long double _Complex __divtc3(long double a, long double b, long double c,
DD real = {.ld = __gcc_qdiv(realNumerator, denom)};
DD imag = {.ld = __gcc_qdiv(imagNumerator, denom)};
- real.s.hi = crt_scalbn(real.s.hi, -ilogbw);
- real.s.lo = crt_scalbn(real.s.lo, -ilogbw);
- imag.s.hi = crt_scalbn(imag.s.hi, -ilogbw);
- imag.s.lo = crt_scalbn(imag.s.lo, -ilogbw);
+ real.s.hi = __compiler_rt_scalbn(real.s.hi, -ilogbw);
+ real.s.lo = __compiler_rt_scalbn(real.s.lo, -ilogbw);
+ imag.s.hi = __compiler_rt_scalbn(imag.s.hi, -ilogbw);
+ imag.s.lo = __compiler_rt_scalbn(imag.s.lo, -ilogbw);
if (crt_isnan(real.s.hi) && crt_isnan(imag.s.hi)) {
DD aDD = {.ld = a};
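The __divtc3 hunks above only swap the crt_scalbn/crt_fmax libm macros for
the new inline helpers; the scaling algorithm is unchanged. The idea is to
scale the divisor c+di by 2^-ilogbw so its components are near 1, divide,
and then apply the same power-of-two scale to the quotient, which avoids
spurious overflow or underflow in c*c + d*d. A sketch for plain doubles
(div_complex_sketch is an illustrative name; the real routine works on the
PowerPC double-double format via __gcc_qdiv):

    #include <assert.h>
    #include <math.h>

    static void div_complex_sketch(double a, double b, double c, double d,
                                   double *re, double *im) {
      int ilogbw = 0;
      double logbw = logb(fmax(fabs(c), fabs(d)));
      if (isfinite(logbw)) {
        ilogbw = (int)logbw;
        c = scalbn(c, -ilogbw);  // c * 2^-ilogbw
        d = scalbn(d, -ilogbw);
      }
      double denom = c * c + d * d;
      *re = scalbn((a * c + b * d) / denom, -ilogbw);
      *im = scalbn((b * c - a * d) / denom, -ilogbw);
    }

    int main(void) {
      double re, im;
      // (1 + 2i) / (3 + 4i) = (11 + 2i) / 25 = 0.44 + 0.08i
      div_complex_sketch(1, 2, 3, 4, &re, &im);
      assert(fabs(re - 0.44) < 1e-12 && fabs(im - 0.08) < 1e-12);
      return 0;
    }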
diff --git a/compiler-rt/lib/builtins/riscv/restore.S b/compiler-rt/lib/builtins/riscv/restore.S
new file mode 100644
index 000000000000..12f0d3365655
--- /dev/null
+++ b/compiler-rt/lib/builtins/riscv/restore.S
@@ -0,0 +1,166 @@
+//===-- restore.S - restore up to 12 callee-save registers ----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Multiple entry points depending on number of registers to restore
+//
+//===----------------------------------------------------------------------===//
+
+// All of the entry points are in the same section since we rely on many of
+// them falling through into each other and don't want the linker to
+// accidentally split them up, garbage collect, or reorder them.
+//
+// The entry points are grouped up into 2s for rv64 and 4s for rv32 since this
+// is the minimum grouping which will maintain the required 16-byte stack
+// alignment.
+
+ .text
+
+#if __riscv_xlen == 32
+
+ .globl __riscv_restore_12
+ .type __riscv_restore_12,@function
+__riscv_restore_12:
+ lw s11, 12(sp)
+ addi sp, sp, 16
+ // fallthrough into __riscv_restore_11/10/9/8
+
+ .globl __riscv_restore_11
+ .type __riscv_restore_11,@function
+ .globl __riscv_restore_10
+ .type __riscv_restore_10,@function
+ .globl __riscv_restore_9
+ .type __riscv_restore_9,@function
+ .globl __riscv_restore_8
+ .type __riscv_restore_8,@function
+__riscv_restore_11:
+__riscv_restore_10:
+__riscv_restore_9:
+__riscv_restore_8:
+ lw s10, 0(sp)
+ lw s9, 4(sp)
+ lw s8, 8(sp)
+ lw s7, 12(sp)
+ addi sp, sp, 16
+ // fallthrough into __riscv_restore_7/6/5/4
+
+ .globl __riscv_restore_7
+ .type __riscv_restore_7,@function
+ .globl __riscv_restore_6
+ .type __riscv_restore_6,@function
+ .globl __riscv_restore_5
+ .type __riscv_restore_5,@function
+ .globl __riscv_restore_4
+ .type __riscv_restore_4,@function
+__riscv_restore_7:
+__riscv_restore_6:
+__riscv_restore_5:
+__riscv_restore_4:
+ lw s6, 0(sp)
+ lw s5, 4(sp)
+ lw s4, 8(sp)
+ lw s3, 12(sp)
+ addi sp, sp, 16
+ // fallthrough into __riscv_restore_3/2/1/0
+
+ .globl __riscv_restore_3
+ .type __riscv_restore_3,@function
+ .globl __riscv_restore_2
+ .type __riscv_restore_2,@function
+ .globl __riscv_restore_1
+ .type __riscv_restore_1,@function
+ .globl __riscv_restore_0
+ .type __riscv_restore_0,@function
+__riscv_restore_3:
+__riscv_restore_2:
+__riscv_restore_1:
+__riscv_restore_0:
+ lw s2, 0(sp)
+ lw s1, 4(sp)
+ lw s0, 8(sp)
+ lw ra, 12(sp)
+ addi sp, sp, 16
+ ret
+
+#elif __riscv_xlen == 64
+
+ .globl __riscv_restore_12
+ .type __riscv_restore_12,@function
+__riscv_restore_12:
+ ld s11, 8(sp)
+ addi sp, sp, 16
+ // fallthrough into __riscv_restore_11/10/9/8
+
+ .globl __riscv_restore_11
+ .type __riscv_restore_11,@function
+ .globl __riscv_restore_10
+ .type __riscv_restore_10,@function
+__riscv_restore_11:
+__riscv_restore_10:
+ ld s10, 0(sp)
+ ld s9, 8(sp)
+ addi sp, sp, 16
+ // fallthrough into __riscv_restore_9/8
+
+ .globl __riscv_restore_9
+ .type __riscv_restore_9,@function
+ .globl __riscv_restore_8
+ .type __riscv_restore_8,@function
+__riscv_restore_9:
+__riscv_restore_8:
+ ld s8, 0(sp)
+ ld s7, 8(sp)
+ addi sp, sp, 16
+ // fallthrough into __riscv_restore_7/6
+
+ .globl __riscv_restore_7
+ .type __riscv_restore_7,@function
+ .globl __riscv_restore_6
+ .type __riscv_restore_6,@function
+__riscv_restore_7:
+__riscv_restore_6:
+ ld s6, 0(sp)
+ ld s5, 8(sp)
+ addi sp, sp, 16
+ // fallthrough into __riscv_restore_5/4
+
+ .globl __riscv_restore_5
+ .type __riscv_restore_5,@function
+ .globl __riscv_restore_4
+ .type __riscv_restore_4,@function
+__riscv_restore_5:
+__riscv_restore_4:
+ ld s4, 0(sp)
+ ld s3, 8(sp)
+ addi sp, sp, 16
+ // fallthrough into __riscv_restore_3/2
+
+ .globl __riscv_restore_3
+ .type __riscv_restore_3,@function
+ .globl __riscv_restore_2
+ .type __riscv_restore_2,@function
+ .globl __riscv_restore_1
+ .type __riscv_restore_1,@function
+ .globl __riscv_restore_0
+ .type __riscv_restore_0,@function
+__riscv_restore_3:
+__riscv_restore_2:
+ ld s2, 0(sp)
+ ld s1, 8(sp)
+ addi sp, sp, 16
+ // fallthrough into __riscv_restore_1/0
+
+__riscv_restore_1:
+__riscv_restore_0:
+ ld s0, 0(sp)
+ ld ra, 8(sp)
+ addi sp, sp, 16
+ ret
+
+#else
+# error "xlen must be 32 or 64 for save-restore implementation
+#endif
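These routines back the RISC-V -msave-restore code-size option: instead of
inlining register spills, a function prologue calls the matching
__riscv_save_N routine with the return address in t0 (which is why the save
routines end in "jr t0"), and the epilogue tail-jumps to __riscv_restore_N,
which reloads the registers and returns through the restored ra. A rough
C-level illustration of when this kicks in; the exact assembly is the
compiler's choice, so the comment below is a sketch, not guaranteed output:

    // example.c - a function with enough live values to need callee-saved
    // registers. Built with "clang --target=riscv32 -Os -msave-restore", the
    // prologue is expected to become "call t0, __riscv_save_N" and the
    // epilogue "tail __riscv_restore_N" instead of inline sw/lw pairs.
    #include <assert.h>

    static int square(int x) { return x * x; }

    int sum4(int (*f)(int), int a, int b, int c, int d) {
      return f(a) + f(b) + f(c) + f(d);  // a..d stay live across the calls
    }

    int main(void) {
      assert(sum4(square, 1, 2, 3, 4) == 30);
      return 0;
    }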
diff --git a/compiler-rt/lib/builtins/riscv/save.S b/compiler-rt/lib/builtins/riscv/save.S
new file mode 100644
index 000000000000..d811bf584fc3
--- /dev/null
+++ b/compiler-rt/lib/builtins/riscv/save.S
@@ -0,0 +1,184 @@
+//===-- save.S - save up to 12 callee-saved registers ---------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Multiple entry points depending on number of registers to save
+//
+//===----------------------------------------------------------------------===//
+
+// The entry points are grouped up into 2s for rv64 and 4s for rv32 since this
+// is the minimum grouping which will maintain the required 16-byte stack
+// alignment.
+
+ .text
+
+#if __riscv_xlen == 32
+
+ .globl __riscv_save_12
+ .type __riscv_save_12,@function
+__riscv_save_12:
+ addi sp, sp, -64
+ mv t1, zero
+ sw s11, 12(sp)
+ j .Lriscv_save_11_8
+
+ .globl __riscv_save_11
+ .type __riscv_save_11,@function
+ .globl __riscv_save_10
+ .type __riscv_save_10,@function
+ .globl __riscv_save_9
+ .type __riscv_save_9,@function
+ .globl __riscv_save_8
+ .type __riscv_save_8,@function
+__riscv_save_11:
+__riscv_save_10:
+__riscv_save_9:
+__riscv_save_8:
+ addi sp, sp, -64
+ li t1, 16
+.Lriscv_save_11_8:
+ sw s10, 16(sp)
+ sw s9, 20(sp)
+ sw s8, 24(sp)
+ sw s7, 28(sp)
+ j .Lriscv_save_7_4
+
+ .globl __riscv_save_7
+ .type __riscv_save_7,@function
+ .globl __riscv_save_6
+ .type __riscv_save_6,@function
+ .globl __riscv_save_5
+ .type __riscv_save_5,@function
+ .globl __riscv_save_4
+ .type __riscv_save_4,@function
+__riscv_save_7:
+__riscv_save_6:
+__riscv_save_5:
+__riscv_save_4:
+ addi sp, sp, -64
+ li t1, 32
+.Lriscv_save_7_4:
+ sw s6, 32(sp)
+ sw s5, 36(sp)
+ sw s4, 40(sp)
+ sw s3, 44(sp)
+ sw s2, 48(sp)
+ sw s1, 52(sp)
+ sw s0, 56(sp)
+ sw ra, 60(sp)
+ add sp, sp, t1
+ jr t0
+
+ .globl __riscv_save_3
+ .type __riscv_save_3,@function
+ .globl __riscv_save_2
+ .type __riscv_save_2,@function
+ .globl __riscv_save_1
+ .type __riscv_save_1,@function
+ .globl __riscv_save_0
+ .type __riscv_save_0,@function
+__riscv_save_3:
+__riscv_save_2:
+__riscv_save_1:
+__riscv_save_0:
+ addi sp, sp, -16
+ sw s2, 0(sp)
+ sw s1, 4(sp)
+ sw s0, 8(sp)
+ sw ra, 12(sp)
+ jr t0
+
+#elif __riscv_xlen == 64
+
+ .globl __riscv_save_12
+ .type __riscv_save_12,@function
+__riscv_save_12:
+ addi sp, sp, -112
+ mv t1, zero
+ sd s11, 8(sp)
+ j .Lriscv_save_11_10
+
+ .globl __riscv_save_11
+ .type __riscv_save_11,@function
+ .globl __riscv_save_10
+ .type __riscv_save_10,@function
+__riscv_save_11:
+__riscv_save_10:
+ addi sp, sp, -112
+ li t1, 16
+.Lriscv_save_11_10:
+ sd s10, 16(sp)
+ sd s9, 24(sp)
+ j .Lriscv_save_9_8
+
+ .globl __riscv_save_9
+ .type __riscv_save_9,@function
+ .globl __riscv_save_8
+ .type __riscv_save_8,@function
+__riscv_save_9:
+__riscv_save_8:
+ addi sp, sp, -112
+ li t1, 32
+.Lriscv_save_9_8:
+ sd s8, 32(sp)
+ sd s7, 40(sp)
+ j .Lriscv_save_7_6
+
+ .globl __riscv_save_7
+ .type __riscv_save_7,@function
+ .globl __riscv_save_6
+ .type __riscv_save_6,@function
+__riscv_save_7:
+__riscv_save_6:
+ addi sp, sp, -112
+ li t1, 48
+.Lriscv_save_7_6:
+ sd s6, 48(sp)
+ sd s5, 56(sp)
+ j .Lriscv_save_5_4
+
+ .globl __riscv_save_5
+ .type __riscv_save_5,@function
+ .globl __riscv_save_4
+ .type __riscv_save_4,@function
+__riscv_save_5:
+__riscv_save_4:
+ addi sp, sp, -112
+ li t1, 64
+.Lriscv_save_5_4:
+ sd s4, 64(sp)
+ sd s3, 72(sp)
+ j .Lriscv_save_3_2
+
+ .globl __riscv_save_3
+ .type __riscv_save_3,@function
+ .globl __riscv_save_2
+ .type __riscv_save_2,@function
+__riscv_save_3:
+__riscv_save_2:
+ addi sp, sp, -112
+ li t1, 80
+.Lriscv_save_3_2:
+ sd s2, 80(sp)
+ sd s1, 88(sp)
+ sd s0, 96(sp)
+ sd ra, 104(sp)
+ add sp, sp, t1
+ jr t0
+
+ .globl __riscv_save_1
+ .type __riscv_save_1,@function
+ .globl __riscv_save_0
+ .type __riscv_save_0,@function
+__riscv_save_1:
+__riscv_save_0:
+  addi sp, sp, -16
+ sd s0, 0(sp)
+ sd ra, 8(sp)
+ jr t0
+
+#else
+# error "xlen must be 32 or 64 for save-restore implementation
+#endif
diff --git a/compiler-rt/lib/cfi/cfi.cpp b/compiler-rt/lib/cfi/cfi.cpp
index b75c72b215c2..f691cfb94cfc 100644
--- a/compiler-rt/lib/cfi/cfi.cpp
+++ b/compiler-rt/lib/cfi/cfi.cpp
@@ -322,14 +322,14 @@ void InitShadow() {
THREADLOCAL int in_loader;
BlockingMutex shadow_update_lock(LINKER_INITIALIZED);
-void EnterLoader() {
+void EnterLoader() NO_THREAD_SAFETY_ANALYSIS {
if (in_loader == 0) {
shadow_update_lock.Lock();
}
++in_loader;
}
-void ExitLoader() {
+void ExitLoader() NO_THREAD_SAFETY_ANALYSIS {
CHECK(in_loader > 0);
--in_loader;
UpdateShadow();
diff --git a/compiler-rt/lib/cfi/cfi_blacklist.txt b/compiler-rt/lib/cfi/cfi_ignorelist.txt
index 4a0f03949ca8..4a0f03949ca8 100644
--- a/compiler-rt/lib/cfi/cfi_blacklist.txt
+++ b/compiler-rt/lib/cfi/cfi_ignorelist.txt
diff --git a/compiler-rt/lib/dfsan/dfsan.cpp b/compiler-rt/lib/dfsan/dfsan.cpp
index c17bfe0ccb32..6f9ae141d7ab 100644
--- a/compiler-rt/lib/dfsan/dfsan.cpp
+++ b/compiler-rt/lib/dfsan/dfsan.cpp
@@ -20,6 +20,10 @@
#include "dfsan/dfsan.h"
+#include "dfsan/dfsan_chained_origin_depot.h"
+#include "dfsan/dfsan_flags.h"
+#include "dfsan/dfsan_origin.h"
+#include "dfsan/dfsan_thread.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_file.h"
@@ -27,216 +31,96 @@
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
using namespace __dfsan;
-typedef atomic_uint16_t atomic_dfsan_label;
-static const dfsan_label kInitializingLabel = -1;
-
-static const uptr kNumLabels = 1 << (sizeof(dfsan_label) * 8);
-
-static atomic_dfsan_label __dfsan_last_label;
-static dfsan_label_info __dfsan_label_info[kNumLabels];
-
Flags __dfsan::flags_data;
// The size of TLS variables. These constants must be kept in sync with the ones
// in DataFlowSanitizer.cpp.
static const int kDFsanArgTlsSize = 800;
static const int kDFsanRetvalTlsSize = 800;
+static const int kDFsanArgOriginTlsSize = 800;
SANITIZER_INTERFACE_ATTRIBUTE THREADLOCAL u64
__dfsan_retval_tls[kDFsanRetvalTlsSize / sizeof(u64)];
+SANITIZER_INTERFACE_ATTRIBUTE THREADLOCAL u32 __dfsan_retval_origin_tls;
SANITIZER_INTERFACE_ATTRIBUTE THREADLOCAL u64
__dfsan_arg_tls[kDFsanArgTlsSize / sizeof(u64)];
-
-SANITIZER_INTERFACE_ATTRIBUTE uptr __dfsan_shadow_ptr_mask;
+SANITIZER_INTERFACE_ATTRIBUTE THREADLOCAL u32
+ __dfsan_arg_origin_tls[kDFsanArgOriginTlsSize / sizeof(u32)];
+
+// Instrumented code may set this value according to the -dfsan-track-origins
+// option.
+// * undefined or 0: do not track origins.
+// * 1: track origins at memory store operations.
+// * 2: track origins at memory load and store operations.
+// TODO: track callsites.
+extern "C" SANITIZER_WEAK_ATTRIBUTE const int __dfsan_track_origins;
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE int dfsan_get_track_origins() {
+ return &__dfsan_track_origins ? __dfsan_track_origins : 0;
+}
// On Linux/x86_64, memory is laid out as follows:
//
-// +--------------------+ 0x800000000000 (top of memory)
-// | application memory |
-// +--------------------+ 0x700000008000 (kAppAddr)
-// | |
-// | unused |
-// | |
-// +--------------------+ 0x200200000000 (kUnusedAddr)
-// | union table |
-// +--------------------+ 0x200000000000 (kUnionTableAddr)
-// | shadow memory |
-// +--------------------+ 0x000000010000 (kShadowAddr)
-// | reserved by kernel |
-// +--------------------+ 0x000000000000
-//
-// To derive a shadow memory address from an application memory address,
-// bits 44-46 are cleared to bring the address into the range
-// [0x000000008000,0x100000000000). Then the address is shifted left by 1 to
-// account for the double byte representation of shadow labels and move the
-// address into the shadow memory range. See the function shadow_for below.
-
-// On Linux/MIPS64, memory is laid out as follows:
-//
-// +--------------------+ 0x10000000000 (top of memory)
-// | application memory |
-// +--------------------+ 0xF000008000 (kAppAddr)
-// | |
-// | unused |
-// | |
-// +--------------------+ 0x2200000000 (kUnusedAddr)
-// | union table |
-// +--------------------+ 0x2000000000 (kUnionTableAddr)
-// | shadow memory |
-// +--------------------+ 0x0000010000 (kShadowAddr)
-// | reserved by kernel |
-// +--------------------+ 0x0000000000
-
-// On Linux/AArch64 (39-bit VMA), memory is laid out as follow:
+// +--------------------+ 0x800000000000 (top of memory)
+// | application 3 |
+// +--------------------+ 0x700000000000
+// | invalid |
+// +--------------------+ 0x610000000000
+// | origin 1 |
+// +--------------------+ 0x600000000000
+// | application 2 |
+// +--------------------+ 0x510000000000
+// | shadow 1 |
+// +--------------------+ 0x500000000000
+// | invalid |
+// +--------------------+ 0x400000000000
+// | origin 3 |
+// +--------------------+ 0x300000000000
+// | shadow 3 |
+// +--------------------+ 0x200000000000
+// | origin 2 |
+// +--------------------+ 0x110000000000
+// | invalid |
+// +--------------------+ 0x100000000000
+// | shadow 2 |
+// +--------------------+ 0x010000000000
+// | application 1 |
+// +--------------------+ 0x000000000000
//
-// +--------------------+ 0x8000000000 (top of memory)
-// | application memory |
-// +--------------------+ 0x7000008000 (kAppAddr)
-// | |
-// | unused |
-// | |
-// +--------------------+ 0x1200000000 (kUnusedAddr)
-// | union table |
-// +--------------------+ 0x1000000000 (kUnionTableAddr)
-// | shadow memory |
-// +--------------------+ 0x0000010000 (kShadowAddr)
-// | reserved by kernel |
-// +--------------------+ 0x0000000000
-
-// On Linux/AArch64 (42-bit VMA), memory is laid out as follow:
-//
-// +--------------------+ 0x40000000000 (top of memory)
-// | application memory |
-// +--------------------+ 0x3ff00008000 (kAppAddr)
-// | |
-// | unused |
-// | |
-// +--------------------+ 0x1200000000 (kUnusedAddr)
-// | union table |
-// +--------------------+ 0x8000000000 (kUnionTableAddr)
-// | shadow memory |
-// +--------------------+ 0x0000010000 (kShadowAddr)
-// | reserved by kernel |
-// +--------------------+ 0x0000000000
-
-// On Linux/AArch64 (48-bit VMA), memory is laid out as follow:
-//
-// +--------------------+ 0x1000000000000 (top of memory)
-// | application memory |
-// +--------------------+ 0xffff00008000 (kAppAddr)
-// | unused |
-// +--------------------+ 0xaaaab0000000 (top of PIE address)
-// | application PIE |
-// +--------------------+ 0xaaaaa0000000 (top of PIE address)
-// | |
-// | unused |
-// | |
-// +--------------------+ 0x1200000000 (kUnusedAddr)
-// | union table |
-// +--------------------+ 0x8000000000 (kUnionTableAddr)
-// | shadow memory |
-// +--------------------+ 0x0000010000 (kShadowAddr)
-// | reserved by kernel |
-// +--------------------+ 0x0000000000
-
-typedef atomic_dfsan_label dfsan_union_table_t[kNumLabels][kNumLabels];
-
-#ifdef DFSAN_RUNTIME_VMA
-// Runtime detected VMA size.
-int __dfsan::vmaSize;
-#endif
-
-static uptr UnusedAddr() {
- return UnionTableAddr() + sizeof(dfsan_union_table_t);
-}
-
-static atomic_dfsan_label *union_table(dfsan_label l1, dfsan_label l2) {
- return &(*(dfsan_union_table_t *) UnionTableAddr())[l1][l2];
-}
-
-// Checks we do not run out of labels.
-static void dfsan_check_label(dfsan_label label) {
- if (label == kInitializingLabel) {
- Report("FATAL: DataFlowSanitizer: out of labels\n");
- Die();
- }
-}
-
-// Resolves the union of two unequal labels. Nonequality is a precondition for
-// this function (the instrumentation pass inlines the equality test).
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE
-dfsan_label __dfsan_union(dfsan_label l1, dfsan_label l2) {
- DCHECK_NE(l1, l2);
-
- if (l1 == 0)
- return l2;
- if (l2 == 0)
- return l1;
-
- // If no labels have been created, yet l1 and l2 are non-zero, we are using
- // fast16labels mode.
- if (atomic_load(&__dfsan_last_label, memory_order_relaxed) == 0)
- return l1 | l2;
-
- if (l1 > l2)
- Swap(l1, l2);
-
- atomic_dfsan_label *table_ent = union_table(l1, l2);
- // We need to deal with the case where two threads concurrently request
- // a union of the same pair of labels. If the table entry is uninitialized,
- // (i.e. 0) use a compare-exchange to set the entry to kInitializingLabel
- // (i.e. -1) to mark that we are initializing it.
- dfsan_label label = 0;
- if (atomic_compare_exchange_strong(table_ent, &label, kInitializingLabel,
- memory_order_acquire)) {
- // Check whether l2 subsumes l1. We don't need to check whether l1
- // subsumes l2 because we are guaranteed here that l1 < l2, and (at least
- // in the cases we are interested in) a label may only subsume labels
- // created earlier (i.e. with a lower numerical value).
- if (__dfsan_label_info[l2].l1 == l1 ||
- __dfsan_label_info[l2].l2 == l1) {
- label = l2;
- } else {
- label =
- atomic_fetch_add(&__dfsan_last_label, 1, memory_order_relaxed) + 1;
- dfsan_check_label(label);
- __dfsan_label_info[label].l1 = l1;
- __dfsan_label_info[label].l2 = l2;
- }
- atomic_store(table_ent, label, memory_order_release);
- } else if (label == kInitializingLabel) {
- // Another thread is initializing the entry. Wait until it is finished.
- do {
- internal_sched_yield();
- label = atomic_load(table_ent, memory_order_acquire);
- } while (label == kInitializingLabel);
- }
- return label;
-}
+// MEM_TO_SHADOW(mem) = mem ^ 0x500000000000
+// SHADOW_TO_ORIGIN(shadow) = shadow + 0x100000000000
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
dfsan_label __dfsan_union_load(const dfsan_label *ls, uptr n) {
dfsan_label label = ls[0];
- for (uptr i = 1; i != n; ++i) {
- dfsan_label next_label = ls[i];
- if (label != next_label)
- label = __dfsan_union(label, next_label);
- }
- return label;
-}
-
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE
-dfsan_label __dfsan_union_load_fast16labels(const dfsan_label *ls, uptr n) {
- dfsan_label label = ls[0];
for (uptr i = 1; i != n; ++i)
label |= ls[i];
return label;
}
+// Return the union of all the n labels from addr in the high 32 bits, and the
+// origin of the first taint byte in the low 32 bits.
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE u64
+__dfsan_load_label_and_origin(const void *addr, uptr n) {
+ dfsan_label label = 0;
+ u64 ret = 0;
+ uptr p = (uptr)addr;
+ dfsan_label *s = shadow_for((void *)p);
+ for (uptr i = 0; i < n; ++i) {
+ dfsan_label l = s[i];
+ if (!l)
+ continue;
+ label |= l;
+ if (!ret)
+ ret = *(dfsan_origin *)origin_for((void *)(p + i));
+ }
+ return ret | (u64)label << 32;
+}
+
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __dfsan_unimplemented(char *fname) {
if (flags().warn_unimplemented)
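The replacement x86-64 layout above is summarized by the two formulas
MEM_TO_SHADOW(mem) = mem ^ 0x500000000000 and SHADOW_TO_ORIGIN(shadow) =
shadow + 0x100000000000. A tiny standalone check of how an address in
"application 1" lands in "shadow 1" and "origin 1" (constants copied from the
layout comment; this is not the actual dfsan mapping implementation):

    #include <assert.h>
    #include <stdint.h>

    #define MEM_TO_SHADOW(mem) ((mem) ^ 0x500000000000ULL)
    #define SHADOW_TO_ORIGIN(shadow) ((shadow) + 0x100000000000ULL)

    int main(void) {
      uint64_t app = 0x000012345678ULL;            // inside "application 1"
      uint64_t shadow = MEM_TO_SHADOW(app);        // 0x500012345678, "shadow 1"
      uint64_t origin = SHADOW_TO_ORIGIN(shadow);  // 0x600012345678, "origin 1"
      assert(shadow == 0x500012345678ULL);
      assert(origin == 0x600012345678ULL);
      return 0;
    }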
@@ -261,30 +145,241 @@ __dfsan_vararg_wrapper(const char *fname) {
Die();
}
-// Like __dfsan_union, but for use from the client or custom functions. Hence
-// the equality comparison is done here before calling __dfsan_union.
+// Resolves the union of two labels.
SANITIZER_INTERFACE_ATTRIBUTE dfsan_label
dfsan_union(dfsan_label l1, dfsan_label l2) {
- if (l1 == l2)
- return l1;
- return __dfsan_union(l1, l2);
+ return l1 | l2;
}
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE
-dfsan_label dfsan_create_label(const char *desc, void *userdata) {
- dfsan_label label =
- atomic_fetch_add(&__dfsan_last_label, 1, memory_order_relaxed) + 1;
- dfsan_check_label(label);
- __dfsan_label_info[label].l1 = __dfsan_label_info[label].l2 = 0;
- __dfsan_label_info[label].desc = desc;
- __dfsan_label_info[label].userdata = userdata;
- return label;
+static const uptr kOriginAlign = sizeof(dfsan_origin);
+static const uptr kOriginAlignMask = ~(kOriginAlign - 1UL);
+
+static uptr OriginAlignUp(uptr u) {
+ return (u + kOriginAlign - 1) & kOriginAlignMask;
+}
+
+static uptr OriginAlignDown(uptr u) { return u & kOriginAlignMask; }
+
+// Return the origin of the first taint byte in the size bytes from the address
+// addr.
+static dfsan_origin GetOriginIfTainted(uptr addr, uptr size) {
+ for (uptr i = 0; i < size; ++i, ++addr) {
+ dfsan_label *s = shadow_for((void *)addr);
+
+ if (*s) {
+ // Validate address region.
+ CHECK(MEM_IS_SHADOW(s));
+ return *(dfsan_origin *)origin_for((void *)addr);
+ }
+ }
+ return 0;
+}
+
+// On platforms that only support the slow unwinder, the store context size
+// would need to be restricted to 1 (i.e. only the current pc), because the
+// slow unwinder, which is based on libunwind, is not async-signal-safe and
+// causes random freezes in forking applications as well as in signal
+// handlers. DFSan supports only Linux, so we do not restrict the store
+// context size.
+#define GET_STORE_STACK_TRACE_PC_BP(pc, bp) \
+ BufferedStackTrace stack; \
+ stack.Unwind(pc, bp, nullptr, true, flags().store_context_size);
+
+#define PRINT_CALLER_STACK_TRACE \
+ { \
+ GET_CALLER_PC_BP_SP; \
+ (void)sp; \
+ GET_STORE_STACK_TRACE_PC_BP(pc, bp) \
+ stack.Print(); \
+ }
+
+// Return a chain with the previous ID id and the current stack.
+// from_init = true if this is the first chain of an origin tracking path.
+static u32 ChainOrigin(u32 id, StackTrace *stack, bool from_init = false) {
+ // StackDepot is not async signal safe. Do not create new chains in a signal
+ // handler.
+ DFsanThread *t = GetCurrentThread();
+ if (t && t->InSignalHandler())
+ return id;
+
+  // As an optimization, the origin of an application byte is updated only
+  // when its shadow is non-zero. Because we are only interested in the
+  // origins of taint labels, it does not matter what origin a zero label
+  // has; this reduces memory write cost. MSan does a similar optimization.
+  // The following invariant should always hold, but may be violated by bugs,
+  // so we check it here to help debugging.
+ if (!from_init && id == 0 && flags().check_origin_invariant) {
+ Printf(" DFSan found invalid origin invariant\n");
+ PRINT_CALLER_STACK_TRACE
+ }
+
+ Origin o = Origin::FromRawId(id);
+ stack->tag = StackTrace::TAG_UNKNOWN;
+ Origin chained = Origin::CreateChainedOrigin(o, stack);
+ return chained.raw_id();
+}
+
+static void ChainAndWriteOriginIfTainted(uptr src, uptr size, uptr dst,
+ StackTrace *stack) {
+ dfsan_origin o = GetOriginIfTainted(src, size);
+ if (o) {
+ o = ChainOrigin(o, stack);
+ *(dfsan_origin *)origin_for((void *)dst) = o;
+ }
+}
+
+// Copy the origins of the size bytes from src to dst. The source and target
+// memory ranges must not overlap. This is used by memcpy. stack records the
+// stack trace of the memcpy. When dst and src are not properly 4-byte aligned,
+// origins at the unaligned address boundaries may be overwritten because four
+// contiguous bytes share the same origin.
+static void CopyOrigin(const void *dst, const void *src, uptr size,
+ StackTrace *stack) {
+ uptr d = (uptr)dst;
+ uptr beg = OriginAlignDown(d);
+ // Copy left unaligned origin if that memory is tainted.
+ if (beg < d) {
+ ChainAndWriteOriginIfTainted((uptr)src, beg + kOriginAlign - d, beg, stack);
+ beg += kOriginAlign;
+ }
+
+ uptr end = OriginAlignDown(d + size);
+ // If both ends fall into the same 4-byte slot, we are done.
+ if (end < beg)
+ return;
+
+ // Copy right unaligned origin if that memory is tainted.
+ if (end < d + size)
+ ChainAndWriteOriginIfTainted((uptr)src + (end - d), (d + size) - end, end,
+ stack);
+
+ if (beg >= end)
+ return;
+
+ // Align src up.
+ uptr src_a = OriginAlignUp((uptr)src);
+ dfsan_origin *src_o = origin_for((void *)src_a);
+ u32 *src_s = (u32 *)shadow_for((void *)src_a);
+ dfsan_origin *src_end = origin_for((void *)(src_a + (end - beg)));
+ dfsan_origin *dst_o = origin_for((void *)beg);
+ dfsan_origin last_src_o = 0;
+ dfsan_origin last_dst_o = 0;
+ for (; src_o < src_end; ++src_o, ++src_s, ++dst_o) {
+ if (!*src_s)
+ continue;
+ if (*src_o != last_src_o) {
+ last_src_o = *src_o;
+ last_dst_o = ChainOrigin(last_src_o, stack);
+ }
+ *dst_o = last_dst_o;
+ }
+}
+
+// Copy the origins of the size bytes from src to dst. The source and target
+// memory ranges may overlap, so the copy is done in reverse order.
+// This is used by memmove. stack records the stack trace of the memmove.
+static void ReverseCopyOrigin(const void *dst, const void *src, uptr size,
+ StackTrace *stack) {
+ uptr d = (uptr)dst;
+ uptr end = OriginAlignDown(d + size);
+
+ // Copy right unaligned origin if that memory is tainted.
+ if (end < d + size)
+ ChainAndWriteOriginIfTainted((uptr)src + (end - d), (d + size) - end, end,
+ stack);
+
+ uptr beg = OriginAlignDown(d);
+
+ if (beg + kOriginAlign < end) {
+ // Align src up.
+ uptr src_a = OriginAlignUp((uptr)src);
+ void *src_end = (void *)(src_a + end - beg - kOriginAlign);
+ dfsan_origin *src_end_o = origin_for(src_end);
+ u32 *src_end_s = (u32 *)shadow_for(src_end);
+ dfsan_origin *src_begin_o = origin_for((void *)src_a);
+ dfsan_origin *dst = origin_for((void *)(end - kOriginAlign));
+ dfsan_origin last_src_o = 0;
+ dfsan_origin last_dst_o = 0;
+ for (; src_end_o >= src_begin_o; --src_end_o, --src_end_s, --dst) {
+ if (!*src_end_s)
+ continue;
+ if (*src_end_o != last_src_o) {
+ last_src_o = *src_end_o;
+ last_dst_o = ChainOrigin(last_src_o, stack);
+ }
+ *dst = last_dst_o;
+ }
+ }
+
+ // Copy left unaligned origin if that memory is tainted.
+ if (beg < d)
+ ChainAndWriteOriginIfTainted((uptr)src, beg + kOriginAlign - d, beg, stack);
+}
+
+// Copy or move the origins of the len bytes from src to dst. The source and
+// target memory ranges may or may not be overlapped. This is used by memory
+// transfer operations. stack records the stack trace of the memory transfer
+// operation.
+static void MoveOrigin(const void *dst, const void *src, uptr size,
+ StackTrace *stack) {
+ // Validate address regions.
+ if (!MEM_IS_SHADOW(shadow_for(dst)) ||
+ !MEM_IS_SHADOW(shadow_for((void *)((uptr)dst + size))) ||
+ !MEM_IS_SHADOW(shadow_for(src)) ||
+ !MEM_IS_SHADOW(shadow_for((void *)((uptr)src + size)))) {
+ CHECK(false);
+ return;
+ }
+ // If destination origin range overlaps with source origin range, move
+ // origins by copying origins in a reverse order; otherwise, copy origins in
+ // a normal order. The orders of origin transfer are consistent with the
+ // orders of how memcpy and memmove transfer user data.
+ uptr src_aligned_beg = reinterpret_cast<uptr>(src) & ~3UL;
+ uptr src_aligned_end = (reinterpret_cast<uptr>(src) + size) & ~3UL;
+ uptr dst_aligned_beg = reinterpret_cast<uptr>(dst) & ~3UL;
+ if (dst_aligned_beg < src_aligned_end && dst_aligned_beg >= src_aligned_beg)
+ return ReverseCopyOrigin(dst, src, size, stack);
+ return CopyOrigin(dst, src, size, stack);
}
-static void WriteShadowIfDifferent(dfsan_label label, uptr shadow_addr,
- uptr size) {
- dfsan_label *labelp = (dfsan_label *)shadow_addr;
- for (; size != 0; --size, ++labelp) {
+// Set the origins of the size bytes from the address dst to the origin value.
+static void SetOrigin(const void *dst, uptr size, u32 origin) {
+ if (size == 0)
+ return;
+
+ // Origin mapping is 4 bytes per 4 bytes of application memory.
+ // Here we extend the range such that its left and right bounds are both
+ // 4 byte aligned.
+ uptr x = unaligned_origin_for((uptr)dst);
+ uptr beg = OriginAlignDown(x);
+ uptr end = OriginAlignUp(x + size); // align up.
+ u64 origin64 = ((u64)origin << 32) | origin;
+ // This is like memset, but the value is 32-bit. We unroll by 2 to write
+ // 64 bits at once. May want to unroll further to get 128-bit stores.
+ if (beg & 7ULL) {
+ if (*(u32 *)beg != origin)
+ *(u32 *)beg = origin;
+ beg += 4;
+ }
+ for (uptr addr = beg; addr < (end & ~7UL); addr += 8) {
+ if (*(u64 *)addr == origin64)
+ continue;
+ *(u64 *)addr = origin64;
+ }
+ if (end & 7ULL)
+ if (*(u32 *)(end - kOriginAlign) != origin)
+ *(u32 *)(end - kOriginAlign) = origin;
+}
+
+static void WriteShadowInRange(dfsan_label label, uptr beg_shadow_addr,
+ uptr end_shadow_addr) {
+ // TODO: After changing dfsan_label to 8bit, use internal_memset when label
+ // is not 0.
+ dfsan_label *labelp = (dfsan_label *)beg_shadow_addr;
+ if (label) {
+ for (; (uptr)labelp < end_shadow_addr; ++labelp) *labelp = label;
+ return;
+ }
+
+ for (; (uptr)labelp < end_shadow_addr; ++labelp) {
// Don't write the label if it is already the value we need it to be.
// In a program where most addresses are not labeled, it is common that
// a page of shadow memory is entirely zeroed. The Linux copy-on-write
@@ -293,55 +388,174 @@ static void WriteShadowIfDifferent(dfsan_label label, uptr shadow_addr,
// the value written does not change the value in memory. Avoiding the
// write when both |label| and |*labelp| are zero dramatically reduces
// the amount of real memory used by large programs.
- if (label == *labelp)
+ if (!*labelp)
continue;
- *labelp = label;
+ *labelp = 0;
}
}
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __dfsan_set_label(
- dfsan_label label, void *addr, uptr size) {
- const uptr beg_shadow_addr = (uptr)__dfsan::shadow_for(addr);
+static void WriteShadowWithSize(dfsan_label label, uptr shadow_addr,
+ uptr size) {
+ WriteShadowInRange(label, shadow_addr, shadow_addr + size * sizeof(label));
+}
- if (0 != label) {
- WriteShadowIfDifferent(label, beg_shadow_addr, size);
+#define RET_CHAIN_ORIGIN(id) \
+ GET_CALLER_PC_BP_SP; \
+ (void)sp; \
+ GET_STORE_STACK_TRACE_PC_BP(pc, bp); \
+ return ChainOrigin(id, &stack);
+
+// Return a new origin chain with the previous ID id and the current stack
+// trace.
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE dfsan_origin
+__dfsan_chain_origin(dfsan_origin id) {
+ RET_CHAIN_ORIGIN(id)
+}
+
+// Return a new origin chain with the previous ID id and the current stack
+// trace if the label is tainted.
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE dfsan_origin
+__dfsan_chain_origin_if_tainted(dfsan_label label, dfsan_origin id) {
+ if (!label)
+ return id;
+ RET_CHAIN_ORIGIN(id)
+}
+
+// Copy or move the origins of the len bytes from src to dst.
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __dfsan_mem_origin_transfer(
+ const void *dst, const void *src, uptr len) {
+ if (src == dst)
return;
+ GET_CALLER_PC_BP;
+ GET_STORE_STACK_TRACE_PC_BP(pc, bp);
+ MoveOrigin(dst, src, len, &stack);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE void dfsan_mem_origin_transfer(const void *dst,
+ const void *src,
+ uptr len) {
+ __dfsan_mem_origin_transfer(dst, src, len);
+}
+
+namespace __dfsan {
+
+bool dfsan_inited = false;
+bool dfsan_init_is_running = false;
+
+void dfsan_copy_memory(void *dst, const void *src, uptr size) {
+ internal_memcpy(dst, src, size);
+ internal_memcpy((void *)shadow_for(dst), (const void *)shadow_for(src),
+ size * sizeof(dfsan_label));
+ if (dfsan_get_track_origins())
+ dfsan_mem_origin_transfer(dst, src, size);
+}
+
+} // namespace __dfsan
+
+// If the label s is tainted, set the origins of the size bytes from the
+// address p to a new origin chain with the previous ID o and the current
+// stack trace. This is called out of line by instrumented code to reduce code
+// size when too much origin-update code would otherwise be inserted.
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __dfsan_maybe_store_origin(
+ dfsan_label s, void *p, uptr size, dfsan_origin o) {
+ if (UNLIKELY(s)) {
+ GET_CALLER_PC_BP_SP;
+ (void)sp;
+ GET_STORE_STACK_TRACE_PC_BP(pc, bp);
+ SetOrigin(p, size, ChainOrigin(o, &stack));
}
+}
+
+// Releases the pages within the origin address range.
+static void ReleaseOrigins(void *addr, uptr size) {
+ const uptr beg_origin_addr = (uptr)__dfsan::origin_for(addr);
+ const void *end_addr = (void *)((uptr)addr + size);
+ const uptr end_origin_addr = (uptr)__dfsan::origin_for(end_addr);
- // If label is 0, releases the pages within the shadow address range, and sets
- // the shadow addresses not on the pages to be 0.
+ if (end_origin_addr - beg_origin_addr <
+ common_flags()->clear_shadow_mmap_threshold)
+ return;
+
+ const uptr page_size = GetPageSizeCached();
+ const uptr beg_aligned = RoundUpTo(beg_origin_addr, page_size);
+ const uptr end_aligned = RoundDownTo(end_origin_addr, page_size);
+
+ if (!MmapFixedSuperNoReserve(beg_aligned, end_aligned - beg_aligned))
+ Die();
+}
+
+// Releases the pages within the shadow address range, and sets
+// the shadow addresses not on the pages to be 0.
+static void ReleaseOrClearShadows(void *addr, uptr size) {
+ const uptr beg_shadow_addr = (uptr)__dfsan::shadow_for(addr);
const void *end_addr = (void *)((uptr)addr + size);
const uptr end_shadow_addr = (uptr)__dfsan::shadow_for(end_addr);
+
+ if (end_shadow_addr - beg_shadow_addr <
+ common_flags()->clear_shadow_mmap_threshold)
+ return WriteShadowWithSize(0, beg_shadow_addr, size);
+
const uptr page_size = GetPageSizeCached();
const uptr beg_aligned = RoundUpTo(beg_shadow_addr, page_size);
const uptr end_aligned = RoundDownTo(end_shadow_addr, page_size);
- // dfsan_set_label can be called from the following cases
- // 1) mapped ranges by new/delete and malloc/free. This case has shadow memory
- // size > 100k, and happens less frequently.
- // 2) zero-filling internal data structures by utility libraries. This case
- // has shadow memory size < 32k, and happens more often.
- // Set kNumPagesThreshold to be 8 to avoid releasing small pages.
- const int kNumPagesThreshold = 8;
- if (beg_aligned + kNumPagesThreshold * page_size >= end_aligned)
- return WriteShadowIfDifferent(label, beg_shadow_addr, size);
+ if (beg_aligned >= end_aligned) {
+ WriteShadowWithSize(0, beg_shadow_addr, size);
+ } else {
+ if (beg_aligned != beg_shadow_addr)
+ WriteShadowInRange(0, beg_shadow_addr, beg_aligned);
+ if (end_aligned != end_shadow_addr)
+ WriteShadowInRange(0, end_aligned, end_shadow_addr);
+ if (!MmapFixedSuperNoReserve(beg_aligned, end_aligned - beg_aligned))
+ Die();
+ }
+}
- WriteShadowIfDifferent(label, beg_shadow_addr, beg_aligned - beg_shadow_addr);
- ReleaseMemoryPagesToOS(beg_aligned, end_aligned);
- WriteShadowIfDifferent(label, end_aligned, end_shadow_addr - end_aligned);
+void SetShadow(dfsan_label label, void *addr, uptr size, dfsan_origin origin) {
+ if (0 != label) {
+ const uptr beg_shadow_addr = (uptr)__dfsan::shadow_for(addr);
+ WriteShadowWithSize(label, beg_shadow_addr, size);
+ if (dfsan_get_track_origins())
+ SetOrigin(addr, size, origin);
+ return;
+ }
+
+ if (dfsan_get_track_origins())
+ ReleaseOrigins(addr, size);
+
+ ReleaseOrClearShadows(addr, size);
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __dfsan_set_label(
+ dfsan_label label, dfsan_origin origin, void *addr, uptr size) {
+ SetShadow(label, addr, size, origin);
}
SANITIZER_INTERFACE_ATTRIBUTE
void dfsan_set_label(dfsan_label label, void *addr, uptr size) {
- __dfsan_set_label(label, addr, size);
+ dfsan_origin init_origin = 0;
+ if (label && dfsan_get_track_origins()) {
+ GET_CALLER_PC_BP;
+ GET_STORE_STACK_TRACE_PC_BP(pc, bp);
+ init_origin = ChainOrigin(0, &stack, true);
+ }
+ SetShadow(label, addr, size, init_origin);
}
SANITIZER_INTERFACE_ATTRIBUTE
void dfsan_add_label(dfsan_label label, void *addr, uptr size) {
+ if (0 == label)
+ return;
+
+ if (dfsan_get_track_origins()) {
+ GET_CALLER_PC_BP;
+ GET_STORE_STACK_TRACE_PC_BP(pc, bp);
+ dfsan_origin init_origin = ChainOrigin(0, &stack, true);
+ SetOrigin(addr, size, init_origin);
+ }
+
for (dfsan_label *labelp = shadow_for(addr); size != 0; --size, ++labelp)
- if (*labelp != label)
- *labelp = __dfsan_union(*labelp, label);
+ *labelp |= label;
}
// Unlike the other dfsan interface functions the behavior of this function
@@ -354,6 +568,30 @@ __dfsw_dfsan_get_label(long data, dfsan_label data_label,
return data_label;
}
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE dfsan_label __dfso_dfsan_get_label(
+ long data, dfsan_label data_label, dfsan_label *ret_label,
+ dfsan_origin data_origin, dfsan_origin *ret_origin) {
+ *ret_label = 0;
+ *ret_origin = 0;
+ return data_label;
+}
+
+// This function is used if dfsan_get_origin is called when origin tracking is
+// off.
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE dfsan_origin __dfsw_dfsan_get_origin(
+ long data, dfsan_label data_label, dfsan_label *ret_label) {
+ *ret_label = 0;
+ return 0;
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE dfsan_origin __dfso_dfsan_get_origin(
+ long data, dfsan_label data_label, dfsan_label *ret_label,
+ dfsan_origin data_origin, dfsan_origin *ret_origin) {
+ *ret_label = 0;
+ *ret_origin = 0;
+ return data_origin;
+}
+
SANITIZER_INTERFACE_ATTRIBUTE dfsan_label
dfsan_read_label(const void *addr, uptr size) {
if (size == 0)
@@ -361,75 +599,195 @@ dfsan_read_label(const void *addr, uptr size) {
return __dfsan_union_load(shadow_for(addr), size);
}
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE
-const struct dfsan_label_info *dfsan_get_label_info(dfsan_label label) {
- return &__dfsan_label_info[label];
+SANITIZER_INTERFACE_ATTRIBUTE dfsan_origin
+dfsan_read_origin_of_first_taint(const void *addr, uptr size) {
+ return GetOriginIfTainted((uptr)addr, size);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE void dfsan_set_label_origin(dfsan_label label,
+ dfsan_origin origin,
+ void *addr,
+ uptr size) {
+ __dfsan_set_label(label, origin, addr, size);
}
extern "C" SANITIZER_INTERFACE_ATTRIBUTE int
dfsan_has_label(dfsan_label label, dfsan_label elem) {
- if (label == elem)
- return true;
- const dfsan_label_info *info = dfsan_get_label_info(label);
- if (info->l1 != 0) {
- return dfsan_has_label(info->l1, elem) || dfsan_has_label(info->l2, elem);
- } else {
- return false;
- }
+ return (label & elem) == elem;
}
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE dfsan_label
-dfsan_has_label_with_desc(dfsan_label label, const char *desc) {
- const dfsan_label_info *info = dfsan_get_label_info(label);
- if (info->l1 != 0) {
- return dfsan_has_label_with_desc(info->l1, desc) ||
- dfsan_has_label_with_desc(info->l2, desc);
- } else {
- return internal_strcmp(desc, info->desc) == 0;
+class Decorator : public __sanitizer::SanitizerCommonDecorator {
+ public:
+ Decorator() : SanitizerCommonDecorator() {}
+ const char *Origin() const { return Magenta(); }
+};
+
+namespace {
+
+void PrintNoOriginTrackingWarning() {
+ Decorator d;
+ Printf(
+ " %sDFSan: origin tracking is not enabled. Did you specify the "
+ "-dfsan-track-origins=1 option?%s\n",
+ d.Warning(), d.Default());
+}
+
+void PrintNoTaintWarning(const void *address) {
+ Decorator d;
+ Printf(" %sDFSan: no tainted value at %x%s\n", d.Warning(), address,
+ d.Default());
+}
+
+void PrintInvalidOriginWarning(dfsan_label label, const void *address) {
+ Decorator d;
+ Printf(
+ " %sTaint value 0x%x (at %p) has invalid origin tracking. This can "
+ "be a DFSan bug.%s\n",
+ d.Warning(), label, address, d.Default());
+}
+
+bool PrintOriginTraceToStr(const void *addr, const char *description,
+ InternalScopedString *out) {
+ CHECK(out);
+ CHECK(dfsan_get_track_origins());
+ Decorator d;
+
+ const dfsan_label label = *__dfsan::shadow_for(addr);
+ CHECK(label);
+
+ const dfsan_origin origin = *__dfsan::origin_for(addr);
+
+ out->append(" %sTaint value 0x%x (at %p) origin tracking (%s)%s\n",
+ d.Origin(), label, addr, description ? description : "",
+ d.Default());
+
+ Origin o = Origin::FromRawId(origin);
+ bool found = false;
+
+ while (o.isChainedOrigin()) {
+ StackTrace stack;
+ dfsan_origin origin_id = o.raw_id();
+ o = o.getNextChainedOrigin(&stack);
+ if (o.isChainedOrigin())
+ out->append(
+ " %sOrigin value: 0x%x, Taint value was stored to memory at%s\n",
+ d.Origin(), origin_id, d.Default());
+ else
+ out->append(" %sOrigin value: 0x%x, Taint value was created at%s\n",
+ d.Origin(), origin_id, d.Default());
+
+ // Includes a trailing newline, so no need to add it again.
+ stack.PrintTo(out);
+ found = true;
}
+
+ return found;
}
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr
-dfsan_get_label_count(void) {
- dfsan_label max_label_allocated =
- atomic_load(&__dfsan_last_label, memory_order_relaxed);
+} // namespace
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void dfsan_print_origin_trace(
+ const void *addr, const char *description) {
+ if (!dfsan_get_track_origins()) {
+ PrintNoOriginTrackingWarning();
+ return;
+ }
+
+ const dfsan_label label = *__dfsan::shadow_for(addr);
+ if (!label) {
+ PrintNoTaintWarning(addr);
+ return;
+ }
+
+ InternalScopedString trace;
+ bool success = PrintOriginTraceToStr(addr, description, &trace);
+
+ if (trace.length())
+ Printf("%s", trace.data());
- return static_cast<uptr>(max_label_allocated);
+ if (!success)
+ PrintInvalidOriginWarning(label, addr);
}
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
-dfsan_dump_labels(int fd) {
- dfsan_label last_label =
- atomic_load(&__dfsan_last_label, memory_order_relaxed);
- for (uptr l = 1; l <= last_label; ++l) {
- char buf[64];
- internal_snprintf(buf, sizeof(buf), "%u %u %u ", l,
- __dfsan_label_info[l].l1, __dfsan_label_info[l].l2);
- WriteToFile(fd, buf, internal_strlen(buf));
- if (__dfsan_label_info[l].l1 == 0 && __dfsan_label_info[l].desc) {
- WriteToFile(fd, __dfsan_label_info[l].desc,
- internal_strlen(__dfsan_label_info[l].desc));
- }
- WriteToFile(fd, "\n", 1);
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE size_t
+dfsan_sprint_origin_trace(const void *addr, const char *description,
+ char *out_buf, size_t out_buf_size) {
+ CHECK(out_buf);
+
+ if (!dfsan_get_track_origins()) {
+ PrintNoOriginTrackingWarning();
+ return 0;
}
+
+ const dfsan_label label = *__dfsan::shadow_for(addr);
+ if (!label) {
+ PrintNoTaintWarning(addr);
+ return 0;
+ }
+
+ InternalScopedString trace;
+ bool success = PrintOriginTraceToStr(addr, description, &trace);
+
+ if (!success) {
+ PrintInvalidOriginWarning(label, addr);
+ return 0;
+ }
+
+ if (out_buf_size) {
+ internal_strncpy(out_buf, trace.data(), out_buf_size - 1);
+ out_buf[out_buf_size - 1] = '\0';
+ }
+
+ return trace.length();
}
-#define GET_FATAL_STACK_TRACE_PC_BP(pc, bp) \
- BufferedStackTrace stack; \
- stack.Unwind(pc, bp, nullptr, common_flags()->fast_unwind_on_fatal);
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE dfsan_origin
+dfsan_get_init_origin(const void *addr) {
+ if (!dfsan_get_track_origins())
+ return 0;
+
+ const dfsan_label label = *__dfsan::shadow_for(addr);
+ if (!label)
+ return 0;
+
+ const dfsan_origin origin = *__dfsan::origin_for(addr);
+
+ Origin o = Origin::FromRawId(origin);
+ dfsan_origin origin_id = o.raw_id();
+ while (o.isChainedOrigin()) {
+ StackTrace stack;
+ origin_id = o.raw_id();
+ o = o.getNextChainedOrigin(&stack);
+ }
+ return origin_id;
+}
void __sanitizer::BufferedStackTrace::UnwindImpl(uptr pc, uptr bp,
void *context,
bool request_fast,
u32 max_depth) {
- Unwind(max_depth, pc, bp, context, 0, 0, false);
+ using namespace __dfsan;
+ DFsanThread *t = GetCurrentThread();
+ if (!t || !StackTrace::WillUseFastUnwind(request_fast)) {
+ return Unwind(max_depth, pc, bp, context, 0, 0, false);
+ }
+ Unwind(max_depth, pc, bp, nullptr, t->stack_top(), t->stack_bottom(), true);
}
extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_print_stack_trace() {
- GET_FATAL_STACK_TRACE_PC_BP(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME());
+ GET_CALLER_PC_BP;
+ GET_STORE_STACK_TRACE_PC_BP(pc, bp);
stack.Print();
}
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE size_t
+dfsan_sprint_stack_trace(char *out_buf, size_t out_buf_size) {
+ CHECK(out_buf);
+ GET_CALLER_PC_BP;
+ GET_STORE_STACK_TRACE_PC_BP(pc, bp);
+ return stack.PrintTo(out_buf, out_buf_size);
+}
+
void Flags::SetDefaults() {
#define DFSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "dfsan_flags.inc"
@@ -445,6 +803,12 @@ static void RegisterDfsanFlags(FlagParser *parser, Flags *f) {
static void InitializeFlags() {
SetCommonFlagsDefaults();
+ {
+ CommonFlags cf;
+ cf.CopyFrom(*common_flags());
+ cf.intercept_tls_get_addr = true;
+ OverrideCommonFlags(cf);
+ }
flags().SetDefaults();
FlagParser parser;
@@ -456,73 +820,207 @@ static void InitializeFlags() {
if (common_flags()->help) parser.PrintFlagDescriptions();
}
-static void InitializePlatformEarly() {
- AvoidCVE_2016_2143();
-#ifdef DFSAN_RUNTIME_VMA
- __dfsan::vmaSize =
- (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
- if (__dfsan::vmaSize == 39 || __dfsan::vmaSize == 42 ||
- __dfsan::vmaSize == 48) {
- __dfsan_shadow_ptr_mask = ShadowMask();
- } else {
- Printf("FATAL: DataFlowSanitizer: unsupported VMA range\n");
- Printf("FATAL: Found %d - Supported 39, 42, and 48\n", __dfsan::vmaSize);
- Die();
+SANITIZER_INTERFACE_ATTRIBUTE
+void dfsan_clear_arg_tls(uptr offset, uptr size) {
+ internal_memset((void *)((uptr)__dfsan_arg_tls + offset), 0, size);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void dfsan_clear_thread_local_state() {
+ internal_memset(__dfsan_arg_tls, 0, sizeof(__dfsan_arg_tls));
+ internal_memset(__dfsan_retval_tls, 0, sizeof(__dfsan_retval_tls));
+
+ if (dfsan_get_track_origins()) {
+ internal_memset(__dfsan_arg_origin_tls, 0, sizeof(__dfsan_arg_origin_tls));
+ internal_memset(&__dfsan_retval_origin_tls, 0,
+ sizeof(__dfsan_retval_origin_tls));
}
-#endif
}
-static void dfsan_fini() {
- if (internal_strcmp(flags().dump_labels_at_exit, "") != 0) {
- fd_t fd = OpenFile(flags().dump_labels_at_exit, WrOnly);
- if (fd == kInvalidFd) {
- Report("WARNING: DataFlowSanitizer: unable to open output file %s\n",
- flags().dump_labels_at_exit);
- return;
+extern "C" void dfsan_flush() {
+ const uptr maxVirtualAddress = GetMaxUserVirtualAddress();
+ for (unsigned i = 0; i < kMemoryLayoutSize; ++i) {
+ uptr start = kMemoryLayout[i].start;
+ uptr end = kMemoryLayout[i].end;
+ uptr size = end - start;
+ MappingDesc::Type type = kMemoryLayout[i].type;
+
+ if (type != MappingDesc::SHADOW && type != MappingDesc::ORIGIN)
+ continue;
+
+ // Check if the segment should be mapped based on platform constraints.
+ if (start >= maxVirtualAddress)
+ continue;
+
+ if (!MmapFixedSuperNoReserve(start, size, kMemoryLayout[i].name)) {
+ Printf("FATAL: DataFlowSanitizer: failed to clear memory region\n");
+ Die();
}
+ }
+}
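Note: from the caller's point of view, dfsan_flush simply drops every label, since remapping the shadow (and origin) regions zeroes them. A small sketch using the public dfsan_set_label and dfsan_read_label entry points, assuming a program built with -fsanitize=dataflow and linked against this runtime:

    #include <sanitizer/dfsan_interface.h>
    #include <assert.h>

    int main() {
      int x = 42;
      dfsan_set_label(1, &x, sizeof(x));             // taint x with label 1
      assert(dfsan_read_label(&x, sizeof(x)) == 1);
      dfsan_flush();                                 // all labels dropped
      assert(dfsan_read_label(&x, sizeof(x)) == 0);
      return 0;
    }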
- Report("INFO: DataFlowSanitizer: dumping labels to %s\n",
- flags().dump_labels_at_exit);
- dfsan_dump_labels(fd);
- CloseFile(fd);
+// TODO: CheckMemoryLayoutSanity is based on msan.
+// Consider refactoring these into a shared implementation.
+static void CheckMemoryLayoutSanity() {
+ uptr prev_end = 0;
+ for (unsigned i = 0; i < kMemoryLayoutSize; ++i) {
+ uptr start = kMemoryLayout[i].start;
+ uptr end = kMemoryLayout[i].end;
+ MappingDesc::Type type = kMemoryLayout[i].type;
+ CHECK_LT(start, end);
+ CHECK_EQ(prev_end, start);
+ CHECK(addr_is_type(start, type));
+ CHECK(addr_is_type((start + end) / 2, type));
+ CHECK(addr_is_type(end - 1, type));
+ if (type == MappingDesc::APP) {
+ uptr addr = start;
+ CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
+ CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
+ CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));
+
+ addr = (start + end) / 2;
+ CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
+ CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
+ CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));
+
+ addr = end - 1;
+ CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
+ CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
+ CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));
+ }
+ prev_end = end;
}
}
-extern "C" void dfsan_flush() {
- if (!MmapFixedNoReserve(ShadowAddr(), UnusedAddr() - ShadowAddr()))
- Die();
+// TODO: CheckMemoryRangeAvailability is based on msan.
+// Consider refactoring these into a shared implementation.
+static bool CheckMemoryRangeAvailability(uptr beg, uptr size) {
+ if (size > 0) {
+ uptr end = beg + size - 1;
+ if (!MemoryRangeIsAvailable(beg, end)) {
+ Printf("FATAL: Memory range %p - %p is not available.\n", beg, end);
+ return false;
+ }
+ }
+ return true;
+}
+
+// TODO: ProtectMemoryRange is based on msan.
+// Consider refactoring these into a shared implementation.
+static bool ProtectMemoryRange(uptr beg, uptr size, const char *name) {
+ if (size > 0) {
+ void *addr = MmapFixedNoAccess(beg, size, name);
+ if (beg == 0 && addr) {
+ // Depending on the kernel configuration, we may not be able to protect
+ // the page at address zero.
+ uptr gap = 16 * GetPageSizeCached();
+ beg += gap;
+ size -= gap;
+ addr = MmapFixedNoAccess(beg, size, name);
+ }
+ if ((uptr)addr != beg) {
+ uptr end = beg + size - 1;
+ Printf("FATAL: Cannot protect memory range %p - %p (%s).\n", beg, end,
+ name);
+ return false;
+ }
+ }
+ return true;
+}
+
+// TODO: InitShadow is based on msan.
+// Consider refactoring these into a shared implementation.
+bool InitShadow(bool init_origins) {
+ // Let user know mapping parameters first.
+ VPrintf(1, "dfsan_init %p\n", &__dfsan::dfsan_init);
+ for (unsigned i = 0; i < kMemoryLayoutSize; ++i)
+ VPrintf(1, "%s: %zx - %zx\n", kMemoryLayout[i].name, kMemoryLayout[i].start,
+ kMemoryLayout[i].end - 1);
+
+ CheckMemoryLayoutSanity();
+
+ if (!MEM_IS_APP(&__dfsan::dfsan_init)) {
+ Printf("FATAL: Code %p is out of application range. Non-PIE build?\n",
+ (uptr)&__dfsan::dfsan_init);
+ return false;
+ }
+
+ const uptr maxVirtualAddress = GetMaxUserVirtualAddress();
+
+ for (unsigned i = 0; i < kMemoryLayoutSize; ++i) {
+ uptr start = kMemoryLayout[i].start;
+ uptr end = kMemoryLayout[i].end;
+ uptr size = end - start;
+ MappingDesc::Type type = kMemoryLayout[i].type;
+
+ // Check if the segment should be mapped based on platform constraints.
+ if (start >= maxVirtualAddress)
+ continue;
+
+ bool map = type == MappingDesc::SHADOW ||
+ (init_origins && type == MappingDesc::ORIGIN);
+ bool protect = type == MappingDesc::INVALID ||
+ (!init_origins && type == MappingDesc::ORIGIN);
+ CHECK(!(map && protect));
+ if (!map && !protect)
+ CHECK(type == MappingDesc::APP);
+ if (map) {
+ if (!CheckMemoryRangeAvailability(start, size))
+ return false;
+ if (!MmapFixedSuperNoReserve(start, size, kMemoryLayout[i].name))
+ return false;
+ if (common_flags()->use_madv_dontdump)
+ DontDumpShadowMemory(start, size);
+ }
+ if (protect) {
+ if (!CheckMemoryRangeAvailability(start, size))
+ return false;
+ if (!ProtectMemoryRange(start, size, kMemoryLayout[i].name))
+ return false;
+ }
+ }
+
+ return true;
}
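Note: for each memory-layout segment, InitShadow either maps it (SHADOW, plus ORIGIN when origin tracking is on), protects it (INVALID, plus ORIGIN when origin tracking is off), or leaves it alone (APP). A compact, self-contained restatement of that decision, using stand-in enums rather than the real MappingDesc::Type from dfsan_platform.h:

    // Minimal stand-in for MappingDesc::Type from dfsan_platform.h.
    enum class SegType { App, Shadow, Origin, Invalid };
    enum class Action { Map, Protect, Leave };

    // Same decision logic as the map/protect booleans in InitShadow above.
    static Action classify(SegType type, bool init_origins) {
      if (type == SegType::Shadow ||
          (init_origins && type == SegType::Origin))
        return Action::Map;
      if (type == SegType::Invalid ||
          (!init_origins && type == SegType::Origin))
        return Action::Protect;
      return Action::Leave;  // application memory
    }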
-static void dfsan_init(int argc, char **argv, char **envp) {
+static void DFsanInit(int argc, char **argv, char **envp) {
+ CHECK(!dfsan_init_is_running);
+ if (dfsan_inited)
+ return;
+ dfsan_init_is_running = true;
+ SanitizerToolName = "DataflowSanitizer";
+
+ AvoidCVE_2016_2143();
+
InitializeFlags();
- ::InitializePlatformEarly();
+ CheckASLR();
- if (!MmapFixedSuperNoReserve(ShadowAddr(), UnusedAddr() - ShadowAddr()))
- Die();
- if (common_flags()->use_madv_dontdump)
- DontDumpShadowMemory(ShadowAddr(), UnusedAddr() - ShadowAddr());
+ InitShadow(dfsan_get_track_origins());
+
+ initialize_interceptors();
- // Protect the region of memory we don't use, to preserve the one-to-one
- // mapping from application to shadow memory. But if ASLR is disabled, Linux
- // will load our executable in the middle of our unused region. This mostly
- // works so long as the program doesn't use too much memory. We support this
- // case by disabling memory protection when ASLR is disabled.
- uptr init_addr = (uptr)&dfsan_init;
- if (!(init_addr >= UnusedAddr() && init_addr < AppAddr()))
- MmapFixedNoAccess(UnusedAddr(), AppAddr() - UnusedAddr());
+ // Set up threads
+ DFsanTSDInit(DFsanTSDDtor);
- InitializeInterceptors();
+ dfsan_allocator_init();
- // Register the fini callback to run when the program terminates successfully
- // or it is killed by the runtime.
- Atexit(dfsan_fini);
- AddDieCallback(dfsan_fini);
+ DFsanThread *main_thread = DFsanThread::Create(nullptr, nullptr, nullptr);
+ SetCurrentThread(main_thread);
+ main_thread->ThreadStart();
- __dfsan_label_info[kInitializingLabel].desc = "<init label>";
+ dfsan_init_is_running = false;
+ dfsan_inited = true;
}
+namespace __dfsan {
+
+void dfsan_init() { DFsanInit(0, nullptr, nullptr); }
+
+} // namespace __dfsan
+
#if SANITIZER_CAN_USE_PREINIT_ARRAY
-__attribute__((section(".preinit_array"), used))
-static void (*dfsan_init_ptr)(int, char **, char **) = dfsan_init;
+__attribute__((section(".preinit_array"),
+ used)) static void (*dfsan_init_ptr)(int, char **,
+ char **) = DFsanInit;
#endif
diff --git a/compiler-rt/lib/dfsan/dfsan.h b/compiler-rt/lib/dfsan/dfsan.h
index d662391216e4..b212298157eb 100644
--- a/compiler-rt/lib/dfsan/dfsan.h
+++ b/compiler-rt/lib/dfsan/dfsan.h
@@ -15,26 +15,37 @@
#define DFSAN_H
#include "sanitizer_common/sanitizer_internal_defs.h"
+
#include "dfsan_platform.h"
+using __sanitizer::u32;
+using __sanitizer::u8;
using __sanitizer::uptr;
-using __sanitizer::u16;
// Copy declarations from public sanitizer/dfsan_interface.h header here.
-typedef u16 dfsan_label;
-
-struct dfsan_label_info {
- dfsan_label l1;
- dfsan_label l2;
- const char *desc;
- void *userdata;
-};
+typedef u8 dfsan_label;
+typedef u32 dfsan_origin;
extern "C" {
void dfsan_add_label(dfsan_label label, void *addr, uptr size);
void dfsan_set_label(dfsan_label label, void *addr, uptr size);
dfsan_label dfsan_read_label(const void *addr, uptr size);
dfsan_label dfsan_union(dfsan_label l1, dfsan_label l2);
+// Zero out [offset, offset+size) from __dfsan_arg_tls.
+void dfsan_clear_arg_tls(uptr offset, uptr size);
+// Zero out the TLS storage.
+void dfsan_clear_thread_local_state();
+
+// Return the origin associated with the first tainted byte within the size
+// bytes starting at addr.
+dfsan_origin dfsan_read_origin_of_first_taint(const void *addr, uptr size);
+
+// Set the data within [addr, addr+size) with label and origin.
+void dfsan_set_label_origin(dfsan_label label, dfsan_origin origin, void *addr,
+ uptr size);
+
+// Copy or move the origins of the len bytes from src to dst.
+void dfsan_mem_origin_transfer(const void *dst, const void *src, uptr len);
} // extern "C"
template <typename T>
@@ -44,29 +55,48 @@ void dfsan_set_label(dfsan_label label, T &data) { // NOLINT
namespace __dfsan {
-void InitializeInterceptors();
+extern bool dfsan_inited;
+extern bool dfsan_init_is_running;
+
+void initialize_interceptors();
inline dfsan_label *shadow_for(void *ptr) {
- return (dfsan_label *) ((((uptr) ptr) & ShadowMask()) << 1);
+ return (dfsan_label *)MEM_TO_SHADOW(ptr);
}
inline const dfsan_label *shadow_for(const void *ptr) {
return shadow_for(const_cast<void *>(ptr));
}
-struct Flags {
-#define DFSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
-#include "dfsan_flags.inc"
-#undef DFSAN_FLAG
+inline uptr unaligned_origin_for(uptr ptr) { return MEM_TO_ORIGIN(ptr); }
- void SetDefaults();
-};
+inline dfsan_origin *origin_for(void *ptr) {
+ auto aligned_addr = unaligned_origin_for(reinterpret_cast<uptr>(ptr)) &
+ ~(sizeof(dfsan_origin) - 1);
+ return reinterpret_cast<dfsan_origin *>(aligned_addr);
+}
-extern Flags flags_data;
-inline Flags &flags() {
- return flags_data;
+inline const dfsan_origin *origin_for(const void *ptr) {
+ return origin_for(const_cast<void *>(ptr));
}
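Note: origin shadow is stored at 4-byte granularity, so origin_for rounds the translated address down to a dfsan_origin boundary. A standalone sketch of just that alignment step, using a made-up value in place of the real MEM_TO_ORIGIN mapping (the actual constants live in dfsan_platform.h and are not part of this excerpt):

    #include <cstdint>
    #include <cstdio>

    using dfsan_origin = uint32_t;

    int main() {
      // Hypothetical result of MEM_TO_ORIGIN(ptr) for some tainted address.
      uintptr_t unaligned = 0x20000000000EULL;
      // Same computation as origin_for(): clear the low bits so the pointer
      // lands on a sizeof(dfsan_origin)-aligned slot.
      uintptr_t aligned = unaligned & ~(uintptr_t)(sizeof(dfsan_origin) - 1);
      printf("%#llx -> %#llx\n", (unsigned long long)unaligned,
             (unsigned long long)aligned);  // ...000e -> ...000c
      return 0;
    }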
+void dfsan_copy_memory(void *dst, const void *src, uptr size);
+
+void dfsan_allocator_init();
+void dfsan_deallocate(void *ptr);
+
+void *dfsan_malloc(uptr size);
+void *dfsan_calloc(uptr nmemb, uptr size);
+void *dfsan_realloc(void *ptr, uptr size);
+void *dfsan_reallocarray(void *ptr, uptr nmemb, uptr size);
+void *dfsan_valloc(uptr size);
+void *dfsan_pvalloc(uptr size);
+void *dfsan_aligned_alloc(uptr alignment, uptr size);
+void *dfsan_memalign(uptr alignment, uptr size);
+int dfsan_posix_memalign(void **memptr, uptr alignment, uptr size);
+
+void dfsan_init();
+
} // namespace __dfsan
#endif // DFSAN_H
diff --git a/compiler-rt/lib/dfsan/dfsan.syms.extra b/compiler-rt/lib/dfsan/dfsan.syms.extra
index 0d507eef0814..e34766c3ba1c 100644
--- a/compiler-rt/lib/dfsan/dfsan.syms.extra
+++ b/compiler-rt/lib/dfsan/dfsan.syms.extra
@@ -1,3 +1,4 @@
dfsan_*
__dfsan_*
__dfsw_*
+__dfso_*
diff --git a/compiler-rt/lib/dfsan/dfsan_allocator.cpp b/compiler-rt/lib/dfsan/dfsan_allocator.cpp
new file mode 100644
index 000000000000..b2e94564446e
--- /dev/null
+++ b/compiler-rt/lib/dfsan/dfsan_allocator.cpp
@@ -0,0 +1,287 @@
+//===-- dfsan_allocator.cpp -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of DataflowSanitizer.
+//
+// DataflowSanitizer allocator.
+//===----------------------------------------------------------------------===//
+
+#include "dfsan_allocator.h"
+
+#include "dfsan.h"
+#include "dfsan_flags.h"
+#include "dfsan_thread.h"
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_checks.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_allocator_report.h"
+#include "sanitizer_common/sanitizer_errno.h"
+
+namespace __dfsan {
+
+struct Metadata {
+ uptr requested_size;
+};
+
+struct DFsanMapUnmapCallback {
+ void OnMap(uptr p, uptr size) const { dfsan_set_label(0, (void *)p, size); }
+ void OnUnmap(uptr p, uptr size) const { dfsan_set_label(0, (void *)p, size); }
+};
+
+static const uptr kAllocatorSpace = 0x700000000000ULL;
+static const uptr kMaxAllowedMallocSize = 8UL << 30;
+
+struct AP64 { // Allocator64 parameters. Deliberately using a short name.
+ static const uptr kSpaceBeg = kAllocatorSpace;
+ static const uptr kSpaceSize = 0x40000000000; // 4T.
+ static const uptr kMetadataSize = sizeof(Metadata);
+ typedef DefaultSizeClassMap SizeClassMap;
+ typedef DFsanMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+ using AddressSpaceView = LocalAddressSpaceView;
+};
+
+typedef SizeClassAllocator64<AP64> PrimaryAllocator;
+
+typedef CombinedAllocator<PrimaryAllocator> Allocator;
+typedef Allocator::AllocatorCache AllocatorCache;
+
+static Allocator allocator;
+static AllocatorCache fallback_allocator_cache;
+static StaticSpinMutex fallback_mutex;
+
+static uptr max_malloc_size;
+
+void dfsan_allocator_init() {
+ SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
+ allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
+ if (common_flags()->max_allocation_size_mb)
+ max_malloc_size = Min(common_flags()->max_allocation_size_mb << 20,
+ kMaxAllowedMallocSize);
+ else
+ max_malloc_size = kMaxAllowedMallocSize;
+}
+
+AllocatorCache *GetAllocatorCache(DFsanThreadLocalMallocStorage *ms) {
+ CHECK(ms);
+ CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator_cache));
+ return reinterpret_cast<AllocatorCache *>(ms->allocator_cache);
+}
+
+void DFsanThreadLocalMallocStorage::CommitBack() {
+ allocator.SwallowCache(GetAllocatorCache(this));
+}
+
+static void *DFsanAllocate(uptr size, uptr alignment, bool zeroise) {
+ if (size > max_malloc_size) {
+ if (AllocatorMayReturnNull()) {
+ Report("WARNING: DataflowSanitizer failed to allocate 0x%zx bytes\n",
+ size);
+ return nullptr;
+ }
+ BufferedStackTrace stack;
+ ReportAllocationSizeTooBig(size, max_malloc_size, &stack);
+ }
+ DFsanThread *t = GetCurrentThread();
+ void *allocated;
+ if (t) {
+ AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
+ allocated = allocator.Allocate(cache, size, alignment);
+ } else {
+ SpinMutexLock l(&fallback_mutex);
+ AllocatorCache *cache = &fallback_allocator_cache;
+ allocated = allocator.Allocate(cache, size, alignment);
+ }
+ if (UNLIKELY(!allocated)) {
+ SetAllocatorOutOfMemory();
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ BufferedStackTrace stack;
+ ReportOutOfMemory(size, &stack);
+ }
+ Metadata *meta =
+ reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
+ meta->requested_size = size;
+ if (zeroise) {
+ internal_memset(allocated, 0, size);
+ dfsan_set_label(0, allocated, size);
+ } else if (flags().zero_in_malloc) {
+ dfsan_set_label(0, allocated, size);
+ }
+ return allocated;
+}
+
+void dfsan_deallocate(void *p) {
+ CHECK(p);
+ Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(p));
+ uptr size = meta->requested_size;
+ meta->requested_size = 0;
+ if (flags().zero_in_free)
+ dfsan_set_label(0, p, size);
+ DFsanThread *t = GetCurrentThread();
+ if (t) {
+ AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
+ allocator.Deallocate(cache, p);
+ } else {
+ SpinMutexLock l(&fallback_mutex);
+ AllocatorCache *cache = &fallback_allocator_cache;
+ allocator.Deallocate(cache, p);
+ }
+}
+
+void *DFsanReallocate(void *old_p, uptr new_size, uptr alignment) {
+ Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(old_p));
+ uptr old_size = meta->requested_size;
+ uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(old_p);
+ if (new_size <= actually_allocated_size) {
+ // We are not reallocating here.
+ meta->requested_size = new_size;
+ if (new_size > old_size && flags().zero_in_malloc)
+ dfsan_set_label(0, (char *)old_p + old_size, new_size - old_size);
+ return old_p;
+ }
+ uptr memcpy_size = Min(new_size, old_size);
+ void *new_p = DFsanAllocate(new_size, alignment, false /*zeroise*/);
+ if (new_p) {
+ dfsan_copy_memory(new_p, old_p, memcpy_size);
+ dfsan_deallocate(old_p);
+ }
+ return new_p;
+}
+
+void *DFsanCalloc(uptr nmemb, uptr size) {
+ if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ BufferedStackTrace stack;
+ ReportCallocOverflow(nmemb, size, &stack);
+ }
+ return DFsanAllocate(nmemb * size, sizeof(u64), true /*zeroise*/);
+}
+
+static uptr AllocationSize(const void *p) {
+ if (!p)
+ return 0;
+ const void *beg = allocator.GetBlockBegin(p);
+ if (beg != p)
+ return 0;
+ Metadata *b = (Metadata *)allocator.GetMetaData(p);
+ return b->requested_size;
+}
+
+void *dfsan_malloc(uptr size) {
+ return SetErrnoOnNull(DFsanAllocate(size, sizeof(u64), false /*zeroise*/));
+}
+
+void *dfsan_calloc(uptr nmemb, uptr size) {
+ return SetErrnoOnNull(DFsanCalloc(nmemb, size));
+}
+
+void *dfsan_realloc(void *ptr, uptr size) {
+ if (!ptr)
+ return SetErrnoOnNull(DFsanAllocate(size, sizeof(u64), false /*zeroise*/));
+ if (size == 0) {
+ dfsan_deallocate(ptr);
+ return nullptr;
+ }
+ return SetErrnoOnNull(DFsanReallocate(ptr, size, sizeof(u64)));
+}
+
+void *dfsan_reallocarray(void *ptr, uptr nmemb, uptr size) {
+ if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
+ errno = errno_ENOMEM;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ BufferedStackTrace stack;
+ ReportReallocArrayOverflow(nmemb, size, &stack);
+ }
+ return dfsan_realloc(ptr, nmemb * size);
+}
+
+void *dfsan_valloc(uptr size) {
+ return SetErrnoOnNull(
+ DFsanAllocate(size, GetPageSizeCached(), false /*zeroise*/));
+}
+
+void *dfsan_pvalloc(uptr size) {
+ uptr PageSize = GetPageSizeCached();
+ if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
+ errno = errno_ENOMEM;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ BufferedStackTrace stack;
+ ReportPvallocOverflow(size, &stack);
+ }
+ // pvalloc(0) should allocate one page.
+ size = size ? RoundUpTo(size, PageSize) : PageSize;
+ return SetErrnoOnNull(DFsanAllocate(size, PageSize, false /*zeroise*/));
+}
+
+void *dfsan_aligned_alloc(uptr alignment, uptr size) {
+ if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
+ errno = errno_EINVAL;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ BufferedStackTrace stack;
+ ReportInvalidAlignedAllocAlignment(size, alignment, &stack);
+ }
+ return SetErrnoOnNull(DFsanAllocate(size, alignment, false /*zeroise*/));
+}
+
+void *dfsan_memalign(uptr alignment, uptr size) {
+ if (UNLIKELY(!IsPowerOfTwo(alignment))) {
+ errno = errno_EINVAL;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ BufferedStackTrace stack;
+ ReportInvalidAllocationAlignment(alignment, &stack);
+ }
+ return SetErrnoOnNull(DFsanAllocate(size, alignment, false /*zeroise*/));
+}
+
+int dfsan_posix_memalign(void **memptr, uptr alignment, uptr size) {
+ if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
+ if (AllocatorMayReturnNull())
+ return errno_EINVAL;
+ BufferedStackTrace stack;
+ ReportInvalidPosixMemalignAlignment(alignment, &stack);
+ }
+ void *ptr = DFsanAllocate(size, alignment, false /*zeroise*/);
+ if (UNLIKELY(!ptr))
+ // OOM error is already taken care of by DFsanAllocate.
+ return errno_ENOMEM;
+ CHECK(IsAligned((uptr)ptr, alignment));
+ *memptr = ptr;
+ return 0;
+}
+
+} // namespace __dfsan
+
+using namespace __dfsan;
+
+uptr __sanitizer_get_current_allocated_bytes() {
+ uptr stats[AllocatorStatCount];
+ allocator.GetStats(stats);
+ return stats[AllocatorStatAllocated];
+}
+
+uptr __sanitizer_get_heap_size() {
+ uptr stats[AllocatorStatCount];
+ allocator.GetStats(stats);
+ return stats[AllocatorStatMapped];
+}
+
+uptr __sanitizer_get_free_bytes() { return 1; }
+
+uptr __sanitizer_get_unmapped_bytes() { return 1; }
+
+uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
+
+int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }
+
+uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }
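Note: the __sanitizer_get_* definitions above plug the DFSan allocator into the generic allocator introspection interface. A short sketch of how an instrumented program can query it, assuming malloc is routed through the dfsan_malloc interceptors added elsewhere in this patch series:

    #include <sanitizer/allocator_interface.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main() {
      void *p = malloc(100);  // served by the combined allocator above
      int on_stack = 0;
      printf("heap ptr: owned=%d size=%zu\n", __sanitizer_get_ownership(p),
             __sanitizer_get_allocated_size(p));   // owned=1 size=100
      printf("stack ptr: owned=%d\n",
             __sanitizer_get_ownership(&on_stack));  // owned=0
      free(p);
      return 0;
    }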
diff --git a/compiler-rt/lib/dfsan/dfsan_allocator.h b/compiler-rt/lib/dfsan/dfsan_allocator.h
new file mode 100644
index 000000000000..3b4171b6314d
--- /dev/null
+++ b/compiler-rt/lib/dfsan/dfsan_allocator.h
@@ -0,0 +1,30 @@
+//===-- dfsan_allocator.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of DataflowSanitizer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef DFSAN_ALLOCATOR_H
+#define DFSAN_ALLOCATOR_H
+
+#include "sanitizer_common/sanitizer_common.h"
+
+namespace __dfsan {
+
+struct DFsanThreadLocalMallocStorage {
+ ALIGNED(8) uptr allocator_cache[96 * (512 * 8 + 16)]; // Opaque.
+ void CommitBack();
+
+ private:
+ // These objects are allocated via mmap() and are zero-initialized.
+ DFsanThreadLocalMallocStorage() {}
+};
+
+} // namespace __dfsan
+#endif // DFSAN_ALLOCATOR_H
diff --git a/compiler-rt/lib/dfsan/dfsan_chained_origin_depot.cpp b/compiler-rt/lib/dfsan/dfsan_chained_origin_depot.cpp
new file mode 100644
index 000000000000..9ec598bf2ce9
--- /dev/null
+++ b/compiler-rt/lib/dfsan/dfsan_chained_origin_depot.cpp
@@ -0,0 +1,22 @@
+//===-- dfsan_chained_origin_depot.cpp ------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of DataFlowSanitizer.
+//
+// A storage for chained origins.
+//===----------------------------------------------------------------------===//
+
+#include "dfsan_chained_origin_depot.h"
+
+namespace __dfsan {
+
+static ChainedOriginDepot chainedOriginDepot;
+
+ChainedOriginDepot* GetChainedOriginDepot() { return &chainedOriginDepot; }
+
+} // namespace __dfsan
diff --git a/compiler-rt/lib/dfsan/dfsan_chained_origin_depot.h b/compiler-rt/lib/dfsan/dfsan_chained_origin_depot.h
new file mode 100644
index 000000000000..d715ef707f41
--- /dev/null
+++ b/compiler-rt/lib/dfsan/dfsan_chained_origin_depot.h
@@ -0,0 +1,26 @@
+//===-- dfsan_chained_origin_depot.h ----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of DataFlowSanitizer.
+//
+// A storage for chained origins.
+//===----------------------------------------------------------------------===//
+
+#ifndef DFSAN_CHAINED_ORIGIN_DEPOT_H
+#define DFSAN_CHAINED_ORIGIN_DEPOT_H
+
+#include "sanitizer_common/sanitizer_chained_origin_depot.h"
+#include "sanitizer_common/sanitizer_common.h"
+
+namespace __dfsan {
+
+ChainedOriginDepot* GetChainedOriginDepot();
+
+} // namespace __dfsan
+
+#endif // DFSAN_CHAINED_ORIGIN_DEPOT_H
diff --git a/compiler-rt/lib/dfsan/dfsan_custom.cpp b/compiler-rt/lib/dfsan/dfsan_custom.cpp
index 94901cee0d5c..3185184f29c8 100644
--- a/compiler-rt/lib/dfsan/dfsan_custom.cpp
+++ b/compiler-rt/lib/dfsan/dfsan_custom.cpp
@@ -1,4 +1,4 @@
-//===-- dfsan.cpp ---------------------------------------------------------===//
+//===-- dfsan_custom.cpp --------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -37,9 +37,13 @@
#include <unistd.h>
#include "dfsan/dfsan.h"
+#include "dfsan/dfsan_chained_origin_depot.h"
+#include "dfsan/dfsan_flags.h"
+#include "dfsan/dfsan_thread.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_linux.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
using namespace __dfsan;
@@ -51,6 +55,30 @@ using namespace __dfsan;
#define DECLARE_WEAK_INTERCEPTOR_HOOK(f, ...) \
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void f(__VA_ARGS__);
+// Async-safe, non-reentrant spin lock.
+class SignalSpinLocker {
+ public:
+ SignalSpinLocker() {
+ sigset_t all_set;
+ sigfillset(&all_set);
+ pthread_sigmask(SIG_SETMASK, &all_set, &saved_thread_mask_);
+ sigactions_mu.Lock();
+ }
+ ~SignalSpinLocker() {
+ sigactions_mu.Unlock();
+ pthread_sigmask(SIG_SETMASK, &saved_thread_mask_, nullptr);
+ }
+
+ private:
+ static StaticSpinMutex sigactions_mu;
+ sigset_t saved_thread_mask_;
+
+ SignalSpinLocker(const SignalSpinLocker &) = delete;
+ SignalSpinLocker &operator=(const SignalSpinLocker &) = delete;
+};
+
+StaticSpinMutex SignalSpinLocker::sigactions_mu;
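Note: SignalSpinLocker exists so that updates to shared signal-handling state can be made atomic with respect to asynchronous signal delivery: it masks every signal on the current thread, then takes a global spin lock. A usage sketch with a hypothetical handler table (the real users are the signal-related wrappers elsewhere in this file, not included in this excerpt):

    // Hypothetical table guarded by SignalSpinLocker.
    static void (*registered_handlers[64])(int);

    static void register_handler(int signum, void (*handler)(int)) {
      // All signals stay blocked and sigactions_mu stays held for the whole
      // scope, so a concurrently delivered signal cannot observe a
      // half-updated table.
      SignalSpinLocker lock;
      registered_handlers[signum] = handler;
    }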
+
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE int
__dfsw_stat(const char *path, struct stat *buf, dfsan_label path_label,
@@ -62,6 +90,14 @@ __dfsw_stat(const char *path, struct stat *buf, dfsan_label path_label,
return ret;
}
+SANITIZER_INTERFACE_ATTRIBUTE int __dfso_stat(
+ const char *path, struct stat *buf, dfsan_label path_label,
+ dfsan_label buf_label, dfsan_label *ret_label, dfsan_origin path_origin,
+ dfsan_origin buf_origin, dfsan_origin *ret_origin) {
+ int ret = __dfsw_stat(path, buf, path_label, buf_label, ret_label);
+ return ret;
+}
+
SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_fstat(int fd, struct stat *buf,
dfsan_label fd_label,
dfsan_label buf_label,
@@ -73,27 +109,58 @@ SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_fstat(int fd, struct stat *buf,
return ret;
}
-SANITIZER_INTERFACE_ATTRIBUTE char *__dfsw_strchr(const char *s, int c,
- dfsan_label s_label,
- dfsan_label c_label,
- dfsan_label *ret_label) {
+SANITIZER_INTERFACE_ATTRIBUTE int __dfso_fstat(
+ int fd, struct stat *buf, dfsan_label fd_label, dfsan_label buf_label,
+ dfsan_label *ret_label, dfsan_origin fd_origin, dfsan_origin buf_origin,
+ dfsan_origin *ret_origin) {
+ int ret = __dfsw_fstat(fd, buf, fd_label, buf_label, ret_label);
+ return ret;
+}
+
+static char *dfsan_strchr_with_label(const char *s, int c, size_t *bytes_read,
+ dfsan_label s_label, dfsan_label c_label,
+ dfsan_label *ret_label) {
+ char *match_pos = nullptr;
for (size_t i = 0;; ++i) {
if (s[i] == c || s[i] == 0) {
- if (flags().strict_data_dependencies) {
- *ret_label = s_label;
- } else {
- *ret_label = dfsan_union(dfsan_read_label(s, i + 1),
- dfsan_union(s_label, c_label));
- }
-
// If s[i] is the \0 at the end of the string, and \0 is not the
// character we are searching for, then return null.
- if (s[i] == 0 && c != 0) {
- return nullptr;
- }
- return const_cast<char *>(s + i);
+ *bytes_read = i + 1;
+ match_pos = s[i] == 0 && c != 0 ? nullptr : const_cast<char *>(s + i);
+ break;
}
}
+ if (flags().strict_data_dependencies)
+ *ret_label = s_label;
+ else
+ *ret_label = dfsan_union(dfsan_read_label(s, *bytes_read),
+ dfsan_union(s_label, c_label));
+ return match_pos;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE char *__dfsw_strchr(const char *s, int c,
+ dfsan_label s_label,
+ dfsan_label c_label,
+ dfsan_label *ret_label) {
+ size_t bytes_read;
+ return dfsan_strchr_with_label(s, c, &bytes_read, s_label, c_label,
+ ret_label);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE char *__dfso_strchr(
+ const char *s, int c, dfsan_label s_label, dfsan_label c_label,
+ dfsan_label *ret_label, dfsan_origin s_origin, dfsan_origin c_origin,
+ dfsan_origin *ret_origin) {
+ size_t bytes_read;
+ char *r =
+ dfsan_strchr_with_label(s, c, &bytes_read, s_label, c_label, ret_label);
+ if (flags().strict_data_dependencies) {
+ *ret_origin = s_origin;
+ } else if (*ret_label) {
+ dfsan_origin o = dfsan_read_origin_of_first_taint(s, bytes_read);
+ *ret_origin = o ? o : (s_label ? s_origin : c_origin);
+ }
+ return r;
}
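Note: the difference between the strict and non-strict label rules is easiest to see from instrumented user code. With strict_data_dependencies (the default) the returned pointer only carries the label of the `s` argument itself; with strict_data_dependencies=0 (e.g. via DFSAN_OPTIONS) it also unions in the labels of every byte scanned. A hedged sketch, assuming a build with -fsanitize=dataflow:

    #include <sanitizer/dfsan_interface.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main() {
      char buf[] = "hello";
      dfsan_set_label(1, &buf[4], 1);   // taint the trailing 'o'
      char *p = strchr(buf, 'o');       // routed through the wrappers above
      // Strict mode: label of `buf` itself (0 here). Non-strict mode: also
      // includes the labels of the scanned bytes, i.e. the tainted 'o'.
      printf("result label: %d\n", (int)dfsan_get_label((long)(uintptr_t)p));
      return 0;
    }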
SANITIZER_INTERFACE_ATTRIBUTE char *__dfsw_strpbrk(const char *s,
@@ -114,36 +181,87 @@ SANITIZER_INTERFACE_ATTRIBUTE char *__dfsw_strpbrk(const char *s,
return const_cast<char *>(ret);
}
+SANITIZER_INTERFACE_ATTRIBUTE char *__dfso_strpbrk(
+ const char *s, const char *accept, dfsan_label s_label,
+ dfsan_label accept_label, dfsan_label *ret_label, dfsan_origin s_origin,
+ dfsan_origin accept_origin, dfsan_origin *ret_origin) {
+ const char *ret = __dfsw_strpbrk(s, accept, s_label, accept_label, ret_label);
+ if (flags().strict_data_dependencies) {
+ if (ret)
+ *ret_origin = s_origin;
+ } else {
+ if (*ret_label) {
+ size_t s_bytes_read = (ret ? ret - s : strlen(s)) + 1;
+ dfsan_origin o = dfsan_read_origin_of_first_taint(s, s_bytes_read);
+ if (o) {
+ *ret_origin = o;
+ } else {
+ o = dfsan_read_origin_of_first_taint(accept, strlen(accept) + 1);
+ *ret_origin = o ? o : (s_label ? s_origin : accept_origin);
+ }
+ }
+ }
+ return const_cast<char *>(ret);
+}
+
static int dfsan_memcmp_bcmp(const void *s1, const void *s2, size_t n,
- dfsan_label s1_label, dfsan_label s2_label,
- dfsan_label n_label, dfsan_label *ret_label) {
+ size_t *bytes_read) {
const char *cs1 = (const char *) s1, *cs2 = (const char *) s2;
for (size_t i = 0; i != n; ++i) {
if (cs1[i] != cs2[i]) {
- if (flags().strict_data_dependencies) {
- *ret_label = 0;
- } else {
- *ret_label = dfsan_union(dfsan_read_label(cs1, i + 1),
- dfsan_read_label(cs2, i + 1));
- }
+ *bytes_read = i + 1;
return cs1[i] - cs2[i];
}
}
-
- if (flags().strict_data_dependencies) {
- *ret_label = 0;
- } else {
- *ret_label = dfsan_union(dfsan_read_label(cs1, n),
- dfsan_read_label(cs2, n));
- }
+ *bytes_read = n;
return 0;
}
+static dfsan_label dfsan_get_memcmp_label(const void *s1, const void *s2,
+ size_t pos) {
+ if (flags().strict_data_dependencies)
+ return 0;
+ return dfsan_union(dfsan_read_label(s1, pos), dfsan_read_label(s2, pos));
+}
+
+static void dfsan_get_memcmp_origin(const void *s1, const void *s2, size_t pos,
+ dfsan_label *ret_label,
+ dfsan_origin *ret_origin) {
+ *ret_label = dfsan_get_memcmp_label(s1, s2, pos);
+ if (*ret_label == 0)
+ return;
+ dfsan_origin o = dfsan_read_origin_of_first_taint(s1, pos);
+ *ret_origin = o ? o : dfsan_read_origin_of_first_taint(s2, pos);
+}
+
+static int dfsan_memcmp_bcmp_label(const void *s1, const void *s2, size_t n,
+ dfsan_label *ret_label) {
+ size_t bytes_read;
+ int r = dfsan_memcmp_bcmp(s1, s2, n, &bytes_read);
+ *ret_label = dfsan_get_memcmp_label(s1, s2, bytes_read);
+ return r;
+}
+
+static int dfsan_memcmp_bcmp_origin(const void *s1, const void *s2, size_t n,
+ dfsan_label *ret_label,
+ dfsan_origin *ret_origin) {
+ size_t bytes_read;
+ int r = dfsan_memcmp_bcmp(s1, s2, n, &bytes_read);
+ dfsan_get_memcmp_origin(s1, s2, bytes_read, ret_label, ret_origin);
+ return r;
+}
+
DECLARE_WEAK_INTERCEPTOR_HOOK(dfsan_weak_hook_memcmp, uptr caller_pc,
const void *s1, const void *s2, size_t n,
dfsan_label s1_label, dfsan_label s2_label,
dfsan_label n_label)
+DECLARE_WEAK_INTERCEPTOR_HOOK(dfsan_weak_hook_origin_memcmp, uptr caller_pc,
+ const void *s1, const void *s2, size_t n,
+ dfsan_label s1_label, dfsan_label s2_label,
+ dfsan_label n_label, dfsan_origin s1_origin,
+ dfsan_origin s2_origin, dfsan_origin n_origin)
+
SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_memcmp(const void *s1, const void *s2,
size_t n, dfsan_label s1_label,
dfsan_label s2_label,
@@ -151,7 +269,18 @@ SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_memcmp(const void *s1, const void *s2,
dfsan_label *ret_label) {
CALL_WEAK_INTERCEPTOR_HOOK(dfsan_weak_hook_memcmp, GET_CALLER_PC(), s1, s2, n,
s1_label, s2_label, n_label);
- return dfsan_memcmp_bcmp(s1, s2, n, s1_label, s2_label, n_label, ret_label);
+ return dfsan_memcmp_bcmp_label(s1, s2, n, ret_label);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE int __dfso_memcmp(
+ const void *s1, const void *s2, size_t n, dfsan_label s1_label,
+ dfsan_label s2_label, dfsan_label n_label, dfsan_label *ret_label,
+ dfsan_origin s1_origin, dfsan_origin s2_origin, dfsan_origin n_origin,
+ dfsan_origin *ret_origin) {
+ CALL_WEAK_INTERCEPTOR_HOOK(dfsan_weak_hook_origin_memcmp, GET_CALLER_PC(), s1,
+ s2, n, s1_label, s2_label, n_label, s1_origin,
+ s2_origin, n_origin);
+ return dfsan_memcmp_bcmp_origin(s1, s2, n, ret_label, ret_origin);
}
SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_bcmp(const void *s1, const void *s2,
@@ -159,51 +288,97 @@ SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_bcmp(const void *s1, const void *s2,
dfsan_label s2_label,
dfsan_label n_label,
dfsan_label *ret_label) {
- return dfsan_memcmp_bcmp(s1, s2, n, s1_label, s2_label, n_label, ret_label);
+ return dfsan_memcmp_bcmp_label(s1, s2, n, ret_label);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE int __dfso_bcmp(
+ const void *s1, const void *s2, size_t n, dfsan_label s1_label,
+ dfsan_label s2_label, dfsan_label n_label, dfsan_label *ret_label,
+ dfsan_origin s1_origin, dfsan_origin s2_origin, dfsan_origin n_origin,
+ dfsan_origin *ret_origin) {
+ return dfsan_memcmp_bcmp_origin(s1, s2, n, ret_label, ret_origin);
+}
+
+// When n == 0, compare strings without a byte limit.
+// When n > 0, compare the first (at most) n bytes of s1 and s2.
+static int dfsan_strncmp(const char *s1, const char *s2, size_t n,
+ size_t *bytes_read) {
+ for (size_t i = 0;; ++i) {
+ if (s1[i] != s2[i] || s1[i] == 0 || s2[i] == 0 || (n > 0 && i == n - 1)) {
+ *bytes_read = i + 1;
+ return s1[i] - s2[i];
+ }
+ }
}
DECLARE_WEAK_INTERCEPTOR_HOOK(dfsan_weak_hook_strcmp, uptr caller_pc,
const char *s1, const char *s2,
dfsan_label s1_label, dfsan_label s2_label)
+DECLARE_WEAK_INTERCEPTOR_HOOK(dfsan_weak_hook_origin_strcmp, uptr caller_pc,
+ const char *s1, const char *s2,
+ dfsan_label s1_label, dfsan_label s2_label,
+ dfsan_origin s1_origin, dfsan_origin s2_origin)
+
SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_strcmp(const char *s1, const char *s2,
dfsan_label s1_label,
dfsan_label s2_label,
dfsan_label *ret_label) {
CALL_WEAK_INTERCEPTOR_HOOK(dfsan_weak_hook_strcmp, GET_CALLER_PC(), s1, s2,
s1_label, s2_label);
- for (size_t i = 0;; ++i) {
- if (s1[i] != s2[i] || s1[i] == 0 || s2[i] == 0) {
- if (flags().strict_data_dependencies) {
- *ret_label = 0;
- } else {
- *ret_label = dfsan_union(dfsan_read_label(s1, i + 1),
- dfsan_read_label(s2, i + 1));
- }
- return s1[i] - s2[i];
- }
- }
- return 0;
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE int
-__dfsw_strcasecmp(const char *s1, const char *s2, dfsan_label s1_label,
- dfsan_label s2_label, dfsan_label *ret_label) {
+ size_t bytes_read;
+ int r = dfsan_strncmp(s1, s2, 0, &bytes_read);
+ *ret_label = dfsan_get_memcmp_label(s1, s2, bytes_read);
+ return r;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE int __dfso_strcmp(
+ const char *s1, const char *s2, dfsan_label s1_label, dfsan_label s2_label,
+ dfsan_label *ret_label, dfsan_origin s1_origin, dfsan_origin s2_origin,
+ dfsan_origin *ret_origin) {
+ CALL_WEAK_INTERCEPTOR_HOOK(dfsan_weak_hook_origin_strcmp, GET_CALLER_PC(), s1,
+ s2, s1_label, s2_label, s1_origin, s2_origin);
+ size_t bytes_read;
+ int r = dfsan_strncmp(s1, s2, 0, &bytes_read);
+ dfsan_get_memcmp_origin(s1, s2, bytes_read, ret_label, ret_origin);
+ return r;
+}
+
+// When n == 0, compare strings without a byte limit.
+// When n > 0, compare the first (at most) n bytes of s1 and s2.
+static int dfsan_strncasecmp(const char *s1, const char *s2, size_t n,
+ size_t *bytes_read) {
for (size_t i = 0;; ++i) {
char s1_lower = tolower(s1[i]);
char s2_lower = tolower(s2[i]);
- if (s1_lower != s2_lower || s1[i] == 0 || s2[i] == 0) {
- if (flags().strict_data_dependencies) {
- *ret_label = 0;
- } else {
- *ret_label = dfsan_union(dfsan_read_label(s1, i + 1),
- dfsan_read_label(s2, i + 1));
- }
+ if (s1_lower != s2_lower || s1[i] == 0 || s2[i] == 0 ||
+ (n > 0 && i == n - 1)) {
+ *bytes_read = i + 1;
return s1_lower - s2_lower;
}
}
- return 0;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_strcasecmp(const char *s1,
+ const char *s2,
+ dfsan_label s1_label,
+ dfsan_label s2_label,
+ dfsan_label *ret_label) {
+ size_t bytes_read;
+ int r = dfsan_strncasecmp(s1, s2, 0, &bytes_read);
+ *ret_label = dfsan_get_memcmp_label(s1, s2, bytes_read);
+ return r;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE int __dfso_strcasecmp(
+ const char *s1, const char *s2, dfsan_label s1_label, dfsan_label s2_label,
+ dfsan_label *ret_label, dfsan_origin s1_origin, dfsan_origin s2_origin,
+ dfsan_origin *ret_origin) {
+ size_t bytes_read;
+ int r = dfsan_strncasecmp(s1, s2, 0, &bytes_read);
+ dfsan_get_memcmp_origin(s1, s2, bytes_read, ret_label, ret_origin);
+ return r;
}
DECLARE_WEAK_INTERCEPTOR_HOOK(dfsan_weak_hook_strncmp, uptr caller_pc,
@@ -211,6 +386,12 @@ DECLARE_WEAK_INTERCEPTOR_HOOK(dfsan_weak_hook_strncmp, uptr caller_pc,
dfsan_label s1_label, dfsan_label s2_label,
dfsan_label n_label)
+DECLARE_WEAK_INTERCEPTOR_HOOK(dfsan_weak_hook_origin_strncmp, uptr caller_pc,
+ const char *s1, const char *s2, size_t n,
+ dfsan_label s1_label, dfsan_label s2_label,
+ dfsan_label n_label, dfsan_origin s1_origin,
+ dfsan_origin s2_origin, dfsan_origin n_origin)
+
SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_strncmp(const char *s1, const char *s2,
size_t n, dfsan_label s1_label,
dfsan_label s2_label,
@@ -224,56 +405,63 @@ SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_strncmp(const char *s1, const char *s2,
CALL_WEAK_INTERCEPTOR_HOOK(dfsan_weak_hook_strncmp, GET_CALLER_PC(), s1, s2,
n, s1_label, s2_label, n_label);
- for (size_t i = 0;; ++i) {
- if (s1[i] != s2[i] || s1[i] == 0 || s2[i] == 0 || i == n - 1) {
- if (flags().strict_data_dependencies) {
- *ret_label = 0;
- } else {
- *ret_label = dfsan_union(dfsan_read_label(s1, i + 1),
- dfsan_read_label(s2, i + 1));
- }
- return s1[i] - s2[i];
- }
- }
- return 0;
+ size_t bytes_read;
+ int r = dfsan_strncmp(s1, s2, n, &bytes_read);
+ *ret_label = dfsan_get_memcmp_label(s1, s2, bytes_read);
+ return r;
}
-SANITIZER_INTERFACE_ATTRIBUTE int
-__dfsw_strncasecmp(const char *s1, const char *s2, size_t n,
- dfsan_label s1_label, dfsan_label s2_label,
- dfsan_label n_label, dfsan_label *ret_label) {
+SANITIZER_INTERFACE_ATTRIBUTE int __dfso_strncmp(
+ const char *s1, const char *s2, size_t n, dfsan_label s1_label,
+ dfsan_label s2_label, dfsan_label n_label, dfsan_label *ret_label,
+ dfsan_origin s1_origin, dfsan_origin s2_origin, dfsan_origin n_origin,
+ dfsan_origin *ret_origin) {
if (n == 0) {
*ret_label = 0;
return 0;
}
- for (size_t i = 0;; ++i) {
- char s1_lower = tolower(s1[i]);
- char s2_lower = tolower(s2[i]);
+ CALL_WEAK_INTERCEPTOR_HOOK(dfsan_weak_hook_origin_strncmp, GET_CALLER_PC(),
+ s1, s2, n, s1_label, s2_label, n_label, s1_origin,
+ s2_origin, n_origin);
- if (s1_lower != s2_lower || s1[i] == 0 || s2[i] == 0 || i == n - 1) {
- if (flags().strict_data_dependencies) {
- *ret_label = 0;
- } else {
- *ret_label = dfsan_union(dfsan_read_label(s1, i + 1),
- dfsan_read_label(s2, i + 1));
- }
- return s1_lower - s2_lower;
- }
+ size_t bytes_read;
+ int r = dfsan_strncmp(s1, s2, n, &bytes_read);
+ dfsan_get_memcmp_origin(s1, s2, bytes_read, ret_label, ret_origin);
+ return r;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_strncasecmp(
+ const char *s1, const char *s2, size_t n, dfsan_label s1_label,
+ dfsan_label s2_label, dfsan_label n_label, dfsan_label *ret_label) {
+ if (n == 0) {
+ *ret_label = 0;
+ return 0;
}
- return 0;
+
+ size_t bytes_read;
+ int r = dfsan_strncasecmp(s1, s2, n, &bytes_read);
+ *ret_label = dfsan_get_memcmp_label(s1, s2, bytes_read);
+ return r;
}
-SANITIZER_INTERFACE_ATTRIBUTE void *__dfsw_calloc(size_t nmemb, size_t size,
- dfsan_label nmemb_label,
- dfsan_label size_label,
- dfsan_label *ret_label) {
- void *p = calloc(nmemb, size);
- dfsan_set_label(0, p, nmemb * size);
- *ret_label = 0;
- return p;
+SANITIZER_INTERFACE_ATTRIBUTE int __dfso_strncasecmp(
+ const char *s1, const char *s2, size_t n, dfsan_label s1_label,
+ dfsan_label s2_label, dfsan_label n_label, dfsan_label *ret_label,
+ dfsan_origin s1_origin, dfsan_origin s2_origin, dfsan_origin n_origin,
+ dfsan_origin *ret_origin) {
+ if (n == 0) {
+ *ret_label = 0;
+ return 0;
+ }
+
+ size_t bytes_read;
+ int r = dfsan_strncasecmp(s1, s2, n, &bytes_read);
+ dfsan_get_memcmp_origin(s1, s2, bytes_read, ret_label, ret_origin);
+ return r;
}
+
SANITIZER_INTERFACE_ATTRIBUTE size_t
__dfsw_strlen(const char *s, dfsan_label s_label, dfsan_label *ret_label) {
size_t ret = strlen(s);
@@ -285,6 +473,28 @@ __dfsw_strlen(const char *s, dfsan_label s_label, dfsan_label *ret_label) {
return ret;
}
+SANITIZER_INTERFACE_ATTRIBUTE size_t __dfso_strlen(const char *s,
+ dfsan_label s_label,
+ dfsan_label *ret_label,
+ dfsan_origin s_origin,
+ dfsan_origin *ret_origin) {
+ size_t ret = __dfsw_strlen(s, s_label, ret_label);
+ if (!flags().strict_data_dependencies)
+ *ret_origin = dfsan_read_origin_of_first_taint(s, ret + 1);
+ return ret;
+}
+
+static void *dfsan_memmove(void *dest, const void *src, size_t n) {
+ dfsan_label *sdest = shadow_for(dest);
+ const dfsan_label *ssrc = shadow_for(src);
+ internal_memmove((void *)sdest, (const void *)ssrc, n * sizeof(dfsan_label));
+ return internal_memmove(dest, src, n);
+}
+
+static void *dfsan_memmove_with_origin(void *dest, const void *src, size_t n) {
+ dfsan_mem_origin_transfer(dest, src, n);
+ return dfsan_memmove(dest, src, n);
+}
static void *dfsan_memcpy(void *dest, const void *src, size_t n) {
dfsan_label *sdest = shadow_for(dest);
@@ -293,11 +503,22 @@ static void *dfsan_memcpy(void *dest, const void *src, size_t n) {
return internal_memcpy(dest, src, n);
}
+static void *dfsan_memcpy_with_origin(void *dest, const void *src, size_t n) {
+ dfsan_mem_origin_transfer(dest, src, n);
+ return dfsan_memcpy(dest, src, n);
+}
+
static void dfsan_memset(void *s, int c, dfsan_label c_label, size_t n) {
internal_memset(s, c, n);
dfsan_set_label(c_label, s, n);
}
+static void dfsan_memset_with_origin(void *s, int c, dfsan_label c_label,
+ dfsan_origin c_origin, size_t n) {
+ internal_memset(s, c, n);
+ dfsan_set_label_origin(c_label, c_origin, s, n);
+}
+
SANITIZER_INTERFACE_ATTRIBUTE
void *__dfsw_memcpy(void *dest, const void *src, size_t n,
dfsan_label dest_label, dfsan_label src_label,
@@ -307,6 +528,36 @@ void *__dfsw_memcpy(void *dest, const void *src, size_t n,
}
SANITIZER_INTERFACE_ATTRIBUTE
+void *__dfso_memcpy(void *dest, const void *src, size_t n,
+ dfsan_label dest_label, dfsan_label src_label,
+ dfsan_label n_label, dfsan_label *ret_label,
+ dfsan_origin dest_origin, dfsan_origin src_origin,
+ dfsan_origin n_origin, dfsan_origin *ret_origin) {
+ *ret_label = dest_label;
+ *ret_origin = dest_origin;
+ return dfsan_memcpy_with_origin(dest, src, n);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__dfsw_memmove(void *dest, const void *src, size_t n,
+ dfsan_label dest_label, dfsan_label src_label,
+ dfsan_label n_label, dfsan_label *ret_label) {
+ *ret_label = dest_label;
+ return dfsan_memmove(dest, src, n);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__dfso_memmove(void *dest, const void *src, size_t n,
+ dfsan_label dest_label, dfsan_label src_label,
+ dfsan_label n_label, dfsan_label *ret_label,
+ dfsan_origin dest_origin, dfsan_origin src_origin,
+ dfsan_origin n_origin, dfsan_origin *ret_origin) {
+ *ret_label = dest_label;
+ *ret_origin = dest_origin;
+ return dfsan_memmove_with_origin(dest, src, n);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
void *__dfsw_memset(void *s, int c, size_t n,
dfsan_label s_label, dfsan_label c_label,
dfsan_label n_label, dfsan_label *ret_label) {
@@ -315,6 +566,49 @@ void *__dfsw_memset(void *s, int c, size_t n,
return s;
}
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__dfso_memset(void *s, int c, size_t n, dfsan_label s_label,
+ dfsan_label c_label, dfsan_label n_label,
+ dfsan_label *ret_label, dfsan_origin s_origin,
+ dfsan_origin c_origin, dfsan_origin n_origin,
+ dfsan_origin *ret_origin) {
+ dfsan_memset_with_origin(s, c, c_label, c_origin, n);
+ *ret_label = s_label;
+ *ret_origin = s_origin;
+ return s;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE char *__dfsw_strcat(char *dest, const char *src,
+ dfsan_label dest_label,
+ dfsan_label src_label,
+ dfsan_label *ret_label) {
+ size_t dest_len = strlen(dest);
+ char *ret = strcat(dest, src); // NOLINT
+ dfsan_label *sdest = shadow_for(dest + dest_len);
+ const dfsan_label *ssrc = shadow_for(src);
+ internal_memcpy((void *)sdest, (const void *)ssrc,
+ strlen(src) * sizeof(dfsan_label));
+ *ret_label = dest_label;
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE char *__dfso_strcat(
+ char *dest, const char *src, dfsan_label dest_label, dfsan_label src_label,
+ dfsan_label *ret_label, dfsan_origin dest_origin, dfsan_origin src_origin,
+ dfsan_origin *ret_origin) {
+ size_t dest_len = strlen(dest);
+ char *ret = strcat(dest, src); // NOLINT
+ dfsan_label *sdest = shadow_for(dest + dest_len);
+ const dfsan_label *ssrc = shadow_for(src);
+ size_t src_len = strlen(src);
+ dfsan_mem_origin_transfer(dest + dest_len, src, src_len);
+ internal_memcpy((void *)sdest, (const void *)ssrc,
+ src_len * sizeof(dfsan_label));
+ *ret_label = dest_label;
+ *ret_origin = dest_origin;
+ return ret;
+}
+
SANITIZER_INTERFACE_ATTRIBUTE char *
__dfsw_strdup(const char *s, dfsan_label s_label, dfsan_label *ret_label) {
size_t len = strlen(s);
@@ -324,6 +618,18 @@ __dfsw_strdup(const char *s, dfsan_label s_label, dfsan_label *ret_label) {
return static_cast<char *>(p);
}
+SANITIZER_INTERFACE_ATTRIBUTE char *__dfso_strdup(const char *s,
+ dfsan_label s_label,
+ dfsan_label *ret_label,
+ dfsan_origin s_origin,
+ dfsan_origin *ret_origin) {
+ size_t len = strlen(s);
+ void *p = malloc(len + 1);
+ dfsan_memcpy_with_origin(p, s, len + 1);
+ *ret_label = 0;
+ return static_cast<char *>(p);
+}
+
SANITIZER_INTERFACE_ATTRIBUTE char *
__dfsw_strncpy(char *s1, const char *s2, size_t n, dfsan_label s1_label,
dfsan_label s2_label, dfsan_label n_label,
@@ -340,6 +646,24 @@ __dfsw_strncpy(char *s1, const char *s2, size_t n, dfsan_label s1_label,
return s1;
}
+SANITIZER_INTERFACE_ATTRIBUTE char *__dfso_strncpy(
+ char *s1, const char *s2, size_t n, dfsan_label s1_label,
+ dfsan_label s2_label, dfsan_label n_label, dfsan_label *ret_label,
+ dfsan_origin s1_origin, dfsan_origin s2_origin, dfsan_origin n_origin,
+ dfsan_origin *ret_origin) {
+ size_t len = strlen(s2);
+ if (len < n) {
+ dfsan_memcpy_with_origin(s1, s2, len + 1);
+ dfsan_memset_with_origin(s1 + len + 1, 0, 0, 0, n - len - 1);
+ } else {
+ dfsan_memcpy_with_origin(s1, s2, n);
+ }
+
+ *ret_label = s1_label;
+ *ret_origin = s1_origin;
+ return s1;
+}
+
SANITIZER_INTERFACE_ATTRIBUTE ssize_t
__dfsw_pread(int fd, void *buf, size_t count, off_t offset,
dfsan_label fd_label, dfsan_label buf_label,
@@ -352,6 +676,16 @@ __dfsw_pread(int fd, void *buf, size_t count, off_t offset,
return ret;
}
+SANITIZER_INTERFACE_ATTRIBUTE ssize_t __dfso_pread(
+ int fd, void *buf, size_t count, off_t offset, dfsan_label fd_label,
+ dfsan_label buf_label, dfsan_label count_label, dfsan_label offset_label,
+ dfsan_label *ret_label, dfsan_origin fd_origin, dfsan_origin buf_origin,
+    dfsan_origin count_origin, dfsan_origin offset_origin,
+ dfsan_origin *ret_origin) {
+ return __dfsw_pread(fd, buf, count, offset, fd_label, buf_label, count_label,
+ offset_label, ret_label);
+}
+
SANITIZER_INTERFACE_ATTRIBUTE ssize_t
__dfsw_read(int fd, void *buf, size_t count,
dfsan_label fd_label, dfsan_label buf_label,
@@ -364,6 +698,15 @@ __dfsw_read(int fd, void *buf, size_t count,
return ret;
}
+SANITIZER_INTERFACE_ATTRIBUTE ssize_t __dfso_read(
+ int fd, void *buf, size_t count, dfsan_label fd_label,
+ dfsan_label buf_label, dfsan_label count_label, dfsan_label *ret_label,
+ dfsan_origin fd_origin, dfsan_origin buf_origin, dfsan_origin count_origin,
+ dfsan_origin *ret_origin) {
+ return __dfsw_read(fd, buf, count, fd_label, buf_label, count_label,
+ ret_label);
+}
+
SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_clock_gettime(clockid_t clk_id,
struct timespec *tp,
dfsan_label clk_id_label,
@@ -376,7 +719,14 @@ SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_clock_gettime(clockid_t clk_id,
return ret;
}
-static void unpoison(const void *ptr, uptr size) {
+SANITIZER_INTERFACE_ATTRIBUTE int __dfso_clock_gettime(
+ clockid_t clk_id, struct timespec *tp, dfsan_label clk_id_label,
+ dfsan_label tp_label, dfsan_label *ret_label, dfsan_origin clk_id_origin,
+ dfsan_origin tp_origin, dfsan_origin *ret_origin) {
+ return __dfsw_clock_gettime(clk_id, tp, clk_id_label, tp_label, ret_label);
+}
+
+static void dfsan_set_zero_label(const void *ptr, uptr size) {
dfsan_set_label(0, const_cast<void *>(ptr), size);
}
@@ -389,23 +739,48 @@ __dfsw_dlopen(const char *filename, int flag, dfsan_label filename_label,
void *handle = dlopen(filename, flag);
link_map *map = GET_LINK_MAP_BY_DLOPEN_HANDLE(handle);
if (map)
- ForEachMappedRegion(map, unpoison);
+ ForEachMappedRegion(map, dfsan_set_zero_label);
*ret_label = 0;
return handle;
}
-struct pthread_create_info {
- void *(*start_routine_trampoline)(void *, void *, dfsan_label, dfsan_label *);
- void *start_routine;
- void *arg;
-};
+SANITIZER_INTERFACE_ATTRIBUTE void *__dfso_dlopen(
+ const char *filename, int flag, dfsan_label filename_label,
+ dfsan_label flag_label, dfsan_label *ret_label,
+ dfsan_origin filename_origin, dfsan_origin flag_origin,
+ dfsan_origin *ret_origin) {
+ return __dfsw_dlopen(filename, flag, filename_label, flag_label, ret_label);
+}
-static void *pthread_create_cb(void *p) {
- pthread_create_info pci(*(pthread_create_info *)p);
- free(p);
- dfsan_label ret_label;
- return pci.start_routine_trampoline(pci.start_routine, pci.arg, 0,
- &ret_label);
+static void *DFsanThreadStartFunc(void *arg) {
+ DFsanThread *t = (DFsanThread *)arg;
+ SetCurrentThread(t);
+ return t->ThreadStart();
+}
+
+static int dfsan_pthread_create(pthread_t *thread, const pthread_attr_t *attr,
+ void *start_routine_trampoline,
+ void *start_routine, void *arg,
+ dfsan_label *ret_label,
+ bool track_origins = false) {
+ pthread_attr_t myattr;
+ if (!attr) {
+ pthread_attr_init(&myattr);
+ attr = &myattr;
+ }
+
+ // Ensure that the thread stack is large enough to hold all TLS data.
+ AdjustStackSize((void *)(const_cast<pthread_attr_t *>(attr)));
+
+ DFsanThread *t =
+ DFsanThread::Create(start_routine_trampoline,
+ (thread_callback_t)start_routine, arg, track_origins);
+ int res = pthread_create(thread, attr, DFsanThreadStartFunc, t);
+
+ if (attr == &myattr)
+ pthread_attr_destroy(&myattr);
+ *ret_label = 0;
+ return res;
}
SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_pthread_create(
@@ -415,16 +790,22 @@ SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_pthread_create(
void *start_routine, void *arg, dfsan_label thread_label,
dfsan_label attr_label, dfsan_label start_routine_label,
dfsan_label arg_label, dfsan_label *ret_label) {
- pthread_create_info *pci =
- (pthread_create_info *)malloc(sizeof(pthread_create_info));
- pci->start_routine_trampoline = start_routine_trampoline;
- pci->start_routine = start_routine;
- pci->arg = arg;
- int rv = pthread_create(thread, attr, pthread_create_cb, (void *)pci);
- if (rv != 0)
- free(pci);
- *ret_label = 0;
- return rv;
+ return dfsan_pthread_create(thread, attr, (void *)start_routine_trampoline,
+ start_routine, arg, ret_label);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE int __dfso_pthread_create(
+ pthread_t *thread, const pthread_attr_t *attr,
+ void *(*start_routine_trampoline)(void *, void *, dfsan_label,
+ dfsan_label *, dfsan_origin,
+ dfsan_origin *),
+ void *start_routine, void *arg, dfsan_label thread_label,
+ dfsan_label attr_label, dfsan_label start_routine_label,
+ dfsan_label arg_label, dfsan_label *ret_label, dfsan_origin thread_origin,
+ dfsan_origin attr_origin, dfsan_origin start_routine_origin,
+ dfsan_origin arg_origin, dfsan_origin *ret_origin) {
+ return dfsan_pthread_create(thread, attr, (void *)start_routine_trampoline,
+ start_routine, arg, ret_label, true);
}
SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_pthread_join(pthread_t thread,
@@ -439,6 +820,15 @@ SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_pthread_join(pthread_t thread,
return ret;
}
+SANITIZER_INTERFACE_ATTRIBUTE int __dfso_pthread_join(
+ pthread_t thread, void **retval, dfsan_label thread_label,
+ dfsan_label retval_label, dfsan_label *ret_label,
+ dfsan_origin thread_origin, dfsan_origin retval_origin,
+ dfsan_origin *ret_origin) {
+ return __dfsw_pthread_join(thread, retval, thread_label, retval_label,
+ ret_label);
+}
+
struct dl_iterate_phdr_info {
int (*callback_trampoline)(void *callback, struct dl_phdr_info *info,
size_t size, void *data, dfsan_label info_label,
@@ -448,6 +838,17 @@ struct dl_iterate_phdr_info {
void *data;
};
+struct dl_iterate_phdr_origin_info {
+ int (*callback_trampoline)(void *callback, struct dl_phdr_info *info,
+ size_t size, void *data, dfsan_label info_label,
+ dfsan_label size_label, dfsan_label data_label,
+ dfsan_label *ret_label, dfsan_origin info_origin,
+ dfsan_origin size_origin, dfsan_origin data_origin,
+ dfsan_origin *ret_origin);
+ void *callback;
+ void *data;
+};
+
int dl_iterate_phdr_cb(struct dl_phdr_info *info, size_t size, void *data) {
dl_iterate_phdr_info *dipi = (dl_iterate_phdr_info *)data;
dfsan_set_label(0, *info);
@@ -461,6 +862,21 @@ int dl_iterate_phdr_cb(struct dl_phdr_info *info, size_t size, void *data) {
0, &ret_label);
}
+int dl_iterate_phdr_origin_cb(struct dl_phdr_info *info, size_t size,
+ void *data) {
+ dl_iterate_phdr_origin_info *dipi = (dl_iterate_phdr_origin_info *)data;
+ dfsan_set_label(0, *info);
+ dfsan_set_label(0, const_cast<char *>(info->dlpi_name),
+ strlen(info->dlpi_name) + 1);
+ dfsan_set_label(
+ 0, const_cast<char *>(reinterpret_cast<const char *>(info->dlpi_phdr)),
+ sizeof(*info->dlpi_phdr) * info->dlpi_phnum);
+ dfsan_label ret_label;
+ dfsan_origin ret_origin;
+ return dipi->callback_trampoline(dipi->callback, info, size, dipi->data, 0, 0,
+ 0, &ret_label, 0, 0, 0, &ret_origin);
+}
+
SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_dl_iterate_phdr(
int (*callback_trampoline)(void *callback, struct dl_phdr_info *info,
size_t size, void *data, dfsan_label info_label,
@@ -473,6 +889,23 @@ SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_dl_iterate_phdr(
return dl_iterate_phdr(dl_iterate_phdr_cb, &dipi);
}
+SANITIZER_INTERFACE_ATTRIBUTE int __dfso_dl_iterate_phdr(
+ int (*callback_trampoline)(void *callback, struct dl_phdr_info *info,
+ size_t size, void *data, dfsan_label info_label,
+ dfsan_label size_label, dfsan_label data_label,
+ dfsan_label *ret_label, dfsan_origin info_origin,
+ dfsan_origin size_origin,
+ dfsan_origin data_origin,
+ dfsan_origin *ret_origin),
+ void *callback, void *data, dfsan_label callback_label,
+ dfsan_label data_label, dfsan_label *ret_label,
+ dfsan_origin callback_origin, dfsan_origin data_origin,
+ dfsan_origin *ret_origin) {
+ dl_iterate_phdr_origin_info dipi = {callback_trampoline, callback, data};
+ *ret_label = 0;
+ return dl_iterate_phdr(dl_iterate_phdr_origin_cb, &dipi);
+}
+
// This function is only available for glibc 2.27 or newer. Mark it weak so
// linking succeeds with older glibcs.
SANITIZER_WEAK_ATTRIBUTE void _dl_get_tls_static_info(size_t *sizep,
@@ -487,6 +920,13 @@ SANITIZER_INTERFACE_ATTRIBUTE void __dfsw__dl_get_tls_static_info(
dfsan_set_label(0, alignp, sizeof(*alignp));
}
+SANITIZER_INTERFACE_ATTRIBUTE void __dfso__dl_get_tls_static_info(
+ size_t *sizep, size_t *alignp, dfsan_label sizep_label,
+ dfsan_label alignp_label, dfsan_origin sizep_origin,
+ dfsan_origin alignp_origin) {
+ __dfsw__dl_get_tls_static_info(sizep, alignp, sizep_label, alignp_label);
+}
+
SANITIZER_INTERFACE_ATTRIBUTE
char *__dfsw_ctime_r(const time_t *timep, char *buf, dfsan_label timep_label,
dfsan_label buf_label, dfsan_label *ret_label) {
@@ -502,6 +942,25 @@ char *__dfsw_ctime_r(const time_t *timep, char *buf, dfsan_label timep_label,
}
SANITIZER_INTERFACE_ATTRIBUTE
+char *__dfso_ctime_r(const time_t *timep, char *buf, dfsan_label timep_label,
+ dfsan_label buf_label, dfsan_label *ret_label,
+ dfsan_origin timep_origin, dfsan_origin buf_origin,
+ dfsan_origin *ret_origin) {
+ char *ret = ctime_r(timep, buf);
+ if (ret) {
+ dfsan_set_label_origin(
+ dfsan_read_label(timep, sizeof(time_t)),
+ dfsan_read_origin_of_first_taint(timep, sizeof(time_t)), buf,
+ strlen(buf) + 1);
+ *ret_label = buf_label;
+ *ret_origin = buf_origin;
+ } else {
+ *ret_label = 0;
+ }
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
char *__dfsw_fgets(char *s, int size, FILE *stream, dfsan_label s_label,
dfsan_label size_label, dfsan_label stream_label,
dfsan_label *ret_label) {
@@ -516,6 +975,19 @@ char *__dfsw_fgets(char *s, int size, FILE *stream, dfsan_label s_label,
}
SANITIZER_INTERFACE_ATTRIBUTE
+char *__dfso_fgets(char *s, int size, FILE *stream, dfsan_label s_label,
+ dfsan_label size_label, dfsan_label stream_label,
+ dfsan_label *ret_label, dfsan_origin s_origin,
+ dfsan_origin size_origin, dfsan_origin stream_origin,
+ dfsan_origin *ret_origin) {
+ char *ret = __dfsw_fgets(s, size, stream, s_label, size_label, stream_label,
+ ret_label);
+ if (ret)
+ *ret_origin = s_origin;
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
char *__dfsw_getcwd(char *buf, size_t size, dfsan_label buf_label,
dfsan_label size_label, dfsan_label *ret_label) {
char *ret = getcwd(buf, size);
@@ -529,16 +1001,32 @@ char *__dfsw_getcwd(char *buf, size_t size, dfsan_label buf_label,
}
SANITIZER_INTERFACE_ATTRIBUTE
+char *__dfso_getcwd(char *buf, size_t size, dfsan_label buf_label,
+ dfsan_label size_label, dfsan_label *ret_label,
+ dfsan_origin buf_origin, dfsan_origin size_origin,
+ dfsan_origin *ret_origin) {
+ char *ret = __dfsw_getcwd(buf, size, buf_label, size_label, ret_label);
+ if (ret)
+ *ret_origin = buf_origin;
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
char *__dfsw_get_current_dir_name(dfsan_label *ret_label) {
char *ret = get_current_dir_name();
- if (ret) {
+ if (ret)
dfsan_set_label(0, ret, strlen(ret) + 1);
- }
*ret_label = 0;
return ret;
}
SANITIZER_INTERFACE_ATTRIBUTE
+char *__dfso_get_current_dir_name(dfsan_label *ret_label,
+ dfsan_origin *ret_origin) {
+ return __dfsw_get_current_dir_name(ret_label);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
int __dfsw_gethostname(char *name, size_t len, dfsan_label name_label,
dfsan_label len_label, dfsan_label *ret_label) {
int ret = gethostname(name, len);
@@ -550,6 +1038,14 @@ int __dfsw_gethostname(char *name, size_t len, dfsan_label name_label,
}
SANITIZER_INTERFACE_ATTRIBUTE
+int __dfso_gethostname(char *name, size_t len, dfsan_label name_label,
+ dfsan_label len_label, dfsan_label *ret_label,
+ dfsan_origin name_origin, dfsan_origin len_origin,
+ dfsan_label *ret_origin) {
+ return __dfsw_gethostname(name, len, name_label, len_label, ret_label);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
int __dfsw_getrlimit(int resource, struct rlimit *rlim,
dfsan_label resource_label, dfsan_label rlim_label,
dfsan_label *ret_label) {
@@ -562,6 +1058,15 @@ int __dfsw_getrlimit(int resource, struct rlimit *rlim,
}
SANITIZER_INTERFACE_ATTRIBUTE
+int __dfso_getrlimit(int resource, struct rlimit *rlim,
+ dfsan_label resource_label, dfsan_label rlim_label,
+ dfsan_label *ret_label, dfsan_origin resource_origin,
+ dfsan_origin rlim_origin, dfsan_origin *ret_origin) {
+ return __dfsw_getrlimit(resource, rlim, resource_label, rlim_label,
+ ret_label);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
int __dfsw_getrusage(int who, struct rusage *usage, dfsan_label who_label,
dfsan_label usage_label, dfsan_label *ret_label) {
int ret = getrusage(who, usage);
@@ -573,6 +1078,14 @@ int __dfsw_getrusage(int who, struct rusage *usage, dfsan_label who_label,
}
SANITIZER_INTERFACE_ATTRIBUTE
+int __dfso_getrusage(int who, struct rusage *usage, dfsan_label who_label,
+ dfsan_label usage_label, dfsan_label *ret_label,
+ dfsan_origin who_origin, dfsan_origin usage_origin,
+ dfsan_label *ret_origin) {
+ return __dfsw_getrusage(who, usage, who_label, usage_label, ret_label);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
char *__dfsw_strcpy(char *dest, const char *src, dfsan_label dst_label,
dfsan_label src_label, dfsan_label *ret_label) {
char *ret = strcpy(dest, src); // NOLINT
@@ -585,14 +1098,34 @@ char *__dfsw_strcpy(char *dest, const char *src, dfsan_label dst_label,
}
SANITIZER_INTERFACE_ATTRIBUTE
-long int __dfsw_strtol(const char *nptr, char **endptr, int base,
- dfsan_label nptr_label, dfsan_label endptr_label,
- dfsan_label base_label, dfsan_label *ret_label) {
- char *tmp_endptr;
- long int ret = strtol(nptr, &tmp_endptr, base);
- if (endptr) {
- *endptr = tmp_endptr;
+char *__dfso_strcpy(char *dest, const char *src, dfsan_label dst_label,
+ dfsan_label src_label, dfsan_label *ret_label,
+ dfsan_origin dst_origin, dfsan_origin src_origin,
+ dfsan_origin *ret_origin) {
+ char *ret = strcpy(dest, src); // NOLINT
+ if (ret) {
+ size_t str_len = strlen(src) + 1;
+ dfsan_mem_origin_transfer(dest, src, str_len);
+ internal_memcpy(shadow_for(dest), shadow_for(src),
+ sizeof(dfsan_label) * str_len);
}
+ *ret_label = dst_label;
+ *ret_origin = dst_origin;
+ return ret;
+}
+
+static long int dfsan_strtol(const char *nptr, char **endptr, int base,
+ char **tmp_endptr) {
+ assert(tmp_endptr);
+ long int ret = strtol(nptr, tmp_endptr, base);
+ if (endptr)
+ *endptr = *tmp_endptr;
+ return ret;
+}
+
+static void dfsan_strtolong_label(const char *nptr, const char *tmp_endptr,
+ dfsan_label base_label,
+ dfsan_label *ret_label) {
if (tmp_endptr > nptr) {
// If *tmp_endptr is '\0' include its label as well.
*ret_label = dfsan_union(
@@ -601,18 +1134,58 @@ long int __dfsw_strtol(const char *nptr, char **endptr, int base,
} else {
*ret_label = 0;
}
+}
+
+static void dfsan_strtolong_origin(const char *nptr, const char *tmp_endptr,
+ dfsan_label base_label,
+ dfsan_label *ret_label,
+ dfsan_origin base_origin,
+ dfsan_origin *ret_origin) {
+ if (tmp_endptr > nptr) {
+    // When multiple inputs are tainted, we propagate one of their origins.
+    // Because checking whether base_label is tainted requires no additional
+    // computation, we prefer to propagate base_origin.
+ *ret_origin = base_label
+ ? base_origin
+ : dfsan_read_origin_of_first_taint(
+ nptr, tmp_endptr - nptr + (*tmp_endptr ? 0 : 1));
+ }
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+long int __dfsw_strtol(const char *nptr, char **endptr, int base,
+ dfsan_label nptr_label, dfsan_label endptr_label,
+ dfsan_label base_label, dfsan_label *ret_label) {
+ char *tmp_endptr;
+ long int ret = dfsan_strtol(nptr, endptr, base, &tmp_endptr);
+ dfsan_strtolong_label(nptr, tmp_endptr, base_label, ret_label);
return ret;
}
SANITIZER_INTERFACE_ATTRIBUTE
-double __dfsw_strtod(const char *nptr, char **endptr,
+long int __dfso_strtol(const char *nptr, char **endptr, int base,
dfsan_label nptr_label, dfsan_label endptr_label,
- dfsan_label *ret_label) {
+ dfsan_label base_label, dfsan_label *ret_label,
+ dfsan_origin nptr_origin, dfsan_origin endptr_origin,
+ dfsan_origin base_origin, dfsan_origin *ret_origin) {
char *tmp_endptr;
- double ret = strtod(nptr, &tmp_endptr);
- if (endptr) {
- *endptr = tmp_endptr;
- }
+ long int ret = dfsan_strtol(nptr, endptr, base, &tmp_endptr);
+ dfsan_strtolong_label(nptr, tmp_endptr, base_label, ret_label);
+ dfsan_strtolong_origin(nptr, tmp_endptr, base_label, ret_label, base_origin,
+ ret_origin);
+ return ret;
+}
+
+static double dfsan_strtod(const char *nptr, char **endptr, char **tmp_endptr) {
+ assert(tmp_endptr);
+ double ret = strtod(nptr, tmp_endptr);
+ if (endptr)
+ *endptr = *tmp_endptr;
+ return ret;
+}
+
+static void dfsan_strtod_label(const char *nptr, const char *tmp_endptr,
+ dfsan_label *ret_label) {
if (tmp_endptr > nptr) {
// If *tmp_endptr is '\0' include its label as well.
*ret_label = dfsan_read_label(
@@ -621,46 +1194,109 @@ double __dfsw_strtod(const char *nptr, char **endptr,
} else {
*ret_label = 0;
}
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+double __dfsw_strtod(const char *nptr, char **endptr, dfsan_label nptr_label,
+ dfsan_label endptr_label, dfsan_label *ret_label) {
+ char *tmp_endptr;
+ double ret = dfsan_strtod(nptr, endptr, &tmp_endptr);
+ dfsan_strtod_label(nptr, tmp_endptr, ret_label);
return ret;
}
SANITIZER_INTERFACE_ATTRIBUTE
-long long int __dfsw_strtoll(const char *nptr, char **endptr, int base,
- dfsan_label nptr_label, dfsan_label endptr_label,
- dfsan_label base_label, dfsan_label *ret_label) {
+double __dfso_strtod(const char *nptr, char **endptr, dfsan_label nptr_label,
+ dfsan_label endptr_label, dfsan_label *ret_label,
+ dfsan_origin nptr_origin, dfsan_origin endptr_origin,
+ dfsan_origin *ret_origin) {
char *tmp_endptr;
- long long int ret = strtoll(nptr, &tmp_endptr, base);
- if (endptr) {
- *endptr = tmp_endptr;
- }
+ double ret = dfsan_strtod(nptr, endptr, &tmp_endptr);
+ dfsan_strtod_label(nptr, tmp_endptr, ret_label);
if (tmp_endptr > nptr) {
// If *tmp_endptr is '\0' include its label as well.
- *ret_label = dfsan_union(
- base_label,
- dfsan_read_label(nptr, tmp_endptr - nptr + (*tmp_endptr ? 0 : 1)));
+ *ret_origin = dfsan_read_origin_of_first_taint(
+ nptr, tmp_endptr - nptr + (*tmp_endptr ? 0 : 1));
} else {
- *ret_label = 0;
+ *ret_origin = 0;
}
return ret;
}
+static long long int dfsan_strtoll(const char *nptr, char **endptr, int base,
+ char **tmp_endptr) {
+ assert(tmp_endptr);
+ long long int ret = strtoll(nptr, tmp_endptr, base);
+ if (endptr)
+ *endptr = *tmp_endptr;
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+long long int __dfsw_strtoll(const char *nptr, char **endptr, int base,
+ dfsan_label nptr_label, dfsan_label endptr_label,
+ dfsan_label base_label, dfsan_label *ret_label) {
+ char *tmp_endptr;
+ long long int ret = dfsan_strtoll(nptr, endptr, base, &tmp_endptr);
+ dfsan_strtolong_label(nptr, tmp_endptr, base_label, ret_label);
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+long long int __dfso_strtoll(const char *nptr, char **endptr, int base,
+ dfsan_label nptr_label, dfsan_label endptr_label,
+ dfsan_label base_label, dfsan_label *ret_label,
+ dfsan_origin nptr_origin,
+ dfsan_origin endptr_origin,
+ dfsan_origin base_origin,
+ dfsan_origin *ret_origin) {
+ char *tmp_endptr;
+ long long int ret = dfsan_strtoll(nptr, endptr, base, &tmp_endptr);
+ dfsan_strtolong_label(nptr, tmp_endptr, base_label, ret_label);
+ dfsan_strtolong_origin(nptr, tmp_endptr, base_label, ret_label, base_origin,
+ ret_origin);
+ return ret;
+}
+
+static unsigned long int dfsan_strtoul(const char *nptr, char **endptr,
+ int base, char **tmp_endptr) {
+ assert(tmp_endptr);
+ unsigned long int ret = strtoul(nptr, tmp_endptr, base);
+ if (endptr)
+ *endptr = *tmp_endptr;
+ return ret;
+}
+
SANITIZER_INTERFACE_ATTRIBUTE
unsigned long int __dfsw_strtoul(const char *nptr, char **endptr, int base,
dfsan_label nptr_label, dfsan_label endptr_label,
dfsan_label base_label, dfsan_label *ret_label) {
char *tmp_endptr;
- unsigned long int ret = strtoul(nptr, &tmp_endptr, base);
- if (endptr) {
- *endptr = tmp_endptr;
- }
- if (tmp_endptr > nptr) {
- // If *tmp_endptr is '\0' include its label as well.
- *ret_label = dfsan_union(
- base_label,
- dfsan_read_label(nptr, tmp_endptr - nptr + (*tmp_endptr ? 0 : 1)));
- } else {
- *ret_label = 0;
- }
+ unsigned long int ret = dfsan_strtoul(nptr, endptr, base, &tmp_endptr);
+ dfsan_strtolong_label(nptr, tmp_endptr, base_label, ret_label);
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+unsigned long int __dfso_strtoul(
+ const char *nptr, char **endptr, int base, dfsan_label nptr_label,
+ dfsan_label endptr_label, dfsan_label base_label, dfsan_label *ret_label,
+ dfsan_origin nptr_origin, dfsan_origin endptr_origin,
+ dfsan_origin base_origin, dfsan_origin *ret_origin) {
+ char *tmp_endptr;
+ unsigned long int ret = dfsan_strtoul(nptr, endptr, base, &tmp_endptr);
+ dfsan_strtolong_label(nptr, tmp_endptr, base_label, ret_label);
+ dfsan_strtolong_origin(nptr, tmp_endptr, base_label, ret_label, base_origin,
+ ret_origin);
+ return ret;
+}
+
+static long long unsigned int dfsan_strtoull(const char *nptr, char **endptr,
+ int base, char **tmp_endptr) {
+ assert(tmp_endptr);
+ long long unsigned int ret = strtoull(nptr, tmp_endptr, base);
+ if (endptr)
+ *endptr = *tmp_endptr;
return ret;
}
@@ -671,18 +1307,22 @@ long long unsigned int __dfsw_strtoull(const char *nptr, char **endptr,
dfsan_label base_label,
dfsan_label *ret_label) {
char *tmp_endptr;
- long long unsigned int ret = strtoull(nptr, &tmp_endptr, base);
- if (endptr) {
- *endptr = tmp_endptr;
- }
- if (tmp_endptr > nptr) {
- // If *tmp_endptr is '\0' include its label as well.
- *ret_label = dfsan_union(
- base_label,
- dfsan_read_label(nptr, tmp_endptr - nptr + (*tmp_endptr ? 0 : 1)));
- } else {
- *ret_label = 0;
- }
+ long long unsigned int ret = dfsan_strtoull(nptr, endptr, base, &tmp_endptr);
+ dfsan_strtolong_label(nptr, tmp_endptr, base_label, ret_label);
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+long long unsigned int __dfso_strtoull(
+ const char *nptr, char **endptr, int base, dfsan_label nptr_label,
+ dfsan_label endptr_label, dfsan_label base_label, dfsan_label *ret_label,
+ dfsan_origin nptr_origin, dfsan_origin endptr_origin,
+ dfsan_origin base_origin, dfsan_origin *ret_origin) {
+ char *tmp_endptr;
+ long long unsigned int ret = dfsan_strtoull(nptr, endptr, base, &tmp_endptr);
+ dfsan_strtolong_label(nptr, tmp_endptr, base_label, ret_label);
+ dfsan_strtolong_origin(nptr, tmp_endptr, base_label, ret_label, base_origin,
+ ret_origin);
return ret;
}
@@ -697,6 +1337,12 @@ time_t __dfsw_time(time_t *t, dfsan_label t_label, dfsan_label *ret_label) {
}
SANITIZER_INTERFACE_ATTRIBUTE
+time_t __dfso_time(time_t *t, dfsan_label t_label, dfsan_label *ret_label,
+ dfsan_origin t_origin, dfsan_origin *ret_origin) {
+ return __dfsw_time(t, t_label, ret_label);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
int __dfsw_inet_pton(int af, const char *src, void *dst, dfsan_label af_label,
dfsan_label src_label, dfsan_label dst_label,
dfsan_label *ret_label) {
@@ -710,6 +1356,24 @@ int __dfsw_inet_pton(int af, const char *src, void *dst, dfsan_label af_label,
}
SANITIZER_INTERFACE_ATTRIBUTE
+int __dfso_inet_pton(int af, const char *src, void *dst, dfsan_label af_label,
+ dfsan_label src_label, dfsan_label dst_label,
+ dfsan_label *ret_label, dfsan_origin af_origin,
+ dfsan_origin src_origin, dfsan_origin dst_origin,
+ dfsan_origin *ret_origin) {
+ int ret = inet_pton(af, src, dst);
+ if (ret == 1) {
+ int src_len = strlen(src) + 1;
+ dfsan_set_label_origin(
+ dfsan_read_label(src, src_len),
+ dfsan_read_origin_of_first_taint(src, src_len), dst,
+ af == AF_INET ? sizeof(struct in_addr) : sizeof(in6_addr));
+ }
+ *ret_label = 0;
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
struct tm *__dfsw_localtime_r(const time_t *timep, struct tm *result,
dfsan_label timep_label, dfsan_label result_label,
dfsan_label *ret_label) {
@@ -725,6 +1389,26 @@ struct tm *__dfsw_localtime_r(const time_t *timep, struct tm *result,
}
SANITIZER_INTERFACE_ATTRIBUTE
+struct tm *__dfso_localtime_r(const time_t *timep, struct tm *result,
+ dfsan_label timep_label, dfsan_label result_label,
+ dfsan_label *ret_label, dfsan_origin timep_origin,
+ dfsan_origin result_origin,
+ dfsan_origin *ret_origin) {
+ struct tm *ret = localtime_r(timep, result);
+ if (ret) {
+ dfsan_set_label_origin(
+ dfsan_read_label(timep, sizeof(time_t)),
+ dfsan_read_origin_of_first_taint(timep, sizeof(time_t)), result,
+ sizeof(struct tm));
+ *ret_label = result_label;
+ *ret_origin = result_origin;
+ } else {
+ *ret_label = 0;
+ }
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
int __dfsw_getpwuid_r(id_t uid, struct passwd *pwd,
char *buf, size_t buflen, struct passwd **result,
dfsan_label uid_label, dfsan_label pwd_label,
@@ -743,6 +1427,19 @@ int __dfsw_getpwuid_r(id_t uid, struct passwd *pwd,
}
SANITIZER_INTERFACE_ATTRIBUTE
+int __dfso_getpwuid_r(id_t uid, struct passwd *pwd, char *buf, size_t buflen,
+ struct passwd **result, dfsan_label uid_label,
+ dfsan_label pwd_label, dfsan_label buf_label,
+ dfsan_label buflen_label, dfsan_label result_label,
+ dfsan_label *ret_label, dfsan_origin uid_origin,
+ dfsan_origin pwd_origin, dfsan_origin buf_origin,
+ dfsan_origin buflen_origin, dfsan_origin result_origin,
+ dfsan_origin *ret_origin) {
+ return __dfsw_getpwuid_r(uid, pwd, buf, buflen, result, uid_label, pwd_label,
+ buf_label, buflen_label, result_label, ret_label);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
int __dfsw_epoll_wait(int epfd, struct epoll_event *events, int maxevents,
int timeout, dfsan_label epfd_label,
dfsan_label events_label, dfsan_label maxevents_label,
@@ -755,6 +1452,19 @@ int __dfsw_epoll_wait(int epfd, struct epoll_event *events, int maxevents,
}
SANITIZER_INTERFACE_ATTRIBUTE
+int __dfso_epoll_wait(int epfd, struct epoll_event *events, int maxevents,
+ int timeout, dfsan_label epfd_label,
+ dfsan_label events_label, dfsan_label maxevents_label,
+ dfsan_label timeout_label, dfsan_label *ret_label,
+ dfsan_origin epfd_origin, dfsan_origin events_origin,
+ dfsan_origin maxevents_origin,
+ dfsan_origin timeout_origin, dfsan_origin *ret_origin) {
+ return __dfsw_epoll_wait(epfd, events, maxevents, timeout, epfd_label,
+ events_label, maxevents_label, timeout_label,
+ ret_label);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
int __dfsw_poll(struct pollfd *fds, nfds_t nfds, int timeout,
dfsan_label dfs_label, dfsan_label nfds_label,
dfsan_label timeout_label, dfsan_label *ret_label) {
@@ -769,6 +1479,16 @@ int __dfsw_poll(struct pollfd *fds, nfds_t nfds, int timeout,
}
SANITIZER_INTERFACE_ATTRIBUTE
+int __dfso_poll(struct pollfd *fds, nfds_t nfds, int timeout,
+ dfsan_label dfs_label, dfsan_label nfds_label,
+ dfsan_label timeout_label, dfsan_label *ret_label,
+ dfsan_origin dfs_origin, dfsan_origin nfds_origin,
+ dfsan_origin timeout_origin, dfsan_origin *ret_origin) {
+ return __dfsw_poll(fds, nfds, timeout, dfs_label, nfds_label, timeout_label,
+ ret_label);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
int __dfsw_select(int nfds, fd_set *readfds, fd_set *writefds,
fd_set *exceptfds, struct timeval *timeout,
dfsan_label nfds_label, dfsan_label readfds_label,
@@ -792,6 +1512,20 @@ int __dfsw_select(int nfds, fd_set *readfds, fd_set *writefds,
}
SANITIZER_INTERFACE_ATTRIBUTE
+int __dfso_select(int nfds, fd_set *readfds, fd_set *writefds,
+ fd_set *exceptfds, struct timeval *timeout,
+ dfsan_label nfds_label, dfsan_label readfds_label,
+ dfsan_label writefds_label, dfsan_label exceptfds_label,
+ dfsan_label timeout_label, dfsan_label *ret_label,
+ dfsan_origin nfds_origin, dfsan_origin readfds_origin,
+ dfsan_origin writefds_origin, dfsan_origin exceptfds_origin,
+ dfsan_origin timeout_origin, dfsan_origin *ret_origin) {
+ return __dfsw_select(nfds, readfds, writefds, exceptfds, timeout, nfds_label,
+ readfds_label, writefds_label, exceptfds_label,
+ timeout_label, ret_label);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
int __dfsw_sched_getaffinity(pid_t pid, size_t cpusetsize, cpu_set_t *mask,
dfsan_label pid_label,
dfsan_label cpusetsize_label,
@@ -805,19 +1539,142 @@ int __dfsw_sched_getaffinity(pid_t pid, size_t cpusetsize, cpu_set_t *mask,
}
SANITIZER_INTERFACE_ATTRIBUTE
+int __dfso_sched_getaffinity(pid_t pid, size_t cpusetsize, cpu_set_t *mask,
+ dfsan_label pid_label,
+ dfsan_label cpusetsize_label,
+ dfsan_label mask_label, dfsan_label *ret_label,
+ dfsan_origin pid_origin,
+ dfsan_origin cpusetsize_origin,
+ dfsan_origin mask_origin,
+ dfsan_origin *ret_origin) {
+ return __dfsw_sched_getaffinity(pid, cpusetsize, mask, pid_label,
+ cpusetsize_label, mask_label, ret_label);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
int __dfsw_sigemptyset(sigset_t *set, dfsan_label set_label,
dfsan_label *ret_label) {
int ret = sigemptyset(set);
dfsan_set_label(0, set, sizeof(sigset_t));
+ *ret_label = 0;
return ret;
}
SANITIZER_INTERFACE_ATTRIBUTE
+int __dfso_sigemptyset(sigset_t *set, dfsan_label set_label,
+ dfsan_label *ret_label, dfsan_origin set_origin,
+ dfsan_origin *ret_origin) {
+ return __dfsw_sigemptyset(set, set_label, ret_label);
+}
+
+class SignalHandlerScope {
+ public:
+ SignalHandlerScope() {
+ if (DFsanThread *t = GetCurrentThread())
+ t->EnterSignalHandler();
+ }
+ ~SignalHandlerScope() {
+ if (DFsanThread *t = GetCurrentThread())
+ t->LeaveSignalHandler();
+ }
+};
+
+// Clears the DFSan runtime TLS state at the end of a scope.
+//
+// The implementation must be async-signal-safe and keep its data small,
+// because instances of this class may live on the signal handler stack.
+//
+// DFSan uses TLS to pass metadata about arguments and return values. If a
+// signal callback arrives while an instrumented function is accessing the
+// TLS, and the callback calls other instrumented functions that update the
+// same TLS, the TLS is left in an inconsistent state when the callback
+// returns. This may cause either under-tainting or over-tainting.
+//
+// The current implementation simply resets the TLS when the scope ends. This
+// prevents over-tainting. Under-tainting may still happen, but a taint flow
+// can eventually be found by running a DFSan-instrumented program multiple
+// times. The alternative is to save and restore the entire TLS; however, the
+// TLS storage takes 2K bytes and signal calls can be nested, so that does not
+// seem worthwhile.
+class ScopedClearThreadLocalState {
+ public:
+ ScopedClearThreadLocalState() {}
+ ~ScopedClearThreadLocalState() { dfsan_clear_thread_local_state(); }
+};
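Editorial aside: the hazard described in the comment above can be hard to picture from the RAII guards alone. Below is a minimal, self-contained sketch (not part of this patch) of how a signal handler that reuses the same thread-local argument slots clobbers the interrupted function's state; the names kArgTlsSlots, arg_tls, and leaf are hypothetical stand-ins for DFSan's real argument TLS.

#include <csignal>
#include <cstdio>

namespace {
constexpr int kArgTlsSlots = 8;
thread_local unsigned arg_tls[kArgTlsSlots];  // stand-in for DFSan's arg TLS

unsigned leaf(unsigned label) {
  arg_tls[0] = label;  // callee publishes its argument metadata
  return arg_tls[0];
}

void handler(int) {
  leaf(0xdead);  // an "instrumented" call inside the handler reuses slot 0
}
}  // namespace

int main() {
  std::signal(SIGINT, handler);
  arg_tls[0] = 42;        // caller writes argument metadata for an upcoming call
  std::raise(SIGINT);     // the signal interrupts before the callee reads slot 0
  // Without clearing (or saving/restoring) the TLS around the handler, the
  // interrupted call now observes 0xdead instead of 42 -- the inconsistency
  // the scoped guards above are designed to contain.
  std::printf("arg_tls[0] = %#x\n", arg_tls[0]);
  return 0;
}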
+
+// SignalSpinLocker::sigactions_mu guarantees atomicity of sigaction() calls.
+const int kMaxSignals = 1024;
+static atomic_uintptr_t sigactions[kMaxSignals];
+
+static void SignalHandler(int signo) {
+ SignalHandlerScope signal_handler_scope;
+ ScopedClearThreadLocalState scoped_clear_tls;
+
+  // Clear shadows for all inputs provided by the system. This is why DFSan
+  // instrumentation generates a trampoline for each function pointer and uses
+  // the trampoline to clear shadows. However, sigaction does not take the
+  // handler as a direct function-pointer argument, so no trampoline is
+  // involved and we have to clear the shadows manually.
+ dfsan_clear_arg_tls(0, sizeof(dfsan_label));
+
+ typedef void (*signal_cb)(int x);
+ signal_cb cb =
+ (signal_cb)atomic_load(&sigactions[signo], memory_order_relaxed);
+ cb(signo);
+}
+
+static void SignalAction(int signo, siginfo_t *si, void *uc) {
+ SignalHandlerScope signal_handler_scope;
+ ScopedClearThreadLocalState scoped_clear_tls;
+
+  // Clear shadows for all inputs provided by the system, as in SignalHandler.
+ dfsan_clear_arg_tls(0, 3 * sizeof(dfsan_label));
+ dfsan_set_label(0, si, sizeof(*si));
+ dfsan_set_label(0, uc, sizeof(ucontext_t));
+
+ typedef void (*sigaction_cb)(int, siginfo_t *, void *);
+ sigaction_cb cb =
+ (sigaction_cb)atomic_load(&sigactions[signo], memory_order_relaxed);
+ cb(signo, si, uc);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
int __dfsw_sigaction(int signum, const struct sigaction *act,
struct sigaction *oldact, dfsan_label signum_label,
dfsan_label act_label, dfsan_label oldact_label,
dfsan_label *ret_label) {
- int ret = sigaction(signum, act, oldact);
+ CHECK_LT(signum, kMaxSignals);
+ SignalSpinLocker lock;
+ uptr old_cb = atomic_load(&sigactions[signum], memory_order_relaxed);
+ struct sigaction new_act;
+ struct sigaction *pnew_act = act ? &new_act : nullptr;
+ if (act) {
+ internal_memcpy(pnew_act, act, sizeof(struct sigaction));
+ if (pnew_act->sa_flags & SA_SIGINFO) {
+ uptr cb = (uptr)(pnew_act->sa_sigaction);
+ if (cb != (uptr)SIG_IGN && cb != (uptr)SIG_DFL) {
+ atomic_store(&sigactions[signum], cb, memory_order_relaxed);
+ pnew_act->sa_sigaction = SignalAction;
+ }
+ } else {
+ uptr cb = (uptr)(pnew_act->sa_handler);
+ if (cb != (uptr)SIG_IGN && cb != (uptr)SIG_DFL) {
+ atomic_store(&sigactions[signum], cb, memory_order_relaxed);
+ pnew_act->sa_handler = SignalHandler;
+ }
+ }
+ }
+
+ int ret = sigaction(signum, pnew_act, oldact);
+
+ if (ret == 0 && oldact) {
+ if (oldact->sa_flags & SA_SIGINFO) {
+ if (oldact->sa_sigaction == SignalAction)
+ oldact->sa_sigaction = (decltype(oldact->sa_sigaction))old_cb;
+ } else {
+ if (oldact->sa_handler == SignalHandler)
+ oldact->sa_handler = (decltype(oldact->sa_handler))old_cb;
+ }
+ }
+
if (oldact) {
dfsan_set_label(0, oldact, sizeof(struct sigaction));
}
@@ -826,6 +1683,56 @@ int __dfsw_sigaction(int signum, const struct sigaction *act,
}
SANITIZER_INTERFACE_ATTRIBUTE
+int __dfso_sigaction(int signum, const struct sigaction *act,
+ struct sigaction *oldact, dfsan_label signum_label,
+ dfsan_label act_label, dfsan_label oldact_label,
+ dfsan_label *ret_label, dfsan_origin signum_origin,
+ dfsan_origin act_origin, dfsan_origin oldact_origin,
+ dfsan_origin *ret_origin) {
+ return __dfsw_sigaction(signum, act, oldact, signum_label, act_label,
+ oldact_label, ret_label);
+}
+
+static sighandler_t dfsan_signal(int signum, sighandler_t handler,
+ dfsan_label *ret_label) {
+ CHECK_LT(signum, kMaxSignals);
+ SignalSpinLocker lock;
+ uptr old_cb = atomic_load(&sigactions[signum], memory_order_relaxed);
+ if (handler != SIG_IGN && handler != SIG_DFL) {
+ atomic_store(&sigactions[signum], (uptr)handler, memory_order_relaxed);
+ handler = &SignalHandler;
+ }
+
+ sighandler_t ret = signal(signum, handler);
+
+ if (ret == SignalHandler)
+ ret = (sighandler_t)old_cb;
+
+ *ret_label = 0;
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+sighandler_t __dfsw_signal(int signum,
+ void *(*handler_trampoline)(void *, int, dfsan_label,
+ dfsan_label *),
+ sighandler_t handler, dfsan_label signum_label,
+ dfsan_label handler_label, dfsan_label *ret_label) {
+ return dfsan_signal(signum, handler, ret_label);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+sighandler_t __dfso_signal(
+ int signum,
+ void *(*handler_trampoline)(void *, int, dfsan_label, dfsan_label *,
+ dfsan_origin, dfsan_origin *),
+ sighandler_t handler, dfsan_label signum_label, dfsan_label handler_label,
+ dfsan_label *ret_label, dfsan_origin signum_origin,
+ dfsan_origin handler_origin, dfsan_origin *ret_origin) {
+ return dfsan_signal(signum, handler, ret_label);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
int __dfsw_sigaltstack(const stack_t *ss, stack_t *old_ss, dfsan_label ss_label,
dfsan_label old_ss_label, dfsan_label *ret_label) {
int ret = sigaltstack(ss, old_ss);
@@ -836,6 +1743,14 @@ int __dfsw_sigaltstack(const stack_t *ss, stack_t *old_ss, dfsan_label ss_label,
}
SANITIZER_INTERFACE_ATTRIBUTE
+int __dfso_sigaltstack(const stack_t *ss, stack_t *old_ss, dfsan_label ss_label,
+ dfsan_label old_ss_label, dfsan_label *ret_label,
+ dfsan_origin ss_origin, dfsan_origin old_ss_origin,
+ dfsan_origin *ret_origin) {
+ return __dfsw_sigaltstack(ss, old_ss, ss_label, old_ss_label, ret_label);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
int __dfsw_gettimeofday(struct timeval *tv, struct timezone *tz,
dfsan_label tv_label, dfsan_label tz_label,
dfsan_label *ret_label) {
@@ -850,6 +1765,14 @@ int __dfsw_gettimeofday(struct timeval *tv, struct timezone *tz,
return ret;
}
+SANITIZER_INTERFACE_ATTRIBUTE
+int __dfso_gettimeofday(struct timeval *tv, struct timezone *tz,
+ dfsan_label tv_label, dfsan_label tz_label,
+ dfsan_label *ret_label, dfsan_origin tv_origin,
+ dfsan_origin tz_origin, dfsan_origin *ret_origin) {
+ return __dfsw_gettimeofday(tv, tz, tv_label, tz_label, ret_label);
+}
+
SANITIZER_INTERFACE_ATTRIBUTE void *__dfsw_memchr(void *s, int c, size_t n,
dfsan_label s_label,
dfsan_label c_label,
@@ -868,6 +1791,24 @@ SANITIZER_INTERFACE_ATTRIBUTE void *__dfsw_memchr(void *s, int c, size_t n,
return ret;
}
+SANITIZER_INTERFACE_ATTRIBUTE void *__dfso_memchr(
+ void *s, int c, size_t n, dfsan_label s_label, dfsan_label c_label,
+ dfsan_label n_label, dfsan_label *ret_label, dfsan_origin s_origin,
+ dfsan_origin c_origin, dfsan_origin n_origin, dfsan_origin *ret_origin) {
+ void *ret = __dfsw_memchr(s, c, n, s_label, c_label, n_label, ret_label);
+ if (flags().strict_data_dependencies) {
+ if (ret)
+ *ret_origin = s_origin;
+ } else {
+ size_t len =
+ ret ? reinterpret_cast<char *>(ret) - reinterpret_cast<char *>(s) + 1
+ : n;
+ dfsan_origin o = dfsan_read_origin_of_first_taint(s, len);
+ *ret_origin = o ? o : (s_label ? s_origin : c_origin);
+ }
+ return ret;
+}
+
SANITIZER_INTERFACE_ATTRIBUTE char *__dfsw_strrchr(char *s, int c,
dfsan_label s_label,
dfsan_label c_label,
@@ -884,6 +1825,23 @@ SANITIZER_INTERFACE_ATTRIBUTE char *__dfsw_strrchr(char *s, int c,
return ret;
}
+SANITIZER_INTERFACE_ATTRIBUTE char *__dfso_strrchr(
+ char *s, int c, dfsan_label s_label, dfsan_label c_label,
+ dfsan_label *ret_label, dfsan_origin s_origin, dfsan_origin c_origin,
+ dfsan_origin *ret_origin) {
+ char *ret = __dfsw_strrchr(s, c, s_label, c_label, ret_label);
+ if (flags().strict_data_dependencies) {
+ if (ret)
+ *ret_origin = s_origin;
+ } else {
+ size_t s_len = strlen(s) + 1;
+ dfsan_origin o = dfsan_read_origin_of_first_taint(s, s_len);
+ *ret_origin = o ? o : (s_label ? s_origin : c_origin);
+ }
+
+ return ret;
+}
+
SANITIZER_INTERFACE_ATTRIBUTE char *__dfsw_strstr(char *haystack, char *needle,
dfsan_label haystack_label,
dfsan_label needle_label,
@@ -902,6 +1860,33 @@ SANITIZER_INTERFACE_ATTRIBUTE char *__dfsw_strstr(char *haystack, char *needle,
return ret;
}
+SANITIZER_INTERFACE_ATTRIBUTE char *__dfso_strstr(char *haystack, char *needle,
+ dfsan_label haystack_label,
+ dfsan_label needle_label,
+ dfsan_label *ret_label,
+ dfsan_origin haystack_origin,
+ dfsan_origin needle_origin,
+ dfsan_origin *ret_origin) {
+ char *ret =
+ __dfsw_strstr(haystack, needle, haystack_label, needle_label, ret_label);
+ if (flags().strict_data_dependencies) {
+ if (ret)
+ *ret_origin = haystack_origin;
+ } else {
+ size_t needle_len = strlen(needle);
+ size_t len = ret ? ret + needle_len - haystack : strlen(haystack) + 1;
+ dfsan_origin o = dfsan_read_origin_of_first_taint(haystack, len);
+ if (o) {
+ *ret_origin = o;
+ } else {
+ o = dfsan_read_origin_of_first_taint(needle, needle_len + 1);
+ *ret_origin = o ? o : (haystack_label ? haystack_origin : needle_origin);
+ }
+ }
+
+ return ret;
+}
+
SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_nanosleep(const struct timespec *req,
struct timespec *rem,
dfsan_label req_label,
@@ -916,6 +1901,13 @@ SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_nanosleep(const struct timespec *req,
return ret;
}
+SANITIZER_INTERFACE_ATTRIBUTE int __dfso_nanosleep(
+ const struct timespec *req, struct timespec *rem, dfsan_label req_label,
+ dfsan_label rem_label, dfsan_label *ret_label, dfsan_origin req_origin,
+ dfsan_origin rem_origin, dfsan_origin *ret_origin) {
+ return __dfsw_nanosleep(req, rem, req_label, rem_label, ret_label);
+}
+
static void clear_msghdr_labels(size_t bytes_written, struct msghdr *msg) {
dfsan_set_label(0, msg, sizeof(*msg));
dfsan_set_label(0, msg->msg_name, msg->msg_namelen);
@@ -944,6 +1936,19 @@ SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_recvmmsg(
return ret;
}
+SANITIZER_INTERFACE_ATTRIBUTE int __dfso_recvmmsg(
+ int sockfd, struct mmsghdr *msgvec, unsigned int vlen, int flags,
+ struct timespec *timeout, dfsan_label sockfd_label,
+ dfsan_label msgvec_label, dfsan_label vlen_label, dfsan_label flags_label,
+ dfsan_label timeout_label, dfsan_label *ret_label,
+ dfsan_origin sockfd_origin, dfsan_origin msgvec_origin,
+ dfsan_origin vlen_origin, dfsan_origin flags_origin,
+ dfsan_origin timeout_origin, dfsan_origin *ret_origin) {
+ return __dfsw_recvmmsg(sockfd, msgvec, vlen, flags, timeout, sockfd_label,
+ msgvec_label, vlen_label, flags_label, timeout_label,
+ ret_label);
+}
+
SANITIZER_INTERFACE_ATTRIBUTE ssize_t __dfsw_recvmsg(
int sockfd, struct msghdr *msg, int flags, dfsan_label sockfd_label,
dfsan_label msg_label, dfsan_label flags_label, dfsan_label *ret_label) {
@@ -954,6 +1959,15 @@ SANITIZER_INTERFACE_ATTRIBUTE ssize_t __dfsw_recvmsg(
return ret;
}
+SANITIZER_INTERFACE_ATTRIBUTE ssize_t __dfso_recvmsg(
+ int sockfd, struct msghdr *msg, int flags, dfsan_label sockfd_label,
+ dfsan_label msg_label, dfsan_label flags_label, dfsan_label *ret_label,
+ dfsan_origin sockfd_origin, dfsan_origin msg_origin,
+ dfsan_origin flags_origin, dfsan_origin *ret_origin) {
+ return __dfsw_recvmsg(sockfd, msg, flags, sockfd_label, msg_label,
+ flags_label, ret_label);
+}
+
SANITIZER_INTERFACE_ATTRIBUTE int
__dfsw_socketpair(int domain, int type, int protocol, int sv[2],
dfsan_label domain_label, dfsan_label type_label,
@@ -967,6 +1981,16 @@ __dfsw_socketpair(int domain, int type, int protocol, int sv[2],
return ret;
}
+SANITIZER_INTERFACE_ATTRIBUTE int __dfso_socketpair(
+ int domain, int type, int protocol, int sv[2], dfsan_label domain_label,
+ dfsan_label type_label, dfsan_label protocol_label, dfsan_label sv_label,
+ dfsan_label *ret_label, dfsan_origin domain_origin,
+ dfsan_origin type_origin, dfsan_origin protocol_origin,
+ dfsan_origin sv_origin, dfsan_origin *ret_origin) {
+ return __dfsw_socketpair(domain, type, protocol, sv, domain_label, type_label,
+ protocol_label, sv_label, ret_label);
+}
+
SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_getsockopt(
int sockfd, int level, int optname, void *optval, socklen_t *optlen,
dfsan_label sockfd_label, dfsan_label level_label,
@@ -981,6 +2005,19 @@ SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_getsockopt(
return ret;
}
+SANITIZER_INTERFACE_ATTRIBUTE int __dfso_getsockopt(
+ int sockfd, int level, int optname, void *optval, socklen_t *optlen,
+ dfsan_label sockfd_label, dfsan_label level_label,
+ dfsan_label optname_label, dfsan_label optval_label,
+ dfsan_label optlen_label, dfsan_label *ret_label,
+ dfsan_origin sockfd_origin, dfsan_origin level_origin,
+ dfsan_origin optname_origin, dfsan_origin optval_origin,
+ dfsan_origin optlen_origin, dfsan_origin *ret_origin) {
+ return __dfsw_getsockopt(sockfd, level, optname, optval, optlen, sockfd_label,
+ level_label, optname_label, optval_label,
+ optlen_label, ret_label);
+}
+
SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_getsockname(
int sockfd, struct sockaddr *addr, socklen_t *addrlen,
dfsan_label sockfd_label, dfsan_label addr_label, dfsan_label addrlen_label,
@@ -996,6 +2033,16 @@ SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_getsockname(
return ret;
}
+SANITIZER_INTERFACE_ATTRIBUTE int __dfso_getsockname(
+ int sockfd, struct sockaddr *addr, socklen_t *addrlen,
+ dfsan_label sockfd_label, dfsan_label addr_label, dfsan_label addrlen_label,
+ dfsan_label *ret_label, dfsan_origin sockfd_origin,
+ dfsan_origin addr_origin, dfsan_origin addrlen_origin,
+ dfsan_origin *ret_origin) {
+ return __dfsw_getsockname(sockfd, addr, addrlen, sockfd_label, addr_label,
+ addrlen_label, ret_label);
+}
+
SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_getpeername(
int sockfd, struct sockaddr *addr, socklen_t *addrlen,
dfsan_label sockfd_label, dfsan_label addr_label, dfsan_label addrlen_label,
@@ -1011,6 +2058,16 @@ SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_getpeername(
return ret;
}
+SANITIZER_INTERFACE_ATTRIBUTE int __dfso_getpeername(
+ int sockfd, struct sockaddr *addr, socklen_t *addrlen,
+ dfsan_label sockfd_label, dfsan_label addr_label, dfsan_label addrlen_label,
+ dfsan_label *ret_label, dfsan_origin sockfd_origin,
+ dfsan_origin addr_origin, dfsan_origin addrlen_origin,
+ dfsan_origin *ret_origin) {
+ return __dfsw_getpeername(sockfd, addr, addrlen, sockfd_label, addr_label,
+ addrlen_label, ret_label);
+}
+
// Type of the trampoline function passed to the custom version of
// dfsan_set_write_callback.
typedef void (*write_trampoline_t)(
@@ -1018,6 +2075,11 @@ typedef void (*write_trampoline_t)(
int fd, const void *buf, ssize_t count,
dfsan_label fd_label, dfsan_label buf_label, dfsan_label count_label);
+typedef void (*write_origin_trampoline_t)(
+ void *callback, int fd, const void *buf, ssize_t count,
+ dfsan_label fd_label, dfsan_label buf_label, dfsan_label count_label,
+ dfsan_origin fd_origin, dfsan_origin buf_origin, dfsan_origin count_origin);
+
// Calls to dfsan_set_write_callback() set the values in this struct.
// Calls to the custom version of write() read (and invoke) them.
static struct {
@@ -1025,6 +2087,11 @@ static struct {
void *write_callback = nullptr;
} write_callback_info;
+static struct {
+ write_origin_trampoline_t write_callback_trampoline = nullptr;
+ void *write_callback = nullptr;
+} write_origin_callback_info;
+
SANITIZER_INTERFACE_ATTRIBUTE void
__dfsw_dfsan_set_write_callback(
write_trampoline_t write_callback_trampoline,
@@ -1035,6 +2102,15 @@ __dfsw_dfsan_set_write_callback(
write_callback_info.write_callback = write_callback;
}
+SANITIZER_INTERFACE_ATTRIBUTE void __dfso_dfsan_set_write_callback(
+ write_origin_trampoline_t write_callback_trampoline, void *write_callback,
+ dfsan_label write_callback_label, dfsan_label *ret_label,
+ dfsan_origin write_callback_origin, dfsan_origin *ret_origin) {
+ write_origin_callback_info.write_callback_trampoline =
+ write_callback_trampoline;
+ write_origin_callback_info.write_callback = write_callback;
+}
+
SANITIZER_INTERFACE_ATTRIBUTE int
__dfsw_write(int fd, const void *buf, size_t count,
dfsan_label fd_label, dfsan_label buf_label,
@@ -1049,6 +2125,21 @@ __dfsw_write(int fd, const void *buf, size_t count,
*ret_label = 0;
return write(fd, buf, count);
}
+
+SANITIZER_INTERFACE_ATTRIBUTE int __dfso_write(
+ int fd, const void *buf, size_t count, dfsan_label fd_label,
+ dfsan_label buf_label, dfsan_label count_label, dfsan_label *ret_label,
+ dfsan_origin fd_origin, dfsan_origin buf_origin, dfsan_origin count_origin,
+ dfsan_origin *ret_origin) {
+ if (write_origin_callback_info.write_callback) {
+ write_origin_callback_info.write_callback_trampoline(
+ write_origin_callback_info.write_callback, fd, buf, count, fd_label,
+ buf_label, count_label, fd_origin, buf_origin, count_origin);
+ }
+
+ *ret_label = 0;
+ return write(fd, buf, count);
+}
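Editorial aside: a hedged application-side sketch of the mechanism above. A program registers a callback through the public dfsan_set_write_callback interface, and the __dfsw_write/__dfso_write wrappers invoke it (via the compiler-generated trampoline) before the real write. The declarations are assumed to match sanitizer/dfsan_interface.h; compile with -fsanitize=dataflow.

#include <sanitizer/dfsan_interface.h>
#include <cstdio>
#include <cstring>
#include <unistd.h>

// Assumed callback signature: void (int fd, const void *buf, size_t count).
static void my_write_callback(int fd, const void *buf, size_t count) {
  // Runs before the underlying write(); labels on buf are still visible here.
  if (dfsan_read_label(buf, count) != 0)
    std::fprintf(stderr, "tainted write: fd=%d, %zu bytes\n", fd, count);
}

int main() {
  dfsan_set_write_callback(my_write_callback);
  char msg[] = "hello\n";
  // Tainting msg (e.g., with dfsan_set_label) before the write would make the
  // callback report it; with untainted data the callback simply stays silent.
  write(STDOUT_FILENO, msg, std::strlen(msg));  // callback fires before write
  return 0;
}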
} // namespace __dfsan
// Type used to extract a dfsan_label with va_arg()
@@ -1138,6 +2229,7 @@ struct Formatter {
// positional arguments.
static int format_buffer(char *str, size_t size, const char *fmt,
dfsan_label *va_labels, dfsan_label *ret_label,
+ dfsan_origin *va_origins, dfsan_origin *ret_origin,
va_list ap) {
Formatter formatter(str, fmt, size);
@@ -1193,8 +2285,13 @@ static int format_buffer(char *str, size_t size, const char *fmt,
default:
retval = formatter.format(va_arg(ap, int));
}
- dfsan_set_label(*va_labels++, formatter.str_cur(),
- formatter.num_written_bytes(retval));
+ if (va_origins == nullptr)
+ dfsan_set_label(*va_labels++, formatter.str_cur(),
+ formatter.num_written_bytes(retval));
+ else
+ dfsan_set_label_origin(*va_labels++, *va_origins++,
+ formatter.str_cur(),
+ formatter.num_written_bytes(retval));
end_fmt = true;
break;
@@ -1211,21 +2308,36 @@ static int format_buffer(char *str, size_t size, const char *fmt,
} else {
retval = formatter.format(va_arg(ap, double));
}
- dfsan_set_label(*va_labels++, formatter.str_cur(),
- formatter.num_written_bytes(retval));
+ if (va_origins == nullptr)
+ dfsan_set_label(*va_labels++, formatter.str_cur(),
+ formatter.num_written_bytes(retval));
+ else
+ dfsan_set_label_origin(*va_labels++, *va_origins++,
+ formatter.str_cur(),
+ formatter.num_written_bytes(retval));
end_fmt = true;
break;
case 'c':
retval = formatter.format(va_arg(ap, int));
- dfsan_set_label(*va_labels++, formatter.str_cur(),
- formatter.num_written_bytes(retval));
+ if (va_origins == nullptr)
+ dfsan_set_label(*va_labels++, formatter.str_cur(),
+ formatter.num_written_bytes(retval));
+ else
+ dfsan_set_label_origin(*va_labels++, *va_origins++,
+ formatter.str_cur(),
+ formatter.num_written_bytes(retval));
end_fmt = true;
break;
case 's': {
char *arg = va_arg(ap, char *);
retval = formatter.format(arg);
+ if (va_origins) {
+ va_origins++;
+ dfsan_mem_origin_transfer(formatter.str_cur(), arg,
+ formatter.num_written_bytes(retval));
+ }
va_labels++;
internal_memcpy(shadow_for(formatter.str_cur()), shadow_for(arg),
sizeof(dfsan_label) *
@@ -1236,8 +2348,13 @@ static int format_buffer(char *str, size_t size, const char *fmt,
case 'p':
retval = formatter.format(va_arg(ap, void *));
- dfsan_set_label(*va_labels++, formatter.str_cur(),
- formatter.num_written_bytes(retval));
+ if (va_origins == nullptr)
+ dfsan_set_label(*va_labels++, formatter.str_cur(),
+ formatter.num_written_bytes(retval));
+ else
+ dfsan_set_label_origin(*va_labels++, *va_origins++,
+ formatter.str_cur(),
+ formatter.num_written_bytes(retval));
end_fmt = true;
break;
@@ -1245,6 +2362,8 @@ static int format_buffer(char *str, size_t size, const char *fmt,
int *ptr = va_arg(ap, int *);
*ptr = (int)formatter.str_off;
va_labels++;
+ if (va_origins)
+ va_origins++;
dfsan_set_label(0, ptr, sizeof(ptr));
end_fmt = true;
break;
@@ -1260,6 +2379,8 @@ static int format_buffer(char *str, size_t size, const char *fmt,
case '*':
formatter.width = va_arg(ap, int);
va_labels++;
+ if (va_origins)
+ va_origins++;
break;
default:
@@ -1277,6 +2398,8 @@ static int format_buffer(char *str, size_t size, const char *fmt,
}
*ret_label = 0;
+ if (ret_origin)
+ *ret_origin = 0;
// Number of bytes written in total.
return formatter.str_off;
@@ -1289,7 +2412,22 @@ int __dfsw_sprintf(char *str, const char *format, dfsan_label str_label,
dfsan_label *ret_label, ...) {
va_list ap;
va_start(ap, ret_label);
- int ret = format_buffer(str, ~0ul, format, va_labels, ret_label, ap);
+ int ret = format_buffer(str, ~0ul, format, va_labels, ret_label, nullptr,
+ nullptr, ap);
+ va_end(ap);
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __dfso_sprintf(char *str, const char *format, dfsan_label str_label,
+ dfsan_label format_label, dfsan_label *va_labels,
+ dfsan_label *ret_label, dfsan_origin str_origin,
+ dfsan_origin format_origin, dfsan_origin *va_origins,
+ dfsan_origin *ret_origin, ...) {
+ va_list ap;
+ va_start(ap, ret_origin);
+ int ret = format_buffer(str, ~0ul, format, va_labels, ret_label, va_origins,
+ ret_origin, ap);
va_end(ap);
return ret;
}
@@ -1301,11 +2439,52 @@ int __dfsw_snprintf(char *str, size_t size, const char *format,
dfsan_label *ret_label, ...) {
va_list ap;
va_start(ap, ret_label);
- int ret = format_buffer(str, size, format, va_labels, ret_label, ap);
+ int ret = format_buffer(str, size, format, va_labels, ret_label, nullptr,
+ nullptr, ap);
+ va_end(ap);
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __dfso_snprintf(char *str, size_t size, const char *format,
+ dfsan_label str_label, dfsan_label size_label,
+ dfsan_label format_label, dfsan_label *va_labels,
+ dfsan_label *ret_label, dfsan_origin str_origin,
+ dfsan_origin size_origin, dfsan_origin format_origin,
+ dfsan_origin *va_origins, dfsan_origin *ret_origin, ...) {
+ va_list ap;
+ va_start(ap, ret_origin);
+ int ret = format_buffer(str, size, format, va_labels, ret_label, va_origins,
+ ret_origin, ap);
va_end(ap);
return ret;
}
+static void BeforeFork() {
+ StackDepotLockAll();
+ GetChainedOriginDepot()->LockAll();
+}
+
+static void AfterFork() {
+ GetChainedOriginDepot()->UnlockAll();
+ StackDepotUnlockAll();
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+pid_t __dfsw_fork(dfsan_label *ret_label) {
+ pid_t pid = fork();
+ *ret_label = 0;
+ return pid;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+pid_t __dfso_fork(dfsan_label *ret_label, dfsan_origin *ret_origin) {
+ BeforeFork();
+ pid_t pid = __dfsw_fork(ret_label);
+ AfterFork();
+ return pid;
+}
+
// Default empty implementations (weak). Users should redefine them.
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard, u32 *) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard_init, u32 *,
diff --git a/compiler-rt/lib/dfsan/dfsan_flags.h b/compiler-rt/lib/dfsan/dfsan_flags.h
new file mode 100644
index 000000000000..ec7edf6112a9
--- /dev/null
+++ b/compiler-rt/lib/dfsan/dfsan_flags.h
@@ -0,0 +1,32 @@
+//===-- dfsan_flags.h -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of DataFlowSanitizer.
+//
+// DFSan flags.
+//===----------------------------------------------------------------------===//
+
+#ifndef DFSAN_FLAGS_H
+#define DFSAN_FLAGS_H
+
+namespace __dfsan {
+
+struct Flags {
+#define DFSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "dfsan_flags.inc"
+#undef DFSAN_FLAG
+
+ void SetDefaults();
+};
+
+extern Flags flags_data;
+inline Flags &flags() { return flags_data; }
+
+} // namespace __dfsan
+
+#endif // DFSAN_FLAGS_H
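Editorial aside: the Flags struct above uses the X-macro pattern, where dfsan_flags.inc is included with DFSAN_FLAG defined as a member declaration (and included again elsewhere with other definitions, e.g. to set defaults). A minimal self-contained sketch of the same idea, with hypothetical flag entries inlined instead of a separate .inc file:

#include <cstdio>

// Stand-in for dfsan_flags.inc: each line describes one flag.
#define MY_FLAGS(X)                                               \
  X(bool, warn_unimplemented, false, "Warn on unimplemented fns") \
  X(int, store_context_size, 20, "Stack trace depth limit")

struct MyFlags {
#define DECLARE_FLAG(Type, Name, Default, Desc) Type Name;
  MY_FLAGS(DECLARE_FLAG)
#undef DECLARE_FLAG
  void SetDefaults();
};

#define SET_DEFAULT(Type, Name, Default, Desc) Name = Default;
void MyFlags::SetDefaults() { MY_FLAGS(SET_DEFAULT) }
#undef SET_DEFAULT

int main() {
  MyFlags f;
  f.SetDefaults();
  std::printf("%d %d\n", f.warn_unimplemented, f.store_context_size);
  return 0;
}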
diff --git a/compiler-rt/lib/dfsan/dfsan_flags.inc b/compiler-rt/lib/dfsan/dfsan_flags.inc
index cdd0035c9b2d..67fda0eee490 100644
--- a/compiler-rt/lib/dfsan/dfsan_flags.inc
+++ b/compiler-rt/lib/dfsan/dfsan_flags.inc
@@ -16,7 +16,7 @@
// DFSAN_FLAG(Type, Name, DefaultValue, Description)
// See COMMON_FLAG in sanitizer_flags.inc for more details.
-DFSAN_FLAG(bool, warn_unimplemented, true,
+DFSAN_FLAG(bool, warn_unimplemented, false,
"Whether to warn on unimplemented functions.")
DFSAN_FLAG(bool, warn_nonzero_labels, false,
"Whether to warn on unimplemented functions.")
@@ -26,6 +26,18 @@ DFSAN_FLAG(
"(e.g., when comparing strings, ignore the fact that the output of the"
"comparison might be data-dependent on the content of the strings). This"
"applies only to the custom functions defined in 'custom.c'.")
-DFSAN_FLAG(const char *, dump_labels_at_exit, "", "The path of the file where "
- "to dump the labels when the "
- "program terminates.")
+DFSAN_FLAG(
+ int, origin_history_size, Origin::kMaxDepth,
+ "The limit of origin chain length. Non-positive values mean unlimited.")
+DFSAN_FLAG(
+ int, origin_history_per_stack_limit, 20000,
+ "The limit of origin node's references count. "
+ "Non-positive values mean unlimited.")
+DFSAN_FLAG(int, store_context_size, 20,
+ "The depth limit of origin tracking stack traces.")
+DFSAN_FLAG(bool, check_origin_invariant, false,
+ "Whether to check if the origin invariant holds.")
+DFSAN_FLAG(bool, zero_in_malloc, true,
+ "Whether to zero shadow space of new allocated memory.")
+DFSAN_FLAG(bool, zero_in_free, true,
+ "Whether to zero shadow space of deallocated memory.")
diff --git a/compiler-rt/lib/dfsan/dfsan_interceptors.cpp b/compiler-rt/lib/dfsan/dfsan_interceptors.cpp
index 7efb182ac8d4..92be4fc87d49 100644
--- a/compiler-rt/lib/dfsan/dfsan_interceptors.cpp
+++ b/compiler-rt/lib/dfsan/dfsan_interceptors.cpp
@@ -15,8 +15,14 @@
#include <unistd.h>
#include "dfsan/dfsan.h"
+#include "dfsan/dfsan_thread.h"
#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_errno.h"
+#include "sanitizer_common/sanitizer_platform_limits_posix.h"
+#include "sanitizer_common/sanitizer_posix.h"
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
using namespace __sanitizer;
@@ -26,44 +32,213 @@ bool interceptors_initialized;
} // namespace
-INTERCEPTOR(void *, mmap, void *addr, SIZE_T length, int prot, int flags,
- int fd, OFF_T offset) {
- void *res;
+INTERCEPTOR(void *, reallocarray, void *ptr, SIZE_T nmemb, SIZE_T size) {
+ return __dfsan::dfsan_reallocarray(ptr, nmemb, size);
+}
+
+INTERCEPTOR(void *, __libc_memalign, SIZE_T alignment, SIZE_T size) {
+ void *ptr = __dfsan::dfsan_memalign(alignment, size);
+ if (ptr)
+ DTLS_on_libc_memalign(ptr, size);
+ return ptr;
+}
+
+INTERCEPTOR(void *, aligned_alloc, SIZE_T alignment, SIZE_T size) {
+ return __dfsan::dfsan_aligned_alloc(alignment, size);
+}
+
+static uptr allocated_for_dlsym;
+static const uptr kDlsymAllocPoolSize = 1024;
+static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize];
+
+static bool IsInDlsymAllocPool(const void *ptr) {
+ uptr off = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
+ return off < sizeof(alloc_memory_for_dlsym);
+}
+
+static void *AllocateFromLocalPool(uptr size_in_bytes) {
+ uptr size_in_words = RoundUpTo(size_in_bytes, kWordSize) / kWordSize;
+ void *mem = (void *)&alloc_memory_for_dlsym[allocated_for_dlsym];
+ allocated_for_dlsym += size_in_words;
+ CHECK_LT(allocated_for_dlsym, kDlsymAllocPoolSize);
+ return mem;
+}
+
+INTERCEPTOR(void *, calloc, SIZE_T nmemb, SIZE_T size) {
+ if (UNLIKELY(!__dfsan::dfsan_inited))
+ // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
+ return AllocateFromLocalPool(nmemb * size);
+ return __dfsan::dfsan_calloc(nmemb, size);
+}
+
+INTERCEPTOR(void *, realloc, void *ptr, SIZE_T size) {
+ if (UNLIKELY(IsInDlsymAllocPool(ptr))) {
+ uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
+ uptr copy_size = Min(size, kDlsymAllocPoolSize - offset);
+ void *new_ptr;
+ if (UNLIKELY(!__dfsan::dfsan_inited)) {
+ new_ptr = AllocateFromLocalPool(copy_size);
+ } else {
+ copy_size = size;
+ new_ptr = __dfsan::dfsan_malloc(copy_size);
+ }
+ internal_memcpy(new_ptr, ptr, copy_size);
+ return new_ptr;
+ }
+ return __dfsan::dfsan_realloc(ptr, size);
+}
+
+INTERCEPTOR(void *, malloc, SIZE_T size) {
+ if (UNLIKELY(!__dfsan::dfsan_inited))
+ // Hack: dlsym calls malloc before REAL(malloc) is retrieved from dlsym.
+ return AllocateFromLocalPool(size);
+ return __dfsan::dfsan_malloc(size);
+}
+
+INTERCEPTOR(void, free, void *ptr) {
+ if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr)))
+ return;
+ return __dfsan::dfsan_deallocate(ptr);
+}
+
+INTERCEPTOR(void, cfree, void *ptr) {
+ if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr)))
+ return;
+ return __dfsan::dfsan_deallocate(ptr);
+}
- // interceptors_initialized is set to true during preinit_array, when we're
- // single-threaded. So we don't need to worry about accessing it atomically.
- if (!interceptors_initialized)
- res = (void *)syscall(__NR_mmap, addr, length, prot, flags, fd, offset);
- else
- res = REAL(mmap)(addr, length, prot, flags, fd, offset);
+INTERCEPTOR(int, posix_memalign, void **memptr, SIZE_T alignment, SIZE_T size) {
+ CHECK_NE(memptr, 0);
+ int res = __dfsan::dfsan_posix_memalign(memptr, alignment, size);
+ if (!res)
+ dfsan_set_label(0, memptr, sizeof(*memptr));
+ return res;
+}
+
+INTERCEPTOR(void *, memalign, SIZE_T alignment, SIZE_T size) {
+ return __dfsan::dfsan_memalign(alignment, size);
+}
+
+INTERCEPTOR(void *, valloc, SIZE_T size) { return __dfsan::dfsan_valloc(size); }
+
+INTERCEPTOR(void *, pvalloc, SIZE_T size) {
+ return __dfsan::dfsan_pvalloc(size);
+}
+
+INTERCEPTOR(void, mallinfo, __sanitizer_struct_mallinfo *sret) {
+ internal_memset(sret, 0, sizeof(*sret));
+ dfsan_set_label(0, sret, sizeof(*sret));
+}
+
+INTERCEPTOR(int, mallopt, int cmd, int value) { return 0; }
+
+INTERCEPTOR(void, malloc_stats, void) {
+ // FIXME: implement, but don't call REAL(malloc_stats)!
+}
+
+INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
+ return __sanitizer_get_allocated_size(ptr);
+}
- if (res != (void *)-1)
+#define ENSURE_DFSAN_INITED() \
+ do { \
+ CHECK(!__dfsan::dfsan_init_is_running); \
+ if (!__dfsan::dfsan_inited) { \
+ __dfsan::dfsan_init(); \
+ } \
+ } while (0)
+
+#define COMMON_INTERCEPTOR_ENTER(func, ...) \
+ if (__dfsan::dfsan_init_is_running) \
+ return REAL(func)(__VA_ARGS__); \
+ ENSURE_DFSAN_INITED(); \
+ dfsan_set_label(0, __errno_location(), sizeof(int)); /* NOLINT */
+
+INTERCEPTOR(void *, mmap, void *addr, SIZE_T length, int prot, int flags,
+ int fd, OFF_T offset) {
+ if (common_flags()->detect_write_exec)
+ ReportMmapWriteExec(prot);
+ if (!__dfsan::dfsan_inited)
+ return (void *)internal_mmap(addr, length, prot, flags, fd, offset);
+ COMMON_INTERCEPTOR_ENTER(mmap, addr, length, prot, flags, fd, offset);
+ void *res = REAL(mmap)(addr, length, prot, flags, fd, offset);
+ if (res != (void *)-1) {
dfsan_set_label(0, res, RoundUpTo(length, GetPageSizeCached()));
+ }
return res;
}
INTERCEPTOR(void *, mmap64, void *addr, SIZE_T length, int prot, int flags,
int fd, OFF64_T offset) {
+ if (common_flags()->detect_write_exec)
+ ReportMmapWriteExec(prot);
+ if (!__dfsan::dfsan_inited)
+ return (void *)internal_mmap(addr, length, prot, flags, fd, offset);
+ COMMON_INTERCEPTOR_ENTER(mmap64, addr, length, prot, flags, fd, offset);
void *res = REAL(mmap64)(addr, length, prot, flags, fd, offset);
- if (res != (void *)-1)
+ if (res != (void *)-1) {
dfsan_set_label(0, res, RoundUpTo(length, GetPageSizeCached()));
+ }
return res;
}
INTERCEPTOR(int, munmap, void *addr, SIZE_T length) {
+ if (!__dfsan::dfsan_inited)
+ return internal_munmap(addr, length);
+ COMMON_INTERCEPTOR_ENTER(munmap, addr, length);
int res = REAL(munmap)(addr, length);
if (res != -1)
dfsan_set_label(0, addr, RoundUpTo(length, GetPageSizeCached()));
return res;
}
+#define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \
+ if (__dfsan::DFsanThread *t = __dfsan::GetCurrentThread()) { \
+ *begin = t->tls_begin(); \
+ *end = t->tls_end(); \
+ } else { \
+ *begin = *end = 0; \
+ }
+#define COMMON_INTERCEPTOR_INITIALIZE_RANGE(ptr, size) \
+ dfsan_set_label(0, ptr, size)
+
+INTERCEPTOR(void *, __tls_get_addr, void *arg) {
+ COMMON_INTERCEPTOR_ENTER(__tls_get_addr, arg);
+ void *res = REAL(__tls_get_addr)(arg);
+ uptr tls_begin, tls_end;
+ COMMON_INTERCEPTOR_GET_TLS_RANGE(&tls_begin, &tls_end);
+ DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, res, tls_begin, tls_end);
+ if (dtv) {
+ // New DTLS block has been allocated.
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE((void *)dtv->beg, dtv->size);
+ }
+ return res;
+}
+
namespace __dfsan {
-void InitializeInterceptors() {
+void initialize_interceptors() {
CHECK(!interceptors_initialized);
+ INTERCEPT_FUNCTION(aligned_alloc);
+ INTERCEPT_FUNCTION(calloc);
+ INTERCEPT_FUNCTION(cfree);
+ INTERCEPT_FUNCTION(free);
+ INTERCEPT_FUNCTION(mallinfo);
+ INTERCEPT_FUNCTION(malloc);
+ INTERCEPT_FUNCTION(malloc_stats);
+ INTERCEPT_FUNCTION(malloc_usable_size);
+ INTERCEPT_FUNCTION(mallopt);
+ INTERCEPT_FUNCTION(memalign);
INTERCEPT_FUNCTION(mmap);
INTERCEPT_FUNCTION(mmap64);
INTERCEPT_FUNCTION(munmap);
+ INTERCEPT_FUNCTION(posix_memalign);
+ INTERCEPT_FUNCTION(pvalloc);
+ INTERCEPT_FUNCTION(realloc);
+ INTERCEPT_FUNCTION(reallocarray);
+ INTERCEPT_FUNCTION(valloc);
+ INTERCEPT_FUNCTION(__tls_get_addr);
+ INTERCEPT_FUNCTION(__libc_memalign);
interceptors_initialized = true;
}
diff --git a/compiler-rt/lib/dfsan/dfsan_new_delete.cpp b/compiler-rt/lib/dfsan/dfsan_new_delete.cpp
new file mode 100644
index 000000000000..7ac906e81077
--- /dev/null
+++ b/compiler-rt/lib/dfsan/dfsan_new_delete.cpp
@@ -0,0 +1,124 @@
+//===-- dfsan_new_delete.cpp ----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of DataFlowSanitizer.
+//
+// Interceptors for operators new and delete.
+//===----------------------------------------------------------------------===//
+
+#include <stddef.h>
+
+#include "dfsan.h"
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_report.h"
+
+using namespace __dfsan;
+
+// Fake std::nothrow_t and std::align_val_t to avoid including <new>.
+namespace std {
+struct nothrow_t {};
+enum class align_val_t : size_t {};
+} // namespace std
+
+// TODO(alekseys): throw std::bad_alloc instead of dying on OOM.
+#define OPERATOR_NEW_BODY(nothrow) \
+ void *res = dfsan_malloc(size); \
+ if (!nothrow && UNLIKELY(!res)) { \
+ BufferedStackTrace stack; \
+ ReportOutOfMemory(size, &stack); \
+ } \
+ return res
+#define OPERATOR_NEW_BODY_ALIGN(nothrow) \
+ void *res = dfsan_memalign((uptr)align, size); \
+ if (!nothrow && UNLIKELY(!res)) { \
+ BufferedStackTrace stack; \
+ ReportOutOfMemory(size, &stack); \
+ } \
+ return res;
+
+INTERCEPTOR_ATTRIBUTE
+void *operator new(size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new[](size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new(size_t size, std::nothrow_t const &) {
+ OPERATOR_NEW_BODY(true /*nothrow*/);
+}
+INTERCEPTOR_ATTRIBUTE
+void *operator new[](size_t size, std::nothrow_t const &) {
+ OPERATOR_NEW_BODY(true /*nothrow*/);
+}
+INTERCEPTOR_ATTRIBUTE
+void *operator new(size_t size, std::align_val_t align) {
+ OPERATOR_NEW_BODY_ALIGN(false /*nothrow*/);
+}
+INTERCEPTOR_ATTRIBUTE
+void *operator new[](size_t size, std::align_val_t align) {
+ OPERATOR_NEW_BODY_ALIGN(false /*nothrow*/);
+}
+INTERCEPTOR_ATTRIBUTE
+void *operator new(size_t size, std::align_val_t align,
+ std::nothrow_t const &) {
+ OPERATOR_NEW_BODY_ALIGN(true /*nothrow*/);
+}
+INTERCEPTOR_ATTRIBUTE
+void *operator new[](size_t size, std::align_val_t align,
+ std::nothrow_t const &) {
+ OPERATOR_NEW_BODY_ALIGN(true /*nothrow*/);
+}
+
+#define OPERATOR_DELETE_BODY \
+ if (ptr) \
+ dfsan_deallocate(ptr)
+
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr)NOEXCEPT { OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, std::nothrow_t const &) {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, std::nothrow_t const &) {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, size_t size)NOEXCEPT { OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, size_t size) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, std::align_val_t align)NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, std::align_val_t align) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, std::align_val_t align,
+ std::nothrow_t const &) {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, std::align_val_t align,
+ std::nothrow_t const &) {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, size_t size, std::align_val_t align)NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, size_t size,
+ std::align_val_t align) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
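
A note for readers rather than part of the patch: from a caller's point of view, the throwing operator new overloads above abort via ReportOutOfMemory on allocation failure, the nothrow overloads pass a null result through, and every delete form funnels into dfsan_deallocate. A minimal sketch of that behavior, assuming a DFSan-instrumented build:

#include <new>
#include <cstdio>

int main() {
  // Throwing form: on allocation failure the interceptor reports OOM and
  // dies instead of throwing (see the TODO about std::bad_alloc above).
  int *a = new int[16];

  // Nothrow form: the interceptor returns dfsan_malloc()'s result directly,
  // so a failed allocation is observable as nullptr.
  int *b = new (std::nothrow) int[16];
  if (!b)
    std::puts("allocation failed");

  delete[] a;  // routed to dfsan_deallocate() by the delete interceptors
  delete[] b;  // null-checked by OPERATOR_DELETE_BODY, so this is safe
  return 0;
}
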
diff --git a/compiler-rt/lib/dfsan/dfsan_origin.h b/compiler-rt/lib/dfsan/dfsan_origin.h
new file mode 100644
index 000000000000..89fd7f99599f
--- /dev/null
+++ b/compiler-rt/lib/dfsan/dfsan_origin.h
@@ -0,0 +1,127 @@
+//===-- dfsan_origin.h ----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of DataFlowSanitizer.
+//
+// Origin id utils.
+//===----------------------------------------------------------------------===//
+
+#ifndef DFSAN_ORIGIN_H
+#define DFSAN_ORIGIN_H
+
+#include "dfsan_chained_origin_depot.h"
+#include "dfsan_flags.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+
+namespace __dfsan {
+
+// Origin handling.
+//
+// Origin is a 32-bit identifier that is attached to any taint value in the
+// program and describes how this memory came to be tainted.
+//
+// Chained origin id is like:
+// zzzz xxxx xxxx xxxx
+//
+// Chained origin id describes an event of storing a taint value to
+// memory. The xxx part is a value of ChainedOriginDepot, which is a mapping of
+// (stack_id, prev_id) -> id, where
+// * stack_id describes the event.
+// StackDepot keeps a mapping between those and corresponding stack traces.
+// * prev_id is another origin id that describes the earlier part of the
+// taint value history. 0 prev_id indicates the start of a chain.
+// Following a chain of prev_id provides the full recorded history of a taint
+// value.
+//
+// This, effectively, defines a forest where nodes are points in value history
+// marked with origin ids, and edges are events that are marked with stack_id.
+//
+// The "zzzz" bits of chained origin id are used to store the length of the
+// origin chain.
+
+class Origin {
+ public:
+ static bool isValidId(u32 id) { return id != 0; }
+
+ u32 raw_id() const { return raw_id_; }
+
+ bool isChainedOrigin() const { return Origin::isValidId(raw_id_); }
+
+ u32 getChainedId() const {
+ CHECK(Origin::isValidId(raw_id_));
+ return raw_id_ & kChainedIdMask;
+ }
+
+ // Returns the next origin in the chain and the current stack trace.
+ //
+ // It scans a partition of StackDepot linearly, and is used only by origin
+ // tracking report.
+ Origin getNextChainedOrigin(StackTrace *stack) const {
+ CHECK(Origin::isValidId(raw_id_));
+ u32 prev_id;
+ u32 stack_id = GetChainedOriginDepot()->Get(getChainedId(), &prev_id);
+ if (stack)
+ *stack = StackDepotGet(stack_id);
+ return Origin(prev_id);
+ }
+
+ static Origin CreateChainedOrigin(Origin prev, StackTrace *stack) {
+ int depth = prev.isChainedOrigin() ? prev.depth() : -1;
+ // depth is the length of the chain minus 1.
+ // origin_history_size of 0 means unlimited depth.
+ if (flags().origin_history_size > 0) {
+ ++depth;
+ if (depth >= flags().origin_history_size || depth > kMaxDepth)
+ return prev;
+ }
+
+ StackDepotHandle h = StackDepotPut_WithHandle(*stack);
+ if (!h.valid())
+ return prev;
+
+ if (flags().origin_history_per_stack_limit > 0) {
+ int use_count = h.use_count();
+ if (use_count > flags().origin_history_per_stack_limit)
+ return prev;
+ }
+
+ u32 chained_id;
+ bool inserted =
+ GetChainedOriginDepot()->Put(h.id(), prev.raw_id(), &chained_id);
+ CHECK((chained_id & kChainedIdMask) == chained_id);
+
+ if (inserted && flags().origin_history_per_stack_limit > 0)
+ h.inc_use_count_unsafe();
+
+ return Origin((depth << kDepthShift) | chained_id);
+ }
+
+ static Origin FromRawId(u32 id) { return Origin(id); }
+
+ private:
+ static const int kDepthBits = 4;
+ static const int kDepthShift = 32 - kDepthBits;
+
+ static const u32 kChainedIdMask = ((u32)-1) >> kDepthBits;
+
+ u32 raw_id_;
+
+ explicit Origin(u32 raw_id) : raw_id_(raw_id) {}
+
+ int depth() const {
+ CHECK(isChainedOrigin());
+ return (raw_id_ >> kDepthShift) & ((1 << kDepthBits) - 1);
+ }
+
+ public:
+ static const int kMaxDepth = (1 << kDepthBits) - 1;
+};
+
+} // namespace __dfsan
+
+#endif // DFSAN_ORIGIN_H
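
For illustration (not from the patch): with kDepthBits = 4, a raw origin id packs the chain depth into the top nibble and the ChainedOriginDepot id into the remaining 28 bits. A standalone sketch of the encode/decode, mirroring kDepthShift and kChainedIdMask above:

#include <cstdint>
#include <cstdio>

int main() {
  const int kDepthBits = 4;
  const int kDepthShift = 32 - kDepthBits;                      // 28
  const uint32_t kChainedIdMask = ~uint32_t(0) >> kDepthBits;   // 0x0fffffff

  uint32_t chained_id = 0x00abcdef;  // id handed out by ChainedOriginDepot
  uint32_t depth = 3;                // chain length minus 1

  uint32_t raw_id = (depth << kDepthShift) | chained_id;
  // Prints: raw=0x30abcdef depth=3 id=0x0abcdef
  std::printf("raw=0x%08x depth=%u id=0x%07x\n", raw_id,
              raw_id >> kDepthShift, raw_id & kChainedIdMask);
  return 0;
}
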
diff --git a/compiler-rt/lib/dfsan/dfsan_platform.h b/compiler-rt/lib/dfsan/dfsan_platform.h
index 4ff68b9d40b6..9b4333ee99d0 100644
--- a/compiler-rt/lib/dfsan/dfsan_platform.h
+++ b/compiler-rt/lib/dfsan/dfsan_platform.h
@@ -14,101 +14,74 @@
#ifndef DFSAN_PLATFORM_H
#define DFSAN_PLATFORM_H
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_platform.h"
+
namespace __dfsan {
-#if defined(__x86_64__)
-struct Mapping {
- static const uptr kShadowAddr = 0x10000;
- static const uptr kUnionTableAddr = 0x200000000000;
- static const uptr kAppAddr = 0x700000008000;
- static const uptr kShadowMask = ~0x700000000000;
-};
-#elif defined(__mips64)
-struct Mapping {
- static const uptr kShadowAddr = 0x10000;
- static const uptr kUnionTableAddr = 0x2000000000;
- static const uptr kAppAddr = 0xF000008000;
- static const uptr kShadowMask = ~0xF000000000;
-};
-#elif defined(__aarch64__)
-struct Mapping39 {
- static const uptr kShadowAddr = 0x10000;
- static const uptr kUnionTableAddr = 0x1000000000;
- static const uptr kAppAddr = 0x7000008000;
- static const uptr kShadowMask = ~0x7800000000;
-};
+using __sanitizer::uptr;
-struct Mapping42 {
- static const uptr kShadowAddr = 0x10000;
- static const uptr kUnionTableAddr = 0x8000000000;
- static const uptr kAppAddr = 0x3ff00008000;
- static const uptr kShadowMask = ~0x3c000000000;
-};
+// TODO: The memory mapping code to setup a 1:1 shadow is based on msan.
+// Consider refactoring these into a shared implementation.
-struct Mapping48 {
- static const uptr kShadowAddr = 0x10000;
- static const uptr kUnionTableAddr = 0x8000000000;
- static const uptr kAppAddr = 0xffff00008000;
- static const uptr kShadowMask = ~0xfffff0000000;
+struct MappingDesc {
+ uptr start;
+ uptr end;
+ enum Type { INVALID, APP, SHADOW, ORIGIN } type;
+ const char *name;
};
-extern int vmaSize;
-# define DFSAN_RUNTIME_VMA 1
-#else
-# error "DFSan not supported for this platform!"
-#endif
-
-enum MappingType {
- MAPPING_SHADOW_ADDR,
- MAPPING_UNION_TABLE_ADDR,
- MAPPING_APP_ADDR,
- MAPPING_SHADOW_MASK
-};
+#if SANITIZER_LINUX && SANITIZER_WORDSIZE == 64
-template<typename Mapping, int Type>
-uptr MappingImpl(void) {
- switch (Type) {
- case MAPPING_SHADOW_ADDR: return Mapping::kShadowAddr;
- case MAPPING_UNION_TABLE_ADDR: return Mapping::kUnionTableAddr;
- case MAPPING_APP_ADDR: return Mapping::kAppAddr;
- case MAPPING_SHADOW_MASK: return Mapping::kShadowMask;
- }
-}
+// All of the following configurations are supported.
+// ASLR disabled: main executable and DSOs at 0x555550000000
+// PIE and ASLR: main executable and DSOs at 0x7f0000000000
+// non-PIE: main executable below 0x100000000, DSOs at 0x7f0000000000
+// Heap at 0x700000000000.
+const MappingDesc kMemoryLayout[] = {
+ {0x000000000000ULL, 0x010000000000ULL, MappingDesc::APP, "app-1"},
+ {0x010000000000ULL, 0x100000000000ULL, MappingDesc::SHADOW, "shadow-2"},
+ {0x100000000000ULL, 0x110000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x110000000000ULL, 0x200000000000ULL, MappingDesc::ORIGIN, "origin-2"},
+ {0x200000000000ULL, 0x300000000000ULL, MappingDesc::SHADOW, "shadow-3"},
+ {0x300000000000ULL, 0x400000000000ULL, MappingDesc::ORIGIN, "origin-3"},
+ {0x400000000000ULL, 0x500000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x500000000000ULL, 0x510000000000ULL, MappingDesc::SHADOW, "shadow-1"},
+ {0x510000000000ULL, 0x600000000000ULL, MappingDesc::APP, "app-2"},
+ {0x600000000000ULL, 0x610000000000ULL, MappingDesc::ORIGIN, "origin-1"},
+ {0x610000000000ULL, 0x700000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x700000000000ULL, 0x800000000000ULL, MappingDesc::APP, "app-3"}};
+# define MEM_TO_SHADOW(mem) (((uptr)(mem)) ^ 0x500000000000ULL)
+# define SHADOW_TO_ORIGIN(mem) (((uptr)(mem)) + 0x100000000000ULL)
-template<int Type>
-uptr MappingArchImpl(void) {
-#ifdef __aarch64__
- switch (vmaSize) {
- case 39: return MappingImpl<Mapping39, Type>();
- case 42: return MappingImpl<Mapping42, Type>();
- case 48: return MappingImpl<Mapping48, Type>();
- }
- DCHECK(0);
- return 0;
#else
- return MappingImpl<Mapping, Type>();
+# error "Unsupported platform"
#endif
-}
-ALWAYS_INLINE
-uptr ShadowAddr() {
- return MappingArchImpl<MAPPING_SHADOW_ADDR>();
-}
+const uptr kMemoryLayoutSize = sizeof(kMemoryLayout) / sizeof(kMemoryLayout[0]);
-ALWAYS_INLINE
-uptr UnionTableAddr() {
- return MappingArchImpl<MAPPING_UNION_TABLE_ADDR>();
-}
+#define MEM_TO_ORIGIN(mem) (SHADOW_TO_ORIGIN(MEM_TO_SHADOW((mem))))
-ALWAYS_INLINE
-uptr AppAddr() {
- return MappingArchImpl<MAPPING_APP_ADDR>();
+#ifndef __clang__
+__attribute__((optimize("unroll-loops")))
+#endif
+inline bool
+addr_is_type(uptr addr, MappingDesc::Type mapping_type) {
+// It is critical for performance that this loop is unrolled (because then it is
+// simplified into just a few constant comparisons).
+#ifdef __clang__
+# pragma unroll
+#endif
+ for (unsigned i = 0; i < kMemoryLayoutSize; ++i)
+ if (kMemoryLayout[i].type == mapping_type &&
+ addr >= kMemoryLayout[i].start && addr < kMemoryLayout[i].end)
+ return true;
+ return false;
}
-ALWAYS_INLINE
-uptr ShadowMask() {
- return MappingArchImpl<MAPPING_SHADOW_MASK>();
-}
+#define MEM_IS_APP(mem) addr_is_type((uptr)(mem), MappingDesc::APP)
+#define MEM_IS_SHADOW(mem) addr_is_type((uptr)(mem), MappingDesc::SHADOW)
+#define MEM_IS_ORIGIN(mem) addr_is_type((uptr)(mem), MappingDesc::ORIGIN)
} // namespace __dfsan
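
As a sanity check on the new 1:1 mapping (illustrative, not part of the change): an address in the "app-3" range XORs into "shadow-3", and its origin sits one 0x100000000000 stride above the shadow, in "origin-3".

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t app    = 0x7f0000001000ULL;           // inside "app-3"
  uint64_t shadow = app ^ 0x500000000000ULL;     // MEM_TO_SHADOW
  uint64_t origin = shadow + 0x100000000000ULL;  // SHADOW_TO_ORIGIN

  // shadow == 0x2f0000001000 (inside "shadow-3"),
  // origin == 0x3f0000001000 (inside "origin-3").
  std::printf("app=%llx shadow=%llx origin=%llx\n",
              (unsigned long long)app, (unsigned long long)shadow,
              (unsigned long long)origin);
  return 0;
}
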
diff --git a/compiler-rt/lib/dfsan/dfsan_thread.cpp b/compiler-rt/lib/dfsan/dfsan_thread.cpp
new file mode 100644
index 000000000000..6869cf231587
--- /dev/null
+++ b/compiler-rt/lib/dfsan/dfsan_thread.cpp
@@ -0,0 +1,146 @@
+#include "dfsan_thread.h"
+
+#include <pthread.h>
+
+#include "dfsan.h"
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
+
+namespace __dfsan {
+
+DFsanThread *DFsanThread::Create(void *start_routine_trampoline,
+ thread_callback_t start_routine, void *arg,
+ bool track_origins) {
+ uptr PageSize = GetPageSizeCached();
+ uptr size = RoundUpTo(sizeof(DFsanThread), PageSize);
+ DFsanThread *thread = (DFsanThread *)MmapOrDie(size, __func__);
+ thread->start_routine_trampoline_ = start_routine_trampoline;
+ thread->start_routine_ = start_routine;
+ thread->arg_ = arg;
+ thread->track_origins_ = track_origins;
+ thread->destructor_iterations_ = GetPthreadDestructorIterations();
+
+ return thread;
+}
+
+void DFsanThread::SetThreadStackAndTls() {
+ uptr tls_size = 0;
+ uptr stack_size = 0;
+ GetThreadStackAndTls(IsMainThread(), &stack_.bottom, &stack_size, &tls_begin_,
+ &tls_size);
+ stack_.top = stack_.bottom + stack_size;
+ tls_end_ = tls_begin_ + tls_size;
+
+ int local;
+ CHECK(AddrIsInStack((uptr)&local));
+}
+
+void DFsanThread::ClearShadowForThreadStackAndTLS() {
+ dfsan_set_label(0, (void *)stack_.bottom, stack_.top - stack_.bottom);
+ if (tls_begin_ != tls_end_)
+ dfsan_set_label(0, (void *)tls_begin_, tls_end_ - tls_begin_);
+ DTLS *dtls = DTLS_Get();
+ CHECK_NE(dtls, 0);
+ ForEachDVT(dtls, [](const DTLS::DTV &dtv, int id) {
+ dfsan_set_label(0, (void *)(dtv.beg), dtv.size);
+ });
+}
+
+void DFsanThread::Init() {
+ SetThreadStackAndTls();
+ ClearShadowForThreadStackAndTLS();
+}
+
+void DFsanThread::TSDDtor(void *tsd) {
+ DFsanThread *t = (DFsanThread *)tsd;
+ t->Destroy();
+}
+
+void DFsanThread::Destroy() {
+ malloc_storage().CommitBack();
+ // We also clear the shadow on thread destruction because
+ // some code may still be executing in later TSD destructors
+ // and we don't want it to have any poisoned stack.
+ ClearShadowForThreadStackAndTLS();
+ uptr size = RoundUpTo(sizeof(DFsanThread), GetPageSizeCached());
+ UnmapOrDie(this, size);
+ DTLS_Destroy();
+}
+
+thread_return_t DFsanThread::ThreadStart() {
+ Init();
+
+ if (!start_routine_) {
+ // start_routine_ == 0 if we're on the main thread or on one of the
+ // OS X libdispatch worker threads. But nobody is supposed to call
+ // ThreadStart() for the worker threads.
+ return 0;
+ }
+
+ CHECK(start_routine_trampoline_);
+
+ typedef void *(*thread_callback_trampoline_t)(void *, void *, dfsan_label,
+ dfsan_label *);
+ typedef void *(*thread_callback_origin_trampoline_t)(
+ void *, void *, dfsan_label, dfsan_label *, dfsan_origin, dfsan_origin *);
+
+ dfsan_label ret_label;
+ if (!track_origins_)
+ return ((thread_callback_trampoline_t)
+ start_routine_trampoline_)((void *)start_routine_, arg_, 0,
+ &ret_label);
+
+ dfsan_origin ret_origin;
+ return ((thread_callback_origin_trampoline_t)
+ start_routine_trampoline_)((void *)start_routine_, arg_, 0,
+ &ret_label, 0, &ret_origin);
+}
+
+DFsanThread::StackBounds DFsanThread::GetStackBounds() const {
+ return {stack_.bottom, stack_.top};
+}
+
+uptr DFsanThread::stack_top() { return GetStackBounds().top; }
+
+uptr DFsanThread::stack_bottom() { return GetStackBounds().bottom; }
+
+bool DFsanThread::AddrIsInStack(uptr addr) {
+ const auto bounds = GetStackBounds();
+ return addr >= bounds.bottom && addr < bounds.top;
+}
+
+static pthread_key_t tsd_key;
+static bool tsd_key_inited = false;
+
+void DFsanTSDInit(void (*destructor)(void *tsd)) {
+ CHECK(!tsd_key_inited);
+ tsd_key_inited = true;
+ CHECK_EQ(0, pthread_key_create(&tsd_key, destructor));
+}
+
+static THREADLOCAL DFsanThread *dfsan_current_thread;
+
+DFsanThread *GetCurrentThread() { return dfsan_current_thread; }
+
+void SetCurrentThread(DFsanThread *t) {
+ // Make sure we do not reset the current DFsanThread.
+ CHECK_EQ(0, dfsan_current_thread);
+ dfsan_current_thread = t;
+ // Make sure that DFsanTSDDtor gets called at the end.
+ CHECK(tsd_key_inited);
+ pthread_setspecific(tsd_key, t);
+}
+
+void DFsanTSDDtor(void *tsd) {
+ DFsanThread *t = (DFsanThread *)tsd;
+ if (t->destructor_iterations_ > 1) {
+ t->destructor_iterations_--;
+ CHECK_EQ(0, pthread_setspecific(tsd_key, tsd));
+ return;
+ }
+ dfsan_current_thread = nullptr;
+ // Make sure that signal handler can not see a stale current thread pointer.
+ atomic_signal_fence(memory_order_seq_cst);
+ DFsanThread::TSDDtor(tsd);
+}
+
+} // namespace __dfsan
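
Background sketch (not DFSan code): DFsanTSDDtor relies on the POSIX rule that a key's destructor runs again on a later iteration if it re-installs a non-null value, up to PTHREAD_DESTRUCTOR_ITERATIONS rounds; this is how the DFsanThread outlives other keys' destructors. The same idiom in isolation, with hypothetical names:

#include <pthread.h>
#include <cstdio>

static pthread_key_t key;

// Keep the per-thread state alive until the last destructor round,
// analogous to the destructor_iterations_ countdown in DFsanTSDDtor.
static void dtor(void *p) {
  int *iterations_left = static_cast<int *>(p);
  if (--*iterations_left > 0) {
    // Re-setting a non-null value makes pthread run this destructor again
    // on the next round, after other keys' destructors have had their turn.
    pthread_setspecific(key, p);
    return;
  }
  std::puts("final destructor round: safe to tear down per-thread state");
}

static void *worker(void *) {
  static int iterations = 4;  // stand-in for destructor_iterations_
  pthread_setspecific(key, &iterations);
  return nullptr;
}

int main() {
  pthread_key_create(&key, dtor);
  pthread_t t;
  pthread_create(&t, nullptr, worker, nullptr);
  pthread_join(t, nullptr);
  return 0;
}
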
diff --git a/compiler-rt/lib/dfsan/dfsan_thread.h b/compiler-rt/lib/dfsan/dfsan_thread.h
new file mode 100644
index 000000000000..8dde626f5569
--- /dev/null
+++ b/compiler-rt/lib/dfsan/dfsan_thread.h
@@ -0,0 +1,83 @@
+//===-- dfsan_thread.h ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of DataFlowSanitizer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef DFSAN_THREAD_H
+#define DFSAN_THREAD_H
+
+#include "dfsan_allocator.h"
+#include "sanitizer_common/sanitizer_common.h"
+
+namespace __dfsan {
+
+class DFsanThread {
+ public:
+ // NOTE: There is no DFsanThread constructor. It is allocated
+ // via mmap() and *must* be valid in zero-initialized state.
+
+ static DFsanThread *Create(void *start_routine_trampoline,
+ thread_callback_t start_routine, void *arg,
+ bool track_origins = false);
+ static void TSDDtor(void *tsd);
+ void Destroy();
+
+ void Init(); // Should be called from the thread itself.
+ thread_return_t ThreadStart();
+
+ uptr stack_top();
+ uptr stack_bottom();
+ uptr tls_begin() { return tls_begin_; }
+ uptr tls_end() { return tls_end_; }
+ bool IsMainThread() { return start_routine_ == nullptr; }
+
+ bool InSignalHandler() { return in_signal_handler_; }
+ void EnterSignalHandler() { in_signal_handler_++; }
+ void LeaveSignalHandler() { in_signal_handler_--; }
+
+ DFsanThreadLocalMallocStorage &malloc_storage() { return malloc_storage_; }
+
+ int destructor_iterations_;
+
+ private:
+ void SetThreadStackAndTls();
+ void ClearShadowForThreadStackAndTLS();
+ struct StackBounds {
+ uptr bottom;
+ uptr top;
+ };
+ StackBounds GetStackBounds() const;
+
+ bool AddrIsInStack(uptr addr);
+
+ void *start_routine_trampoline_;
+ thread_callback_t start_routine_;
+ void *arg_;
+ bool track_origins_;
+
+ StackBounds stack_;
+
+ uptr tls_begin_;
+ uptr tls_end_;
+
+ unsigned in_signal_handler_;
+
+ DFsanThreadLocalMallocStorage malloc_storage_;
+};
+
+DFsanThread *GetCurrentThread();
+void SetCurrentThread(DFsanThread *t);
+void DFsanTSDInit(void (*destructor)(void *tsd));
+void DFsanTSDDtor(void *tsd);
+
+} // namespace __dfsan
+
+#endif // DFSAN_THREAD_H
diff --git a/compiler-rt/lib/dfsan/done_abilist.txt b/compiler-rt/lib/dfsan/done_abilist.txt
index e90dbc17a3cd..3c2670e04c29 100644
--- a/compiler-rt/lib/dfsan/done_abilist.txt
+++ b/compiler-rt/lib/dfsan/done_abilist.txt
@@ -28,12 +28,39 @@ fun:dfsan_set_write_callback=uninstrumented
fun:dfsan_set_write_callback=custom
fun:dfsan_flush=uninstrumented
fun:dfsan_flush=discard
+fun:dfsan_print_origin_trace=uninstrumented
+fun:dfsan_print_origin_trace=discard
+fun:dfsan_sprint_origin_trace=uninstrumented
+fun:dfsan_sprint_origin_trace=discard
+fun:dfsan_sprint_stack_trace=uninstrumented
+fun:dfsan_sprint_stack_trace=discard
+fun:dfsan_get_origin=uninstrumented
+fun:dfsan_get_origin=custom
+fun:dfsan_get_init_origin=uninstrumented
+fun:dfsan_get_init_origin=discard
+fun:dfsan_get_track_origins=uninstrumented
+fun:dfsan_get_track_origins=discard
###############################################################################
# glibc
###############################################################################
+# Functions of memory allocators
+fun:__libc_memalign=discard
+fun:aligned_alloc=discard
+fun:calloc=discard
+fun:cfree=discard
+fun:mallinfo=discard
fun:malloc=discard
fun:free=discard
+fun:malloc_stats=discard
+fun:malloc_usable_size=discard
+fun:mallopt=discard
+fun:memalign=discard
+fun:posix_memalign=discard
+fun:pvalloc=discard
+fun:realloc=discard
+fun:reallocarray=discard
+fun:valloc=discard
# Functions that return a value that depends on the input, but the output might
# not be necessarily data-dependent on the input.
@@ -148,7 +175,6 @@ fun:open=discard
fun:openat=discard
fun:pipe=discard
fun:posix_fadvise=discard
-fun:posix_memalign=discard
fun:prctl=discard
fun:printf=discard
fun:pthread_sigmask=discard
@@ -180,11 +206,11 @@ fun:symlink=discard
fun:syscall=discard
fun:unlink=discard
fun:uselocale=discard
+fun:vfprintf=discard
# Functions whose output does not depend on the input (need to zero the
# shadow manually).
fun:_dl_get_tls_static_info=custom
-fun:calloc=custom
fun:clock_gettime=custom
fun:dlopen=custom
fun:epoll_wait=custom
@@ -214,6 +240,7 @@ fun:ctime_r=custom
fun:inet_pton=custom
fun:localtime_r=custom
fun:memcpy=custom
+fun:memmove=custom
fun:memset=custom
fun:strcpy=custom
fun:strdup=custom
@@ -223,6 +250,7 @@ fun:strtol=custom
fun:strtoll=custom
fun:strtoul=custom
fun:strtoull=custom
+fun:strcat=custom
# Functions that produce an output that is computed from the input, but is not
# necessarily data dependent.
@@ -252,6 +280,7 @@ fun:sched_getaffinity=custom
fun:select=custom
fun:sigemptyset=custom
fun:sigaction=custom
+fun:signal=custom
fun:gettimeofday=custom
# sprintf-like
@@ -262,6 +291,9 @@ fun:snprintf=custom
fun:asprintf=discard
fun:qsort=discard
+# fork
+fun:fork=custom
+
###############################################################################
# pthread
###############################################################################
@@ -386,6 +418,38 @@ fun:__sanitizer_cov_trace_pc*=discard
fun:__sanitizer_cov_pcs_init=uninstrumented
fun:__sanitizer_cov_pcs_init=discard
+fun:__sanitizer_get_current_allocated_bytes=uninstrumented
+fun:__sanitizer_get_current_allocated_bytes=discard
+fun:__sanitizer_get_heap_size=uninstrumented
+fun:__sanitizer_get_heap_size=discard
+fun:__sanitizer_get_free_bytes=uninstrumented
+fun:__sanitizer_get_free_bytes=discard
+fun:__sanitizer_get_unmapped_bytes=uninstrumented
+fun:__sanitizer_get_unmapped_bytes=discard
+fun:__sanitizer_get_estimated_allocated_size=uninstrumented
+fun:__sanitizer_get_estimated_allocated_size=discard
+fun:__sanitizer_get_ownership=uninstrumented
+fun:__sanitizer_get_ownership=discard
+fun:__sanitizer_get_allocated_size=uninstrumented
+fun:__sanitizer_get_allocated_size=discard
+fun:__sanitizer_print_stack_trace=uninstrumented
+fun:__sanitizer_print_stack_trace=discard
+
+fun:TcmallocSlab_Internal_PushBatch_FixedShift=uninstrumented
+fun:TcmallocSlab_Internal_PushBatch_FixedShift=discard
+fun:TcmallocSlab_Internal_PushBatch_FixedShift_VCPU=uninstrumented
+fun:TcmallocSlab_Internal_PushBatch_FixedShift_VCPU=discard
+fun:TcmallocSlab_Internal_PerCpuCmpxchg64=uninstrumented
+fun:TcmallocSlab_Internal_PerCpuCmpxchg64=discard
+fun:TcmallocSlab_Internal_PerCpuCmpxchg64_VCPU=uninstrumented
+fun:TcmallocSlab_Internal_PerCpuCmpxchg64_VCPU=discard
+fun:TcmallocSlab_Internal_PopBatch_FixedShift=uninstrumented
+fun:TcmallocSlab_Internal_PopBatch_FixedShift=discard
+fun:TcmallocSlab_Internal_PopBatch_FixedShift_VCPU=uninstrumented
+fun:TcmallocSlab_Internal_PopBatch_FixedShift_VCPU=discard
+
# Ignores the dfsan wrappers.
fun:__dfsw_*=uninstrumented
fun:__dfsw_*=discard
+fun:__dfso_*=uninstrumented
+fun:__dfso_*=discard
diff --git a/compiler-rt/lib/fuzzer/FuzzerBuiltins.h b/compiler-rt/lib/fuzzer/FuzzerBuiltins.h
index 4c0ada82662d..ce0bd5cb47f4 100644
--- a/compiler-rt/lib/fuzzer/FuzzerBuiltins.h
+++ b/compiler-rt/lib/fuzzer/FuzzerBuiltins.h
@@ -26,7 +26,6 @@ inline uint32_t Bswap(uint32_t x) { return __builtin_bswap32(x); }
inline uint64_t Bswap(uint64_t x) { return __builtin_bswap64(x); }
inline uint32_t Clzll(unsigned long long X) { return __builtin_clzll(X); }
-inline uint32_t Clz(unsigned long long X) { return __builtin_clz(X); }
inline int Popcountll(unsigned long long X) { return __builtin_popcountll(X); }
} // namespace fuzzer
diff --git a/compiler-rt/lib/fuzzer/FuzzerBuiltinsMsvc.h b/compiler-rt/lib/fuzzer/FuzzerBuiltinsMsvc.h
index c5bec9787d8e..ab191b60ef6e 100644
--- a/compiler-rt/lib/fuzzer/FuzzerBuiltinsMsvc.h
+++ b/compiler-rt/lib/fuzzer/FuzzerBuiltinsMsvc.h
@@ -52,12 +52,6 @@ inline uint32_t Clzll(uint64_t X) {
return 64;
}
-inline uint32_t Clz(uint32_t X) {
- unsigned long LeadZeroIdx = 0;
- if (_BitScanReverse(&LeadZeroIdx, X)) return 31 - LeadZeroIdx;
- return 32;
-}
-
inline int Popcountll(unsigned long long X) {
#if !defined(_M_ARM) && !defined(_M_X64)
return __popcnt(X) + __popcnt(X >> 32);
diff --git a/compiler-rt/lib/fuzzer/FuzzerCorpus.h b/compiler-rt/lib/fuzzer/FuzzerCorpus.h
index daea4f5213b1..f8c126072c96 100644
--- a/compiler-rt/lib/fuzzer/FuzzerCorpus.h
+++ b/compiler-rt/lib/fuzzer/FuzzerCorpus.h
@@ -44,7 +44,7 @@ struct InputInfo {
// Power schedule.
bool NeedsEnergyUpdate = false;
double Energy = 0.0;
- size_t SumIncidence = 0;
+ double SumIncidence = 0.0;
Vector<std::pair<uint32_t, uint16_t>> FeatureFreqs;
// Delete feature Idx and its frequency from FeatureFreqs.
@@ -74,27 +74,28 @@ struct InputInfo {
void UpdateEnergy(size_t GlobalNumberOfFeatures, bool ScalePerExecTime,
std::chrono::microseconds AverageUnitExecutionTime) {
Energy = 0.0;
- SumIncidence = 0;
+ SumIncidence = 0.0;
// Apply add-one smoothing to locally discovered features.
for (auto F : FeatureFreqs) {
- size_t LocalIncidence = F.second + 1;
- Energy -= LocalIncidence * logl(LocalIncidence);
+ double LocalIncidence = F.second + 1;
+ Energy -= LocalIncidence * log(LocalIncidence);
SumIncidence += LocalIncidence;
}
// Apply add-one smoothing to locally undiscovered features.
- // PreciseEnergy -= 0; // since logl(1.0) == 0)
- SumIncidence += (GlobalNumberOfFeatures - FeatureFreqs.size());
+ // PreciseEnergy -= 0; // since log(1.0) == 0)
+ SumIncidence +=
+ static_cast<double>(GlobalNumberOfFeatures - FeatureFreqs.size());
// Add a single locally abundant feature apply add-one smoothing.
- size_t AbdIncidence = NumExecutedMutations + 1;
- Energy -= AbdIncidence * logl(AbdIncidence);
+ double AbdIncidence = static_cast<double>(NumExecutedMutations + 1);
+ Energy -= AbdIncidence * log(AbdIncidence);
SumIncidence += AbdIncidence;
// Normalize.
if (SumIncidence != 0)
- Energy = (Energy / SumIncidence) + logl(SumIncidence);
+ Energy = Energy / SumIncidence + log(SumIncidence);
if (ScalePerExecTime) {
// Scaling to favor inputs with lower execution time.
@@ -213,6 +214,8 @@ public:
assert(!U.empty());
if (FeatureDebug)
Printf("ADD_TO_CORPUS %zd NF %zd\n", Inputs.size(), NumFeatures);
+ // Inputs.size() is cast to uint32_t below.
+ assert(Inputs.size() < std::numeric_limits<uint32_t>::max());
Inputs.push_back(new InputInfo());
InputInfo &II = *Inputs.back();
II.U = U;
@@ -224,7 +227,7 @@ public:
II.HasFocusFunction = HasFocusFunction;
// Assign maximal energy to the new seed.
II.Energy = RareFeatures.empty() ? 1.0 : log(RareFeatures.size());
- II.SumIncidence = RareFeatures.size();
+ II.SumIncidence = static_cast<double>(RareFeatures.size());
II.NeedsEnergyUpdate = false;
std::sort(II.UniqFeatureSet.begin(), II.UniqFeatureSet.end());
ComputeSHA1(U.data(), U.size(), II.Sha1);
@@ -399,7 +402,7 @@ public:
// Zero energy seeds will never be fuzzed and remain zero energy.
if (II->Energy > 0.0) {
II->SumIncidence += 1;
- II->Energy += logl(II->SumIncidence) / II->SumIncidence;
+ II->Energy += log(II->SumIncidence) / II->SumIncidence;
}
}
@@ -426,7 +429,8 @@ public:
NumUpdatedFeatures++;
if (FeatureDebug)
Printf("ADD FEATURE %zd sz %d\n", Idx, NewSize);
- SmallestElementPerFeature[Idx] = Inputs.size();
+ // Inputs.size() is guaranteed to be less than UINT32_MAX by AddToCorpus.
+ SmallestElementPerFeature[Idx] = static_cast<uint32_t>(Inputs.size());
InputSizesPerFeature[Idx] = NewSize;
return true;
}
@@ -464,7 +468,7 @@ private:
static const bool FeatureDebug = false;
- size_t GetFeature(size_t Idx) const { return InputSizesPerFeature[Idx]; }
+ uint32_t GetFeature(size_t Idx) const { return InputSizesPerFeature[Idx]; }
void ValidateFeatureSet() {
if (FeatureDebug)
@@ -539,9 +543,11 @@ private:
if (VanillaSchedule) {
for (size_t i = 0; i < N; i++)
- Weights[i] = Inputs[i]->NumFeatures
- ? (i + 1) * (Inputs[i]->HasFocusFunction ? 1000 : 1)
- : 0.;
+ Weights[i] =
+ Inputs[i]->NumFeatures
+ ? static_cast<double>((i + 1) *
+ (Inputs[i]->HasFocusFunction ? 1000 : 1))
+ : 0.;
}
if (FeatureDebug) {
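
To see the arithmetic in isolation (illustrative only): UpdateEnergy above is an add-one-smoothed entropy estimate, now computed entirely in double. The same computation with made-up counts:

#include <cmath>
#include <cstdio>
#include <vector>

int main() {
  // Locally observed feature frequencies (F.second in the patch) and the
  // number of executed mutations; the values here are made up.
  std::vector<double> freqs = {3, 1, 7};
  double global_features = 100, executed_mutations = 50;

  double energy = 0.0, sum = 0.0;
  for (double f : freqs) {                  // add-one smoothing, local features
    double inc = f + 1;
    energy -= inc * std::log(inc);
    sum += inc;
  }
  sum += global_features - freqs.size();    // undiscovered features: log(1) == 0
  double abd = executed_mutations + 1;      // single locally abundant feature
  energy -= abd * std::log(abd);
  sum += abd;
  if (sum != 0)
    energy = energy / sum + std::log(sum);  // normalize
  std::printf("energy = %f\n", energy);
  return 0;
}
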
diff --git a/compiler-rt/lib/fuzzer/FuzzerDataFlowTrace.cpp b/compiler-rt/lib/fuzzer/FuzzerDataFlowTrace.cpp
index 0e9cdf7e66b1..23d422590d19 100644
--- a/compiler-rt/lib/fuzzer/FuzzerDataFlowTrace.cpp
+++ b/compiler-rt/lib/fuzzer/FuzzerDataFlowTrace.cpp
@@ -60,6 +60,7 @@ bool BlockCoverage::AppendCoverage(std::istream &IN) {
CoveredBlocks.push_back(BB);
}
if (CoveredBlocks.empty()) return false;
+ // Ensures no CoverageVector is longer than UINT32_MAX.
uint32_t NumBlocks = CoveredBlocks.back();
CoveredBlocks.pop_back();
for (auto BB : CoveredBlocks)
@@ -200,7 +201,8 @@ bool DataFlowTrace::Init(const std::string &DirPath, std::string *FocusFunction,
Printf("INFO: AUTOFOCUS: %zd %s\n", FocusFuncIdx,
FunctionNames[FocusFuncIdx].c_str());
for (size_t i = 0; i < NumFunctions; i++) {
- if (!Weights[i]) continue;
+ if (Weights[i] == 0.0)
+ continue;
Printf(" [%zd] W %g\tBB-tot %u\tBB-cov %u\tEntryFreq %u:\t%s\n", i,
Weights[i], Coverage.GetNumberOfBlocks(i),
Coverage.GetNumberOfCoveredBlocks(i), Coverage.GetCounter(i, 0),
diff --git a/compiler-rt/lib/fuzzer/FuzzerDataFlowTrace.h b/compiler-rt/lib/fuzzer/FuzzerDataFlowTrace.h
index d6e3de30a4ef..07c03bb25651 100644
--- a/compiler-rt/lib/fuzzer/FuzzerDataFlowTrace.h
+++ b/compiler-rt/lib/fuzzer/FuzzerDataFlowTrace.h
@@ -42,7 +42,8 @@ int CollectDataFlow(const std::string &DFTBinary, const std::string &DirPath,
const Vector<SizedFile> &CorporaFiles);
class BlockCoverage {
- public:
+public:
+ // These functions guarantee no CoverageVector is longer than UINT32_MAX.
bool AppendCoverage(std::istream &IN);
bool AppendCoverage(const std::string &S);
@@ -50,7 +51,8 @@ class BlockCoverage {
uint32_t GetCounter(size_t FunctionId, size_t BasicBlockId) {
auto It = Functions.find(FunctionId);
- if (It == Functions.end()) return 0;
+ if (It == Functions.end())
+ return 0;
const auto &Counters = It->second;
if (BasicBlockId < Counters.size())
return Counters[BasicBlockId];
@@ -61,7 +63,7 @@ class BlockCoverage {
auto It = Functions.find(FunctionId);
if (It == Functions.end()) return 0;
const auto &Counters = It->second;
- return Counters.size();
+ return static_cast<uint32_t>(Counters.size());
}
uint32_t GetNumberOfCoveredBlocks(size_t FunctionId) {
@@ -78,8 +80,7 @@ class BlockCoverage {
Vector<double> FunctionWeights(size_t NumFunctions) const;
void clear() { Functions.clear(); }
- private:
-
+private:
typedef Vector<uint32_t> CoverageVector;
uint32_t NumberOfCoveredBlocks(const CoverageVector &Counters) const {
@@ -91,7 +92,8 @@ class BlockCoverage {
}
uint32_t NumberOfUncoveredBlocks(const CoverageVector &Counters) const {
- return Counters.size() - NumberOfCoveredBlocks(Counters);
+ return static_cast<uint32_t>(Counters.size()) -
+ NumberOfCoveredBlocks(Counters);
}
uint32_t SmallestNonZeroCounter(const CoverageVector &Counters) const {
diff --git a/compiler-rt/lib/fuzzer/FuzzerDictionary.h b/compiler-rt/lib/fuzzer/FuzzerDictionary.h
index 301c5d9afecf..db55907d9363 100644
--- a/compiler-rt/lib/fuzzer/FuzzerDictionary.h
+++ b/compiler-rt/lib/fuzzer/FuzzerDictionary.h
@@ -23,12 +23,14 @@ template <size_t kMaxSizeT> class FixedWord {
public:
static const size_t kMaxSize = kMaxSizeT;
FixedWord() {}
- FixedWord(const uint8_t *B, uint8_t S) { Set(B, S); }
+ FixedWord(const uint8_t *B, size_t S) { Set(B, S); }
- void Set(const uint8_t *B, uint8_t S) {
+ void Set(const uint8_t *B, size_t S) {
+ static_assert(kMaxSizeT <= std::numeric_limits<uint8_t>::max(),
+ "FixedWord::kMaxSizeT cannot fit in a uint8_t.");
assert(S <= kMaxSize);
memcpy(Data, B, S);
- Size = S;
+ Size = static_cast<uint8_t>(S);
}
bool operator==(const FixedWord<kMaxSize> &w) const {
diff --git a/compiler-rt/lib/fuzzer/FuzzerDriver.cpp b/compiler-rt/lib/fuzzer/FuzzerDriver.cpp
index 447cafce7fd4..ceaa9070512f 100644
--- a/compiler-rt/lib/fuzzer/FuzzerDriver.cpp
+++ b/compiler-rt/lib/fuzzer/FuzzerDriver.cpp
@@ -159,14 +159,14 @@ static bool ParseOneFlag(const char *Param) {
const char *Str = FlagValue(Param, Name);
if (Str) {
if (FlagDescriptions[F].IntFlag) {
- int Val = MyStol(Str);
- *FlagDescriptions[F].IntFlag = Val;
+ auto Val = MyStol(Str);
+ *FlagDescriptions[F].IntFlag = static_cast<int>(Val);
if (Flags.verbosity >= 2)
Printf("Flag: %s %d\n", Name, Val);
return true;
} else if (FlagDescriptions[F].UIntFlag) {
- unsigned int Val = std::stoul(Str);
- *FlagDescriptions[F].UIntFlag = Val;
+ auto Val = std::stoul(Str);
+ *FlagDescriptions[F].UIntFlag = static_cast<unsigned int>(Val);
if (Flags.verbosity >= 2)
Printf("Flag: %s %u\n", Name, Val);
return true;
@@ -789,8 +789,8 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {
unsigned Seed = Flags.seed;
// Initialize Seed.
if (Seed == 0)
- Seed =
- std::chrono::system_clock::now().time_since_epoch().count() + GetPid();
+ Seed = static_cast<unsigned>(
+ std::chrono::system_clock::now().time_since_epoch().count() + GetPid());
if (Flags.verbosity)
Printf("INFO: Seed: %u\n", Seed);
diff --git a/compiler-rt/lib/fuzzer/FuzzerFork.cpp b/compiler-rt/lib/fuzzer/FuzzerFork.cpp
index 84725d22a9c7..5134a5d979e6 100644
--- a/compiler-rt/lib/fuzzer/FuzzerFork.cpp
+++ b/compiler-rt/lib/fuzzer/FuzzerFork.cpp
@@ -142,7 +142,9 @@ struct GlobalEnv {
CollectDFT(SF);
}
auto Time2 = std::chrono::system_clock::now();
- Job->DftTimeInSeconds = duration_cast<seconds>(Time2 - Time1).count();
+ auto DftTimeInSeconds = duration_cast<seconds>(Time2 - Time1).count();
+ assert(DftTimeInSeconds < std::numeric_limits<int>::max());
+ Job->DftTimeInSeconds = static_cast<int>(DftTimeInSeconds);
}
if (!Seeds.empty()) {
Job->SeedListPath =
@@ -314,8 +316,11 @@ void FuzzWithFork(Random &Rand, const FuzzingOptions &Options,
Env.Files.push_back(File.File);
} else {
auto CFPath = DirPlusFile(Env.TempDir, "merge.txt");
- CrashResistantMerge(Env.Args, {}, SeedFiles, &Env.Files, {}, &Env.Features,
- {}, &Env.Cov, CFPath, false);
+ Set<uint32_t> NewFeatures, NewCov;
+ CrashResistantMerge(Env.Args, {}, SeedFiles, &Env.Files, Env.Features,
+ &NewFeatures, Env.Cov, &NewCov, CFPath, false);
+ Env.Features.insert(NewFeatures.begin(), NewFeatures.end());
+ Env.Cov.insert(NewFeatures.begin(), NewFeatures.end());
RemoveFile(CFPath);
}
Printf("INFO: -fork=%d: %zd seed inputs, starting to fuzz in %s\n", NumJobs,
diff --git a/compiler-rt/lib/fuzzer/FuzzerIO.cpp b/compiler-rt/lib/fuzzer/FuzzerIO.cpp
index 54a7219fc0e0..7f149ac6c485 100644
--- a/compiler-rt/lib/fuzzer/FuzzerIO.cpp
+++ b/compiler-rt/lib/fuzzer/FuzzerIO.cpp
@@ -90,8 +90,9 @@ void AppendToFile(const uint8_t *Data, size_t Size, const std::string &Path) {
fclose(Out);
}
-void ReadDirToVectorOfUnits(const char *Path, Vector<Unit> *V,
- long *Epoch, size_t MaxSize, bool ExitOnError) {
+void ReadDirToVectorOfUnits(const char *Path, Vector<Unit> *V, long *Epoch,
+ size_t MaxSize, bool ExitOnError,
+ Vector<std::string> *VPaths) {
long E = Epoch ? *Epoch : 0;
Vector<std::string> Files;
ListFilesInDirRecursive(Path, Epoch, &Files, /*TopDir*/true);
@@ -103,12 +104,14 @@ void ReadDirToVectorOfUnits(const char *Path, Vector<Unit> *V,
if ((NumLoaded & (NumLoaded - 1)) == 0 && NumLoaded >= 1024)
Printf("Loaded %zd/%zd files from %s\n", NumLoaded, Files.size(), Path);
auto S = FileToVector(X, MaxSize, ExitOnError);
- if (!S.empty())
+ if (!S.empty()) {
V->push_back(S);
+ if (VPaths)
+ VPaths->push_back(X);
+ }
}
}
-
void GetSizedFilesFromDir(const std::string &Dir, Vector<SizedFile> *V) {
Vector<std::string> Files;
ListFilesInDirRecursive(Dir, 0, &Files, /*TopDir*/true);
diff --git a/compiler-rt/lib/fuzzer/FuzzerIO.h b/compiler-rt/lib/fuzzer/FuzzerIO.h
index abd25110d07d..bde18267ea36 100644
--- a/compiler-rt/lib/fuzzer/FuzzerIO.h
+++ b/compiler-rt/lib/fuzzer/FuzzerIO.h
@@ -32,8 +32,9 @@ void WriteToFile(const Unit &U, const std::string &Path);
void AppendToFile(const uint8_t *Data, size_t Size, const std::string &Path);
void AppendToFile(const std::string &Data, const std::string &Path);
-void ReadDirToVectorOfUnits(const char *Path, Vector<Unit> *V,
- long *Epoch, size_t MaxSize, bool ExitOnError);
+void ReadDirToVectorOfUnits(const char *Path, Vector<Unit> *V, long *Epoch,
+ size_t MaxSize, bool ExitOnError,
+ Vector<std::string> *VPaths = 0);
// Returns "Dir/FileName" or equivalent for the current OS.
std::string DirPlusFile(const std::string &DirPath,
diff --git a/compiler-rt/lib/fuzzer/FuzzerInterceptors.cpp b/compiler-rt/lib/fuzzer/FuzzerInterceptors.cpp
index b87798603fda..d5b0a42fd3fb 100644
--- a/compiler-rt/lib/fuzzer/FuzzerInterceptors.cpp
+++ b/compiler-rt/lib/fuzzer/FuzzerInterceptors.cpp
@@ -25,6 +25,7 @@
}
#include <cassert>
+#include <cstddef> // for size_t
#include <cstdint>
#include <dlfcn.h> // for dlsym()
diff --git a/compiler-rt/lib/fuzzer/FuzzerLoop.cpp b/compiler-rt/lib/fuzzer/FuzzerLoop.cpp
index 6e3bf44f8b45..86a78ab75174 100644
--- a/compiler-rt/lib/fuzzer/FuzzerLoop.cpp
+++ b/compiler-rt/lib/fuzzer/FuzzerLoop.cpp
@@ -414,19 +414,25 @@ void Fuzzer::RereadOutputCorpus(size_t MaxSize) {
if (Options.OutputCorpus.empty() || !Options.ReloadIntervalSec)
return;
Vector<Unit> AdditionalCorpus;
- ReadDirToVectorOfUnits(Options.OutputCorpus.c_str(), &AdditionalCorpus,
- &EpochOfLastReadOfOutputCorpus, MaxSize,
- /*ExitOnError*/ false);
+ Vector<std::string> AdditionalCorpusPaths;
+ ReadDirToVectorOfUnits(
+ Options.OutputCorpus.c_str(), &AdditionalCorpus,
+ &EpochOfLastReadOfOutputCorpus, MaxSize,
+ /*ExitOnError*/ false,
+ (Options.Verbosity >= 2 ? &AdditionalCorpusPaths : nullptr));
if (Options.Verbosity >= 2)
Printf("Reload: read %zd new units.\n", AdditionalCorpus.size());
bool Reloaded = false;
- for (auto &U : AdditionalCorpus) {
+ for (size_t i = 0; i != AdditionalCorpus.size(); ++i) {
+ auto &U = AdditionalCorpus[i];
if (U.size() > MaxSize)
U.resize(MaxSize);
if (!Corpus.HasUnit(U)) {
if (RunOne(U.data(), U.size())) {
CheckExitOnSrcPosOrItem();
Reloaded = true;
+ if (Options.Verbosity >= 2)
+ Printf("Reloaded %s\n", AdditionalCorpusPaths[i].c_str());
}
}
}
@@ -440,8 +446,9 @@ void Fuzzer::PrintPulseAndReportSlowInput(const uint8_t *Data, size_t Size) {
if (!(TotalNumberOfRuns & (TotalNumberOfRuns - 1)) &&
secondsSinceProcessStartUp() >= 2)
PrintStats("pulse ");
- if (TimeOfUnit > TimeOfLongestUnitInSeconds * 1.1 &&
- TimeOfUnit >= Options.ReportSlowUnits) {
+  auto Threshold =
+      static_cast<long>(static_cast<double>(TimeOfLongestUnitInSeconds) * 1.1);
+  if (TimeOfUnit > Threshold && TimeOfUnit >= Options.ReportSlowUnits) {
TimeOfLongestUnitInSeconds = TimeOfUnit;
Printf("Slowest unit: %zd s:\n", TimeOfLongestUnitInSeconds);
WriteUnitToFileWithPrefix({Data, Data + Size}, "slow-unit-");
@@ -501,6 +508,8 @@ bool Fuzzer::RunOne(const uint8_t *Data, size_t Size, bool MayDeleteFile,
bool *FoundUniqFeatures) {
if (!Size)
return false;
+ // Largest input length should be INT_MAX.
+ assert(Size < std::numeric_limits<uint32_t>::max());
ExecuteCallback(Data, Size);
auto TimeOfUnit = duration_cast<microseconds>(UnitStopTime - UnitStartTime);
@@ -508,8 +517,8 @@ bool Fuzzer::RunOne(const uint8_t *Data, size_t Size, bool MayDeleteFile,
UniqFeatureSetTmp.clear();
size_t FoundUniqFeaturesOfII = 0;
size_t NumUpdatesBefore = Corpus.NumFeatureUpdates();
- TPC.CollectFeatures([&](size_t Feature) {
- if (Corpus.AddFeature(Feature, Size, Options.Shrink))
+ TPC.CollectFeatures([&](uint32_t Feature) {
+ if (Corpus.AddFeature(Feature, static_cast<uint32_t>(Size), Options.Shrink))
UniqFeatureSetTmp.push_back(Feature);
if (Options.Entropic)
Corpus.UpdateFeatureFrequency(II, Feature);
@@ -575,7 +584,10 @@ static bool LooseMemeq(const uint8_t *A, const uint8_t *B, size_t Size) {
!memcmp(A + Size - Limit / 2, B + Size - Limit / 2, Limit / 2);
}
-void Fuzzer::ExecuteCallback(const uint8_t *Data, size_t Size) {
+// This method is not inlined because it would cause a test to fail where it
+// is part of the stack unwinding. See D97975 for details.
+ATTRIBUTE_NOINLINE void Fuzzer::ExecuteCallback(const uint8_t *Data,
+ size_t Size) {
TPC.RecordInitialStack();
TotalNumberOfRuns++;
assert(InFuzzingThread());
diff --git a/compiler-rt/lib/fuzzer/FuzzerMerge.cpp b/compiler-rt/lib/fuzzer/FuzzerMerge.cpp
index e3ad8b3851e7..162453ceae2c 100644
--- a/compiler-rt/lib/fuzzer/FuzzerMerge.cpp
+++ b/compiler-rt/lib/fuzzer/FuzzerMerge.cpp
@@ -82,9 +82,9 @@ bool Merger::Parse(std::istream &IS, bool ParseCoverage) {
while (std::getline(IS, Line, '\n')) {
std::istringstream ISS1(Line);
std::string Marker;
- size_t N;
- ISS1 >> Marker;
- ISS1 >> N;
+ uint32_t N;
+ if (!(ISS1 >> Marker) || !(ISS1 >> N))
+ return false;
if (Marker == "STARTED") {
// STARTED FILE_ID FILE_SIZE
if (ExpectedStartMarker != N)
@@ -137,6 +137,8 @@ size_t Merger::Merge(const Set<uint32_t> &InitialFeatures,
const Set<uint32_t> &InitialCov, Set<uint32_t> *NewCov,
Vector<std::string> *NewFiles) {
NewFiles->clear();
+ NewFeatures->clear();
+ NewCov->clear();
assert(NumFilesInFirstCorpus <= Files.size());
Set<uint32_t> AllFeatures = InitialFeatures;
diff --git a/compiler-rt/lib/fuzzer/FuzzerMutate.cpp b/compiler-rt/lib/fuzzer/FuzzerMutate.cpp
index cf34a9fe8e2e..4650f1beceac 100644
--- a/compiler-rt/lib/fuzzer/FuzzerMutate.cpp
+++ b/compiler-rt/lib/fuzzer/FuzzerMutate.cpp
@@ -61,14 +61,20 @@ MutationDispatcher::MutationDispatcher(Random &Rand,
}
static char RandCh(Random &Rand) {
- if (Rand.RandBool()) return Rand(256);
+ if (Rand.RandBool())
+ return static_cast<char>(Rand(256));
const char Special[] = "!*'();:@&=+$,/?%#[]012Az-`~.\xff\x00";
return Special[Rand(sizeof(Special) - 1)];
}
size_t MutationDispatcher::Mutate_Custom(uint8_t *Data, size_t Size,
size_t MaxSize) {
- return EF->LLVMFuzzerCustomMutator(Data, Size, MaxSize, Rand.Rand());
+ if (EF->__msan_unpoison)
+ EF->__msan_unpoison(Data, Size);
+ if (EF->__msan_unpoison_param)
+ EF->__msan_unpoison_param(4);
+ return EF->LLVMFuzzerCustomMutator(Data, Size, MaxSize,
+ Rand.Rand<unsigned int>());
}
size_t MutationDispatcher::Mutate_CustomCrossOver(uint8_t *Data, size_t Size,
@@ -81,8 +87,18 @@ size_t MutationDispatcher::Mutate_CustomCrossOver(uint8_t *Data, size_t Size,
return 0;
CustomCrossOverInPlaceHere.resize(MaxSize);
auto &U = CustomCrossOverInPlaceHere;
+
+ if (EF->__msan_unpoison) {
+ EF->__msan_unpoison(Data, Size);
+ EF->__msan_unpoison(Other.data(), Other.size());
+ EF->__msan_unpoison(U.data(), U.size());
+ }
+ if (EF->__msan_unpoison_param)
+ EF->__msan_unpoison_param(7);
size_t NewSize = EF->LLVMFuzzerCustomCrossOver(
- Data, Size, Other.data(), Other.size(), U.data(), U.size(), Rand.Rand());
+ Data, Size, Other.data(), Other.size(), U.data(), U.size(),
+ Rand.Rand<unsigned int>());
+
if (!NewSize)
return 0;
assert(NewSize <= MaxSize && "CustomCrossOver returned overisized unit");
@@ -135,7 +151,8 @@ size_t MutationDispatcher::Mutate_InsertRepeatedBytes(uint8_t *Data,
// Insert new values at Data[Idx].
memmove(Data + Idx + N, Data + Idx, Size - Idx);
// Give preference to 0x00 and 0xff.
- uint8_t Byte = Rand.RandBool() ? Rand(256) : (Rand.RandBool() ? 0 : 255);
+ uint8_t Byte = static_cast<uint8_t>(
+ Rand.RandBool() ? Rand(256) : (Rand.RandBool() ? 0 : 255));
for (size_t i = 0; i < N; i++)
Data[Idx + i] = Byte;
return Size + N;
@@ -178,7 +195,8 @@ size_t MutationDispatcher::ApplyDictionaryEntry(uint8_t *Data, size_t Size,
Size += W.size();
} else { // Overwrite some bytes with W.
if (W.size() > Size) return 0;
- size_t Idx = UsePositionHint ? DE.GetPositionHint() : Rand(Size - W.size());
+ size_t Idx =
+ UsePositionHint ? DE.GetPositionHint() : Rand(Size + 1 - W.size());
memcpy(Data + Idx, W.data(), W.size());
}
return Size;
@@ -227,8 +245,8 @@ DictionaryEntry MutationDispatcher::MakeDictionaryEntryFromCMP(
T Arg1, T Arg2, const uint8_t *Data, size_t Size) {
if (Rand.RandBool()) Arg1 = Bswap(Arg1);
if (Rand.RandBool()) Arg2 = Bswap(Arg2);
- T Arg1Mutation = Arg1 + Rand(-1, 1);
- T Arg2Mutation = Arg2 + Rand(-1, 1);
+ T Arg1Mutation = static_cast<T>(Arg1 + Rand(-1, 1));
+ T Arg2Mutation = static_cast<T>(Arg2 + Rand(-1, 1));
return MakeDictionaryEntryFromCMP(&Arg1, &Arg2, &Arg1Mutation, &Arg2Mutation,
sizeof(Arg1), Data, Size);
}
@@ -245,23 +263,23 @@ size_t MutationDispatcher::Mutate_AddWordFromTORC(
DictionaryEntry DE;
switch (Rand(4)) {
case 0: {
- auto X = TPC.TORC8.Get(Rand.Rand());
+ auto X = TPC.TORC8.Get(Rand.Rand<size_t>());
DE = MakeDictionaryEntryFromCMP(X.A, X.B, Data, Size);
} break;
case 1: {
- auto X = TPC.TORC4.Get(Rand.Rand());
+ auto X = TPC.TORC4.Get(Rand.Rand<size_t>());
if ((X.A >> 16) == 0 && (X.B >> 16) == 0 && Rand.RandBool())
DE = MakeDictionaryEntryFromCMP((uint16_t)X.A, (uint16_t)X.B, Data, Size);
else
DE = MakeDictionaryEntryFromCMP(X.A, X.B, Data, Size);
} break;
case 2: {
- auto X = TPC.TORCW.Get(Rand.Rand());
+ auto X = TPC.TORCW.Get(Rand.Rand<size_t>());
DE = MakeDictionaryEntryFromCMP(X.A, X.B, Data, Size);
} break;
case 3: if (Options.UseMemmem) {
- auto X = TPC.MMT.Get(Rand.Rand());
- DE = DictionaryEntry(X);
+ auto X = TPC.MMT.Get(Rand.Rand<size_t>());
+ DE = DictionaryEntry(X);
} break;
default:
assert(0);
@@ -387,12 +405,12 @@ size_t ChangeBinaryInteger(uint8_t *Data, size_t Size, Random &Rand) {
assert(Off + sizeof(T) <= Size);
T Val;
if (Off < 64 && !Rand(4)) {
- Val = Size;
+ Val = static_cast<T>(Size);
if (Rand.RandBool())
Val = Bswap(Val);
} else {
memcpy(&Val, Data + Off, sizeof(Val));
- T Add = Rand(21);
+ T Add = static_cast<T>(Rand(21));
Add -= 10;
if (Rand.RandBool())
Val = Bswap(T(Bswap(Val) + Add)); // Add assuming different endiannes.
@@ -462,7 +480,7 @@ void MutationDispatcher::RecordSuccessfulMutationSequence() {
assert(DE->GetW().size());
// Linear search is fine here as this happens seldom.
if (!PersistentAutoDictionary.ContainsWord(DE->GetW()))
- PersistentAutoDictionary.push_back({DE->GetW(), 1});
+ PersistentAutoDictionary.push_back(*DE);
}
}
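
Why the +1 in ApplyDictionaryEntry matters (illustrative check, not patch code): when overwriting Size bytes with a word of length W, valid start offsets run from 0 through Size - W inclusive, which is Size + 1 - W choices; the old Rand(Size - W.size()) could never pick the last valid position.

#include <cassert>
#include <cstddef>

int main() {
  // Overwriting W bytes inside a buffer of Size bytes: the start index
  // must satisfy Idx + W <= Size, i.e. Idx in [0, Size - W].
  size_t Size = 8, W = 3;
  size_t choices_old = Size - W;      // 5 -> never selects Idx == 5
  size_t choices_new = Size + 1 - W;  // 6 -> also covers Idx == Size - W
  assert(choices_new == choices_old + 1);
  for (size_t Idx = 0; Idx < choices_new; ++Idx)
    assert(Idx + W <= Size);          // every new choice stays in bounds
  return 0;
}
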
diff --git a/compiler-rt/lib/fuzzer/FuzzerRandom.h b/compiler-rt/lib/fuzzer/FuzzerRandom.h
index 659283eee207..ad6c07eb5ef5 100644
--- a/compiler-rt/lib/fuzzer/FuzzerRandom.h
+++ b/compiler-rt/lib/fuzzer/FuzzerRandom.h
@@ -18,18 +18,27 @@ class Random : public std::minstd_rand {
public:
Random(unsigned int seed) : std::minstd_rand(seed) {}
result_type operator()() { return this->std::minstd_rand::operator()(); }
- size_t Rand() { return this->operator()(); }
- size_t RandBool() { return Rand() % 2; }
+ template <typename T>
+ typename std::enable_if<std::is_integral<T>::value, T>::type Rand() {
+ return static_cast<T>(this->operator()());
+ }
+ size_t RandBool() { return this->operator()() % 2; }
size_t SkewTowardsLast(size_t n) {
size_t T = this->operator()(n * n);
- size_t Res = sqrt(T);
+ size_t Res = static_cast<size_t>(sqrt(T));
return Res;
}
- size_t operator()(size_t n) { return n ? Rand() % n : 0; }
- intptr_t operator()(intptr_t From, intptr_t To) {
+ template <typename T>
+ typename std::enable_if<std::is_integral<T>::value, T>::type operator()(T n) {
+ return n ? Rand<T>() % n : 0;
+ }
+ template <typename T>
+ typename std::enable_if<std::is_integral<T>::value, T>::type
+ operator()(T From, T To) {
assert(From < To);
- intptr_t RangeSize = To - From + 1;
- return operator()(RangeSize) + From;
+ auto RangeSize = static_cast<unsigned long long>(To) -
+ static_cast<unsigned long long>(From) + 1;
+ return static_cast<T>(this->operator()(RangeSize) + From);
}
};
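
A minimal stand-in (not the real FuzzerRandom.h) showing the intended call pattern: callers now spell the result width explicitly, which is why the mutator sites above switched to Rand.Rand<unsigned int>() and Rand.Rand<size_t>().

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <random>
#include <type_traits>

class Random : public std::minstd_rand {
 public:
  explicit Random(unsigned int seed) : std::minstd_rand(seed) {}
  result_type operator()() { return this->std::minstd_rand::operator()(); }
  template <typename T>
  typename std::enable_if<std::is_integral<T>::value, T>::type Rand() {
    return static_cast<T>(this->operator()());
  }
  template <typename T>
  typename std::enable_if<std::is_integral<T>::value, T>::type operator()(T n) {
    return n ? Rand<T>() % n : 0;
  }
};

int main() {
  Random R(1);
  unsigned int seed_arg = R.Rand<unsigned int>();  // e.g. custom-mutator seed
  size_t table_idx = R.Rand<size_t>() % 1024;      // e.g. TORC table lookup
  uint8_t byte = static_cast<uint8_t>(R(256));     // bounded draw, explicit narrowing
  std::printf("%u %zu %u\n", seed_arg, table_idx, byte);
  return 0;
}
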
diff --git a/compiler-rt/lib/fuzzer/FuzzerSHA1.cpp b/compiler-rt/lib/fuzzer/FuzzerSHA1.cpp
index 2005dc700305..b05655c8ef4d 100644
--- a/compiler-rt/lib/fuzzer/FuzzerSHA1.cpp
+++ b/compiler-rt/lib/fuzzer/FuzzerSHA1.cpp
@@ -134,12 +134,13 @@ void sha1_hashBlock(sha1nfo *s) {
s->state[4] += e;
}
-void sha1_addUncounted(sha1nfo *s, uint8_t data) {
- uint8_t * const b = (uint8_t*) s->buffer;
+// Adds the least significant byte of |data|.
+void sha1_addUncounted(sha1nfo *s, uint32_t data) {
+ uint8_t *const b = (uint8_t *)s->buffer;
#ifdef SHA_BIG_ENDIAN
- b[s->bufferOffset] = data;
+ b[s->bufferOffset] = static_cast<uint8_t>(data);
#else
- b[s->bufferOffset ^ 3] = data;
+ b[s->bufferOffset ^ 3] = static_cast<uint8_t>(data);
#endif
s->bufferOffset++;
if (s->bufferOffset == BLOCK_LENGTH) {
diff --git a/compiler-rt/lib/fuzzer/FuzzerTracePC.cpp b/compiler-rt/lib/fuzzer/FuzzerTracePC.cpp
index 91e94d824002..d808b9b00fa3 100644
--- a/compiler-rt/lib/fuzzer/FuzzerTracePC.cpp
+++ b/compiler-rt/lib/fuzzer/FuzzerTracePC.cpp
@@ -106,6 +106,15 @@ void TracePC::PrintModuleInfo() {
}
if (size_t NumExtraCounters = ExtraCountersEnd() - ExtraCountersBegin())
Printf("INFO: %zd Extra Counters\n", NumExtraCounters);
+
+ size_t MaxFeatures = CollectFeatures([](uint32_t) {});
+ if (MaxFeatures > std::numeric_limits<uint32_t>::max())
+ Printf("WARNING: The coverage PC tables may produce up to %zu features.\n"
+ "This exceeds the maximum 32-bit value. Some features may be\n"
+ "ignored, and fuzzing may become less precise. If possible,\n"
+ "consider refactoring the fuzzer into several smaller fuzzers\n"
+ "linked against only a portion of the current target.\n",
+ MaxFeatures);
}
ATTRIBUTE_NO_SANITIZE_ALL
@@ -356,7 +365,7 @@ void TracePC::AddValueForMemcmp(void *caller_pc, const void *s1, const void *s2,
uint8_t HammingDistance = 0;
for (; I < Len; I++) {
if (B1[I] != B2[I] || (StopAtZero && B1[I] == 0)) {
- HammingDistance = Popcountll(B1[I] ^ B2[I]);
+ HammingDistance = static_cast<uint8_t>(Popcountll(B1[I] ^ B2[I]));
break;
}
}
diff --git a/compiler-rt/lib/fuzzer/FuzzerTracePC.h b/compiler-rt/lib/fuzzer/FuzzerTracePC.h
index 00909230731d..a93732972f7d 100644
--- a/compiler-rt/lib/fuzzer/FuzzerTracePC.h
+++ b/compiler-rt/lib/fuzzer/FuzzerTracePC.h
@@ -54,7 +54,7 @@ struct MemMemTable {
void Add(const uint8_t *Data, size_t Size) {
if (Size <= 2) return;
Size = std::min(Size, Word::GetMaxSize());
- size_t Idx = SimpleFastHash(Data, Size) % kSize;
+ auto Idx = SimpleFastHash(Data, Size) % kSize;
MemMemWords[Idx].Set(Data, Size);
}
const Word &Get(size_t Idx) {
@@ -79,7 +79,7 @@ class TracePC {
void SetPrintNewPCs(bool P) { DoPrintNewPCs = P; }
void SetPrintNewFuncs(size_t P) { NumPrintNewFuncs = P; }
void UpdateObservedPCs();
- template <class Callback> void CollectFeatures(Callback CB) const;
+ template <class Callback> size_t CollectFeatures(Callback CB) const;
void ResetMaps() {
ValueProfileMap.Reset();
@@ -193,7 +193,7 @@ size_t ForEachNonZeroByte(const uint8_t *Begin, const uint8_t *End,
Handle8bitCounter(FirstFeature, P - Begin, V);
// Iterate by Step bytes at a time.
- for (; P < End; P += Step)
+ for (; P + Step <= End; P += Step)
if (LargeType Bundle = *reinterpret_cast<const LargeType *>(P)) {
Bundle = HostToLE(Bundle);
for (size_t I = 0; I < Step; I++, Bundle >>= 8)
@@ -234,16 +234,16 @@ unsigned CounterToFeature(T Counter) {
return Bit;
}
-template <class Callback> // void Callback(size_t Feature)
-ATTRIBUTE_NO_SANITIZE_ADDRESS
-ATTRIBUTE_NOINLINE
-void TracePC::CollectFeatures(Callback HandleFeature) const {
+template <class Callback> // void Callback(uint32_t Feature)
+ATTRIBUTE_NO_SANITIZE_ADDRESS ATTRIBUTE_NOINLINE size_t
+TracePC::CollectFeatures(Callback HandleFeature) const {
auto Handle8bitCounter = [&](size_t FirstFeature,
size_t Idx, uint8_t Counter) {
if (UseCounters)
- HandleFeature(FirstFeature + Idx * 8 + CounterToFeature(Counter));
+ HandleFeature(static_cast<uint32_t>(FirstFeature + Idx * 8 +
+ CounterToFeature(Counter)));
else
- HandleFeature(FirstFeature + Idx);
+ HandleFeature(static_cast<uint32_t>(FirstFeature + Idx));
};
size_t FirstFeature = 0;
@@ -263,16 +263,18 @@ void TracePC::CollectFeatures(Callback HandleFeature) const {
if (UseValueProfileMask) {
ValueProfileMap.ForEach([&](size_t Idx) {
- HandleFeature(FirstFeature + Idx);
+ HandleFeature(static_cast<uint32_t>(FirstFeature + Idx));
});
FirstFeature += ValueProfileMap.SizeInBits();
}
// Step function, grows similar to 8 * Log_2(A).
- auto StackDepthStepFunction = [](uint32_t A) -> uint32_t {
- if (!A) return A;
- uint32_t Log2 = Log(A);
- if (Log2 < 3) return A;
+ auto StackDepthStepFunction = [](size_t A) -> size_t {
+ if (!A)
+ return A;
+ auto Log2 = Log(A);
+ if (Log2 < 3)
+ return A;
Log2 -= 3;
return (Log2 + 1) * 8 + ((A >> Log2) & 7);
};
@@ -280,8 +282,13 @@ void TracePC::CollectFeatures(Callback HandleFeature) const {
assert(StackDepthStepFunction(1024 * 4) == 80);
assert(StackDepthStepFunction(1024 * 1024) == 144);
- if (auto MaxStackOffset = GetMaxStackOffset())
- HandleFeature(FirstFeature + StackDepthStepFunction(MaxStackOffset / 8));
+ if (auto MaxStackOffset = GetMaxStackOffset()) {
+ HandleFeature(static_cast<uint32_t>(
+ FirstFeature + StackDepthStepFunction(MaxStackOffset / 8)));
+ FirstFeature += StackDepthStepFunction(std::numeric_limits<size_t>::max());
+ }
+
+ return FirstFeature;
}
extern TracePC TPC;
diff --git a/compiler-rt/lib/fuzzer/FuzzerUtil.cpp b/compiler-rt/lib/fuzzer/FuzzerUtil.cpp
index 7eecb68d0729..05185499bdd1 100644
--- a/compiler-rt/lib/fuzzer/FuzzerUtil.cpp
+++ b/compiler-rt/lib/fuzzer/FuzzerUtil.cpp
@@ -111,7 +111,7 @@ bool ParseOneDictionaryEntry(const std::string &Str, Unit *U) {
char Hex[] = "0xAA";
Hex[2] = Str[Pos + 2];
Hex[3] = Str[Pos + 3];
- U->push_back(strtol(Hex, nullptr, 16));
+ U->push_back(static_cast<uint8_t>(strtol(Hex, nullptr, 16)));
Pos += 3;
continue;
}
@@ -226,10 +226,11 @@ unsigned NumberOfCpuCores() {
return N;
}
-size_t SimpleFastHash(const uint8_t *Data, size_t Size) {
- size_t Res = 0;
+uint64_t SimpleFastHash(const void *Data, size_t Size, uint64_t Initial) {
+ uint64_t Res = Initial;
+ const uint8_t *Bytes = static_cast<const uint8_t *>(Data);
for (size_t i = 0; i < Size; i++)
- Res = Res * 11 + Data[i];
+ Res = Res * 11 + Bytes[i];
return Res;
}
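
The chaining property in isolation (local re-implementation, not the patched file): the new Initial parameter lets callers hash a buffer in pieces, feeding each result into the next call, and get the same value as hashing the whole buffer at once, because every byte just folds in as Res = Res * 11 + byte.

#include <cassert>
#include <cstdint>
#include <cstring>

// Local copy of the updated SimpleFastHash, for illustration only.
static uint64_t SimpleFastHash(const void *Data, size_t Size,
                               uint64_t Initial = 0) {
  uint64_t Res = Initial;
  const uint8_t *Bytes = static_cast<const uint8_t *>(Data);
  for (size_t i = 0; i < Size; i++)
    Res = Res * 11 + Bytes[i];
  return Res;
}

int main() {
  const char buf[] = "feature-bytes";
  size_t n = std::strlen(buf), split = 7;
  uint64_t whole = SimpleFastHash(buf, n);
  uint64_t chained = SimpleFastHash(buf + split, n - split,
                                    SimpleFastHash(buf, split));
  assert(whole == chained);  // chaining == hashing the concatenation
  return 0;
}
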
diff --git a/compiler-rt/lib/fuzzer/FuzzerUtil.h b/compiler-rt/lib/fuzzer/FuzzerUtil.h
index e90be085008e..a188a7be32a5 100644
--- a/compiler-rt/lib/fuzzer/FuzzerUtil.h
+++ b/compiler-rt/lib/fuzzer/FuzzerUtil.h
@@ -88,9 +88,11 @@ std::string DisassembleCmd(const std::string &FileName);
std::string SearchRegexCmd(const std::string &Regex);
-size_t SimpleFastHash(const uint8_t *Data, size_t Size);
+uint64_t SimpleFastHash(const void *Data, size_t Size, uint64_t Initial = 0);
-inline uint32_t Log(uint32_t X) { return 32 - Clz(X) - 1; }
+inline size_t Log(size_t X) {
+ return static_cast<size_t>((sizeof(unsigned long long) * 8) - Clzll(X) - 1);
+}
inline size_t PageSize() { return 4096; }
inline uint8_t *RoundUpByPage(uint8_t *P) {
diff --git a/compiler-rt/lib/fuzzer/FuzzerUtilFuchsia.cpp b/compiler-rt/lib/fuzzer/FuzzerUtilFuchsia.cpp
index af4394616776..5034b4a28d3f 100644
--- a/compiler-rt/lib/fuzzer/FuzzerUtilFuchsia.cpp
+++ b/compiler-rt/lib/fuzzer/FuzzerUtilFuchsia.cpp
@@ -515,7 +515,7 @@ int ExecuteCommand(const Command &Cmd) {
return rc;
}
- return Info.return_code;
+ return static_cast<int>(Info.return_code);
}
bool ExecuteCommand(const Command &BaseCmd, std::string *CmdOutput) {
diff --git a/compiler-rt/lib/fuzzer/FuzzerUtilPosix.cpp b/compiler-rt/lib/fuzzer/FuzzerUtilPosix.cpp
index afb733409ab5..0446d732a9ec 100644
--- a/compiler-rt/lib/fuzzer/FuzzerUtilPosix.cpp
+++ b/compiler-rt/lib/fuzzer/FuzzerUtilPosix.cpp
@@ -77,10 +77,13 @@ static void SetSigaction(int signum,
return;
}
- sigact = {};
- sigact.sa_flags = SA_SIGINFO;
- sigact.sa_sigaction = callback;
- if (sigaction(signum, &sigact, 0)) {
+ struct sigaction new_sigact = {};
+ // Address sanitizer needs SA_ONSTACK (causing the signal handler to run on a
+ // dedicated stack) in order to be able to detect stack overflows; keep the
+ // flag if it's set.
+ new_sigact.sa_flags = SA_SIGINFO | (sigact.sa_flags & SA_ONSTACK);
+ new_sigact.sa_sigaction = callback;
+ if (sigaction(signum, &new_sigact, nullptr)) {
Printf("libFuzzer: sigaction failed with %d\n", errno);
exit(1);
}
diff --git a/compiler-rt/lib/gwp_asan/common.cpp b/compiler-rt/lib/gwp_asan/common.cpp
index 483694d57b7e..b0f6c58bf496 100644
--- a/compiler-rt/lib/gwp_asan/common.cpp
+++ b/compiler-rt/lib/gwp_asan/common.cpp
@@ -40,7 +40,7 @@ constexpr size_t AllocationMetadata::kMaxTraceLengthToCollect;
void AllocationMetadata::RecordAllocation(uintptr_t AllocAddr,
size_t AllocSize) {
Addr = AllocAddr;
- Size = AllocSize;
+ RequestedSize = AllocSize;
IsDeallocated = false;
AllocationTrace.ThreadID = getThreadID();
diff --git a/compiler-rt/lib/gwp_asan/common.h b/compiler-rt/lib/gwp_asan/common.h
index d197711c77fc..7ce367e3ffe9 100644
--- a/compiler-rt/lib/gwp_asan/common.h
+++ b/compiler-rt/lib/gwp_asan/common.h
@@ -49,7 +49,7 @@ struct AllocationMetadata {
static constexpr size_t kMaxTraceLengthToCollect = 128;
// Records the given allocation metadata into this struct.
- void RecordAllocation(uintptr_t Addr, size_t Size);
+ void RecordAllocation(uintptr_t Addr, size_t RequestedSize);
// Record that this allocation is now deallocated.
void RecordDeallocation();
@@ -70,7 +70,7 @@ struct AllocationMetadata {
// valid, as the allocation has never occurred.
uintptr_t Addr = 0;
// Represents the actual size of the allocation.
- size_t Size = 0;
+ size_t RequestedSize = 0;
CallSiteInfo AllocationTrace;
CallSiteInfo DeallocationTrace;
@@ -83,6 +83,8 @@ struct AllocationMetadata {
// crash handler. This, in conjunction with the Metadata array, forms the entire
// set of information required for understanding a GWP-ASan crash.
struct AllocatorState {
+ constexpr AllocatorState() {}
+
// Returns whether the provided pointer is a current sampled allocation that
// is owned by this pool.
GWP_ASAN_ALWAYS_INLINE bool pointerIsMine(const void *Ptr) const {
diff --git a/compiler-rt/lib/gwp_asan/crash_handler.cpp b/compiler-rt/lib/gwp_asan/crash_handler.cpp
index bd7ca5abbb6b..6b4c39edb294 100644
--- a/compiler-rt/lib/gwp_asan/crash_handler.cpp
+++ b/compiler-rt/lib/gwp_asan/crash_handler.cpp
@@ -103,7 +103,7 @@ uintptr_t __gwp_asan_get_allocation_address(
size_t __gwp_asan_get_allocation_size(
const gwp_asan::AllocationMetadata *AllocationMeta) {
- return AllocationMeta->Size;
+ return AllocationMeta->RequestedSize;
}
uint64_t __gwp_asan_get_allocation_thread_id(
diff --git a/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp b/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp
index a1dbbe4f25e9..8ce5fc9c4dfc 100644
--- a/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp
+++ b/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp
@@ -12,6 +12,7 @@
#include "gwp_asan/utilities.h"
#include <assert.h>
+#include <stddef.h>
using AllocationMetadata = gwp_asan::AllocationMetadata;
using Error = gwp_asan::Error;
@@ -32,6 +33,8 @@ size_t roundUpTo(size_t Size, size_t Boundary) {
uintptr_t getPageAddr(uintptr_t Ptr, uintptr_t PageSize) {
return Ptr & ~(PageSize - 1);
}
+
+bool isPowerOfTwo(uintptr_t X) { return (X & (X - 1)) == 0; }
} // anonymous namespace
// Gets the singleton implementation of this class. Thread-compatible until
@@ -63,8 +66,6 @@ void GuardedPoolAllocator::init(const options::Options &Opts) {
assert((PageSize & (PageSize - 1)) == 0);
State.PageSize = PageSize;
- PerfectlyRightAlign = Opts.PerfectlyRightAlign;
-
size_t PoolBytesRequired =
PageSize * (1 + State.MaxSimultaneousAllocations) +
State.MaxSimultaneousAllocations * State.maximumAllocationSize();
@@ -102,9 +103,15 @@ void GuardedPoolAllocator::init(const options::Options &Opts) {
installAtFork();
}
-void GuardedPoolAllocator::disable() { PoolMutex.lock(); }
+void GuardedPoolAllocator::disable() {
+ PoolMutex.lock();
+ BacktraceMutex.lock();
+}
-void GuardedPoolAllocator::enable() { PoolMutex.unlock(); }
+void GuardedPoolAllocator::enable() {
+ PoolMutex.unlock();
+ BacktraceMutex.unlock();
+}
void GuardedPoolAllocator::iterate(void *Base, size_t Size, iterate_callback Cb,
void *Arg) {
@@ -113,7 +120,7 @@ void GuardedPoolAllocator::iterate(void *Base, size_t Size, iterate_callback Cb,
const AllocationMetadata &Meta = Metadata[i];
if (Meta.Addr && !Meta.IsDeallocated && Meta.Addr >= Start &&
Meta.Addr < Start + Size)
- Cb(Meta.Addr, Meta.Size, Arg);
+ Cb(Meta.Addr, Meta.RequestedSize, Arg);
}
}
@@ -138,7 +145,45 @@ void GuardedPoolAllocator::uninitTestOnly() {
*getThreadLocals() = ThreadLocalPackedVariables();
}
-void *GuardedPoolAllocator::allocate(size_t Size) {
+// Note, minimum backing allocation size in GWP-ASan is always one page, and
+// each slot could potentially be multiple pages (but always in
+// page-increments). Thus, for anything that requires less than page size
+// alignment, we don't need to allocate extra padding to ensure the alignment
+// can be met.
+size_t GuardedPoolAllocator::getRequiredBackingSize(size_t Size,
+ size_t Alignment,
+ size_t PageSize) {
+ assert(isPowerOfTwo(Alignment) && "Alignment must be a power of two!");
+ assert(Alignment != 0 && "Alignment should be non-zero");
+ assert(Size != 0 && "Size should be non-zero");
+
+ if (Alignment <= PageSize)
+ return Size;
+
+ return Size + Alignment - PageSize;
+}
+
+uintptr_t GuardedPoolAllocator::alignUp(uintptr_t Ptr, size_t Alignment) {
+ assert(isPowerOfTwo(Alignment) && "Alignment must be a power of two!");
+ assert(Alignment != 0 && "Alignment should be non-zero");
+ if ((Ptr & (Alignment - 1)) == 0)
+ return Ptr;
+
+ Ptr += Alignment - (Ptr & (Alignment - 1));
+ return Ptr;
+}
+
+uintptr_t GuardedPoolAllocator::alignDown(uintptr_t Ptr, size_t Alignment) {
+ assert(isPowerOfTwo(Alignment) && "Alignment must be a power of two!");
+ assert(Alignment != 0 && "Alignment should be non-zero");
+ if ((Ptr & (Alignment - 1)) == 0)
+ return Ptr;
+
+ Ptr -= Ptr & (Alignment - 1);
+ return Ptr;
+}
+
+void *GuardedPoolAllocator::allocate(size_t Size, size_t Alignment) {
// GuardedPagePoolEnd == 0 when GWP-ASan is disabled. If we are disabled, fall
// back to the supporting allocator.
if (State.GuardedPagePoolEnd == 0) {
@@ -148,14 +193,24 @@ void *GuardedPoolAllocator::allocate(size_t Size) {
return nullptr;
}
+ if (Size == 0)
+ Size = 1;
+ if (Alignment == 0)
+ Alignment = alignof(max_align_t);
+
+ if (!isPowerOfTwo(Alignment) || Alignment > State.maximumAllocationSize() ||
+ Size > State.maximumAllocationSize())
+ return nullptr;
+
+ size_t BackingSize = getRequiredBackingSize(Size, Alignment, State.PageSize);
+ if (BackingSize > State.maximumAllocationSize())
+ return nullptr;
+
// Protect against recursivity.
if (getThreadLocals()->RecursiveGuard)
return nullptr;
ScopedRecursiveGuard SRG;
- if (Size == 0 || Size > State.maximumAllocationSize())
- return nullptr;
-
size_t Index;
{
ScopedLock L(PoolMutex);
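
The helpers introduced above only reserve extra backing space when the requested alignment exceeds a page, because every slot is already page-aligned and page-granular. A small self-contained sketch of the same arithmetic, using standalone copies of the helpers plus a few worked cases (the concrete numbers are illustrative only):

#include <cassert>
#include <cstddef>
#include <cstdint>

// Standalone copies of the helpers above, for illustration only.
static bool isPowerOfTwo(uintptr_t X) { return X && (X & (X - 1)) == 0; }

static size_t requiredBackingSize(size_t Size, size_t Alignment,
                                  size_t PageSize) {
  // Slots are page-granular, so alignments up to a page are free; larger
  // alignments may need up to (Alignment - PageSize) bytes of slack.
  return Alignment <= PageSize ? Size : Size + Alignment - PageSize;
}

static uintptr_t alignUp(uintptr_t Ptr, size_t Alignment) {
  assert(isPowerOfTwo(Alignment));
  return (Ptr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);
}

static uintptr_t alignDown(uintptr_t Ptr, size_t Alignment) {
  assert(isPowerOfTwo(Alignment));
  return Ptr & ~(uintptr_t)(Alignment - 1);
}

int main() {
  const size_t PageSize = 4096;
  // A 16-byte allocation with 64-byte alignment fits without extra padding...
  assert(requiredBackingSize(16, 64, PageSize) == 16);
  // ...but alignments larger than a page need slack for the worst case.
  assert(requiredBackingSize(16, 16384, PageSize) == 16 + 16384 - 4096);
  // Right-aligning a 24-byte allocation at the end of a 4096-byte slot:
  assert(alignDown(4096 - 24, 32) == 4064);
  assert(alignUp(4097, 32) == 4128);
  return 0;
}
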
@@ -165,28 +220,35 @@ void *GuardedPoolAllocator::allocate(size_t Size) {
if (Index == kInvalidSlotID)
return nullptr;
- uintptr_t Ptr = State.slotToAddr(Index);
- // Should we right-align this allocation?
- if (getRandomUnsigned32() % 2 == 0) {
- AlignmentStrategy Align = AlignmentStrategy::DEFAULT;
- if (PerfectlyRightAlign)
- Align = AlignmentStrategy::PERFECT;
- Ptr +=
- State.maximumAllocationSize() - rightAlignedAllocationSize(Size, Align);
- }
- AllocationMetadata *Meta = addrToMetadata(Ptr);
+ uintptr_t SlotStart = State.slotToAddr(Index);
+ AllocationMetadata *Meta = addrToMetadata(SlotStart);
+ uintptr_t SlotEnd = State.slotToAddr(Index) + State.maximumAllocationSize();
+ uintptr_t UserPtr;
+ // Randomly choose whether to left-align or right-align the allocation, and
+ // then apply the necessary adjustments to get an aligned pointer.
+ if (getRandomUnsigned32() % 2 == 0)
+ UserPtr = alignUp(SlotStart, Alignment);
+ else
+ UserPtr = alignDown(SlotEnd - Size, Alignment);
+
+ assert(UserPtr >= SlotStart);
+ assert(UserPtr + Size <= SlotEnd);
// If a slot is multiple pages in size, and the allocation takes up a single
// page, we can improve overflow detection by leaving the unused pages as
// unmapped.
const size_t PageSize = State.PageSize;
- allocateInGuardedPool(reinterpret_cast<void *>(getPageAddr(Ptr, PageSize)),
- roundUpTo(Size, PageSize));
+ allocateInGuardedPool(
+ reinterpret_cast<void *>(getPageAddr(UserPtr, PageSize)),
+ roundUpTo(Size, PageSize));
- Meta->RecordAllocation(Ptr, Size);
- Meta->AllocationTrace.RecordBacktrace(Backtrace);
+ Meta->RecordAllocation(UserPtr, Size);
+ {
+ ScopedLock UL(BacktraceMutex);
+ Meta->AllocationTrace.RecordBacktrace(Backtrace);
+ }
- return reinterpret_cast<void *>(Ptr);
+ return reinterpret_cast<void *>(UserPtr);
}
void GuardedPoolAllocator::trapOnAddress(uintptr_t Address, Error E) {
@@ -196,7 +258,10 @@ void GuardedPoolAllocator::trapOnAddress(uintptr_t Address, Error E) {
// Raise a SEGV by touching first guard page.
volatile char *p = reinterpret_cast<char *>(State.GuardedPagePool);
*p = 0;
- __builtin_unreachable();
+ // Normally, would be __builtin_unreachable(), but because of
+ // https://bugs.llvm.org/show_bug.cgi?id=47480, unreachable will DCE the
+ // volatile store above, even though it has side effects.
+ __builtin_trap();
}
void GuardedPoolAllocator::stop() {
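
Replacing __builtin_unreachable() with __builtin_trap() works around the compiler dead-code-eliminating the preceding volatile store (llvm.org/PR47480). A minimal sketch of the resulting pattern, with a hypothetical guard-page pointer:

// Force a visible fault via a volatile store, then trap instead of declaring
// the path unreachable so the store cannot be dead-code-eliminated.
[[noreturn]] static void RaiseSegvThenTrap(volatile char *guard_page) {
  *guard_page = 0;   // Faults when guard_page is an inaccessible guard page.
  __builtin_trap();  // Reached only if the store unexpectedly succeeded.
}
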
@@ -233,6 +298,7 @@ void GuardedPoolAllocator::deallocate(void *Ptr) {
// otherwise non-reentrant unwinders may deadlock.
if (!getThreadLocals()->RecursiveGuard) {
ScopedRecursiveGuard SRG;
+ ScopedLock UL(BacktraceMutex);
Meta->DeallocationTrace.RecordBacktrace(Backtrace);
}
}
@@ -250,7 +316,7 @@ size_t GuardedPoolAllocator::getSize(const void *Ptr) {
ScopedLock L(PoolMutex);
AllocationMetadata *Meta = addrToMetadata(reinterpret_cast<uintptr_t>(Ptr));
assert(Meta->Addr == reinterpret_cast<uintptr_t>(Ptr));
- return Meta->Size;
+ return Meta->RequestedSize;
}
AllocationMetadata *GuardedPoolAllocator::addrToMetadata(uintptr_t Ptr) const {
diff --git a/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h b/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h
index b9972ffd98f7..6d2ce2576c13 100644
--- a/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h
+++ b/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h
@@ -19,6 +19,7 @@
#include <stddef.h>
#include <stdint.h>
+// IWYU pragma: no_include <__stddef_max_align_t.h>
namespace gwp_asan {
// This class is the primary implementation of the allocator portion of GWP-
@@ -93,10 +94,13 @@ public:
return State.pointerIsMine(Ptr);
}
- // Allocate memory in a guarded slot, and return a pointer to the new
- // allocation. Returns nullptr if the pool is empty, the requested size is too
- // large for this pool to handle, or the requested size is zero.
- void *allocate(size_t Size);
+ // Allocate memory in a guarded slot, with the specified `Alignment`. Returns
+ // nullptr if the pool is empty, if the alignment is not a power of two, or
+ // if the size/alignment makes the allocation too large for this pool to
+ // handle. By default, uses strong alignment (i.e. `max_align_t`), see
+ // http://www.open-std.org/jtc1/sc22/wg14/www/docs/n2293.htm for discussion of
+ // alignment issues in the standard.
+ void *allocate(size_t Size, size_t Alignment = alignof(max_align_t));
// Deallocate memory in a guarded slot. The provided pointer must have been
// allocated using this pool. This will set the guarded slot as inaccessible.
@@ -111,6 +115,18 @@ public:
// Returns a pointer to the AllocatorState region.
const AllocatorState *getAllocatorState() const { return &State; }
+ // Exposed as protected for testing.
+protected:
+ // Returns the actual allocation size required to service an allocation with
+ // the provided Size and Alignment.
+ static size_t getRequiredBackingSize(size_t Size, size_t Alignment,
+ size_t PageSize);
+
+ // Returns the provided pointer that meets the specified alignment, depending
+ // on whether it's left or right aligned.
+ static uintptr_t alignUp(uintptr_t Ptr, size_t Alignment);
+ static uintptr_t alignDown(uintptr_t Ptr, size_t Alignment);
+
private:
// Name of actively-occupied slot mappings.
static constexpr const char *kGwpAsanAliveSlotName = "GWP-ASan Alive Slot";
@@ -180,6 +196,10 @@ private:
// A mutex to protect the guarded slot and metadata pool for this class.
Mutex PoolMutex;
+ // Some unwinders can grab the libdl lock. In order to provide atfork
+ // protection, we need to ensure that we allow an unwinding thread to release
+ // the libdl lock before forking.
+ Mutex BacktraceMutex;
// Record the number of allocations that we've sampled. We store this amount so
// that we don't randomly choose to recycle a slot that previously had an
// allocation before all the slots have been utilised.
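
BacktraceMutex exists so that disable()/enable() can quiesce unwinding threads, which may hold the libdl lock, around fork(). A minimal sketch, assuming a hypothetical allocator with the same disable()/enable() contract, of how such hooks would typically be wired up with pthread_atfork:

#include <mutex>
#include <pthread.h>

// Hypothetical allocator mirroring the disable()/enable() contract above:
// disable() acquires every lock that an allocating or unwinding thread might
// hold, enable() releases them, so fork() can never happen mid-operation.
struct Allocator {
  std::mutex PoolMutex;
  std::mutex BacktraceMutex;
  void disable() {
    PoolMutex.lock();
    BacktraceMutex.lock();
  }
  void enable() {
    BacktraceMutex.unlock();
    PoolMutex.unlock();
  }
};

static Allocator TheAllocator;

void InstallAtForkHandlers() {
  // prepare runs in the parent right before fork(); the other two callbacks
  // run in the parent and child right after, restoring a consistent state.
  pthread_atfork([] { TheAllocator.disable(); },
                 [] { TheAllocator.enable(); },
                 [] { TheAllocator.enable(); });
}
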
diff --git a/compiler-rt/lib/gwp_asan/optional/backtrace_fuchsia.cpp b/compiler-rt/lib/gwp_asan/optional/backtrace_fuchsia.cpp
index 879312a7631e..09b0325a6fc7 100644
--- a/compiler-rt/lib/gwp_asan/optional/backtrace_fuchsia.cpp
+++ b/compiler-rt/lib/gwp_asan/optional/backtrace_fuchsia.cpp
@@ -8,12 +8,18 @@
#include "gwp_asan/optional/backtrace.h"
-// GWP-ASan on Fuchsia doesn't currently support backtraces.
+#include <zircon/sanitizer.h>
namespace gwp_asan {
namespace backtrace {
-options::Backtrace_t getBacktraceFunction() { return nullptr; }
+// Fuchsia's C library provides safe, fast, best-effort backtraces itself.
+options::Backtrace_t getBacktraceFunction() {
+ return __sanitizer_fast_backtrace;
+}
+
+// These are only used in fatal signal handling, which is not used on Fuchsia.
+
PrintBacktrace_t getPrintBacktraceFunction() { return nullptr; }
SegvBacktrace_t getSegvBacktraceFunction() { return nullptr; }
diff --git a/compiler-rt/lib/gwp_asan/options.inc b/compiler-rt/lib/gwp_asan/options.inc
index 4834daef6003..9900a2ac40df 100644
--- a/compiler-rt/lib/gwp_asan/options.inc
+++ b/compiler-rt/lib/gwp_asan/options.inc
@@ -23,16 +23,6 @@ GWP_ASAN_OPTION(bool, Enabled, GWP_ASAN_DEFAULT_ENABLED,
"Is GWP-ASan enabled? Defaults to " GWP_ASAN_STRINGIFY(
GWP_ASAN_DEFAULT_ENABLED) ".")
-GWP_ASAN_OPTION(
- bool, PerfectlyRightAlign, false,
- "When allocations are right-aligned, should we perfectly align them up to "
- "the page boundary? By default (false), we round up allocation size to the "
- "nearest power of two (1, 2, 4, 8, 16) up to a maximum of 16-byte "
- "alignment for performance reasons. For Bionic, we use 8-byte alignment by "
- "default. Setting this to true can find single byte buffer-overflows for "
- "multibyte allocations at the cost of performance, and may be incompatible "
- "with some architectures.")
-
GWP_ASAN_OPTION(int, MaxSimultaneousAllocations, 16,
"Number of simultaneously-guarded allocations available in the "
"pool. Defaults to 16.")
diff --git a/compiler-rt/lib/gwp_asan/platform_specific/guarded_pool_allocator_fuchsia.cpp b/compiler-rt/lib/gwp_asan/platform_specific/guarded_pool_allocator_fuchsia.cpp
index f58d4b104b39..ca5231a235f5 100644
--- a/compiler-rt/lib/gwp_asan/platform_specific/guarded_pool_allocator_fuchsia.cpp
+++ b/compiler-rt/lib/gwp_asan/platform_specific/guarded_pool_allocator_fuchsia.cpp
@@ -12,7 +12,6 @@
#include <assert.h>
#include <stdint.h>
#include <string.h>
-#include <zircon/limits.h>
#include <zircon/process.h>
#include <zircon/syscalls.h>
@@ -97,7 +96,9 @@ void GuardedPoolAllocator::deallocateInGuardedPool(void *Ptr,
Check(Status == ZX_OK, "Vmar unmapping failed");
}
-size_t GuardedPoolAllocator::getPlatformPageSize() { return ZX_PAGE_SIZE; }
+size_t GuardedPoolAllocator::getPlatformPageSize() {
+ return _zx_system_get_page_size();
+}
void GuardedPoolAllocator::installAtFork() {}
} // namespace gwp_asan
diff --git a/compiler-rt/lib/gwp_asan/platform_specific/utilities_posix.cpp b/compiler-rt/lib/gwp_asan/platform_specific/utilities_posix.cpp
index 28fd22fa7606..735796305509 100644
--- a/compiler-rt/lib/gwp_asan/platform_specific/utilities_posix.cpp
+++ b/compiler-rt/lib/gwp_asan/platform_specific/utilities_posix.cpp
@@ -6,7 +6,10 @@
//
//===----------------------------------------------------------------------===//
+#include <features.h> // IWYU pragma: keep (for __BIONIC__ macro)
+
#ifdef __BIONIC__
+#include "gwp_asan/definitions.h"
#include <stdlib.h>
extern "C" GWP_ASAN_WEAK void android_set_abort_message(const char *);
#else // __BIONIC__
diff --git a/compiler-rt/lib/gwp_asan/utilities.cpp b/compiler-rt/lib/gwp_asan/utilities.cpp
deleted file mode 100644
index 287630f89531..000000000000
--- a/compiler-rt/lib/gwp_asan/utilities.cpp
+++ /dev/null
@@ -1,63 +0,0 @@
-//===-- utilities.cpp -------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "gwp_asan/utilities.h"
-
-#include <assert.h>
-
-namespace gwp_asan {
-// See `bionic/tests/malloc_test.cpp` in the Android source for documentation
-// regarding their alignment guarantees. We always round up to the closest
-// 8-byte window. As GWP-ASan's malloc(X) can always get exactly an X-sized
-// allocation, an allocation that rounds up to 16-bytes will always be given a
-// 16-byte aligned allocation.
-static size_t alignBionic(size_t RealAllocationSize) {
- if (RealAllocationSize % 8 == 0)
- return RealAllocationSize;
- return RealAllocationSize + 8 - (RealAllocationSize % 8);
-}
-
-static size_t alignPowerOfTwo(size_t RealAllocationSize) {
- if (RealAllocationSize <= 2)
- return RealAllocationSize;
- if (RealAllocationSize <= 4)
- return 4;
- if (RealAllocationSize <= 8)
- return 8;
- if (RealAllocationSize % 16 == 0)
- return RealAllocationSize;
- return RealAllocationSize + 16 - (RealAllocationSize % 16);
-}
-
-#ifdef __BIONIC__
-static constexpr AlignmentStrategy PlatformDefaultAlignment =
- AlignmentStrategy::BIONIC;
-#else // __BIONIC__
-static constexpr AlignmentStrategy PlatformDefaultAlignment =
- AlignmentStrategy::POWER_OF_TWO;
-#endif // __BIONIC__
-
-size_t rightAlignedAllocationSize(size_t RealAllocationSize,
- AlignmentStrategy Align) {
- assert(RealAllocationSize > 0);
- if (Align == AlignmentStrategy::DEFAULT)
- Align = PlatformDefaultAlignment;
-
- switch (Align) {
- case AlignmentStrategy::BIONIC:
- return alignBionic(RealAllocationSize);
- case AlignmentStrategy::POWER_OF_TWO:
- return alignPowerOfTwo(RealAllocationSize);
- case AlignmentStrategy::PERFECT:
- return RealAllocationSize;
- case AlignmentStrategy::DEFAULT:
- __builtin_unreachable();
- }
- __builtin_unreachable();
-}
-} // namespace gwp_asan
diff --git a/compiler-rt/lib/gwp_asan/utilities.h b/compiler-rt/lib/gwp_asan/utilities.h
index cee5672b491d..d8bc0e491a3d 100644
--- a/compiler-rt/lib/gwp_asan/utilities.h
+++ b/compiler-rt/lib/gwp_asan/utilities.h
@@ -23,19 +23,6 @@ GWP_ASAN_ALWAYS_INLINE void Check(bool Condition, const char *Message) {
return;
die(Message);
}
-
-enum class AlignmentStrategy {
- // Default => POWER_OF_TWO on most platforms, BIONIC for Android Bionic.
- DEFAULT,
- POWER_OF_TWO,
- BIONIC,
- PERFECT,
-};
-
-// Returns the real size of a right-aligned allocation.
-size_t rightAlignedAllocationSize(
- size_t RealAllocationSize,
- AlignmentStrategy Align = AlignmentStrategy::DEFAULT);
} // namespace gwp_asan
#endif // GWP_ASAN_UTILITIES_H_
diff --git a/compiler-rt/lib/hwasan/hwasan.cpp b/compiler-rt/lib/hwasan/hwasan.cpp
index c5322110cb66..cbe0dee66dcd 100644
--- a/compiler-rt/lib/hwasan/hwasan.cpp
+++ b/compiler-rt/lib/hwasan/hwasan.cpp
@@ -50,6 +50,11 @@ bool hwasan_init_is_running;
int hwasan_report_count = 0;
+uptr kLowShadowStart;
+uptr kLowShadowEnd;
+uptr kHighShadowStart;
+uptr kHighShadowEnd;
+
void Flags::SetDefaults() {
#define HWASAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "hwasan_flags.inc"
@@ -128,16 +133,11 @@ static void InitializeFlags() {
if (common_flags()->help) parser.PrintFlagDescriptions();
}
-static void HWAsanCheckFailed(const char *file, int line, const char *cond,
- u64 v1, u64 v2) {
- Report("HWAddressSanitizer CHECK failed: %s:%d \"%s\" (0x%zx, 0x%zx)\n", file,
- line, cond, (uptr)v1, (uptr)v2);
- PRINT_CURRENT_STACK_CHECK();
- Die();
+static void CheckUnwind() {
+ GET_FATAL_STACK_TRACE_PC_BP(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME());
+ stack.Print();
}
-static constexpr uptr kMemoryUsageBufferSize = 4096;
-
static void HwasanFormatMemoryUsage(InternalScopedString &s) {
HwasanThreadList &thread_list = hwasanThreadList();
auto thread_stats = thread_list.GetThreadStats();
@@ -155,6 +155,8 @@ static void HwasanFormatMemoryUsage(InternalScopedString &s) {
}
#if SANITIZER_ANDROID
+static constexpr uptr kMemoryUsageBufferSize = 4096;
+
static char *memory_usage_buffer = nullptr;
static void InitMemoryUsage() {
@@ -171,7 +173,7 @@ void UpdateMemoryUsage() {
return;
if (!memory_usage_buffer)
InitMemoryUsage();
- InternalScopedString s(kMemoryUsageBufferSize);
+ InternalScopedString s;
HwasanFormatMemoryUsage(s);
internal_strncpy(memory_usage_buffer, s.data(), kMemoryUsageBufferSize - 1);
memory_usage_buffer[kMemoryUsageBufferSize - 1] = '\0';
@@ -180,6 +182,65 @@ void UpdateMemoryUsage() {
void UpdateMemoryUsage() {}
#endif
+void HwasanAtExit() {
+ if (common_flags()->print_module_map)
+ DumpProcessMap();
+ if (flags()->print_stats && (flags()->atexit || hwasan_report_count > 0))
+ ReportStats();
+ if (hwasan_report_count > 0) {
+ // ReportAtExitStatistics();
+ if (common_flags()->exitcode)
+ internal__exit(common_flags()->exitcode);
+ }
+}
+
+void HandleTagMismatch(AccessInfo ai, uptr pc, uptr frame, void *uc,
+ uptr *registers_frame) {
+ InternalMmapVector<BufferedStackTrace> stack_buffer(1);
+ BufferedStackTrace *stack = stack_buffer.data();
+ stack->Reset();
+ stack->Unwind(pc, frame, uc, common_flags()->fast_unwind_on_fatal);
+
+ // The second stack frame contains the failure __hwasan_check function, as
+ // we have a stack frame for the registers saved in __hwasan_tag_mismatch that
+ // we wish to ignore. This (currently) only occurs on AArch64, as x64
+ // implementations use SIGTRAP to implement the failure, and thus do not go
+ // through the stack saver.
+ if (registers_frame && stack->trace && stack->size > 0) {
+ stack->trace++;
+ stack->size--;
+ }
+
+ bool fatal = flags()->halt_on_error || !ai.recover;
+ ReportTagMismatch(stack, ai.addr, ai.size, ai.is_store, fatal,
+ registers_frame);
+}
+
+void HwasanTagMismatch(uptr addr, uptr access_info, uptr *registers_frame,
+ size_t outsize) {
+ __hwasan::AccessInfo ai;
+ ai.is_store = access_info & 0x10;
+ ai.is_load = !ai.is_store;
+ ai.recover = access_info & 0x20;
+ ai.addr = addr;
+ if ((access_info & 0xf) == 0xf)
+ ai.size = outsize;
+ else
+ ai.size = 1 << (access_info & 0xf);
+
+ HandleTagMismatch(ai, (uptr)__builtin_return_address(0),
+ (uptr)__builtin_frame_address(0), nullptr, registers_frame);
+ __builtin_unreachable();
+}
+
+Thread *GetCurrentThread() {
+ uptr *ThreadLongPtr = GetCurrentThreadLongPtr();
+ if (UNLIKELY(*ThreadLongPtr == 0))
+ return nullptr;
+ auto *R = (StackAllocationsRingBuffer *)ThreadLongPtr;
+ return hwasanThreadList().GetThreadByBufferAddress((uptr)R->Next());
+}
+
} // namespace __hwasan
using namespace __hwasan;
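
HwasanTagMismatch above decodes the access_info word produced by the instrumentation: bit 4 marks a store, bit 5 marks a recoverable access, and the low nibble encodes log2 of the access size, with 0xf meaning the size is passed separately. A self-contained sketch of that decoding:

#include <cassert>
#include <cstddef>
#include <cstdint>

struct AccessInfo {
  uintptr_t addr;
  size_t size;
  bool is_store;
  bool is_load;
  bool recover;
};

// Decode the access_info word passed to the tag-mismatch entry points:
// bit 4 = store, bit 5 = recoverable, low nibble = log2(size), with 0xf
// meaning "size is passed separately in outsize".
static AccessInfo DecodeAccessInfo(uintptr_t addr, uintptr_t access_info,
                                   size_t outsize) {
  AccessInfo ai;
  ai.is_store = access_info & 0x10;
  ai.is_load = !ai.is_store;
  ai.recover = access_info & 0x20;
  ai.addr = addr;
  ai.size = ((access_info & 0xf) == 0xf) ? outsize
                                         : (size_t{1} << (access_info & 0xf));
  return ai;
}

int main() {
  // A recoverable 8-byte store: 0x20 | 0x10 | log2(8).
  AccessInfo ai = DecodeAccessInfo(0x1000, 0x20 | 0x10 | 3, 0);
  assert(ai.is_store && ai.recover && ai.size == 8);
  return 0;
}
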
@@ -219,7 +280,7 @@ static void InitLoadedGlobals() {
static void InitInstrumentation() {
if (hwasan_instrumentation_inited) return;
- InitPrctl();
+ InitializeOsSupport();
if (!InitShadow()) {
Printf("FATAL: HWAddressSanitizer cannot mmap the shadow memory.\n");
@@ -228,7 +289,6 @@ static void InitInstrumentation() {
}
InitThreads();
- hwasanThreadList().CreateCurrentThread();
hwasan_instrumentation_inited = 1;
}
@@ -271,7 +331,7 @@ void __hwasan_init() {
InitializeFlags();
// Install tool-specific callbacks in sanitizer_common.
- SetCheckFailedCallback(HWAsanCheckFailed);
+ SetCheckUnwindCallback(CheckUnwind);
__sanitizer_set_report_path(common_flags()->log_path);
@@ -493,12 +553,12 @@ extern "C" void *__hwasan_extra_spill_area() {
}
void __hwasan_print_memory_usage() {
- InternalScopedString s(kMemoryUsageBufferSize);
+ InternalScopedString s;
HwasanFormatMemoryUsage(s);
Printf("%s\n", s.data());
}
-static const u8 kFallbackTag = 0xBB;
+static const u8 kFallbackTag = 0xBB & kTagMask;
u8 __hwasan_generate_tag() {
Thread *t = GetCurrentThread();
@@ -519,4 +579,12 @@ void __sanitizer_print_stack_trace() {
GET_FATAL_STACK_TRACE_PC_BP(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME());
stack.Print();
}
+
+// Entry point for interoperability between __hwasan_tag_mismatch (ASM) and the
+// rest of the mismatch handling code (C++).
+void __hwasan_tag_mismatch4(uptr addr, uptr access_info, uptr *registers_frame,
+ size_t outsize) {
+ __hwasan::HwasanTagMismatch(addr, access_info, registers_frame, outsize);
+}
+
} // extern "C"
diff --git a/compiler-rt/lib/hwasan/hwasan.h b/compiler-rt/lib/hwasan/hwasan.h
index d4521efd089a..7338b696ad34 100644
--- a/compiler-rt/lib/hwasan/hwasan.h
+++ b/compiler-rt/lib/hwasan/hwasan.h
@@ -14,11 +14,12 @@
#ifndef HWASAN_H
#define HWASAN_H
+#include "hwasan_flags.h"
+#include "hwasan_interface_internal.h"
+#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
-#include "hwasan_interface_internal.h"
-#include "hwasan_flags.h"
#include "ubsan/ubsan_platform.h"
#ifndef HWASAN_CONTAINS_UBSAN
@@ -35,10 +36,38 @@
typedef u8 tag_t;
+#if defined(HWASAN_ALIASING_MODE)
+# if !defined(__x86_64__)
+# error Aliasing mode is only supported on x86_64
+# endif
+// Tags are done in middle bits using userspace aliasing.
+constexpr unsigned kAddressTagShift = 39;
+constexpr unsigned kTagBits = 3;
+
+// The alias region is placed next to the shadow so the upper bits of all
+// taggable addresses matches the upper bits of the shadow base. This shift
+// value determines which upper bits must match. It has a floor of 44 since the
+// shadow is always 8TB.
+// TODO(morehouse): In alias mode we can shrink the shadow and use a
+// simpler/faster shadow calculation.
+constexpr unsigned kTaggableRegionCheckShift =
+ __sanitizer::Max(kAddressTagShift + kTagBits + 1U, 44U);
+#elif defined(__x86_64__)
+// Tags are done in upper bits using Intel LAM.
+constexpr unsigned kAddressTagShift = 57;
+constexpr unsigned kTagBits = 6;
+#else
// TBI (Top Byte Ignore) feature of AArch64: bits [63:56] are ignored in address
// translation and can be used to store a tag.
-const unsigned kAddressTagShift = 56;
-const uptr kAddressTagMask = 0xFFUL << kAddressTagShift;
+constexpr unsigned kAddressTagShift = 56;
+constexpr unsigned kTagBits = 8;
+#endif // defined(HWASAN_ALIASING_MODE)
+
+// Mask for extracting tag bits from the lower 8 bits.
+constexpr uptr kTagMask = (1UL << kTagBits) - 1;
+
+// Mask for extracting tag bits from full pointers.
+constexpr uptr kAddressTagMask = kTagMask << kAddressTagShift;
// Minimal alignment of the shadow base address. Determines the space available
// for threads and stack histories. This is an ABI constant.
@@ -50,7 +79,7 @@ const unsigned kRecordFPLShift = 4;
const unsigned kRecordFPModulus = 1 << (64 - kRecordFPShift + kRecordFPLShift);
static inline tag_t GetTagFromPointer(uptr p) {
- return p >> kAddressTagShift;
+ return (p >> kAddressTagShift) & kTagMask;
}
static inline uptr UntagAddr(uptr tagged_addr) {
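
With kTagBits and kTagMask now varying by mode (8-bit TBI tags at bit 56 on AArch64, 6-bit LAM tags at bit 57 on x86_64, 3-bit middle-of-pointer tags in aliasing mode), GetTagFromPointer must mask as well as shift. A minimal sketch of the tag manipulation for the default AArch64 TBI layout, with constants copied from above and an illustrative pointer value:

#include <cassert>
#include <cstdint>

// Constants for the default AArch64 TBI layout shown above: an 8-bit tag in
// the top byte of the pointer.
constexpr unsigned kAddressTagShift = 56;
constexpr unsigned kTagBits = 8;
constexpr uint64_t kTagMask = (1ULL << kTagBits) - 1;
constexpr uint64_t kAddressTagMask = kTagMask << kAddressTagShift;

static uint8_t GetTagFromPointer(uint64_t p) {
  return (p >> kAddressTagShift) & kTagMask;
}
static uint64_t UntagAddr(uint64_t p) { return p & ~kAddressTagMask; }
static uint64_t AddTag(uint64_t p, uint8_t tag) {
  return UntagAddr(p) | (uint64_t{tag} << kAddressTagShift);
}

int main() {
  uint64_t tagged = AddTag(0x00007fff12345678ULL, 0xA5);
  assert(GetTagFromPointer(tagged) == 0xA5);
  assert(UntagAddr(tagged) == 0x00007fff12345678ULL);
  return 0;
}
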
@@ -73,7 +102,7 @@ extern bool hwasan_init_is_running;
extern int hwasan_report_count;
bool InitShadow();
-void InitPrctl();
+void InitializeOsSupport();
void InitThreads();
void InitializeInterceptors();
@@ -105,17 +134,9 @@ void InstallAtExitHandler();
if (hwasan_inited) \
stack.Unwind(pc, bp, nullptr, common_flags()->fast_unwind_on_fatal)
-#define GET_FATAL_STACK_TRACE_HERE \
- GET_FATAL_STACK_TRACE_PC_BP(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME())
-
-#define PRINT_CURRENT_STACK_CHECK() \
- { \
- GET_FATAL_STACK_TRACE_HERE; \
- stack.Print(); \
- }
-
void HwasanTSDInit();
void HwasanTSDThreadInit();
+void HwasanAtExit();
void HwasanOnDeadlySignal(int signo, void *info, void *context);
@@ -125,6 +146,26 @@ void AppendToErrorMessageBuffer(const char *buffer);
void AndroidTestTlsSlot();
+// This is a compiler-generated struct that can be shared between hwasan
+// implementations.
+struct AccessInfo {
+ uptr addr;
+ uptr size;
+ bool is_store;
+ bool is_load;
+ bool recover;
+};
+
+// Given access info and frame information, unwind the stack and report the tag
+// mismatch.
+void HandleTagMismatch(AccessInfo ai, uptr pc, uptr frame, void *uc,
+ uptr *registers_frame = nullptr);
+
+// This dispatches to HandleTagMismatch but sets up the AccessInfo, program
+// counter, and frame pointer.
+void HwasanTagMismatch(uptr addr, uptr access_info, uptr *registers_frame,
+ size_t outsize);
+
} // namespace __hwasan
#define HWASAN_MALLOC_HOOK(ptr, size) \
@@ -162,4 +203,12 @@ typedef struct __hw_jmp_buf_struct __hw_jmp_buf[1];
typedef struct __hw_jmp_buf_struct __hw_sigjmp_buf[1];
#endif // HWASAN_WITH_INTERCEPTORS && __aarch64__
+#define ENSURE_HWASAN_INITED() \
+ do { \
+ CHECK(!hwasan_init_is_running); \
+ if (!hwasan_inited) { \
+ __hwasan_init(); \
+ } \
+ } while (0)
+
#endif // HWASAN_H
diff --git a/compiler-rt/lib/hwasan/hwasan_allocation_functions.cpp b/compiler-rt/lib/hwasan/hwasan_allocation_functions.cpp
new file mode 100644
index 000000000000..6c2a6077866f
--- /dev/null
+++ b/compiler-rt/lib/hwasan/hwasan_allocation_functions.cpp
@@ -0,0 +1,172 @@
+//===-- hwasan_allocation_functions.cpp -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+//
+// Definitions for __sanitizer allocation functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "hwasan.h"
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
+
+using namespace __hwasan;
+
+static uptr allocated_for_dlsym;
+static const uptr kDlsymAllocPoolSize = 1024;
+static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize];
+
+static bool IsInDlsymAllocPool(const void *ptr) {
+ uptr off = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
+ return off < sizeof(alloc_memory_for_dlsym);
+}
+
+static void *AllocateFromLocalPool(uptr size_in_bytes) {
+ uptr size_in_words = RoundUpTo(size_in_bytes, kWordSize) / kWordSize;
+ void *mem = (void *)&alloc_memory_for_dlsym[allocated_for_dlsym];
+ allocated_for_dlsym += size_in_words;
+ CHECK_LT(allocated_for_dlsym, kDlsymAllocPoolSize);
+ return mem;
+}
+
+int __sanitizer_posix_memalign(void **memptr, uptr alignment, uptr size) {
+ GET_MALLOC_STACK_TRACE;
+ CHECK_NE(memptr, 0);
+ int res = hwasan_posix_memalign(memptr, alignment, size, &stack);
+ return res;
+}
+
+void *__sanitizer_memalign(uptr alignment, uptr size) {
+ GET_MALLOC_STACK_TRACE;
+ return hwasan_memalign(alignment, size, &stack);
+}
+
+void *__sanitizer_aligned_alloc(uptr alignment, uptr size) {
+ GET_MALLOC_STACK_TRACE;
+ return hwasan_aligned_alloc(alignment, size, &stack);
+}
+
+void *__sanitizer___libc_memalign(uptr alignment, uptr size) {
+ GET_MALLOC_STACK_TRACE;
+ void *ptr = hwasan_memalign(alignment, size, &stack);
+ if (ptr)
+ DTLS_on_libc_memalign(ptr, size);
+ return ptr;
+}
+
+void *__sanitizer_valloc(uptr size) {
+ GET_MALLOC_STACK_TRACE;
+ return hwasan_valloc(size, &stack);
+}
+
+void *__sanitizer_pvalloc(uptr size) {
+ GET_MALLOC_STACK_TRACE;
+ return hwasan_pvalloc(size, &stack);
+}
+
+void __sanitizer_free(void *ptr) {
+ GET_MALLOC_STACK_TRACE;
+ if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr)))
+ return;
+ hwasan_free(ptr, &stack);
+}
+
+void __sanitizer_cfree(void *ptr) {
+ GET_MALLOC_STACK_TRACE;
+ if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr)))
+ return;
+ hwasan_free(ptr, &stack);
+}
+
+uptr __sanitizer_malloc_usable_size(const void *ptr) {
+ return __sanitizer_get_allocated_size(ptr);
+}
+
+struct __sanitizer_struct_mallinfo __sanitizer_mallinfo() {
+ __sanitizer_struct_mallinfo sret;
+ internal_memset(&sret, 0, sizeof(sret));
+ return sret;
+}
+
+int __sanitizer_mallopt(int cmd, int value) { return 0; }
+
+void __sanitizer_malloc_stats(void) {
+ // FIXME: implement, but don't call REAL(malloc_stats)!
+}
+
+void *__sanitizer_calloc(uptr nmemb, uptr size) {
+ GET_MALLOC_STACK_TRACE;
+ if (UNLIKELY(!hwasan_inited))
+ // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
+ return AllocateFromLocalPool(nmemb * size);
+ return hwasan_calloc(nmemb, size, &stack);
+}
+
+void *__sanitizer_realloc(void *ptr, uptr size) {
+ GET_MALLOC_STACK_TRACE;
+ if (UNLIKELY(IsInDlsymAllocPool(ptr))) {
+ uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
+ uptr copy_size = Min(size, kDlsymAllocPoolSize - offset);
+ void *new_ptr;
+ if (UNLIKELY(!hwasan_inited)) {
+ new_ptr = AllocateFromLocalPool(copy_size);
+ } else {
+ copy_size = size;
+ new_ptr = hwasan_malloc(copy_size, &stack);
+ }
+ internal_memcpy(new_ptr, ptr, copy_size);
+ return new_ptr;
+ }
+ return hwasan_realloc(ptr, size, &stack);
+}
+
+void *__sanitizer_reallocarray(void *ptr, uptr nmemb, uptr size) {
+ GET_MALLOC_STACK_TRACE;
+ return hwasan_reallocarray(ptr, nmemb, size, &stack);
+}
+
+void *__sanitizer_malloc(uptr size) {
+ GET_MALLOC_STACK_TRACE;
+ if (UNLIKELY(!hwasan_init_is_running))
+ ENSURE_HWASAN_INITED();
+ if (UNLIKELY(!hwasan_inited))
+ // Hack: dlsym calls malloc before REAL(malloc) is retrieved from dlsym.
+ return AllocateFromLocalPool(size);
+ return hwasan_malloc(size, &stack);
+}
+
+#if HWASAN_WITH_INTERCEPTORS
+# define INTERCEPTOR_ALIAS(RET, FN, ARGS...) \
+ extern "C" SANITIZER_INTERFACE_ATTRIBUTE RET WRAP(FN)(ARGS) \
+ ALIAS("__sanitizer_" #FN); \
+ extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE RET FN( \
+ ARGS) ALIAS("__sanitizer_" #FN)
+
+INTERCEPTOR_ALIAS(int, posix_memalign, void **memptr, SIZE_T alignment,
+ SIZE_T size);
+INTERCEPTOR_ALIAS(void *, aligned_alloc, SIZE_T alignment, SIZE_T size);
+INTERCEPTOR_ALIAS(void *, __libc_memalign, SIZE_T alignment, SIZE_T size);
+INTERCEPTOR_ALIAS(void *, valloc, SIZE_T size);
+INTERCEPTOR_ALIAS(void, free, void *ptr);
+INTERCEPTOR_ALIAS(uptr, malloc_usable_size, const void *ptr);
+INTERCEPTOR_ALIAS(void *, calloc, SIZE_T nmemb, SIZE_T size);
+INTERCEPTOR_ALIAS(void *, realloc, void *ptr, SIZE_T size);
+INTERCEPTOR_ALIAS(void *, reallocarray, void *ptr, SIZE_T nmemb, SIZE_T size);
+INTERCEPTOR_ALIAS(void *, malloc, SIZE_T size);
+
+# if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
+INTERCEPTOR_ALIAS(void *, memalign, SIZE_T alignment, SIZE_T size);
+INTERCEPTOR_ALIAS(void *, pvalloc, SIZE_T size);
+INTERCEPTOR_ALIAS(void, cfree, void *ptr);
+INTERCEPTOR_ALIAS(__sanitizer_struct_mallinfo, mallinfo);
+INTERCEPTOR_ALIAS(int, mallopt, int cmd, int value);
+INTERCEPTOR_ALIAS(void, malloc_stats, void);
+# endif
+#endif // #if HWASAN_WITH_INTERCEPTORS
diff --git a/compiler-rt/lib/hwasan/hwasan_allocator.cpp b/compiler-rt/lib/hwasan/hwasan_allocator.cpp
index 0b6b7347892e..ef6d4d6c7678 100644
--- a/compiler-rt/lib/hwasan/hwasan_allocator.cpp
+++ b/compiler-rt/lib/hwasan/hwasan_allocator.cpp
@@ -29,8 +29,8 @@ static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;
static atomic_uint8_t hwasan_allocator_tagging_enabled;
-static const tag_t kFallbackAllocTag = 0xBB;
-static const tag_t kFallbackFreeTag = 0xBC;
+static constexpr tag_t kFallbackAllocTag = 0xBB & kTagMask;
+static constexpr tag_t kFallbackFreeTag = 0xBC;
enum RightAlignMode {
kRightAlignNever,
@@ -80,11 +80,29 @@ void GetAllocatorStats(AllocatorStatCounters s) {
allocator.GetStats(s);
}
+uptr GetAliasRegionStart() {
+#if defined(HWASAN_ALIASING_MODE)
+ constexpr uptr kAliasRegionOffset = 1ULL << (kTaggableRegionCheckShift - 1);
+ uptr AliasRegionStart =
+ __hwasan_shadow_memory_dynamic_address + kAliasRegionOffset;
+
+ CHECK_EQ(AliasRegionStart >> kTaggableRegionCheckShift,
+ __hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
+ CHECK_EQ(
+ (AliasRegionStart + kAliasRegionOffset - 1) >> kTaggableRegionCheckShift,
+ __hwasan_shadow_memory_dynamic_address >> kTaggableRegionCheckShift);
+ return AliasRegionStart;
+#else
+ return 0;
+#endif
+}
+
void HwasanAllocatorInit() {
atomic_store_relaxed(&hwasan_allocator_tagging_enabled,
!flags()->disable_allocator_tagging);
SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
- allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
+ allocator.Init(common_flags()->allocator_release_to_os_interval_ms,
+ GetAliasRegionStart());
for (uptr i = 0; i < sizeof(tail_magic); i++)
tail_magic[i] = GetCurrentThread()->GenerateRandomTag();
}
@@ -148,7 +166,8 @@ static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
// Tagging can only be skipped when both tag_in_malloc and tag_in_free are
// false. When tag_in_malloc = false and tag_in_free = true malloc needs to
// retag to 0.
- if ((flags()->tag_in_malloc || flags()->tag_in_free) &&
+ if (InTaggableRegion(reinterpret_cast<uptr>(user_ptr)) &&
+ (flags()->tag_in_malloc || flags()->tag_in_free) &&
atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
if (flags()->tag_in_malloc && malloc_bisect(stack, orig_size)) {
tag_t tag = t ? t->GenerateRandomTag() : kFallbackAllocTag;
@@ -175,6 +194,8 @@ static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
static bool PointerAndMemoryTagsMatch(void *tagged_ptr) {
CHECK(tagged_ptr);
uptr tagged_uptr = reinterpret_cast<uptr>(tagged_ptr);
+ if (!InTaggableRegion(tagged_uptr))
+ return true;
tag_t mem_tag = *reinterpret_cast<tag_t *>(
MemToShadow(reinterpret_cast<uptr>(UntagPtr(tagged_ptr))));
return PossiblyShortTagMatches(mem_tag, tagged_uptr, 1);
@@ -187,9 +208,12 @@ static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
if (!PointerAndMemoryTagsMatch(tagged_ptr))
ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
- void *untagged_ptr = UntagPtr(tagged_ptr);
+ void *untagged_ptr = InTaggableRegion(reinterpret_cast<uptr>(tagged_ptr))
+ ? UntagPtr(tagged_ptr)
+ : tagged_ptr;
void *aligned_ptr = reinterpret_cast<void *>(
RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
+ tag_t pointer_tag = GetTagFromPointer(reinterpret_cast<uptr>(tagged_ptr));
Metadata *meta =
reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
uptr orig_size = meta->get_requested_size();
@@ -219,10 +243,27 @@ static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
Min(TaggedSize(orig_size), (uptr)flags()->max_free_fill_size);
internal_memset(aligned_ptr, flags()->free_fill_byte, fill_size);
}
- if (flags()->tag_in_free && malloc_bisect(stack, 0) &&
- atomic_load_relaxed(&hwasan_allocator_tagging_enabled))
+ if (InTaggableRegion(reinterpret_cast<uptr>(tagged_ptr)) &&
+ flags()->tag_in_free && malloc_bisect(stack, 0) &&
+ atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
+ // Always store full 8-bit tags on free to maximize UAF detection.
+ tag_t tag;
+ if (t) {
+ // Make sure we are not using a short granule tag as a poison tag. This
+ // would make us attempt to read the memory on a UaF.
+ // The tag can be zero if tagging is disabled on this thread.
+ do {
+ tag = t->GenerateRandomTag(/*num_bits=*/8);
+ } while (
+ UNLIKELY((tag < kShadowAlignment || tag == pointer_tag) && tag != 0));
+ } else {
+ static_assert(kFallbackFreeTag >= kShadowAlignment,
+ "fallback tag must not be a short granule tag.");
+ tag = kFallbackFreeTag;
+ }
TagMemoryAligned(reinterpret_cast<uptr>(aligned_ptr), TaggedSize(orig_size),
- t ? t->GenerateRandomTag() : kFallbackFreeTag);
+ tag);
+ }
if (t) {
allocator.Deallocate(t->allocator_cache(), aligned_ptr);
if (auto *ha = t->heap_allocations())
@@ -365,7 +406,7 @@ int hwasan_posix_memalign(void **memptr, uptr alignment, uptr size,
// OOM error is already taken care of by HwasanAllocate.
return errno_ENOMEM;
CHECK(IsAligned((uptr)ptr, alignment));
- *(void **)UntagPtr(memptr) = ptr;
+ *memptr = ptr;
return 0;
}
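
The deallocation path above now always stores a full 8-bit tag, retrying until the tag is neither a possible short-granule value (below the 16-byte granule size) nor equal to the pointer's own tag, unless tagging is disabled (tag 0). A self-contained sketch of that selection rule, using std::mt19937 as a stand-in for the thread's tag generator:

#include <cassert>
#include <cstdint>
#include <random>

constexpr uint8_t kShadowAlignment = 16;

// Stand-in for the thread-local random tag generator.
static uint8_t RandomTag(std::mt19937 &rng) {
  return static_cast<uint8_t>(rng() & 0xff);
}

// Pick a poison tag for freed memory: never a short-granule value, never the
// pointer's own tag, but 0 (tagging disabled) is always acceptable.
static uint8_t PickFreeTag(std::mt19937 &rng, uint8_t pointer_tag) {
  uint8_t tag;
  do {
    tag = RandomTag(rng);
  } while ((tag < kShadowAlignment || tag == pointer_tag) && tag != 0);
  return tag;
}

int main() {
  std::mt19937 rng(1);
  for (int i = 0; i < 1000; ++i) {
    uint8_t t = PickFreeTag(rng, /*pointer_tag=*/0x2a);
    assert(t == 0 || (t >= kShadowAlignment && t != 0x2a));
  }
  return 0;
}
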
diff --git a/compiler-rt/lib/hwasan/hwasan_allocator.h b/compiler-rt/lib/hwasan/hwasan_allocator.h
index 43670a6a3fb7..35c3d6b4bf43 100644
--- a/compiler-rt/lib/hwasan/hwasan_allocator.h
+++ b/compiler-rt/lib/hwasan/hwasan_allocator.h
@@ -13,13 +13,16 @@
#ifndef HWASAN_ALLOCATOR_H
#define HWASAN_ALLOCATOR_H
+#include "hwasan.h"
+#include "hwasan_interface_internal.h"
+#include "hwasan_mapping.h"
+#include "hwasan_poisoning.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_ring_buffer.h"
-#include "hwasan_poisoning.h"
#if !defined(__aarch64__) && !defined(__x86_64__)
#error Unsupported platform
@@ -55,7 +58,12 @@ static const uptr kMaxAllowedMallocSize = 1UL << 40; // 1T
struct AP64 {
static const uptr kSpaceBeg = ~0ULL;
+
+#if defined(HWASAN_ALIASING_MODE)
+ static const uptr kSpaceSize = 1ULL << kAddressTagShift;
+#else
static const uptr kSpaceSize = 0x2000000000ULL;
+#endif
static const uptr kMetadataSize = sizeof(Metadata);
typedef __sanitizer::VeryDenseSizeClassMap SizeClassMap;
using AddressSpaceView = LocalAddressSpaceView;
@@ -102,6 +110,16 @@ typedef RingBuffer<HeapAllocationRecord> HeapAllocationsRingBuffer;
void GetAllocatorStats(AllocatorStatCounters s);
+inline bool InTaggableRegion(uptr addr) {
+#if defined(HWASAN_ALIASING_MODE)
+ // Aliases are mapped next to shadow so that the upper bits match the shadow
+ // base.
+ return (addr >> kTaggableRegionCheckShift) ==
+ (GetShadowOffset() >> kTaggableRegionCheckShift);
+#endif
+ return true;
+}
+
} // namespace __hwasan
#endif // HWASAN_ALLOCATOR_H
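
In aliasing mode, InTaggableRegion accepts only addresses whose bits above kTaggableRegionCheckShift match the shadow base, i.e. addresses inside the alias region mapped next to the shadow. A minimal sketch of the check; the shadow offset below is a made-up value for illustration, not the runtime's real layout:

#include <cassert>
#include <cstdint>

// Aliasing-mode check sketch: an address is taggable only if its bits above
// kTaggableRegionCheckShift match those of the shadow base. The values here
// are assumptions for illustration only.
constexpr unsigned kTaggableRegionCheckShift = 44;
constexpr uint64_t kShadowOffset = 0x0000500000000000ULL;  // hypothetical

static bool InTaggableRegion(uint64_t addr) {
  return (addr >> kTaggableRegionCheckShift) ==
         (kShadowOffset >> kTaggableRegionCheckShift);
}

int main() {
  assert(InTaggableRegion(kShadowOffset + 0x1000));
  assert(!InTaggableRegion(0x0000100000000000ULL));
  return 0;
}
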
diff --git a/compiler-rt/lib/hwasan/hwasan_blacklist.txt b/compiler-rt/lib/hwasan/hwasan_blacklist.txt
deleted file mode 100644
index 395ba28f0212..000000000000
--- a/compiler-rt/lib/hwasan/hwasan_blacklist.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-# Blacklist for HWAddressSanitizer. Turns off instrumentation of particular
-# functions or sources. Use with care. You may set location of blacklist
-# at compile-time using -fsanitize-blacklist=<path> flag.
-
-# Example usage:
-# fun:*bad_function_name*
-# src:file_with_tricky_code.cc
diff --git a/compiler-rt/lib/hwasan/hwasan_checks.h b/compiler-rt/lib/hwasan/hwasan_checks.h
index a8de0fef20f0..ab543ea88beb 100644
--- a/compiler-rt/lib/hwasan/hwasan_checks.h
+++ b/compiler-rt/lib/hwasan/hwasan_checks.h
@@ -13,6 +13,7 @@
#ifndef HWASAN_CHECKS_H
#define HWASAN_CHECKS_H
+#include "hwasan_allocator.h"
#include "hwasan_mapping.h"
#include "sanitizer_common/sanitizer_common.h"
@@ -81,6 +82,8 @@ enum class AccessType { Load, Store };
template <ErrorAction EA, AccessType AT, unsigned LogSize>
__attribute__((always_inline, nodebug)) static void CheckAddress(uptr p) {
+ if (!InTaggableRegion(p))
+ return;
uptr ptr_raw = p & ~kAddressTagMask;
tag_t mem_tag = *(tag_t *)MemToShadow(ptr_raw);
if (UNLIKELY(!PossiblyShortTagMatches(mem_tag, p, 1 << LogSize))) {
@@ -94,7 +97,7 @@ __attribute__((always_inline, nodebug)) static void CheckAddress(uptr p) {
template <ErrorAction EA, AccessType AT>
__attribute__((always_inline, nodebug)) static void CheckAddressSized(uptr p,
uptr sz) {
- if (sz == 0)
+ if (sz == 0 || !InTaggableRegion(p))
return;
tag_t ptr_tag = GetTagFromPointer(p);
uptr ptr_raw = p & ~kAddressTagMask;
diff --git a/compiler-rt/lib/hwasan/hwasan_dynamic_shadow.cpp b/compiler-rt/lib/hwasan/hwasan_dynamic_shadow.cpp
index 12730b29bae3..7642ba6c0bf0 100644
--- a/compiler-rt/lib/hwasan/hwasan_dynamic_shadow.cpp
+++ b/compiler-rt/lib/hwasan/hwasan_dynamic_shadow.cpp
@@ -12,15 +12,17 @@
///
//===----------------------------------------------------------------------===//
-#include "hwasan.h"
#include "hwasan_dynamic_shadow.h"
-#include "hwasan_mapping.h"
-#include "sanitizer_common/sanitizer_common.h"
-#include "sanitizer_common/sanitizer_posix.h"
#include <elf.h>
#include <link.h>
+#include "hwasan.h"
+#include "hwasan_mapping.h"
+#include "hwasan_thread_list.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_posix.h"
+
// The code in this file needs to run in an unrelocated binary. It should not
// access any external symbol, including its own non-hidden globals.
@@ -111,12 +113,27 @@ uptr FindDynamicShadowStart(uptr shadow_size_bytes) {
}
} // namespace __hwasan
+
+#elif SANITIZER_FUCHSIA
+
+namespace __hwasan {
+
+void InitShadowGOT() {}
+
+} // namespace __hwasan
+
#else
namespace __hwasan {
void InitShadowGOT() {}
uptr FindDynamicShadowStart(uptr shadow_size_bytes) {
+# if defined(HWASAN_ALIASING_MODE)
+ constexpr uptr kAliasSize = 1ULL << kAddressTagShift;
+ constexpr uptr kNumAliases = 1ULL << kTagBits;
+ return MapDynamicShadowAndAliases(shadow_size_bytes, kAliasSize, kNumAliases,
+ RingBufferSize());
+# endif
return MapDynamicShadow(shadow_size_bytes, kShadowScale, kShadowBaseAlignment,
kHighMemEnd);
}
diff --git a/compiler-rt/lib/hwasan/hwasan_flags.h b/compiler-rt/lib/hwasan/hwasan_flags.h
index 0a6998f675d6..b17750158d02 100644
--- a/compiler-rt/lib/hwasan/hwasan_flags.h
+++ b/compiler-rt/lib/hwasan/hwasan_flags.h
@@ -12,6 +12,8 @@
#ifndef HWASAN_FLAGS_H
#define HWASAN_FLAGS_H
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
namespace __hwasan {
struct Flags {
diff --git a/compiler-rt/lib/hwasan/hwasan_flags.inc b/compiler-rt/lib/hwasan/hwasan_flags.inc
index 8e431d9c4ff9..18ea47f981be 100644
--- a/compiler-rt/lib/hwasan/hwasan_flags.inc
+++ b/compiler-rt/lib/hwasan/hwasan_flags.inc
@@ -72,3 +72,12 @@ HWASAN_FLAG(uptr, malloc_bisect_right, 0,
HWASAN_FLAG(bool, malloc_bisect_dump, false,
"Print all allocations within [malloc_bisect_left, "
"malloc_bisect_right] range ")
+
+
+// Exit if we fail to enable the AArch64 kernel ABI relaxation which allows
+// tagged pointers in syscalls. This is the default, but being able to disable
+// that behaviour is useful for running the testsuite on more platforms (the
+// testsuite can run since we manually ensure any pointer arguments to syscalls
+// are untagged before the call).
+HWASAN_FLAG(bool, fail_without_syscall_abi, true,
+ "Exit if fail to request relaxed syscall ABI.")
diff --git a/compiler-rt/lib/hwasan/hwasan_fuchsia.cpp b/compiler-rt/lib/hwasan/hwasan_fuchsia.cpp
new file mode 100644
index 000000000000..e299a7e862eb
--- /dev/null
+++ b/compiler-rt/lib/hwasan/hwasan_fuchsia.cpp
@@ -0,0 +1,213 @@
+//===-- hwasan_fuchsia.cpp --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file is a part of HWAddressSanitizer and contains Fuchsia-specific
+/// code.
+///
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_fuchsia.h"
+#if SANITIZER_FUCHSIA
+
+#include "hwasan.h"
+#include "hwasan_interface_internal.h"
+#include "hwasan_report.h"
+#include "hwasan_thread.h"
+#include "hwasan_thread_list.h"
+
+// This TLS variable contains the location of the stack ring buffer and can be
+// used to always find the hwasan thread object associated with the current
+// running thread.
+[[gnu::tls_model("initial-exec")]]
+SANITIZER_INTERFACE_ATTRIBUTE
+THREADLOCAL uptr __hwasan_tls;
+
+namespace __hwasan {
+
+bool InitShadow() {
+ __sanitizer::InitShadowBounds();
+ CHECK_NE(__sanitizer::ShadowBounds.shadow_limit, 0);
+
+ // These variables are used by MemIsShadow for asserting we have a correct
+ // shadow address. On Fuchsia, we only have one region of shadow, so the
+ // bounds of Low shadow can be zero while High shadow represents the true
+ // bounds. Note that these are inclusive ranges.
+ kLowShadowStart = 0;
+ kLowShadowEnd = 0;
+ kHighShadowStart = __sanitizer::ShadowBounds.shadow_base;
+ kHighShadowEnd = __sanitizer::ShadowBounds.shadow_limit - 1;
+
+ return true;
+}
+
+bool MemIsApp(uptr p) {
+ CHECK(GetTagFromPointer(p) == 0);
+ return __sanitizer::ShadowBounds.shadow_limit <= p &&
+ p <= (__sanitizer::ShadowBounds.memory_limit - 1);
+}
+
+// These are known parameters passed to the hwasan runtime on thread creation.
+struct Thread::InitState {
+ uptr stack_bottom, stack_top;
+};
+
+static void FinishThreadInitialization(Thread *thread);
+
+void InitThreads() {
+ // This is the minimal alignment needed for the storage where hwasan threads
+ // and their stack ring buffers are placed. This alignment is necessary so the
+ // stack ring buffer can perform a simple calculation to get the next element
+ // in the RB. The instructions for this calculation are emitted by the
+ // compiler. (Full explanation in hwasan_thread_list.h.)
+ uptr alloc_size = UINT64_C(1) << kShadowBaseAlignment;
+ uptr thread_start = reinterpret_cast<uptr>(
+ MmapAlignedOrDieOnFatalError(alloc_size, alloc_size, __func__));
+
+ InitThreadList(thread_start, alloc_size);
+
+ // Create the hwasan thread object for the current (main) thread. Stack info
+ // for this thread is known from information passed via
+ // __sanitizer_startup_hook.
+ const Thread::InitState state = {
+ .stack_bottom = __sanitizer::MainThreadStackBase,
+ .stack_top =
+ __sanitizer::MainThreadStackBase + __sanitizer::MainThreadStackSize,
+ };
+ FinishThreadInitialization(hwasanThreadList().CreateCurrentThread(&state));
+}
+
+uptr *GetCurrentThreadLongPtr() { return &__hwasan_tls; }
+
+// This is called from the parent thread before the new thread is created. Here
+// we can propagate known info like the stack bounds to Thread::Init before
+// jumping into the thread. We cannot initialize the stack ring buffer yet since
+// we have not entered the new thread.
+static void *BeforeThreadCreateHook(uptr user_id, bool detached,
+ const char *name, uptr stack_bottom,
+ uptr stack_size) {
+ const Thread::InitState state = {
+ .stack_bottom = stack_bottom,
+ .stack_top = stack_bottom + stack_size,
+ };
+ return hwasanThreadList().CreateCurrentThread(&state);
+}
+
+// This sets the stack top and bottom according to the InitState passed to
+// CreateCurrentThread above.
+void Thread::InitStackAndTls(const InitState *state) {
+ CHECK_NE(state->stack_bottom, 0);
+ CHECK_NE(state->stack_top, 0);
+ stack_bottom_ = state->stack_bottom;
+ stack_top_ = state->stack_top;
+ tls_end_ = tls_begin_ = 0;
+}
+
+// This is called after creating a new thread with the pointer returned by
+// BeforeThreadCreateHook. We are still in the creating thread and should check
+// if it was actually created correctly.
+static void ThreadCreateHook(void *hook, bool aborted) {
+ Thread *thread = static_cast<Thread *>(hook);
+ if (!aborted) {
+ // The thread was created successfully.
+ // ThreadStartHook can already be running in the new thread.
+ } else {
+ // The thread wasn't created after all.
+ // Clean up everything we set up in BeforeThreadCreateHook.
+ atomic_signal_fence(memory_order_seq_cst);
+ hwasanThreadList().ReleaseThread(thread);
+ }
+}
+
+// This is called in the newly-created thread before it runs anything else,
+// with the pointer returned by BeforeThreadCreateHook (above). Here we can
+// setup the stack ring buffer.
+static void ThreadStartHook(void *hook, thrd_t self) {
+ Thread *thread = static_cast<Thread *>(hook);
+ FinishThreadInitialization(thread);
+ thread->InitRandomState();
+}
+
+// This is the function that sets up the stack ring buffer and enables us to use
+// GetCurrentThread. This function should only be called while IN the thread
+// that we want to create the hwasan thread object for so __hwasan_tls can be
+// properly referenced.
+static void FinishThreadInitialization(Thread *thread) {
+ CHECK_NE(thread, nullptr);
+
+ // The ring buffer is located immediately before the thread object.
+ uptr stack_buffer_size = hwasanThreadList().GetRingBufferSize();
+ uptr stack_buffer_start = reinterpret_cast<uptr>(thread) - stack_buffer_size;
+ thread->InitStackRingBuffer(stack_buffer_start, stack_buffer_size);
+}
+
+static void ThreadExitHook(void *hook, thrd_t self) {
+ Thread *thread = static_cast<Thread *>(hook);
+ atomic_signal_fence(memory_order_seq_cst);
+ hwasanThreadList().ReleaseThread(thread);
+}
+
+uptr TagMemoryAligned(uptr p, uptr size, tag_t tag) {
+ CHECK(IsAligned(p, kShadowAlignment));
+ CHECK(IsAligned(size, kShadowAlignment));
+ __sanitizer_fill_shadow(p, size, tag,
+ common_flags()->clear_shadow_mmap_threshold);
+ return AddTagToPointer(p, tag);
+}
+
+// Not implemented because Fuchsia does not use signal handlers.
+void HwasanOnDeadlySignal(int signo, void *info, void *context) {}
+
+// Not implemented because Fuchsia does not use interceptors.
+void InitializeInterceptors() {}
+
+// Not implemented because this is only relevant for Android.
+void AndroidTestTlsSlot() {}
+
+// TSD was normally used on linux as a means of calling the hwasan thread exit
+// handler passed to pthread_key_create. This is not needed on Fuchsia because
+// we will be using __sanitizer_thread_exit_hook.
+void HwasanTSDInit() {}
+void HwasanTSDThreadInit() {}
+
+// On linux, this just would call `atexit(HwasanAtExit)`. The functions in
+// HwasanAtExit are unimplemented for Fuchsia and effectively no-ops, so this
+// function is unneeded.
+void InstallAtExitHandler() {}
+
+// TODO(fxbug.dev/81499): Once we finalize the tagged pointer ABI in zircon,
+// we should come back here and implement the appropriate check that TBI is
+// enabled.
+void InitializeOsSupport() {}
+
+} // namespace __hwasan
+
+extern "C" {
+
+void *__sanitizer_before_thread_create_hook(thrd_t thread, bool detached,
+ const char *name, void *stack_base,
+ size_t stack_size) {
+ return __hwasan::BeforeThreadCreateHook(
+ reinterpret_cast<uptr>(thread), detached, name,
+ reinterpret_cast<uptr>(stack_base), stack_size);
+}
+
+void __sanitizer_thread_create_hook(void *hook, thrd_t thread, int error) {
+ __hwasan::ThreadCreateHook(hook, error != thrd_success);
+}
+
+void __sanitizer_thread_start_hook(void *hook, thrd_t self) {
+ __hwasan::ThreadStartHook(hook, reinterpret_cast<uptr>(self));
+}
+
+void __sanitizer_thread_exit_hook(void *hook, thrd_t self) {
+ __hwasan::ThreadExitHook(hook, self);
+}
+
+} // extern "C"
+
+#endif // SANITIZER_FUCHSIA
diff --git a/compiler-rt/lib/hwasan/hwasan_ignorelist.txt b/compiler-rt/lib/hwasan/hwasan_ignorelist.txt
new file mode 100644
index 000000000000..70590c970f55
--- /dev/null
+++ b/compiler-rt/lib/hwasan/hwasan_ignorelist.txt
@@ -0,0 +1,7 @@
+# Ignorelist for HWAddressSanitizer. Turns off instrumentation of particular
+# functions or sources. Use with care. You may set location of ignorelist
+# at compile-time using -fsanitize-ignorelist=<path> flag.
+
+# Example usage:
+# fun:*bad_function_name*
+# src:file_with_tricky_code.cc
diff --git a/compiler-rt/lib/hwasan/hwasan_interceptors.cpp b/compiler-rt/lib/hwasan/hwasan_interceptors.cpp
index 44e569ee6d72..68f8adec0776 100644
--- a/compiler-rt/lib/hwasan/hwasan_interceptors.cpp
+++ b/compiler-rt/lib/hwasan/hwasan_interceptors.cpp
@@ -16,192 +16,14 @@
#include "interception/interception.h"
#include "hwasan.h"
-#include "hwasan_allocator.h"
-#include "hwasan_mapping.h"
#include "hwasan_thread.h"
-#include "hwasan_poisoning.h"
-#include "hwasan_report.h"
-#include "sanitizer_common/sanitizer_platform_limits_posix.h"
-#include "sanitizer_common/sanitizer_allocator.h"
-#include "sanitizer_common/sanitizer_allocator_interface.h"
-#include "sanitizer_common/sanitizer_allocator_internal.h"
-#include "sanitizer_common/sanitizer_atomic.h"
-#include "sanitizer_common/sanitizer_common.h"
-#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
-#include "sanitizer_common/sanitizer_libc.h"
-#include "sanitizer_common/sanitizer_linux.h"
-#include "sanitizer_common/sanitizer_tls_get_addr.h"
-#include <stdarg.h>
-// ACHTUNG! No other system header includes in this file.
-// Ideally, we should get rid of stdarg.h as well.
+#if !SANITIZER_FUCHSIA
using namespace __hwasan;
-using __sanitizer::memory_order;
-using __sanitizer::atomic_load;
-using __sanitizer::atomic_store;
-using __sanitizer::atomic_uintptr_t;
-
-static uptr allocated_for_dlsym;
-static const uptr kDlsymAllocPoolSize = 1024;
-static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize];
-
-static bool IsInDlsymAllocPool(const void *ptr) {
- uptr off = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
- return off < sizeof(alloc_memory_for_dlsym);
-}
-
-static void *AllocateFromLocalPool(uptr size_in_bytes) {
- uptr size_in_words = RoundUpTo(size_in_bytes, kWordSize) / kWordSize;
- void *mem = (void *)&alloc_memory_for_dlsym[allocated_for_dlsym];
- allocated_for_dlsym += size_in_words;
- CHECK_LT(allocated_for_dlsym, kDlsymAllocPoolSize);
- return mem;
-}
-
-#define ENSURE_HWASAN_INITED() do { \
- CHECK(!hwasan_init_is_running); \
- if (!hwasan_inited) { \
- __hwasan_init(); \
- } \
-} while (0)
-
-
-int __sanitizer_posix_memalign(void **memptr, uptr alignment, uptr size) {
- GET_MALLOC_STACK_TRACE;
- CHECK_NE(memptr, 0);
- int res = hwasan_posix_memalign(memptr, alignment, size, &stack);
- return res;
-}
-
-void * __sanitizer_memalign(uptr alignment, uptr size) {
- GET_MALLOC_STACK_TRACE;
- return hwasan_memalign(alignment, size, &stack);
-}
-
-void * __sanitizer_aligned_alloc(uptr alignment, uptr size) {
- GET_MALLOC_STACK_TRACE;
- return hwasan_aligned_alloc(alignment, size, &stack);
-}
-
-void * __sanitizer___libc_memalign(uptr alignment, uptr size) {
- GET_MALLOC_STACK_TRACE;
- void *ptr = hwasan_memalign(alignment, size, &stack);
- if (ptr)
- DTLS_on_libc_memalign(ptr, size);
- return ptr;
-}
-
-void * __sanitizer_valloc(uptr size) {
- GET_MALLOC_STACK_TRACE;
- return hwasan_valloc(size, &stack);
-}
-
-void * __sanitizer_pvalloc(uptr size) {
- GET_MALLOC_STACK_TRACE;
- return hwasan_pvalloc(size, &stack);
-}
-
-void __sanitizer_free(void *ptr) {
- GET_MALLOC_STACK_TRACE;
- if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr))) return;
- hwasan_free(ptr, &stack);
-}
-
-void __sanitizer_cfree(void *ptr) {
- GET_MALLOC_STACK_TRACE;
- if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr))) return;
- hwasan_free(ptr, &stack);
-}
-
-uptr __sanitizer_malloc_usable_size(const void *ptr) {
- return __sanitizer_get_allocated_size(ptr);
-}
-
-struct __sanitizer_struct_mallinfo __sanitizer_mallinfo() {
- __sanitizer_struct_mallinfo sret;
- internal_memset(&sret, 0, sizeof(sret));
- return sret;
-}
-
-int __sanitizer_mallopt(int cmd, int value) {
- return 0;
-}
-
-void __sanitizer_malloc_stats(void) {
- // FIXME: implement, but don't call REAL(malloc_stats)!
-}
-
-void * __sanitizer_calloc(uptr nmemb, uptr size) {
- GET_MALLOC_STACK_TRACE;
- if (UNLIKELY(!hwasan_inited))
- // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
- return AllocateFromLocalPool(nmemb * size);
- return hwasan_calloc(nmemb, size, &stack);
-}
-
-void * __sanitizer_realloc(void *ptr, uptr size) {
- GET_MALLOC_STACK_TRACE;
- if (UNLIKELY(IsInDlsymAllocPool(ptr))) {
- uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
- uptr copy_size = Min(size, kDlsymAllocPoolSize - offset);
- void *new_ptr;
- if (UNLIKELY(!hwasan_inited)) {
- new_ptr = AllocateFromLocalPool(copy_size);
- } else {
- copy_size = size;
- new_ptr = hwasan_malloc(copy_size, &stack);
- }
- internal_memcpy(new_ptr, ptr, copy_size);
- return new_ptr;
- }
- return hwasan_realloc(ptr, size, &stack);
-}
-
-void * __sanitizer_reallocarray(void *ptr, uptr nmemb, uptr size) {
- GET_MALLOC_STACK_TRACE;
- return hwasan_reallocarray(ptr, nmemb, size, &stack);
-}
-
-void * __sanitizer_malloc(uptr size) {
- GET_MALLOC_STACK_TRACE;
- if (UNLIKELY(!hwasan_init_is_running))
- ENSURE_HWASAN_INITED();
- if (UNLIKELY(!hwasan_inited))
- // Hack: dlsym calls malloc before REAL(malloc) is retrieved from dlsym.
- return AllocateFromLocalPool(size);
- return hwasan_malloc(size, &stack);
-}
-
#if HWASAN_WITH_INTERCEPTORS
-#define INTERCEPTOR_ALIAS(RET, FN, ARGS...) \
- extern "C" SANITIZER_INTERFACE_ATTRIBUTE RET WRAP(FN)(ARGS) \
- ALIAS("__sanitizer_" #FN); \
- extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE RET FN( \
- ARGS) ALIAS("__sanitizer_" #FN)
-
-INTERCEPTOR_ALIAS(int, posix_memalign, void **memptr, SIZE_T alignment,
- SIZE_T size);
-INTERCEPTOR_ALIAS(void *, aligned_alloc, SIZE_T alignment, SIZE_T size);
-INTERCEPTOR_ALIAS(void *, __libc_memalign, SIZE_T alignment, SIZE_T size);
-INTERCEPTOR_ALIAS(void *, valloc, SIZE_T size);
-INTERCEPTOR_ALIAS(void, free, void *ptr);
-INTERCEPTOR_ALIAS(uptr, malloc_usable_size, const void *ptr);
-INTERCEPTOR_ALIAS(void *, calloc, SIZE_T nmemb, SIZE_T size);
-INTERCEPTOR_ALIAS(void *, realloc, void *ptr, SIZE_T size);
-INTERCEPTOR_ALIAS(void *, reallocarray, void *ptr, SIZE_T nmemb, SIZE_T size);
-INTERCEPTOR_ALIAS(void *, malloc, SIZE_T size);
-
-#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
-INTERCEPTOR_ALIAS(void *, memalign, SIZE_T alignment, SIZE_T size);
-INTERCEPTOR_ALIAS(void *, pvalloc, SIZE_T size);
-INTERCEPTOR_ALIAS(void, cfree, void *ptr);
-INTERCEPTOR_ALIAS(__sanitizer_struct_mallinfo, mallinfo);
-INTERCEPTOR_ALIAS(int, mallopt, int cmd, int value);
-INTERCEPTOR_ALIAS(void, malloc_stats, void);
-#endif
struct ThreadStartArg {
thread_callback_t callback;
@@ -221,8 +43,7 @@ INTERCEPTOR(int, pthread_create, void *th, void *attr, void *(*callback)(void*),
ThreadStartArg *A = reinterpret_cast<ThreadStartArg *> (MmapOrDie(
GetPageSizeCached(), "pthread_create"));
*A = {callback, param};
- int res = REAL(pthread_create)(UntagPtr(th), UntagPtr(attr),
- &HwasanThreadStartFunc, A);
+ int res = REAL(pthread_create)(th, attr, &HwasanThreadStartFunc, A);
return res;
}
@@ -347,3 +168,5 @@ void InitializeInterceptors() {
inited = 1;
}
} // namespace __hwasan
+
+#endif // #if !SANITIZER_FUCHSIA
diff --git a/compiler-rt/lib/hwasan/hwasan_interceptors_vfork.S b/compiler-rt/lib/hwasan/hwasan_interceptors_vfork.S
index 23d565936d87..fd20825e3dac 100644
--- a/compiler-rt/lib/hwasan/hwasan_interceptors_vfork.S
+++ b/compiler-rt/lib/hwasan/hwasan_interceptors_vfork.S
@@ -1,4 +1,5 @@
#include "sanitizer_common/sanitizer_asm.h"
+#include "builtins/assembly.h"
#if defined(__linux__) && HWASAN_WITH_INTERCEPTORS
#define COMMON_INTERCEPTOR_SPILL_AREA __hwasan_extra_spill_area
@@ -9,3 +10,5 @@
#endif
NO_EXEC_STACK_DIRECTIVE
+
+GNU_PROPERTY_BTI_PAC
diff --git a/compiler-rt/lib/hwasan/hwasan_linux.cpp b/compiler-rt/lib/hwasan/hwasan_linux.cpp
index e99926d355cf..e22723529f44 100644
--- a/compiler-rt/lib/hwasan/hwasan_linux.cpp
+++ b/compiler-rt/lib/hwasan/hwasan_linux.cpp
@@ -69,10 +69,6 @@ static void ProtectGap(uptr addr, uptr size) {
uptr kLowMemStart;
uptr kLowMemEnd;
-uptr kLowShadowEnd;
-uptr kLowShadowStart;
-uptr kHighShadowStart;
-uptr kHighShadowEnd;
uptr kHighMemStart;
uptr kHighMemEnd;
@@ -114,37 +110,59 @@ static void InitializeShadowBaseAddress(uptr shadow_size_bytes) {
FindDynamicShadowStart(shadow_size_bytes);
}
-void InitPrctl() {
+void InitializeOsSupport() {
#define PR_SET_TAGGED_ADDR_CTRL 55
#define PR_GET_TAGGED_ADDR_CTRL 56
#define PR_TAGGED_ADDR_ENABLE (1UL << 0)
// Check we're running on a kernel that can use the tagged address ABI.
- if (internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0) == (uptr)-1 &&
- errno == EINVAL) {
-#if SANITIZER_ANDROID
+ int local_errno = 0;
+ if (internal_iserror(internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0),
+ &local_errno) &&
+ local_errno == EINVAL) {
+# if SANITIZER_ANDROID || defined(HWASAN_ALIASING_MODE)
// Some older Android kernels have the tagged pointer ABI on
// unconditionally, and hence don't have the tagged-addr prctl while still
// allowing the ABI.
// If targeting Android and the prctl is not around we assume this is the
// case.
return;
-#else
- Printf(
- "FATAL: "
- "HWAddressSanitizer requires a kernel with tagged address ABI.\n");
- Die();
-#endif
+# else
+ if (flags()->fail_without_syscall_abi) {
+ Printf(
+ "FATAL: "
+ "HWAddressSanitizer requires a kernel with tagged address ABI.\n");
+ Die();
+ }
+# endif
}
// Turn on the tagged address ABI.
- if (internal_prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0) ==
- (uptr)-1 ||
- !internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0)) {
- Printf(
- "FATAL: HWAddressSanitizer failed to enable tagged address syscall "
- "ABI.\nSuggest check `sysctl abi.tagged_addr_disabled` "
- "configuration.\n");
- Die();
+ if ((internal_iserror(internal_prctl(PR_SET_TAGGED_ADDR_CTRL,
+ PR_TAGGED_ADDR_ENABLE, 0, 0, 0)) ||
+ !internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0))) {
+# if defined(__x86_64__) && !defined(HWASAN_ALIASING_MODE)
+ // Try the new prctl API for Intel LAM. The API is based on a currently
+ // unsubmitted patch to the Linux kernel (as of May 2021) and is thus
+ // subject to change. Patch is here:
+ // https://lore.kernel.org/linux-mm/20210205151631.43511-12-kirill.shutemov@linux.intel.com/
+ int tag_bits = kTagBits;
+ int tag_shift = kAddressTagShift;
+ if (!internal_iserror(
+ internal_prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE,
+ reinterpret_cast<unsigned long>(&tag_bits),
+ reinterpret_cast<unsigned long>(&tag_shift), 0))) {
+ CHECK_EQ(tag_bits, kTagBits);
+ CHECK_EQ(tag_shift, kAddressTagShift);
+ return;
+ }
+# endif // defined(__x86_64__) && !defined(HWASAN_ALIASING_MODE)
+ if (flags()->fail_without_syscall_abi) {
+ Printf(
+ "FATAL: HWAddressSanitizer failed to enable tagged address syscall "
+ "ABI.\nSuggest check `sysctl abi.tagged_addr_disabled` "
+ "configuration.\n");
+ Die();
+ }
}
#undef PR_SET_TAGGED_ADDR_CTRL
#undef PR_GET_TAGGED_ADDR_CTRL
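Annotation: the hunk above swaps the raw errno check for internal_iserror() and gates the fatal messages behind the new fail_without_syscall_abi flag. As a hedged, standalone illustration of the same tagged-address ABI handshake (reusing the PR_* constants defined above; the helper name is made up and this is not part of the runtime):

// Minimal sketch of opting into the Linux tagged-address ABI. Kernels without
// the ABI fail the "get" prctl with EINVAL; kernels with
// abi.tagged_addr_disabled=1 refuse the "set" prctl.
#include <errno.h>
#include <sys/prctl.h>

#ifndef PR_SET_TAGGED_ADDR_CTRL
#  define PR_SET_TAGGED_ADDR_CTRL 55
#  define PR_GET_TAGGED_ADDR_CTRL 56
#  define PR_TAGGED_ADDR_ENABLE (1UL << 0)
#endif

static int EnableTaggedAddrABI(void) {
  if (prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0) == -1 && errno == EINVAL)
    return -1;  // Kernel does not expose the tagged-addr prctl at all.
  if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0) == -1)
    return -1;  // ABI present but enabling it was refused.
  long ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
  return (ctrl != -1 && (ctrl & PR_TAGGED_ADDR_ENABLE)) ? 0 : -1;
}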
@@ -214,23 +232,16 @@ void InitThreads() {
ProtectGap(thread_space_end,
__hwasan_shadow_memory_dynamic_address - thread_space_end);
InitThreadList(thread_space_start, thread_space_end - thread_space_start);
+ hwasanThreadList().CreateCurrentThread();
}
bool MemIsApp(uptr p) {
+// Memory outside the alias range has non-zero tags.
+# if !defined(HWASAN_ALIASING_MODE)
CHECK(GetTagFromPointer(p) == 0);
- return p >= kHighMemStart || (p >= kLowMemStart && p <= kLowMemEnd);
-}
+# endif
-static void HwasanAtExit(void) {
- if (common_flags()->print_module_map)
- DumpProcessMap();
- if (flags()->print_stats && (flags()->atexit || hwasan_report_count > 0))
- ReportStats();
- if (hwasan_report_count > 0) {
- // ReportAtExitStatistics();
- if (common_flags()->exitcode)
- internal__exit(common_flags()->exitcode);
- }
+ return p >= kHighMemStart || (p >= kLowMemStart && p <= kLowMemEnd);
}
void InstallAtExitHandler() {
@@ -309,22 +320,6 @@ void AndroidTestTlsSlot() {
void AndroidTestTlsSlot() {}
#endif
-Thread *GetCurrentThread() {
- uptr *ThreadLongPtr = GetCurrentThreadLongPtr();
- if (UNLIKELY(*ThreadLongPtr == 0))
- return nullptr;
- auto *R = (StackAllocationsRingBuffer *)ThreadLongPtr;
- return hwasanThreadList().GetThreadByBufferAddress((uptr)R->Next());
-}
-
-struct AccessInfo {
- uptr addr;
- uptr size;
- bool is_store;
- bool is_load;
- bool recover;
-};
-
static AccessInfo GetAccessInfo(siginfo_t *info, ucontext_t *uc) {
// Access type is passed in a platform dependent way (see below) and encoded
// as 0xXY, where X&1 is 1 for store, 0 for load, and X&2 is 1 if the error is
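Annotation: the 0xXY encoding described in the comment above is decoded both here and in the (removed) __hwasan_tag_mismatch4 entry point later in this hunk. A self-contained restatement of that decoding, with illustrative names:

// Bit 4 of access_info selects store vs. load, bit 5 marks a recoverable
// report, and the low nibble is log2 of the access size; the special value
// 0xf means the real size is passed out of band (outsize).
struct DecodedAccess {
  bool is_store, recover;
  unsigned long size;
};

static DecodedAccess DecodeAccessInfo(unsigned long access_info,
                                      unsigned long outsize) {
  DecodedAccess d;
  d.is_store = access_info & 0x10;
  d.recover = access_info & 0x20;
  d.size = ((access_info & 0xf) == 0xf) ? outsize : 1ul << (access_info & 0xf);
  return d;
}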
@@ -375,28 +370,6 @@ static AccessInfo GetAccessInfo(siginfo_t *info, ucontext_t *uc) {
return AccessInfo{addr, size, is_store, !is_store, recover};
}
-static void HandleTagMismatch(AccessInfo ai, uptr pc, uptr frame,
- ucontext_t *uc, uptr *registers_frame = nullptr) {
- InternalMmapVector<BufferedStackTrace> stack_buffer(1);
- BufferedStackTrace *stack = stack_buffer.data();
- stack->Reset();
- stack->Unwind(pc, frame, uc, common_flags()->fast_unwind_on_fatal);
-
- // The second stack frame contains the failure __hwasan_check function, as
- // we have a stack frame for the registers saved in __hwasan_tag_mismatch that
- // we wish to ignore. This (currently) only occurs on AArch64, as x64
- // implementations use SIGTRAP to implement the failure, and thus do not go
- // through the stack saver.
- if (registers_frame && stack->trace && stack->size > 0) {
- stack->trace++;
- stack->size--;
- }
-
- bool fatal = flags()->halt_on_error || !ai.recover;
- ReportTagMismatch(stack, ai.addr, ai.size, ai.is_store, fatal,
- registers_frame);
-}
-
static bool HwasanOnSIGTRAP(int signo, siginfo_t *info, ucontext_t *uc) {
AccessInfo ai = GetAccessInfo(info, uc);
if (!ai.is_store && !ai.is_load)
@@ -429,27 +402,39 @@ void HwasanOnDeadlySignal(int signo, void *info, void *context) {
HandleDeadlySignal(info, context, GetTid(), &OnStackUnwind, nullptr);
}
+void Thread::InitStackAndTls(const InitState *) {
+ uptr tls_size;
+ uptr stack_size;
+ GetThreadStackAndTls(IsMainThread(), &stack_bottom_, &stack_size, &tls_begin_,
+ &tls_size);
+ stack_top_ = stack_bottom_ + stack_size;
+ tls_end_ = tls_begin_ + tls_size;
+}
-} // namespace __hwasan
-
-// Entry point for interoperability between __hwasan_tag_mismatch (ASM) and the
-// rest of the mismatch handling code (C++).
-void __hwasan_tag_mismatch4(uptr addr, uptr access_info, uptr *registers_frame,
- size_t outsize) {
- __hwasan::AccessInfo ai;
- ai.is_store = access_info & 0x10;
- ai.is_load = !ai.is_store;
- ai.recover = access_info & 0x20;
- ai.addr = addr;
- if ((access_info & 0xf) == 0xf)
- ai.size = outsize;
- else
- ai.size = 1 << (access_info & 0xf);
-
- __hwasan::HandleTagMismatch(ai, (uptr)__builtin_return_address(0),
- (uptr)__builtin_frame_address(0), nullptr,
- registers_frame);
- __builtin_unreachable();
+uptr TagMemoryAligned(uptr p, uptr size, tag_t tag) {
+ CHECK(IsAligned(p, kShadowAlignment));
+ CHECK(IsAligned(size, kShadowAlignment));
+ uptr shadow_start = MemToShadow(p);
+ uptr shadow_size = MemToShadowSize(size);
+
+ uptr page_size = GetPageSizeCached();
+ uptr page_start = RoundUpTo(shadow_start, page_size);
+ uptr page_end = RoundDownTo(shadow_start + shadow_size, page_size);
+ uptr threshold = common_flags()->clear_shadow_mmap_threshold;
+ if (SANITIZER_LINUX &&
+ UNLIKELY(page_end >= page_start + threshold && tag == 0)) {
+ internal_memset((void *)shadow_start, tag, page_start - shadow_start);
+ internal_memset((void *)page_end, tag,
+ shadow_start + shadow_size - page_end);
+ // For an anonymous private mapping MADV_DONTNEED will return a zero page on
+ // Linux.
+ ReleaseMemoryPagesToOSAndZeroFill(page_start, page_end);
+ } else {
+ internal_memset((void *)shadow_start, tag, shadow_size);
+ }
+ return AddTagToPointer(p, tag);
}
+} // namespace __hwasan
+
#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
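Annotation: TagMemoryAligned, now hosted in this file, only takes the MADV_DONTNEED fast path for zero tags on ranges past clear_shadow_mmap_threshold. A hedged, runnable sketch of the page split it performs, with an illustrative 4 KiB page size and made-up addresses:

#include <cstdint>
#include <cstdio>
using uptr = uintptr_t;

static uptr RoundUpTo(uptr x, uptr a) { return (x + a - 1) & ~(a - 1); }
static uptr RoundDownTo(uptr x, uptr a) { return x & ~(a - 1); }

int main() {
  // A 64 KiB shadow range (1 MiB of memory at 16-byte granularity) starting
  // at an unaligned shadow address; 4 KiB pages are an assumption here.
  uptr shadow_start = 0x40001234, shadow_size = 0x10000, page_size = 4096;
  uptr page_start = RoundUpTo(shadow_start, page_size);
  uptr page_end = RoundDownTo(shadow_start + shadow_size, page_size);
  // The unaligned head and tail are memset by hand; the whole-page interior
  // can instead be released to the OS, since released anonymous private pages
  // read back as zero -- exactly the tag being written.
  std::printf("memset head %lu, release %lu, memset tail %lu bytes\n",
              (unsigned long)(page_start - shadow_start),
              (unsigned long)(page_end - page_start),
              (unsigned long)(shadow_start + shadow_size - page_end));
  return 0;
}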
diff --git a/compiler-rt/lib/hwasan/hwasan_mapping.h b/compiler-rt/lib/hwasan/hwasan_mapping.h
index c149687bdfa6..79a143632f6a 100644
--- a/compiler-rt/lib/hwasan/hwasan_mapping.h
+++ b/compiler-rt/lib/hwasan/hwasan_mapping.h
@@ -48,12 +48,14 @@ extern uptr kHighShadowEnd;
extern uptr kHighMemStart;
extern uptr kHighMemEnd;
+inline uptr GetShadowOffset() {
+ return SANITIZER_FUCHSIA ? 0 : __hwasan_shadow_memory_dynamic_address;
+}
inline uptr MemToShadow(uptr untagged_addr) {
- return (untagged_addr >> kShadowScale) +
- __hwasan_shadow_memory_dynamic_address;
+ return (untagged_addr >> kShadowScale) + GetShadowOffset();
}
inline uptr ShadowToMem(uptr shadow_addr) {
- return (shadow_addr - __hwasan_shadow_memory_dynamic_address) << kShadowScale;
+ return (shadow_addr - GetShadowOffset()) << kShadowScale;
}
inline uptr MemToShadowSize(uptr size) {
return size >> kShadowScale;
@@ -61,6 +63,13 @@ inline uptr MemToShadowSize(uptr size) {
bool MemIsApp(uptr p);
+inline bool MemIsShadow(uptr p) {
+ return (kLowShadowStart <= p && p <= kLowShadowEnd) ||
+ (kHighShadowStart <= p && p <= kHighShadowEnd);
+}
+
+uptr GetAliasRegionStart();
+
} // namespace __hwasan
#endif // HWASAN_MAPPING_H
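Annotation: GetShadowOffset lets the same MemToShadow/ShadowToMem pair serve Fuchsia (fixed zero offset) and the other platforms (dynamic offset). A hedged sketch of the arithmetic, assuming the usual 16-byte granule (kShadowScale == 4); the base value is illustrative only, since the real one is chosen dynamically:

#include <cstdint>
using uptr = uintptr_t;

constexpr uptr kShadowScale = 4;        // 16-byte granules (assumed here)
const uptr shadow_base = 0x10000000;    // illustrative value only

uptr MemToShadowExample(uptr untagged_addr) {
  return (untagged_addr >> kShadowScale) + shadow_base;  // 1 shadow byte per granule
}
uptr ShadowToMemExample(uptr shadow_addr) {
  return (shadow_addr - shadow_base) << kShadowScale;    // inverse mapping
}
// MemToShadowSize(size) == size >> kShadowScale: a region's shadow is 1/16 its size.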
diff --git a/compiler-rt/lib/hwasan/hwasan_memintrinsics.cpp b/compiler-rt/lib/hwasan/hwasan_memintrinsics.cpp
index e82d77a1bc16..fab017aae60b 100644
--- a/compiler-rt/lib/hwasan/hwasan_memintrinsics.cpp
+++ b/compiler-rt/lib/hwasan/hwasan_memintrinsics.cpp
@@ -24,7 +24,7 @@ using namespace __hwasan;
void *__hwasan_memset(void *block, int c, uptr size) {
CheckAddressSized<ErrorAction::Recover, AccessType::Store>(
reinterpret_cast<uptr>(block), size);
- return memset(UntagPtr(block), c, size);
+ return memset(block, c, size);
}
void *__hwasan_memcpy(void *to, const void *from, uptr size) {
@@ -32,7 +32,7 @@ void *__hwasan_memcpy(void *to, const void *from, uptr size) {
reinterpret_cast<uptr>(to), size);
CheckAddressSized<ErrorAction::Recover, AccessType::Load>(
reinterpret_cast<uptr>(from), size);
- return memcpy(UntagPtr(to), UntagPtr(from), size);
+ return memcpy(to, from, size);
}
void *__hwasan_memmove(void *to, const void *from, uptr size) {
diff --git a/compiler-rt/lib/hwasan/hwasan_new_delete.cpp b/compiler-rt/lib/hwasan/hwasan_new_delete.cpp
index 8d01d3944f2b..4e057a651e1d 100644
--- a/compiler-rt/lib/hwasan/hwasan_new_delete.cpp
+++ b/compiler-rt/lib/hwasan/hwasan_new_delete.cpp
@@ -27,6 +27,12 @@
void *res = hwasan_malloc(size, &stack);\
if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\
return res
+#define OPERATOR_NEW_ALIGN_BODY(nothrow) \
+ GET_MALLOC_STACK_TRACE; \
+ void *res = hwasan_aligned_alloc(static_cast<uptr>(align), size, &stack); \
+ if (!nothrow && UNLIKELY(!res)) \
+ ReportOutOfMemory(size, &stack); \
+ return res
#define OPERATOR_DELETE_BODY \
GET_MALLOC_STACK_TRACE; \
@@ -67,15 +73,63 @@ void *operator new[](size_t size, std::nothrow_t const&) {
OPERATOR_NEW_BODY(true /*nothrow*/);
}
-INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void operator delete(void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; }
-INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void operator delete[](void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; }
-INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void operator delete(void *ptr, std::nothrow_t const&) { OPERATOR_DELETE_BODY; }
-INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void operator delete[](void *ptr, std::nothrow_t const&) {
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete(void *ptr)
+ NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
+ void *ptr) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete(
+ void *ptr, std::nothrow_t const &) {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
+ void *ptr, std::nothrow_t const &) {
+ OPERATOR_DELETE_BODY;
+}
+
+#endif // OPERATOR_NEW_BODY
+
+#ifdef OPERATOR_NEW_ALIGN_BODY
+
+namespace std {
+enum class align_val_t : size_t {};
+} // namespace std
+
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new(
+ size_t size, std::align_val_t align) {
+ OPERATOR_NEW_ALIGN_BODY(false /*nothrow*/);
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new[](
+ size_t size, std::align_val_t align) {
+ OPERATOR_NEW_ALIGN_BODY(false /*nothrow*/);
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new(
+ size_t size, std::align_val_t align, std::nothrow_t const &) {
+ OPERATOR_NEW_ALIGN_BODY(true /*nothrow*/);
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new[](
+ size_t size, std::align_val_t align, std::nothrow_t const &) {
+ OPERATOR_NEW_ALIGN_BODY(true /*nothrow*/);
+}
+
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete(
+ void *ptr, std::align_val_t align) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
+ void *ptr, std::align_val_t) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete(
+ void *ptr, std::align_val_t, std::nothrow_t const &) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
+ void *ptr, std::align_val_t, std::nothrow_t const &) NOEXCEPT {
OPERATOR_DELETE_BODY;
}
-#endif // OPERATOR_NEW_BODY
+#endif // OPERATOR_NEW_ALIGN_BODY
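Annotation: the new OPERATOR_NEW_ALIGN_BODY overloads route C++17 aligned allocation through hwasan_aligned_alloc. A hedged usage sketch (the type name is made up; behavior assumes a C++17 compiler with aligned allocation enabled):

#include <new>

struct alignas(64) CacheLine { char bytes[64]; };

int main() {
  // For an over-aligned type the compiler calls
  // operator new(sizeof(CacheLine), std::align_val_t(64)), i.e. the overload
  // defined above, which forwards to hwasan_aligned_alloc.
  CacheLine *p = new CacheLine;
  delete p;  // matching aligned operator delete
  return 0;
}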
diff --git a/compiler-rt/lib/hwasan/hwasan_poisoning.cpp b/compiler-rt/lib/hwasan/hwasan_poisoning.cpp
index 2a0816428e75..5aafdb1884b5 100644
--- a/compiler-rt/lib/hwasan/hwasan_poisoning.cpp
+++ b/compiler-rt/lib/hwasan/hwasan_poisoning.cpp
@@ -19,30 +19,6 @@
namespace __hwasan {
-uptr TagMemoryAligned(uptr p, uptr size, tag_t tag) {
- CHECK(IsAligned(p, kShadowAlignment));
- CHECK(IsAligned(size, kShadowAlignment));
- uptr shadow_start = MemToShadow(p);
- uptr shadow_size = MemToShadowSize(size);
-
- uptr page_size = GetPageSizeCached();
- uptr page_start = RoundUpTo(shadow_start, page_size);
- uptr page_end = RoundDownTo(shadow_start + shadow_size, page_size);
- uptr threshold = common_flags()->clear_shadow_mmap_threshold;
- if (SANITIZER_LINUX &&
- UNLIKELY(page_end >= page_start + threshold && tag == 0)) {
- internal_memset((void *)shadow_start, tag, page_start - shadow_start);
- internal_memset((void *)page_end, tag,
- shadow_start + shadow_size - page_end);
- // For an anonymous private mapping MADV_DONTNEED will return a zero page on
- // Linux.
- ReleaseMemoryPagesToOSAndZeroFill(page_start, page_end);
- } else {
- internal_memset((void *)shadow_start, tag, shadow_size);
- }
- return AddTagToPointer(p, tag);
-}
-
uptr TagMemory(uptr p, uptr size, tag_t tag) {
uptr start = RoundDownTo(p, kShadowAlignment);
uptr end = RoundUpTo(p + size, kShadowAlignment);
diff --git a/compiler-rt/lib/hwasan/hwasan_report.cpp b/compiler-rt/lib/hwasan/hwasan_report.cpp
index 4448d9243767..44047c9fdaf8 100644
--- a/compiler-rt/lib/hwasan/hwasan_report.cpp
+++ b/compiler-rt/lib/hwasan/hwasan_report.cpp
@@ -224,7 +224,7 @@ static void PrintStackAllocations(StackAllocationsRingBuffer *sa,
// We didn't find any locals. Most likely we don't have symbols, so dump
// the information that we have for offline analysis.
- InternalScopedString frame_desc(GetPageSizeCached() * 2);
+ InternalScopedString frame_desc;
Printf("Previously allocated frames:\n");
for (uptr i = 0; i < frames; i++) {
const uptr *record_addr = &(*sa)[i];
@@ -236,12 +236,12 @@ static void PrintStackAllocations(StackAllocationsRingBuffer *sa,
frame_desc.append(" record_addr:0x%zx record:0x%zx",
reinterpret_cast<uptr>(record_addr), record);
if (SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc)) {
- RenderFrame(&frame_desc, " %F %L\n", 0, frame->info.address, &frame->info,
+ RenderFrame(&frame_desc, " %F %L", 0, frame->info.address, &frame->info,
common_flags()->symbolize_vs_style,
common_flags()->strip_path_prefix);
frame->ClearAll();
}
- Printf("%s", frame_desc.data());
+ Printf("%s\n", frame_desc.data());
frame_desc.clear();
}
}
@@ -296,6 +296,75 @@ static uptr GetGlobalSizeFromDescriptor(uptr ptr) {
return 0;
}
+static void ShowHeapOrGlobalCandidate(uptr untagged_addr, tag_t *candidate,
+ tag_t *left, tag_t *right) {
+ Decorator d;
+ uptr mem = ShadowToMem(reinterpret_cast<uptr>(candidate));
+ HwasanChunkView chunk = FindHeapChunkByAddress(mem);
+ if (chunk.IsAllocated()) {
+ uptr offset;
+ const char *whence;
+ if (untagged_addr < chunk.End() && untagged_addr >= chunk.Beg()) {
+ offset = untagged_addr - chunk.Beg();
+ whence = "inside";
+ } else if (candidate == left) {
+ offset = untagged_addr - chunk.End();
+ whence = "to the right of";
+ } else {
+ offset = chunk.Beg() - untagged_addr;
+ whence = "to the left of";
+ }
+ Printf("%s", d.Error());
+ Printf("\nCause: heap-buffer-overflow\n");
+ Printf("%s", d.Default());
+ Printf("%s", d.Location());
+ Printf("%p is located %zd bytes %s %zd-byte region [%p,%p)\n",
+ untagged_addr, offset, whence, chunk.UsedSize(), chunk.Beg(),
+ chunk.End());
+ Printf("%s", d.Allocation());
+ Printf("allocated here:\n");
+ Printf("%s", d.Default());
+ GetStackTraceFromId(chunk.GetAllocStackId()).Print();
+ return;
+ }
+ // Check whether the address points into a loaded library. If so, this is
+ // most likely a global variable.
+ const char *module_name;
+ uptr module_address;
+ Symbolizer *sym = Symbolizer::GetOrInit();
+ if (sym->GetModuleNameAndOffsetForPC(mem, &module_name, &module_address)) {
+ Printf("%s", d.Error());
+ Printf("\nCause: global-overflow\n");
+ Printf("%s", d.Default());
+ DataInfo info;
+ Printf("%s", d.Location());
+ if (sym->SymbolizeData(mem, &info) && info.start) {
+ Printf(
+ "%p is located %zd bytes to the %s of %zd-byte global variable "
+ "%s [%p,%p) in %s\n",
+ untagged_addr,
+ candidate == left ? untagged_addr - (info.start + info.size)
+ : info.start - untagged_addr,
+ candidate == left ? "right" : "left", info.size, info.name,
+ info.start, info.start + info.size, module_name);
+ } else {
+ uptr size = GetGlobalSizeFromDescriptor(mem);
+ if (size == 0)
+ // We couldn't find the size of the global from the descriptors.
+ Printf("%p is located to the %s of a global variable in (%s+0x%x)\n",
+ untagged_addr, candidate == left ? "right" : "left", module_name,
+ module_address);
+ else
+ Printf(
+ "%p is located to the %s of a %zd-byte global variable in "
+ "(%s+0x%x)\n",
+ untagged_addr, candidate == left ? "right" : "left", size,
+ module_name, module_address);
+ }
+ Printf("%s", d.Default());
+ }
+}
+
void PrintAddressDescription(
uptr tagged_addr, uptr access_size,
StackAllocationsRingBuffer *current_stack_allocations) {
@@ -317,78 +386,59 @@ void PrintAddressDescription(
d.Default());
}
+ tag_t addr_tag = GetTagFromPointer(tagged_addr);
+
+ bool on_stack = false;
+ // Check stack first. If the address is on the stack of a live thread, we
+ // know it cannot be a heap / global overflow.
+ hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
+ if (t->AddrIsInStack(untagged_addr)) {
+ on_stack = true;
+ // TODO(fmayer): figure out how to distinguish use-after-return and
+ // stack-buffer-overflow.
+ Printf("%s", d.Error());
+ Printf("\nCause: stack tag-mismatch\n");
+ Printf("%s", d.Location());
+ Printf("Address %p is located in stack of thread T%zd\n", untagged_addr,
+ t->unique_id());
+ Printf("%s", d.Default());
+ t->Announce();
+
+ auto *sa = (t == GetCurrentThread() && current_stack_allocations)
+ ? current_stack_allocations
+ : t->stack_allocations();
+ PrintStackAllocations(sa, addr_tag, untagged_addr);
+ num_descriptions_printed++;
+ }
+ });
+
// Check if this looks like a heap buffer overflow by scanning
// the shadow left and right and looking for the first adjacent
// object with a different memory tag. If that tag matches addr_tag,
// check the allocator if it has a live chunk there.
- tag_t addr_tag = GetTagFromPointer(tagged_addr);
tag_t *tag_ptr = reinterpret_cast<tag_t*>(MemToShadow(untagged_addr));
tag_t *candidate = nullptr, *left = tag_ptr, *right = tag_ptr;
- for (int i = 0; i < 1000; i++) {
- if (TagsEqual(addr_tag, left)) {
+ uptr candidate_distance = 0;
+ for (; candidate_distance < 1000; candidate_distance++) {
+ if (MemIsShadow(reinterpret_cast<uptr>(left)) &&
+ TagsEqual(addr_tag, left)) {
candidate = left;
break;
}
--left;
- if (TagsEqual(addr_tag, right)) {
+ if (MemIsShadow(reinterpret_cast<uptr>(right)) &&
+ TagsEqual(addr_tag, right)) {
candidate = right;
break;
}
++right;
}
- if (candidate) {
- uptr mem = ShadowToMem(reinterpret_cast<uptr>(candidate));
- HwasanChunkView chunk = FindHeapChunkByAddress(mem);
- if (chunk.IsAllocated()) {
- Printf("%s", d.Location());
- Printf("%p is located %zd bytes to the %s of %zd-byte region [%p,%p)\n",
- untagged_addr,
- candidate == left ? untagged_addr - chunk.End()
- : chunk.Beg() - untagged_addr,
- candidate == left ? "right" : "left", chunk.UsedSize(),
- chunk.Beg(), chunk.End());
- Printf("%s", d.Allocation());
- Printf("allocated here:\n");
- Printf("%s", d.Default());
- GetStackTraceFromId(chunk.GetAllocStackId()).Print();
- num_descriptions_printed++;
- } else {
- // Check whether the address points into a loaded library. If so, this is
- // most likely a global variable.
- const char *module_name;
- uptr module_address;
- Symbolizer *sym = Symbolizer::GetOrInit();
- if (sym->GetModuleNameAndOffsetForPC(mem, &module_name,
- &module_address)) {
- DataInfo info;
- if (sym->SymbolizeData(mem, &info) && info.start) {
- Printf(
- "%p is located %zd bytes to the %s of %zd-byte global variable "
- "%s [%p,%p) in %s\n",
- untagged_addr,
- candidate == left ? untagged_addr - (info.start + info.size)
- : info.start - untagged_addr,
- candidate == left ? "right" : "left", info.size, info.name,
- info.start, info.start + info.size, module_name);
- } else {
- uptr size = GetGlobalSizeFromDescriptor(mem);
- if (size == 0)
- // We couldn't find the size of the global from the descriptors.
- Printf(
- "%p is located to the %s of a global variable in (%s+0x%x)\n",
- untagged_addr, candidate == left ? "right" : "left",
- module_name, module_address);
- else
- Printf(
- "%p is located to the %s of a %zd-byte global variable in "
- "(%s+0x%x)\n",
- untagged_addr, candidate == left ? "right" : "left", size,
- module_name, module_address);
- }
- num_descriptions_printed++;
- }
- }
+ constexpr auto kCloseCandidateDistance = 1;
+
+ if (!on_stack && candidate && candidate_distance <= kCloseCandidateDistance) {
+ ShowHeapOrGlobalCandidate(untagged_addr, candidate, left, right);
+ num_descriptions_printed++;
}
hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
@@ -398,6 +448,8 @@ void PrintAddressDescription(
if (FindHeapAllocation(t->heap_allocations(), tagged_addr, &har,
&ring_index, &num_matching_addrs,
&num_matching_addrs_4b)) {
+ Printf("%s", d.Error());
+ Printf("\nCause: use-after-free\n");
Printf("%s", d.Location());
Printf("%p is located %zd bytes inside of %zd-byte region [%p,%p)\n",
untagged_addr, untagged_addr - UntagAddr(har.tagged_addr),
@@ -424,29 +476,25 @@ void PrintAddressDescription(
t->Announce();
num_descriptions_printed++;
}
-
- // Very basic check for stack memory.
- if (t->AddrIsInStack(untagged_addr)) {
- Printf("%s", d.Location());
- Printf("Address %p is located in stack of thread T%zd\n", untagged_addr,
- t->unique_id());
- Printf("%s", d.Default());
- t->Announce();
-
- auto *sa = (t == GetCurrentThread() && current_stack_allocations)
- ? current_stack_allocations
- : t->stack_allocations();
- PrintStackAllocations(sa, addr_tag, untagged_addr);
- num_descriptions_printed++;
- }
});
+ if (candidate && num_descriptions_printed == 0) {
+ ShowHeapOrGlobalCandidate(untagged_addr, candidate, left, right);
+ num_descriptions_printed++;
+ }
+
// Print the remaining threads, as an extra information, 1 line per thread.
hwasanThreadList().VisitAllLiveThreads([&](Thread *t) { t->Announce(); });
if (!num_descriptions_printed)
// We exhausted our possibilities. Bail out.
Printf("HWAddressSanitizer can not describe address in more detail.\n");
+ if (num_descriptions_printed > 1) {
+ Printf(
+ "There are %d potential causes, printed above in order "
+ "of likeliness.\n",
+ num_descriptions_printed);
+ }
}
void ReportStats() {}
@@ -459,7 +507,7 @@ static void PrintTagInfoAroundAddr(tag_t *tag_ptr, uptr num_rows,
RoundDownTo(reinterpret_cast<uptr>(tag_ptr), row_len));
tag_t *beg_row = center_row_beg - row_len * (num_rows / 2);
tag_t *end_row = center_row_beg + row_len * ((num_rows + 1) / 2);
- InternalScopedString s(GetPageSizeCached() * 8);
+ InternalScopedString s;
for (tag_t *row = beg_row; row < end_row; row += row_len) {
s.append("%s", row == center_row_beg ? "=>" : " ");
s.append("%p:", row);
@@ -538,6 +586,12 @@ void ReportTailOverwritten(StackTrace *stack, uptr tagged_addr, uptr orig_size,
Report("ERROR: %s: %s; heap object [%p,%p) of size %zd\n", SanitizerToolName,
bug_type, untagged_addr, untagged_addr + orig_size, orig_size);
Printf("\n%s", d.Default());
+ Printf(
+ "Stack of invalid access unknown. Issue detected at deallocation "
+ "time.\n");
+ Printf("%s", d.Allocation());
+ Printf("deallocated here:\n");
+ Printf("%s", d.Default());
stack->Print();
HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr);
if (chunk.Beg()) {
@@ -547,7 +601,7 @@ void ReportTailOverwritten(StackTrace *stack, uptr tagged_addr, uptr orig_size,
GetStackTraceFromId(chunk.GetAllocStackId()).Print();
}
- InternalScopedString s(GetPageSizeCached() * 8);
+ InternalScopedString s;
CHECK_GT(tail_size, 0U);
CHECK_LT(tail_size, kShadowAlignment);
u8 *tail = reinterpret_cast<u8*>(untagged_addr + orig_size);
@@ -657,8 +711,10 @@ void ReportRegisters(uptr *frame, uptr pc) {
frame[20], frame[21], frame[22], frame[23]);
Printf(" x24 %016llx x25 %016llx x26 %016llx x27 %016llx\n",
frame[24], frame[25], frame[26], frame[27]);
- Printf(" x28 %016llx x29 %016llx x30 %016llx\n",
- frame[28], frame[29], frame[30]);
+ // hwasan_check* reduces the stack pointer by 256, then __hwasan_tag_mismatch
+ // passes it to this function.
+ Printf(" x28 %016llx x29 %016llx x30 %016llx sp %016llx\n", frame[28],
+ frame[29], frame[30], reinterpret_cast<u8 *>(frame) + 256);
}
} // namespace __hwasan
diff --git a/compiler-rt/lib/hwasan/hwasan_setjmp.S b/compiler-rt/lib/hwasan/hwasan_setjmp.S
index 0c1354331940..381af63363cc 100644
--- a/compiler-rt/lib/hwasan/hwasan_setjmp.S
+++ b/compiler-rt/lib/hwasan/hwasan_setjmp.S
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_asm.h"
+#include "builtins/assembly.h"
#if HWASAN_WITH_INTERCEPTORS && defined(__aarch64__)
#include "sanitizer_common/sanitizer_platform.h"
@@ -34,6 +35,7 @@
ASM_TYPE_FUNCTION(__interceptor_setjmp)
__interceptor_setjmp:
CFI_STARTPROC
+ BTI_C
mov x1, #0
b __interceptor_sigsetjmp
CFI_ENDPROC
@@ -46,6 +48,7 @@ ASM_SIZE(__interceptor_setjmp)
ASM_TYPE_FUNCTION(__interceptor_setjmp_bionic)
__interceptor_setjmp_bionic:
CFI_STARTPROC
+ BTI_C
mov x1, #1
b __interceptor_sigsetjmp
CFI_ENDPROC
@@ -56,6 +59,7 @@ ASM_SIZE(__interceptor_setjmp_bionic)
ASM_TYPE_FUNCTION(__interceptor_sigsetjmp)
__interceptor_sigsetjmp:
CFI_STARTPROC
+ BTI_C
stp x19, x20, [x0, #0<<3]
stp x21, x22, [x0, #2<<3]
stp x23, x24, [x0, #4<<3]
@@ -98,3 +102,5 @@ ALIAS __interceptor_setjmp, _setjmp
// We do not need executable stack.
NO_EXEC_STACK_DIRECTIVE
+
+GNU_PROPERTY_BTI_PAC
diff --git a/compiler-rt/lib/hwasan/hwasan_tag_mismatch_aarch64.S b/compiler-rt/lib/hwasan/hwasan_tag_mismatch_aarch64.S
index 08df12736bb4..bcb0df420190 100644
--- a/compiler-rt/lib/hwasan/hwasan_tag_mismatch_aarch64.S
+++ b/compiler-rt/lib/hwasan/hwasan_tag_mismatch_aarch64.S
@@ -1,4 +1,5 @@
#include "sanitizer_common/sanitizer_asm.h"
+#include "builtins/assembly.h"
// The content of this file is AArch64-only:
#if defined(__aarch64__)
@@ -74,6 +75,8 @@
.global __hwasan_tag_mismatch
.type __hwasan_tag_mismatch, %function
__hwasan_tag_mismatch:
+ BTI_J
+
// Compute the granule position one past the end of the access.
mov x16, #1
and x17, x1, #0xf
@@ -106,6 +109,7 @@ __hwasan_tag_mismatch:
.type __hwasan_tag_mismatch_v2, %function
__hwasan_tag_mismatch_v2:
CFI_STARTPROC
+ BTI_J
// Set the CFA to be the return address for caller of __hwasan_check_*. Note
// that we do not emit CFI predicates to describe the contents of this stack
@@ -150,3 +154,5 @@ __hwasan_tag_mismatch_v2:
// We do not need executable stack.
NO_EXEC_STACK_DIRECTIVE
+
+GNU_PROPERTY_BTI_PAC
diff --git a/compiler-rt/lib/hwasan/hwasan_thread.cpp b/compiler-rt/lib/hwasan/hwasan_thread.cpp
index b81a6350c05c..ee747a3beea5 100644
--- a/compiler-rt/lib/hwasan/hwasan_thread.cpp
+++ b/compiler-rt/lib/hwasan/hwasan_thread.cpp
@@ -34,12 +34,28 @@ void Thread::InitRandomState() {
stack_allocations_->push(0);
}
-void Thread::Init(uptr stack_buffer_start, uptr stack_buffer_size) {
+void Thread::Init(uptr stack_buffer_start, uptr stack_buffer_size,
+ const InitState *state) {
+ CHECK_EQ(0, unique_id_); // try to catch bad stack reuse
+ CHECK_EQ(0, stack_top_);
+ CHECK_EQ(0, stack_bottom_);
+
static u64 unique_id;
unique_id_ = unique_id++;
if (auto sz = flags()->heap_history_size)
heap_allocations_ = HeapAllocationsRingBuffer::New(sz);
+ InitStackAndTls(state);
+#if !SANITIZER_FUCHSIA
+ // Do not initialize the stack ring buffer just yet on Fuchsia. Threads will
+ // be initialized before we enter the thread itself, so we will instead call
+ // this later.
+ InitStackRingBuffer(stack_buffer_start, stack_buffer_size);
+#endif
+}
+
+void Thread::InitStackRingBuffer(uptr stack_buffer_start,
+ uptr stack_buffer_size) {
HwasanTSDThreadInit(); // Only needed with interceptors.
uptr *ThreadLong = GetCurrentThreadLongPtr();
// The following implicitly sets (this) as the current thread.
@@ -51,13 +67,6 @@ void Thread::Init(uptr stack_buffer_start, uptr stack_buffer_size) {
// ScopedTaggingDisable needs GetCurrentThread to be set up.
ScopedTaggingDisabler disabler;
- uptr tls_size;
- uptr stack_size;
- GetThreadStackAndTls(IsMainThread(), &stack_bottom_, &stack_size, &tls_begin_,
- &tls_size);
- stack_top_ = stack_bottom_ + stack_size;
- tls_end_ = tls_begin_ + tls_size;
-
if (stack_bottom_) {
int local;
CHECK(AddrIsInStack((uptr)&local));
@@ -113,18 +122,21 @@ static u32 xorshift(u32 state) {
}
// Generate a (pseudo-)random non-zero tag.
-tag_t Thread::GenerateRandomTag() {
+tag_t Thread::GenerateRandomTag(uptr num_bits) {
+ DCHECK_GT(num_bits, 0);
if (tagging_disabled_) return 0;
tag_t tag;
+ const uptr tag_mask = (1ULL << num_bits) - 1;
do {
if (flags()->random_tags) {
if (!random_buffer_)
random_buffer_ = random_state_ = xorshift(random_state_);
CHECK(random_buffer_);
- tag = random_buffer_ & 0xFF;
- random_buffer_ >>= 8;
+ tag = random_buffer_ & tag_mask;
+ random_buffer_ >>= num_bits;
} else {
- tag = random_state_ = (random_state_ + 1) & 0xFF;
+ random_state_ += 1;
+ tag = random_state_ & tag_mask;
}
} while (!tag);
return tag;
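Annotation: GenerateRandomTag now draws only num_bits of randomness per tag, presumably to support configurations where kTagBits is smaller than 8 (such as the x86 aliasing mode referenced elsewhere in this diff). A hedged sketch of the scheme; the runtime's exact xorshift variant is not shown in this hunk, so the generator below (Marsaglia's xorshift32) is an assumption:

#include <cstdint>

static uint32_t xorshift32(uint32_t state) {
  state ^= state << 13;
  state ^= state >> 17;
  state ^= state << 5;
  return state;
}

// Draw num_bits per tag from a refillable buffer and retry until non-zero.
// num_bits is assumed to be in [1, 8].
static uint8_t GenerateTag(uint32_t &buffer, uint32_t &state, unsigned num_bits) {
  const uint32_t mask = (1u << num_bits) - 1;
  uint8_t tag;
  do {
    if (!buffer) buffer = state = xorshift32(state);
    tag = buffer & mask;
    buffer >>= num_bits;
  } while (!tag);
  return tag;
}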
diff --git a/compiler-rt/lib/hwasan/hwasan_thread.h b/compiler-rt/lib/hwasan/hwasan_thread.h
index 88958daf767c..9f20afe1dc76 100644
--- a/compiler-rt/lib/hwasan/hwasan_thread.h
+++ b/compiler-rt/lib/hwasan/hwasan_thread.h
@@ -23,8 +23,17 @@ typedef __sanitizer::CompactRingBuffer<uptr> StackAllocationsRingBuffer;
class Thread {
public:
- void Init(uptr stack_buffer_start, uptr stack_buffer_size); // Must be called from the thread itself.
+ // These are optional parameters that can be passed to Init.
+ struct InitState;
+
+ void Init(uptr stack_buffer_start, uptr stack_buffer_size,
+ const InitState *state = nullptr);
void InitRandomState();
+ void InitStackAndTls(const InitState *state = nullptr);
+
+ // Must be called from the thread itself.
+ void InitStackRingBuffer(uptr stack_buffer_start, uptr stack_buffer_size);
+
void Destroy();
uptr stack_top() { return stack_top_; }
@@ -42,7 +51,7 @@ class Thread {
HeapAllocationsRingBuffer *heap_allocations() { return heap_allocations_; }
StackAllocationsRingBuffer *stack_allocations() { return stack_allocations_; }
- tag_t GenerateRandomTag();
+ tag_t GenerateRandomTag(uptr num_bits = kTagBits);
void DisableTagging() { tagging_disabled_++; }
void EnableTagging() { tagging_disabled_--; }
diff --git a/compiler-rt/lib/hwasan/hwasan_thread_list.cpp b/compiler-rt/lib/hwasan/hwasan_thread_list.cpp
index a31eee84ed93..fa46e658b69d 100644
--- a/compiler-rt/lib/hwasan/hwasan_thread_list.cpp
+++ b/compiler-rt/lib/hwasan/hwasan_thread_list.cpp
@@ -12,4 +12,4 @@ void InitThreadList(uptr storage, uptr size) {
new (thread_list_placeholder) HwasanThreadList(storage, size);
}
-} // namespace
+} // namespace __hwasan
diff --git a/compiler-rt/lib/hwasan/hwasan_thread_list.h b/compiler-rt/lib/hwasan/hwasan_thread_list.h
index e596bde36662..15916a802d6e 100644
--- a/compiler-rt/lib/hwasan/hwasan_thread_list.h
+++ b/compiler-rt/lib/hwasan/hwasan_thread_list.h
@@ -85,21 +85,26 @@ class HwasanThreadList {
RoundUpTo(ring_buffer_size_ + sizeof(Thread), ring_buffer_size_ * 2);
}
- Thread *CreateCurrentThread() {
- Thread *t;
+ Thread *CreateCurrentThread(const Thread::InitState *state = nullptr) {
+ Thread *t = nullptr;
{
- SpinMutexLock l(&list_mutex_);
+ SpinMutexLock l(&free_list_mutex_);
if (!free_list_.empty()) {
t = free_list_.back();
free_list_.pop_back();
- uptr start = (uptr)t - ring_buffer_size_;
- internal_memset((void *)start, 0, ring_buffer_size_ + sizeof(Thread));
- } else {
- t = AllocThread();
}
+ }
+ if (t) {
+ uptr start = (uptr)t - ring_buffer_size_;
+ internal_memset((void *)start, 0, ring_buffer_size_ + sizeof(Thread));
+ } else {
+ t = AllocThread();
+ }
+ {
+ SpinMutexLock l(&live_list_mutex_);
live_list_.push_back(t);
}
- t->Init((uptr)t - ring_buffer_size_, ring_buffer_size_);
+ t->Init((uptr)t - ring_buffer_size_, ring_buffer_size_, state);
AddThreadStats(t);
return t;
}
@@ -110,6 +115,7 @@ class HwasanThreadList {
}
void RemoveThreadFromLiveList(Thread *t) {
+ SpinMutexLock l(&live_list_mutex_);
for (Thread *&t2 : live_list_)
if (t2 == t) {
// To remove t2, copy the last element of the list in t2's position, and
@@ -124,10 +130,10 @@ class HwasanThreadList {
void ReleaseThread(Thread *t) {
RemoveThreadStats(t);
t->Destroy();
- SpinMutexLock l(&list_mutex_);
+ DontNeedThread(t);
RemoveThreadFromLiveList(t);
+ SpinMutexLock l(&free_list_mutex_);
free_list_.push_back(t);
- DontNeedThread(t);
}
Thread *GetThreadByBufferAddress(uptr p) {
@@ -144,7 +150,7 @@ class HwasanThreadList {
template <class CB>
void VisitAllLiveThreads(CB cb) {
- SpinMutexLock l(&list_mutex_);
+ SpinMutexLock l(&live_list_mutex_);
for (Thread *t : live_list_) cb(t);
}
@@ -165,8 +171,11 @@ class HwasanThreadList {
return stats_;
}
+ uptr GetRingBufferSize() const { return ring_buffer_size_; }
+
private:
Thread *AllocThread() {
+ SpinMutexLock l(&free_space_mutex_);
uptr align = ring_buffer_size_ * 2;
CHECK(IsAligned(free_space_, align));
Thread *t = (Thread *)(free_space_ + ring_buffer_size_);
@@ -175,14 +184,16 @@ class HwasanThreadList {
return t;
}
+ SpinMutex free_space_mutex_;
uptr free_space_;
uptr free_space_end_;
uptr ring_buffer_size_;
uptr thread_alloc_size_;
+ SpinMutex free_list_mutex_;
InternalMmapVector<Thread *> free_list_;
+ SpinMutex live_list_mutex_;
InternalMmapVector<Thread *> live_list_;
- SpinMutex list_mutex_;
ThreadStats stats_;
SpinMutex stats_mutex_;
@@ -191,4 +202,4 @@ class HwasanThreadList {
void InitThreadList(uptr storage, uptr size);
HwasanThreadList &hwasanThreadList();
-} // namespace
+} // namespace __hwasan
diff --git a/compiler-rt/lib/interception/interception.h b/compiler-rt/lib/interception/interception.h
index cb0b5284ed26..d8dc092c45f5 100644
--- a/compiler-rt/lib/interception/interception.h
+++ b/compiler-rt/lib/interception/interception.h
@@ -16,10 +16,10 @@
#include "sanitizer_common/sanitizer_internal_defs.h"
-#if !SANITIZER_LINUX && !SANITIZER_FREEBSD && !SANITIZER_MAC && \
- !SANITIZER_NETBSD && !SANITIZER_WINDOWS && \
- !SANITIZER_FUCHSIA && !SANITIZER_RTEMS && !SANITIZER_SOLARIS
-# error "Interception doesn't work on this operating system."
+#if !SANITIZER_LINUX && !SANITIZER_FREEBSD && !SANITIZER_MAC && \
+ !SANITIZER_NETBSD && !SANITIZER_WINDOWS && !SANITIZER_FUCHSIA && \
+ !SANITIZER_SOLARIS
+# error "Interception doesn't work on this operating system."
#endif
// These typedefs should be used only in the interceptor definitions to replace
@@ -130,11 +130,6 @@ const interpose_substitution substitution_##func_name[] \
extern "C" ret_type func(__VA_ARGS__);
# define DECLARE_WRAPPER_WINAPI(ret_type, func, ...) \
extern "C" __declspec(dllimport) ret_type __stdcall func(__VA_ARGS__);
-#elif SANITIZER_RTEMS
-# define WRAP(x) x
-# define WRAPPER_NAME(x) #x
-# define INTERCEPTOR_ATTRIBUTE
-# define DECLARE_WRAPPER(ret_type, func, ...)
#elif SANITIZER_FREEBSD || SANITIZER_NETBSD
# define WRAP(x) __interceptor_ ## x
# define WRAPPER_NAME(x) "__interceptor_" #x
@@ -162,10 +157,6 @@ const interpose_substitution substitution_##func_name[] \
# define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default")))
# define REAL(x) __unsanitized_##x
# define DECLARE_REAL(ret_type, func, ...)
-#elif SANITIZER_RTEMS
-# define REAL(x) __real_ ## x
-# define DECLARE_REAL(ret_type, func, ...) \
- extern "C" ret_type REAL(func)(__VA_ARGS__);
#elif !SANITIZER_MAC
# define PTR_TO_REAL(x) real_##x
# define REAL(x) __interception::PTR_TO_REAL(x)
@@ -184,10 +175,10 @@ const interpose_substitution substitution_##func_name[] \
# define ASSIGN_REAL(x, y)
#endif // SANITIZER_MAC
-#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
-# define DECLARE_REAL_AND_INTERCEPTOR(ret_type, func, ...) \
- DECLARE_REAL(ret_type, func, __VA_ARGS__) \
- extern "C" ret_type WRAP(func)(__VA_ARGS__);
+#if !SANITIZER_FUCHSIA
+# define DECLARE_REAL_AND_INTERCEPTOR(ret_type, func, ...) \
+ DECLARE_REAL(ret_type, func, __VA_ARGS__) \
+ extern "C" ret_type WRAP(func)(__VA_ARGS__);
// Declare an interceptor and its wrapper defined in a different translation
// unit (ex. asm).
# define DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(ret_type, func, ...) \
@@ -202,11 +193,11 @@ const interpose_substitution substitution_##func_name[] \
// macros does its job. In exceptional cases you may need to call REAL(foo)
// without defining INTERCEPTOR(..., foo, ...). For example, if you override
// foo with an interceptor for another function.
-#if !SANITIZER_MAC && !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
-# define DEFINE_REAL(ret_type, func, ...) \
+#if !SANITIZER_MAC && !SANITIZER_FUCHSIA
+# define DEFINE_REAL(ret_type, func, ...) \
typedef ret_type (*FUNC_TYPE(func))(__VA_ARGS__); \
- namespace __interception { \
- FUNC_TYPE(func) PTR_TO_REAL(func); \
+ namespace __interception { \
+ FUNC_TYPE(func) PTR_TO_REAL(func); \
}
#else
# define DEFINE_REAL(ret_type, func, ...)
diff --git a/compiler-rt/lib/lsan/lsan.cpp b/compiler-rt/lib/lsan/lsan.cpp
index 2c0a3bf0787c..b6adc248157b 100644
--- a/compiler-rt/lib/lsan/lsan.cpp
+++ b/compiler-rt/lib/lsan/lsan.cpp
@@ -35,18 +35,14 @@ void __sanitizer::BufferedStackTrace::UnwindImpl(
uptr pc, uptr bp, void *context, bool request_fast, u32 max_depth) {
using namespace __lsan;
uptr stack_top = 0, stack_bottom = 0;
- ThreadContext *t;
- if (StackTrace::WillUseFastUnwind(request_fast) &&
- (t = CurrentThreadContext())) {
+ if (ThreadContext *t = CurrentThreadContext()) {
stack_top = t->stack_end();
stack_bottom = t->stack_begin();
}
- if (!SANITIZER_MIPS || IsValidFrame(bp, stack_top, stack_bottom)) {
- if (StackTrace::WillUseFastUnwind(request_fast))
- Unwind(max_depth, pc, bp, nullptr, stack_top, stack_bottom, true);
- else
- Unwind(max_depth, pc, 0, context, 0, 0, false);
- }
+ if (SANITIZER_MIPS && !IsValidFrame(bp, stack_top, stack_bottom))
+ return;
+ bool fast = StackTrace::WillUseFastUnwind(request_fast);
+ Unwind(max_depth, pc, bp, context, stack_top, stack_bottom, fast);
}
using namespace __lsan;
diff --git a/compiler-rt/lib/lsan/lsan_allocator.cpp b/compiler-rt/lib/lsan/lsan_allocator.cpp
index 70422957e6f3..91e34ebb3214 100644
--- a/compiler-rt/lib/lsan/lsan_allocator.cpp
+++ b/compiler-rt/lib/lsan/lsan_allocator.cpp
@@ -123,14 +123,18 @@ void Deallocate(void *p) {
void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
uptr alignment) {
- RegisterDeallocation(p);
if (new_size > max_malloc_size) {
- allocator.Deallocate(GetAllocatorCache(), p);
- return ReportAllocationSizeTooBig(new_size, stack);
+ ReportAllocationSizeTooBig(new_size, stack);
+ return nullptr;
}
- p = allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
- RegisterAllocation(stack, p, new_size);
- return p;
+ RegisterDeallocation(p);
+ void *new_p =
+ allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
+ if (new_p)
+ RegisterAllocation(stack, new_p, new_size);
+ else if (new_size != 0)
+ RegisterAllocation(stack, p, new_size);
+ return new_p;
}
void GetAllocatorCacheRange(uptr *begin, uptr *end) {
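Annotation: the reworked Reallocate keeps the original chunk registered when the underlying reallocation fails for a non-zero size, mirroring the realloc contract that a failed call leaves the old block untouched. A hedged caller-side sketch of that contract (the helper is illustrative, not part of LSan):

#include <cstddef>
#include <cstdlib>

// If realloc fails, the old block is still valid and still leak-tracked;
// the caller keeps using (and eventually frees) the old pointer.
char *grow(char *old_p, std::size_t new_size) {
  char *p = static_cast<char *>(realloc(old_p, new_size));
  return p ? p : old_p;
}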
diff --git a/compiler-rt/lib/lsan/lsan_allocator.h b/compiler-rt/lib/lsan/lsan_allocator.h
index 17e13cd014ba..9d763789154f 100644
--- a/compiler-rt/lib/lsan/lsan_allocator.h
+++ b/compiler-rt/lib/lsan/lsan_allocator.h
@@ -50,7 +50,7 @@ struct ChunkMetadata {
};
#if defined(__mips64) || defined(__aarch64__) || defined(__i386__) || \
- defined(__arm__)
+ defined(__arm__) || SANITIZER_RISCV64
template <typename AddressSpaceViewTy>
struct AP32 {
static const uptr kSpaceBeg = 0;
diff --git a/compiler-rt/lib/lsan/lsan_common.cpp b/compiler-rt/lib/lsan/lsan_common.cpp
index d5b4132b24d5..74400d2e8426 100644
--- a/compiler-rt/lib/lsan/lsan_common.cpp
+++ b/compiler-rt/lib/lsan/lsan_common.cpp
@@ -895,7 +895,7 @@ void LeakReport::PrintSummary() {
bytes += leaks_[i].total_size;
allocations += leaks_[i].hit_count;
}
- InternalScopedString summary(kMaxSummaryLength);
+ InternalScopedString summary;
summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
allocations);
ReportErrorSummary(summary.data());
diff --git a/compiler-rt/lib/lsan/lsan_common.h b/compiler-rt/lib/lsan/lsan_common.h
index b0ae6f020b63..776ca60b1e97 100644
--- a/compiler-rt/lib/lsan/lsan_common.h
+++ b/compiler-rt/lib/lsan/lsan_common.h
@@ -41,6 +41,8 @@
#define CAN_SANITIZE_LEAKS 1
#elif defined(__arm__) && SANITIZER_LINUX
#define CAN_SANITIZE_LEAKS 1
+#elif SANITIZER_RISCV64 && SANITIZER_LINUX
+#define CAN_SANITIZE_LEAKS 1
#elif SANITIZER_NETBSD || SANITIZER_FUCHSIA
#define CAN_SANITIZE_LEAKS 1
#else
@@ -64,8 +66,6 @@ enum ChunkTag {
kIgnored = 3
};
-const u32 kInvalidTid = (u32) -1;
-
struct Flags {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
#include "lsan_flags.inc"
@@ -221,8 +221,8 @@ void UnlockAllocator();
// Returns true if [addr, addr + sizeof(void *)) is poisoned.
bool WordIsPoisoned(uptr addr);
// Wrappers for ThreadRegistry access.
-void LockThreadRegistry();
-void UnlockThreadRegistry();
+void LockThreadRegistry() NO_THREAD_SAFETY_ANALYSIS;
+void UnlockThreadRegistry() NO_THREAD_SAFETY_ANALYSIS;
ThreadRegistry *GetThreadRegistryLocked();
bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
diff --git a/compiler-rt/lib/lsan/lsan_interceptors.cpp b/compiler-rt/lib/lsan/lsan_interceptors.cpp
index bf8d316770ee..90a90a56c54c 100644
--- a/compiler-rt/lib/lsan/lsan_interceptors.cpp
+++ b/compiler-rt/lib/lsan/lsan_interceptors.cpp
@@ -460,7 +460,7 @@ INTERCEPTOR(int, pthread_create, void *th, void *attr,
if (res == 0) {
int tid = ThreadCreate(GetCurrentThread(), *(uptr *)th,
IsStateDetached(detached));
- CHECK_NE(tid, 0);
+ CHECK_NE(tid, kMainTid);
atomic_store(&p.tid, tid, memory_order_release);
while (atomic_load(&p.tid, memory_order_acquire) != 0)
internal_sched_yield();
diff --git a/compiler-rt/lib/lsan/lsan_posix.cpp b/compiler-rt/lib/lsan/lsan_posix.cpp
index 8e05915dd1b9..5d1c3f6260dd 100644
--- a/compiler-rt/lib/lsan/lsan_posix.cpp
+++ b/compiler-rt/lib/lsan/lsan_posix.cpp
@@ -48,7 +48,7 @@ void ThreadStart(u32 tid, tid_t os_id, ThreadType thread_type) {
OnStartedArgs args;
uptr stack_size = 0;
uptr tls_size = 0;
- GetThreadStackAndTls(tid == 0, &args.stack_begin, &stack_size,
+ GetThreadStackAndTls(tid == kMainTid, &args.stack_begin, &stack_size,
&args.tls_begin, &tls_size);
args.stack_end = args.stack_begin + stack_size;
args.tls_end = args.tls_begin + tls_size;
@@ -75,8 +75,8 @@ bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
}
void InitializeMainThread() {
- u32 tid = ThreadCreate(0, 0, true);
- CHECK_EQ(tid, 0);
+ u32 tid = ThreadCreate(kMainTid, 0, true);
+ CHECK_EQ(tid, kMainTid);
ThreadStart(tid, GetTid());
}
diff --git a/compiler-rt/lib/lsan/lsan_thread.cpp b/compiler-rt/lib/lsan/lsan_thread.cpp
index 371a1f29dfe0..1d224ebca693 100644
--- a/compiler-rt/lib/lsan/lsan_thread.cpp
+++ b/compiler-rt/lib/lsan/lsan_thread.cpp
@@ -30,13 +30,10 @@ static ThreadContextBase *CreateThreadContext(u32 tid) {
return new (mem) ThreadContext(tid);
}
-static const uptr kMaxThreads = 1 << 13;
-static const uptr kThreadQuarantineSize = 64;
-
void InitializeThreadRegistry() {
static ALIGNED(64) char thread_registry_placeholder[sizeof(ThreadRegistry)];
- thread_registry = new (thread_registry_placeholder)
- ThreadRegistry(CreateThreadContext, kMaxThreads, kThreadQuarantineSize);
+ thread_registry =
+ new (thread_registry_placeholder) ThreadRegistry(CreateThreadContext);
}
ThreadContextLsanBase::ThreadContextLsanBase(int tid)
@@ -94,7 +91,7 @@ void ThreadJoin(u32 tid) {
}
void EnsureMainThreadIDIsCorrect() {
- if (GetCurrentThread() == 0)
+ if (GetCurrentThread() == kMainTid)
CurrentThreadContext()->os_id = GetTid();
}
diff --git a/compiler-rt/lib/memprof/memprof_allocator.cpp b/compiler-rt/lib/memprof/memprof_allocator.cpp
index 259c7c144ab7..6f01d4dfcb84 100644
--- a/compiler-rt/lib/memprof/memprof_allocator.cpp
+++ b/compiler-rt/lib/memprof/memprof_allocator.cpp
@@ -736,12 +736,12 @@ struct Allocator {
void PrintStats() { allocator.PrintStats(); }
- void ForceLock() {
+ void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
allocator.ForceLock();
fallback_mutex.Lock();
}
- void ForceUnlock() {
+ void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
fallback_mutex.Unlock();
allocator.ForceUnlock();
}
diff --git a/compiler-rt/lib/memprof/memprof_allocator.h b/compiler-rt/lib/memprof/memprof_allocator.h
index 070b8b2f2737..f1438baaa20e 100644
--- a/compiler-rt/lib/memprof/memprof_allocator.h
+++ b/compiler-rt/lib/memprof/memprof_allocator.h
@@ -69,7 +69,6 @@ using MemprofAllocator = MemprofAllocatorASVT<LocalAddressSpaceView>;
using AllocatorCache = MemprofAllocator::AllocatorCache;
struct MemprofThreadLocalMallocStorage {
- uptr quarantine_cache[16];
AllocatorCache allocator_cache;
void CommitBack();
diff --git a/compiler-rt/lib/memprof/memprof_descriptions.cpp b/compiler-rt/lib/memprof/memprof_descriptions.cpp
index ebd81d6f2f23..669b1acd8c71 100644
--- a/compiler-rt/lib/memprof/memprof_descriptions.cpp
+++ b/compiler-rt/lib/memprof/memprof_descriptions.cpp
@@ -44,11 +44,11 @@ void DescribeThread(MemprofThreadContext *context) {
CHECK(context);
memprofThreadRegistry().CheckLocked();
// No need to announce the main thread.
- if (context->tid == 0 || context->announced) {
+ if (context->tid == kMainTid || context->announced) {
return;
}
context->announced = true;
- InternalScopedString str(1024);
+ InternalScopedString str;
str.append("Thread %s", MemprofThreadIdAndName(context).c_str());
if (context->parent_tid == kInvalidTid) {
str.append(" created by unknown thread\n");
diff --git a/compiler-rt/lib/memprof/memprof_interceptors.cpp b/compiler-rt/lib/memprof/memprof_interceptors.cpp
index caa629b9c474..e22768061e70 100644
--- a/compiler-rt/lib/memprof/memprof_interceptors.cpp
+++ b/compiler-rt/lib/memprof/memprof_interceptors.cpp
@@ -59,6 +59,8 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
#define COMMON_INTERCEPT_FUNCTION(name) MEMPROF_INTERCEPT_FUNC(name)
#define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \
MEMPROF_INTERCEPT_FUNC_VER(name, ver)
+#define COMMON_INTERCEPT_FUNCTION_VER_UNVERSIONED_FALLBACK(name, ver) \
+ MEMPROF_INTERCEPT_FUNC_VER_UNVERSIONED_FALLBACK(name, ver)
#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
MEMPROF_WRITE_RANGE(ptr, size)
#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
diff --git a/compiler-rt/lib/memprof/memprof_interceptors.h b/compiler-rt/lib/memprof/memprof_interceptors.h
index b6a4fa411254..ca5f3690430a 100644
--- a/compiler-rt/lib/memprof/memprof_interceptors.h
+++ b/compiler-rt/lib/memprof/memprof_interceptors.h
@@ -50,5 +50,11 @@ DECLARE_REAL(char *, strstr, const char *s1, const char *s2)
if (!INTERCEPT_FUNCTION_VER(name, ver)) \
VReport(1, "MemProfiler: failed to intercept '%s@@%s'\n", #name, #ver); \
} while (0)
+#define MEMPROF_INTERCEPT_FUNC_VER_UNVERSIONED_FALLBACK(name, ver) \
+ do { \
+ if (!INTERCEPT_FUNCTION_VER(name, ver) && !INTERCEPT_FUNCTION(name)) \
+ VReport(1, "MemProfiler: failed to intercept '%s@@%s' or '%s'\n", #name, \
+ #ver, #name); \
+ } while (0)
#endif // MEMPROF_INTERCEPTORS_H
diff --git a/compiler-rt/lib/memprof/memprof_rtl.cpp b/compiler-rt/lib/memprof/memprof_rtl.cpp
index d6d606f666ee..fee2912d64d4 100644
--- a/compiler-rt/lib/memprof/memprof_rtl.cpp
+++ b/compiler-rt/lib/memprof/memprof_rtl.cpp
@@ -48,19 +48,9 @@ static void MemprofDie() {
}
}
-static void MemprofCheckFailed(const char *file, int line, const char *cond,
- u64 v1, u64 v2) {
- Report("MemProfiler CHECK failed: %s:%d \"%s\" (0x%zx, 0x%zx)\n", file, line,
- cond, (uptr)v1, (uptr)v2);
-
- // Print a stack trace the first time we come here. Otherwise, we probably
- // failed a CHECK during symbolization.
- static atomic_uint32_t num_calls;
- if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) == 0) {
- PRINT_CURRENT_STACK_CHECK();
- }
-
- Die();
+static void CheckUnwind() {
+ GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_check);
+ stack.Print();
}
// -------------------------- Globals --------------------- {{{1
@@ -174,7 +164,7 @@ static void MemprofInitInternal() {
// Install tool-specific callbacks in sanitizer_common.
AddDieCallback(MemprofDie);
- SetCheckFailedCallback(MemprofCheckFailed);
+ SetCheckUnwindCallback(CheckUnwind);
// Use profile name specified via the binary itself if it exists, and hasn't
// been overridden by a flag at runtime.
diff --git a/compiler-rt/lib/memprof/memprof_stack.h b/compiler-rt/lib/memprof/memprof_stack.h
index 289a61e385a2..a8fdfc9def9d 100644
--- a/compiler-rt/lib/memprof/memprof_stack.h
+++ b/compiler-rt/lib/memprof/memprof_stack.h
@@ -50,9 +50,6 @@ u32 GetMallocContextSize();
#define GET_STACK_TRACE_FATAL_HERE \
GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal)
-#define GET_STACK_TRACE_CHECK_HERE \
- GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_check)
-
#define GET_STACK_TRACE_THREAD GET_STACK_TRACE(kStackTraceMax, true)
#define GET_STACK_TRACE_MALLOC \
@@ -66,10 +63,4 @@ u32 GetMallocContextSize();
stack.Print(); \
}
-#define PRINT_CURRENT_STACK_CHECK() \
- { \
- GET_STACK_TRACE_CHECK_HERE; \
- stack.Print(); \
- }
-
#endif // MEMPROF_STACK_H
diff --git a/compiler-rt/lib/memprof/memprof_thread.cpp b/compiler-rt/lib/memprof/memprof_thread.cpp
index 1bfff69bf1be..5ae7a2ee85b9 100644
--- a/compiler-rt/lib/memprof/memprof_thread.cpp
+++ b/compiler-rt/lib/memprof/memprof_thread.cpp
@@ -57,8 +57,8 @@ ThreadRegistry &memprofThreadRegistry() {
// in TSD and can't reliably tell when no more TSD destructors will
// be called. It would be wrong to reuse MemprofThreadContext for another
// thread before all TSD destructors will be called for it.
- memprof_thread_registry = new (thread_registry_placeholder) ThreadRegistry(
- GetMemprofThreadContext, kMaxNumberOfThreads, kMaxNumberOfThreads);
+ memprof_thread_registry = new (thread_registry_placeholder)
+ ThreadRegistry(GetMemprofThreadContext);
initialized = true;
}
return *memprof_thread_registry;
@@ -156,7 +156,7 @@ MemprofThread::ThreadStart(tid_t os_id,
MemprofThread *CreateMainThread() {
MemprofThread *main_thread = MemprofThread::Create(
- /* start_routine */ nullptr, /* arg */ nullptr, /* parent_tid */ 0,
+ /* start_routine */ nullptr, /* arg */ nullptr, /* parent_tid */ kMainTid,
/* stack */ nullptr, /* detached */ true);
SetCurrentThread(main_thread);
main_thread->ThreadStart(internal_getpid(),
@@ -171,8 +171,8 @@ void MemprofThread::SetThreadStackAndTls(const InitOptions *options) {
DCHECK_EQ(options, nullptr);
uptr tls_size = 0;
uptr stack_size = 0;
- GetThreadStackAndTls(tid() == 0, &stack_bottom_, &stack_size, &tls_begin_,
- &tls_size);
+ GetThreadStackAndTls(tid() == kMainTid, &stack_bottom_, &stack_size,
+ &tls_begin_, &tls_size);
stack_top_ = stack_bottom_ + stack_size;
tls_end_ = tls_begin_ + tls_size;
dtls_ = DTLS_Get();
@@ -214,7 +214,7 @@ u32 GetCurrentTidOrInvalid() {
void EnsureMainThreadIDIsCorrect() {
MemprofThreadContext *context =
reinterpret_cast<MemprofThreadContext *>(TSDGet());
- if (context && (context->tid == 0))
+ if (context && (context->tid == kMainTid))
context->os_id = GetTid();
}
} // namespace __memprof
diff --git a/compiler-rt/lib/memprof/memprof_thread.h b/compiler-rt/lib/memprof/memprof_thread.h
index 2e1a8bb43b82..4c9313fcb369 100644
--- a/compiler-rt/lib/memprof/memprof_thread.h
+++ b/compiler-rt/lib/memprof/memprof_thread.h
@@ -27,9 +27,6 @@ struct DTLS;
namespace __memprof {
-const u32 kInvalidTid = 0xffffff; // Must fit into 24 bits.
-const u32 kMaxNumberOfThreads = (1 << 22); // 4M
-
class MemprofThread;
// These objects are created for every thread and are never deleted,
diff --git a/compiler-rt/lib/msan/msan.cpp b/compiler-rt/lib/msan/msan.cpp
index 4be1630cd302..4fa772fdcb6e 100644
--- a/compiler-rt/lib/msan/msan.cpp
+++ b/compiler-rt/lib/msan/msan.cpp
@@ -308,7 +308,8 @@ void __sanitizer::BufferedStackTrace::UnwindImpl(
if (!t || !StackTrace::WillUseFastUnwind(request_fast)) {
// Block reports from our interceptors during _Unwind_Backtrace.
SymbolizerScope sym_scope;
- return Unwind(max_depth, pc, bp, context, 0, 0, false);
+ return Unwind(max_depth, pc, bp, context, t ? t->stack_top() : 0,
+ t ? t->stack_bottom() : 0, false);
}
if (StackTrace::WillUseFastUnwind(request_fast))
Unwind(max_depth, pc, bp, nullptr, t->stack_top(), t->stack_bottom(), true);
@@ -410,12 +411,9 @@ static void MsanOnDeadlySignal(int signo, void *siginfo, void *context) {
HandleDeadlySignal(siginfo, context, GetTid(), &OnStackUnwind, nullptr);
}
-static void MsanCheckFailed(const char *file, int line, const char *cond,
- u64 v1, u64 v2) {
- Report("MemorySanitizer CHECK failed: %s:%d \"%s\" (0x%zx, 0x%zx)\n", file,
- line, cond, (uptr)v1, (uptr)v2);
- PRINT_CURRENT_STACK_CHECK();
- Die();
+static void CheckUnwind() {
+ GET_FATAL_STACK_TRACE_PC_BP(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME());
+ stack.Print();
}
void __msan_init() {
@@ -430,7 +428,7 @@ void __msan_init() {
InitializeFlags();
// Install tool-specific callbacks in sanitizer_common.
- SetCheckFailedCallback(MsanCheckFailed);
+ SetCheckUnwindCallback(CheckUnwind);
__sanitizer_set_report_path(common_flags()->log_path);
diff --git a/compiler-rt/lib/msan/msan.h b/compiler-rt/lib/msan/msan.h
index e794c7c15f89..963b94a54087 100644
--- a/compiler-rt/lib/msan/msan.h
+++ b/compiler-rt/lib/msan/msan.h
@@ -296,7 +296,6 @@ char *GetProcSelfMaps();
void InitializeInterceptors();
void MsanAllocatorInit();
-void MsanAllocatorThreadFinish();
void MsanDeallocate(StackTrace *stack, void *ptr);
void *msan_malloc(uptr size, StackTrace *stack);
@@ -366,15 +365,6 @@ const int STACK_TRACE_TAG_POISON = StackTrace::TAG_CUSTOM + 1;
stack.Unwind(pc, bp, nullptr, common_flags()->fast_unwind_on_fatal); \
}
-#define GET_FATAL_STACK_TRACE_HERE \
- GET_FATAL_STACK_TRACE_PC_BP(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME())
-
-#define PRINT_CURRENT_STACK_CHECK() \
- { \
- GET_FATAL_STACK_TRACE_HERE; \
- stack.Print(); \
- }
-
class ScopedThreadLocalStateBackup {
public:
ScopedThreadLocalStateBackup() { Backup(); }
diff --git a/compiler-rt/lib/msan/msan_allocator.cpp b/compiler-rt/lib/msan/msan_allocator.cpp
index 68be794106b1..a97bd8371e08 100644
--- a/compiler-rt/lib/msan/msan_allocator.cpp
+++ b/compiler-rt/lib/msan/msan_allocator.cpp
@@ -220,8 +220,8 @@ void MsanDeallocate(StackTrace *stack, void *p) {
}
}
-void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
- uptr alignment) {
+static void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
+ uptr alignment) {
Metadata *meta = reinterpret_cast<Metadata*>(allocator.GetMetaData(old_p));
uptr old_size = meta->requested_size;
uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(old_p);
@@ -245,7 +245,7 @@ void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
return new_p;
}
-void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
+static void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
if (AllocatorMayReturnNull())
return nullptr;
diff --git a/compiler-rt/lib/msan/msan_allocator.h b/compiler-rt/lib/msan/msan_allocator.h
index 42a5022c9e3e..365af4d0c4dd 100644
--- a/compiler-rt/lib/msan/msan_allocator.h
+++ b/compiler-rt/lib/msan/msan_allocator.h
@@ -18,7 +18,6 @@
namespace __msan {
struct MsanThreadLocalMallocStorage {
- uptr quarantine_cache[16];
// Allocator cache contains atomic_uint64_t which must be 8-byte aligned.
ALIGNED(8) uptr allocator_cache[96 * (512 * 8 + 16)]; // Opaque.
void CommitBack();
diff --git a/compiler-rt/lib/msan/msan_blacklist.txt b/compiler-rt/lib/msan/msan_blacklist.txt
deleted file mode 100644
index 3efef5712185..000000000000
--- a/compiler-rt/lib/msan/msan_blacklist.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-# Blacklist for MemorySanitizer. Turns off instrumentation of particular
-# functions or sources. Use with care. You may set location of blacklist
-# at compile-time using -fsanitize-blacklist=<path> flag.
-
-# Example usage:
-# fun:*bad_function_name*
-# src:file_with_tricky_code.cc
-
-# https://bugs.llvm.org/show_bug.cgi?id=31877
-fun:__gxx_personality_*
diff --git a/compiler-rt/lib/msan/msan_chained_origin_depot.cpp b/compiler-rt/lib/msan/msan_chained_origin_depot.cpp
index d2897481a4b9..5dee80fd4692 100644
--- a/compiler-rt/lib/msan/msan_chained_origin_depot.cpp
+++ b/compiler-rt/lib/msan/msan_chained_origin_depot.cpp
@@ -1,4 +1,4 @@
-//===-- msan_chained_origin_depot.cpp ----------------------------------===//
+//===-- msan_chained_origin_depot.cpp -------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -6,118 +6,29 @@
//
//===----------------------------------------------------------------------===//
//
+// This file is a part of MemorySanitizer.
+//
// A storage for chained origins.
//===----------------------------------------------------------------------===//
#include "msan_chained_origin_depot.h"
-#include "sanitizer_common/sanitizer_stackdepotbase.h"
+#include "sanitizer_common/sanitizer_chained_origin_depot.h"
namespace __msan {
-struct ChainedOriginDepotDesc {
- u32 here_id;
- u32 prev_id;
-};
-
-struct ChainedOriginDepotNode {
- ChainedOriginDepotNode *link;
- u32 id;
- u32 here_id;
- u32 prev_id;
-
- typedef ChainedOriginDepotDesc args_type;
-
- bool eq(u32 hash, const args_type &args) const {
- return here_id == args.here_id && prev_id == args.prev_id;
- }
-
- static uptr storage_size(const args_type &args) {
- return sizeof(ChainedOriginDepotNode);
- }
-
- /* This is murmur2 hash for the 64->32 bit case.
- It does not behave all that well because the keys have a very biased
- distribution (I've seen 7-element buckets with the table only 14% full).
-
- here_id is built of
- * (1 bits) Reserved, zero.
- * (8 bits) Part id = bits 13..20 of the hash value of here_id's key.
- * (23 bits) Sequential number (each part has each own sequence).
-
- prev_id has either the same distribution as here_id (but with 3:8:21)
- split, or one of two reserved values (-1) or (-2). Either case can
- dominate depending on the workload.
- */
- static u32 hash(const args_type &args) {
- const u32 m = 0x5bd1e995;
- const u32 seed = 0x9747b28c;
- const u32 r = 24;
- u32 h = seed;
- u32 k = args.here_id;
- k *= m;
- k ^= k >> r;
- k *= m;
- h *= m;
- h ^= k;
-
- k = args.prev_id;
- k *= m;
- k ^= k >> r;
- k *= m;
- h *= m;
- h ^= k;
-
- h ^= h >> 13;
- h *= m;
- h ^= h >> 15;
- return h;
- }
- static bool is_valid(const args_type &args) { return true; }
- void store(const args_type &args, u32 other_hash) {
- here_id = args.here_id;
- prev_id = args.prev_id;
- }
-
- args_type load() const {
- args_type ret = {here_id, prev_id};
- return ret;
- }
-
- struct Handle {
- ChainedOriginDepotNode *node_;
- Handle() : node_(nullptr) {}
- explicit Handle(ChainedOriginDepotNode *node) : node_(node) {}
- bool valid() { return node_; }
- u32 id() { return node_->id; }
- int here_id() { return node_->here_id; }
- int prev_id() { return node_->prev_id; }
- };
-
- Handle get_handle() { return Handle(this); }
-
- typedef Handle handle_type;
-};
-
-static StackDepotBase<ChainedOriginDepotNode, 4, 20> chainedOriginDepot;
+static ChainedOriginDepot chainedOriginDepot;
StackDepotStats *ChainedOriginDepotGetStats() {
return chainedOriginDepot.GetStats();
}
bool ChainedOriginDepotPut(u32 here_id, u32 prev_id, u32 *new_id) {
- ChainedOriginDepotDesc desc = {here_id, prev_id};
- bool inserted;
- ChainedOriginDepotNode::Handle h = chainedOriginDepot.Put(desc, &inserted);
- *new_id = h.valid() ? h.id() : 0;
- return inserted;
+ return chainedOriginDepot.Put(here_id, prev_id, new_id);
}
-// Retrieves a stored stack trace by the id.
u32 ChainedOriginDepotGet(u32 id, u32 *other) {
- ChainedOriginDepotDesc desc = chainedOriginDepot.Get(id);
- *other = desc.prev_id;
- return desc.here_id;
+ return chainedOriginDepot.Get(id, other);
}
void ChainedOriginDepotLockAll() {
diff --git a/compiler-rt/lib/msan/msan_chained_origin_depot.h b/compiler-rt/lib/msan/msan_chained_origin_depot.h
index 2b4cb3647791..60ab182fa4c8 100644
--- a/compiler-rt/lib/msan/msan_chained_origin_depot.h
+++ b/compiler-rt/lib/msan/msan_chained_origin_depot.h
@@ -1,4 +1,4 @@
-//===-- msan_chained_origin_depot.h --------------------------*- C++ -*-===//
+//===-- msan_chained_origin_depot.h -----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -6,8 +6,11 @@
//
//===----------------------------------------------------------------------===//
//
+// This file is a part of MemorySanitizer.
+//
// A storage for chained origins.
//===----------------------------------------------------------------------===//
+
#ifndef MSAN_CHAINED_ORIGIN_DEPOT_H
#define MSAN_CHAINED_ORIGIN_DEPOT_H
@@ -15,9 +18,16 @@
namespace __msan {
+// Gets the statistic of the origin chain storage.
StackDepotStats *ChainedOriginDepotGetStats();
+
+// Stores a chain with StackDepot ID here_id and previous chain ID prev_id.
+// If successful, returns true and the new chain id new_id.
+// If the same element already exists, returns false and sets new_id to the
+// existing ID.
bool ChainedOriginDepotPut(u32 here_id, u32 prev_id, u32 *new_id);
-// Retrieves a stored stack trace by the id.
+
+// Retrieves the stored StackDepot ID for the given origin ID.
u32 ChainedOriginDepotGet(u32 id, u32 *other);
void ChainedOriginDepotLockAll();
diff --git a/compiler-rt/lib/msan/msan_ignorelist.txt b/compiler-rt/lib/msan/msan_ignorelist.txt
new file mode 100644
index 000000000000..1fae64d880bc
--- /dev/null
+++ b/compiler-rt/lib/msan/msan_ignorelist.txt
@@ -0,0 +1,10 @@
+# Ignorelist for MemorySanitizer. Turns off instrumentation of particular
+# functions or sources. Use with care. You may set location of ignorelist
+# at compile-time using -fsanitize-ignorelist=<path> flag.
+
+# Example usage:
+# fun:*bad_function_name*
+# src:file_with_tricky_code.cc
+
+# https://bugs.llvm.org/show_bug.cgi?id=31877
+fun:__gxx_personality_*
diff --git a/compiler-rt/lib/msan/msan_interceptors.cpp b/compiler-rt/lib/msan/msan_interceptors.cpp
index 4eea94f1f969..760f74e927d0 100644
--- a/compiler-rt/lib/msan/msan_interceptors.cpp
+++ b/compiler-rt/lib/msan/msan_interceptors.cpp
@@ -21,6 +21,7 @@
#include "msan_report.h"
#include "msan_thread.h"
#include "msan_poisoning.h"
+#include "sanitizer_common/sanitizer_errno_codes.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
#include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
#include "sanitizer_common/sanitizer_allocator.h"
@@ -1257,10 +1258,18 @@ int OnExit() {
VReport(1, "MemorySanitizer: failed to intercept '%s@@%s'\n", #name, \
#ver); \
} while (0)
+#define MSAN_INTERCEPT_FUNC_VER_UNVERSIONED_FALLBACK(name, ver) \
+ do { \
+ if (!INTERCEPT_FUNCTION_VER(name, ver) && !INTERCEPT_FUNCTION(name)) \
+ VReport(1, "MemorySanitizer: failed to intercept '%s@@%s' or '%s'\n", \
+ #name, #ver, #name); \
+ } while (0)
#define COMMON_INTERCEPT_FUNCTION(name) MSAN_INTERCEPT_FUNC(name)
-#define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \
+#define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \
MSAN_INTERCEPT_FUNC_VER(name, ver)
+#define COMMON_INTERCEPT_FUNCTION_VER_UNVERSIONED_FALLBACK(name, ver) \
+ MSAN_INTERCEPT_FUNC_VER_UNVERSIONED_FALLBACK(name, ver)
#define COMMON_INTERCEPTOR_UNPOISON_PARAM(count) \
UnpoisonParam(count)
#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
@@ -1365,11 +1374,14 @@ static int sigaction_impl(int signo, const __sanitizer_sigaction *act,
static int sigaction_impl(int signo, const __sanitizer_sigaction *act,
__sanitizer_sigaction *oldact) {
ENSURE_MSAN_INITED();
+ if (signo <= 0 || signo >= kMaxSignals) {
+ errno = errno_EINVAL;
+ return -1;
+ }
if (act) read_sigaction(act);
int res;
if (flags()->wrap_signals) {
SpinMutexLock lock(&sigactions_mu);
- CHECK_LT(signo, kMaxSignals);
uptr old_cb = atomic_load(&sigactions[signo], memory_order_relaxed);
__sanitizer_sigaction new_act;
__sanitizer_sigaction *pnew_act = act ? &new_act : nullptr;
@@ -1403,8 +1415,11 @@ static int sigaction_impl(int signo, const __sanitizer_sigaction *act,
static uptr signal_impl(int signo, uptr cb) {
ENSURE_MSAN_INITED();
+ if (signo <= 0 || signo >= kMaxSignals) {
+ errno = errno_EINVAL;
+ return -1;
+ }
if (flags()->wrap_signals) {
- CHECK_LT(signo, kMaxSignals);
SpinMutexLock lock(&sigactions_mu);
if (cb != __sanitizer::sig_ign && cb != __sanitizer::sig_dfl) {
atomic_store(&sigactions[signo], cb, memory_order_relaxed);
diff --git a/compiler-rt/lib/msan/msan_poisoning.cpp b/compiler-rt/lib/msan/msan_poisoning.cpp
index a92b0565cfa8..15892392f74a 100644
--- a/compiler-rt/lib/msan/msan_poisoning.cpp
+++ b/compiler-rt/lib/msan/msan_poisoning.cpp
@@ -213,7 +213,7 @@ void SetShadow(const void *ptr, uptr size, u8 value) {
if (page_end != shadow_end) {
REAL(memset)((void *)page_end, 0, shadow_end - page_end);
}
- if (!MmapFixedNoReserve(page_beg, page_end - page_beg))
+ if (!MmapFixedSuperNoReserve(page_beg, page_end - page_beg))
Die();
}
}
diff --git a/compiler-rt/lib/orc/adt.h b/compiler-rt/lib/orc/adt.h
new file mode 100644
index 000000000000..33b731082f88
--- /dev/null
+++ b/compiler-rt/lib/orc/adt.h
@@ -0,0 +1,113 @@
+//===----------------------- adt.h - Handy ADTs -----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of the ORC runtime support library.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef ORC_RT_ADT_H
+#define ORC_RT_ADT_H
+
+#include <cstring>
+#include <limits>
+#include <string>
+
+namespace __orc_rt {
+
+constexpr std::size_t dynamic_extent = std::numeric_limits<std::size_t>::max();
+
+/// A substitute for std::span (and llvm::ArrayRef).
+/// FIXME: Remove in favor of std::span once we can use c++20.
+template <typename T, std::size_t Extent = dynamic_extent> class span {
+public:
+ typedef T element_type;
+ typedef std::remove_cv<T> value_type;
+ typedef std::size_t size_type;
+ typedef std::ptrdiff_t difference_type;
+ typedef T *pointer;
+ typedef const T *const_pointer;
+ typedef T &reference;
+ typedef const T &const_reference;
+
+ typedef pointer iterator;
+
+ static constexpr std::size_t extent = Extent;
+
+ constexpr span() noexcept = default;
+ constexpr span(T *first, size_type count) noexcept
+ : Data(first), Size(count) {}
+
+ template <std::size_t N>
+ constexpr span(T (&arr)[N]) noexcept : Data(&arr[0]), Size(N) {}
+
+ constexpr iterator begin() const noexcept { return Data; }
+ constexpr iterator end() const noexcept { return Data + Size; }
+ constexpr pointer data() const noexcept { return Data; }
+ constexpr reference operator[](size_type idx) const { return Data[idx]; }
+ constexpr size_type size() const noexcept { return Size; }
+ constexpr bool empty() const noexcept { return Size == 0; }
+
+private:
+ T *Data = nullptr;
+ size_type Size = 0;
+};
+
+/// A substitute for std::string_view (and llvm::StringRef).
+/// FIXME: Remove in favor of std::string_view once we have c++17.
+class string_view {
+public:
+ typedef char value_type;
+ typedef char *pointer;
+ typedef const char *const_pointer;
+ typedef char &reference;
+ typedef const char &const_reference;
+ typedef std::size_t size_type;
+ typedef std::ptrdiff_t difference_type;
+
+ typedef const_pointer const_iterator;
+ typedef const_iterator iterator;
+
+ constexpr string_view() noexcept = default;
+ constexpr string_view(const char *S, size_type Count)
+ : Data(S), Size(Count) {}
+ string_view(const char *S) : Data(S), Size(strlen(S)) {}
+
+ constexpr const_iterator begin() const noexcept { return Data; }
+ constexpr const_iterator end() const noexcept { return Data + Size; }
+ constexpr const_pointer data() const noexcept { return Data; }
+ constexpr const_reference operator[](size_type idx) { return Data[idx]; }
+ constexpr size_type size() const noexcept { return Size; }
+ constexpr bool empty() const noexcept { return Size == 0; }
+
+ friend bool operator==(const string_view &LHS, const string_view &RHS) {
+ if (LHS.Size != RHS.Size)
+ return false;
+ if (LHS.Data == RHS.Data)
+ return true;
+ for (size_t I = 0; I != LHS.Size; ++I)
+ if (LHS.Data[I] != RHS.Data[I])
+ return false;
+ return true;
+ }
+
+ friend bool operator!=(const string_view &LHS, const string_view &RHS) {
+ return !(LHS == RHS);
+ }
+
+private:
+ const char *Data = nullptr;
+ size_type Size = 0;
+};
+
+inline std::string to_string(string_view SV) {
+ return std::string(SV.data(), SV.size());
+}
+
+} // end namespace __orc_rt
+
+#endif // ORC_RT_ADT_H
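A brief usage sketch (illustrative only, not part of the patch) for the two containers defined in adt.h above:

  #include "adt.h"
  #include <cstdio>

  using namespace __orc_rt;

  int main() {
    int Values[] = {1, 2, 3, 4};
    span<int> S(Values); // Size is taken from the array length
    int Sum = 0;
    for (int V : S)
      Sum += V;

    string_view Name("__orc_rt"); // length computed with strlen
    if (Name == string_view("__orc_rt", 8))
      std::printf("%s sums to %d\n", to_string(Name).c_str(), Sum);
    return 0;
  }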
diff --git a/compiler-rt/lib/orc/c_api.h b/compiler-rt/lib/orc/c_api.h
new file mode 100644
index 000000000000..6677da06ede5
--- /dev/null
+++ b/compiler-rt/lib/orc/c_api.h
@@ -0,0 +1,208 @@
+/*===- c_api.h - C API for the ORC runtime ------------------------*- C -*-===*\
+|* *|
+|* Part of the LLVM Project, under the Apache License v2.0 with LLVM *|
+|* Exceptions. *|
+|* See https://llvm.org/LICENSE.txt for license information. *|
+|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception *|
+|* *|
+|*===----------------------------------------------------------------------===*|
+|* *|
+|* This file defines the C API for the ORC runtime *|
+|* *|
+|* NOTE: The OrtRTWrapperFunctionResult type must be kept in sync with the *|
+|* definition in llvm/include/llvm-c/OrcShared.h. *|
+|* *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef ORC_RT_C_API_H
+#define ORC_RT_C_API_H
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+/* Helper to suppress strict prototype warnings. */
+#ifdef __clang__
+#define ORC_RT_C_STRICT_PROTOTYPES_BEGIN \
+ _Pragma("clang diagnostic push") \
+ _Pragma("clang diagnostic error \"-Wstrict-prototypes\"")
+#define ORC_RT_C_STRICT_PROTOTYPES_END _Pragma("clang diagnostic pop")
+#else
+#define ORC_RT_C_STRICT_PROTOTYPES_BEGIN
+#define ORC_RT_C_STRICT_PROTOTYPES_END
+#endif
+
+/* Helper to wrap C code for C++ */
+#ifdef __cplusplus
+#define ORC_RT_C_EXTERN_C_BEGIN \
+ extern "C" { \
+ ORC_RT_C_STRICT_PROTOTYPES_BEGIN
+#define ORC_RT_C_EXTERN_C_END \
+ ORC_RT_C_STRICT_PROTOTYPES_END \
+ }
+#else
+#define ORC_RT_C_EXTERN_C_BEGIN ORC_RT_C_STRICT_PROTOTYPES_BEGIN
+#define ORC_RT_C_EXTERN_C_END ORC_RT_C_STRICT_PROTOTYPES_END
+#endif
+
+ORC_RT_C_EXTERN_C_BEGIN
+
+typedef union {
+ char *ValuePtr;
+ char Value[sizeof(ValuePtr)];
+} __orc_rt_CWrapperFunctionResultDataUnion;
+
+/**
+ * __orc_rt_CWrapperFunctionResult is a kind of C-SmallVector with an
+ * out-of-band error state.
+ *
+ * If Size == 0 and Data.ValuePtr is non-zero then the value is in the
+ * 'out-of-band error' state, and Data.ValuePtr points at a malloc-allocated,
+ * null-terminated string error message.
+ *
+ * If Size <= sizeof(__orc_rt_CWrapperFunctionResultData) then the value is in
+ * the 'small' state and the content is held in the first Size bytes of
+ * Data.Value.
+ *
+ * If Size > sizeof(__orc_rt_CWrapperFunctionResultData) then the value is in the
+ * 'large' state and the content is held in the first Size bytes of the
+ * memory pointed to by Data.ValuePtr. This memory must have been allocated by
+ * malloc, and will be freed with free when this value is destroyed.
+ */
+typedef struct {
+ __orc_rt_CWrapperFunctionResultDataUnion Data;
+ size_t Size;
+} __orc_rt_CWrapperFunctionResult;
+
+typedef struct __orc_rt_CSharedOpaqueJITProcessControl
+ *__orc_rt_SharedJITProcessControlRef;
+
+/**
+ * Zero-initialize an __orc_rt_CWrapperFunctionResult.
+ */
+static inline void
+__orc_rt_CWrapperFunctionResultInit(__orc_rt_CWrapperFunctionResult *R) {
+ R->Size = 0;
+ R->Data.ValuePtr = 0;
+}
+
+/**
+ * Create an __orc_rt_CWrapperFunctionResult with an uninitialized buffer of
+ * size Size. The buffer is returned via the DataPtr argument.
+ */
+static inline char *
+__orc_rt_CWrapperFunctionResultAllocate(__orc_rt_CWrapperFunctionResult *R,
+ size_t Size) {
+ R->Size = Size;
+ if (Size <= sizeof(R->Data.Value))
+ return R->Data.Value;
+
+ R->Data.ValuePtr = (char *)malloc(Size);
+ return R->Data.ValuePtr;
+}
+
+/**
+ * Create an __orc_rt_WrapperFunctionResult from the given data range.
+ */
+static inline __orc_rt_CWrapperFunctionResult
+__orc_rt_CreateCWrapperFunctionResultFromRange(const char *Data, size_t Size) {
+ __orc_rt_CWrapperFunctionResult R;
+ R.Size = Size;
+ if (R.Size > sizeof(R.Data.Value)) {
+ char *Tmp = (char *)malloc(Size);
+ memcpy(Tmp, Data, Size);
+ R.Data.ValuePtr = Tmp;
+ } else
+ memcpy(R.Data.Value, Data, Size);
+ return R;
+}
+
+/**
+ * Create an __orc_rt_CWrapperFunctionResult by copying the given string,
+ * including the null-terminator.
+ *
+ * This function copies the input string. The client is responsible for freeing
+ * the Source arg.
+ */
+static inline __orc_rt_CWrapperFunctionResult
+__orc_rt_CreateCWrapperFunctionResultFromString(const char *Source) {
+ return __orc_rt_CreateCWrapperFunctionResultFromRange(Source,
+ strlen(Source) + 1);
+}
+
+/**
+ * Create an __orc_rt_CWrapperFunctionResult representing an out-of-band
+ * error.
+ *
+ * This function takes ownership of the string argument which must have been
+ * allocated with malloc.
+ */
+static inline __orc_rt_CWrapperFunctionResult
+__orc_rt_CreateCWrapperFunctionResultFromOutOfBandError(const char *ErrMsg) {
+ __orc_rt_CWrapperFunctionResult R;
+ R.Size = 0;
+ char *Tmp = (char *)malloc(strlen(ErrMsg) + 1);
+ strcpy(Tmp, ErrMsg);
+ R.Data.ValuePtr = Tmp;
+ return R;
+}
+
+/**
+ * This should be called to destroy __orc_rt_CWrapperFunctionResult values
+ * regardless of their state.
+ */
+static inline void
+__orc_rt_DisposeCWrapperFunctionResult(__orc_rt_CWrapperFunctionResult *R) {
+ if (R->Size > sizeof(R->Data.Value) ||
+ (R->Size == 0 && R->Data.ValuePtr))
+ free(R->Data.ValuePtr);
+}
+
+/**
+ * Get a pointer to the data contained in the given
+ * __orc_rt_CWrapperFunctionResult.
+ */
+static inline const char *
+__orc_rt_CWrapperFunctionResultData(const __orc_rt_CWrapperFunctionResult *R) {
+ assert((R->Size != 0 || R->Data.ValuePtr == nullptr) &&
+ "Cannot get data for out-of-band error value");
+ return R->Size > sizeof(R->Data.Value) ? R->Data.ValuePtr : R->Data.Value;
+}
+
+/**
+ * Safely get the size of the given __orc_rt_CWrapperFunctionResult.
+ *
+ * Asserts that we're not trying to access the size of an error value.
+ */
+static inline size_t
+__orc_rt_CWrapperFunctionResultSize(const __orc_rt_CWrapperFunctionResult *R) {
+ assert((R->Size != 0 || R->Data.ValuePtr == nullptr) &&
+ "Cannot get size for out-of-band error value");
+ return R->Size;
+}
+
+/**
+ * Returns 1 if this value is equivalent to a value just initialized by
+ * __orc_rt_CWrapperFunctionResultInit, 0 otherwise.
+ */
+static inline size_t
+__orc_rt_CWrapperFunctionResultEmpty(const __orc_rt_CWrapperFunctionResult *R) {
+ return R->Size == 0 && R->Data.ValuePtr == 0;
+}
+
+/**
+ * Returns a pointer to the out-of-band error string for this
+ * __orc_rt_CWrapperFunctionResult, or null if there is no error.
+ *
+ * The __orc_rt_CWrapperFunctionResult retains ownership of the error
+ * string, so it should be copied if the caller wishes to preserve it.
+ */
+static inline const char *__orc_rt_CWrapperFunctionResultGetOutOfBandError(
+ const __orc_rt_CWrapperFunctionResult *R) {
+ return R->Size == 0 ? R->Data.ValuePtr : 0;
+}
+
+ORC_RT_C_EXTERN_C_END
+
+#endif /* ORC_RT_C_API_H */
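The state machine described in the comments above (small / large / out-of-band error) is easiest to see in a short consumer sketch. This is illustrative only and not part of the patch; it just exercises the helpers declared in c_api.h.

  #include "c_api.h"

  int main() {
    // "ok" fits in the inline buffer, so this stays in the 'small' state.
    __orc_rt_CWrapperFunctionResult R =
        __orc_rt_CreateCWrapperFunctionResultFromString("ok");

    if (const char *Err = __orc_rt_CWrapperFunctionResultGetOutOfBandError(&R))
      fprintf(stderr, "error: %s\n", Err);
    else
      printf("payload: %s (%zu bytes)\n",
             __orc_rt_CWrapperFunctionResultData(&R),
             __orc_rt_CWrapperFunctionResultSize(&R));

    // Dispose unconditionally; it handles all three states.
    __orc_rt_DisposeCWrapperFunctionResult(&R);
    return 0;
  }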
diff --git a/compiler-rt/lib/orc/common.h b/compiler-rt/lib/orc/common.h
new file mode 100644
index 000000000000..54e613ecb42e
--- /dev/null
+++ b/compiler-rt/lib/orc/common.h
@@ -0,0 +1,48 @@
+//===- common.h - Common utilities for the ORC runtime ----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of the ORC runtime support library.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef ORC_RT_COMMON_H
+#define ORC_RT_COMMON_H
+
+#include "c_api.h"
+#include "compiler.h"
+#include <type_traits>
+
+/// This macro should be used to define tags that will be associated with
+/// handlers in the JIT process.
+#define ORC_RT_JIT_DISPATCH_TAG(X) \
+extern "C" char X; \
+char X = 0;
+
+/// Opaque struct for external symbols.
+struct __orc_rt_Opaque {};
+
+/// Error reporting function.
+extern "C" void __orc_rt_log_error(const char *ErrMsg);
+
+/// Context object for dispatching calls to the JIT object.
+///
+/// This is declared for use by the runtime, but should be implemented in the
+/// executor or provided by a definition added to the JIT before the runtime
+/// is loaded.
+extern "C" __orc_rt_Opaque __orc_rt_jit_dispatch_ctx ORC_RT_WEAK_IMPORT;
+
+/// For dispatching calls to the JIT object.
+///
+/// This is declared for use by the runtime, but should be implemented in the
+/// executor or provided by a definition added to the JIT before the runtime
+/// is loaded.
+extern "C" __orc_rt_CWrapperFunctionResult
+__orc_rt_jit_dispatch(__orc_rt_Opaque *DispatchCtx, const void *FnTag,
+ const char *Data, size_t Size) ORC_RT_WEAK_IMPORT;
+
+#endif // ORC_RT_COMMON_H
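As a hedged sketch (not part of the patch) of how these declarations compose: a runtime component defines a dispatch tag, then funnels a byte buffer to the JIT via __orc_rt_jit_dispatch and unwraps the result with the c_api.h helpers. The tag name and payload below are hypothetical.

  #include "c_api.h"
  #include "common.h"

  // Hypothetical tag; the controller registers a handler against its address.
  ORC_RT_JIT_DISPATCH_TAG(__orc_rt_example_tag)

  static void callIntoJIT(const char *Buf, size_t Size) {
    __orc_rt_CWrapperFunctionResult R = __orc_rt_jit_dispatch(
        &__orc_rt_jit_dispatch_ctx, &__orc_rt_example_tag, Buf, Size);
    if (const char *Err = __orc_rt_CWrapperFunctionResultGetOutOfBandError(&R))
      __orc_rt_log_error(Err);
    __orc_rt_DisposeCWrapperFunctionResult(&R);
  }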
diff --git a/compiler-rt/lib/orc/compiler.h b/compiler-rt/lib/orc/compiler.h
new file mode 100644
index 000000000000..2e4cd144e335
--- /dev/null
+++ b/compiler-rt/lib/orc/compiler.h
@@ -0,0 +1,65 @@
+//===--------- compiler.h - Compiler abstraction support --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of the ORC runtime support library.
+//
+// Most functionality in this file was swiped from llvm/Support/Compiler.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef ORC_RT_COMPILER_H
+#define ORC_RT_COMPILER_H
+
+#define ORC_RT_INTERFACE extern "C" __attribute__((visibility("default")))
+#define ORC_RT_HIDDEN __attribute__((visibility("hidden")))
+
+#ifndef __has_builtin
+# define __has_builtin(x) 0
+#endif
+
+// Only use __has_cpp_attribute in C++ mode. GCC defines __has_cpp_attribute in
+// C mode, but the :: in __has_cpp_attribute(scoped::attribute) is invalid.
+#ifndef ORC_RT_HAS_CPP_ATTRIBUTE
+#if defined(__cplusplus) && defined(__has_cpp_attribute)
+#define ORC_RT_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
+#else
+#define ORC_RT_HAS_CPP_ATTRIBUTE(x) 0
+#endif
+#endif
+
+// Use the 'nodiscard' attribute in C++17 or newer mode.
+#if defined(__cplusplus) && __cplusplus > 201402L && \
+ ORC_RT_HAS_CPP_ATTRIBUTE(nodiscard)
+#define ORC_RT_NODISCARD [[nodiscard]]
+#elif ORC_RT_HAS_CPP_ATTRIBUTE(clang::warn_unused_result)
+#define ORC_RT_NODISCARD [[clang::warn_unused_result]]
+// Clang in C++14 mode claims that it has the 'nodiscard' attribute, but also
+// warns in the pedantic mode that 'nodiscard' is a C++17 extension (PR33518).
+// Use the 'nodiscard' attribute in C++14 mode only with GCC.
+// TODO: remove this workaround when PR33518 is resolved.
+#elif defined(__GNUC__) && ORC_RT_HAS_CPP_ATTRIBUTE(nodiscard)
+#define ORC_RT_NODISCARD [[nodiscard]]
+#else
+#define ORC_RT_NODISCARD
+#endif
+
+#if __has_builtin(__builtin_expect)
+#define ORC_RT_LIKELY(EXPR) __builtin_expect((bool)(EXPR), true)
+#define ORC_RT_UNLIKELY(EXPR) __builtin_expect((bool)(EXPR), false)
+#else
+#define ORC_RT_LIKELY(EXPR) (EXPR)
+#define ORC_RT_UNLIKELY(EXPR) (EXPR)
+#endif
+
+#ifdef __APPLE__
+#define ORC_RT_WEAK_IMPORT __attribute__((weak_import))
+#else
+#define ORC_RT_WEAK_IMPORT __attribute__((weak))
+#endif
+
+#endif // ORC_RT_COMPILER_H
diff --git a/compiler-rt/lib/orc/endianness.h b/compiler-rt/lib/orc/endianness.h
new file mode 100644
index 000000000000..4ee5505ce6dd
--- /dev/null
+++ b/compiler-rt/lib/orc/endianness.h
@@ -0,0 +1,143 @@
+//===- endian.h - Endianness support ----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares generic and optimized functions to swap the byte order of
+// an integral type.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef ORC_RT_ENDIAN_H
+#define ORC_RT_ENDIAN_H
+
+#include <cstddef>
+#include <cstdint>
+#include <type_traits>
+#if defined(_MSC_VER) && !defined(_DEBUG)
+#include <stdlib.h>
+#endif
+
+#if defined(__linux__) || defined(__GNU__) || defined(__HAIKU__) || \
+ defined(__Fuchsia__) || defined(__EMSCRIPTEN__)
+#include <endian.h>
+#elif defined(_AIX)
+#include <sys/machine.h>
+#elif defined(__sun)
+/* Solaris provides _BIG_ENDIAN/_LITTLE_ENDIAN selector in sys/types.h */
+#include <sys/types.h>
+#define BIG_ENDIAN 4321
+#define LITTLE_ENDIAN 1234
+#if defined(_BIG_ENDIAN)
+#define BYTE_ORDER BIG_ENDIAN
+#else
+#define BYTE_ORDER LITTLE_ENDIAN
+#endif
+#elif defined(__MVS__)
+#define BIG_ENDIAN 4321
+#define LITTLE_ENDIAN 1234
+#define BYTE_ORDER BIG_ENDIAN
+#else
+#if !defined(BYTE_ORDER) && !defined(_WIN32)
+#include <machine/endian.h>
+#endif
+#endif
+
+namespace __orc_rt {
+
+/// ByteSwap_16 - This function returns a byte-swapped representation of
+/// the 16-bit argument.
+inline uint16_t ByteSwap_16(uint16_t value) {
+#if defined(_MSC_VER) && !defined(_DEBUG)
+ // The DLL version of the runtime lacks these functions (bug!?), but in a
+ // release build they're replaced with BSWAP instructions anyway.
+ return _byteswap_ushort(value);
+#else
+ uint16_t Hi = value << 8;
+ uint16_t Lo = value >> 8;
+ return Hi | Lo;
+#endif
+}
+
+/// This function returns a byte-swapped representation of the 32-bit argument.
+inline uint32_t ByteSwap_32(uint32_t value) {
+#if defined(__llvm__) || (defined(__GNUC__) && !defined(__ICC))
+ return __builtin_bswap32(value);
+#elif defined(_MSC_VER) && !defined(_DEBUG)
+ return _byteswap_ulong(value);
+#else
+ uint32_t Byte0 = value & 0x000000FF;
+ uint32_t Byte1 = value & 0x0000FF00;
+ uint32_t Byte2 = value & 0x00FF0000;
+ uint32_t Byte3 = value & 0xFF000000;
+ return (Byte0 << 24) | (Byte1 << 8) | (Byte2 >> 8) | (Byte3 >> 24);
+#endif
+}
+
+/// This function returns a byte-swapped representation of the 64-bit argument.
+inline uint64_t ByteSwap_64(uint64_t value) {
+#if defined(__llvm__) || (defined(__GNUC__) && !defined(__ICC))
+ return __builtin_bswap64(value);
+#elif defined(_MSC_VER) && !defined(_DEBUG)
+ return _byteswap_uint64(value);
+#else
+ uint64_t Hi = ByteSwap_32(uint32_t(value));
+ uint32_t Lo = ByteSwap_32(uint32_t(value >> 32));
+ return (Hi << 32) | Lo;
+#endif
+}
+
+#if defined(BYTE_ORDER) && defined(BIG_ENDIAN) && BYTE_ORDER == BIG_ENDIAN
+constexpr bool IsBigEndianHost = true;
+#else
+constexpr bool IsBigEndianHost = false;
+#endif
+
+static const bool IsLittleEndianHost = !IsBigEndianHost;
+
+inline unsigned char getSwappedBytes(unsigned char C) { return C; }
+inline signed char getSwappedBytes(signed char C) { return C; }
+inline char getSwappedBytes(char C) { return C; }
+
+inline unsigned short getSwappedBytes(unsigned short C) {
+ return ByteSwap_16(C);
+}
+inline signed short getSwappedBytes(signed short C) { return ByteSwap_16(C); }
+
+inline unsigned int getSwappedBytes(unsigned int C) { return ByteSwap_32(C); }
+inline signed int getSwappedBytes(signed int C) { return ByteSwap_32(C); }
+
+inline unsigned long getSwappedBytes(unsigned long C) {
+ // Handle LLP64 and LP64 platforms.
+ return sizeof(long) == sizeof(int) ? ByteSwap_32((uint32_t)C)
+ : ByteSwap_64((uint64_t)C);
+}
+inline signed long getSwappedBytes(signed long C) {
+ // Handle LLP64 and LP64 platforms.
+ return sizeof(long) == sizeof(int) ? ByteSwap_32((uint32_t)C)
+ : ByteSwap_64((uint64_t)C);
+}
+
+inline unsigned long long getSwappedBytes(unsigned long long C) {
+ return ByteSwap_64(C);
+}
+inline signed long long getSwappedBytes(signed long long C) {
+ return ByteSwap_64(C);
+}
+
+template <typename T>
+inline std::enable_if_t<std::is_enum<T>::value, T> getSwappedBytes(T C) {
+ return static_cast<T>(
+ getSwappedBytes(static_cast<std::underlying_type_t<T>>(C)));
+}
+
+template <typename T> inline void swapByteOrder(T &Value) {
+ Value = getSwappedBytes(Value);
+}
+
+} // end namespace __orc_rt
+
+#endif // ORC_RT_ENDIAN_H
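A quick check of the byte-swapping helpers above (illustrative, not part of the patch); the constants are arbitrary.

  #include "endianness.h"
  #include <cassert>

  using namespace __orc_rt;

  int main() {
    assert(ByteSwap_16(0x1234) == 0x3412);
    assert(ByteSwap_32(0x01020304u) == 0x04030201u);

    uint64_t V = 0x0102030405060708ull;
    swapByteOrder(V); // in-place variant built on getSwappedBytes()
    assert(V == 0x0807060504030201ull);

    // IsBigEndianHost / IsLittleEndianHost are compile-time host facts.
    assert(IsBigEndianHost != IsLittleEndianHost);
    return 0;
  }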
diff --git a/compiler-rt/lib/orc/error.h b/compiler-rt/lib/orc/error.h
new file mode 100644
index 000000000000..92ac5a884ac6
--- /dev/null
+++ b/compiler-rt/lib/orc/error.h
@@ -0,0 +1,428 @@
+//===-------- Error.h - Enforced error checking for ORC RT ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef ORC_RT_ERROR_H
+#define ORC_RT_ERROR_H
+
+#include "compiler.h"
+#include "extensible_rtti.h"
+#include "stl_extras.h"
+
+#include <cassert>
+#include <memory>
+#include <string>
+#include <type_traits>
+
+namespace __orc_rt {
+
+/// Base class for all errors.
+class ErrorInfoBase : public RTTIExtends<ErrorInfoBase, RTTIRoot> {
+public:
+ virtual std::string toString() const = 0;
+};
+
+/// Represents an environmental error.
+class ORC_RT_NODISCARD Error {
+
+ template <typename ErrT, typename... ArgTs>
+ friend Error make_error(ArgTs &&...Args);
+
+ friend Error repackage_error(std::unique_ptr<ErrorInfoBase>);
+
+ template <typename ErrT> friend std::unique_ptr<ErrT> error_cast(Error &);
+
+ template <typename T> friend class Expected;
+
+public:
+ /// Destroy this error. Aborts if error was not checked, or was checked but
+ /// not handled.
+ ~Error() { assertIsChecked(); }
+
+ Error(const Error &) = delete;
+ Error &operator=(const Error &) = delete;
+
+ /// Move-construct an error. The newly constructed error is considered
+ /// unchecked, even if the source error had been checked. The original error
+ /// becomes a checked success value.
+ Error(Error &&Other) {
+ setChecked(true);
+ *this = std::move(Other);
+ }
+
+ /// Move-assign an error value. The current error must represent success, as
+ /// you cannot overwrite an unhandled error. The current error is then
+ /// considered unchecked. The source error becomes a checked success value,
+ /// regardless of its original state.
+ Error &operator=(Error &&Other) {
+ // Don't allow overwriting of unchecked values.
+ assertIsChecked();
+ setPtr(Other.getPtr());
+
+ // This Error is unchecked, even if the source error was checked.
+ setChecked(false);
+
+ // Null out Other's payload and set its checked bit.
+ Other.setPtr(nullptr);
+ Other.setChecked(true);
+
+ return *this;
+ }
+
+ /// Create a success value.
+ static Error success() { return Error(); }
+
+ /// Error values convert to true for failure values, false otherwise.
+ explicit operator bool() {
+ setChecked(getPtr() == nullptr);
+ return getPtr() != nullptr;
+ }
+
+ /// Return true if this Error contains a failure value of the given type.
+ template <typename ErrT> bool isA() const {
+ return getPtr() && getPtr()->isA<ErrT>();
+ }
+
+private:
+ Error() = default;
+
+ Error(std::unique_ptr<ErrorInfoBase> ErrInfo) {
+ auto RawErrPtr = reinterpret_cast<uintptr_t>(ErrInfo.release());
+ assert((RawErrPtr & 0x1) == 0 && "ErrorInfo is insufficiently aligned");
+ ErrPtr = RawErrPtr | 0x1;
+ }
+
+ void assertIsChecked() {
+ if (ORC_RT_UNLIKELY(!isChecked() || getPtr())) {
+ fprintf(stderr, "Error must be checked prior to destruction.\n");
+ abort(); // Some sort of JIT program abort?
+ }
+ }
+
+ template <typename ErrT = ErrorInfoBase> ErrT *getPtr() const {
+ return reinterpret_cast<ErrT *>(ErrPtr & ~uintptr_t(1));
+ }
+
+ void setPtr(ErrorInfoBase *Ptr) {
+ ErrPtr = (reinterpret_cast<uintptr_t>(Ptr) & ~uintptr_t(1)) | (ErrPtr & 1);
+ }
+
+ bool isChecked() const { return ErrPtr & 0x1; }
+
+ void setChecked(bool Checked) {
+ ErrPtr = (reinterpret_cast<uintptr_t>(ErrPtr) & ~uintptr_t(1)) | Checked;
+ }
+
+ template <typename ErrT = ErrorInfoBase> std::unique_ptr<ErrT> takePayload() {
+ static_assert(std::is_base_of<ErrorInfoBase, ErrT>::value,
+ "ErrT is not an ErrorInfoBase subclass");
+ std::unique_ptr<ErrT> Tmp(getPtr<ErrT>());
+ setPtr(nullptr);
+ setChecked(true);
+ return Tmp;
+ }
+
+ uintptr_t ErrPtr = 0;
+};
+
+/// Construct an error of ErrT with the given arguments.
+template <typename ErrT, typename... ArgTs> Error make_error(ArgTs &&...Args) {
+ static_assert(std::is_base_of<ErrorInfoBase, ErrT>::value,
+ "ErrT is not an ErrorInfoBase subclass");
+ return Error(std::make_unique<ErrT>(std::forward<ArgTs>(Args)...));
+}
+
+/// Construct an error of ErrT using a std::unique_ptr<ErrorInfoBase>. The
+/// primary use-case for this is 're-packaging' errors after inspecting them
+/// using error_cast, hence the name.
+inline Error repackage_error(std::unique_ptr<ErrorInfoBase> EIB) {
+ return Error(std::move(EIB));
+}
+
+/// If the argument is an error of type ErrT then this function unpacks it
+/// and returns a std::unique_ptr<ErrT>. Otherwise returns a nullptr and
+/// leaves the error untouched. Common usage looks like:
+///
+/// \code{.cpp}
+/// if (Error E = foo()) {
+/// if (auto EV1 = error_cast<ErrorType1>(E)) {
+/// // use unwrapped EV1 value.
+/// } else if (EV2 = error_cast<ErrorType2>(E)) {
+/// // use unwrapped EV2 value.
+/// } ...
+/// }
+/// \endcode
+template <typename ErrT> std::unique_ptr<ErrT> error_cast(Error &Err) {
+ static_assert(std::is_base_of<ErrorInfoBase, ErrT>::value,
+ "ErrT is not an ErrorInfoBase subclass");
+ if (Err.isA<ErrT>())
+ return Err.takePayload<ErrT>();
+ return nullptr;
+}
+
+/// Helper for Errors used as out-parameters.
+/// Sets the 'checked' flag on construction, resets it on destruction.
+class ErrorAsOutParameter {
+public:
+ ErrorAsOutParameter(Error *Err) : Err(Err) {
+ // Raise the checked bit if Err is success.
+ if (Err)
+ (void)!!*Err;
+ }
+
+ ~ErrorAsOutParameter() {
+ // Clear the checked bit.
+ if (Err && !*Err)
+ *Err = Error::success();
+ }
+
+private:
+ Error *Err;
+};
+
+template <typename T> class ORC_RT_NODISCARD Expected {
+
+ template <class OtherT> friend class Expected;
+
+ static constexpr bool IsRef = std::is_reference<T>::value;
+ using wrap = std::reference_wrapper<std::remove_reference_t<T>>;
+ using error_type = std::unique_ptr<ErrorInfoBase>;
+ using storage_type = std::conditional_t<IsRef, wrap, T>;
+ using value_type = T;
+
+ using reference = std::remove_reference_t<T> &;
+ using const_reference = const std::remove_reference_t<T> &;
+ using pointer = std::remove_reference_t<T> *;
+ using const_pointer = const std::remove_reference_t<T> *;
+
+public:
+ /// Create an Expected from a failure value.
+ Expected(Error Err) : HasError(true), Unchecked(true) {
+ assert(Err && "Cannot create Expected<T> from Error success value");
+ new (getErrorStorage()) error_type(Err.takePayload());
+ }
+
+ /// Create an Expected from a T value.
+ template <typename OtherT>
+ Expected(OtherT &&Val,
+ std::enable_if_t<std::is_convertible<OtherT, T>::value> * = nullptr)
+ : HasError(false), Unchecked(true) {
+ new (getStorage()) storage_type(std::forward<OtherT>(Val));
+ }
+
+ /// Move-construct an Expected<T> from an Expected<OtherT>.
+ Expected(Expected &&Other) { moveConstruct(std::move(Other)); }
+
+ /// Move construct an Expected<T> value from an Expected<OtherT>, where OtherT
+ /// must be convertible to T.
+ template <class OtherT>
+ Expected(
+ Expected<OtherT> &&Other,
+ std::enable_if_t<std::is_convertible<OtherT, T>::value> * = nullptr) {
+ moveConstruct(std::move(Other));
+ }
+
+ /// Move construct an Expected<T> value from an Expected<OtherT>, where OtherT
+ /// isn't convertible to T.
+ template <class OtherT>
+ explicit Expected(
+ Expected<OtherT> &&Other,
+ std::enable_if_t<!std::is_convertible<OtherT, T>::value> * = nullptr) {
+ moveConstruct(std::move(Other));
+ }
+
+ /// Move-assign from another Expected<T>.
+ Expected &operator=(Expected &&Other) {
+ moveAssign(std::move(Other));
+ return *this;
+ }
+
+ /// Destroy an Expected<T>.
+ ~Expected() {
+ assertIsChecked();
+ if (!HasError)
+ getStorage()->~storage_type();
+ else
+ getErrorStorage()->~error_type();
+ }
+
+ /// Returns true if this Expected value is in a success state (holding a T),
+ /// and false if this Expected value is in a failure state.
+ explicit operator bool() {
+ Unchecked = HasError;
+ return !HasError;
+ }
+
+ /// Returns true if this Expected value holds an Error of type error_type.
+ template <typename ErrT> bool isFailureOfType() const {
+ return HasError && (*getErrorStorage())->template isFailureOfType<ErrT>();
+ }
+
+ /// Take ownership of the stored error.
+ ///
+ /// If this Expected value is in a success state (holding a T) then this
+ /// method is a no-op and returns Error::success.
+ ///
+ /// If this Expected value is in a failure state (holding an Error) then this
+ /// method returns the contained error and leaves this Expected in an
+ /// 'empty' state from which it may be safely destructed but not otherwise
+ /// accessed.
+ Error takeError() {
+ Unchecked = false;
+ return HasError ? Error(std::move(*getErrorStorage())) : Error::success();
+ }
+
+ /// Returns a pointer to the stored T value.
+ pointer operator->() {
+ assertIsChecked();
+ return toPointer(getStorage());
+ }
+
+ /// Returns a pointer to the stored T value.
+ const_pointer operator->() const {
+ assertIsChecked();
+ return toPointer(getStorage());
+ }
+
+ /// Returns a reference to the stored T value.
+ reference operator*() {
+ assertIsChecked();
+ return *getStorage();
+ }
+
+ /// Returns a reference to the stored T value.
+ const_reference operator*() const {
+ assertIsChecked();
+ return *getStorage();
+ }
+
+private:
+ template <class T1>
+ static bool compareThisIfSameType(const T1 &a, const T1 &b) {
+ return &a == &b;
+ }
+
+ template <class T1, class T2>
+ static bool compareThisIfSameType(const T1 &a, const T2 &b) {
+ return false;
+ }
+
+ template <class OtherT> void moveConstruct(Expected<OtherT> &&Other) {
+ HasError = Other.HasError;
+ Unchecked = true;
+ Other.Unchecked = false;
+
+ if (!HasError)
+ new (getStorage()) storage_type(std::move(*Other.getStorage()));
+ else
+ new (getErrorStorage()) error_type(std::move(*Other.getErrorStorage()));
+ }
+
+ template <class OtherT> void moveAssign(Expected<OtherT> &&Other) {
+ assertIsChecked();
+
+ if (compareThisIfSameType(*this, Other))
+ return;
+
+ this->~Expected();
+ new (this) Expected(std::move(Other));
+ }
+
+ pointer toPointer(pointer Val) { return Val; }
+
+ const_pointer toPointer(const_pointer Val) const { return Val; }
+
+ pointer toPointer(wrap *Val) { return &Val->get(); }
+
+ const_pointer toPointer(const wrap *Val) const { return &Val->get(); }
+
+ storage_type *getStorage() {
+ assert(!HasError && "Cannot get value when an error exists!");
+ return reinterpret_cast<storage_type *>(&TStorage);
+ }
+
+ const storage_type *getStorage() const {
+ assert(!HasError && "Cannot get value when an error exists!");
+ return reinterpret_cast<const storage_type *>(&TStorage);
+ }
+
+ error_type *getErrorStorage() {
+ assert(HasError && "Cannot get error when a value exists!");
+ return reinterpret_cast<error_type *>(&ErrorStorage);
+ }
+
+ const error_type *getErrorStorage() const {
+ assert(HasError && "Cannot get error when a value exists!");
+ return reinterpret_cast<const error_type *>(&ErrorStorage);
+ }
+
+ void assertIsChecked() {
+ if (ORC_RT_UNLIKELY(Unchecked)) {
+ fprintf(stderr,
+ "Expected<T> must be checked before access or destruction.\n");
+ abort();
+ }
+ }
+
+ union {
+ std::aligned_union_t<1, storage_type> TStorage;
+ std::aligned_union_t<1, error_type> ErrorStorage;
+ };
+
+ bool HasError : 1;
+ bool Unchecked : 1;
+};
+
+/// Consume an error without doing anything.
+inline void consumeError(Error Err) {
+ if (Err)
+ (void)error_cast<ErrorInfoBase>(Err);
+}
+
+/// Consumes success values. It is a programmatic error to call this function
+/// on a failure value.
+inline void cantFail(Error Err) {
+ assert(!Err && "cantFail called on failure value");
+ consumeError(std::move(Err));
+}
+
+/// Auto-unwrap an Expected<T> value in the success state. It is a programmatic
+/// error to call this function on a failure value.
+template <typename T> T cantFail(Expected<T> E) {
+ assert(E && "cantFail called on failure value");
+ consumeError(E.takeError());
+ return std::move(*E);
+}
+
+/// Auto-unwrap an Expected<T> value in the success state. It is a programmatic
+/// error to call this function on a failure value.
+template <typename T> T &cantFail(Expected<T &> E) {
+ assert(E && "cantFail called on failure value");
+ consumeError(E.takeError());
+ return *E;
+}
+
+/// Convert the given error to a string. The error value is consumed in the
+/// process.
+inline std::string toString(Error Err) {
+ if (auto EIB = error_cast<ErrorInfoBase>(Err))
+ return EIB->toString();
+ return {};
+}
+
+class StringError : public RTTIExtends<StringError, ErrorInfoBase> {
+public:
+ StringError(std::string ErrMsg) : ErrMsg(std::move(ErrMsg)) {}
+ std::string toString() const override { return ErrMsg; }
+
+private:
+ std::string ErrMsg;
+};
+
+} // end namespace __orc_rt
+
+#endif // ORC_RT_ERROR_H
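Because Error and Expected<T> abort if they are destroyed unchecked, the intended call pattern matters; here is a hedged sketch (not part of the patch) using the StringError type declared above. parsePort is a hypothetical fallible function.

  #include "error.h"
  #include <cstdio>

  using namespace __orc_rt;

  // Hypothetical fallible operation.
  static Expected<int> parsePort(const char *S) {
    if (!S || !*S)
      return make_error<StringError>("empty port string");
    return 8080;
  }

  int main() {
    if (auto PortOrErr = parsePort("8080")) {
      std::printf("port = %d\n", *PortOrErr); // checked by operator bool
    } else {
      // toString() consumes the error, satisfying the check-before-destroy rule.
      std::printf("error: %s\n", toString(PortOrErr.takeError()).c_str());
    }
    return 0;
  }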
diff --git a/compiler-rt/lib/orc/executor_address.h b/compiler-rt/lib/orc/executor_address.h
new file mode 100644
index 000000000000..cfe985bdb60f
--- /dev/null
+++ b/compiler-rt/lib/orc/executor_address.h
@@ -0,0 +1,208 @@
+//===------ ExecutorAddress.h - Executing process address -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Represents an address in the executing program.
+//
+// This file was derived from
+// llvm/include/llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef ORC_RT_EXECUTOR_ADDRESS_H
+#define ORC_RT_EXECUTOR_ADDRESS_H
+
+#include "adt.h"
+#include "simple_packed_serialization.h"
+
+#include <cassert>
+#include <type_traits>
+
+namespace __orc_rt {
+
+/// Represents the difference between two addresses in the executor process.
+class ExecutorAddrDiff {
+public:
+ ExecutorAddrDiff() = default;
+ explicit ExecutorAddrDiff(uint64_t Value) : Value(Value) {}
+
+ uint64_t getValue() const { return Value; }
+
+private:
+ int64_t Value = 0;
+};
+
+/// Represents an address in the executor process.
+class ExecutorAddress {
+public:
+ ExecutorAddress() = default;
+ explicit ExecutorAddress(uint64_t Addr) : Addr(Addr) {}
+
+ /// Create an ExecutorAddress from the given pointer.
+ /// Warning: This should only be used when JITing in-process.
+ template <typename T> static ExecutorAddress fromPtr(T *Value) {
+ return ExecutorAddress(
+ static_cast<uint64_t>(reinterpret_cast<uintptr_t>(Value)));
+ }
+
+ /// Cast this ExecutorAddress to a pointer of the given type.
+ /// Warning: This should only be used when JITing in-process.
+ template <typename T> T toPtr() const {
+ static_assert(std::is_pointer<T>::value, "T must be a pointer type");
+ uintptr_t IntPtr = static_cast<uintptr_t>(Addr);
+ assert(IntPtr == Addr &&
+ "JITTargetAddress value out of range for uintptr_t");
+ return reinterpret_cast<T>(IntPtr);
+ }
+
+ uint64_t getValue() const { return Addr; }
+ void setValue(uint64_t Addr) { this->Addr = Addr; }
+ bool isNull() const { return Addr == 0; }
+
+ explicit operator bool() const { return Addr != 0; }
+
+ friend bool operator==(const ExecutorAddress &LHS,
+ const ExecutorAddress &RHS) {
+ return LHS.Addr == RHS.Addr;
+ }
+
+ friend bool operator!=(const ExecutorAddress &LHS,
+ const ExecutorAddress &RHS) {
+ return LHS.Addr != RHS.Addr;
+ }
+
+ friend bool operator<(const ExecutorAddress &LHS,
+ const ExecutorAddress &RHS) {
+ return LHS.Addr < RHS.Addr;
+ }
+
+ friend bool operator<=(const ExecutorAddress &LHS,
+ const ExecutorAddress &RHS) {
+ return LHS.Addr <= RHS.Addr;
+ }
+
+ friend bool operator>(const ExecutorAddress &LHS,
+ const ExecutorAddress &RHS) {
+ return LHS.Addr > RHS.Addr;
+ }
+
+ friend bool operator>=(const ExecutorAddress &LHS,
+ const ExecutorAddress &RHS) {
+ return LHS.Addr >= RHS.Addr;
+ }
+
+ ExecutorAddress &operator++() {
+ ++Addr;
+ return *this;
+ }
+ ExecutorAddress &operator--() {
+ --Addr;
+ return *this;
+ }
+ ExecutorAddress operator++(int) { return ExecutorAddress(Addr++); }
+ ExecutorAddress operator--(int) { return ExecutorAddress(Addr--); }
+
+ ExecutorAddress &operator+=(const ExecutorAddrDiff Delta) {
+ Addr += Delta.getValue();
+ return *this;
+ }
+
+ ExecutorAddress &operator-=(const ExecutorAddrDiff Delta) {
+ Addr -= Delta.getValue();
+ return *this;
+ }
+
+private:
+ uint64_t Addr = 0;
+};
+
+/// Subtracting two addresses yields an offset.
+inline ExecutorAddrDiff operator-(const ExecutorAddress &LHS,
+ const ExecutorAddress &RHS) {
+ return ExecutorAddrDiff(LHS.getValue() - RHS.getValue());
+}
+
+/// Adding an offset and an address yields an address.
+inline ExecutorAddress operator+(const ExecutorAddress &LHS,
+ const ExecutorAddrDiff &RHS) {
+ return ExecutorAddress(LHS.getValue() + RHS.getValue());
+}
+
+/// Adding an address and an offset yields an address.
+inline ExecutorAddress operator+(const ExecutorAddrDiff &LHS,
+ const ExecutorAddress &RHS) {
+ return ExecutorAddress(LHS.getValue() + RHS.getValue());
+}
+
+/// Represents an address range in the executor process.
+struct ExecutorAddressRange {
+ ExecutorAddressRange() = default;
+ ExecutorAddressRange(ExecutorAddress StartAddress, ExecutorAddress EndAddress)
+ : StartAddress(StartAddress), EndAddress(EndAddress) {}
+
+ bool empty() const { return StartAddress == EndAddress; }
+ ExecutorAddrDiff size() const { return EndAddress - StartAddress; }
+
+ template <typename T> span<T> toSpan() const {
+ assert(size().getValue() % sizeof(T) == 0 &&
+ "AddressRange is not a multiple of sizeof(T)");
+ return span<T>(StartAddress.toPtr<T *>(), size().getValue() / sizeof(T));
+ }
+
+ ExecutorAddress StartAddress;
+ ExecutorAddress EndAddress;
+};
+
+/// SPS serializer for ExecutorAddress.
+template <> class SPSSerializationTraits<SPSExecutorAddress, ExecutorAddress> {
+public:
+ static size_t size(const ExecutorAddress &EA) {
+ return SPSArgList<uint64_t>::size(EA.getValue());
+ }
+
+ static bool serialize(SPSOutputBuffer &BOB, const ExecutorAddress &EA) {
+ return SPSArgList<uint64_t>::serialize(BOB, EA.getValue());
+ }
+
+ static bool deserialize(SPSInputBuffer &BIB, ExecutorAddress &EA) {
+ uint64_t Tmp;
+ if (!SPSArgList<uint64_t>::deserialize(BIB, Tmp))
+ return false;
+ EA = ExecutorAddress(Tmp);
+ return true;
+ }
+};
+
+using SPSExecutorAddressRange =
+ SPSTuple<SPSExecutorAddress, SPSExecutorAddress>;
+
+/// Serialization traits for address ranges.
+template <>
+class SPSSerializationTraits<SPSExecutorAddressRange, ExecutorAddressRange> {
+public:
+ static size_t size(const ExecutorAddressRange &Value) {
+ return SPSArgList<SPSExecutorAddress, SPSExecutorAddress>::size(
+ Value.StartAddress, Value.EndAddress);
+ }
+
+ static bool serialize(SPSOutputBuffer &BOB,
+ const ExecutorAddressRange &Value) {
+ return SPSArgList<SPSExecutorAddress, SPSExecutorAddress>::serialize(
+ BOB, Value.StartAddress, Value.EndAddress);
+ }
+
+ static bool deserialize(SPSInputBuffer &BIB, ExecutorAddressRange &Value) {
+ return SPSArgList<SPSExecutorAddress, SPSExecutorAddress>::deserialize(
+ BIB, Value.StartAddress, Value.EndAddress);
+ }
+};
+
+using SPSExecutorAddressRangeSequence = SPSSequence<SPSExecutorAddressRange>;
+
+} // End namespace __orc_rt
+
+#endif // ORC_RT_EXECUTOR_ADDRESS_H
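An in-process round trip through the address wrapper above, as a hedged sketch (not part of the patch); per the warnings on fromPtr/toPtr this is only meaningful when the executor is the current process.

  #include "executor_address.h"
  #include <cassert>

  using namespace __orc_rt;

  int main() {
    int X = 42;

    ExecutorAddress A = ExecutorAddress::fromPtr(&X);
    assert(*A.toPtr<int *>() == 42);

    // Ranges are half-open [Start, End) and know their size in bytes.
    ExecutorAddressRange R(A, A + ExecutorAddrDiff(sizeof(int)));
    assert(!R.empty());
    assert(R.size().getValue() == sizeof(int));
    return 0;
  }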
diff --git a/compiler-rt/lib/orc/extensible_rtti.cpp b/compiler-rt/lib/orc/extensible_rtti.cpp
new file mode 100644
index 000000000000..c6951a449a3d
--- /dev/null
+++ b/compiler-rt/lib/orc/extensible_rtti.cpp
@@ -0,0 +1,24 @@
+//===- extensible_rtti.cpp ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of the ORC runtime support library.
+//
+// Note:
+// This source file was adapted from lib/Support/ExtensibleRTTI.cpp, however
+// the data structures are not shared and the code need not be kept in sync.
+//
+//===----------------------------------------------------------------------===//
+
+#include "extensible_rtti.h"
+
+namespace __orc_rt {
+
+char RTTIRoot::ID = 0;
+void RTTIRoot::anchor() {}
+
+} // end namespace __orc_rt
diff --git a/compiler-rt/lib/orc/extensible_rtti.h b/compiler-rt/lib/orc/extensible_rtti.h
new file mode 100644
index 000000000000..72f68242e7c4
--- /dev/null
+++ b/compiler-rt/lib/orc/extensible_rtti.h
@@ -0,0 +1,145 @@
+//===------ extensible_rtti.h - Extensible RTTI for ORC RT ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// \file
+//
+// Provides an extensible RTTI mechanism, that can be used regardless of whether
+// the runtime is built with -frtti or not. This is predominantly used to
+// support error handling.
+//
+// The RTTIRoot class defines methods for comparing type ids. Implementations
+// of these methods can be injected into new classes using the RTTIExtends
+// class template.
+//
+// E.g.
+//
+// @code{.cpp}
+// class MyBaseClass : public RTTIExtends<MyBaseClass, RTTIRoot> {
+// public:
+// static char ID;
+// virtual void foo() = 0;
+// };
+//
+// class MyDerivedClass1 : public RTTIExtends<MyDerivedClass1, MyBaseClass> {
+// public:
+// static char ID;
+// void foo() override {}
+// };
+//
+// class MyDerivedClass2 : public RTTIExtends<MyDerivedClass2, MyBaseClass> {
+// public:
+// static char ID;
+// void foo() override {}
+// };
+//
+// char MyBaseClass::ID = 0;
+// char MyDerivedClass1::ID = 0;
+// char MyDerivedClass2::ID = 0;
+//
+// void fn() {
+// std::unique_ptr<MyBaseClass> B = std::make_unique<MyDerivedClass1>();
+// outs() << isa<MyBaseClass>(B) << "\n"; // Outputs "1".
+// outs() << isa<MyDerivedClass1>(B) << "\n"; // Outputs "1".
+// outs() << isa<MyDerivedClass2>(B) << "\n"; // Outputs "0".
+// }
+//
+// @endcode
+//
+// Note:
+// This header was adapted from llvm/Support/ExtensibleRTTI.h, however the
+// data structures are not shared and the code need not be kept in sync.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef ORC_RT_EXTENSIBLE_RTTI_H
+#define ORC_RT_EXTENSIBLE_RTTI_H
+
+namespace __orc_rt {
+
+template <typename ThisT, typename ParentT> class RTTIExtends;
+
+/// Base class for the extensible RTTI hierarchy.
+///
+/// This class defines virtual methods, dynamicClassID and isA, that enable
+/// type comparisons.
+class RTTIRoot {
+public:
+ virtual ~RTTIRoot() = default;
+
+ /// Returns the class ID for this type.
+ static const void *classID() { return &ID; }
+
+ /// Returns the class ID for the dynamic type of this RTTIRoot instance.
+ virtual const void *dynamicClassID() const = 0;
+
+ /// Returns true if this class's ID matches the given class ID.
+ virtual bool isA(const void *const ClassID) const {
+ return ClassID == classID();
+ }
+
+ /// Check whether this instance is a subclass of QueryT.
+ template <typename QueryT> bool isA() const { return isA(QueryT::classID()); }
+
+ static bool classof(const RTTIRoot *R) { return R->isA<RTTIRoot>(); }
+
+private:
+ virtual void anchor();
+
+ static char ID;
+};
+
+/// Inheritance utility for extensible RTTI.
+///
+/// Supports single inheritance only: A class can only have one
+/// ExtensibleRTTI-parent (i.e. a parent for which the isa<> test will work),
+/// though it can have many non-ExtensibleRTTI parents.
+///
+/// RTTIExtends uses CRTP, so the first template argument to RTTIExtends is the
+/// newly introduced type, and the *second* argument is the parent class.
+///
+/// class MyType : public RTTIExtends<MyType, RTTIRoot> {
+/// public:
+/// static char ID;
+/// };
+///
+/// class MyDerivedType : public RTTIExtends<MyDerivedType, MyType> {
+/// public:
+/// static char ID;
+/// };
+///
+template <typename ThisT, typename ParentT> class RTTIExtends : public ParentT {
+public:
+ // Inherit constructors and isA methods from ParentT.
+ using ParentT::isA;
+ using ParentT::ParentT;
+
+ static char ID;
+
+ static const void *classID() { return &ThisT::ID; }
+
+ const void *dynamicClassID() const override { return &ThisT::ID; }
+
+ bool isA(const void *const ClassID) const override {
+ return ClassID == classID() || ParentT::isA(ClassID);
+ }
+
+ static bool classof(const RTTIRoot *R) { return R->isA<ThisT>(); }
+};
+
+template <typename ThisT, typename ParentT>
+char RTTIExtends<ThisT, ParentT>::ID = 0;
+
+/// Returns true if the given value is an instance of the template type
+/// parameter.
+template <typename To, typename From> bool isa(const From &Value) {
+ return To::classof(&Value);
+}
+
+} // end namespace __orc_rt
+
+#endif // ORC_RT_EXTENSIBLE_RTTI_H
diff --git a/compiler-rt/lib/orc/log_error_to_stderr.cpp b/compiler-rt/lib/orc/log_error_to_stderr.cpp
new file mode 100644
index 000000000000..4fabbc0d212a
--- /dev/null
+++ b/compiler-rt/lib/orc/log_error_to_stderr.cpp
@@ -0,0 +1,19 @@
+//===-- log_error_to_stderr.cpp -------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of the ORC runtime support library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "compiler.h"
+
+#include <stdio.h>
+
+ORC_RT_INTERFACE void __orc_rt_log_error_to_stderr(const char *ErrMsg) {
+ fprintf(stderr, "orc runtime error: %s\n", ErrMsg);
+}
diff --git a/compiler-rt/lib/orc/macho_platform.cpp b/compiler-rt/lib/orc/macho_platform.cpp
new file mode 100644
index 000000000000..2a960fb548fa
--- /dev/null
+++ b/compiler-rt/lib/orc/macho_platform.cpp
@@ -0,0 +1,731 @@
+//===- macho_platform.cpp -------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains code required to load the rest of the MachO runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#include "macho_platform.h"
+#include "common.h"
+#include "error.h"
+#include "wrapper_function_utils.h"
+
+#include <map>
+#include <mutex>
+#include <pthread.h>
+#include <sstream>
+#include <string.h>
+#include <unordered_map>
+#include <vector>
+
+using namespace __orc_rt;
+using namespace __orc_rt::macho;
+
+// Declare function tags for functions in the JIT process.
+ORC_RT_JIT_DISPATCH_TAG(__orc_rt_macho_get_initializers_tag)
+ORC_RT_JIT_DISPATCH_TAG(__orc_rt_macho_get_deinitializers_tag)
+ORC_RT_JIT_DISPATCH_TAG(__orc_rt_macho_symbol_lookup_tag)
+
+// eh-frame registration functions.
+// We expect these to be available for all processes.
+extern "C" void __register_frame(const void *);
+extern "C" void __deregister_frame(const void *);
+
+// Objective-C types.
+struct objc_class;
+struct objc_image_info;
+struct objc_object;
+struct objc_selector;
+
+using Class = objc_class *;
+using id = objc_object *;
+using SEL = objc_selector *;
+
+// Objective-C registration functions.
+// These are weakly imported. If the Objective-C runtime has not been loaded
+// then code containing Objective-C sections will generate an error.
+extern "C" id objc_msgSend(id, SEL, ...) ORC_RT_WEAK_IMPORT;
+extern "C" Class objc_readClassPair(Class,
+ const objc_image_info *) ORC_RT_WEAK_IMPORT;
+extern "C" SEL sel_registerName(const char *) ORC_RT_WEAK_IMPORT;
+
+// Swift types.
+class ProtocolRecord;
+class ProtocolConformanceRecord;
+
+extern "C" void
+swift_registerProtocols(const ProtocolRecord *begin,
+ const ProtocolRecord *end) ORC_RT_WEAK_IMPORT;
+
+extern "C" void swift_registerProtocolConformances(
+ const ProtocolConformanceRecord *begin,
+ const ProtocolConformanceRecord *end) ORC_RT_WEAK_IMPORT;
+
+namespace {
+
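+// Walk an __eh_frame section, calling HandleFDE on each FDE record.
+//
+// Each CFI record begins with a 4-byte length field (0xffffffff indicates
+// that an 8-byte extended length follows), then a 4-byte CIE-pointer field
+// that is zero for CIEs and non-zero for FDEs; only FDE records are passed
+// to HandleFDE. A zero length terminates the walk.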
+template <typename HandleFDEFn>
+void walkEHFrameSection(span<const char> EHFrameSection,
+ HandleFDEFn HandleFDE) {
+ const char *CurCFIRecord = EHFrameSection.data();
+ uint64_t Size = *reinterpret_cast<const uint32_t *>(CurCFIRecord);
+
+ while (CurCFIRecord != EHFrameSection.end() && Size != 0) {
+ const char *OffsetField = CurCFIRecord + (Size == 0xffffffff ? 12 : 4);
+ if (Size == 0xffffffff)
+ Size = *reinterpret_cast<const uint64_t *>(CurCFIRecord + 4) + 12;
+ else
+ Size += 4;
+ uint32_t Offset = *reinterpret_cast<const uint32_t *>(OffsetField);
+
+ if (Offset != 0)
+ HandleFDE(CurCFIRecord);
+
+ CurCFIRecord += Size;
+ Size = *reinterpret_cast<const uint32_t *>(CurCFIRecord);
+ }
+}
+
+Error validatePointerSectionExtent(const char *SectionName,
+ const ExecutorAddressRange &SE) {
+ if (SE.size().getValue() % sizeof(uintptr_t)) {
+ std::ostringstream ErrMsg;
+ ErrMsg << std::hex << "Size of " << SectionName << " 0x"
+ << SE.StartAddress.getValue() << " -- 0x" << SE.EndAddress.getValue()
+ << " is not a pointer multiple";
+ return make_error<StringError>(ErrMsg.str());
+ }
+ return Error::success();
+}
+
+Error registerObjCSelectors(
+ const std::vector<ExecutorAddressRange> &ObjCSelRefsSections,
+ const MachOJITDylibInitializers &MOJDIs) {
+
+ if (ORC_RT_UNLIKELY(!sel_registerName))
+ return make_error<StringError>("sel_registerName is not available");
+
+ for (const auto &ObjCSelRefs : ObjCSelRefsSections) {
+
+ if (auto Err = validatePointerSectionExtent("__objc_selrefs", ObjCSelRefs))
+ return Err;
+
+ fprintf(stderr, "Processing selrefs section at 0x%llx\n",
+ ObjCSelRefs.StartAddress.getValue());
+ for (uintptr_t SelEntry : ObjCSelRefs.toSpan<uintptr_t>()) {
+ const char *SelName = reinterpret_cast<const char *>(SelEntry);
+ fprintf(stderr, "Registering selector \"%s\"\n", SelName);
+ auto Sel = sel_registerName(SelName);
+ *reinterpret_cast<SEL *>(SelEntry) = Sel;
+ }
+ }
+
+ return Error::success();
+}
+
+Error registerObjCClasses(
+ const std::vector<ExecutorAddressRange> &ObjCClassListSections,
+ const MachOJITDylibInitializers &MOJDIs) {
+
+ if (ObjCClassListSections.empty())
+ return Error::success();
+
+ if (ORC_RT_UNLIKELY(!objc_msgSend))
+ return make_error<StringError>("objc_msgSend is not available");
+ if (ORC_RT_UNLIKELY(!objc_readClassPair))
+ return make_error<StringError>("objc_readClassPair is not available");
+
+ struct ObjCClassCompiled {
+ void *Metaclass;
+ void *Parent;
+ void *Cache1;
+ void *Cache2;
+ void *Data;
+ };
+
+ auto *ImageInfo =
+ MOJDIs.ObjCImageInfoAddress.toPtr<const objc_image_info *>();
+ auto ClassSelector = sel_registerName("class");
+
+ for (const auto &ObjCClassList : ObjCClassListSections) {
+
+ if (auto Err =
+ validatePointerSectionExtent("__objc_classlist", ObjCClassList))
+ return Err;
+
+ for (uintptr_t ClassPtr : ObjCClassList.toSpan<uintptr_t>()) {
+ auto *Cls = reinterpret_cast<Class>(ClassPtr);
+ auto *ClassCompiled = reinterpret_cast<ObjCClassCompiled *>(ClassPtr);
+ objc_msgSend(reinterpret_cast<id>(ClassCompiled->Parent), ClassSelector);
+ auto Registered = objc_readClassPair(Cls, ImageInfo);
+
+ // FIXME: Improve diagnostic by reporting the failed class's name.
+ if (Registered != Cls)
+ return make_error<StringError>("Unable to register Objective-C class");
+ }
+ }
+ return Error::success();
+}
+
+Error registerSwift5Protocols(
+ const std::vector<ExecutorAddressRange> &Swift5ProtocolSections,
+ const MachOJITDylibInitializers &MOJDIs) {
+
+ if (ORC_RT_UNLIKELY(!Swift5ProtocolSections.empty() &&
+ !swift_registerProtocols))
+ return make_error<StringError>("swift_registerProtocols is not available");
+
+ for (const auto &Swift5Protocols : Swift5ProtocolSections)
+ swift_registerProtocols(
+ Swift5Protocols.StartAddress.toPtr<const ProtocolRecord *>(),
+ Swift5Protocols.EndAddress.toPtr<const ProtocolRecord *>());
+
+ return Error::success();
+}
+
+Error registerSwift5ProtocolConformances(
+ const std::vector<ExecutorAddressRange> &Swift5ProtocolConformanceSections,
+ const MachOJITDylibInitializers &MOJDIs) {
+
+ if (ORC_RT_UNLIKELY(!Swift5ProtocolConformanceSections.empty() &&
+ !swift_registerProtocolConformances))
+ return make_error<StringError>(
+ "swift_registerProtocolConformances is not available");
+
+ for (const auto &ProtoConfSec : Swift5ProtocolConformanceSections)
+ swift_registerProtocolConformances(
+ ProtoConfSec.StartAddress.toPtr<const ProtocolConformanceRecord *>(),
+ ProtoConfSec.EndAddress.toPtr<const ProtocolConformanceRecord *>());
+
+ return Error::success();
+}
+
+Error runModInits(const std::vector<ExecutorAddressRange> &ModInitsSections,
+ const MachOJITDylibInitializers &MOJDIs) {
+
+ for (const auto &ModInits : ModInitsSections) {
+ if (auto Err = validatePointerSectionExtent("__mod_inits", ModInits))
+ return Err;
+
+ using InitFunc = void (*)();
+ for (auto *Init : ModInits.toSpan<InitFunc>())
+ (*Init)();
+ }
+
+ return Error::success();
+}
+
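+// Descriptor for a JIT'd MachO thread-local variable.
+// __orc_rt_macho_tlv_get_addr_impl (below) uses Key to find the calling
+// thread's TLV manager and DataAddress to locate the variable's initial
+// image within its registered __thread_data section.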
+struct TLVDescriptor {
+ void *(*Thunk)(TLVDescriptor *) = nullptr;
+ unsigned long Key = 0;
+ unsigned long DataAddress = 0;
+};
+
+class MachOPlatformRuntimeState {
+private:
+ struct AtExitEntry {
+ void (*Func)(void *);
+ void *Arg;
+ };
+
+ using AtExitsVector = std::vector<AtExitEntry>;
+
+ struct PerJITDylibState {
+ void *Header = nullptr;
+ size_t RefCount = 0;
+ bool AllowReinitialization = false;
+ AtExitsVector AtExits;
+ };
+
+public:
+ static void initialize();
+ static MachOPlatformRuntimeState &get();
+ static void destroy();
+
+ MachOPlatformRuntimeState() = default;
+
+ // Delete copy and move constructors.
+ MachOPlatformRuntimeState(const MachOPlatformRuntimeState &) = delete;
+ MachOPlatformRuntimeState &
+ operator=(const MachOPlatformRuntimeState &) = delete;
+ MachOPlatformRuntimeState(MachOPlatformRuntimeState &&) = delete;
+ MachOPlatformRuntimeState &operator=(MachOPlatformRuntimeState &&) = delete;
+
+ Error registerObjectSections(MachOPerObjectSectionsToRegister POSR);
+ Error deregisterObjectSections(MachOPerObjectSectionsToRegister POSR);
+
+ const char *dlerror();
+ void *dlopen(string_view Name, int Mode);
+ int dlclose(void *DSOHandle);
+ void *dlsym(void *DSOHandle, string_view Symbol);
+
+ int registerAtExit(void (*F)(void *), void *Arg, void *DSOHandle);
+ void runAtExits(void *DSOHandle);
+
+ /// Returns the base address of the section containing ThreadData.
+ Expected<std::pair<const char *, size_t>>
+ getThreadDataSectionFor(const char *ThreadData);
+
+private:
+ PerJITDylibState *getJITDylibStateByHeaderAddr(void *DSOHandle);
+ PerJITDylibState *getJITDylibStateByName(string_view Path);
+ PerJITDylibState &getOrCreateJITDylibState(MachOJITDylibInitializers &MOJDIs);
+
+ Error registerThreadDataSection(span<const char> ThreadDataSec);
+
+ Expected<ExecutorAddress> lookupSymbolInJITDylib(void *DSOHandle,
+ string_view Symbol);
+
+ Expected<MachOJITDylibInitializerSequence>
+ getJITDylibInitializersByName(string_view Path);
+ Expected<void *> dlopenInitialize(string_view Path, int Mode);
+ Error initializeJITDylib(MachOJITDylibInitializers &MOJDIs);
+
+ static MachOPlatformRuntimeState *MOPS;
+
+ using InitSectionHandler =
+ Error (*)(const std::vector<ExecutorAddressRange> &Sections,
+ const MachOJITDylibInitializers &MOJDIs);
+ const std::vector<std::pair<const char *, InitSectionHandler>> InitSections =
+ {{"__DATA,__objc_selrefs", registerObjCSelectors},
+ {"__DATA,__objc_classlist", registerObjCClasses},
+ {"__TEXT,__swift5_protos", registerSwift5Protocols},
+ {"__TEXT,__swift5_proto", registerSwift5ProtocolConformances},
+ {"__DATA,__mod_init_func", runModInits}};
+
+ // FIXME: Move to thread-state.
+ std::string DLFcnError;
+
+ std::recursive_mutex JDStatesMutex;
+ std::unordered_map<void *, PerJITDylibState> JDStates;
+ std::unordered_map<std::string, void *> JDNameToHeader;
+
+ std::mutex ThreadDataSectionsMutex;
+ std::map<const char *, size_t> ThreadDataSections;
+};
+
+MachOPlatformRuntimeState *MachOPlatformRuntimeState::MOPS = nullptr;
+
+void MachOPlatformRuntimeState::initialize() {
+ assert(!MOPS && "MachOPlatformRuntimeState should be null");
+ MOPS = new MachOPlatformRuntimeState();
+}
+
+MachOPlatformRuntimeState &MachOPlatformRuntimeState::get() {
+ assert(MOPS && "MachOPlatformRuntimeState not initialized");
+ return *MOPS;
+}
+
+void MachOPlatformRuntimeState::destroy() {
+ assert(MOPS && "MachOPlatformRuntimeState not initialized");
+ delete MOPS;
+}
+
+Error MachOPlatformRuntimeState::registerObjectSections(
+ MachOPerObjectSectionsToRegister POSR) {
+ if (POSR.EHFrameSection.StartAddress)
+ walkEHFrameSection(POSR.EHFrameSection.toSpan<const char>(),
+ __register_frame);
+
+ if (POSR.ThreadDataSection.StartAddress) {
+ if (auto Err = registerThreadDataSection(
+ POSR.ThreadDataSection.toSpan<const char>()))
+ return Err;
+ }
+
+ return Error::success();
+}
+
+Error MachOPlatformRuntimeState::deregisterObjectSections(
+ MachOPerObjectSectionsToRegister POSR) {
+ if (POSR.EHFrameSection.StartAddress)
+ walkEHFrameSection(POSR.EHFrameSection.toSpan<const char>(),
+ __deregister_frame);
+
+ return Error::success();
+}
+
+const char *MachOPlatformRuntimeState::dlerror() { return DLFcnError.c_str(); }
+
+void *MachOPlatformRuntimeState::dlopen(string_view Path, int Mode) {
+ std::lock_guard<std::recursive_mutex> Lock(JDStatesMutex);
+
+ // Use fast path if all JITDylibs are already loaded and don't require
+ // re-running initializers.
+ if (auto *JDS = getJITDylibStateByName(Path)) {
+ if (!JDS->AllowReinitialization) {
+ ++JDS->RefCount;
+ return JDS->Header;
+ }
+ }
+
+ auto H = dlopenInitialize(Path, Mode);
+ if (!H) {
+ DLFcnError = toString(H.takeError());
+ return nullptr;
+ }
+
+ return *H;
+}
+
+int MachOPlatformRuntimeState::dlclose(void *DSOHandle) {
+ runAtExits(DSOHandle);
+ return 0;
+}
+
+void *MachOPlatformRuntimeState::dlsym(void *DSOHandle, string_view Symbol) {
+ auto Addr = lookupSymbolInJITDylib(DSOHandle, Symbol);
+ if (!Addr) {
+ DLFcnError = toString(Addr.takeError());
+ return 0;
+ }
+
+ return Addr->toPtr<void *>();
+}
+
+int MachOPlatformRuntimeState::registerAtExit(void (*F)(void *), void *Arg,
+ void *DSOHandle) {
+ // FIXME: Handle out-of-memory errors, returning -1 if OOM.
+ std::lock_guard<std::recursive_mutex> Lock(JDStatesMutex);
+ auto *JDS = getJITDylibStateByHeaderAddr(DSOHandle);
+ assert(JDS && "JITDylib state not initialized");
+ JDS->AtExits.push_back({F, Arg});
+ return 0;
+}
+
+void MachOPlatformRuntimeState::runAtExits(void *DSOHandle) {
+ // FIXME: Should atexits be allowed to run concurrently with access to
+ // JDState?
+ AtExitsVector V;
+ {
+ std::lock_guard<std::recursive_mutex> Lock(JDStatesMutex);
+ auto *JDS = getJITDylibStateByHeaderAddr(DSOHandle);
+    assert(JDS && "JITDylib state not initialized");
+ std::swap(V, JDS->AtExits);
+ }
+
+ while (!V.empty()) {
+ auto &AE = V.back();
+ AE.Func(AE.Arg);
+ V.pop_back();
+ }
+}
+
+Expected<std::pair<const char *, size_t>>
+MachOPlatformRuntimeState::getThreadDataSectionFor(const char *ThreadData) {
+ std::lock_guard<std::mutex> Lock(ThreadDataSectionsMutex);
+ auto I = ThreadDataSections.upper_bound(ThreadData);
+ // Check that we have a valid entry covering this address.
+ if (I == ThreadDataSections.begin())
+ return make_error<StringError>("No thread local data section for key");
+ I = std::prev(I);
+ if (ThreadData >= I->first + I->second)
+ return make_error<StringError>("No thread local data section for key");
+ return *I;
+}
+
+MachOPlatformRuntimeState::PerJITDylibState *
+MachOPlatformRuntimeState::getJITDylibStateByHeaderAddr(void *DSOHandle) {
+ auto I = JDStates.find(DSOHandle);
+ if (I == JDStates.end())
+ return nullptr;
+ return &I->second;
+}
+
+MachOPlatformRuntimeState::PerJITDylibState *
+MachOPlatformRuntimeState::getJITDylibStateByName(string_view Name) {
+ // FIXME: Avoid creating string copy here.
+ auto I = JDNameToHeader.find(std::string(Name.data(), Name.size()));
+ if (I == JDNameToHeader.end())
+ return nullptr;
+ void *H = I->second;
+ auto J = JDStates.find(H);
+ assert(J != JDStates.end() &&
+ "JITDylib has name map entry but no header map entry");
+ return &J->second;
+}
+
+MachOPlatformRuntimeState::PerJITDylibState &
+MachOPlatformRuntimeState::getOrCreateJITDylibState(
+ MachOJITDylibInitializers &MOJDIs) {
+ void *Header = MOJDIs.MachOHeaderAddress.toPtr<void *>();
+
+ auto &JDS = JDStates[Header];
+
+  // If this entry hasn't been created yet, initialize it.
+ if (!JDS.Header) {
+ assert(!JDNameToHeader.count(MOJDIs.Name) &&
+ "JITDylib has header map entry but no name map entry");
+ JDNameToHeader[MOJDIs.Name] = Header;
+ JDS.Header = Header;
+ }
+
+ return JDS;
+}
+
+Error MachOPlatformRuntimeState::registerThreadDataSection(
+ span<const char> ThreadDataSection) {
+ std::lock_guard<std::mutex> Lock(ThreadDataSectionsMutex);
+ auto I = ThreadDataSections.upper_bound(ThreadDataSection.data());
+ if (I != ThreadDataSections.begin()) {
+ auto J = std::prev(I);
+ if (J->first + J->second > ThreadDataSection.data())
+ return make_error<StringError>("Overlapping __thread_data sections");
+ }
+ ThreadDataSections.insert(
+ I, std::make_pair(ThreadDataSection.data(), ThreadDataSection.size()));
+ return Error::success();
+}
+
+Expected<ExecutorAddress>
+MachOPlatformRuntimeState::lookupSymbolInJITDylib(void *DSOHandle,
+ string_view Sym) {
+ Expected<ExecutorAddress> Result((ExecutorAddress()));
+ if (auto Err = WrapperFunction<SPSExpected<SPSExecutorAddress>(
+ SPSExecutorAddress,
+ SPSString)>::call(&__orc_rt_macho_symbol_lookup_tag, Result,
+ ExecutorAddress::fromPtr(DSOHandle), Sym))
+ return std::move(Err);
+ return Result;
+}
+
+Expected<MachOJITDylibInitializerSequence>
+MachOPlatformRuntimeState::getJITDylibInitializersByName(string_view Path) {
+ Expected<MachOJITDylibInitializerSequence> Result(
+ (MachOJITDylibInitializerSequence()));
+ std::string PathStr(Path.data(), Path.size());
+ if (auto Err =
+ WrapperFunction<SPSExpected<SPSMachOJITDylibInitializerSequence>(
+ SPSString)>::call(&__orc_rt_macho_get_initializers_tag, Result,
+ Path))
+ return std::move(Err);
+ return Result;
+}
+
+Expected<void *> MachOPlatformRuntimeState::dlopenInitialize(string_view Path,
+ int Mode) {
+ // Either our JITDylib wasn't loaded, or it or one of its dependencies allows
+  // reinitialization. We need to call into the JIT to see if there's any new
+ // work pending.
+ auto InitSeq = getJITDylibInitializersByName(Path);
+ if (!InitSeq)
+ return InitSeq.takeError();
+
+ // Init sequences should be non-empty.
+ if (InitSeq->empty())
+ return make_error<StringError>(
+ "__orc_rt_macho_get_initializers returned an "
+ "empty init sequence");
+
+ // Otherwise register and run initializers for each JITDylib.
+ for (auto &MOJDIs : *InitSeq)
+ if (auto Err = initializeJITDylib(MOJDIs))
+ return std::move(Err);
+
+ // Return the header for the last item in the list.
+ auto *JDS = getJITDylibStateByHeaderAddr(
+ InitSeq->back().MachOHeaderAddress.toPtr<void *>());
+ assert(JDS && "Missing state entry for JD");
+ return JDS->Header;
+}
+
+Error MachOPlatformRuntimeState::initializeJITDylib(
+ MachOJITDylibInitializers &MOJDIs) {
+
+ auto &JDS = getOrCreateJITDylibState(MOJDIs);
+ ++JDS.RefCount;
+
+ for (auto &KV : InitSections) {
+ const auto &Name = KV.first;
+ const auto &Handler = KV.second;
+ auto I = MOJDIs.InitSections.find(Name);
+ if (I != MOJDIs.InitSections.end()) {
+ if (auto Err = Handler(I->second, MOJDIs))
+ return Err;
+ }
+ }
+
+ return Error::success();
+}
+
+class MachOPlatformRuntimeTLVManager {
+public:
+ void *getInstance(const char *ThreadData);
+
+private:
+ std::unordered_map<const char *, char *> Instances;
+ std::unordered_map<const char *, std::unique_ptr<char[]>> AllocatedSections;
+};
+
+void *MachOPlatformRuntimeTLVManager::getInstance(const char *ThreadData) {
+ auto I = Instances.find(ThreadData);
+ if (I != Instances.end())
+ return I->second;
+
+ auto TDS =
+ MachOPlatformRuntimeState::get().getThreadDataSectionFor(ThreadData);
+ if (!TDS) {
+ __orc_rt_log_error(toString(TDS.takeError()).c_str());
+ return nullptr;
+ }
+
+ auto &Allocated = AllocatedSections[TDS->first];
+ if (!Allocated) {
+ Allocated = std::make_unique<char[]>(TDS->second);
+ memcpy(Allocated.get(), TDS->first, TDS->second);
+ }
+
+ size_t ThreadDataDelta = ThreadData - TDS->first;
+ assert(ThreadDataDelta <= TDS->second && "ThreadData outside section bounds");
+
+ char *Instance = Allocated.get() + ThreadDataDelta;
+ Instances[ThreadData] = Instance;
+ return Instance;
+}
+
+void destroyMachOTLVMgr(void *MachOTLVMgr) {
+ delete static_cast<MachOPlatformRuntimeTLVManager *>(MachOTLVMgr);
+}
+
+} // end anonymous namespace
+
+//------------------------------------------------------------------------------
+// JIT entry points
+//------------------------------------------------------------------------------
+
+ORC_RT_INTERFACE __orc_rt_CWrapperFunctionResult
+__orc_rt_macho_platform_bootstrap(char *ArgData, size_t ArgSize) {
+ MachOPlatformRuntimeState::initialize();
+ return WrapperFunctionResult().release();
+}
+
+ORC_RT_INTERFACE __orc_rt_CWrapperFunctionResult
+__orc_rt_macho_platform_shutdown(char *ArgData, size_t ArgSize) {
+ MachOPlatformRuntimeState::destroy();
+ return WrapperFunctionResult().release();
+}
+
+/// Wrapper function for registering metadata on a per-object basis.
+ORC_RT_INTERFACE __orc_rt_CWrapperFunctionResult
+__orc_rt_macho_register_object_sections(char *ArgData, size_t ArgSize) {
+ return WrapperFunction<SPSError(SPSMachOPerObjectSectionsToRegister)>::handle(
+ ArgData, ArgSize,
+ [](MachOPerObjectSectionsToRegister &POSR) {
+ return MachOPlatformRuntimeState::get().registerObjectSections(
+ std::move(POSR));
+ })
+ .release();
+}
+
+/// Wrapper function for deregistering metadata on a per-object basis.
+ORC_RT_INTERFACE __orc_rt_CWrapperFunctionResult
+__orc_rt_macho_deregister_object_sections(char *ArgData, size_t ArgSize) {
+ return WrapperFunction<SPSError(SPSMachOPerObjectSectionsToRegister)>::handle(
+ ArgData, ArgSize,
+ [](MachOPerObjectSectionsToRegister &POSR) {
+ return MachOPlatformRuntimeState::get().deregisterObjectSections(
+ std::move(POSR));
+ })
+ .release();
+}
+
+//------------------------------------------------------------------------------
+// TLV support
+//------------------------------------------------------------------------------
+
+ORC_RT_INTERFACE void *__orc_rt_macho_tlv_get_addr_impl(TLVDescriptor *D) {
+ auto *TLVMgr = static_cast<MachOPlatformRuntimeTLVManager *>(
+ pthread_getspecific(D->Key));
+ if (!TLVMgr) {
+ TLVMgr = new MachOPlatformRuntimeTLVManager();
+ if (pthread_setspecific(D->Key, TLVMgr)) {
+ __orc_rt_log_error("Call to pthread_setspecific failed");
+ return nullptr;
+ }
+ }
+
+ return TLVMgr->getInstance(
+ reinterpret_cast<char *>(static_cast<uintptr_t>(D->DataAddress)));
+}
+
+ORC_RT_INTERFACE __orc_rt_CWrapperFunctionResult
+__orc_rt_macho_create_pthread_key(char *ArgData, size_t ArgSize) {
+ return WrapperFunction<SPSExpected<uint64_t>(void)>::handle(
+ ArgData, ArgSize,
+ []() -> Expected<uint64_t> {
+ pthread_key_t Key;
+ if (int Err = pthread_key_create(&Key, destroyMachOTLVMgr)) {
+ __orc_rt_log_error("Call to pthread_key_create failed");
+ return make_error<StringError>(strerror(Err));
+ }
+ return static_cast<uint64_t>(Key);
+ })
+ .release();
+}
+
+//------------------------------------------------------------------------------
+// cxa_atexit support
+//------------------------------------------------------------------------------
+
+int __orc_rt_macho_cxa_atexit(void (*func)(void *), void *arg,
+ void *dso_handle) {
+ return MachOPlatformRuntimeState::get().registerAtExit(func, arg, dso_handle);
+}
+
+void __orc_rt_macho_cxa_finalize(void *dso_handle) {
+ MachOPlatformRuntimeState::get().runAtExits(dso_handle);
+}
+
+//------------------------------------------------------------------------------
+// JIT'd dlfcn alternatives.
+//------------------------------------------------------------------------------
+
+const char *__orc_rt_macho_jit_dlerror() {
+ return MachOPlatformRuntimeState::get().dlerror();
+}
+
+void *__orc_rt_macho_jit_dlopen(const char *path, int mode) {
+ return MachOPlatformRuntimeState::get().dlopen(path, mode);
+}
+
+int __orc_rt_macho_jit_dlclose(void *dso_handle) {
+ return MachOPlatformRuntimeState::get().dlclose(dso_handle);
+}
+
+void *__orc_rt_macho_jit_dlsym(void *dso_handle, const char *symbol) {
+ return MachOPlatformRuntimeState::get().dlsym(dso_handle, symbol);
+}
+
+//------------------------------------------------------------------------------
+// MachO Run Program
+//------------------------------------------------------------------------------
+
+ORC_RT_INTERFACE int64_t __orc_rt_macho_run_program(const char *JITDylibName,
+ const char *EntrySymbolName,
+ int argc, char *argv[]) {
+ using MainTy = int (*)(int, char *[]);
+
+ void *H = __orc_rt_macho_jit_dlopen(JITDylibName,
+ __orc_rt::macho::ORC_RT_RTLD_LAZY);
+ if (!H) {
+ __orc_rt_log_error(__orc_rt_macho_jit_dlerror());
+ return -1;
+ }
+
+ auto *Main =
+ reinterpret_cast<MainTy>(__orc_rt_macho_jit_dlsym(H, EntrySymbolName));
+
+ if (!Main) {
+ __orc_rt_log_error(__orc_rt_macho_jit_dlerror());
+ return -1;
+ }
+
+ int Result = Main(argc, argv);
+
+ if (__orc_rt_macho_jit_dlclose(H) == -1)
+ __orc_rt_log_error(__orc_rt_macho_jit_dlerror());
+
+ return Result;
+}
diff --git a/compiler-rt/lib/orc/macho_platform.h b/compiler-rt/lib/orc/macho_platform.h
new file mode 100644
index 000000000000..6c05e844b0cd
--- /dev/null
+++ b/compiler-rt/lib/orc/macho_platform.h
@@ -0,0 +1,135 @@
+//===- macho_platform.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// ORC Runtime support for Darwin dynamic loading features.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef ORC_RT_MACHO_PLATFORM_H
+#define ORC_RT_MACHO_PLATFORM_H
+
+#include "common.h"
+#include "executor_address.h"
+
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+// Atexit functions.
+ORC_RT_INTERFACE int __orc_rt_macho_cxa_atexit(void (*func)(void *), void *arg,
+ void *dso_handle);
+ORC_RT_INTERFACE void __orc_rt_macho_cxa_finalize(void *dso_handle);
+
+// dlfcn functions.
+ORC_RT_INTERFACE const char *__orc_rt_macho_jit_dlerror();
+ORC_RT_INTERFACE void *__orc_rt_macho_jit_dlopen(const char *path, int mode);
+ORC_RT_INTERFACE int __orc_rt_macho_jit_dlclose(void *dso_handle);
+ORC_RT_INTERFACE void *__orc_rt_macho_jit_dlsym(void *dso_handle,
+ const char *symbol);
+
+namespace __orc_rt {
+namespace macho {
+
+struct MachOPerObjectSectionsToRegister {
+ ExecutorAddressRange EHFrameSection;
+ ExecutorAddressRange ThreadDataSection;
+};
+
+struct MachOJITDylibInitializers {
+ using SectionList = std::vector<ExecutorAddressRange>;
+
+ MachOJITDylibInitializers() = default;
+ MachOJITDylibInitializers(std::string Name,
+ ExecutorAddress MachOHeaderAddress)
+ : Name(std::move(Name)),
+ MachOHeaderAddress(std::move(MachOHeaderAddress)) {}
+
+ std::string Name;
+ ExecutorAddress MachOHeaderAddress;
+ ExecutorAddress ObjCImageInfoAddress;
+
+ std::unordered_map<std::string, SectionList> InitSections;
+};
+
+class MachOJITDylibDeinitializers {};
+
+using MachOJITDylibInitializerSequence = std::vector<MachOJITDylibInitializers>;
+
+using MachOJITDylibDeinitializerSequence =
+ std::vector<MachOJITDylibDeinitializers>;
+
+enum dlopen_mode : int {
+ ORC_RT_RTLD_LAZY = 0x1,
+ ORC_RT_RTLD_NOW = 0x2,
+ ORC_RT_RTLD_LOCAL = 0x4,
+ ORC_RT_RTLD_GLOBAL = 0x8
+};
+
+} // end namespace macho
+
+using SPSMachOPerObjectSectionsToRegister =
+ SPSTuple<SPSExecutorAddressRange, SPSExecutorAddressRange>;
+
+template <>
+class SPSSerializationTraits<SPSMachOPerObjectSectionsToRegister,
+ macho::MachOPerObjectSectionsToRegister> {
+
+public:
+ static size_t size(const macho::MachOPerObjectSectionsToRegister &MOPOSR) {
+ return SPSMachOPerObjectSectionsToRegister::AsArgList::size(
+ MOPOSR.EHFrameSection, MOPOSR.ThreadDataSection);
+ }
+
+ static bool serialize(SPSOutputBuffer &OB,
+ const macho::MachOPerObjectSectionsToRegister &MOPOSR) {
+ return SPSMachOPerObjectSectionsToRegister::AsArgList::serialize(
+ OB, MOPOSR.EHFrameSection, MOPOSR.ThreadDataSection);
+ }
+
+ static bool deserialize(SPSInputBuffer &IB,
+ macho::MachOPerObjectSectionsToRegister &MOPOSR) {
+ return SPSMachOPerObjectSectionsToRegister::AsArgList::deserialize(
+ IB, MOPOSR.EHFrameSection, MOPOSR.ThreadDataSection);
+ }
+};
+
+using SPSNamedExecutorAddressRangeSequenceMap =
+ SPSSequence<SPSTuple<SPSString, SPSExecutorAddressRangeSequence>>;
+
+using SPSMachOJITDylibInitializers =
+ SPSTuple<SPSString, SPSExecutorAddress, SPSExecutorAddress,
+ SPSNamedExecutorAddressRangeSequenceMap>;
+
+using SPSMachOJITDylibInitializerSequence =
+ SPSSequence<SPSMachOJITDylibInitializers>;
+
+/// Serialization traits for MachOJITDylibInitializers.
+template <>
+class SPSSerializationTraits<SPSMachOJITDylibInitializers,
+ macho::MachOJITDylibInitializers> {
+public:
+ static size_t size(const macho::MachOJITDylibInitializers &MOJDIs) {
+ return SPSMachOJITDylibInitializers::AsArgList::size(
+ MOJDIs.Name, MOJDIs.MachOHeaderAddress, MOJDIs.ObjCImageInfoAddress,
+ MOJDIs.InitSections);
+ }
+
+ static bool serialize(SPSOutputBuffer &OB,
+ const macho::MachOJITDylibInitializers &MOJDIs) {
+ return SPSMachOJITDylibInitializers::AsArgList::serialize(
+ OB, MOJDIs.Name, MOJDIs.MachOHeaderAddress, MOJDIs.ObjCImageInfoAddress,
+ MOJDIs.InitSections);
+ }
+
+ static bool deserialize(SPSInputBuffer &IB,
+ macho::MachOJITDylibInitializers &MOJDIs) {
+ return SPSMachOJITDylibInitializers::AsArgList::deserialize(
+ IB, MOJDIs.Name, MOJDIs.MachOHeaderAddress, MOJDIs.ObjCImageInfoAddress,
+ MOJDIs.InitSections);
+ }
+};
+
+} // end namespace __orc_rt
+
+#endif // ORC_RT_MACHO_PLATFORM_H
diff --git a/compiler-rt/lib/orc/macho_tlv.x86-64.S b/compiler-rt/lib/orc/macho_tlv.x86-64.S
new file mode 100644
index 000000000000..0affe403eec2
--- /dev/null
+++ b/compiler-rt/lib/orc/macho_tlv.x86-64.S
@@ -0,0 +1,68 @@
+//===-- macho_tlv.x86-64.S --------------------------------------*- ASM -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of the ORC runtime support library.
+//
+//===----------------------------------------------------------------------===//
+
+#define REGISTER_SAVE_SPACE_SIZE 512
+
+ .text
+
+ // returns address of TLV in %rax, all other registers preserved
+ .globl ___orc_rt_macho_tlv_get_addr
+___orc_rt_macho_tlv_get_addr:
+ pushq %rbp
+ movq %rsp, %rbp
+ subq $REGISTER_SAVE_SPACE_SIZE, %rsp
+ movq %rbx, -8(%rbp)
+ movq %rcx, -16(%rbp)
+ movq %rdx, -24(%rbp)
+ movq %rsi, -32(%rbp)
+ movq %rdi, -40(%rbp)
+ movq %r8, -48(%rbp)
+ movq %r9, -56(%rbp)
+ movq %r10, -64(%rbp)
+ movq %r11, -72(%rbp)
+ movq %r12, -80(%rbp)
+ movq %r13, -88(%rbp)
+ movq %r14, -96(%rbp)
+ movq %r15, -104(%rbp)
+ movdqa %xmm0, -128(%rbp)
+ movdqa %xmm1, -144(%rbp)
+ movdqa %xmm2, -160(%rbp)
+ movdqa %xmm3, -176(%rbp)
+ movdqa %xmm4, -192(%rbp)
+ movdqa %xmm5, -208(%rbp)
+ movdqa %xmm6, -224(%rbp)
+ movdqa %xmm7, -240(%rbp)
+ call ___orc_rt_macho_tlv_get_addr_impl
+ movq -8(%rbp), %rbx
+ movq -16(%rbp), %rcx
+ movq -24(%rbp), %rdx
+ movq -32(%rbp), %rsi
+ movq -40(%rbp), %rdi
+ movq -48(%rbp), %r8
+ movq -56(%rbp), %r9
+ movq -64(%rbp), %r10
+ movq -72(%rbp), %r11
+ movq -80(%rbp), %r12
+ movq -88(%rbp), %r13
+ movq -96(%rbp), %r14
+ movq -104(%rbp), %r15
+ movdqa -128(%rbp), %xmm0
+ movdqa -144(%rbp), %xmm1
+ movdqa -160(%rbp), %xmm2
+ movdqa -176(%rbp), %xmm3
+ movdqa -192(%rbp), %xmm4
+ movdqa -208(%rbp), %xmm5
+ movdqa -224(%rbp), %xmm6
+ movdqa -240(%rbp), %xmm7
+ addq $REGISTER_SAVE_SPACE_SIZE, %rsp
+ popq %rbp
+ ret
diff --git a/compiler-rt/lib/orc/run_program_wrapper.cpp b/compiler-rt/lib/orc/run_program_wrapper.cpp
new file mode 100644
index 000000000000..d0f88534aa9c
--- /dev/null
+++ b/compiler-rt/lib/orc/run_program_wrapper.cpp
@@ -0,0 +1,51 @@
+//===- run_program_wrapper.cpp --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of the ORC runtime support library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "adt.h"
+#include "common.h"
+#include "wrapper_function_utils.h"
+
+#include <memory>
+#include <string.h>
+#include <vector>
+
+using namespace __orc_rt;
+
+extern "C" int64_t __orc_rt_run_program(const char *JITDylibName,
+ const char *EntrySymbolName, int argc,
+ char *argv[]);
+
+ORC_RT_INTERFACE __orc_rt_CWrapperFunctionResult
+__orc_rt_run_program_wrapper(const char *ArgData, size_t ArgSize) {
+ return WrapperFunction<int64_t(SPSString, SPSString,
+ SPSSequence<SPSString>)>::
+ handle(ArgData, ArgSize,
+ [](const std::string &JITDylibName,
+ const std::string &EntrySymbolName,
+ const std::vector<string_view> &Args) {
+ std::vector<std::unique_ptr<char[]>> ArgVStorage;
+ ArgVStorage.reserve(Args.size());
+ for (auto &Arg : Args) {
+ ArgVStorage.push_back(
+ std::make_unique<char[]>(Arg.size() + 1));
+ memcpy(ArgVStorage.back().get(), Arg.data(), Arg.size());
+ ArgVStorage.back()[Arg.size()] = '\0';
+ }
+ std::vector<char *> ArgV;
+ ArgV.reserve(ArgVStorage.size() + 1);
+ for (auto &ArgStorage : ArgVStorage)
+ ArgV.push_back(ArgStorage.get());
+ ArgV.push_back(nullptr);
+ return __orc_rt_run_program(JITDylibName.c_str(),
+ EntrySymbolName.c_str(),
+ ArgV.size() - 1, ArgV.data());
+ })
+ .release();
+}
diff --git a/compiler-rt/lib/orc/simple_packed_serialization.h b/compiler-rt/lib/orc/simple_packed_serialization.h
new file mode 100644
index 000000000000..b561a19d8f04
--- /dev/null
+++ b/compiler-rt/lib/orc/simple_packed_serialization.h
@@ -0,0 +1,579 @@
+//===--- simple_packed_serialization.h - simple serialization ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of the ORC runtime support library.
+//
+// The behavior of the utilities in this header must be synchronized with the
+// behavior of the utilities in
+// llvm/ExecutionEngine/Orc/Shared/SimplePackedSerialization.h.
+//
+// The Simple Packed Serialization (SPS) utilities are used to generate
+// argument and return buffers for wrapper functions using the following
+// serialization scheme:
+//
+// Primitives:
+// bool, char, int8_t, uint8_t -- Two's complement 8-bit (0=false, 1=true)
+// int16_t, uint16_t -- Two's complement 16-bit little endian
+// int32_t, uint32_t -- Two's complement 32-bit little endian
+//   int64_t, uint64_t -- Two's complement 64-bit little endian
+//
+// Sequence<T>:
+// Serialized as the sequence length (as a uint64_t) followed by the
+// serialization of each of the elements without padding.
+//
+// Tuple<T1, ..., TN>:
+// Serialized as each of the element types from T1 to TN without padding.
+//
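+// For example, under the rules above a Sequence<char> holding the string
+// "hi" is serialized as the 8-byte little-endian length 2 followed by the
+// bytes 'h' and 'i', with no padding or terminator.
+//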
+//===----------------------------------------------------------------------===//
+
+#ifndef ORC_RT_SIMPLE_PACKED_SERIALIZATION_H
+#define ORC_RT_SIMPLE_PACKED_SERIALIZATION_H
+
+#include "adt.h"
+#include "endianness.h"
+#include "error.h"
+#include "stl_extras.h"
+
+#include <string>
+#include <string.h>
+#include <tuple>
+#include <type_traits>
+#include <unordered_map>
+#include <utility>
+#include <vector>
+
+namespace __orc_rt {
+
+/// Output char buffer with overflow check.
+class SPSOutputBuffer {
+public:
+ SPSOutputBuffer(char *Buffer, size_t Remaining)
+ : Buffer(Buffer), Remaining(Remaining) {}
+ bool write(const char *Data, size_t Size) {
+ if (Size > Remaining)
+ return false;
+ memcpy(Buffer, Data, Size);
+ Buffer += Size;
+ Remaining -= Size;
+ return true;
+ }
+
+private:
+ char *Buffer = nullptr;
+ size_t Remaining = 0;
+};
+
+/// Input char buffer with underflow check.
+class SPSInputBuffer {
+public:
+ SPSInputBuffer() = default;
+ SPSInputBuffer(const char *Buffer, size_t Remaining)
+ : Buffer(Buffer), Remaining(Remaining) {}
+ bool read(char *Data, size_t Size) {
+ if (Size > Remaining)
+ return false;
+ memcpy(Data, Buffer, Size);
+ Buffer += Size;
+ Remaining -= Size;
+ return true;
+ }
+
+ const char *data() const { return Buffer; }
+ bool skip(size_t Size) {
+ if (Size > Remaining)
+ return false;
+ Buffer += Size;
+ Remaining -= Size;
+ return true;
+ }
+
+private:
+ const char *Buffer = nullptr;
+ size_t Remaining = 0;
+};
+
+/// Specialize to describe how to serialize/deserialize to/from the given
+/// concrete type.
+template <typename SPSTagT, typename ConcreteT, typename _ = void>
+class SPSSerializationTraits;
+
+/// A utility class for serializing to a blob from a variadic list.
+template <typename... ArgTs> class SPSArgList;
+
+// Empty list specialization for SPSArgList.
+template <> class SPSArgList<> {
+public:
+ static size_t size() { return 0; }
+
+ static bool serialize(SPSOutputBuffer &OB) { return true; }
+ static bool deserialize(SPSInputBuffer &IB) { return true; }
+};
+
+// Non-empty list specialization for SPSArgList.
+template <typename SPSTagT, typename... SPSTagTs>
+class SPSArgList<SPSTagT, SPSTagTs...> {
+public:
+ template <typename ArgT, typename... ArgTs>
+ static size_t size(const ArgT &Arg, const ArgTs &...Args) {
+ return SPSSerializationTraits<SPSTagT, ArgT>::size(Arg) +
+ SPSArgList<SPSTagTs...>::size(Args...);
+ }
+
+ template <typename ArgT, typename... ArgTs>
+ static bool serialize(SPSOutputBuffer &OB, const ArgT &Arg,
+ const ArgTs &...Args) {
+ return SPSSerializationTraits<SPSTagT, ArgT>::serialize(OB, Arg) &&
+ SPSArgList<SPSTagTs...>::serialize(OB, Args...);
+ }
+
+ template <typename ArgT, typename... ArgTs>
+ static bool deserialize(SPSInputBuffer &IB, ArgT &Arg, ArgTs &...Args) {
+ return SPSSerializationTraits<SPSTagT, ArgT>::deserialize(IB, Arg) &&
+ SPSArgList<SPSTagTs...>::deserialize(IB, Args...);
+ }
+};
+
+/// SPS serialization for integral types, bool, and char.
+template <typename SPSTagT>
+class SPSSerializationTraits<
+ SPSTagT, SPSTagT,
+ std::enable_if_t<std::is_same<SPSTagT, bool>::value ||
+ std::is_same<SPSTagT, char>::value ||
+ std::is_same<SPSTagT, int8_t>::value ||
+ std::is_same<SPSTagT, int16_t>::value ||
+ std::is_same<SPSTagT, int32_t>::value ||
+ std::is_same<SPSTagT, int64_t>::value ||
+ std::is_same<SPSTagT, uint8_t>::value ||
+ std::is_same<SPSTagT, uint16_t>::value ||
+ std::is_same<SPSTagT, uint32_t>::value ||
+ std::is_same<SPSTagT, uint64_t>::value>> {
+public:
+ static size_t size(const SPSTagT &Value) { return sizeof(SPSTagT); }
+
+ static bool serialize(SPSOutputBuffer &OB, const SPSTagT &Value) {
+ SPSTagT Tmp = Value;
+ if (IsBigEndianHost)
+ swapByteOrder(Tmp);
+ return OB.write(reinterpret_cast<const char *>(&Tmp), sizeof(Tmp));
+ }
+
+ static bool deserialize(SPSInputBuffer &IB, SPSTagT &Value) {
+ SPSTagT Tmp;
+ if (!IB.read(reinterpret_cast<char *>(&Tmp), sizeof(Tmp)))
+ return false;
+ if (IsBigEndianHost)
+ swapByteOrder(Tmp);
+ Value = Tmp;
+ return true;
+ }
+};
+
+/// An empty placeholder suitable as a substitute for void when deserializing.
+class SPSEmpty {};
+
+/// Represents an address in the executor.
+class SPSExecutorAddress {};
+
+/// SPS tag type for tuples.
+///
+/// A blob tuple should be serialized by serializing each of the elements in
+/// sequence.
+template <typename... SPSTagTs> class SPSTuple {
+public:
+ /// Convenience typedef of the corresponding arg list.
+ typedef SPSArgList<SPSTagTs...> AsArgList;
+};
+
+/// SPS tag type for sequences.
+///
+/// SPSSequences should be serialized as a uint64_t sequence length,
+/// followed by the serialization of each of the elements.
+template <typename SPSElementTagT> class SPSSequence;
+
+/// SPS tag type for strings, which are equivalent to sequences of chars.
+using SPSString = SPSSequence<char>;
+
+/// SPS tag type for maps.
+///
+/// SPS maps are just sequences of (Key, Value) tuples.
+template <typename SPSTagT1, typename SPSTagT2>
+using SPSMap = SPSSequence<SPSTuple<SPSTagT1, SPSTagT2>>;
+
+/// Serialization for SPSEmpty type.
+template <> class SPSSerializationTraits<SPSEmpty, SPSEmpty> {
+public:
+ static size_t size(const SPSEmpty &EP) { return 0; }
+ static bool serialize(SPSOutputBuffer &OB, const SPSEmpty &BE) {
+ return true;
+ }
+ static bool deserialize(SPSInputBuffer &IB, SPSEmpty &BE) { return true; }
+};
+
+/// Specialize this to implement 'trivial' sequence serialization for
+/// a concrete sequence type.
+///
+/// Trivial sequence serialization uses the sequence's 'size' member to get the
+/// length of the sequence, and uses a range-based for loop to iterate over the
+/// elements.
+///
+/// Specializing this template class means that you do not need to provide a
+/// specialization of SPSSerializationTraits for your type.
+template <typename SPSElementTagT, typename ConcreteSequenceT>
+class TrivialSPSSequenceSerialization {
+public:
+ static constexpr bool available = false;
+};
+
+/// Specialize this to implement 'trivial' sequence deserialization for
+/// a concrete sequence type.
+///
+/// Trivial deserialization calls a static 'reserve(SequenceT&)' method on your
+/// specialization (you must implement this) to reserve space, and then calls
+/// a static 'append(SequenceT&, ElementT&)' method to append each of the
+/// deserialized elements.
+///
+/// Specializing this template class means that you do not need to provide a
+/// specialization of SPSSerializationTraits for your type.
+template <typename SPSElementTagT, typename ConcreteSequenceT>
+class TrivialSPSSequenceDeserialization {
+public:
+ static constexpr bool available = false;
+};
+
+/// Trivial std::string -> SPSSequence<char> serialization.
+template <> class TrivialSPSSequenceSerialization<char, std::string> {
+public:
+ static constexpr bool available = true;
+};
+
+/// Trivial SPSSequence<char> -> std::string deserialization.
+template <> class TrivialSPSSequenceDeserialization<char, std::string> {
+public:
+ static constexpr bool available = true;
+
+ using element_type = char;
+
+ static void reserve(std::string &S, uint64_t Size) { S.reserve(Size); }
+ static bool append(std::string &S, char C) {
+ S.push_back(C);
+ return true;
+ }
+};
+
+/// Trivial std::vector<T> -> SPSSequence<SPSElementTagT> serialization.
+template <typename SPSElementTagT, typename T>
+class TrivialSPSSequenceSerialization<SPSElementTagT, std::vector<T>> {
+public:
+ static constexpr bool available = true;
+};
+
+/// Trivial SPSSequence<SPSElementTagT> -> std::vector<T> deserialization.
+template <typename SPSElementTagT, typename T>
+class TrivialSPSSequenceDeserialization<SPSElementTagT, std::vector<T>> {
+public:
+ static constexpr bool available = true;
+
+ using element_type = typename std::vector<T>::value_type;
+
+ static void reserve(std::vector<T> &V, uint64_t Size) { V.reserve(Size); }
+ static bool append(std::vector<T> &V, T E) {
+ V.push_back(std::move(E));
+ return true;
+ }
+};
+
+/// Trivial std::unordered_map<K, V> -> SPSSequence<SPSTuple<SPSKey, SPSValue>>
+/// serialization.
+template <typename SPSKeyTagT, typename SPSValueTagT, typename K, typename V>
+class TrivialSPSSequenceSerialization<SPSTuple<SPSKeyTagT, SPSValueTagT>,
+ std::unordered_map<K, V>> {
+public:
+ static constexpr bool available = true;
+};
+
+/// Trivial SPSSequence<SPSTuple<SPSKey, SPSValue>> -> std::unordered_map<K, V>
+/// deserialization.
+template <typename SPSKeyTagT, typename SPSValueTagT, typename K, typename V>
+class TrivialSPSSequenceDeserialization<SPSTuple<SPSKeyTagT, SPSValueTagT>,
+ std::unordered_map<K, V>> {
+public:
+ static constexpr bool available = true;
+
+ using element_type = std::pair<K, V>;
+
+ static void reserve(std::unordered_map<K, V> &M, uint64_t Size) {
+ M.reserve(Size);
+ }
+ static bool append(std::unordered_map<K, V> &M, element_type E) {
+ return M.insert(std::move(E)).second;
+ }
+};
+
+/// 'Trivial' sequence serialization: Sequence is serialized as a uint64_t size
+/// followed by a for-each loop over the elements of the sequence to serialize
+/// each of them.
+template <typename SPSElementTagT, typename SequenceT>
+class SPSSerializationTraits<SPSSequence<SPSElementTagT>, SequenceT,
+ std::enable_if_t<TrivialSPSSequenceSerialization<
+ SPSElementTagT, SequenceT>::available>> {
+public:
+ static size_t size(const SequenceT &S) {
+ size_t Size = SPSArgList<uint64_t>::size(static_cast<uint64_t>(S.size()));
+ for (const auto &E : S)
+ Size += SPSArgList<SPSElementTagT>::size(E);
+ return Size;
+ }
+
+ static bool serialize(SPSOutputBuffer &OB, const SequenceT &S) {
+ if (!SPSArgList<uint64_t>::serialize(OB, static_cast<uint64_t>(S.size())))
+ return false;
+ for (const auto &E : S)
+ if (!SPSArgList<SPSElementTagT>::serialize(OB, E))
+ return false;
+ return true;
+ }
+
+ static bool deserialize(SPSInputBuffer &IB, SequenceT &S) {
+ using TBSD = TrivialSPSSequenceDeserialization<SPSElementTagT, SequenceT>;
+ uint64_t Size;
+ if (!SPSArgList<uint64_t>::deserialize(IB, Size))
+ return false;
+ TBSD::reserve(S, Size);
+ for (size_t I = 0; I != Size; ++I) {
+ typename TBSD::element_type E;
+ if (!SPSArgList<SPSElementTagT>::deserialize(IB, E))
+ return false;
+ if (!TBSD::append(S, std::move(E)))
+ return false;
+ }
+ return true;
+ }
+};
+
+/// SPSTuple serialization for std::pair.
+template <typename SPSTagT1, typename SPSTagT2, typename T1, typename T2>
+class SPSSerializationTraits<SPSTuple<SPSTagT1, SPSTagT2>, std::pair<T1, T2>> {
+public:
+ static size_t size(const std::pair<T1, T2> &P) {
+ return SPSArgList<SPSTagT1>::size(P.first) +
+ SPSArgList<SPSTagT2>::size(P.second);
+ }
+
+ static bool serialize(SPSOutputBuffer &OB, const std::pair<T1, T2> &P) {
+ return SPSArgList<SPSTagT1>::serialize(OB, P.first) &&
+ SPSArgList<SPSTagT2>::serialize(OB, P.second);
+ }
+
+ static bool deserialize(SPSInputBuffer &IB, std::pair<T1, T2> &P) {
+ return SPSArgList<SPSTagT1>::deserialize(IB, P.first) &&
+ SPSArgList<SPSTagT2>::deserialize(IB, P.second);
+ }
+};
+
+/// Serialization for string_views.
+///
+/// Serialization is as for regular strings. Deserialization points directly
+/// into the blob.
+template <> class SPSSerializationTraits<SPSString, __orc_rt::string_view> {
+public:
+ static size_t size(const __orc_rt::string_view &S) {
+ return SPSArgList<uint64_t>::size(static_cast<uint64_t>(S.size())) +
+ S.size();
+ }
+
+ static bool serialize(SPSOutputBuffer &OB, const __orc_rt::string_view &S) {
+ if (!SPSArgList<uint64_t>::serialize(OB, static_cast<uint64_t>(S.size())))
+ return false;
+ return OB.write(S.data(), S.size());
+ }
+
+ static bool deserialize(SPSInputBuffer &IB, __orc_rt::string_view &S) {
+ const char *Data = nullptr;
+ uint64_t Size;
+ if (!SPSArgList<uint64_t>::deserialize(IB, Size))
+ return false;
+ Data = IB.data();
+ if (!IB.skip(Size))
+ return false;
+ S = {Data, Size};
+ return true;
+ }
+};
+
+/// SPS tag type for errors.
+class SPSError;
+
+/// SPS tag type for expecteds, which are either a T or a string representing
+/// an error.
+template <typename SPSTagT> class SPSExpected;
+
+namespace detail {
+
+/// Helper type for serializing Errors.
+///
+/// llvm::Errors are move-only, and not inspectable except by consuming them.
+/// This makes them unsuitable for direct serialization via
+/// SPSSerializationTraits, which needs to inspect values twice (once to
+/// determine the amount of space to reserve, and then again to serialize).
+///
+/// The SPSSerializableError type is a helper that can be
+/// constructed from an llvm::Error, but inspected more than once.
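+///
+/// Convert to and from Error values with the toSPSSerializable and
+/// fromSPSSerializable helpers below, e.g.
+///   SPSSerializableError SE = toSPSSerializable(std::move(Err));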
+struct SPSSerializableError {
+ bool HasError = false;
+ std::string ErrMsg;
+};
+
+/// Helper type for serializing Expected<T>s.
+///
+/// See SPSSerializableError for more details.
+///
+// FIXME: Use std::variant for storage once we have c++17.
+template <typename T> struct SPSSerializableExpected {
+ bool HasValue = false;
+ T Value{};
+ std::string ErrMsg;
+};
+
+inline SPSSerializableError toSPSSerializable(Error Err) {
+ if (Err)
+ return {true, toString(std::move(Err))};
+ return {false, {}};
+}
+
+inline Error fromSPSSerializable(SPSSerializableError BSE) {
+ if (BSE.HasError)
+ return make_error<StringError>(BSE.ErrMsg);
+ return Error::success();
+}
+
+template <typename T>
+SPSSerializableExpected<T> toSPSSerializable(Expected<T> E) {
+ if (E)
+ return {true, std::move(*E), {}};
+ else
+ return {false, {}, toString(E.takeError())};
+}
+
+template <typename T>
+Expected<T> fromSPSSerializable(SPSSerializableExpected<T> BSE) {
+ if (BSE.HasValue)
+ return std::move(BSE.Value);
+ else
+ return make_error<StringError>(BSE.ErrMsg);
+}
+
+} // end namespace detail
+
+/// Serialize to a SPSError from a detail::SPSSerializableError.
+template <>
+class SPSSerializationTraits<SPSError, detail::SPSSerializableError> {
+public:
+ static size_t size(const detail::SPSSerializableError &BSE) {
+ size_t Size = SPSArgList<bool>::size(BSE.HasError);
+ if (BSE.HasError)
+ Size += SPSArgList<SPSString>::size(BSE.ErrMsg);
+ return Size;
+ }
+
+ static bool serialize(SPSOutputBuffer &OB,
+ const detail::SPSSerializableError &BSE) {
+ if (!SPSArgList<bool>::serialize(OB, BSE.HasError))
+ return false;
+ if (BSE.HasError)
+ if (!SPSArgList<SPSString>::serialize(OB, BSE.ErrMsg))
+ return false;
+ return true;
+ }
+
+ static bool deserialize(SPSInputBuffer &IB,
+ detail::SPSSerializableError &BSE) {
+ if (!SPSArgList<bool>::deserialize(IB, BSE.HasError))
+ return false;
+
+ if (!BSE.HasError)
+ return true;
+
+ return SPSArgList<SPSString>::deserialize(IB, BSE.ErrMsg);
+ }
+};
+
+/// Serialize to a SPSExpected<SPSTagT> from a
+/// detail::SPSSerializableExpected<T>.
+template <typename SPSTagT, typename T>
+class SPSSerializationTraits<SPSExpected<SPSTagT>,
+ detail::SPSSerializableExpected<T>> {
+public:
+ static size_t size(const detail::SPSSerializableExpected<T> &BSE) {
+ size_t Size = SPSArgList<bool>::size(BSE.HasValue);
+ if (BSE.HasValue)
+ Size += SPSArgList<SPSTagT>::size(BSE.Value);
+ else
+ Size += SPSArgList<SPSString>::size(BSE.ErrMsg);
+ return Size;
+ }
+
+ static bool serialize(SPSOutputBuffer &OB,
+ const detail::SPSSerializableExpected<T> &BSE) {
+ if (!SPSArgList<bool>::serialize(OB, BSE.HasValue))
+ return false;
+
+ if (BSE.HasValue)
+ return SPSArgList<SPSTagT>::serialize(OB, BSE.Value);
+
+ return SPSArgList<SPSString>::serialize(OB, BSE.ErrMsg);
+ }
+
+ static bool deserialize(SPSInputBuffer &IB,
+ detail::SPSSerializableExpected<T> &BSE) {
+ if (!SPSArgList<bool>::deserialize(IB, BSE.HasValue))
+ return false;
+
+ if (BSE.HasValue)
+ return SPSArgList<SPSTagT>::deserialize(IB, BSE.Value);
+
+ return SPSArgList<SPSString>::deserialize(IB, BSE.ErrMsg);
+ }
+};
+
+/// Serialize to a SPSExpected<SPSTagT> from a detail::SPSSerializableError.
+template <typename SPSTagT>
+class SPSSerializationTraits<SPSExpected<SPSTagT>,
+ detail::SPSSerializableError> {
+public:
+ static size_t size(const detail::SPSSerializableError &BSE) {
+ assert(BSE.HasError && "Cannot serialize expected from a success value");
+ return SPSArgList<bool>::size(false) +
+ SPSArgList<SPSString>::size(BSE.ErrMsg);
+ }
+
+ static bool serialize(SPSOutputBuffer &OB,
+ const detail::SPSSerializableError &BSE) {
+ assert(BSE.HasError && "Cannot serialize expected from a success value");
+ if (!SPSArgList<bool>::serialize(OB, false))
+ return false;
+ return SPSArgList<SPSString>::serialize(OB, BSE.ErrMsg);
+ }
+};
+
+/// Serialize to a SPSExpected<SPSTagT> from a T.
+template <typename SPSTagT, typename T>
+class SPSSerializationTraits<SPSExpected<SPSTagT>, T> {
+public:
+ static size_t size(const T &Value) {
+ return SPSArgList<bool>::size(true) + SPSArgList<SPSTagT>::size(Value);
+ }
+
+ static bool serialize(SPSOutputBuffer &OB, const T &Value) {
+ if (!SPSArgList<bool>::serialize(OB, true))
+ return false;
+    return SPSArgList<SPSTagT>::serialize(OB, Value);
+ }
+};
+
+} // end namespace __orc_rt
+
+#endif // ORC_RT_SIMPLE_PACKED_SERIALIZATION_H
diff --git a/compiler-rt/lib/orc/stl_extras.h b/compiler-rt/lib/orc/stl_extras.h
new file mode 100644
index 000000000000..ad7286e87ae3
--- /dev/null
+++ b/compiler-rt/lib/orc/stl_extras.h
@@ -0,0 +1,46 @@
+//===-------- stl_extras.h - Useful STL related functions -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of the ORC runtime support library.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef ORC_RT_STL_EXTRAS_H
+#define ORC_RT_STL_EXTRAS_H
+
+#include <tuple>
+#include <utility>
+
+namespace __orc_rt {
+
+namespace detail {
+
+template <typename F, typename Tuple, std::size_t... I>
+decltype(auto) apply_tuple_impl(F &&f, Tuple &&t, std::index_sequence<I...>) {
+ return std::forward<F>(f)(std::get<I>(std::forward<Tuple>(t))...);
+}
+
+} // end namespace detail
+
+/// Given an input tuple (a1, a2, ..., an), pass the arguments of the
+/// tuple variadically to f as if by calling f(a1, a2, ..., an) and
+/// return the result.
+///
+/// FIXME: Switch to std::apply once we can use c++17.
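+///
+/// For example, apply_tuple([](int a, int b) { return a + b; },
+///                          std::make_tuple(1, 2)) returns 3.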
+template <typename F, typename Tuple>
+decltype(auto) apply_tuple(F &&f, Tuple &&t) {
+ using Indices = std::make_index_sequence<
+ std::tuple_size<typename std::decay<Tuple>::type>::value>;
+
+ return detail::apply_tuple_impl(std::forward<F>(f), std::forward<Tuple>(t),
+ Indices{});
+}
+
+} // namespace __orc_rt
+
+#endif // ORC_RT_STL_EXTRAS_H
diff --git a/compiler-rt/lib/orc/wrapper_function_utils.h b/compiler-rt/lib/orc/wrapper_function_utils.h
new file mode 100644
index 000000000000..49faa03e5eb8
--- /dev/null
+++ b/compiler-rt/lib/orc/wrapper_function_utils.h
@@ -0,0 +1,367 @@
+//===-- wrapper_function_utils.h - Utilities for wrapper funcs --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of the ORC runtime support library.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef ORC_RT_WRAPPER_FUNCTION_UTILS_H
+#define ORC_RT_WRAPPER_FUNCTION_UTILS_H
+
+#include "c_api.h"
+#include "common.h"
+#include "error.h"
+#include "simple_packed_serialization.h"
+#include <type_traits>
+
+namespace __orc_rt {
+
+/// C++ wrapper function result: Same as CWrapperFunctionResult but
+/// auto-releases memory.
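+///
+/// A typical wrapper entry point builds one of these and hands ownership back
+/// to the caller via release(), e.g.
+///   return WrapperFunctionResult().release();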
+class WrapperFunctionResult {
+public:
+ /// Create a default WrapperFunctionResult.
+ WrapperFunctionResult() { __orc_rt_CWrapperFunctionResultInit(&R); }
+
+ /// Create a WrapperFunctionResult from a CWrapperFunctionResult. This
+ /// instance takes ownership of the result object and will automatically
+ /// call dispose on the result upon destruction.
+ WrapperFunctionResult(__orc_rt_CWrapperFunctionResult R) : R(R) {}
+
+ WrapperFunctionResult(const WrapperFunctionResult &) = delete;
+ WrapperFunctionResult &operator=(const WrapperFunctionResult &) = delete;
+
+ WrapperFunctionResult(WrapperFunctionResult &&Other) {
+ __orc_rt_CWrapperFunctionResultInit(&R);
+ std::swap(R, Other.R);
+ }
+
+ WrapperFunctionResult &operator=(WrapperFunctionResult &&Other) {
+ __orc_rt_CWrapperFunctionResult Tmp;
+ __orc_rt_CWrapperFunctionResultInit(&Tmp);
+ std::swap(Tmp, Other.R);
+ std::swap(R, Tmp);
+ return *this;
+ }
+
+ ~WrapperFunctionResult() { __orc_rt_DisposeCWrapperFunctionResult(&R); }
+
+ /// Relinquish ownership of and return the
+ /// __orc_rt_CWrapperFunctionResult.
+ __orc_rt_CWrapperFunctionResult release() {
+ __orc_rt_CWrapperFunctionResult Tmp;
+ __orc_rt_CWrapperFunctionResultInit(&Tmp);
+ std::swap(R, Tmp);
+ return Tmp;
+ }
+
+ /// Get a pointer to the data contained in this instance.
+ const char *data() const { return __orc_rt_CWrapperFunctionResultData(&R); }
+
+ /// Returns the size of the data contained in this instance.
+ size_t size() const { return __orc_rt_CWrapperFunctionResultSize(&R); }
+
+ /// Returns true if this value is equivalent to a default-constructed
+ /// WrapperFunctionResult.
+ bool empty() const { return __orc_rt_CWrapperFunctionResultEmpty(&R); }
+
+ /// Create a WrapperFunctionResult with the given size and return a pointer
+ /// to the underlying memory.
+ static char *allocate(WrapperFunctionResult &R, size_t Size) {
+ __orc_rt_DisposeCWrapperFunctionResult(&R.R);
+ __orc_rt_CWrapperFunctionResultInit(&R.R);
+ return __orc_rt_CWrapperFunctionResultAllocate(&R.R, Size);
+ }
+
+ /// Copy from the given char range.
+ static WrapperFunctionResult copyFrom(const char *Source, size_t Size) {
+ return __orc_rt_CreateCWrapperFunctionResultFromRange(Source, Size);
+ }
+
+ /// Copy from the given null-terminated string (includes the null-terminator).
+ static WrapperFunctionResult copyFrom(const char *Source) {
+ return __orc_rt_CreateCWrapperFunctionResultFromString(Source);
+ }
+
+ /// Copy from the given std::string (includes the null terminator).
+ static WrapperFunctionResult copyFrom(const std::string &Source) {
+ return copyFrom(Source.c_str());
+ }
+
+ /// Create an out-of-band error by copying the given string.
+ static WrapperFunctionResult createOutOfBandError(const char *Msg) {
+ return __orc_rt_CreateCWrapperFunctionResultFromOutOfBandError(Msg);
+ }
+
+ /// Create an out-of-band error by copying the given string.
+ static WrapperFunctionResult createOutOfBandError(const std::string &Msg) {
+ return createOutOfBandError(Msg.c_str());
+ }
+
+ /// If this value is an out-of-band error then this returns the error message,
+ /// otherwise returns nullptr.
+ const char *getOutOfBandError() const {
+ return __orc_rt_CWrapperFunctionResultGetOutOfBandError(&R);
+ }
+
+private:
+ __orc_rt_CWrapperFunctionResult R;
+};
+
+namespace detail {
+
+template <typename SPSArgListT, typename... ArgTs>
+Expected<WrapperFunctionResult>
+serializeViaSPSToWrapperFunctionResult(const ArgTs &...Args) {
+ WrapperFunctionResult Result;
+ char *DataPtr =
+ WrapperFunctionResult::allocate(Result, SPSArgListT::size(Args...));
+ SPSOutputBuffer OB(DataPtr, Result.size());
+ if (!SPSArgListT::serialize(OB, Args...))
+ return make_error<StringError>(
+ "Error serializing arguments to blob in call");
+ return std::move(Result);
+}
+
+template <typename RetT> class WrapperFunctionHandlerCaller {
+public:
+ template <typename HandlerT, typename ArgTupleT, std::size_t... I>
+ static decltype(auto) call(HandlerT &&H, ArgTupleT &Args,
+ std::index_sequence<I...>) {
+ return std::forward<HandlerT>(H)(std::get<I>(Args)...);
+ }
+};
+
+template <> class WrapperFunctionHandlerCaller<void> {
+public:
+ template <typename HandlerT, typename ArgTupleT, std::size_t... I>
+ static SPSEmpty call(HandlerT &&H, ArgTupleT &Args,
+ std::index_sequence<I...>) {
+ std::forward<HandlerT>(H)(std::get<I>(Args)...);
+ return SPSEmpty();
+ }
+};
+
+template <typename WrapperFunctionImplT,
+ template <typename> class ResultSerializer, typename... SPSTagTs>
+class WrapperFunctionHandlerHelper
+ : public WrapperFunctionHandlerHelper<
+ decltype(&std::remove_reference_t<WrapperFunctionImplT>::operator()),
+ ResultSerializer, SPSTagTs...> {};
+
+template <typename RetT, typename... ArgTs,
+ template <typename> class ResultSerializer, typename... SPSTagTs>
+class WrapperFunctionHandlerHelper<RetT(ArgTs...), ResultSerializer,
+ SPSTagTs...> {
+public:
+ using ArgTuple = std::tuple<std::decay_t<ArgTs>...>;
+ using ArgIndices = std::make_index_sequence<std::tuple_size<ArgTuple>::value>;
+
+ template <typename HandlerT>
+ static WrapperFunctionResult apply(HandlerT &&H, const char *ArgData,
+ size_t ArgSize) {
+ ArgTuple Args;
+ if (!deserialize(ArgData, ArgSize, Args, ArgIndices{}))
+ return WrapperFunctionResult::createOutOfBandError(
+ "Could not deserialize arguments for wrapper function call");
+
+ auto HandlerResult = WrapperFunctionHandlerCaller<RetT>::call(
+ std::forward<HandlerT>(H), Args, ArgIndices{});
+
+ if (auto Result = ResultSerializer<decltype(HandlerResult)>::serialize(
+ std::move(HandlerResult)))
+ return std::move(*Result);
+ else
+ return WrapperFunctionResult::createOutOfBandError(
+ toString(Result.takeError()));
+ }
+
+private:
+ template <std::size_t... I>
+ static bool deserialize(const char *ArgData, size_t ArgSize, ArgTuple &Args,
+ std::index_sequence<I...>) {
+ SPSInputBuffer IB(ArgData, ArgSize);
+ return SPSArgList<SPSTagTs...>::deserialize(IB, std::get<I>(Args)...);
+ }
+
+};
+
+// Map function references to function types.
+template <typename RetT, typename... ArgTs,
+ template <typename> class ResultSerializer, typename... SPSTagTs>
+class WrapperFunctionHandlerHelper<RetT (&)(ArgTs...), ResultSerializer,
+ SPSTagTs...>
+ : public WrapperFunctionHandlerHelper<RetT(ArgTs...), ResultSerializer,
+ SPSTagTs...> {};
+
+// Map non-const member function types to function types.
+template <typename ClassT, typename RetT, typename... ArgTs,
+ template <typename> class ResultSerializer, typename... SPSTagTs>
+class WrapperFunctionHandlerHelper<RetT (ClassT::*)(ArgTs...), ResultSerializer,
+ SPSTagTs...>
+ : public WrapperFunctionHandlerHelper<RetT(ArgTs...), ResultSerializer,
+ SPSTagTs...> {};
+
+// Map const member function types to function types.
+template <typename ClassT, typename RetT, typename... ArgTs,
+ template <typename> class ResultSerializer, typename... SPSTagTs>
+class WrapperFunctionHandlerHelper<RetT (ClassT::*)(ArgTs...) const,
+ ResultSerializer, SPSTagTs...>
+ : public WrapperFunctionHandlerHelper<RetT(ArgTs...), ResultSerializer,
+ SPSTagTs...> {};
+
+template <typename SPSRetTagT, typename RetT> class ResultSerializer {
+public:
+ static Expected<WrapperFunctionResult> serialize(RetT Result) {
+ return serializeViaSPSToWrapperFunctionResult<SPSArgList<SPSRetTagT>>(
+ Result);
+ }
+};
+
+template <typename SPSRetTagT> class ResultSerializer<SPSRetTagT, Error> {
+public:
+ static Expected<WrapperFunctionResult> serialize(Error Err) {
+ return serializeViaSPSToWrapperFunctionResult<SPSArgList<SPSRetTagT>>(
+ toSPSSerializable(std::move(Err)));
+ }
+};
+
+template <typename SPSRetTagT, typename T>
+class ResultSerializer<SPSRetTagT, Expected<T>> {
+public:
+ static Expected<WrapperFunctionResult> serialize(Expected<T> E) {
+ return serializeViaSPSToWrapperFunctionResult<SPSArgList<SPSRetTagT>>(
+ toSPSSerializable(std::move(E)));
+ }
+};
+
+template <typename SPSRetTagT, typename RetT> class ResultDeserializer {
+public:
+ static void makeSafe(RetT &Result) {}
+
+ static Error deserialize(RetT &Result, const char *ArgData, size_t ArgSize) {
+ SPSInputBuffer IB(ArgData, ArgSize);
+ if (!SPSArgList<SPSRetTagT>::deserialize(IB, Result))
+ return make_error<StringError>(
+ "Error deserializing return value from blob in call");
+ return Error::success();
+ }
+};
+
+template <> class ResultDeserializer<SPSError, Error> {
+public:
+ static void makeSafe(Error &Err) { cantFail(std::move(Err)); }
+
+ static Error deserialize(Error &Err, const char *ArgData, size_t ArgSize) {
+ SPSInputBuffer IB(ArgData, ArgSize);
+ SPSSerializableError BSE;
+ if (!SPSArgList<SPSError>::deserialize(IB, BSE))
+ return make_error<StringError>(
+ "Error deserializing return value from blob in call");
+ Err = fromSPSSerializable(std::move(BSE));
+ return Error::success();
+ }
+};
+
+template <typename SPSTagT, typename T>
+class ResultDeserializer<SPSExpected<SPSTagT>, Expected<T>> {
+public:
+ static void makeSafe(Expected<T> &E) { cantFail(E.takeError()); }
+
+ static Error deserialize(Expected<T> &E, const char *ArgData,
+ size_t ArgSize) {
+ SPSInputBuffer IB(ArgData, ArgSize);
+ SPSSerializableExpected<T> BSE;
+ if (!SPSArgList<SPSExpected<SPSTagT>>::deserialize(IB, BSE))
+ return make_error<StringError>(
+ "Error deserializing return value from blob in call");
+ E = fromSPSSerializable(std::move(BSE));
+ return Error::success();
+ }
+};
+
+} // end namespace detail
+
+template <typename SPSSignature> class WrapperFunction;
+
+template <typename SPSRetTagT, typename... SPSTagTs>
+class WrapperFunction<SPSRetTagT(SPSTagTs...)> {
+private:
+ template <typename RetT>
+ using ResultSerializer = detail::ResultSerializer<SPSRetTagT, RetT>;
+
+public:
+ template <typename RetT, typename... ArgTs>
+ static Error call(const void *FnTag, RetT &Result, const ArgTs &...Args) {
+
+ // RetT might be an Error or Expected value. Set the checked flag now:
+ // we don't want the user to have to check the unused result if this
+ // operation fails.
+ detail::ResultDeserializer<SPSRetTagT, RetT>::makeSafe(Result);
+
+ if (ORC_RT_UNLIKELY(!&__orc_rt_jit_dispatch_ctx))
+ return make_error<StringError>("__orc_rt_jit_dispatch_ctx not set");
+ if (ORC_RT_UNLIKELY(!&__orc_rt_jit_dispatch))
+ return make_error<StringError>("__orc_rt_jit_dispatch not set");
+
+ auto ArgBuffer =
+ detail::serializeViaSPSToWrapperFunctionResult<SPSArgList<SPSTagTs...>>(
+ Args...);
+ if (!ArgBuffer)
+ return ArgBuffer.takeError();
+
+ WrapperFunctionResult ResultBuffer =
+ __orc_rt_jit_dispatch(&__orc_rt_jit_dispatch_ctx, FnTag,
+ ArgBuffer->data(), ArgBuffer->size());
+ if (auto ErrMsg = ResultBuffer.getOutOfBandError())
+ return make_error<StringError>(ErrMsg);
+
+ return detail::ResultDeserializer<SPSRetTagT, RetT>::deserialize(
+ Result, ResultBuffer.data(), ResultBuffer.size());
+ }
+
+ template <typename HandlerT>
+ static WrapperFunctionResult handle(const char *ArgData, size_t ArgSize,
+ HandlerT &&Handler) {
+ using WFHH =
+ detail::WrapperFunctionHandlerHelper<HandlerT, ResultSerializer,
+ SPSTagTs...>;
+ return WFHH::apply(std::forward<HandlerT>(Handler), ArgData, ArgSize);
+ }
+
+private:
+ template <typename T> static const T &makeSerializable(const T &Value) {
+ return Value;
+ }
+
+ static detail::SPSSerializableError makeSerializable(Error Err) {
+ return detail::toSPSSerializable(std::move(Err));
+ }
+
+ template <typename T>
+ static detail::SPSSerializableExpected<T> makeSerializable(Expected<T> E) {
+ return detail::toSPSSerializable(std::move(E));
+ }
+};
+
+template <typename... SPSTagTs>
+class WrapperFunction<void(SPSTagTs...)>
+ : private WrapperFunction<SPSEmpty(SPSTagTs...)> {
+public:
+ template <typename... ArgTs>
+ static Error call(const void *FnTag, const ArgTs &...Args) {
+ SPSEmpty BE;
+ return WrapperFunction<SPSEmpty(SPSTagTs...)>::call(FnTag, BE, Args...);
+ }
+
+ using WrapperFunction<SPSEmpty(SPSTagTs...)>::handle;
+};
+
+} // end namespace __orc_rt
+
+#endif // ORC_RT_WRAPPER_FUNCTION_UTILS_H
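A sketch of how the two halves of WrapperFunction might be used together, assuming an SPS signature of int32_t(int32_t, int32_t) and that fixed-width integer types act as their own SPS tags (as elsewhere in the runtime). The wrapper name, tag pointer, and handler below are illustrative, not part of this patch.

using namespace __orc_rt;

using SPSAddSig = int32_t(int32_t, int32_t);

// Executor side: deserialize the argument buffer, run the handler, and
// serialize the result back into a C result struct for the caller.
extern "C" __orc_rt_CWrapperFunctionResult add_wrapper(const char *ArgData,
                                                       size_t ArgSize) {
  return WrapperFunction<SPSAddSig>::handle(
             ArgData, ArgSize,
             [](int32_t A, int32_t B) { return int32_t(A + B); })
      .release();
}

// Controller side: serialize the arguments, dispatch through
// __orc_rt_jit_dispatch (which must be set), and deserialize the result.
Error callAdd(const void *AddWrapperFnTag, int32_t &Out) {
  return WrapperFunction<SPSAddSig>::call(AddWrapperFnTag, Out, int32_t(1),
                                          int32_t(2));
}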
diff --git a/compiler-rt/lib/profile/GCDAProfiling.c b/compiler-rt/lib/profile/GCDAProfiling.c
index 4293e8f7b5bf..8e51f57b09ff 100644
--- a/compiler-rt/lib/profile/GCDAProfiling.c
+++ b/compiler-rt/lib/profile/GCDAProfiling.c
@@ -23,6 +23,7 @@
#include <errno.h>
#include <fcntl.h>
+#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@@ -38,29 +39,6 @@
#include <unistd.h>
#endif
-#if defined(__FreeBSD__) && defined(__i386__)
-#define I386_FREEBSD 1
-#else
-#define I386_FREEBSD 0
-#endif
-
-#if !defined(_MSC_VER) && !I386_FREEBSD
-#include <stdint.h>
-#endif
-
-#if defined(_MSC_VER)
-typedef unsigned char uint8_t;
-typedef unsigned int uint32_t;
-typedef unsigned long long uint64_t;
-#elif I386_FREEBSD
-/* System headers define 'size_t' incorrectly on x64 FreeBSD (prior to
- * FreeBSD 10, r232261) when compiled in 32-bit mode.
- */
-typedef unsigned char uint8_t;
-typedef unsigned int uint32_t;
-typedef unsigned long long uint64_t;
-#endif
-
#include "InstrProfiling.h"
#include "InstrProfilingUtil.h"
@@ -483,8 +461,9 @@ void llvm_gcda_summary_info() {
if (val != (uint32_t)-1) {
/* There are counters present in the file. Merge them. */
- if (val != (gcov_version >= 90 ? GCOV_TAG_OBJECT_SUMMARY
- : GCOV_TAG_PROGRAM_SUMMARY)) {
+ uint32_t gcov_tag =
+ gcov_version >= 90 ? GCOV_TAG_OBJECT_SUMMARY : GCOV_TAG_PROGRAM_SUMMARY;
+ if (val != gcov_tag) {
fprintf(stderr,
"profiling: %s: cannot merge previous run count: "
"corrupt object tag (0x%08x)\n",
diff --git a/compiler-rt/lib/profile/InstrProfiling.h b/compiler-rt/lib/profile/InstrProfiling.h
index 7d1c77a3fab3..237acb33ffa1 100644
--- a/compiler-rt/lib/profile/InstrProfiling.h
+++ b/compiler-rt/lib/profile/InstrProfiling.h
@@ -101,13 +101,13 @@ void __llvm_profile_reset_counters(void);
/*!
* \brief Merge profile data from buffer.
*
- * Read profile data form buffer \p Profile and merge with
- * in-process profile counters. The client is expected to
- * have checked or already knows the profile data in the
- * buffer matches the in-process counter structure before
- * calling it.
+ * Read profile data from buffer \p Profile and merge with in-process profile
+ * counters. The client is expected to have checked or already knows the profile
+ * data in the buffer matches the in-process counter structure before calling
+ * it. Returns 0 (success) if the profile data is valid. Upon reading
+ * invalid/corrupted profile data, returns 1 (failure).
*/
-void __llvm_profile_merge_from_buffer(const char *Profile, uint64_t Size);
+int __llvm_profile_merge_from_buffer(const char *Profile, uint64_t Size);
/*! \brief Check if profile in buffer matches the current binary.
*
@@ -319,11 +319,4 @@ extern uint64_t INSTR_PROF_RAW_VERSION_VAR; /* __llvm_profile_raw_version */
*/
extern char INSTR_PROF_PROFILE_NAME_VAR[1]; /* __llvm_profile_filename. */
-/*!
- * This variable is a weak symbol defined in InstrProfilingBiasVar.c. It
- * allows compiler instrumentation to provide overriding definition with
- * value from compiler command line. This variable has hidden visibility.
- */
-COMPILER_RT_VISIBILITY extern intptr_t __llvm_profile_counter_bias;
-
#endif /* PROFILE_INSTRPROFILING_H_ */
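A small sketch of a caller honoring the new int return value of __llvm_profile_merge_from_buffer. It assumes the caller has already read or mapped a candidate profile into (Buf, Size) and, per the documentation above, has verified compatibility beforehand; the helper name is illustrative.

#include <stdint.h>

extern "C" int __llvm_profile_merge_from_buffer(const char *Profile,
                                                uint64_t Size);

static int mergeIfValid(const char *Buf, uint64_t Size) {
  /* Non-zero means the buffer looked invalid or corrupted; leave the
   * in-process counters untouched, mirroring doProfileMerging() below. */
  if (__llvm_profile_merge_from_buffer(Buf, Size))
    return -1;
  return 0;
}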
diff --git a/compiler-rt/lib/profile/InstrProfilingBiasVar.c b/compiler-rt/lib/profile/InstrProfilingBiasVar.c
deleted file mode 100644
index 05745fd858d9..000000000000
--- a/compiler-rt/lib/profile/InstrProfilingBiasVar.c
+++ /dev/null
@@ -1,15 +0,0 @@
-/*===- InstrProfilingBiasVar.c - profile counter bias variable setup ------===*\
-|*
-|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-|* See https://llvm.org/LICENSE.txt for license information.
-|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-|*
-\*===----------------------------------------------------------------------===*/
-
-#include "InstrProfiling.h"
-
-/* The runtime should only provide its own definition of this symbol when the
- * user has not specified one. Set this up by moving the runtime's copy of this
- * symbol to an object file within the archive.
- */
-COMPILER_RT_WEAK intptr_t __llvm_profile_counter_bias = -1;
diff --git a/compiler-rt/lib/profile/InstrProfilingBuffer.c b/compiler-rt/lib/profile/InstrProfilingBuffer.c
index 07bb4d4e4f1b..21fa7ba1ddd6 100644
--- a/compiler-rt/lib/profile/InstrProfilingBuffer.c
+++ b/compiler-rt/lib/profile/InstrProfilingBuffer.c
@@ -67,13 +67,20 @@ static uint64_t calculateBytesNeededToPageAlign(uint64_t Offset) {
return 0;
}
+static int needsCounterPadding(void) {
+#if defined(__APPLE__)
+ return __llvm_profile_is_continuous_mode_enabled();
+#else
+ return 0;
+#endif
+}
+
COMPILER_RT_VISIBILITY
void __llvm_profile_get_padding_sizes_for_counters(
uint64_t DataSize, uint64_t CountersSize, uint64_t NamesSize,
uint64_t *PaddingBytesBeforeCounters, uint64_t *PaddingBytesAfterCounters,
uint64_t *PaddingBytesAfterNames) {
- if (!__llvm_profile_is_continuous_mode_enabled() ||
- lprofRuntimeCounterRelocation()) {
+ if (!needsCounterPadding()) {
*PaddingBytesBeforeCounters = 0;
*PaddingBytesAfterCounters = 0;
*PaddingBytesAfterNames = __llvm_profile_get_num_padding_bytes(NamesSize);
diff --git a/compiler-rt/lib/profile/InstrProfilingFile.c b/compiler-rt/lib/profile/InstrProfilingFile.c
index 42ffdae82622..518447e3e422 100644
--- a/compiler-rt/lib/profile/InstrProfilingFile.c
+++ b/compiler-rt/lib/profile/InstrProfilingFile.c
@@ -8,6 +8,7 @@
#if !defined(__Fuchsia__)
+#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
@@ -260,11 +261,16 @@ static int doProfileMerging(FILE *ProfileFile, int *MergeDone) {
return -1;
/* Now start merging */
- __llvm_profile_merge_from_buffer(ProfileBuffer, ProfileFileSize);
+ if (__llvm_profile_merge_from_buffer(ProfileBuffer, ProfileFileSize)) {
+ PROF_ERR("%s\n", "Invalid profile data to merge");
+ (void)munmap(ProfileBuffer, ProfileFileSize);
+ return -1;
+ }
- // Truncate the file in case merging of value profile did not happend to
+ // Truncate the file in case merging of value profile did not happen to
// prevent from leaving garbage data at the end of the profile file.
- COMPILER_RT_FTRUNCATE(ProfileFile, __llvm_profile_get_size_for_buffer());
+ (void)COMPILER_RT_FTRUNCATE(ProfileFile,
+ __llvm_profile_get_size_for_buffer());
(void)munmap(ProfileBuffer, ProfileFileSize);
*MergeDone = 1;
@@ -420,7 +426,8 @@ static void truncateCurrentFile(void) {
fclose(File);
}
-#if !defined(__Fuchsia__) && !defined(_WIN32)
+// TODO: Move these functions into InstrProfilingPlatform* files.
+#if defined(__APPLE__)
static void assertIsZero(int *i) {
if (*i)
PROF_WARN("Expected flag to be 0, but got: %d\n", *i);
@@ -442,111 +449,15 @@ static void unlockProfile(int *ProfileRequiresUnlock, FILE *File) {
if (!*ProfileRequiresUnlock) {
PROF_WARN("%s", "Expected to require profile unlock\n");
}
+
lprofUnlockFileHandle(File);
*ProfileRequiresUnlock = 0;
}
-#endif // !defined(__Fuchsia__) && !defined(_WIN32)
-
-static int writeMMappedFile(FILE *OutputFile, char **Profile) {
- if (!OutputFile)
- return -1;
-
- /* Write the data into a file. */
- setupIOBuffer();
- ProfDataWriter fileWriter;
- initFileWriter(&fileWriter, OutputFile);
- if (lprofWriteData(&fileWriter, NULL, 0)) {
- PROF_ERR("Failed to write profile: %s\n", strerror(errno));
- return -1;
- }
- fflush(OutputFile);
-
- /* Get the file size. */
- uint64_t FileSize = ftell(OutputFile);
-
- /* Map the profile. */
- *Profile = (char *)mmap(
- NULL, FileSize, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(OutputFile), 0);
- if (*Profile == MAP_FAILED) {
- PROF_ERR("Unable to mmap profile: %s\n", strerror(errno));
- return -1;
- }
-
- return 0;
-}
-
-static void relocateCounters(void) {
- if (!__llvm_profile_is_continuous_mode_enabled() ||
- !lprofRuntimeCounterRelocation())
- return;
-
- /* Get the sizes of various profile data sections. Taken from
- * __llvm_profile_get_size_for_buffer(). */
- const __llvm_profile_data *DataBegin = __llvm_profile_begin_data();
- const __llvm_profile_data *DataEnd = __llvm_profile_end_data();
- uint64_t DataSize = __llvm_profile_get_data_size(DataBegin, DataEnd);
- const uint64_t CountersOffset = sizeof(__llvm_profile_header) +
- (DataSize * sizeof(__llvm_profile_data));
-
- int Length = getCurFilenameLength();
- char *FilenameBuf = (char *)COMPILER_RT_ALLOCA(Length + 1);
- const char *Filename = getCurFilename(FilenameBuf, 0);
- if (!Filename)
- return;
-
- FILE *File = NULL;
- char *Profile = NULL;
-
- if (!doMerging()) {
- File = fopen(Filename, "w+b");
- if (!File)
- return;
-
- if (writeMMappedFile(File, &Profile) == -1) {
- fclose(File);
- return;
- }
- } else {
- File = lprofOpenFileEx(Filename);
- if (!File)
- return;
-
- uint64_t ProfileFileSize = 0;
- if (getProfileFileSizeForMerging(File, &ProfileFileSize) == -1) {
- lprofUnlockFileHandle(File);
- fclose(File);
- return;
- }
-
- if (!ProfileFileSize) {
- if (writeMMappedFile(File, &Profile) == -1) {
- fclose(File);
- return;
- }
- } else {
- /* The merged profile has a non-zero length. Check that it is compatible
- * with the data in this process. */
- if (mmapProfileForMerging(File, ProfileFileSize, &Profile) == -1) {
- fclose(File);
- return;
- }
- }
-
- lprofUnlockFileHandle(File);
- }
-
- /* Update the profile fields based on the current mapping. */
- __llvm_profile_counter_bias = (intptr_t)Profile -
- (uintptr_t)__llvm_profile_begin_counters() + CountersOffset;
-}
static void initializeProfileForContinuousMode(void) {
if (!__llvm_profile_is_continuous_mode_enabled())
return;
-#if defined(__Fuchsia__) || defined(_WIN32)
- PROF_ERR("%s\n", "Continuous mode not yet supported on Fuchsia or Windows.");
-#else // defined(__Fuchsia__) || defined(_WIN32)
/* Get the sizes of various profile data sections. Taken from
* __llvm_profile_get_size_for_buffer(). */
const __llvm_profile_data *DataBegin = __llvm_profile_begin_data();
@@ -636,40 +547,173 @@ static void initializeProfileForContinuousMode(void) {
}
}
- int Fileno = fileno(File);
-
- /* Determine how much padding is needed before/after the counters and after
- * the names. */
- uint64_t PaddingBytesBeforeCounters, PaddingBytesAfterCounters,
- PaddingBytesAfterNames;
- __llvm_profile_get_padding_sizes_for_counters(
- DataSize, CountersSize, NamesSize, &PaddingBytesBeforeCounters,
- &PaddingBytesAfterCounters, &PaddingBytesAfterNames);
-
- uint64_t PageAlignedCountersLength =
- (CountersSize * sizeof(uint64_t)) + PaddingBytesAfterCounters;
- uint64_t FileOffsetToCounters =
- CurrentFileOffset + sizeof(__llvm_profile_header) +
- (DataSize * sizeof(__llvm_profile_data)) + PaddingBytesBeforeCounters;
-
- uint64_t *CounterMmap = (uint64_t *)mmap(
- (void *)CountersBegin, PageAlignedCountersLength, PROT_READ | PROT_WRITE,
- MAP_FIXED | MAP_SHARED, Fileno, FileOffsetToCounters);
- if (CounterMmap != CountersBegin) {
- PROF_ERR(
- "Continuous counter sync mode is enabled, but mmap() failed (%s).\n"
- " - CountersBegin: %p\n"
- " - PageAlignedCountersLength: %" PRIu64 "\n"
- " - Fileno: %d\n"
- " - FileOffsetToCounters: %" PRIu64 "\n",
- strerror(errno), CountersBegin, PageAlignedCountersLength, Fileno,
- FileOffsetToCounters);
+ /* mmap() the profile counters so long as there is at least one counter.
+ * If there aren't any counters, mmap() would fail with EINVAL. */
+ if (CountersSize > 0) {
+ int Fileno = fileno(File);
+
+ /* Determine how much padding is needed before/after the counters and after
+ * the names. */
+ uint64_t PaddingBytesBeforeCounters, PaddingBytesAfterCounters,
+ PaddingBytesAfterNames;
+ __llvm_profile_get_padding_sizes_for_counters(
+ DataSize, CountersSize, NamesSize, &PaddingBytesBeforeCounters,
+ &PaddingBytesAfterCounters, &PaddingBytesAfterNames);
+
+ uint64_t PageAlignedCountersLength =
+ (CountersSize * sizeof(uint64_t)) + PaddingBytesAfterCounters;
+ uint64_t FileOffsetToCounters =
+ CurrentFileOffset + sizeof(__llvm_profile_header) +
+ (DataSize * sizeof(__llvm_profile_data)) + PaddingBytesBeforeCounters;
+
+ uint64_t *CounterMmap = (uint64_t *)mmap(
+ (void *)CountersBegin, PageAlignedCountersLength, PROT_READ | PROT_WRITE,
+ MAP_FIXED | MAP_SHARED, Fileno, FileOffsetToCounters);
+ if (CounterMmap != CountersBegin) {
+ PROF_ERR(
+ "Continuous counter sync mode is enabled, but mmap() failed (%s).\n"
+ " - CountersBegin: %p\n"
+ " - PageAlignedCountersLength: %" PRIu64 "\n"
+ " - Fileno: %d\n"
+ " - FileOffsetToCounters: %" PRIu64 "\n",
+ strerror(errno), CountersBegin, PageAlignedCountersLength, Fileno,
+ FileOffsetToCounters);
+ }
}
if (ProfileRequiresUnlock)
unlockProfile(&ProfileRequiresUnlock, File);
-#endif // defined(__Fuchsia__) || defined(_WIN32)
}
+#elif defined(__ELF__) || defined(_WIN32)
+
+#define INSTR_PROF_PROFILE_COUNTER_BIAS_DEFAULT_VAR \
+ INSTR_PROF_CONCAT(INSTR_PROF_PROFILE_COUNTER_BIAS_VAR, _default)
+intptr_t INSTR_PROF_PROFILE_COUNTER_BIAS_DEFAULT_VAR = 0;
+
+/* This variable is a weak external reference which could be used to detect
+ * whether or not the compiler defined this symbol. */
+#if defined(_WIN32)
+COMPILER_RT_VISIBILITY extern intptr_t INSTR_PROF_PROFILE_COUNTER_BIAS_VAR;
+#pragma comment(linker, "/alternatename:" \
+ INSTR_PROF_QUOTE(INSTR_PROF_PROFILE_COUNTER_BIAS_VAR) "=" \
+ INSTR_PROF_QUOTE(INSTR_PROF_PROFILE_COUNTER_BIAS_DEFAULT_VAR))
+#else
+COMPILER_RT_VISIBILITY extern intptr_t INSTR_PROF_PROFILE_COUNTER_BIAS_VAR
+ __attribute__((weak, alias(INSTR_PROF_QUOTE(
+ INSTR_PROF_PROFILE_COUNTER_BIAS_DEFAULT_VAR))));
+#endif
+
+static int writeMMappedFile(FILE *OutputFile, char **Profile) {
+ if (!OutputFile)
+ return -1;
+
+ /* Write the data into a file. */
+ setupIOBuffer();
+ ProfDataWriter fileWriter;
+ initFileWriter(&fileWriter, OutputFile);
+ if (lprofWriteData(&fileWriter, NULL, 0)) {
+ PROF_ERR("Failed to write profile: %s\n", strerror(errno));
+ return -1;
+ }
+ fflush(OutputFile);
+
+ /* Get the file size. */
+ uint64_t FileSize = ftell(OutputFile);
+
+ /* Map the profile. */
+ *Profile = (char *)mmap(
+ NULL, FileSize, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(OutputFile), 0);
+ if (*Profile == MAP_FAILED) {
+ PROF_ERR("Unable to mmap profile: %s\n", strerror(errno));
+ return -1;
+ }
+
+ return 0;
+}
+
+static void initializeProfileForContinuousMode(void) {
+ if (!__llvm_profile_is_continuous_mode_enabled())
+ return;
+
+ /* This symbol is defined by the compiler when runtime counter relocation is
+ * used, and the runtime provides a weak alias so we can check if it's defined. */
+ void *BiasAddr = &INSTR_PROF_PROFILE_COUNTER_BIAS_VAR;
+ void *BiasDefaultAddr = &INSTR_PROF_PROFILE_COUNTER_BIAS_DEFAULT_VAR;
+ if (BiasAddr == BiasDefaultAddr) {
+ PROF_ERR("%s\n", "__llvm_profile_counter_bias is undefined");
+ return;
+ }
+
+ /* Get the sizes of various profile data sections. Taken from
+ * __llvm_profile_get_size_for_buffer(). */
+ const __llvm_profile_data *DataBegin = __llvm_profile_begin_data();
+ const __llvm_profile_data *DataEnd = __llvm_profile_end_data();
+ const uint64_t *CountersBegin = __llvm_profile_begin_counters();
+ const uint64_t *CountersEnd = __llvm_profile_end_counters();
+ uint64_t DataSize = __llvm_profile_get_data_size(DataBegin, DataEnd);
+ const uint64_t CountersOffset =
+ sizeof(__llvm_profile_header) + (DataSize * sizeof(__llvm_profile_data));
+
+ int Length = getCurFilenameLength();
+ char *FilenameBuf = (char *)COMPILER_RT_ALLOCA(Length + 1);
+ const char *Filename = getCurFilename(FilenameBuf, 0);
+ if (!Filename)
+ return;
+
+ FILE *File = NULL;
+ char *Profile = NULL;
+
+ if (!doMerging()) {
+ File = fopen(Filename, "w+b");
+ if (!File)
+ return;
+
+ if (writeMMappedFile(File, &Profile) == -1) {
+ fclose(File);
+ return;
+ }
+ } else {
+ File = lprofOpenFileEx(Filename);
+ if (!File)
+ return;
+
+ uint64_t ProfileFileSize = 0;
+ if (getProfileFileSizeForMerging(File, &ProfileFileSize) == -1) {
+ lprofUnlockFileHandle(File);
+ fclose(File);
+ return;
+ }
+
+ if (!ProfileFileSize) {
+ if (writeMMappedFile(File, &Profile) == -1) {
+ fclose(File);
+ return;
+ }
+ } else {
+ /* The merged profile has a non-zero length. Check that it is compatible
+ * with the data in this process. */
+ if (mmapProfileForMerging(File, ProfileFileSize, &Profile) == -1) {
+ fclose(File);
+ return;
+ }
+ }
+
+ lprofUnlockFileHandle(File);
+ }
+
+ /* Update the profile fields based on the current mapping. */
+ INSTR_PROF_PROFILE_COUNTER_BIAS_VAR =
+ (intptr_t)Profile - (uintptr_t)CountersBegin +
+ CountersOffset;
+
+ /* Return the memory allocated for counters to OS. */
+ lprofReleaseMemoryPagesToOS((uintptr_t)CountersBegin, (uintptr_t)CountersEnd);
+}
+#else
+static void initializeProfileForContinuousMode(void) {
+ PROF_ERR("%s\n", "continuous mode is unsupported on this platform");
+}
+#endif
static const char *DefaultProfileName = "default.profraw";
static void resetFilenameToDefault(void) {
@@ -699,6 +743,13 @@ static unsigned getMergePoolSize(const char *FilenamePat, int *I) {
return 0;
}
+/* Assert that Idx does not index past a string null terminator. Return the
+ * result of the check. */
+static int checkBounds(int Idx, int Strlen) {
+ assert(Idx <= Strlen && "Indexing past string null terminator");
+ return Idx <= Strlen;
+}
+
/* Parses the pattern string \p FilenamePat and stores the result to
* lprofcurFilename structure. */
static int parseFilenamePattern(const char *FilenamePat,
@@ -707,6 +758,7 @@ static int parseFilenamePattern(const char *FilenamePat,
char *PidChars = &lprofCurFilename.PidChars[0];
char *Hostname = &lprofCurFilename.Hostname[0];
int MergingEnabled = 0;
+ int FilenamePatLen = strlen(FilenamePat);
/* Clean up cached prefix and filename. */
if (lprofCurFilename.ProfilePathPrefix)
@@ -725,9 +777,12 @@ static int parseFilenamePattern(const char *FilenamePat,
lprofCurFilename.OwnsFilenamePat = 1;
}
/* Check the filename for "%p", which indicates a pid-substitution. */
- for (I = 0; FilenamePat[I]; ++I)
+ for (I = 0; checkBounds(I, FilenamePatLen) && FilenamePat[I]; ++I) {
if (FilenamePat[I] == '%') {
- if (FilenamePat[++I] == 'p') {
+ ++I; /* Advance to the next character. */
+ if (!checkBounds(I, FilenamePatLen))
+ break;
+ if (FilenamePat[I] == 'p') {
if (!NumPids++) {
if (snprintf(PidChars, MAX_PID_SIZE, "%ld", (long)getpid()) <= 0) {
PROF_WARN("Unable to get pid for filename pattern %s. Using the "
@@ -758,10 +813,14 @@ static int parseFilenamePattern(const char *FilenamePat,
FilenamePat);
return -1;
}
-
+#if defined(__APPLE__) || defined(__ELF__) || defined(_WIN32)
__llvm_profile_set_page_size(getpagesize());
__llvm_profile_enable_continuous_mode();
- I++; /* advance to 'c' */
+#else
+ PROF_WARN("%s", "Continous mode is currently only supported for Mach-O,"
+ " ELF and COFF formats.");
+ return -1;
+#endif
} else {
unsigned MergePoolSize = getMergePoolSize(FilenamePat, &I);
if (!MergePoolSize)
@@ -775,6 +834,7 @@ static int parseFilenamePattern(const char *FilenamePat,
lprofCurFilename.MergePoolSize = MergePoolSize;
}
}
+ }
lprofCurFilename.NumPids = NumPids;
lprofCurFilename.NumHosts = NumHosts;
@@ -817,12 +877,8 @@ static void parseAndSetFilename(const char *FilenamePat,
}
truncateCurrentFile();
- if (__llvm_profile_is_continuous_mode_enabled()) {
- if (lprofRuntimeCounterRelocation())
- relocateCounters();
- else
- initializeProfileForContinuousMode();
- }
+ if (__llvm_profile_is_continuous_mode_enabled())
+ initializeProfileForContinuousMode();
}
/* Return buffer length that is required to store the current profile
@@ -978,9 +1034,6 @@ void __llvm_profile_initialize_file(void) {
ProfileNameSpecifier PNS = PNS_unknown;
int hasCommandLineOverrider = (INSTR_PROF_PROFILE_NAME_VAR[0] != 0);
- if (__llvm_profile_counter_bias != -1)
- lprofSetRuntimeCounterRelocation(1);
-
EnvFilenamePat = getFilenamePatFromEnv();
if (EnvFilenamePat) {
/* Pass CopyFilenamePat = 1, to ensure that the filename would be valid
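The counter-bias plumbing above relies on a weak-alias trick: the runtime defines a default variable, exposes the real symbol as a weak alias of it, and compares addresses at startup to learn whether the compiler emitted its own strong definition. A distilled sketch of that idiom, with made-up names and GNU-style attributes (ELF targets only), is:

#include <cstdint>

extern "C" {
// Fallback definition owned by the runtime.
intptr_t bias_default = 0;
// Weak alias: resolves to bias_default unless another object file provides a
// strong definition of `bias`.
extern intptr_t bias __attribute__((weak, alias("bias_default")));
}

static bool compilerProvidedBias() {
  // Distinct addresses mean a strong definition overrode the weak alias.
  return &bias != &bias_default;
}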
diff --git a/compiler-rt/lib/profile/InstrProfilingInternal.c b/compiler-rt/lib/profile/InstrProfilingInternal.c
index 6a54697df7f0..edd38ad765c5 100644
--- a/compiler-rt/lib/profile/InstrProfilingInternal.c
+++ b/compiler-rt/lib/profile/InstrProfilingInternal.c
@@ -23,14 +23,4 @@ COMPILER_RT_VISIBILITY void lprofSetProfileDumped(unsigned Value) {
ProfileDumped = Value;
}
-static unsigned RuntimeCounterRelocation = 0;
-
-COMPILER_RT_VISIBILITY unsigned lprofRuntimeCounterRelocation(void) {
- return RuntimeCounterRelocation;
-}
-
-COMPILER_RT_VISIBILITY void lprofSetRuntimeCounterRelocation(unsigned Value) {
- RuntimeCounterRelocation = Value;
-}
-
#endif
diff --git a/compiler-rt/lib/profile/InstrProfilingInternal.h b/compiler-rt/lib/profile/InstrProfilingInternal.h
index 904bd3945928..ffa790a4cb66 100644
--- a/compiler-rt/lib/profile/InstrProfilingInternal.h
+++ b/compiler-rt/lib/profile/InstrProfilingInternal.h
@@ -184,10 +184,6 @@ uint64_t lprofGetLoadModuleSignature();
unsigned lprofProfileDumped(void);
void lprofSetProfileDumped(unsigned);
-/* Return non zero value if counters are being relocated at runtime. */
-unsigned lprofRuntimeCounterRelocation(void);
-void lprofSetRuntimeCounterRelocation(unsigned);
-
COMPILER_RT_VISIBILITY extern void (*FreeHook)(void *);
COMPILER_RT_VISIBILITY extern uint8_t *DynamicBufferIOBuffer;
COMPILER_RT_VISIBILITY extern uint32_t VPBufferSize;
@@ -197,4 +193,10 @@ COMPILER_RT_VISIBILITY extern ValueProfNode *CurrentVNode;
COMPILER_RT_VISIBILITY extern ValueProfNode *EndVNode;
extern void (*VPMergeHook)(struct ValueProfData *, __llvm_profile_data *);
+/*
+ * Write binary ids into profiles if writer is given.
+ * Return -1 if an error occurs, otherwise, return total size of binary ids.
+ */
+int __llvm_write_binary_ids(ProfDataWriter *Writer);
+
#endif
diff --git a/compiler-rt/lib/profile/InstrProfilingMerge.c b/compiler-rt/lib/profile/InstrProfilingMerge.c
index 0fd9b2bcd41f..913228513259 100644
--- a/compiler-rt/lib/profile/InstrProfilingMerge.c
+++ b/compiler-rt/lib/profile/InstrProfilingMerge.c
@@ -82,13 +82,13 @@ int __llvm_profile_check_compatibility(const char *ProfileData,
}
COMPILER_RT_VISIBILITY
-void __llvm_profile_merge_from_buffer(const char *ProfileData,
- uint64_t ProfileSize) {
+int __llvm_profile_merge_from_buffer(const char *ProfileData,
+ uint64_t ProfileSize) {
__llvm_profile_data *SrcDataStart, *SrcDataEnd, *SrcData, *DstData;
__llvm_profile_header *Header = (__llvm_profile_header *)ProfileData;
uint64_t *SrcCountersStart;
const char *SrcNameStart;
- ValueProfData *SrcValueProfDataStart, *SrcValueProfData;
+ const char *SrcValueProfDataStart, *SrcValueProfData;
SrcDataStart =
(__llvm_profile_data *)(ProfileData + sizeof(__llvm_profile_header));
@@ -96,37 +96,47 @@ void __llvm_profile_merge_from_buffer(const char *ProfileData,
SrcCountersStart = (uint64_t *)SrcDataEnd;
SrcNameStart = (const char *)(SrcCountersStart + Header->CountersSize);
SrcValueProfDataStart =
- (ValueProfData *)(SrcNameStart + Header->NamesSize +
- __llvm_profile_get_num_padding_bytes(
- Header->NamesSize));
+ SrcNameStart + Header->NamesSize +
+ __llvm_profile_get_num_padding_bytes(Header->NamesSize);
+ if (SrcNameStart < (const char *)SrcCountersStart)
+ return 1;
for (SrcData = SrcDataStart,
DstData = (__llvm_profile_data *)__llvm_profile_begin_data(),
SrcValueProfData = SrcValueProfDataStart;
SrcData < SrcDataEnd; ++SrcData, ++DstData) {
- uint64_t *SrcCounters;
uint64_t *DstCounters = (uint64_t *)DstData->CounterPtr;
- unsigned I, NC, NVK = 0;
+ unsigned NVK = 0;
- NC = SrcData->NumCounters;
- SrcCounters = SrcCountersStart +
- ((size_t)SrcData->CounterPtr - Header->CountersDelta) /
- sizeof(uint64_t);
- for (I = 0; I < NC; I++)
+ unsigned NC = SrcData->NumCounters;
+ if (NC == 0)
+ return 1;
+ uint64_t *SrcCounters = SrcCountersStart + ((size_t)SrcData->CounterPtr -
+ Header->CountersDelta) /
+ sizeof(uint64_t);
+ if (SrcCounters < SrcCountersStart ||
+ (const char *)SrcCounters >= SrcNameStart ||
+ (const char *)(SrcCounters + NC) > SrcNameStart)
+ return 1;
+ for (unsigned I = 0; I < NC; I++)
DstCounters[I] += SrcCounters[I];
- /* Now merge value profile data. */
+ /* Now merge value profile data. */
if (!VPMergeHook)
continue;
- for (I = 0; I <= IPVK_Last; I++)
+ for (unsigned I = 0; I <= IPVK_Last; I++)
NVK += (SrcData->NumValueSites[I] != 0);
if (!NVK)
continue;
- VPMergeHook(SrcValueProfData, DstData);
- SrcValueProfData = (ValueProfData *)((char *)SrcValueProfData +
- SrcValueProfData->TotalSize);
+ if (SrcValueProfData >= ProfileData + ProfileSize)
+ return 1;
+ VPMergeHook((ValueProfData *)SrcValueProfData, DstData);
+ SrcValueProfData =
+ SrcValueProfData + ((ValueProfData *)SrcValueProfData)->TotalSize;
}
+
+ return 0;
}
diff --git a/compiler-rt/lib/profile/InstrProfilingPlatformDarwin.c b/compiler-rt/lib/profile/InstrProfilingPlatformDarwin.c
index 29541c74d5a6..c2e7fad98386 100644
--- a/compiler-rt/lib/profile/InstrProfilingPlatformDarwin.c
+++ b/compiler-rt/lib/profile/InstrProfilingPlatformDarwin.c
@@ -10,6 +10,7 @@
// with freestanding compilation. See `darwin_add_builtin_libraries`.
#include "InstrProfiling.h"
+#include "InstrProfilingInternal.h"
#if defined(__APPLE__)
/* Use linker magic to find the bounds of the Data section. */
@@ -67,4 +68,9 @@ ValueProfNode *__llvm_profile_end_vnodes(void) { return &VNodesEnd; }
COMPILER_RT_VISIBILITY ValueProfNode *CurrentVNode = &VNodesStart;
COMPILER_RT_VISIBILITY ValueProfNode *EndVNode = &VNodesEnd;
+
+COMPILER_RT_VISIBILITY int __llvm_write_binary_ids(ProfDataWriter *Writer) {
+ return 0;
+}
+
#endif
diff --git a/compiler-rt/lib/profile/InstrProfilingPlatformFuchsia.c b/compiler-rt/lib/profile/InstrProfilingPlatformFuchsia.c
index d8b7fa21d257..0146b14c193f 100644
--- a/compiler-rt/lib/profile/InstrProfilingPlatformFuchsia.c
+++ b/compiler-rt/lib/profile/InstrProfilingPlatformFuchsia.c
@@ -34,16 +34,14 @@
#include "InstrProfilingInternal.h"
#include "InstrProfilingUtil.h"
+/* This variable is an external reference to a symbol defined by the compiler. */
+COMPILER_RT_VISIBILITY extern intptr_t INSTR_PROF_PROFILE_COUNTER_BIAS_VAR;
+
COMPILER_RT_VISIBILITY unsigned lprofProfileDumped() {
return 1;
}
COMPILER_RT_VISIBILITY void lprofSetProfileDumped(unsigned Value) {}
-COMPILER_RT_VISIBILITY unsigned lprofRuntimeCounterRelocation(void) {
- return 1;
-}
-COMPILER_RT_VISIBILITY void lprofSetRuntimeCounterRelocation(unsigned Value) {}
-
static const char ProfileSinkName[] = "llvm-profile";
static inline void lprofWrite(const char *fmt, ...) {
@@ -116,23 +114,22 @@ void __llvm_profile_initialize(void) {
return;
}
- /* This symbol is defined as weak and initialized to -1 by the runtimer, but
- * compiler will generate a strong definition initialized to 0 when runtime
- * counter relocation is used. */
- if (__llvm_profile_counter_bias == -1) {
- lprofWrite("LLVM Profile: counter relocation at runtime is required\n");
- return;
- }
-
const __llvm_profile_data *DataBegin = __llvm_profile_begin_data();
const __llvm_profile_data *DataEnd = __llvm_profile_end_data();
+ const uint64_t *CountersBegin = __llvm_profile_begin_counters();
+ const uint64_t *CountersEnd = __llvm_profile_end_counters();
const uint64_t DataSize = __llvm_profile_get_data_size(DataBegin, DataEnd);
const uint64_t CountersOffset =
sizeof(__llvm_profile_header) + (DataSize * sizeof(__llvm_profile_data));
+ uint64_t CountersSize = CountersEnd - CountersBegin;
+
+ /* Don't publish a VMO if there are no counters. */
+ if (!CountersSize)
+ return;
zx_status_t Status;
- /* Create VMO to hold the profile data. */
+ /* Create a VMO to hold the profile data. */
zx_handle_t Vmo = ZX_HANDLE_INVALID;
Status = _zx_vmo_create(0, ZX_VMO_RESIZABLE, &Vmo);
if (Status != ZX_OK) {
@@ -185,9 +182,11 @@ void __llvm_profile_initialize(void) {
lprofWrite("LLVM Profile: {{{dumpfile:%s:%s}}}\n", ProfileSinkName, VmoName);
/* Update the profile fields based on the current mapping. */
- __llvm_profile_counter_bias = (intptr_t)Mapping -
- (uintptr_t)__llvm_profile_begin_counters() +
- CountersOffset;
+ INSTR_PROF_PROFILE_COUNTER_BIAS_VAR =
+ (intptr_t)Mapping - (uintptr_t)CountersBegin + CountersOffset;
+
+ /* Return the memory allocated for counters to OS. */
+ lprofReleaseMemoryPagesToOS((uintptr_t)CountersBegin, (uintptr_t)CountersEnd);
}
#endif
diff --git a/compiler-rt/lib/profile/InstrProfilingPlatformLinux.c b/compiler-rt/lib/profile/InstrProfilingPlatformLinux.c
index c9fb481f8e90..508624a80cd6 100644
--- a/compiler-rt/lib/profile/InstrProfilingPlatformLinux.c
+++ b/compiler-rt/lib/profile/InstrProfilingPlatformLinux.c
@@ -9,9 +9,13 @@
#if defined(__linux__) || defined(__FreeBSD__) || defined(__Fuchsia__) || \
(defined(__sun__) && defined(__svr4__)) || defined(__NetBSD__)
+#include <elf.h>
+#include <link.h>
#include <stdlib.h>
+#include <string.h>
#include "InstrProfiling.h"
+#include "InstrProfilingInternal.h"
#define PROF_DATA_START INSTR_PROF_SECT_START(INSTR_PROF_DATA_COMMON)
#define PROF_DATA_STOP INSTR_PROF_SECT_STOP(INSTR_PROF_DATA_COMMON)
@@ -26,25 +30,17 @@
/* Declare section start and stop symbols for various sections
* generated by compiler instrumentation.
*/
-extern __llvm_profile_data PROF_DATA_START COMPILER_RT_VISIBILITY;
-extern __llvm_profile_data PROF_DATA_STOP COMPILER_RT_VISIBILITY;
-extern uint64_t PROF_CNTS_START COMPILER_RT_VISIBILITY;
-extern uint64_t PROF_CNTS_STOP COMPILER_RT_VISIBILITY;
-extern uint32_t PROF_ORDERFILE_START COMPILER_RT_VISIBILITY;
-extern char PROF_NAME_START COMPILER_RT_VISIBILITY;
-extern char PROF_NAME_STOP COMPILER_RT_VISIBILITY;
-extern ValueProfNode PROF_VNODES_START COMPILER_RT_VISIBILITY;
-extern ValueProfNode PROF_VNODES_STOP COMPILER_RT_VISIBILITY;
-
-/* Add dummy data to ensure the section is always created. */
-__llvm_profile_data
- __prof_data_sect_data[0] COMPILER_RT_SECTION(INSTR_PROF_DATA_SECT_NAME);
-uint64_t
- __prof_cnts_sect_data[0] COMPILER_RT_SECTION(INSTR_PROF_CNTS_SECT_NAME);
-uint32_t
- __prof_orderfile_sect_data[0] COMPILER_RT_SECTION(INSTR_PROF_ORDERFILE_SECT_NAME);
-const char __prof_nms_sect_data[0] COMPILER_RT_SECTION(INSTR_PROF_NAME_SECT_NAME);
-ValueProfNode __prof_vnodes_sect_data[0] COMPILER_RT_SECTION(INSTR_PROF_VNODES_SECT_NAME);
+extern __llvm_profile_data PROF_DATA_START COMPILER_RT_VISIBILITY
+ COMPILER_RT_WEAK;
+extern __llvm_profile_data PROF_DATA_STOP COMPILER_RT_VISIBILITY
+ COMPILER_RT_WEAK;
+extern uint64_t PROF_CNTS_START COMPILER_RT_VISIBILITY COMPILER_RT_WEAK;
+extern uint64_t PROF_CNTS_STOP COMPILER_RT_VISIBILITY COMPILER_RT_WEAK;
+extern uint32_t PROF_ORDERFILE_START COMPILER_RT_VISIBILITY COMPILER_RT_WEAK;
+extern char PROF_NAME_START COMPILER_RT_VISIBILITY COMPILER_RT_WEAK;
+extern char PROF_NAME_STOP COMPILER_RT_VISIBILITY COMPILER_RT_WEAK;
+extern ValueProfNode PROF_VNODES_START COMPILER_RT_VISIBILITY COMPILER_RT_WEAK;
+extern ValueProfNode PROF_VNODES_STOP COMPILER_RT_VISIBILITY COMPILER_RT_WEAK;
COMPILER_RT_VISIBILITY const __llvm_profile_data *
__llvm_profile_begin_data(void) {
@@ -80,4 +76,108 @@ COMPILER_RT_VISIBILITY ValueProfNode *__llvm_profile_end_vnodes(void) {
COMPILER_RT_VISIBILITY ValueProfNode *CurrentVNode = &PROF_VNODES_START;
COMPILER_RT_VISIBILITY ValueProfNode *EndVNode = &PROF_VNODES_STOP;
+static size_t RoundUp(size_t size, size_t align) {
+ return (size + align - 1) & ~(align - 1);
+}
+
+/*
+ * Write the binary id length and then its data, because a binary id does not
+ * have a fixed length.
+ */
+int WriteOneBinaryId(ProfDataWriter *Writer, uint64_t BinaryIdLen,
+ const uint8_t *BinaryIdData) {
+ ProfDataIOVec BinaryIdIOVec[] = {
+ {&BinaryIdLen, sizeof(uint64_t), 1, 0},
+ {BinaryIdData, sizeof(uint8_t), BinaryIdLen, 0}};
+ if (Writer->Write(Writer, BinaryIdIOVec,
+ sizeof(BinaryIdIOVec) / sizeof(*BinaryIdIOVec)))
+ return -1;
+
+ /* Successfully wrote binary id, report success. */
+ return 0;
+}
+
+/*
+ * Look for the note that has the name "GNU\0" and type NT_GNU_BUILD_ID,
+ * which contains the build id. If a build id exists, write the binary id.
+ *
+ * Each note in notes section starts with a struct which includes
+ * n_namesz, n_descsz, and n_type members. It is followed by the name
+ * (whose length is defined in n_namesz) and then by the descriptor
+ * (whose length is defined in n_descsz).
+ *
+ * Note sections like .note.ABI-tag and .note.gnu.build-id are aligned
+ * to 4 bytes, so round n_namesz and n_descsz to the nearest 4 bytes.
+ */
+int WriteBinaryIdForNote(ProfDataWriter *Writer, const ElfW(Nhdr) * Note) {
+ int BinaryIdSize = 0;
+
+ const char *NoteName = (const char *)Note + sizeof(ElfW(Nhdr));
+ if (Note->n_type == NT_GNU_BUILD_ID && Note->n_namesz == 4 &&
+ memcmp(NoteName, "GNU\0", 4) == 0) {
+
+ uint64_t BinaryIdLen = Note->n_descsz;
+ const uint8_t *BinaryIdData =
+ (const uint8_t *)(NoteName + RoundUp(Note->n_namesz, 4));
+ if (Writer != NULL &&
+ WriteOneBinaryId(Writer, BinaryIdLen, BinaryIdData) == -1)
+ return -1;
+
+ BinaryIdSize = sizeof(BinaryIdLen) + BinaryIdLen;
+ }
+
+ return BinaryIdSize;
+}
+
+/*
+ * Helper function that iterates through the notes section and finds build ids.
+ * If a writer is given, write the binary ids into the profile.
+ * If an error happens while writing, return -1.
+ */
+int WriteBinaryIds(ProfDataWriter *Writer, const ElfW(Nhdr) * Note,
+ const ElfW(Nhdr) * NotesEnd) {
+ int TotalBinaryIdsSize = 0;
+ while (Note < NotesEnd) {
+ int Result = WriteBinaryIdForNote(Writer, Note);
+ if (Result == -1)
+ return -1;
+ TotalBinaryIdsSize += Result;
+
+ /* Calculate the offset of the next note in notes section. */
+ size_t NoteOffset = sizeof(ElfW(Nhdr)) + RoundUp(Note->n_namesz, 4) +
+ RoundUp(Note->n_descsz, 4);
+ Note = (const ElfW(Nhdr) *)((const char *)(Note) + NoteOffset);
+ }
+
+ return TotalBinaryIdsSize;
+}
+
+/*
+ * Write binary ids into profiles if writer is given.
+ * Return the total size of binary ids.
+ * If an error happens while writing, return -1.
+ */
+COMPILER_RT_VISIBILITY int __llvm_write_binary_ids(ProfDataWriter *Writer) {
+ extern const ElfW(Ehdr) __ehdr_start __attribute__((visibility("hidden")));
+ const ElfW(Ehdr) *ElfHeader = &__ehdr_start;
+ const ElfW(Phdr) *ProgramHeader =
+ (const ElfW(Phdr) *)((uintptr_t)ElfHeader + ElfHeader->e_phoff);
+
+ uint32_t I;
+ /* Iterate through entries in the program header. */
+ for (I = 0; I < ElfHeader->e_phnum; I++) {
+ /* Look for the notes section in program header entries. */
+ if (ProgramHeader[I].p_type != PT_NOTE)
+ continue;
+
+ const ElfW(Nhdr) *Note =
+ (const ElfW(Nhdr) *)((uintptr_t)ElfHeader + ProgramHeader[I].p_offset);
+ const ElfW(Nhdr) *NotesEnd =
+ (const ElfW(Nhdr) *)((const char *)(Note) + ProgramHeader[I].p_filesz);
+ return WriteBinaryIds(Writer, Note, NotesEnd);
+ }
+
+ return 0;
+}
+
#endif
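Given the NULL checks in WriteBinaryIdForNote() above, a null writer appears to be tolerated and turns __llvm_write_binary_ids() into a size-only query over the PT_NOTE segments. A hedged sketch of using it that way (assuming InstrProfilingInternal.h is on the include path; the helper name is illustrative):

#include "InstrProfilingInternal.h"

/* Returns the number of bytes the build-id notes would add to a profile
 * (a length word plus the descriptor for each NT_GNU_BUILD_ID note),
 * or 0 if an error was reported. */
static int binaryIdsByteCount(void) {
  int Size = __llvm_write_binary_ids(/*Writer=*/NULL);
  return Size < 0 ? 0 : Size;
}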
diff --git a/compiler-rt/lib/profile/InstrProfilingPlatformOther.c b/compiler-rt/lib/profile/InstrProfilingPlatformOther.c
index 56c5d8378c29..0e59148e2044 100644
--- a/compiler-rt/lib/profile/InstrProfilingPlatformOther.c
+++ b/compiler-rt/lib/profile/InstrProfilingPlatformOther.c
@@ -14,6 +14,7 @@
#include <stdio.h>
#include "InstrProfiling.h"
+#include "InstrProfilingInternal.h"
static const __llvm_profile_data *DataFirst = NULL;
static const __llvm_profile_data *DataLast = NULL;
@@ -97,4 +98,8 @@ ValueProfNode *__llvm_profile_end_vnodes(void) { return 0; }
COMPILER_RT_VISIBILITY ValueProfNode *CurrentVNode = 0;
COMPILER_RT_VISIBILITY ValueProfNode *EndVNode = 0;
+COMPILER_RT_VISIBILITY int __llvm_write_binary_ids(ProfDataWriter *Writer) {
+ return 0;
+}
+
#endif
diff --git a/compiler-rt/lib/profile/InstrProfilingPlatformWindows.c b/compiler-rt/lib/profile/InstrProfilingPlatformWindows.c
index 81b708bb2a38..a0192ced4f26 100644
--- a/compiler-rt/lib/profile/InstrProfilingPlatformWindows.c
+++ b/compiler-rt/lib/profile/InstrProfilingPlatformWindows.c
@@ -7,6 +7,7 @@
\*===----------------------------------------------------------------------===*/
#include "InstrProfiling.h"
+#include "InstrProfilingInternal.h"
#if defined(_WIN32)
@@ -65,4 +66,8 @@ ValueProfNode *__llvm_profile_end_vnodes(void) { return &VNodesEnd; }
ValueProfNode *CurrentVNode = &VNodesStart + 1;
ValueProfNode *EndVNode = &VNodesEnd;
+COMPILER_RT_VISIBILITY int __llvm_write_binary_ids(ProfDataWriter *Writer) {
+ return 0;
+}
+
#endif
diff --git a/compiler-rt/lib/profile/InstrProfilingPort.h b/compiler-rt/lib/profile/InstrProfilingPort.h
index cb66c5964ad1..ed0905cc5f20 100644
--- a/compiler-rt/lib/profile/InstrProfilingPort.h
+++ b/compiler-rt/lib/profile/InstrProfilingPort.h
@@ -23,6 +23,7 @@
#define COMPILER_RT_FTRUNCATE(f,l) _chsize(_fileno(f),l)
#define COMPILER_RT_ALWAYS_INLINE __forceinline
#define COMPILER_RT_CLEANUP(x)
+#define COMPILER_RT_USED
#elif __GNUC__
#ifdef _WIN32
#define COMPILER_RT_FTRUNCATE(f, l) _chsize(fileno(f), l)
@@ -37,6 +38,7 @@
#define COMPILER_RT_ALLOCA __builtin_alloca
#define COMPILER_RT_ALWAYS_INLINE inline __attribute((always_inline))
#define COMPILER_RT_CLEANUP(x) __attribute__((cleanup(x)))
+#define COMPILER_RT_USED __attribute__((used))
#endif
#if defined(__APPLE__)
diff --git a/compiler-rt/lib/profile/InstrProfilingUtil.c b/compiler-rt/lib/profile/InstrProfilingUtil.c
index bf5a9670fe18..4fa792b72eac 100644
--- a/compiler-rt/lib/profile/InstrProfilingUtil.c
+++ b/compiler-rt/lib/profile/InstrProfilingUtil.c
@@ -12,12 +12,13 @@
#include <windows.h>
#include "WindowsMMap.h"
#else
+#include <errno.h>
+#include <fcntl.h>
#include <sys/file.h>
+#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
-#include <fcntl.h>
-#include <errno.h>
#endif
#ifdef COMPILER_RT_HAS_UNAME
@@ -32,6 +33,10 @@
#include <sys/prctl.h>
#endif
+#if defined(__Fuchsia__)
+#include <zircon/syscalls.h>
+#endif
+
#include "InstrProfiling.h"
#include "InstrProfilingUtil.h"
@@ -330,3 +335,21 @@ COMPILER_RT_VISIBILITY void lprofRestoreSigKill() {
prctl(PR_SET_PDEATHSIG, SIGKILL);
#endif
}
+
+COMPILER_RT_VISIBILITY int lprofReleaseMemoryPagesToOS(uintptr_t Begin,
+ uintptr_t End) {
+ size_t PageSize = getpagesize();
+ uintptr_t BeginAligned = lprofRoundUpTo((uintptr_t)Begin, PageSize);
+ uintptr_t EndAligned = lprofRoundDownTo((uintptr_t)End, PageSize);
+ if (BeginAligned < EndAligned) {
+#if defined(__Fuchsia__)
+ return _zx_vmar_op_range(_zx_vmar_root_self(), ZX_VMAR_OP_DECOMMIT,
+ (zx_vaddr_t)BeginAligned,
+ EndAligned - BeginAligned, NULL, 0);
+#else
+ return madvise((void *)BeginAligned, EndAligned - BeginAligned,
+ MADV_DONTNEED);
+#endif
+ }
+ return 0;
+}
diff --git a/compiler-rt/lib/profile/InstrProfilingUtil.h b/compiler-rt/lib/profile/InstrProfilingUtil.h
index 5f5c85091fe8..4a88a0358094 100644
--- a/compiler-rt/lib/profile/InstrProfilingUtil.h
+++ b/compiler-rt/lib/profile/InstrProfilingUtil.h
@@ -9,6 +9,7 @@
#ifndef PROFILE_INSTRPROFILINGUTIL_H
#define PROFILE_INSTRPROFILINGUTIL_H
+#include <inttypes.h>
#include <stddef.h>
#include <stdio.h>
@@ -73,4 +74,14 @@ int lprofSuspendSigKill();
/* Restore previously suspended SIGKILL. */
void lprofRestoreSigKill();
+static inline size_t lprofRoundUpTo(size_t x, size_t boundary) {
+ return (x + boundary - 1) & ~(boundary - 1);
+}
+
+static inline size_t lprofRoundDownTo(size_t x, size_t boundary) {
+ return x & ~(boundary - 1);
+}
+
+int lprofReleaseMemoryPagesToOS(uintptr_t Begin, uintptr_t End);
+
#endif /* PROFILE_INSTRPROFILINGUTIL_H */
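A tiny worked example of the rounding helpers above, using local constexpr copies so the arithmetic can be checked at compile time; it assumes a 4096-byte page and, like the originals, a power-of-two boundary.

#include <cstddef>

constexpr size_t roundUpTo(size_t X, size_t Boundary) {
  return (X + Boundary - 1) & ~(Boundary - 1);
}
constexpr size_t roundDownTo(size_t X, size_t Boundary) {
  return X & ~(Boundary - 1);
}

// With Begin = 5000 and End = 20000, only the whole pages in [8192, 16384)
// are handed back to the OS by lprofReleaseMemoryPagesToOS().
static_assert(roundUpTo(5000, 4096) == 8192, "partial first page is skipped");
static_assert(roundDownTo(20000, 4096) == 16384, "partial last page is skipped");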
diff --git a/compiler-rt/lib/profile/InstrProfilingWriter.c b/compiler-rt/lib/profile/InstrProfilingWriter.c
index 16ad965ff608..25f630293227 100644
--- a/compiler-rt/lib/profile/InstrProfilingWriter.c
+++ b/compiler-rt/lib/profile/InstrProfilingWriter.c
@@ -283,16 +283,24 @@ lprofWriteDataImpl(ProfDataWriter *Writer, const __llvm_profile_data *DataBegin,
#define INSTR_PROF_RAW_HEADER(Type, Name, Init) Header.Name = Init;
#include "profile/InstrProfData.inc"
- /* Write the data. */
- ProfDataIOVec IOVec[] = {
- {&Header, sizeof(__llvm_profile_header), 1, 0},
+ /* Write the profile header. */
+ ProfDataIOVec IOVec[] = {{&Header, sizeof(__llvm_profile_header), 1, 0}};
+ if (Writer->Write(Writer, IOVec, sizeof(IOVec) / sizeof(*IOVec)))
+ return -1;
+
+ /* Write the binary id lengths and data. */
+ if (__llvm_write_binary_ids(Writer) == -1)
+ return -1;
+
+ /* Write the profile data. */
+ ProfDataIOVec IOVecData[] = {
{DataBegin, sizeof(__llvm_profile_data), DataSize, 0},
{NULL, sizeof(uint8_t), PaddingBytesBeforeCounters, 1},
{CountersBegin, sizeof(uint64_t), CountersSize, 0},
{NULL, sizeof(uint8_t), PaddingBytesAfterCounters, 1},
{SkipNameDataWrite ? NULL : NamesBegin, sizeof(uint8_t), NamesSize, 0},
{NULL, sizeof(uint8_t), PaddingBytesAfterNames, 1}};
- if (Writer->Write(Writer, IOVec, sizeof(IOVec) / sizeof(*IOVec)))
+ if (Writer->Write(Writer, IOVecData, sizeof(IOVecData) / sizeof(*IOVecData)))
return -1;
/* Value profiling is not yet supported in continuous mode. */
diff --git a/compiler-rt/lib/profile/WindowsMMap.c b/compiler-rt/lib/profile/WindowsMMap.c
index 41cc67f41f1f..07c0a689feae 100644
--- a/compiler-rt/lib/profile/WindowsMMap.c
+++ b/compiler-rt/lib/profile/WindowsMMap.c
@@ -113,6 +113,18 @@ int msync(void *addr, size_t length, int flags)
}
COMPILER_RT_VISIBILITY
+int madvise(void *addr, size_t length, int advice)
+{
+ if (advice != MADV_DONTNEED)
+ return -1; /* Not supported. */
+
+ if (!VirtualUnlock(addr, length))
+ return -1;
+
+ return 0;
+}
+
+COMPILER_RT_VISIBILITY
int lock(HANDLE handle, DWORD lockType, BOOL blocking) {
DWORD flags = lockType;
if (!blocking)
diff --git a/compiler-rt/lib/profile/WindowsMMap.h b/compiler-rt/lib/profile/WindowsMMap.h
index c8d6250f41c1..68b8de2398d6 100644
--- a/compiler-rt/lib/profile/WindowsMMap.h
+++ b/compiler-rt/lib/profile/WindowsMMap.h
@@ -37,6 +37,14 @@
#define MS_SYNC 0x0010 /* msync synchronously */
/*
+ * madvise() flags
+ */
+
+#define MADV_NORMAL 0 /* no special treatment */
+#define MADV_WILLNEED 3 /* expect access in the near future */
+#define MADV_DONTNEED 4 /* do not expect access in the near future */
+
+/*
* flock() operations
*/
#define LOCK_SH 1 /* shared lock */
@@ -59,6 +67,8 @@ void munmap(void *addr, size_t length);
int msync(void *addr, size_t length, int flags);
+int madvise(void *addr, size_t length, int advice);
+
int flock(int fd, int operation);
#endif /* _WIN32 */
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_addrhashmap.h b/compiler-rt/lib/sanitizer_common/sanitizer_addrhashmap.h
index a033e788cbf8..15f81a04350f 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_addrhashmap.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_addrhashmap.h
@@ -162,8 +162,8 @@ AddrHashMap<T, kSize>::AddrHashMap() {
table_ = (Bucket*)MmapOrDie(kSize * sizeof(table_[0]), "AddrHashMap");
}
-template<typename T, uptr kSize>
-void AddrHashMap<T, kSize>::acquire(Handle *h) {
+template <typename T, uptr kSize>
+void AddrHashMap<T, kSize>::acquire(Handle *h) NO_THREAD_SAFETY_ANALYSIS {
uptr addr = h->addr_;
uptr hash = calcHash(addr);
Bucket *b = &table_[hash];
@@ -289,57 +289,57 @@ void AddrHashMap<T, kSize>::acquire(Handle *h) {
CHECK_EQ(atomic_load(&c->addr, memory_order_relaxed), 0);
h->addidx_ = i;
h->cell_ = c;
-}
-
-template<typename T, uptr kSize>
-void AddrHashMap<T, kSize>::release(Handle *h) {
- if (!h->cell_)
- return;
- Bucket *b = h->bucket_;
- Cell *c = h->cell_;
- uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
- if (h->created_) {
- // Denote completion of insertion.
- CHECK_EQ(addr1, 0);
- // After the following store, the element becomes available
- // for lock-free reads.
- atomic_store(&c->addr, h->addr_, memory_order_release);
- b->mtx.Unlock();
- } else if (h->remove_) {
- // Denote that the cell is empty now.
- CHECK_EQ(addr1, h->addr_);
- atomic_store(&c->addr, 0, memory_order_release);
- // See if we need to compact the bucket.
- AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
- if (h->addidx_ == -1U) {
- // Removed from embed array, move an add element into the freed cell.
- if (add && add->size != 0) {
- uptr last = --add->size;
- Cell *c1 = &add->cells[last];
- c->val = c1->val;
- uptr addr1 = atomic_load(&c1->addr, memory_order_relaxed);
- atomic_store(&c->addr, addr1, memory_order_release);
- atomic_store(&c1->addr, 0, memory_order_release);
- }
- } else {
- // Removed from add array, compact it.
- uptr last = --add->size;
- Cell *c1 = &add->cells[last];
- if (c != c1) {
- *c = *c1;
- atomic_store(&c1->addr, 0, memory_order_relaxed);
- }
- }
- if (add && add->size == 0) {
- // FIXME(dvyukov): free add?
- }
- b->mtx.Unlock();
- } else {
- CHECK_EQ(addr1, h->addr_);
- if (h->addidx_ != -1U)
- b->mtx.ReadUnlock();
- }
-}
+ }
+
+ template <typename T, uptr kSize>
+ void AddrHashMap<T, kSize>::release(Handle *h) NO_THREAD_SAFETY_ANALYSIS {
+ if (!h->cell_)
+ return;
+ Bucket *b = h->bucket_;
+ Cell *c = h->cell_;
+ uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
+ if (h->created_) {
+ // Denote completion of insertion.
+ CHECK_EQ(addr1, 0);
+ // After the following store, the element becomes available
+ // for lock-free reads.
+ atomic_store(&c->addr, h->addr_, memory_order_release);
+ b->mtx.Unlock();
+ } else if (h->remove_) {
+ // Denote that the cell is empty now.
+ CHECK_EQ(addr1, h->addr_);
+ atomic_store(&c->addr, 0, memory_order_release);
+ // See if we need to compact the bucket.
+ AddBucket *add = (AddBucket *)atomic_load(&b->add, memory_order_relaxed);
+ if (h->addidx_ == -1U) {
+ // Removed from embed array, move an add element into the freed cell.
+ if (add && add->size != 0) {
+ uptr last = --add->size;
+ Cell *c1 = &add->cells[last];
+ c->val = c1->val;
+ uptr addr1 = atomic_load(&c1->addr, memory_order_relaxed);
+ atomic_store(&c->addr, addr1, memory_order_release);
+ atomic_store(&c1->addr, 0, memory_order_release);
+ }
+ } else {
+ // Removed from add array, compact it.
+ uptr last = --add->size;
+ Cell *c1 = &add->cells[last];
+ if (c != c1) {
+ *c = *c1;
+ atomic_store(&c1->addr, 0, memory_order_relaxed);
+ }
+ }
+ if (add && add->size == 0) {
+ // FIXME(dvyukov): free add?
+ }
+ b->mtx.Unlock();
+ } else {
+ CHECK_EQ(addr1, h->addr_);
+ if (h->addidx_ != -1U)
+ b->mtx.ReadUnlock();
+ }
+ }
template<typename T, uptr kSize>
uptr AddrHashMap<T, kSize>::calcHash(uptr addr) {
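Editor's note: acquire() can return holding the bucket mutex for reading, for writing, or not at all, depending on whether the element was found, created, or is about to be removed, and release() undoes whichever of those happened. Clang's static thread-safety analysis cannot follow such conditional locking, so both functions are opted out with NO_THREAD_SAFETY_ANALYSIS; the ACQUIRE/RELEASE/CHECK_LOCKED spellings used later in this patch are the complementary positive annotations. An illustrative set of definitions (the real ones live in a shared sanitizer header, so the exact spellings here are assumptions):

// Illustrative only: map the macros used in this patch onto Clang's
// -Wthread-safety capability attributes, and compile them away elsewhere.
#if defined(__clang__)
#  define THREAD_ANNOTATION(x) __attribute__((x))
#else
#  define THREAD_ANNOTATION(x)
#endif
#define ACQUIRE(...) THREAD_ANNOTATION(acquire_capability(__VA_ARGS__))
#define RELEASE(...) THREAD_ANNOTATION(release_capability(__VA_ARGS__))
#define CHECK_LOCKED(...) THREAD_ANNOTATION(assert_capability(__VA_ARGS__))
#define NO_THREAD_SAFETY_ANALYSIS THREAD_ANNOTATION(no_thread_safety_analysis)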
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp
index 3157b35ffaf8..bcb7370a7906 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp
@@ -137,14 +137,6 @@ static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
#endif // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
-namespace {
-const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;
-
-struct BlockHeader {
- u64 magic;
-};
-} // namespace
-
static void NORETURN ReportInternalAllocatorOutOfMemory(uptr requested_size) {
SetAllocatorOutOfMemory();
Report("FATAL: %s: internal allocator is out of memory trying to allocate "
@@ -153,28 +145,17 @@ static void NORETURN ReportInternalAllocatorOutOfMemory(uptr requested_size) {
}
void *InternalAlloc(uptr size, InternalAllocatorCache *cache, uptr alignment) {
- uptr s = size + sizeof(BlockHeader);
- if (s < size)
- return nullptr;
- BlockHeader *p = (BlockHeader *)RawInternalAlloc(s, cache, alignment);
+ void *p = RawInternalAlloc(size, cache, alignment);
if (UNLIKELY(!p))
- ReportInternalAllocatorOutOfMemory(s);
- p->magic = kBlockMagic;
- return p + 1;
+ ReportInternalAllocatorOutOfMemory(size);
+ return p;
}
void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
- if (!addr)
- return InternalAlloc(size, cache);
- uptr s = size + sizeof(BlockHeader);
- if (s < size)
- return nullptr;
- BlockHeader *p = (BlockHeader *)addr - 1;
- CHECK_EQ(kBlockMagic, p->magic);
- p = (BlockHeader *)RawInternalRealloc(p, s, cache);
+ void *p = RawInternalRealloc(addr, size, cache);
if (UNLIKELY(!p))
- ReportInternalAllocatorOutOfMemory(s);
- return p + 1;
+ ReportInternalAllocatorOutOfMemory(size);
+ return p;
}
void *InternalReallocArray(void *addr, uptr count, uptr size,
@@ -203,12 +184,7 @@ void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
}
void InternalFree(void *addr, InternalAllocatorCache *cache) {
- if (!addr)
- return;
- BlockHeader *p = (BlockHeader *)addr - 1;
- CHECK_EQ(kBlockMagic, p->magic);
- p->magic = 0;
- RawInternalFree(p, cache);
+ RawInternalFree(addr, cache);
}
// LowLevelAllocator
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
index 33f89d6d4992..0e81e6764f9a 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
@@ -35,9 +35,9 @@ class CombinedAllocator {
secondary_.InitLinkerInitialized();
}
- void Init(s32 release_to_os_interval_ms) {
+ void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) {
stats_.Init();
- primary_.Init(release_to_os_interval_ms);
+ primary_.Init(release_to_os_interval_ms, heap_start);
secondary_.Init();
}
@@ -177,12 +177,12 @@ class CombinedAllocator {
// ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
// introspection API.
- void ForceLock() {
+ void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
primary_.ForceLock();
secondary_.ForceLock();
}
- void ForceUnlock() {
+ void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
secondary_.ForceUnlock();
primary_.ForceUnlock();
}
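Editor's note: Init() now takes an optional heap_start and simply forwards it to the primary allocator, so a tool that has already reserved and mapped the heap region (for example to alias it) can hand that mapping over; passing 0 keeps the old behavior. A hedged usage sketch, where Allocator stands for whichever CombinedAllocator instantiation a tool defines:

// Sketch only; the concrete Allocator typedef lives in each tool.
static Allocator allocator_instance;

void InitToolAllocator(uptr premapped_heap) {  // 0 => allocator maps its own
  // -1 disables the periodic release-to-OS interval.
  allocator_instance.Init(/*release_to_os_interval_ms=*/-1, premapped_heap);
}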
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_local_cache.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_local_cache.h
index 108dfc231a22..e495c56f0377 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_local_cache.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_local_cache.h
@@ -17,6 +17,7 @@
template <class SizeClassAllocator>
struct SizeClassAllocator64LocalCache {
typedef SizeClassAllocator Allocator;
+ typedef MemoryMapper<Allocator> MemoryMapperT;
void Init(AllocatorGlobalStats *s) {
stats_.Init();
@@ -53,7 +54,7 @@ struct SizeClassAllocator64LocalCache {
PerClass *c = &per_class_[class_id];
InitCache(c);
if (UNLIKELY(c->count == c->max_count))
- Drain(c, allocator, class_id, c->max_count / 2);
+ DrainHalfMax(c, allocator, class_id);
CompactPtrT chunk = allocator->PointerToCompactPtr(
allocator->GetRegionBeginBySizeClass(class_id),
reinterpret_cast<uptr>(p));
@@ -62,10 +63,10 @@ struct SizeClassAllocator64LocalCache {
}
void Drain(SizeClassAllocator *allocator) {
+ MemoryMapperT memory_mapper(*allocator);
for (uptr i = 1; i < kNumClasses; i++) {
PerClass *c = &per_class_[i];
- while (c->count > 0)
- Drain(c, allocator, i, c->count);
+ while (c->count > 0) Drain(&memory_mapper, c, allocator, i, c->count);
}
}
@@ -106,12 +107,18 @@ struct SizeClassAllocator64LocalCache {
return true;
}
- NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator, uptr class_id,
- uptr count) {
+ NOINLINE void DrainHalfMax(PerClass *c, SizeClassAllocator *allocator,
+ uptr class_id) {
+ MemoryMapperT memory_mapper(*allocator);
+ Drain(&memory_mapper, c, allocator, class_id, c->max_count / 2);
+ }
+
+ void Drain(MemoryMapperT *memory_mapper, PerClass *c,
+ SizeClassAllocator *allocator, uptr class_id, uptr count) {
CHECK_GE(c->count, count);
const uptr first_idx_to_drain = c->count - count;
c->count -= count;
- allocator->ReturnToAllocator(&stats_, class_id,
+ allocator->ReturnToAllocator(memory_mapper, &stats_, class_id,
&c->chunks[first_idx_to_drain], count);
}
};
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h
index b90dabbf7769..38d2a7d117fb 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h
@@ -119,7 +119,8 @@ class SizeClassAllocator32 {
typedef SizeClassAllocator32<Params> ThisT;
typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache;
- void Init(s32 release_to_os_interval_ms) {
+ void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) {
+ CHECK(!heap_start);
possible_regions.Init();
internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
}
@@ -236,13 +237,13 @@ class SizeClassAllocator32 {
// ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
// introspection API.
- void ForceLock() {
+ void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
for (uptr i = 0; i < kNumClasses; i++) {
GetSizeClassInfo(i)->mutex.Lock();
}
}
- void ForceUnlock() {
+ void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
for (int i = kNumClasses - 1; i >= 0; i--) {
GetSizeClassInfo(i)->mutex.Unlock();
}
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
index 0a18b0c58ef7..b142ee0131b2 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
@@ -19,7 +19,7 @@ template<class SizeClassAllocator> struct SizeClassAllocator64LocalCache;
// The template parameter Params is a class containing the actual parameters.
//
// Space: a portion of address space of kSpaceSize bytes starting at SpaceBeg.
-// If kSpaceBeg is ~0 then SpaceBeg is chosen dynamically my mmap.
+// If kSpaceBeg is ~0 then SpaceBeg is chosen dynamically by mmap.
// Otherwise SpaceBeg=kSpaceBeg (fixed address).
// kSpaceSize is a power of two.
// At the beginning the entire space is mprotect-ed, then small parts of it
@@ -42,6 +42,44 @@ struct SizeClassAllocator64FlagMasks { // Bit masks.
};
};
+template <typename Allocator>
+class MemoryMapper {
+ public:
+ typedef typename Allocator::CompactPtrT CompactPtrT;
+
+ explicit MemoryMapper(const Allocator &allocator) : allocator_(allocator) {}
+
+ bool GetAndResetStats(uptr &ranges, uptr &bytes) {
+ ranges = released_ranges_count_;
+ released_ranges_count_ = 0;
+ bytes = released_bytes_;
+ released_bytes_ = 0;
+ return ranges != 0;
+ }
+
+ u64 *MapPackedCounterArrayBuffer(uptr count) {
+ buffer_.clear();
+ buffer_.resize(count);
+ return buffer_.data();
+ }
+
+ // Releases [from, to) range of pages back to OS.
+ void ReleasePageRangeToOS(uptr class_id, CompactPtrT from, CompactPtrT to) {
+ const uptr region_base = allocator_.GetRegionBeginBySizeClass(class_id);
+ const uptr from_page = allocator_.CompactPtrToPointer(region_base, from);
+ const uptr to_page = allocator_.CompactPtrToPointer(region_base, to);
+ ReleaseMemoryPagesToOS(from_page, to_page);
+ released_ranges_count_++;
+ released_bytes_ += to_page - from_page;
+ }
+
+ private:
+ const Allocator &allocator_;
+ uptr released_ranges_count_ = 0;
+ uptr released_bytes_ = 0;
+ InternalMmapVector<u64> buffer_;
+};
+
template <class Params>
class SizeClassAllocator64 {
public:
@@ -57,6 +95,7 @@ class SizeClassAllocator64 {
typedef SizeClassAllocator64<Params> ThisT;
typedef SizeClassAllocator64LocalCache<ThisT> AllocatorCache;
+ typedef MemoryMapper<ThisT> MemoryMapperT;
// When we know the size class (the region base) we can represent a pointer
// as a 4-byte integer (offset from the region start shifted right by 4).
@@ -69,25 +108,45 @@ class SizeClassAllocator64 {
return base + (static_cast<uptr>(ptr32) << kCompactPtrScale);
}
- void Init(s32 release_to_os_interval_ms) {
+ // If heap_start is nonzero, assumes kSpaceSize bytes are already mapped R/W
+ // at heap_start and places the heap there. This mode requires kSpaceBeg ==
+ // ~(uptr)0.
+ void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) {
uptr TotalSpaceSize = kSpaceSize + AdditionalSize();
- if (kUsingConstantSpaceBeg) {
- CHECK(IsAligned(kSpaceBeg, SizeClassMap::kMaxSize));
- CHECK_EQ(kSpaceBeg, address_range.Init(TotalSpaceSize,
- PrimaryAllocatorName, kSpaceBeg));
+ PremappedHeap = heap_start != 0;
+ if (PremappedHeap) {
+ CHECK(!kUsingConstantSpaceBeg);
+ NonConstSpaceBeg = heap_start;
+ uptr RegionInfoSize = AdditionalSize();
+ RegionInfoSpace =
+ address_range.Init(RegionInfoSize, PrimaryAllocatorName);
+ CHECK_NE(RegionInfoSpace, ~(uptr)0);
+ CHECK_EQ(RegionInfoSpace,
+ address_range.MapOrDie(RegionInfoSpace, RegionInfoSize,
+ "SizeClassAllocator: region info"));
+ MapUnmapCallback().OnMap(RegionInfoSpace, RegionInfoSize);
} else {
- // Combined allocator expects that an 2^N allocation is always aligned to
- // 2^N. For this to work, the start of the space needs to be aligned as
- // high as the largest size class (which also needs to be a power of 2).
- NonConstSpaceBeg = address_range.InitAligned(
- TotalSpaceSize, SizeClassMap::kMaxSize, PrimaryAllocatorName);
- CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
+ if (kUsingConstantSpaceBeg) {
+ CHECK(IsAligned(kSpaceBeg, SizeClassMap::kMaxSize));
+ CHECK_EQ(kSpaceBeg,
+ address_range.Init(TotalSpaceSize, PrimaryAllocatorName,
+ kSpaceBeg));
+ } else {
+ // Combined allocator expects that an 2^N allocation is always aligned
+ // to 2^N. For this to work, the start of the space needs to be aligned
+ // as high as the largest size class (which also needs to be a power of
+ // 2).
+ NonConstSpaceBeg = address_range.InitAligned(
+ TotalSpaceSize, SizeClassMap::kMaxSize, PrimaryAllocatorName);
+ CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
+ }
+ RegionInfoSpace = SpaceEnd();
+ MapWithCallbackOrDie(RegionInfoSpace, AdditionalSize(),
+ "SizeClassAllocator: region info");
}
SetReleaseToOSIntervalMs(release_to_os_interval_ms);
- MapWithCallbackOrDie(SpaceEnd(), AdditionalSize(),
- "SizeClassAllocator: region info");
// Check that the RegionInfo array is aligned on the CacheLine size.
- DCHECK_EQ(SpaceEnd() % kCacheLineSize, 0);
+ DCHECK_EQ(RegionInfoSpace % kCacheLineSize, 0);
}
s32 ReleaseToOSIntervalMs() const {
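Editor's note: with a nonzero heap_start the primary no longer reserves its own space. It requires kSpaceBeg == ~0 (dynamic placement), points NonConstSpaceBeg at the caller's mapping, and reserves only a separate range for the RegionInfo array instead of appending it at SpaceEnd(). A worked layout with illustrative numbers:

// Illustrative only. Suppose kSpaceSize = 0x10000000000 (1 TiB) and the
// caller premapped [0x500000000000, 0x510000000000) read/write.
//   allocator.Init(-1, /*heap_start=*/0x500000000000);
//   NonConstSpaceBeg = 0x500000000000        // caller's mapping, reused as-is
//   RegionInfoSpace  = <separately reserved> // AdditionalSize() bytes,
//                                            // no longer placed at SpaceEnd()
// With heap_start == 0 the old behavior is kept: the allocator reserves
// kSpaceSize + AdditionalSize() itself and RegionInfoSpace == SpaceEnd().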
@@ -100,9 +159,10 @@ class SizeClassAllocator64 {
}
void ForceReleaseToOS() {
+ MemoryMapperT memory_mapper(*this);
for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
BlockingMutexLock l(&GetRegionInfo(class_id)->mutex);
- MaybeReleaseToOS(class_id, true /*force*/);
+ MaybeReleaseToOS(&memory_mapper, class_id, true /*force*/);
}
}
@@ -111,7 +171,8 @@ class SizeClassAllocator64 {
alignment <= SizeClassMap::kMaxSize;
}
- NOINLINE void ReturnToAllocator(AllocatorStats *stat, uptr class_id,
+ NOINLINE void ReturnToAllocator(MemoryMapperT *memory_mapper,
+ AllocatorStats *stat, uptr class_id,
const CompactPtrT *chunks, uptr n_chunks) {
RegionInfo *region = GetRegionInfo(class_id);
uptr region_beg = GetRegionBeginBySizeClass(class_id);
@@ -134,7 +195,7 @@ class SizeClassAllocator64 {
region->num_freed_chunks = new_num_freed_chunks;
region->stats.n_freed += n_chunks;
- MaybeReleaseToOS(class_id, false /*force*/);
+ MaybeReleaseToOS(memory_mapper, class_id, false /*force*/);
}
NOINLINE bool GetFromAllocator(AllocatorStats *stat, uptr class_id,
@@ -144,6 +205,17 @@ class SizeClassAllocator64 {
CompactPtrT *free_array = GetFreeArray(region_beg);
BlockingMutexLock l(&region->mutex);
+#if SANITIZER_WINDOWS
+ /* On Windows unmapping of memory during __sanitizer_purge_allocator is
+ explicit and immediate, so unmapped regions must be explicitly mapped back
+ in when they are accessed again. */
+ if (region->rtoi.last_released_bytes > 0) {
+ MmapFixedOrDie(region_beg, region->mapped_user,
+ "SizeClassAllocator: region data");
+ region->rtoi.n_freed_at_last_release = 0;
+ region->rtoi.last_released_bytes = 0;
+ }
+#endif
if (UNLIKELY(region->num_freed_chunks < n_chunks)) {
if (UNLIKELY(!PopulateFreeArray(stat, class_id, region,
n_chunks - region->num_freed_chunks)))
@@ -281,13 +353,13 @@ class SizeClassAllocator64 {
// ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
// introspection API.
- void ForceLock() {
+ void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
for (uptr i = 0; i < kNumClasses; i++) {
GetRegionInfo(i)->mutex.Lock();
}
}
- void ForceUnlock() {
+ void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
for (int i = (int)kNumClasses - 1; i >= 0; i--) {
GetRegionInfo(i)->mutex.Unlock();
}
@@ -331,11 +403,11 @@ class SizeClassAllocator64 {
// For the performance sake, none of the accessors check the validity of the
// arguments, it is assumed that index is always in [0, n) range and the value
// is not incremented past max_value.
- template<class MemoryMapperT>
class PackedCounterArray {
public:
- PackedCounterArray(u64 num_counters, u64 max_value, MemoryMapperT *mapper)
- : n(num_counters), memory_mapper(mapper) {
+ template <typename MemoryMapper>
+ PackedCounterArray(u64 num_counters, u64 max_value, MemoryMapper *mapper)
+ : n(num_counters) {
CHECK_GT(num_counters, 0);
CHECK_GT(max_value, 0);
constexpr u64 kMaxCounterBits = sizeof(*buffer) * 8ULL;
@@ -352,17 +424,8 @@ class SizeClassAllocator64 {
packing_ratio_log = Log2(packing_ratio);
bit_offset_mask = packing_ratio - 1;
- buffer_size =
- (RoundUpTo(n, 1ULL << packing_ratio_log) >> packing_ratio_log) *
- sizeof(*buffer);
- buffer = reinterpret_cast<u64*>(
- memory_mapper->MapPackedCounterArrayBuffer(buffer_size));
- }
- ~PackedCounterArray() {
- if (buffer) {
- memory_mapper->UnmapPackedCounterArrayBuffer(
- reinterpret_cast<uptr>(buffer), buffer_size);
- }
+ buffer = mapper->MapPackedCounterArrayBuffer(
+ RoundUpTo(n, 1ULL << packing_ratio_log) >> packing_ratio_log);
}
bool IsAllocated() const {
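Editor's note: the counters are bit-packed into u64 words; after this change the buffer is owned by the MemoryMapper (an InternalMmapVector) and MapPackedCounterArrayBuffer takes a count of u64 elements rather than a byte size. A worked example of the packing arithmetic, with illustrative numbers:

// Illustrative: max_value = 32 chunks per page.
//   bits per counter  = RoundUpToPowerOfTwo(MostSignificantSetBitIndex(32)+1)
//                     = RoundUpToPowerOfTwo(6) = 8
//   packing_ratio     = 64 / 8 = 8 counters per u64 word
//   packing_ratio_log = 3,  bit_offset_mask = 7
//   for n = 1000 pages: buffer length = RoundUpTo(1000, 8) >> 3 = 125 words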
@@ -399,19 +462,16 @@ class SizeClassAllocator64 {
u64 counter_mask;
u64 packing_ratio_log;
u64 bit_offset_mask;
-
- MemoryMapperT* const memory_mapper;
- u64 buffer_size;
u64* buffer;
};
- template<class MemoryMapperT>
+ template <class MemoryMapperT>
class FreePagesRangeTracker {
public:
- explicit FreePagesRangeTracker(MemoryMapperT* mapper)
+ FreePagesRangeTracker(MemoryMapperT *mapper, uptr class_id)
: memory_mapper(mapper),
- page_size_scaled_log(Log2(GetPageSizeCached() >> kCompactPtrScale)),
- in_the_range(false), current_page(0), current_range_start_page(0) {}
+ class_id(class_id),
+ page_size_scaled_log(Log2(GetPageSizeCached() >> kCompactPtrScale)) {}
void NextPage(bool freed) {
if (freed) {
@@ -433,28 +493,30 @@ class SizeClassAllocator64 {
void CloseOpenedRange() {
if (in_the_range) {
memory_mapper->ReleasePageRangeToOS(
- current_range_start_page << page_size_scaled_log,
+ class_id, current_range_start_page << page_size_scaled_log,
current_page << page_size_scaled_log);
in_the_range = false;
}
}
- MemoryMapperT* const memory_mapper;
- const uptr page_size_scaled_log;
- bool in_the_range;
- uptr current_page;
- uptr current_range_start_page;
+ MemoryMapperT *const memory_mapper = nullptr;
+ const uptr class_id = 0;
+ const uptr page_size_scaled_log = 0;
+ bool in_the_range = false;
+ uptr current_page = 0;
+ uptr current_range_start_page = 0;
};
// Iterates over the free_array to identify memory pages containing freed
// chunks only and returns these pages back to OS.
// allocated_pages_count is the total number of pages allocated for the
// current bucket.
- template<class MemoryMapperT>
+ template <typename MemoryMapper>
static void ReleaseFreeMemoryToOS(CompactPtrT *free_array,
uptr free_array_count, uptr chunk_size,
uptr allocated_pages_count,
- MemoryMapperT *memory_mapper) {
+ MemoryMapper *memory_mapper,
+ uptr class_id) {
const uptr page_size = GetPageSizeCached();
// Figure out the number of chunks per page and whether we can take a fast
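Editor's note: ReleaseFreeMemoryToOS now also carries the class_id so the MemoryMapper can derive the region base itself. The chunks-per-page decision it feeds into the counters is easiest to see with concrete numbers (illustrative only; the exact branching is in the surrounding, unchanged code):

// Illustrative: page_size = 4096.
//   chunk_size = 128  -> every page holds exactly 32 chunks, so a page can be
//                        released once its counter reaches 32 (fast path).
//   chunk_size = 48   -> 4096 % 48 != 0, chunks straddle page boundaries, so
//                        the expected per-page count varies and the slower
//                        per-page computation is used.
//   chunk_size = 8192 -> one freed chunk spans two whole pages; the counters
//                        then track whole pages covered by freed chunks.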
@@ -490,9 +552,8 @@ class SizeClassAllocator64 {
UNREACHABLE("All chunk_size/page_size ratios must be handled.");
}
- PackedCounterArray<MemoryMapperT> counters(allocated_pages_count,
- full_pages_chunk_count_max,
- memory_mapper);
+ PackedCounterArray counters(allocated_pages_count,
+ full_pages_chunk_count_max, memory_mapper);
if (!counters.IsAllocated())
return;
@@ -517,7 +578,7 @@ class SizeClassAllocator64 {
// Iterate over pages detecting ranges of pages with chunk counters equal
// to the expected number of chunks for the particular page.
- FreePagesRangeTracker<MemoryMapperT> range_tracker(memory_mapper);
+ FreePagesRangeTracker<MemoryMapper> range_tracker(memory_mapper, class_id);
if (same_chunk_count_per_page) {
// Fast path, every page has the same number of chunks affecting it.
for (uptr i = 0; i < counters.GetCount(); i++)
@@ -556,7 +617,7 @@ class SizeClassAllocator64 {
}
private:
- friend class MemoryMapper;
+ friend class MemoryMapper<ThisT>;
ReservedAddressRange address_range;
@@ -586,6 +647,11 @@ class SizeClassAllocator64 {
atomic_sint32_t release_to_os_interval_ms_;
+ uptr RegionInfoSpace;
+
+ // True if the user has already mapped the entire heap R/W.
+ bool PremappedHeap;
+
struct Stats {
uptr n_allocated;
uptr n_freed;
@@ -615,7 +681,7 @@ class SizeClassAllocator64 {
RegionInfo *GetRegionInfo(uptr class_id) const {
DCHECK_LT(class_id, kNumClasses);
- RegionInfo *regions = reinterpret_cast<RegionInfo *>(SpaceEnd());
+ RegionInfo *regions = reinterpret_cast<RegionInfo *>(RegionInfoSpace);
return &regions[class_id];
}
@@ -640,6 +706,9 @@ class SizeClassAllocator64 {
}
bool MapWithCallback(uptr beg, uptr size, const char *name) {
+ if (PremappedHeap)
+ return beg >= NonConstSpaceBeg &&
+ beg + size <= NonConstSpaceBeg + kSpaceSize;
uptr mapped = address_range.Map(beg, size, name);
if (UNLIKELY(!mapped))
return false;
@@ -649,11 +718,18 @@ class SizeClassAllocator64 {
}
void MapWithCallbackOrDie(uptr beg, uptr size, const char *name) {
+ if (PremappedHeap) {
+ CHECK_GE(beg, NonConstSpaceBeg);
+ CHECK_LE(beg + size, NonConstSpaceBeg + kSpaceSize);
+ return;
+ }
CHECK_EQ(beg, address_range.MapOrDie(beg, size, name));
MapUnmapCallback().OnMap(beg, size);
}
void UnmapWithCallbackOrDie(uptr beg, uptr size) {
+ if (PremappedHeap)
+ return;
MapUnmapCallback().OnUnmap(beg, size);
address_range.Unmap(beg, size);
}
@@ -775,55 +851,13 @@ class SizeClassAllocator64 {
return true;
}
- class MemoryMapper {
- public:
- MemoryMapper(const ThisT& base_allocator, uptr class_id)
- : allocator(base_allocator),
- region_base(base_allocator.GetRegionBeginBySizeClass(class_id)),
- released_ranges_count(0),
- released_bytes(0) {
- }
-
- uptr GetReleasedRangesCount() const {
- return released_ranges_count;
- }
-
- uptr GetReleasedBytes() const {
- return released_bytes;
- }
-
- uptr MapPackedCounterArrayBuffer(uptr buffer_size) {
- // TODO(alekseyshl): The idea to explore is to check if we have enough
- // space between num_freed_chunks*sizeof(CompactPtrT) and
- // mapped_free_array to fit buffer_size bytes and use that space instead
- // of mapping a temporary one.
- return reinterpret_cast<uptr>(
- MmapOrDieOnFatalError(buffer_size, "ReleaseToOSPageCounters"));
- }
-
- void UnmapPackedCounterArrayBuffer(uptr buffer, uptr buffer_size) {
- UnmapOrDie(reinterpret_cast<void *>(buffer), buffer_size);
- }
-
- // Releases [from, to) range of pages back to OS.
- void ReleasePageRangeToOS(CompactPtrT from, CompactPtrT to) {
- const uptr from_page = allocator.CompactPtrToPointer(region_base, from);
- const uptr to_page = allocator.CompactPtrToPointer(region_base, to);
- ReleaseMemoryPagesToOS(from_page, to_page);
- released_ranges_count++;
- released_bytes += to_page - from_page;
- }
-
- private:
- const ThisT& allocator;
- const uptr region_base;
- uptr released_ranges_count;
- uptr released_bytes;
- };
-
// Attempts to release RAM occupied by freed chunks back to OS. The region is
// expected to be locked.
- void MaybeReleaseToOS(uptr class_id, bool force) {
+ //
+ // TODO(morehouse): Support a callback on memory release so HWASan can release
+ // aliases as well.
+ void MaybeReleaseToOS(MemoryMapperT *memory_mapper, uptr class_id,
+ bool force) {
RegionInfo *region = GetRegionInfo(class_id);
const uptr chunk_size = ClassIdToSize(class_id);
const uptr page_size = GetPageSizeCached();
@@ -847,17 +881,16 @@ class SizeClassAllocator64 {
}
}
- MemoryMapper memory_mapper(*this, class_id);
-
- ReleaseFreeMemoryToOS<MemoryMapper>(
+ ReleaseFreeMemoryToOS(
GetFreeArray(GetRegionBeginBySizeClass(class_id)), n, chunk_size,
- RoundUpTo(region->allocated_user, page_size) / page_size,
- &memory_mapper);
+ RoundUpTo(region->allocated_user, page_size) / page_size, memory_mapper,
+ class_id);
- if (memory_mapper.GetReleasedRangesCount() > 0) {
+ uptr ranges, bytes;
+ if (memory_mapper->GetAndResetStats(ranges, bytes)) {
region->rtoi.n_freed_at_last_release = region->stats.n_freed;
- region->rtoi.num_releases += memory_mapper.GetReleasedRangesCount();
- region->rtoi.last_released_bytes = memory_mapper.GetReleasedBytes();
+ region->rtoi.num_releases += ranges;
+ region->rtoi.last_released_bytes = bytes;
}
region->rtoi.last_release_at_ns = MonotonicNanoTime();
}
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h
index 61fb98742373..dd34fe85cc3a 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h
@@ -267,13 +267,9 @@ class LargeMmapAllocator {
// ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
// introspection API.
- void ForceLock() {
- mutex_.Lock();
- }
+ void ForceLock() ACQUIRE(mutex_) { mutex_.Lock(); }
- void ForceUnlock() {
- mutex_.Unlock();
- }
+ void ForceUnlock() RELEASE(mutex_) { mutex_.Unlock(); }
// Iterate over all existing chunks.
// The allocator must be locked when calling this function.
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_size_class_map.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_size_class_map.h
index 12d8c8923071..c50d13303ede 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_size_class_map.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_size_class_map.h
@@ -24,7 +24,7 @@
// E.g. with kNumBits==3 all size classes after 2^kMidSizeLog
// look like 0b1xx0..0, where x is either 0 or 1.
//
-// Example: kNumBits=3, kMidSizeLog=4, kMidSizeLog=8, kMaxSizeLog=17:
+// Example: kNumBits=3, kMinSizeLog=4, kMidSizeLog=8, kMaxSizeLog=17:
//
// Classes 1 - 16 correspond to sizes 16 to 256 (size = class_id * 16).
// Next 4 classes: 256 + i * 64 (i = 1 to 4).
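Editor's note: with the typo fixed, the example reads consistently: kMinSizeLog=4 gives the 16-byte granularity of the linear classes and kMidSizeLog=8 the 256-byte boundary where the geometric classes begin. A few concrete mappings implied by that description (derived from the comment, not from running the code):

//   request of 17 bytes  -> class 2, size 32     (linear: class_id * 16)
//   request of 300 bytes -> size 320             (geometric: 256 + 1 * 64)
//   request of 100 KiB   -> size 112 KiB         (0b111 followed by zeros)
//   requests above 2^17  -> handled by the secondary allocator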
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_mips.h b/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_mips.h
index 59155e9883eb..2b39097112d4 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_mips.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_mips.h
@@ -41,7 +41,7 @@ inline atomic_uint64_t::Type atomic_fetch_add(volatile atomic_uint64_t *ptr,
atomic_uint64_t::Type val,
memory_order mo) {
DCHECK(mo &
- (memory_order_relaxed | memory_order_releasae | memory_order_seq_cst));
+ (memory_order_relaxed | memory_order_release | memory_order_seq_cst));
DCHECK(!((uptr)ptr % sizeof(*ptr)));
atomic_uint64_t::Type ret;
@@ -67,7 +67,7 @@ inline bool atomic_compare_exchange_strong(volatile atomic_uint64_t *ptr,
atomic_uint64_t::Type xchg,
memory_order mo) {
DCHECK(mo &
- (memory_order_relaxed | memory_order_releasae | memory_order_seq_cst));
+ (memory_order_relaxed | memory_order_release | memory_order_seq_cst));
DCHECK(!((uptr)ptr % sizeof(*ptr)));
typedef atomic_uint64_t::Type Type;
@@ -90,7 +90,7 @@ template <>
inline atomic_uint64_t::Type atomic_load(const volatile atomic_uint64_t *ptr,
memory_order mo) {
DCHECK(mo &
- (memory_order_relaxed | memory_order_releasae | memory_order_seq_cst));
+ (memory_order_relaxed | memory_order_release | memory_order_seq_cst));
DCHECK(!((uptr)ptr % sizeof(*ptr)));
atomic_uint64_t::Type zero = 0;
@@ -103,7 +103,7 @@ template <>
inline void atomic_store(volatile atomic_uint64_t *ptr, atomic_uint64_t::Type v,
memory_order mo) {
DCHECK(mo &
- (memory_order_relaxed | memory_order_releasae | memory_order_seq_cst));
+ (memory_order_relaxed | memory_order_release | memory_order_seq_cst));
DCHECK(!((uptr)ptr % sizeof(*ptr)));
__spin_lock(&lock.lock);
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_chained_origin_depot.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_chained_origin_depot.cpp
new file mode 100644
index 000000000000..250ac39e1301
--- /dev/null
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_chained_origin_depot.cpp
@@ -0,0 +1,108 @@
+//===-- sanitizer_chained_origin_depot.cpp --------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// A storage for chained origins.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_chained_origin_depot.h"
+
+namespace __sanitizer {
+
+bool ChainedOriginDepot::ChainedOriginDepotNode::eq(
+ u32 hash, const args_type &args) const {
+ return here_id == args.here_id && prev_id == args.prev_id;
+}
+
+uptr ChainedOriginDepot::ChainedOriginDepotNode::storage_size(
+ const args_type &args) {
+ return sizeof(ChainedOriginDepotNode);
+}
+
+/* This is murmur2 hash for the 64->32 bit case.
+ It does not behave all that well because the keys have a very biased
+ distribution (I've seen 7-element buckets with the table only 14% full).
+
+ here_id is built of
+ * (1 bits) Reserved, zero.
+ * (8 bits) Part id = bits 13..20 of the hash value of here_id's key.
+ * (23 bits) Sequential number (each part has each own sequence).
+
+ prev_id has either the same distribution as here_id (but with 3:8:21)
+ split, or one of two reserved values (-1) or (-2). Either case can
+ dominate depending on the workload.
+*/
+u32 ChainedOriginDepot::ChainedOriginDepotNode::hash(const args_type &args) {
+ const u32 m = 0x5bd1e995;
+ const u32 seed = 0x9747b28c;
+ const u32 r = 24;
+ u32 h = seed;
+ u32 k = args.here_id;
+ k *= m;
+ k ^= k >> r;
+ k *= m;
+ h *= m;
+ h ^= k;
+
+ k = args.prev_id;
+ k *= m;
+ k ^= k >> r;
+ k *= m;
+ h *= m;
+ h ^= k;
+
+ h ^= h >> 13;
+ h *= m;
+ h ^= h >> 15;
+ return h;
+}
+
+bool ChainedOriginDepot::ChainedOriginDepotNode::is_valid(
+ const args_type &args) {
+ return true;
+}
+
+void ChainedOriginDepot::ChainedOriginDepotNode::store(const args_type &args,
+ u32 other_hash) {
+ here_id = args.here_id;
+ prev_id = args.prev_id;
+}
+
+ChainedOriginDepot::ChainedOriginDepotNode::args_type
+ChainedOriginDepot::ChainedOriginDepotNode::load() const {
+ args_type ret = {here_id, prev_id};
+ return ret;
+}
+
+ChainedOriginDepot::ChainedOriginDepotNode::Handle
+ChainedOriginDepot::ChainedOriginDepotNode::get_handle() {
+ return Handle(this);
+}
+
+ChainedOriginDepot::ChainedOriginDepot() {}
+
+StackDepotStats *ChainedOriginDepot::GetStats() { return depot.GetStats(); }
+
+bool ChainedOriginDepot::Put(u32 here_id, u32 prev_id, u32 *new_id) {
+ ChainedOriginDepotDesc desc = {here_id, prev_id};
+ bool inserted;
+ ChainedOriginDepotNode::Handle h = depot.Put(desc, &inserted);
+ *new_id = h.valid() ? h.id() : 0;
+ return inserted;
+}
+
+u32 ChainedOriginDepot::Get(u32 id, u32 *other) {
+ ChainedOriginDepotDesc desc = depot.Get(id);
+ *other = desc.prev_id;
+ return desc.here_id;
+}
+
+void ChainedOriginDepot::LockAll() { depot.LockAll(); }
+
+void ChainedOriginDepot::UnlockAll() { depot.UnlockAll(); }
+
+} // namespace __sanitizer
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_chained_origin_depot.h b/compiler-rt/lib/sanitizer_common/sanitizer_chained_origin_depot.h
new file mode 100644
index 000000000000..453cdf6b5449
--- /dev/null
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_chained_origin_depot.h
@@ -0,0 +1,88 @@
+//===-- sanitizer_chained_origin_depot.h ------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// A storage for chained origins.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_CHAINED_ORIGIN_DEPOT_H
+#define SANITIZER_CHAINED_ORIGIN_DEPOT_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_stackdepotbase.h"
+
+namespace __sanitizer {
+
+class ChainedOriginDepot {
+ public:
+ ChainedOriginDepot();
+
+ // Gets the statistic of the origin chain storage.
+ StackDepotStats *GetStats();
+
+ // Stores a chain with StackDepot ID here_id and previous chain ID prev_id.
+ // If successful, returns true and the new chain id new_id.
+ // If the same element already exists, returns false and sets new_id to the
+ // existing ID.
+ bool Put(u32 here_id, u32 prev_id, u32 *new_id);
+
+ // Retrieves the stored StackDepot ID for the given origin ID.
+ u32 Get(u32 id, u32 *other);
+
+ void LockAll();
+ void UnlockAll();
+
+ private:
+ struct ChainedOriginDepotDesc {
+ u32 here_id;
+ u32 prev_id;
+ };
+
+ struct ChainedOriginDepotNode {
+ ChainedOriginDepotNode *link;
+ u32 id;
+ u32 here_id;
+ u32 prev_id;
+
+ typedef ChainedOriginDepotDesc args_type;
+
+ bool eq(u32 hash, const args_type &args) const;
+
+ static uptr storage_size(const args_type &args);
+
+ static u32 hash(const args_type &args);
+
+ static bool is_valid(const args_type &args);
+
+ void store(const args_type &args, u32 other_hash);
+
+ args_type load() const;
+
+ struct Handle {
+ ChainedOriginDepotNode *node_;
+ Handle() : node_(nullptr) {}
+ explicit Handle(ChainedOriginDepotNode *node) : node_(node) {}
+ bool valid() { return node_; }
+ u32 id() { return node_->id; }
+ int here_id() { return node_->here_id; }
+ int prev_id() { return node_->prev_id; }
+ };
+
+ Handle get_handle();
+
+ typedef Handle handle_type;
+ };
+
+ StackDepotBase<ChainedOriginDepotNode, 4, 20> depot;
+
+ ChainedOriginDepot(const ChainedOriginDepot &) = delete;
+ void operator=(const ChainedOriginDepot &) = delete;
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_CHAINED_ORIGIN_DEPOT_H
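Editor's note: a hedged usage sketch of the new depot, derived from the header comments above (the surrounding function is hypothetical; in practice origin-tracking tools such as MSan/DFSan drive this):

using namespace __sanitizer;

static ChainedOriginDepot origin_depot;

void Example(u32 here_id, u32 prev_id) {
  u32 new_id;
  if (origin_depot.Put(here_id, prev_id, &new_id)) {
    // First time this (here_id, prev_id) pair was stored; new_id is fresh.
  } else {
    // The pair already existed; new_id is the id assigned earlier.
  }
  u32 stored_prev;
  u32 stored_here = origin_depot.Get(new_id, &stored_prev);
  // stored_here == here_id and stored_prev == prev_id.
  (void)stored_here;
}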
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_common.cpp
index 87efda5bd372..5fae8e33b905 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_common.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_common.cpp
@@ -37,10 +37,9 @@ void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
const char *mmap_type, error_t err,
bool raw_report) {
static int recursion_count;
- if (SANITIZER_RTEMS || raw_report || recursion_count) {
- // If we are on RTEMS or raw report is requested or we went into recursion,
- // just die. The Report() and CHECK calls below may call mmap recursively
- // and fail.
+ if (raw_report || recursion_count) {
+ // If raw report is requested or we went into recursion just die. The
+ // Report() and CHECK calls below may call mmap recursively and fail.
RawWrite("ERROR: Failed to mmap\n");
Die();
}
@@ -87,7 +86,7 @@ const char *StripModuleName(const char *module) {
void ReportErrorSummary(const char *error_message, const char *alt_tool_name) {
if (!common_flags()->print_summary)
return;
- InternalScopedString buff(kMaxSummaryLength);
+ InternalScopedString buff;
buff.append("SUMMARY: %s: %s",
alt_tool_name ? alt_tool_name : SanitizerToolName, error_message);
__sanitizer_report_error_summary(buff.data());
@@ -274,6 +273,14 @@ uptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len) {
return name_len;
}
+uptr ReadBinaryDir(/*out*/ char *buf, uptr buf_len) {
+ ReadBinaryNameCached(buf, buf_len);
+ const char *exec_name_pos = StripModuleName(buf);
+ uptr name_len = exec_name_pos - buf;
+ buf[name_len] = '\0';
+ return name_len;
+}
+
#if !SANITIZER_GO
void PrintCmdline() {
char **argv = GetArgv();
@@ -323,6 +330,14 @@ static int InstallMallocFreeHooks(void (*malloc_hook)(const void *, uptr),
return 0;
}
+void internal_sleep(unsigned seconds) {
+ internal_usleep((u64)seconds * 1000 * 1000);
+}
+void SleepForSeconds(unsigned seconds) {
+ internal_usleep((u64)seconds * 1000 * 1000);
+}
+void SleepForMillis(unsigned millis) { internal_usleep((u64)millis * 1000); }
+
} // namespace __sanitizer
using namespace __sanitizer;
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common.h b/compiler-rt/lib/sanitizer_common/sanitizer_common.h
index a6532eee164d..cbdbb0c4c4bd 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_common.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_common.h
@@ -44,7 +44,7 @@ const uptr kMaxPathLength = 4096;
const uptr kMaxThreadStackSize = 1 << 30; // 1Gb
-static const uptr kErrorMessageBufferSize = 1 << 16;
+const uptr kErrorMessageBufferSize = 1 << 16;
// Denotes fake PC values that come from JIT/JAVA/etc.
// For such PC values __tsan_symbolize_external_ex() will be called.
@@ -135,6 +135,15 @@ void UnmapFromTo(uptr from, uptr to);
uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
uptr min_shadow_base_alignment, uptr &high_mem_end);
+// Let S = max(shadow_size, num_aliases * alias_size, ring_buffer_size).
+// Reserves 2*S bytes of address space to the right of the returned address and
+// ring_buffer_size bytes to the left. The returned address is aligned to 2*S.
+// Also creates num_aliases regions of accessible memory starting at offset S
+// from the returned address. Each region has size alias_size and is backed by
+// the same physical memory.
+uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,
+ uptr num_aliases, uptr ring_buffer_size);
+
// Reserve memory range [beg, end]. If madvise_shadow is true then apply
// madvise (e.g. hugepages, core dumping) requested by options.
void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name,
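Editor's note: the comment above fully specifies the reservation; the implementation is added elsewhere in the tree. A worked layout with illustrative numbers:

// Illustrative only:
//   shadow_size      = 1 GiB
//   alias_size       = 256 MiB, num_aliases = 8   -> 2 GiB of aliases
//   ring_buffer_size = 16 MiB
//   S = max(1 GiB, 8 * 256 MiB, 16 MiB) = 2 GiB
// The call reserves [ret - 16 MiB, ret + 4 GiB), with ret aligned to 4 GiB:
//   [ret, ret + 1 GiB)                         shadow
//   [ret + 2 GiB + i*256 MiB, +256 MiB)        alias region i (i = 0..7),
//                                              all backed by the same
//                                              physical memory.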
@@ -228,10 +237,16 @@ void SetPrintfAndReportCallback(void (*callback)(const char *));
// Lock sanitizer error reporting and protects against nested errors.
class ScopedErrorReportLock {
public:
- ScopedErrorReportLock();
- ~ScopedErrorReportLock();
+ ScopedErrorReportLock() ACQUIRE(mutex_) { Lock(); }
+ ~ScopedErrorReportLock() RELEASE(mutex_) { Unlock(); }
+
+ static void Lock() ACQUIRE(mutex_);
+ static void Unlock() RELEASE(mutex_);
+ static void CheckLocked() CHECK_LOCKED(mutex_);
- static void CheckLocked();
+ private:
+ static atomic_uintptr_t reporting_thread_;
+ static StaticSpinMutex mutex_;
};
extern uptr stoptheworld_tracer_pid;
@@ -248,6 +263,7 @@ const char *StripModuleName(const char *module);
// OS
uptr ReadBinaryName(/*out*/char *buf, uptr buf_len);
uptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len);
+uptr ReadBinaryDir(/*out*/ char *buf, uptr buf_len);
uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len);
const char *GetProcessName();
void UpdateProcessName();
@@ -278,8 +294,8 @@ void InitTlsSize();
uptr GetTlsSize();
// Other
-void SleepForSeconds(int seconds);
-void SleepForMillis(int millis);
+void SleepForSeconds(unsigned seconds);
+void SleepForMillis(unsigned millis);
u64 NanoTime();
u64 MonotonicNanoTime();
int Atexit(void (*function)(void));
@@ -294,8 +310,8 @@ void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
const char *mmap_type, error_t err,
bool raw_report = false);
-// Specific tools may override behavior of "Die" and "CheckFailed" functions
-// to do tool-specific job.
+// Specific tools may override behavior of "Die" function to do tool-specific
+// job.
typedef void (*DieCallbackType)(void);
// It's possible to add several callbacks that would be run when "Die" is
@@ -307,9 +323,7 @@ bool RemoveDieCallback(DieCallbackType callback);
void SetUserDieCallback(DieCallbackType callback);
-typedef void (*CheckFailedCallbackType)(const char *, int, const char *,
- u64, u64);
-void SetCheckFailedCallback(CheckFailedCallbackType callback);
+void SetCheckUnwindCallback(void (*callback)());
// Callback will be called if soft_rss_limit_mb is given and the limit is
// exceeded (exceeded==true) or if rss went down below the limit
@@ -343,8 +357,6 @@ void ReportDeadlySignal(const SignalContext &sig, u32 tid,
void SetAlternateSignalStack();
void UnsetAlternateSignalStack();
-// We don't want a summary too long.
-const int kMaxSummaryLength = 1024;
// Construct a one-line string:
// SUMMARY: SanitizerToolName: error_message
// and pass it to __sanitizer_report_error_summary.
@@ -441,8 +453,14 @@ inline uptr Log2(uptr x) {
// Don't use std::min, std::max or std::swap, to minimize dependency
// on libstdc++.
-template<class T> T Min(T a, T b) { return a < b ? a : b; }
-template<class T> T Max(T a, T b) { return a > b ? a : b; }
+template <class T>
+constexpr T Min(T a, T b) {
+ return a < b ? a : b;
+}
+template <class T>
+constexpr T Max(T a, T b) {
+ return a > b ? a : b;
+}
template<class T> void Swap(T& a, T& b) {
T tmp = a;
a = b;
@@ -591,21 +609,21 @@ class InternalMmapVector : public InternalMmapVectorNoCtor<T> {
InternalMmapVector &operator=(InternalMmapVector &&) = delete;
};
-class InternalScopedString : public InternalMmapVector<char> {
+class InternalScopedString {
public:
- explicit InternalScopedString(uptr max_length)
- : InternalMmapVector<char>(max_length), length_(0) {
- (*this)[0] = '\0';
- }
- uptr length() { return length_; }
+ InternalScopedString() : buffer_(1) { buffer_[0] = '\0'; }
+
+ uptr length() const { return buffer_.size() - 1; }
void clear() {
- (*this)[0] = '\0';
- length_ = 0;
+ buffer_.resize(1);
+ buffer_[0] = '\0';
}
void append(const char *format, ...);
+ const char *data() const { return buffer_.data(); }
+ char *data() { return buffer_.data(); }
private:
- uptr length_;
+ InternalMmapVector<char> buffer_;
};
template <class T>
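Editor's note: InternalScopedString no longer takes a maximum length; it owns a growable InternalMmapVector and keeps it NUL-terminated, which is why call sites in this patch (ReportErrorSummary, WriteToSyslog) drop the size argument. A short usage sketch:

InternalScopedString msg;                  // starts as the empty string
msg.append("SUMMARY: %s: ", "MyTool");     // grows the buffer as needed
msg.append("error code %d", 42);
Printf("%s (length %zu)\n", msg.data(), msg.length());
msg.clear();                               // back to "" (capacity is kept)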
@@ -1045,6 +1063,13 @@ class ArrayRef {
T *end_ = nullptr;
};
+#define PRINTF_128(v) \
+ (*((u8 *)&v + 0)), (*((u8 *)&v + 1)), (*((u8 *)&v + 2)), (*((u8 *)&v + 3)), \
+ (*((u8 *)&v + 4)), (*((u8 *)&v + 5)), (*((u8 *)&v + 6)), \
+ (*((u8 *)&v + 7)), (*((u8 *)&v + 8)), (*((u8 *)&v + 9)), \
+ (*((u8 *)&v + 10)), (*((u8 *)&v + 11)), (*((u8 *)&v + 12)), \
+ (*((u8 *)&v + 13)), (*((u8 *)&v + 14)), (*((u8 *)&v + 15))
+
} // namespace __sanitizer
inline void *operator new(__sanitizer::operator_new_size_type size,
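Editor's note: PRINTF_128 expands a 16-byte object into sixteen separate byte arguments so it can be fed to the variadic Printf. A hedged usage sketch (the format string and the Bytes16 type are illustrative; bytes are emitted in memory order, lowest address first):

#define PRINTF_128_FMT \
  "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x"

struct Bytes16 {
  u8 bytes[16];
};

void PrintBytes16(const Bytes16 &v) {
  Printf("value = 0x" PRINTF_128_FMT "\n", PRINTF_128(v));
}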
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc b/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc
index d4b9ea5f7f06..6205d853a4c9 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc
@@ -134,11 +134,11 @@ extern const short *_tolower_tab_;
// Platform-specific options.
#if SANITIZER_MAC
-#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE false
+#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 0
#elif SANITIZER_WINDOWS64
-#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE false
+#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 0
#else
-#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE true
+#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 1
#endif // SANITIZER_MAC
#ifndef COMMON_INTERCEPTOR_INITIALIZE_RANGE
@@ -239,6 +239,23 @@ extern const short *_tolower_tab_;
COMMON_INTERCEPT_FUNCTION(fn)
#endif
+#if SANITIZER_GLIBC
+// If we could not find the versioned symbol, fall back to an unversioned
+// lookup. This is needed to work around a GLibc bug that causes dlsym
+// with RTLD_NEXT to return the oldest versioned symbol.
+// See https://sourceware.org/bugzilla/show_bug.cgi?id=14932.
+// For certain symbols (e.g. regexec) we have to perform a versioned lookup,
+// but that versioned symbol will only exist for architectures where the
+// oldest Glibc version pre-dates support for that architecture.
+// For example, regexec@GLIBC_2.3.4 exists on x86_64, but not RISC-V.
+// See also https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98920.
+#define COMMON_INTERCEPT_FUNCTION_GLIBC_VER_MIN(fn, ver) \
+ COMMON_INTERCEPT_FUNCTION_VER_UNVERSIONED_FALLBACK(fn, ver)
+#else
+#define COMMON_INTERCEPT_FUNCTION_GLIBC_VER_MIN(fn, ver) \
+ COMMON_INTERCEPT_FUNCTION(fn)
+#endif
+
#ifndef COMMON_INTERCEPTOR_MEMSET_IMPL
#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size) \
{ \
@@ -806,11 +823,11 @@ INTERCEPTOR(void *, memcpy, void *dst, const void *src, uptr size) {
// N.B.: If we switch this to internal_ we'll have to use internal_memmove
// due to memcpy being an alias of memmove on OS X.
void *ctx;
- if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) {
+#if PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE
COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size);
- } else {
+#else
COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
- }
+#endif
}
#define INIT_MEMCPY \
@@ -940,6 +957,7 @@ INTERCEPTOR(double, frexp, double x, int *exp) {
// Assuming frexp() always writes to |exp|.
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, exp, sizeof(*exp));
double res = REAL(frexp)(x, exp);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(exp, sizeof(*exp));
return res;
}
@@ -952,22 +970,18 @@ INTERCEPTOR(double, frexp, double x, int *exp) {
INTERCEPTOR(float, frexpf, float x, int *exp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, frexpf, x, exp);
- // FIXME: under ASan the call below may write to freed memory and corrupt
- // its metadata. See
- // https://github.com/google/sanitizers/issues/321.
- float res = REAL(frexpf)(x, exp);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, exp, sizeof(*exp));
+ float res = REAL(frexpf)(x, exp);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(exp, sizeof(*exp));
return res;
}
INTERCEPTOR(long double, frexpl, long double x, int *exp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, frexpl, x, exp);
- // FIXME: under ASan the call below may write to freed memory and corrupt
- // its metadata. See
- // https://github.com/google/sanitizers/issues/321.
- long double res = REAL(frexpl)(x, exp);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, exp, sizeof(*exp));
+ long double res = REAL(frexpl)(x, exp);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(exp, sizeof(*exp));
return res;
}
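Editor's note: the frexp family now annotates the write to *exp before calling the real function, so an invalid destination is reported at the interceptor call rather than inside libm, and marks the result initialized afterwards for definedness-tracking tools. A generic sketch of that pattern for a made-up function with an out-parameter (frobnicate does not exist; the macros are the ones used throughout this file):

INTERCEPTOR(int, frobnicate, int x, int *out) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, frobnicate, x, out);
  // 1. Validate/annotate the destination *before* the real call, so an
  //    invalid `out` is attributed to this call site.
  COMMON_INTERCEPTOR_WRITE_RANGE(ctx, out, sizeof(*out));
  int res = REAL(frobnicate)(x, out);
  // 2. Tell definedness-tracking tools (e.g. MSan) that *out now holds a
  //    valid value.
  COMMON_INTERCEPTOR_INITIALIZE_RANGE(out, sizeof(*out));
  return res;
}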
@@ -2178,6 +2192,7 @@ INTERCEPTOR(int, clock_gettime, u32 clk_id, void *tp) {
}
return res;
}
+#if SANITIZER_GLIBC
namespace __sanitizer {
extern "C" {
int real_clock_gettime(u32 clk_id, void *tp) {
@@ -2187,6 +2202,7 @@ int real_clock_gettime(u32 clk_id, void *tp) {
}
} // extern "C"
} // namespace __sanitizer
+#endif
INTERCEPTOR(int, clock_settime, u32 clk_id, const void *tp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, clock_settime, clk_id, tp);
@@ -3338,7 +3354,7 @@ INTERCEPTOR(char *, setlocale, int category, char *locale) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, locale, REAL(strlen)(locale) + 1);
char *res = REAL(setlocale)(category, locale);
if (res) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
unpoison_ctype_arrays(ctx);
}
return res;
@@ -4013,7 +4029,7 @@ INTERCEPTOR(int, sigwait, __sanitizer_sigset_t *set, int *sig) {
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
- int res = REAL(sigwait)(set, sig);
+ int res = COMMON_INTERCEPTOR_BLOCK_REAL(sigwait)(set, sig);
if (!res && sig) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sig, sizeof(*sig));
return res;
}
@@ -4030,7 +4046,7 @@ INTERCEPTOR(int, sigwaitinfo, __sanitizer_sigset_t *set, void *info) {
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
- int res = REAL(sigwaitinfo)(set, info);
+ int res = COMMON_INTERCEPTOR_BLOCK_REAL(sigwaitinfo)(set, info);
if (res > 0 && info) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, info, siginfo_t_sz);
return res;
}
@@ -4049,7 +4065,7 @@ INTERCEPTOR(int, sigtimedwait, __sanitizer_sigset_t *set, void *info,
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
- int res = REAL(sigtimedwait)(set, info, timeout);
+ int res = COMMON_INTERCEPTOR_BLOCK_REAL(sigtimedwait)(set, info, timeout);
if (res > 0 && info) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, info, siginfo_t_sz);
return res;
}
@@ -5284,6 +5300,12 @@ INTERCEPTOR(__sanitizer_clock_t, times, void *tms) {
#define INIT_TIMES
#endif
+#if SANITIZER_S390 && \
+ (SANITIZER_INTERCEPT_TLS_GET_ADDR || SANITIZER_INTERCEPT_TLS_GET_OFFSET)
+extern "C" uptr __tls_get_offset_wrapper(void *arg, uptr (*fn)(void *arg));
+DEFINE_REAL(uptr, __tls_get_offset, void *arg)
+#endif
+
#if SANITIZER_INTERCEPT_TLS_GET_ADDR
#if !SANITIZER_S390
#define INIT_TLS_GET_ADDR COMMON_INTERCEPT_FUNCTION(__tls_get_addr)
@@ -5323,11 +5345,7 @@ void *__tls_get_addr_opt(void *arg);
// descriptor offset as an argument instead of a pointer. GOT address
// is passed in r12, so it's necessary to write it in assembly. This is
// the function used by the compiler.
-extern "C" uptr __tls_get_offset_wrapper(void *arg, uptr (*fn)(void *arg));
#define INIT_TLS_GET_ADDR COMMON_INTERCEPT_FUNCTION(__tls_get_offset)
-DEFINE_REAL(uptr, __tls_get_offset, void *arg)
-extern "C" uptr __tls_get_offset(void *arg);
-extern "C" uptr __interceptor___tls_get_offset(void *arg);
INTERCEPTOR(uptr, __tls_get_addr_internal, void *arg) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, __tls_get_addr_internal, arg);
@@ -5343,6 +5361,15 @@ INTERCEPTOR(uptr, __tls_get_addr_internal, void *arg) {
}
return res;
}
+#endif // SANITIZER_S390
+#else
+#define INIT_TLS_GET_ADDR
+#endif
+
+#if SANITIZER_S390 && \
+ (SANITIZER_INTERCEPT_TLS_GET_ADDR || SANITIZER_INTERCEPT_TLS_GET_OFFSET)
+extern "C" uptr __tls_get_offset(void *arg);
+extern "C" uptr __interceptor___tls_get_offset(void *arg);
// We need a hidden symbol aliasing the above, so that we can jump
// directly to it from the assembly below.
extern "C" __attribute__((alias("__interceptor___tls_get_addr_internal"),
@@ -5381,9 +5408,6 @@ asm(
"br %r3\n"
".size __tls_get_offset_wrapper, .-__tls_get_offset_wrapper\n"
);
-#endif // SANITIZER_S390
-#else
-#define INIT_TLS_GET_ADDR
#endif
#if SANITIZER_INTERCEPT_LISTXATTR
@@ -6080,6 +6104,40 @@ INTERCEPTOR(__sanitizer_FILE *, freopen, const char *path, const char *mode,
#define INIT_FOPEN
#endif
+#if SANITIZER_INTERCEPT_FLOPEN
+INTERCEPTOR(int, flopen, const char *path, int flags, ...) {
+ void *ctx;
+ va_list ap;
+ va_start(ap, flags);
+ u16 mode = static_cast<u16>(va_arg(ap, u32));
+ va_end(ap);
+ COMMON_INTERCEPTOR_ENTER(ctx, flopen, path, flags, mode);
+ if (path) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ }
+ return REAL(flopen)(path, flags, mode);
+}
+
+INTERCEPTOR(int, flopenat, int dirfd, const char *path, int flags, ...) {
+ void *ctx;
+ va_list ap;
+ va_start(ap, flags);
+ u16 mode = static_cast<u16>(va_arg(ap, u32));
+ va_end(ap);
+ COMMON_INTERCEPTOR_ENTER(ctx, flopen, path, flags, mode);
+ if (path) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ }
+ return REAL(flopenat)(dirfd, path, flags, mode);
+}
+
+#define INIT_FLOPEN \
+ COMMON_INTERCEPT_FUNCTION(flopen); \
+ COMMON_INTERCEPT_FUNCTION(flopenat);
+#else
+#define INIT_FLOPEN
+#endif
+
#if SANITIZER_INTERCEPT_FOPEN64
INTERCEPTOR(__sanitizer_FILE *, fopen64, const char *path, const char *mode) {
void *ctx;
@@ -6444,7 +6502,7 @@ INTERCEPTOR(int, sem_wait, __sanitizer_sem_t *s) {
INTERCEPTOR(int, sem_trywait, __sanitizer_sem_t *s) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, sem_trywait, s);
- int res = COMMON_INTERCEPTOR_BLOCK_REAL(sem_trywait)(s);
+ int res = REAL(sem_trywait)(s);
if (res == 0) {
COMMON_INTERCEPTOR_ACQUIRE(ctx, (uptr)s);
}
@@ -7779,7 +7837,7 @@ INTERCEPTOR(void, regfree, const void *preg) {
}
#define INIT_REGEX \
COMMON_INTERCEPT_FUNCTION(regcomp); \
- COMMON_INTERCEPT_FUNCTION(regexec); \
+ COMMON_INTERCEPT_FUNCTION_GLIBC_VER_MIN(regexec, "GLIBC_2.3.4"); \
COMMON_INTERCEPT_FUNCTION(regerror); \
COMMON_INTERCEPT_FUNCTION(regfree);
#else
@@ -10245,6 +10303,7 @@ static void InitializeCommonInterceptors() {
INIT_LIBIO_INTERNALS;
INIT_FOPEN;
INIT_FOPEN64;
+ INIT_FLOPEN;
INIT_OPEN_MEMSTREAM;
INIT_OBSTACK;
INIT_FFLUSH;
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc b/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc
index 7f181258eab5..b7da65987557 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc
@@ -370,15 +370,6 @@ static void ioctl_table_fill() {
#if SANITIZER_GLIBC
// _(SIOCDEVPLIP, WRITE, struct_ifreq_sz); // the same as EQL_ENSLAVE
- _(CYGETDEFTHRESH, WRITE, sizeof(int));
- _(CYGETDEFTIMEOUT, WRITE, sizeof(int));
- _(CYGETMON, WRITE, struct_cyclades_monitor_sz);
- _(CYGETTHRESH, WRITE, sizeof(int));
- _(CYGETTIMEOUT, WRITE, sizeof(int));
- _(CYSETDEFTHRESH, NONE, 0);
- _(CYSETDEFTIMEOUT, NONE, 0);
- _(CYSETTHRESH, NONE, 0);
- _(CYSETTIMEOUT, NONE, 0);
_(EQL_EMANCIPATE, WRITE, struct_ifreq_sz);
_(EQL_ENSLAVE, WRITE, struct_ifreq_sz);
_(EQL_GETMASTRCFG, WRITE, struct_ifreq_sz);
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_aarch64.inc.S b/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_aarch64.inc.S
index 20f42f1ea94e..72e482754b62 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_aarch64.inc.S
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_aarch64.inc.S
@@ -1,6 +1,7 @@
#if defined(__aarch64__) && defined(__linux__)
#include "sanitizer_common/sanitizer_asm.h"
+#include "builtins/assembly.h"
ASM_HIDDEN(COMMON_INTERCEPTOR_SPILL_AREA)
@@ -9,6 +10,7 @@ ASM_HIDDEN(COMMON_INTERCEPTOR_SPILL_AREA)
ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(vfork))
ASM_WRAPPER_NAME(vfork):
// Save x30 in the off-stack spill area.
+ hint #25 // paciasp
stp xzr, x30, [sp, #-16]!
bl COMMON_INTERCEPTOR_SPILL_AREA
ldp xzr, x30, [sp], 16
@@ -33,6 +35,7 @@ ASM_WRAPPER_NAME(vfork):
bl COMMON_INTERCEPTOR_SPILL_AREA
ldr x30, [x0]
ldp x0, xzr, [sp], 16
+ hint #29 // autiasp
ret
ASM_SIZE(vfork)
@@ -40,4 +43,6 @@ ASM_SIZE(vfork)
.weak vfork
.set vfork, ASM_WRAPPER_NAME(vfork)
+GNU_PROPERTY_BTI_PAC
+
#endif
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp
index 047c5a17ea6e..01ccacc6f320 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp
@@ -92,14 +92,13 @@ void *BackgroundThread(void *arg) {
#endif
void WriteToSyslog(const char *msg) {
- InternalScopedString msg_copy(kErrorMessageBufferSize);
+ InternalScopedString msg_copy;
msg_copy.append("%s", msg);
- char *p = msg_copy.data();
- char *q;
+ const char *p = msg_copy.data();
// Print one line at a time.
// syslog, at least on Android, has an implicit message length limit.
- while ((q = internal_strchr(p, '\n'))) {
+ while (char* q = internal_strchr(p, '\n')) {
*q = '\0';
WriteOneLineToSyslog(p);
p = q + 1;
@@ -139,7 +138,7 @@ uptr ReservedAddressRange::InitAligned(uptr size, uptr align,
return start;
}
-#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
+#if !SANITIZER_FUCHSIA
// Reserve memory range [beg, end].
// We need to use inclusive range because end+1 may not be representable.
@@ -190,7 +189,7 @@ void ProtectGap(uptr addr, uptr size, uptr zero_base_shadow_start,
Die();
}
-#endif // !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
+#endif // !SANITIZER_FUCHSIA
} // namespace __sanitizer
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common_nolibc.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_common_nolibc.cpp
index 487a634a1652..9a4e5388f24d 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_common_nolibc.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_common_nolibc.cpp
@@ -25,7 +25,6 @@ void LogMessageOnPrintf(const char *str) {}
#endif
void WriteToSyslog(const char *buffer) {}
void Abort() { internal__exit(1); }
-void SleepForSeconds(int seconds) { internal_sleep(seconds); }
#endif // !SANITIZER_WINDOWS
#if !SANITIZER_WINDOWS && !SANITIZER_MAC
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_deadlock_detector1.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_deadlock_detector1.cpp
index 2c924f5d3963..ccb7065b07ae 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_deadlock_detector1.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_deadlock_detector1.cpp
@@ -136,7 +136,7 @@ void DD::ReportDeadlock(DDCallback *cb, DDMutex *m) {
DDMutex *m0 = (DDMutex*)dd.getData(from);
DDMutex *m1 = (DDMutex*)dd.getData(to);
- u32 stk_from = -1U, stk_to = -1U;
+ u32 stk_from = 0, stk_to = 0;
int unique_tid = 0;
dd.findEdge(from, to, &stk_from, &stk_to, &unique_tid);
// Printf("Edge: %zd=>%zd: %u/%u T%d\n", from, to, stk_from, stk_to,
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_deadlock_detector2.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_deadlock_detector2.cpp
index e3f8e1b12762..1fbbbcccfa99 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_deadlock_detector2.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_deadlock_detector2.cpp
@@ -73,7 +73,7 @@ struct DDLogicalThread {
int nlocked;
};
-struct Mutex {
+struct MutexState {
StaticSpinMutex mtx;
u32 seq;
int nlink;
@@ -101,12 +101,12 @@ struct DD final : public DDetector {
void CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt, DDMutex *mtx);
void Report(DDPhysicalThread *pt, DDLogicalThread *lt, int npath);
u32 allocateId(DDCallback *cb);
- Mutex *getMutex(u32 id);
- u32 getMutexId(Mutex *m);
+ MutexState *getMutex(u32 id);
+ u32 getMutexId(MutexState *m);
DDFlags flags;
- Mutex* mutex[kL1Size];
+ MutexState *mutex[kL1Size];
SpinMutex mtx;
InternalMmapVector<u32> free_id;
@@ -152,13 +152,11 @@ void DD::MutexInit(DDCallback *cb, DDMutex *m) {
atomic_store(&m->owner, 0, memory_order_relaxed);
}
-Mutex *DD::getMutex(u32 id) {
- return &mutex[id / kL2Size][id % kL2Size];
-}
+MutexState *DD::getMutex(u32 id) { return &mutex[id / kL2Size][id % kL2Size]; }
-u32 DD::getMutexId(Mutex *m) {
+u32 DD::getMutexId(MutexState *m) {
for (int i = 0; i < kL1Size; i++) {
- Mutex *tab = mutex[i];
+ MutexState *tab = mutex[i];
if (tab == 0)
break;
if (m >= tab && m < tab + kL2Size)
@@ -176,8 +174,8 @@ u32 DD::allocateId(DDCallback *cb) {
} else {
CHECK_LT(id_gen, kMaxMutex);
if ((id_gen % kL2Size) == 0) {
- mutex[id_gen / kL2Size] = (Mutex*)MmapOrDie(kL2Size * sizeof(Mutex),
- "deadlock detector (mutex table)");
+ mutex[id_gen / kL2Size] = (MutexState *)MmapOrDie(
+ kL2Size * sizeof(MutexState), "deadlock detector (mutex table)");
}
id = id_gen++;
}
@@ -216,11 +214,11 @@ void DD::MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) {
}
bool added = false;
- Mutex *mtx = getMutex(m->id);
+ MutexState *mtx = getMutex(m->id);
for (int i = 0; i < lt->nlocked - 1; i++) {
u32 id1 = lt->locked[i].id;
u32 stk1 = lt->locked[i].stk;
- Mutex *mtx1 = getMutex(id1);
+ MutexState *mtx1 = getMutex(id1);
SpinMutexLock l(&mtx1->mtx);
if (mtx1->nlink == kMaxLink) {
// FIXME(dvyukov): check stale links
@@ -342,7 +340,7 @@ void DD::MutexDestroy(DDCallback *cb, DDMutex *m) {
// Clear and invalidate the mutex descriptor.
{
- Mutex *mtx = getMutex(m->id);
+ MutexState *mtx = getMutex(m->id);
SpinMutexLock l(&mtx->mtx);
mtx->seq++;
mtx->nlink = 0;
@@ -361,7 +359,7 @@ void DD::CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt,
int npath = 0;
int npending = 0;
{
- Mutex *mtx = getMutex(m->id);
+ MutexState *mtx = getMutex(m->id);
SpinMutexLock l(&mtx->mtx);
for (int li = 0; li < mtx->nlink; li++)
pt->pending[npending++] = mtx->link[li];
@@ -374,7 +372,7 @@ void DD::CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt,
}
if (pt->visited[link.id])
continue;
- Mutex *mtx1 = getMutex(link.id);
+ MutexState *mtx1 = getMutex(link.id);
SpinMutexLock l(&mtx1->mtx);
if (mtx1->seq != link.seq)
continue;
@@ -387,7 +385,7 @@ void DD::CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt,
return Report(pt, lt, npath); // Bingo!
for (int li = 0; li < mtx1->nlink; li++) {
Link *link1 = &mtx1->link[li];
- // Mutex *mtx2 = getMutex(link->id);
+ // MutexState *mtx2 = getMutex(link->id);
// FIXME(dvyukov): fast seq check
// FIXME(dvyukov): fast nlink != 0 check
// FIXME(dvyukov): fast pending check?
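The Mutex to MutexState rename above does not change the underlying structure: mutex ids index a two-level table whose second-level pages are allocated lazily, so lookup stays O(1) and memory is only committed for ids actually handed out. A minimal standalone sketch of that scheme (the sizes and the payload struct here are illustrative, not the runtime's):

#include <cstdint>
#include <cstdlib>

struct MutexState { uint32_t seq; int nlink; };  // illustrative payload only

static const int kL2Size = 1 << 10;              // entries per second-level page
static MutexState *table[1 << 10];               // first level: page pointers

MutexState *GetMutex(uint32_t id) {
  // O(1) lookup: the id splits into a page index and an offset in that page.
  return &table[id / kL2Size][id % kL2Size];
}

uint32_t AllocateId(uint32_t &id_gen) {
  uint32_t id = id_gen++;
  if (id % kL2Size == 0)  // first id landing in a fresh page: allocate it
    table[id / kL2Size] =
        static_cast<MutexState *>(calloc(kL2Size, sizeof(MutexState)));
  return id;
}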
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_errno.h b/compiler-rt/lib/sanitizer_common/sanitizer_errno.h
index 94f16b6e8735..70a6e88dbaad 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_errno.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_errno.h
@@ -23,8 +23,7 @@
#if SANITIZER_FREEBSD || SANITIZER_MAC
# define __errno_location __error
-#elif SANITIZER_ANDROID || SANITIZER_NETBSD || \
- SANITIZER_RTEMS
+#elif SANITIZER_ANDROID || SANITIZER_NETBSD
# define __errno_location __errno
#elif SANITIZER_SOLARIS
# define __errno_location ___errno
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_file.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_file.cpp
index 7c64b53e9b11..0b92dccde4a1 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_file.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_file.cpp
@@ -58,6 +58,9 @@ void ReportFile::ReopenIfNecessary() {
} else {
internal_snprintf(full_path, kMaxPathLength, "%s.%zu", path_prefix, pid);
}
+ if (common_flags()->log_suffix) {
+ internal_strlcat(full_path, common_flags()->log_suffix, kMaxPathLength);
+ }
error_t err;
fd = OpenFile(full_path, WrOnly, &err);
if (fd == kInvalidFd) {
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_flags.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_flags.cpp
index 21048be73041..d52e96a7c381 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_flags.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_flags.cpp
@@ -35,6 +35,7 @@ void CommonFlags::CopyFrom(const CommonFlags &other) {
// Copy the string from "s" to "out", making the following substitutions:
// %b = binary basename
// %p = pid
+// %d = binary directory
void SubstituteForFlagValue(const char *s, char *out, uptr out_size) {
char *out_end = out + out_size;
while (*s && out < out_end - 1) {
@@ -64,6 +65,12 @@ void SubstituteForFlagValue(const char *s, char *out, uptr out_size) {
s += 2; // skip "%p"
break;
}
+ case 'd': {
+ uptr len = ReadBinaryDir(out, out_end - out);
+ out += len;
+ s += 2; // skip "%d"
+ break;
+ }
default:
*out++ = *s++;
break;
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_flags.inc b/compiler-rt/lib/sanitizer_common/sanitizer_flags.inc
index cfb5822645f1..3bc44c6b1eb1 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_flags.inc
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_flags.inc
@@ -59,6 +59,8 @@ COMMON_FLAG(
bool, log_exe_name, false,
"Mention name of executable when reporting error and "
"append executable name to logs (as in \"log_path.exe_name.pid\").")
+COMMON_FLAG(const char *, log_suffix, nullptr,
+ "String to append to log file name, e.g. \".txt\".")
COMMON_FLAG(
bool, log_to_syslog, (bool)SANITIZER_ANDROID || (bool)SANITIZER_MAC,
"Write all sanitizer output to syslog in addition to other means of "
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp
index 5ad20d0d7da6..65bc398656c9 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp
@@ -14,7 +14,6 @@
#include "sanitizer_fuchsia.h"
#if SANITIZER_FUCHSIA
-#include <limits.h>
#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>
@@ -37,16 +36,11 @@ uptr internal_sched_yield() {
return 0; // Why doesn't this return void?
}
-static void internal_nanosleep(zx_time_t ns) {
- zx_status_t status = _zx_nanosleep(_zx_deadline_after(ns));
+void internal_usleep(u64 useconds) {
+ zx_status_t status = _zx_nanosleep(_zx_deadline_after(ZX_USEC(useconds)));
CHECK_EQ(status, ZX_OK);
}
-unsigned int internal_sleep(unsigned int seconds) {
- internal_nanosleep(ZX_SEC(seconds));
- return 0;
-}
-
u64 NanoTime() {
zx_handle_t utc_clock = _zx_utc_reference_get();
CHECK_NE(utc_clock, ZX_HANDLE_INVALID);
@@ -69,9 +63,7 @@ uptr internal_getpid() {
return pid;
}
-int internal_dlinfo(void *handle, int request, void *p) {
- UNIMPLEMENTED();
-}
+int internal_dlinfo(void *handle, int request, void *p) { UNIMPLEMENTED(); }
uptr GetThreadSelf() { return reinterpret_cast<uptr>(thrd_current()); }
@@ -81,10 +73,6 @@ void Abort() { abort(); }
int Atexit(void (*function)(void)) { return atexit(function); }
-void SleepForSeconds(int seconds) { internal_sleep(seconds); }
-
-void SleepForMillis(int millis) { internal_nanosleep(ZX_MSEC(millis)); }
-
void GetThreadStackTopAndBottom(bool, uptr *stack_top, uptr *stack_bottom) {
pthread_attr_t attr;
CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
@@ -112,6 +100,18 @@ bool SignalContext::IsStackOverflow() const { return false; }
void SignalContext::DumpAllRegisters(void *context) { UNIMPLEMENTED(); }
const char *SignalContext::Describe() const { UNIMPLEMENTED(); }
+void FutexWait(atomic_uint32_t *p, u32 cmp) {
+ zx_status_t status = _zx_futex_wait(reinterpret_cast<zx_futex_t *>(p), cmp,
+ ZX_HANDLE_INVALID, ZX_TIME_INFINITE);
+ if (status != ZX_ERR_BAD_STATE) // Normal race.
+ CHECK_EQ(status, ZX_OK);
+}
+
+void FutexWake(atomic_uint32_t *p, u32 count) {
+ zx_status_t status = _zx_futex_wake(reinterpret_cast<zx_futex_t *>(p), count);
+ CHECK_EQ(status, ZX_OK);
+}
+
enum MutexState : int { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };
BlockingMutex::BlockingMutex() {
@@ -148,19 +148,21 @@ void BlockingMutex::Unlock() {
}
}
-void BlockingMutex::CheckLocked() {
- atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+void BlockingMutex::CheckLocked() const {
+ auto m = reinterpret_cast<atomic_uint32_t const *>(&opaque_storage_);
CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
}
-uptr GetPageSize() { return PAGE_SIZE; }
+uptr GetPageSize() { return _zx_system_get_page_size(); }
-uptr GetMmapGranularity() { return PAGE_SIZE; }
+uptr GetMmapGranularity() { return _zx_system_get_page_size(); }
sanitizer_shadow_bounds_t ShadowBounds;
+void InitShadowBounds() { ShadowBounds = __sanitizer_shadow_bounds(); }
+
uptr GetMaxUserVirtualAddress() {
- ShadowBounds = __sanitizer_shadow_bounds();
+ InitShadowBounds();
return ShadowBounds.memory_limit - 1;
}
@@ -168,7 +170,7 @@ uptr GetMaxVirtualAddress() { return GetMaxUserVirtualAddress(); }
static void *DoAnonymousMmapOrDie(uptr size, const char *mem_type,
bool raw_report, bool die_for_nomem) {
- size = RoundUpTo(size, PAGE_SIZE);
+ size = RoundUpTo(size, GetPageSize());
zx_handle_t vmo;
zx_status_t status = _zx_vmo_create(size, 0, &vmo);
@@ -214,15 +216,14 @@ void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
uptr ReservedAddressRange::Init(uptr init_size, const char *name,
uptr fixed_addr) {
- init_size = RoundUpTo(init_size, PAGE_SIZE);
+ init_size = RoundUpTo(init_size, GetPageSize());
DCHECK_EQ(os_handle_, ZX_HANDLE_INVALID);
uintptr_t base;
zx_handle_t vmar;
- zx_status_t status =
- _zx_vmar_allocate(
- _zx_vmar_root_self(),
- ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC,
- 0, init_size, &vmar, &base);
+ zx_status_t status = _zx_vmar_allocate(
+ _zx_vmar_root_self(),
+ ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC, 0,
+ init_size, &vmar, &base);
if (status != ZX_OK)
ReportMmapFailureAndDie(init_size, name, "zx_vmar_allocate", status);
base_ = reinterpret_cast<void *>(base);
@@ -236,7 +237,7 @@ uptr ReservedAddressRange::Init(uptr init_size, const char *name,
static uptr DoMmapFixedOrDie(zx_handle_t vmar, uptr fixed_addr, uptr map_size,
void *base, const char *name, bool die_for_nomem) {
uptr offset = fixed_addr - reinterpret_cast<uptr>(base);
- map_size = RoundUpTo(map_size, PAGE_SIZE);
+ map_size = RoundUpTo(map_size, GetPageSize());
zx_handle_t vmo;
zx_status_t status = _zx_vmo_create(map_size, 0, &vmo);
if (status != ZX_OK) {
@@ -264,19 +265,19 @@ static uptr DoMmapFixedOrDie(zx_handle_t vmar, uptr fixed_addr, uptr map_size,
uptr ReservedAddressRange::Map(uptr fixed_addr, uptr map_size,
const char *name) {
- return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_,
- name_, false);
+ return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_, name_,
+ false);
}
uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr map_size,
const char *name) {
- return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_,
- name_, true);
+ return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_, name_, true);
}
void UnmapOrDieVmar(void *addr, uptr size, zx_handle_t target_vmar) {
- if (!addr || !size) return;
- size = RoundUpTo(size, PAGE_SIZE);
+ if (!addr || !size)
+ return;
+ size = RoundUpTo(size, GetPageSize());
zx_status_t status =
_zx_vmar_unmap(target_vmar, reinterpret_cast<uintptr_t>(addr), size);
@@ -316,7 +317,7 @@ void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
const char *mem_type) {
- CHECK_GE(size, PAGE_SIZE);
+ CHECK_GE(size, GetPageSize());
CHECK(IsPowerOfTwo(size));
CHECK(IsPowerOfTwo(alignment));
@@ -356,7 +357,8 @@ void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
_zx_vmar_root_self(),
ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC_OVERWRITE,
addr - info.base, vmo, 0, size, &new_addr);
- if (status == ZX_OK) CHECK_EQ(new_addr, addr);
+ if (status == ZX_OK)
+ CHECK_EQ(new_addr, addr);
}
}
if (status == ZX_OK && addr != map_addr)
@@ -381,9 +383,18 @@ void UnmapOrDie(void *addr, uptr size) {
UnmapOrDieVmar(addr, size, _zx_vmar_root_self());
}
-// This is used on the shadow mapping, which cannot be changed.
-// Zircon doesn't have anything like MADV_DONTNEED.
-void ReleaseMemoryPagesToOS(uptr beg, uptr end) {}
+void ReleaseMemoryPagesToOS(uptr beg, uptr end) {
+ uptr beg_aligned = RoundUpTo(beg, GetPageSize());
+ uptr end_aligned = RoundDownTo(end, GetPageSize());
+ if (beg_aligned < end_aligned) {
+ zx_handle_t root_vmar = _zx_vmar_root_self();
+ CHECK_NE(root_vmar, ZX_HANDLE_INVALID);
+ zx_status_t status =
+ _zx_vmar_op_range(root_vmar, ZX_VMAR_OP_DECOMMIT, beg_aligned,
+ end_aligned - beg_aligned, nullptr, 0);
+ CHECK_EQ(status, ZX_OK);
+ }
+}
void DumpProcessMap() {
// TODO(mcgrathr): write it
@@ -412,8 +423,9 @@ bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
uint64_t vmo_size;
status = _zx_vmo_get_size(vmo, &vmo_size);
if (status == ZX_OK) {
- if (vmo_size < max_len) max_len = vmo_size;
- size_t map_size = RoundUpTo(max_len, PAGE_SIZE);
+ if (vmo_size < max_len)
+ max_len = vmo_size;
+ size_t map_size = RoundUpTo(max_len, GetPageSize());
uintptr_t addr;
status = _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ, 0, vmo, 0,
map_size, &addr);
@@ -425,7 +437,8 @@ bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
}
_zx_handle_close(vmo);
}
- if (status != ZX_OK && errno_p) *errno_p = status;
+ if (status != ZX_OK && errno_p)
+ *errno_p = status;
return status == ZX_OK;
}
@@ -499,9 +512,7 @@ bool GetRandom(void *buffer, uptr length, bool blocking) {
return true;
}
-u32 GetNumberOfCPUs() {
- return zx_system_get_num_cpus();
-}
+u32 GetNumberOfCPUs() { return zx_system_get_num_cpus(); }
uptr GetRSS() { UNIMPLEMENTED(); }
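For a concrete sense of the new Fuchsia ReleaseMemoryPagesToOS: with a hypothetical 4 KiB page size, a call covering [0x10000123, 0x10008800) first rounds the start up to 0x10001000 and the end down to 0x10008000, then asks the root VMAR to decommit exactly that aligned span. If the rounding leaves nothing (start rounded up past end rounded down), no decommit is issued.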
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.h b/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.h
index 96f9cde7ef19..26c1deab9e5f 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.h
@@ -30,6 +30,8 @@ struct MemoryMappingLayoutData {
size_t current; // Current index into the vector.
};
+void InitShadowBounds();
+
} // namespace __sanitizer
#endif // SANITIZER_FUCHSIA
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_internal_defs.h b/compiler-rt/lib/sanitizer_common/sanitizer_internal_defs.h
index d8f0540037d2..84053fec2649 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_internal_defs.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_internal_defs.h
@@ -409,6 +409,9 @@ inline void Trap() {
(void)enable_fp; \
} while (0)
+constexpr u32 kInvalidTid = -1;
+constexpr u32 kMainTid = 0;
+
} // namespace __sanitizer
namespace __asan {
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_libc.h b/compiler-rt/lib/sanitizer_common/sanitizer_libc.h
index ec0a6ded009b..bcb81ebbc803 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_libc.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_libc.h
@@ -67,7 +67,8 @@ uptr internal_ftruncate(fd_t fd, uptr size);
// OS
void NORETURN internal__exit(int exitcode);
-unsigned int internal_sleep(unsigned int seconds);
+void internal_sleep(unsigned seconds);
+void internal_usleep(u64 useconds);
uptr internal_getpid();
uptr internal_getppid();
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_libignore.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_libignore.cpp
index 9ea19bc21fa3..a65d3d896e33 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_libignore.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_libignore.cpp
@@ -38,7 +38,7 @@ void LibIgnore::AddIgnoredLibrary(const char *name_templ) {
void LibIgnore::OnLibraryLoaded(const char *name) {
BlockingMutexLock lock(&mutex_);
// Try to match suppressions with symlink target.
- InternalScopedString buf(kMaxPathLength);
+ InternalMmapVector<char> buf(kMaxPathLength);
if (name && internal_readlink(name, buf.data(), buf.size() - 1) > 0 &&
buf[0]) {
for (uptr i = 0; i < count_; i++) {
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
index 379f6d9e294b..9b7d87eb85e1 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
@@ -183,6 +183,14 @@ uptr internal_munmap(void *addr, uptr length) {
return internal_syscall(SYSCALL(munmap), (uptr)addr, length);
}
+#if SANITIZER_LINUX
+uptr internal_mremap(void *old_address, uptr old_size, uptr new_size, int flags,
+ void *new_address) {
+ return internal_syscall(SYSCALL(mremap), (uptr)old_address, old_size,
+ new_size, flags, (uptr)new_address);
+}
+#endif
+
int internal_mprotect(void *addr, uptr length, int prot) {
return internal_syscall(SYSCALL(mprotect), (uptr)addr, length, prot);
}
@@ -422,13 +430,11 @@ uptr internal_sched_yield() {
return internal_syscall(SYSCALL(sched_yield));
}
-unsigned int internal_sleep(unsigned int seconds) {
+void internal_usleep(u64 useconds) {
struct timespec ts;
- ts.tv_sec = seconds;
- ts.tv_nsec = 0;
- int res = internal_syscall(SYSCALL(nanosleep), &ts, &ts);
- if (res) return ts.tv_sec;
- return 0;
+ ts.tv_sec = useconds / 1000000;
+ ts.tv_nsec = (useconds % 1000000) * 1000;
+ internal_syscall(SYSCALL(nanosleep), &ts, &ts);
}
uptr internal_execve(const char *filename, char *const argv[],
@@ -489,22 +495,24 @@ int TgKill(pid_t pid, tid_t tid, int sig) {
}
#endif
-#if !SANITIZER_SOLARIS && !SANITIZER_NETBSD
+#if SANITIZER_GLIBC
u64 NanoTime() {
-#if SANITIZER_FREEBSD
- timeval tv;
-#else
kernel_timeval tv;
-#endif
internal_memset(&tv, 0, sizeof(tv));
internal_syscall(SYSCALL(gettimeofday), &tv, 0);
- return (u64)tv.tv_sec * 1000*1000*1000 + tv.tv_usec * 1000;
+ return (u64)tv.tv_sec * 1000 * 1000 * 1000 + tv.tv_usec * 1000;
}
-
+// Used by real_clock_gettime.
uptr internal_clock_gettime(__sanitizer_clockid_t clk_id, void *tp) {
return internal_syscall(SYSCALL(clock_gettime), clk_id, tp);
}
-#endif // !SANITIZER_SOLARIS && !SANITIZER_NETBSD
+#elif !SANITIZER_SOLARIS && !SANITIZER_NETBSD
+u64 NanoTime() {
+ struct timespec ts;
+ clock_gettime(CLOCK_REALTIME, &ts);
+ return (u64)ts.tv_sec * 1000 * 1000 * 1000 + ts.tv_nsec;
+}
+#endif
// Like getenv, but reads env directly from /proc (on Linux) or parses the
// 'environ' array (on some others) and does not use libc. This function
@@ -631,11 +639,27 @@ char **GetEnviron() {
}
#if !SANITIZER_SOLARIS
-enum MutexState {
- MtxUnlocked = 0,
- MtxLocked = 1,
- MtxSleeping = 2
-};
+void FutexWait(atomic_uint32_t *p, u32 cmp) {
+# if SANITIZER_FREEBSD
+ _umtx_op(p, UMTX_OP_WAIT_UINT, cmp, 0, 0);
+# elif SANITIZER_NETBSD
+ sched_yield(); /* No userspace futex-like synchronization */
+# else
+ internal_syscall(SYSCALL(futex), (uptr)p, FUTEX_WAIT_PRIVATE, cmp, 0, 0, 0);
+# endif
+}
+
+void FutexWake(atomic_uint32_t *p, u32 count) {
+# if SANITIZER_FREEBSD
+ _umtx_op(p, UMTX_OP_WAKE, count, 0, 0);
+# elif SANITIZER_NETBSD
+ /* No userspace futex-like synchronization */
+# else
+ internal_syscall(SYSCALL(futex), (uptr)p, FUTEX_WAKE_PRIVATE, count, 0, 0, 0);
+# endif
+}
+
+enum { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };
BlockingMutex::BlockingMutex() {
internal_memset(this, 0, sizeof(*this));
@@ -673,11 +697,11 @@ void BlockingMutex::Unlock() {
}
}
-void BlockingMutex::CheckLocked() {
- atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+void BlockingMutex::CheckLocked() const {
+ auto m = reinterpret_cast<atomic_uint32_t const *>(&opaque_storage_);
CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
}
-#endif // !SANITIZER_SOLARIS
+# endif // !SANITIZER_SOLARIS
// ----------------- sanitizer_linux.h
// The actual size of this structure is specified by d_reclen.
@@ -874,7 +898,7 @@ void internal_sigdelset(__sanitizer_sigset_t *set, int signum) {
__sanitizer_kernel_sigset_t *k_set = (__sanitizer_kernel_sigset_t *)set;
const uptr idx = signum / (sizeof(k_set->sig[0]) * 8);
const uptr bit = signum % (sizeof(k_set->sig[0]) * 8);
- k_set->sig[idx] &= ~(1 << bit);
+ k_set->sig[idx] &= ~((uptr)1 << bit);
}
bool internal_sigismember(__sanitizer_sigset_t *set, int signum) {
@@ -884,7 +908,7 @@ bool internal_sigismember(__sanitizer_sigset_t *set, int signum) {
__sanitizer_kernel_sigset_t *k_set = (__sanitizer_kernel_sigset_t *)set;
const uptr idx = signum / (sizeof(k_set->sig[0]) * 8);
const uptr bit = signum % (sizeof(k_set->sig[0]) * 8);
- return k_set->sig[idx] & (1 << bit);
+ return k_set->sig[idx] & ((uptr)1 << bit);
}
#elif SANITIZER_FREEBSD
void internal_sigdelset(__sanitizer_sigset_t *set, int signum) {
@@ -1334,50 +1358,42 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
#elif SANITIZER_RISCV64
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr) {
- long long res;
if (!fn || !child_stack)
return -EINVAL;
+
CHECK_EQ(0, (uptr)child_stack % 16);
- child_stack = (char *)child_stack - 2 * sizeof(unsigned long long);
- ((unsigned long long *)child_stack)[0] = (uptr)fn;
- ((unsigned long long *)child_stack)[1] = (uptr)arg;
- register int (*__fn)(void *) __asm__("a0") = fn;
+ register int res __asm__("a0");
+ register int __flags __asm__("a0") = flags;
register void *__stack __asm__("a1") = child_stack;
- register int __flags __asm__("a2") = flags;
- register void *__arg __asm__("a3") = arg;
- register int *__ptid __asm__("a4") = parent_tidptr;
- register void *__tls __asm__("a5") = newtls;
- register int *__ctid __asm__("a6") = child_tidptr;
+ register int *__ptid __asm__("a2") = parent_tidptr;
+ register void *__tls __asm__("a3") = newtls;
+ register int *__ctid __asm__("a4") = child_tidptr;
+ register int (*__fn)(void *) __asm__("a5") = fn;
+ register void *__arg __asm__("a6") = arg;
+ register int nr_clone __asm__("a7") = __NR_clone;
__asm__ __volatile__(
- "mv a0,a2\n" /* flags */
- "mv a2,a4\n" /* ptid */
- "mv a3,a5\n" /* tls */
- "mv a4,a6\n" /* ctid */
- "addi a7, zero, %9\n" /* clone */
-
"ecall\n"
- /* if (%r0 != 0)
- * return %r0;
+ /* if (a0 != 0)
+ * return a0;
*/
"bnez a0, 1f\n"
- /* In the child, now. Call "fn(arg)". */
- "ld a0, 8(sp)\n"
- "ld a1, 16(sp)\n"
- "jalr a1\n"
+ // In the child, now. Call "fn(arg)".
+ "mv a0, a6\n"
+ "jalr a5\n"
- /* Call _exit(%r0). */
- "addi a7, zero, %10\n"
+ // Call _exit(a0).
+ "addi a7, zero, %9\n"
"ecall\n"
"1:\n"
: "=r"(res)
- : "i"(-EINVAL), "r"(__fn), "r"(__stack), "r"(__flags), "r"(__arg),
- "r"(__ptid), "r"(__tls), "r"(__ctid), "i"(__NR_clone), "i"(__NR_exit)
- : "ra", "memory");
+ : "0"(__flags), "r"(__stack), "r"(__ptid), "r"(__tls), "r"(__ctid),
+ "r"(__fn), "r"(__arg), "r"(nr_clone), "i"(__NR_exit)
+ : "memory");
return res;
}
#elif defined(__aarch64__)
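The new FutexWait/FutexWake helpers above are thin wrappers over the futex syscall (or _umtx_op on FreeBSD, or plain sched_yield on NetBSD). As a rough standalone Linux illustration of the same wait/wake pattern, independent of the sanitizer runtime (the helper names here are made up):

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <atomic>
#include <climits>
#include <cstdint>

// One aligned 32-bit word serves as both the flag and the futex wait address.
static long futex(std::atomic<uint32_t> *addr, int op, uint32_t val) {
  return syscall(SYS_futex, reinterpret_cast<uint32_t *>(addr), op, val,
                 nullptr, nullptr, 0);
}

void WaitUntilSet(std::atomic<uint32_t> *flag) {
  // FUTEX_WAIT only sleeps if *flag still equals the expected value (0), so a
  // wake racing with the check simply makes the syscall return immediately.
  while (flag->load(std::memory_order_acquire) == 0)
    futex(flag, FUTEX_WAIT_PRIVATE, 0);
}

void Set(std::atomic<uint32_t> *flag) {
  flag->store(1, std::memory_order_release);
  futex(flag, FUTEX_WAKE_PRIVATE, INT_MAX);  // wake every waiter
}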
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_linux.h b/compiler-rt/lib/sanitizer_common/sanitizer_linux.h
index 24902d1b6bce..9a23fcfb3b93 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_linux.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_linux.h
@@ -49,7 +49,9 @@ uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count);
uptr internal_sigaltstack(const void* ss, void* oss);
uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
__sanitizer_sigset_t *oldset);
+#if SANITIZER_GLIBC
uptr internal_clock_gettime(__sanitizer_clockid_t clk_id, void *tp);
+#endif
// Linux-only syscalls.
#if SANITIZER_LINUX
@@ -96,7 +98,6 @@ class ThreadLister {
// Exposed for testing.
uptr ThreadDescriptorSize();
uptr ThreadSelf();
-uptr ThreadSelfOffset();
// Matches a library's file name against a base name (stripping path and version
// information).
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp
index f20b9001c2c2..7ce9e25da342 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp
@@ -36,6 +36,7 @@
#include <link.h>
#include <pthread.h>
#include <signal.h>
+#include <sys/mman.h>
#include <sys/resource.h>
#include <syslog.h>
@@ -48,6 +49,10 @@
#include <osreldate.h>
#include <sys/sysctl.h>
#define pthread_getattr_np pthread_attr_get_np
+// The MAP_NORESERVE define has been removed in FreeBSD 11.x, and even before
+// that, it was never implemented. So just define it to zero.
+#undef MAP_NORESERVE
+#define MAP_NORESERVE 0
#endif
#if SANITIZER_NETBSD
@@ -183,84 +188,35 @@ __attribute__((unused)) static bool GetLibcVersion(int *major, int *minor,
#endif
}
-#if SANITIZER_GLIBC && !SANITIZER_GO
-static uptr g_tls_size;
+// True if we can use dlpi_tls_data. glibc before 2.25 may leave NULL (BZ
+// #19826) so dlpi_tls_data cannot be used.
+//
+// musl before 1.2.3 and FreeBSD as of 12.2 incorrectly set dlpi_tls_data to
+// the TLS initialization image
+// https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=254774
+__attribute__((unused)) static int g_use_dlpi_tls_data;
-#ifdef __i386__
-#define CHECK_GET_TLS_STATIC_INFO_VERSION (!__GLIBC_PREREQ(2, 27))
-#else
-#define CHECK_GET_TLS_STATIC_INFO_VERSION 0
-#endif
+#if SANITIZER_GLIBC && !SANITIZER_GO
+__attribute__((unused)) static size_t g_tls_size;
+void InitTlsSize() {
+ int major, minor, patch;
+ g_use_dlpi_tls_data =
+ GetLibcVersion(&major, &minor, &patch) && major == 2 && minor >= 25;
-#if CHECK_GET_TLS_STATIC_INFO_VERSION
-#define DL_INTERNAL_FUNCTION __attribute__((regparm(3), stdcall))
-#else
-#define DL_INTERNAL_FUNCTION
+#if defined(__aarch64__) || defined(__x86_64__) || defined(__powerpc64__)
+ void *get_tls_static_info = dlsym(RTLD_NEXT, "_dl_get_tls_static_info");
+ size_t tls_align;
+ ((void (*)(size_t *, size_t *))get_tls_static_info)(&g_tls_size, &tls_align);
#endif
-
-namespace {
-struct GetTlsStaticInfoCall {
- typedef void (*get_tls_func)(size_t*, size_t*);
-};
-struct GetTlsStaticInfoRegparmCall {
- typedef void (*get_tls_func)(size_t*, size_t*) DL_INTERNAL_FUNCTION;
-};
-
-template <typename T>
-void CallGetTls(void* ptr, size_t* size, size_t* align) {
- typename T::get_tls_func get_tls;
- CHECK_EQ(sizeof(get_tls), sizeof(ptr));
- internal_memcpy(&get_tls, &ptr, sizeof(ptr));
- CHECK_NE(get_tls, 0);
- get_tls(size, align);
-}
-
-bool CmpLibcVersion(int major, int minor, int patch) {
- int ma;
- int mi;
- int pa;
- if (!GetLibcVersion(&ma, &mi, &pa))
- return false;
- if (ma > major)
- return true;
- if (ma < major)
- return false;
- if (mi > minor)
- return true;
- if (mi < minor)
- return false;
- return pa >= patch;
-}
-
-} // namespace
-
-void InitTlsSize() {
- // all current supported platforms have 16 bytes stack alignment
- const size_t kStackAlign = 16;
- void *get_tls_static_info_ptr = dlsym(RTLD_NEXT, "_dl_get_tls_static_info");
- size_t tls_size = 0;
- size_t tls_align = 0;
- // On i?86, _dl_get_tls_static_info used to be internal_function, i.e.
- // __attribute__((regparm(3), stdcall)) before glibc 2.27 and is normal
- // function in 2.27 and later.
- if (CHECK_GET_TLS_STATIC_INFO_VERSION && !CmpLibcVersion(2, 27, 0))
- CallGetTls<GetTlsStaticInfoRegparmCall>(get_tls_static_info_ptr,
- &tls_size, &tls_align);
- else
- CallGetTls<GetTlsStaticInfoCall>(get_tls_static_info_ptr,
- &tls_size, &tls_align);
- if (tls_align < kStackAlign)
- tls_align = kStackAlign;
- g_tls_size = RoundUpTo(tls_size, tls_align);
}
#else
void InitTlsSize() { }
#endif // SANITIZER_GLIBC && !SANITIZER_GO
-#if (defined(__x86_64__) || defined(__i386__) || defined(__mips__) || \
- defined(__aarch64__) || defined(__powerpc64__) || defined(__s390__) || \
- defined(__arm__) || SANITIZER_RISCV64) && \
- SANITIZER_LINUX && !SANITIZER_ANDROID
+// On glibc x86_64, ThreadDescriptorSize() needs to be precise due to the usage
+// of g_tls_size. On other targets, ThreadDescriptorSize() is only used by lsan
+// to get the pointer to thread-specific data keys in the thread control block.
+#if (SANITIZER_FREEBSD || SANITIZER_LINUX) && !SANITIZER_ANDROID
// sizeof(struct pthread) from glibc.
static atomic_uintptr_t thread_descriptor_size;
@@ -298,6 +254,13 @@ uptr ThreadDescriptorSize() {
else // minor == 32
val = FIRST_32_SECOND_64(1344, 2496);
}
+#elif defined(__s390__) || defined(__sparc__)
+ // The size of a prefix of TCB including pthread::{specific_1stblock,specific}
+ // suffices. Just return offsetof(struct pthread, specific_used), which hasn't
+ // changed since 2007-05. Technically this applies to i386/x86_64 as well but
+ // we call _dl_get_tls_static_info and need the precise size of struct
+ // pthread.
+ return FIRST_32_SECOND_64(524, 1552);
#elif defined(__mips__)
// TODO(sagarthakur): add more values as per different glibc versions.
val = FIRST_32_SECOND_64(1152, 1776);
@@ -321,21 +284,12 @@ uptr ThreadDescriptorSize() {
val = 1776;
#elif defined(__powerpc64__)
val = 1776; // from glibc.ppc64le 2.20-8.fc21
-#elif defined(__s390__)
- val = FIRST_32_SECOND_64(1152, 1776); // valid for glibc 2.22
#endif
if (val)
atomic_store_relaxed(&thread_descriptor_size, val);
return val;
}
-// The offset at which pointer to self is located in the thread descriptor.
-const uptr kThreadSelfOffset = FIRST_32_SECOND_64(8, 16);
-
-uptr ThreadSelfOffset() {
- return kThreadSelfOffset;
-}
-
#if defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64
// TlsPreTcbSize includes size of struct pthread_descr and size of tcb
// head structure. It lies before the static tls blocks.
@@ -354,68 +308,97 @@ static uptr TlsPreTcbSize() {
}
#endif
-uptr ThreadSelf() {
- uptr descr_addr;
-#if defined(__i386__)
- asm("mov %%gs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset));
-#elif defined(__x86_64__)
- asm("mov %%fs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset));
-#elif defined(__mips__)
- // MIPS uses TLS variant I. The thread pointer (in hardware register $29)
- // points to the end of the TCB + 0x7000. The pthread_descr structure is
- // immediately in front of the TCB. TlsPreTcbSize() includes the size of the
- // TCB and the size of pthread_descr.
- const uptr kTlsTcbOffset = 0x7000;
- uptr thread_pointer;
- asm volatile(".set push;\
- .set mips64r2;\
- rdhwr %0,$29;\
- .set pop" : "=r" (thread_pointer));
- descr_addr = thread_pointer - kTlsTcbOffset - TlsPreTcbSize();
-#elif defined(__aarch64__) || defined(__arm__)
- descr_addr = reinterpret_cast<uptr>(__builtin_thread_pointer()) -
- ThreadDescriptorSize();
-#elif SANITIZER_RISCV64
- // https://github.com/riscv/riscv-elf-psabi-doc/issues/53
- uptr thread_pointer = reinterpret_cast<uptr>(__builtin_thread_pointer());
- descr_addr = thread_pointer - TlsPreTcbSize();
-#elif defined(__s390__)
- descr_addr = reinterpret_cast<uptr>(__builtin_thread_pointer());
-#elif defined(__powerpc64__)
- // PPC64LE uses TLS variant I. The thread pointer (in GPR 13)
- // points to the end of the TCB + 0x7000. The pthread_descr structure is
- // immediately in front of the TCB. TlsPreTcbSize() includes the size of the
- // TCB and the size of pthread_descr.
- const uptr kTlsTcbOffset = 0x7000;
- uptr thread_pointer;
- asm("addi %0,13,%1" : "=r"(thread_pointer) : "I"(-kTlsTcbOffset));
- descr_addr = thread_pointer - TlsPreTcbSize();
+#if !SANITIZER_GO
+namespace {
+struct TlsBlock {
+ uptr begin, end, align;
+ size_t tls_modid;
+ bool operator<(const TlsBlock &rhs) const { return begin < rhs.begin; }
+};
+} // namespace
+
+#ifdef __s390__
+extern "C" uptr __tls_get_offset(void *arg);
+
+static uptr TlsGetOffset(uptr ti_module, uptr ti_offset) {
+ // The __tls_get_offset ABI requires %r12 to point to GOT and %r2 to be an
+ // offset of a struct tls_index inside GOT. We don't possess either of the
+ // two, so violate the letter of the "ELF Handling For Thread-Local
+ // Storage" document and assume that the implementation just dereferences
+ // %r2 + %r12.
+ uptr tls_index[2] = {ti_module, ti_offset};
+ register uptr r2 asm("2") = 0;
+ register void *r12 asm("12") = tls_index;
+ asm("basr %%r14, %[__tls_get_offset]"
+ : "+r"(r2)
+ : [__tls_get_offset] "r"(__tls_get_offset), "r"(r12)
+ : "memory", "cc", "0", "1", "3", "4", "5", "14");
+ return r2;
+}
#else
-#error "unsupported CPU arch"
+extern "C" void *__tls_get_addr(size_t *);
#endif
- return descr_addr;
-}
-#endif // (x86_64 || i386 || MIPS) && SANITIZER_LINUX
-#if SANITIZER_FREEBSD
-static void **ThreadSelfSegbase() {
- void **segbase = 0;
-#if defined(__i386__)
- // sysarch(I386_GET_GSBASE, segbase);
- __asm __volatile("mov %%gs:0, %0" : "=r" (segbase));
-#elif defined(__x86_64__)
- // sysarch(AMD64_GET_FSBASE, segbase);
- __asm __volatile("movq %%fs:0, %0" : "=r" (segbase));
+static int CollectStaticTlsBlocks(struct dl_phdr_info *info, size_t size,
+ void *data) {
+ if (!info->dlpi_tls_modid)
+ return 0;
+ uptr begin = (uptr)info->dlpi_tls_data;
+ if (!g_use_dlpi_tls_data) {
+ // Call __tls_get_addr as a fallback. This forces TLS allocation on glibc
+ // and FreeBSD.
+#ifdef __s390__
+ begin = (uptr)__builtin_thread_pointer() +
+ TlsGetOffset(info->dlpi_tls_modid, 0);
#else
-#error "unsupported CPU arch"
+ size_t mod_and_off[2] = {info->dlpi_tls_modid, 0};
+ begin = (uptr)__tls_get_addr(mod_and_off);
#endif
- return segbase;
+ }
+ for (unsigned i = 0; i != info->dlpi_phnum; ++i)
+ if (info->dlpi_phdr[i].p_type == PT_TLS) {
+ static_cast<InternalMmapVector<TlsBlock> *>(data)->push_back(
+ TlsBlock{begin, begin + info->dlpi_phdr[i].p_memsz,
+ info->dlpi_phdr[i].p_align, info->dlpi_tls_modid});
+ break;
+ }
+ return 0;
}
-uptr ThreadSelf() {
- return (uptr)ThreadSelfSegbase()[2];
-}
-#endif // SANITIZER_FREEBSD
+__attribute__((unused)) static void GetStaticTlsBoundary(uptr *addr, uptr *size,
+ uptr *align) {
+ InternalMmapVector<TlsBlock> ranges;
+ dl_iterate_phdr(CollectStaticTlsBlocks, &ranges);
+ uptr len = ranges.size();
+ Sort(ranges.begin(), len);
+ // Find the range with tls_modid=1. For glibc, because libc.so uses PT_TLS,
+ // this module is guaranteed to exist and is one of the initially loaded
+ // modules.
+ uptr one = 0;
+ while (one != len && ranges[one].tls_modid != 1) ++one;
+ if (one == len) {
+ // This may happen with musl if no module uses PT_TLS.
+ *addr = 0;
+ *size = 0;
+ *align = 1;
+ return;
+ }
+ // Find the maximum consecutive ranges. We consider two modules consecutive if
+ // the gap is smaller than the alignment. The dynamic loader places static TLS
+ // blocks this way not to waste space.
+ uptr l = one;
+ *align = ranges[l].align;
+ while (l != 0 && ranges[l].begin < ranges[l - 1].end + ranges[l - 1].align)
+ *align = Max(*align, ranges[--l].align);
+ uptr r = one + 1;
+ while (r != len && ranges[r].begin < ranges[r - 1].end + ranges[r - 1].align)
+ *align = Max(*align, ranges[r++].align);
+ *addr = ranges[l].begin;
+ *size = ranges[r - 1].end - ranges[l].begin;
+}
+#endif // !SANITIZER_GO
+#endif // (x86_64 || i386 || mips || ...) && (SANITIZER_FREEBSD ||
+ // SANITIZER_LINUX) && !SANITIZER_ANDROID
#if SANITIZER_NETBSD
static struct tls_tcb * ThreadSelfTlsTcb() {
@@ -466,33 +449,71 @@ static void GetTls(uptr *addr, uptr *size) {
*addr = 0;
*size = 0;
}
-#elif SANITIZER_LINUX
-#if defined(__x86_64__) || defined(__i386__) || defined(__s390__)
- *addr = ThreadSelf();
- *size = GetTlsSize();
+#elif SANITIZER_GLIBC && defined(__x86_64__)
+ // For aarch64 and x86-64, use an O(1) approach which requires relatively
+ // precise ThreadDescriptorSize. g_tls_size was initialized in InitTlsSize.
+ asm("mov %%fs:16,%0" : "=r"(*addr));
+ *size = g_tls_size;
*addr -= *size;
*addr += ThreadDescriptorSize();
-#elif defined(__mips__) || defined(__aarch64__) || defined(__powerpc64__) || \
- defined(__arm__) || SANITIZER_RISCV64
- *addr = ThreadSelf();
- *size = GetTlsSize();
+#elif SANITIZER_GLIBC && defined(__aarch64__)
+ *addr = reinterpret_cast<uptr>(__builtin_thread_pointer()) -
+ ThreadDescriptorSize();
+ *size = g_tls_size + ThreadDescriptorSize();
+#elif SANITIZER_GLIBC && defined(__powerpc64__)
+ // Workaround for glibc<2.25(?). 2.27 is known to not need this.
+ uptr tp;
+ asm("addi %0,13,-0x7000" : "=r"(tp));
+ const uptr pre_tcb_size = TlsPreTcbSize();
+ *addr = tp - pre_tcb_size;
+ *size = g_tls_size + pre_tcb_size;
+#elif SANITIZER_FREEBSD || SANITIZER_LINUX
+ uptr align;
+ GetStaticTlsBoundary(addr, size, &align);
+#if defined(__x86_64__) || defined(__i386__) || defined(__s390__) || \
+ defined(__sparc__)
+ if (SANITIZER_GLIBC) {
+#if defined(__x86_64__) || defined(__i386__)
+ align = Max<uptr>(align, 64);
#else
- *addr = 0;
- *size = 0;
+ align = Max<uptr>(align, 16);
#endif
-#elif SANITIZER_FREEBSD
- void** segbase = ThreadSelfSegbase();
- *addr = 0;
- *size = 0;
- if (segbase != 0) {
- // tcbalign = 16
- // tls_size = round(tls_static_space, tcbalign);
- // dtv = segbase[1];
- // dtv[2] = segbase - tls_static_space;
- void **dtv = (void**) segbase[1];
- *addr = (uptr) dtv[2];
- *size = (*addr == 0) ? 0 : ((uptr) segbase[0] - (uptr) dtv[2]);
}
+ const uptr tp = RoundUpTo(*addr + *size, align);
+
+ // lsan requires the range to additionally cover the static TLS surplus
+ // (elf/dl-tls.c defines 1664). Otherwise there may be false positives for
+ // allocations only referenced by tls in dynamically loaded modules.
+ if (SANITIZER_GLIBC)
+ *size += 1664;
+ else if (SANITIZER_FREEBSD)
+ *size += 128; // RTLD_STATIC_TLS_EXTRA
+
+ // Extend the range to include the thread control block. On glibc, lsan needs
+ // the range to include pthread::{specific_1stblock,specific} so that
+ // allocations only referenced by pthread_setspecific can be scanned. This may
+ // underestimate by at most TLS_TCB_ALIGN-1 bytes but it should be fine
+ // because the number of bytes after pthread::specific is larger.
+ *addr = tp - RoundUpTo(*size, align);
+ *size = tp - *addr + ThreadDescriptorSize();
+#else
+ if (SANITIZER_GLIBC)
+ *size += 1664;
+ else if (SANITIZER_FREEBSD)
+ *size += 128; // RTLD_STATIC_TLS_EXTRA
+#if defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64
+ const uptr pre_tcb_size = TlsPreTcbSize();
+ *addr -= pre_tcb_size;
+ *size += pre_tcb_size;
+#else
+ // arm and aarch64 reserve two words at TP, so this underestimates the range.
+ // However, this is sufficient for the purpose of finding the pointers to
+ // thread-specific data keys.
+ const uptr tcb_size = ThreadDescriptorSize();
+ *addr -= tcb_size;
+ *size += tcb_size;
+#endif
+#endif
#elif SANITIZER_NETBSD
struct tls_tcb * const tcb = ThreadSelfTlsTcb();
*addr = 0;
@@ -519,17 +540,11 @@ static void GetTls(uptr *addr, uptr *size) {
#if !SANITIZER_GO
uptr GetTlsSize() {
-#if SANITIZER_FREEBSD || SANITIZER_ANDROID || SANITIZER_NETBSD || \
+#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
SANITIZER_SOLARIS
uptr addr, size;
GetTls(&addr, &size);
return size;
-#elif SANITIZER_GLIBC
-#if defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64
- return RoundUpTo(g_tls_size + TlsPreTcbSize(), 16);
-#else
- return g_tls_size;
-#endif
#else
return 0;
#endif
@@ -552,10 +567,9 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
if (!main) {
// If stack and tls intersect, make them non-intersecting.
if (*tls_addr > *stk_addr && *tls_addr < *stk_addr + *stk_size) {
- CHECK_GT(*tls_addr + *tls_size, *stk_addr);
- CHECK_LE(*tls_addr + *tls_size, *stk_addr + *stk_size);
- *stk_size -= *tls_size;
- *tls_addr = *stk_addr + *stk_size;
+ if (*stk_addr + *stk_size < *tls_addr + *tls_size)
+ *tls_size = *stk_addr + *stk_size - *tls_addr;
+ *stk_size = *tls_addr - *stk_addr;
}
}
#endif
@@ -574,20 +588,12 @@ struct DlIteratePhdrData {
bool first;
};
-static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
- DlIteratePhdrData *data = (DlIteratePhdrData*)arg;
- InternalScopedString module_name(kMaxPathLength);
- if (data->first) {
- data->first = false;
- // First module is the binary itself.
- ReadBinaryNameCached(module_name.data(), module_name.size());
- } else if (info->dlpi_name) {
- module_name.append("%s", info->dlpi_name);
- }
+static int AddModuleSegments(const char *module_name, dl_phdr_info *info,
+ InternalMmapVectorNoCtor<LoadedModule> *modules) {
if (module_name[0] == '\0')
return 0;
LoadedModule cur_module;
- cur_module.set(module_name.data(), info->dlpi_addr);
+ cur_module.set(module_name, info->dlpi_addr);
for (int i = 0; i < (int)info->dlpi_phnum; i++) {
const Elf_Phdr *phdr = &info->dlpi_phdr[i];
if (phdr->p_type == PT_LOAD) {
@@ -599,7 +605,26 @@ static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
writable);
}
}
- data->modules->push_back(cur_module);
+ modules->push_back(cur_module);
+ return 0;
+}
+
+static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
+ DlIteratePhdrData *data = (DlIteratePhdrData *)arg;
+ if (data->first) {
+ InternalMmapVector<char> module_name(kMaxPathLength);
+ data->first = false;
+ // First module is the binary itself.
+ ReadBinaryNameCached(module_name.data(), module_name.size());
+ return AddModuleSegments(module_name.data(), info, data->modules);
+ }
+
+ if (info->dlpi_name) {
+ InternalScopedString module_name;
+ module_name.append("%s", info->dlpi_name);
+ return AddModuleSegments(module_name.data(), info, data->modules);
+ }
+
return 0;
}
@@ -803,20 +828,13 @@ void LogMessageOnPrintf(const char *str) {
#endif // SANITIZER_LINUX
-#if SANITIZER_LINUX && !SANITIZER_GO
+#if SANITIZER_GLIBC && !SANITIZER_GO
// glibc crashes when using clock_gettime from a preinit_array function as the
// vDSO function pointers haven't been initialized yet. __progname is
// initialized after the vDSO function pointers, so if it exists, is not null
// and is not empty, we can use clock_gettime.
extern "C" SANITIZER_WEAK_ATTRIBUTE char *__progname;
-inline bool CanUseVDSO() {
- // Bionic is safe, it checks for the vDSO function pointers to be initialized.
- if (SANITIZER_ANDROID)
- return true;
- if (&__progname && __progname && *__progname)
- return true;
- return false;
-}
+inline bool CanUseVDSO() { return &__progname && __progname && *__progname; }
// MonotonicNanoTime is a timing function that can leverage the vDSO by calling
// clock_gettime. real_clock_gettime only exists if clock_gettime is
@@ -836,13 +854,13 @@ u64 MonotonicNanoTime() {
return (u64)ts.tv_sec * (1000ULL * 1000 * 1000) + ts.tv_nsec;
}
#else
-// Non-Linux & Go always use the syscall.
+// Non-glibc & Go always use the regular function.
u64 MonotonicNanoTime() {
timespec ts;
- internal_clock_gettime(CLOCK_MONOTONIC, &ts);
+ clock_gettime(CLOCK_MONOTONIC, &ts);
return (u64)ts.tv_sec * (1000ULL * 1000 * 1000) + ts.tv_nsec;
}
-#endif // SANITIZER_LINUX && !SANITIZER_GO
+#endif // SANITIZER_GLIBC && !SANITIZER_GO
void ReExec() {
const char *pathname = "/proc/self/exe";
@@ -911,6 +929,65 @@ uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
return shadow_start;
}
+static uptr MmapSharedNoReserve(uptr addr, uptr size) {
+ return internal_mmap(
+ reinterpret_cast<void *>(addr), size, PROT_READ | PROT_WRITE,
+ MAP_FIXED | MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
+}
+
+static uptr MremapCreateAlias(uptr base_addr, uptr alias_addr,
+ uptr alias_size) {
+#if SANITIZER_LINUX
+ return internal_mremap(reinterpret_cast<void *>(base_addr), 0, alias_size,
+ MREMAP_MAYMOVE | MREMAP_FIXED,
+ reinterpret_cast<void *>(alias_addr));
+#else
+ CHECK(false && "mremap is not supported outside of Linux");
+ return 0;
+#endif
+}
+
+static void CreateAliases(uptr start_addr, uptr alias_size, uptr num_aliases) {
+ uptr total_size = alias_size * num_aliases;
+ uptr mapped = MmapSharedNoReserve(start_addr, total_size);
+ CHECK_EQ(mapped, start_addr);
+
+ for (uptr i = 1; i < num_aliases; ++i) {
+ uptr alias_addr = start_addr + i * alias_size;
+ CHECK_EQ(MremapCreateAlias(start_addr, alias_addr, alias_size), alias_addr);
+ }
+}
+
+uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,
+ uptr num_aliases, uptr ring_buffer_size) {
+ CHECK_EQ(alias_size & (alias_size - 1), 0);
+ CHECK_EQ(num_aliases & (num_aliases - 1), 0);
+ CHECK_EQ(ring_buffer_size & (ring_buffer_size - 1), 0);
+
+ const uptr granularity = GetMmapGranularity();
+ shadow_size = RoundUpTo(shadow_size, granularity);
+ CHECK_EQ(shadow_size & (shadow_size - 1), 0);
+
+ const uptr alias_region_size = alias_size * num_aliases;
+ const uptr alignment =
+ 2 * Max(Max(shadow_size, alias_region_size), ring_buffer_size);
+ const uptr left_padding = ring_buffer_size;
+
+ const uptr right_size = alignment;
+ const uptr map_size = left_padding + 2 * alignment;
+
+ const uptr map_start = reinterpret_cast<uptr>(MmapNoAccess(map_size));
+ CHECK_NE(map_start, static_cast<uptr>(-1));
+ const uptr right_start = RoundUpTo(map_start + left_padding, alignment);
+
+ UnmapFromTo(map_start, right_start - left_padding);
+ UnmapFromTo(right_start + right_size, map_start + map_size);
+
+ CreateAliases(right_start + right_size / 2, alias_size, num_aliases);
+
+ return right_start;
+}
+
void InitializePlatformCommonFlags(CommonFlags *cf) {
#if SANITIZER_ANDROID
if (&__libc_get_static_tls_bounds == nullptr)
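GetStaticTlsBoundary above depends on dl_iterate_phdr reporting each module's TLS module id together with its PT_TLS program header. A small standalone program with the same callback shape, printing instead of collecting (assumes a libc whose dl_phdr_info carries dlpi_tls_modid, as glibc and FreeBSD do):

#include <link.h>
#include <cstdio>

static int PrintTlsSegment(struct dl_phdr_info *info, size_t, void *) {
  if (!info->dlpi_tls_modid)
    return 0;  // this module has no TLS block
  for (unsigned i = 0; i != info->dlpi_phnum; ++i) {
    if (info->dlpi_phdr[i].p_type == PT_TLS) {
      std::printf("modid=%zu  memsz=%zu  align=%zu  %s\n",
                  (size_t)info->dlpi_tls_modid,
                  (size_t)info->dlpi_phdr[i].p_memsz,
                  (size_t)info->dlpi_phdr[i].p_align,
                  info->dlpi_name && info->dlpi_name[0] ? info->dlpi_name
                                                        : "<main executable>");
      break;
    }
  }
  return 0;  // returning non-zero would stop the iteration early
}

int main() { dl_iterate_phdr(PrintTlsSegment, nullptr); }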
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_local_address_space_view.h b/compiler-rt/lib/sanitizer_common/sanitizer_local_address_space_view.h
index 5d1b5264b5ed..0e19c4d4a801 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_local_address_space_view.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_local_address_space_view.h
@@ -7,7 +7,7 @@
//===----------------------------------------------------------------------===//
//
// `LocalAddressSpaceView` provides the local (i.e. target and current address
-// space are the same) implementation of the `AddressSpaveView` interface which
+// space are the same) implementation of the `AddressSpaceView` interface which
// provides a simple interface to load memory from another process (i.e.
// out-of-process)
//
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp
index 2b53d7d730d7..083595d1505f 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp
@@ -44,6 +44,14 @@ extern char **environ;
#define SANITIZER_OS_TRACE 0
#endif
+// import new crash reporting api
+#if defined(__has_include) && __has_include(<CrashReporterClient.h>)
+#define HAVE_CRASHREPORTERCLIENT_H 1
+#include <CrashReporterClient.h>
+#else
+#define HAVE_CRASHREPORTERCLIENT_H 0
+#endif
+
#if !SANITIZER_IOS
#include <crt_externs.h> // for _NSGetArgv and _NSGetEnviron
#else
@@ -62,6 +70,7 @@ extern "C" {
#include <mach/mach_time.h>
#include <mach/vm_statistics.h>
#include <malloc/malloc.h>
+#include <os/log.h>
#include <pthread.h>
#include <sched.h>
#include <signal.h>
@@ -133,6 +142,12 @@ uptr internal_munmap(void *addr, uptr length) {
return munmap(addr, length);
}
+uptr internal_mremap(void *old_address, uptr old_size, uptr new_size, int flags,
+ void *new_address) {
+ CHECK(false && "internal_mremap is unimplemented on Mac");
+ return 0;
+}
+
int internal_mprotect(void *addr, uptr length, int prot) {
return mprotect(addr, length, prot);
}
@@ -204,9 +219,7 @@ void internal__exit(int exitcode) {
_exit(exitcode);
}
-unsigned int internal_sleep(unsigned int seconds) {
- return sleep(seconds);
-}
+void internal_usleep(u64 useconds) { usleep(useconds); }
uptr internal_getpid() {
return getpid();
@@ -444,7 +457,7 @@ uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
// On OS X the executable path is saved to the stack by dyld. Reading it
// from there is much faster than calling dladdr, especially for large
// binaries with symbols.
- InternalScopedString exe_path(kMaxPathLength);
+ InternalMmapVector<char> exe_path(kMaxPathLength);
uint32_t size = exe_path.size();
if (_NSGetExecutablePath(exe_path.data(), &size) == 0 &&
realpath(exe_path.data(), buf) != 0) {
@@ -496,6 +509,13 @@ void MprotectMallocZones(void *addr, int prot) {
}
}
+void FutexWait(atomic_uint32_t *p, u32 cmp) {
+ // FIXME: implement actual blocking.
+ sched_yield();
+}
+
+void FutexWake(atomic_uint32_t *p, u32 count) {}
+
BlockingMutex::BlockingMutex() {
internal_memset(this, 0, sizeof(*this));
}
@@ -511,8 +531,8 @@ void BlockingMutex::Unlock() {
OSSpinLockUnlock((OSSpinLock*)&opaque_storage_);
}
-void BlockingMutex::CheckLocked() {
- CHECK_NE(*(OSSpinLock*)&opaque_storage_, 0);
+void BlockingMutex::CheckLocked() const {
+ CHECK_NE(*(const OSSpinLock*)&opaque_storage_, 0);
}
u64 NanoTime() {
@@ -770,7 +790,51 @@ static BlockingMutex syslog_lock(LINKER_INITIALIZED);
void WriteOneLineToSyslog(const char *s) {
#if !SANITIZER_GO
syslog_lock.CheckLocked();
- asl_log(nullptr, nullptr, ASL_LEVEL_ERR, "%s", s);
+ if (GetMacosAlignedVersion() >= MacosVersion(10, 12)) {
+ os_log_error(OS_LOG_DEFAULT, "%{public}s", s);
+ } else {
+ asl_log(nullptr, nullptr, ASL_LEVEL_ERR, "%s", s);
+ }
+#endif
+}
+
+// buffer to store crash report application information
+static char crashreporter_info_buff[__sanitizer::kErrorMessageBufferSize] = {};
+static BlockingMutex crashreporter_info_mutex(LINKER_INITIALIZED);
+
+extern "C" {
+// Integrate with crash reporter libraries.
+#if HAVE_CRASHREPORTERCLIENT_H
+CRASH_REPORTER_CLIENT_HIDDEN
+struct crashreporter_annotations_t gCRAnnotations
+ __attribute__((section("__DATA," CRASHREPORTER_ANNOTATIONS_SECTION))) = {
+ CRASHREPORTER_ANNOTATIONS_VERSION,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+#if CRASHREPORTER_ANNOTATIONS_VERSION > 4
+ 0,
+#endif
+};
+
+#else
+// fall back to old crashreporter api
+static const char *__crashreporter_info__ __attribute__((__used__)) =
+ &crashreporter_info_buff[0];
+asm(".desc ___crashreporter_info__, 0x10");
+#endif
+
+} // extern "C"
+
+static void CRAppendCrashLogMessage(const char *msg) {
+ BlockingMutexLock l(&crashreporter_info_mutex);
+ internal_strlcat(crashreporter_info_buff, msg,
+ sizeof(crashreporter_info_buff));
+#if HAVE_CRASHREPORTERCLIENT_H
+ (void)CRSetCrashLogMessage(crashreporter_info_buff);
#endif
}
@@ -966,7 +1030,7 @@ void MaybeReexec() {
if (DyldNeedsEnvVariable() && !lib_is_in_env) {
// DYLD_INSERT_LIBRARIES is not set or does not contain the runtime
// library.
- InternalScopedString program_name(1024);
+ InternalMmapVector<char> program_name(1024);
uint32_t buf_size = program_name.size();
_NSGetExecutablePath(program_name.data(), &buf_size);
char *new_env = const_cast<char*>(info.dli_fname);
@@ -1125,26 +1189,35 @@ static uptr GetTaskInfoMaxAddress() {
uptr GetMaxUserVirtualAddress() {
static uptr max_vm = GetTaskInfoMaxAddress();
- if (max_vm != 0)
- return max_vm - 1;
+ if (max_vm != 0) {
+ const uptr ret_value = max_vm - 1;
+ CHECK_LE(ret_value, SANITIZER_MMAP_RANGE_SIZE);
+ return ret_value;
+ }
// xnu cannot provide vm address limit
# if SANITIZER_WORDSIZE == 32
- return 0xffe00000 - 1;
+ constexpr uptr fallback_max_vm = 0xffe00000 - 1;
# else
- return 0x200000000 - 1;
+ constexpr uptr fallback_max_vm = 0x200000000 - 1;
# endif
+ static_assert(fallback_max_vm <= SANITIZER_MMAP_RANGE_SIZE,
+ "Max virtual address must be less than mmap range size.");
+ return fallback_max_vm;
}
#else // !SANITIZER_IOS
uptr GetMaxUserVirtualAddress() {
# if SANITIZER_WORDSIZE == 64
- return (1ULL << 47) - 1; // 0x00007fffffffffffUL;
+ constexpr uptr max_vm = (1ULL << 47) - 1; // 0x00007fffffffffffUL;
# else // SANITIZER_WORDSIZE == 32
static_assert(SANITIZER_WORDSIZE == 32, "Wrong wordsize");
- return (1ULL << 32) - 1; // 0xffffffff;
+ constexpr uptr max_vm = (1ULL << 32) - 1; // 0xffffffff;
# endif
+ static_assert(max_vm <= SANITIZER_MMAP_RANGE_SIZE,
+ "Max virtual address must be less than mmap range size.");
+ return max_vm;
}
#endif
@@ -1199,6 +1272,12 @@ uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
return shadow_start;
}
+uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,
+ uptr num_aliases, uptr ring_buffer_size) {
+ CHECK(false && "HWASan aliasing is unimplemented on Mac");
+ return 0;
+}
+
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
uptr *largest_gap_found,
uptr *max_occupied_addr) {
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_mac.h b/compiler-rt/lib/sanitizer_common/sanitizer_mac.h
index 023071e4f11d..0b6af5a3c0ed 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_mac.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_mac.h
@@ -64,22 +64,5 @@ void RestrictMemoryToMaxAddress(uptr max_address);
} // namespace __sanitizer
-extern "C" {
-static char __crashreporter_info_buff__[__sanitizer::kErrorMessageBufferSize] =
- {};
-static const char *__crashreporter_info__ __attribute__((__used__)) =
- &__crashreporter_info_buff__[0];
-asm(".desc ___crashreporter_info__, 0x10");
-} // extern "C"
-
-namespace __sanitizer {
-static BlockingMutex crashreporter_info_mutex(LINKER_INITIALIZED);
-
-inline void CRAppendCrashLogMessage(const char *msg) {
- BlockingMutexLock l(&crashreporter_info_mutex);
- internal_strlcat(__crashreporter_info_buff__, msg,
- sizeof(__crashreporter_info_buff__)); }
-} // namespace __sanitizer
-
#endif // SANITIZER_MAC
#endif // SANITIZER_MAC_H
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_malloc_mac.inc b/compiler-rt/lib/sanitizer_common/sanitizer_malloc_mac.inc
index 647bcdfe105e..e3b664f68b61 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_malloc_mac.inc
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_malloc_mac.inc
@@ -120,11 +120,7 @@ INTERCEPTOR(int, malloc_make_nonpurgeable, void *ptr) {
INTERCEPTOR(void, malloc_set_zone_name, malloc_zone_t *zone, const char *name) {
COMMON_MALLOC_ENTER();
- // Allocate |sizeof(COMMON_MALLOC_ZONE_NAME "-") + internal_strlen(name)|
- // bytes.
- size_t buflen =
- sizeof(COMMON_MALLOC_ZONE_NAME "-") + (name ? internal_strlen(name) : 0);
- InternalScopedString new_name(buflen);
+ InternalScopedString new_name;
if (name && zone->introspect == sanitizer_zone.introspect) {
new_name.append(COMMON_MALLOC_ZONE_NAME "-%s", name);
name = new_name.data();
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_mutex.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_mutex.cpp
new file mode 100644
index 000000000000..46f1d0279ca1
--- /dev/null
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_mutex.cpp
@@ -0,0 +1,225 @@
+//===-- sanitizer_mutex.cpp -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_mutex.h"
+
+#include "sanitizer_common.h"
+
+namespace __sanitizer {
+
+void StaticSpinMutex::LockSlow() {
+ for (int i = 0;; i++) {
+ if (i < 100)
+ proc_yield(1);
+ else
+ internal_sched_yield();
+ if (atomic_load(&state_, memory_order_relaxed) == 0 &&
+ atomic_exchange(&state_, 1, memory_order_acquire) == 0)
+ return;
+ }
+}
+
+void Semaphore::Wait() {
+ u32 count = atomic_load(&state_, memory_order_relaxed);
+ for (;;) {
+ if (count == 0) {
+ FutexWait(&state_, 0);
+ count = atomic_load(&state_, memory_order_relaxed);
+ continue;
+ }
+ if (atomic_compare_exchange_weak(&state_, &count, count - 1,
+ memory_order_acquire))
+ break;
+ }
+}
+
+void Semaphore::Post(u32 count) {
+ CHECK_NE(count, 0);
+ atomic_fetch_add(&state_, count, memory_order_release);
+ FutexWake(&state_, count);
+}
+
+#if SANITIZER_CHECK_DEADLOCKS
+// An empty mutex meta table, it effectively disables deadlock detection.
+// Each tool can override the table to define its own mutex hierarchy and
+// enable deadlock detection.
+// The table defines a static mutex type hierarchy (what mutex types can be locked
+// under what mutex types). This table is checked to be acyclic and then
+// actual mutex lock/unlock operations are checked to adhere to this hierarchy.
+// The checking happens on mutex types rather than on individual mutex instances
+// because doing it on mutex instances would significantly complicate the
+// implementation, worsen performance and memory overhead, and is mostly
+// unnecessary (we almost never lock multiple mutexes of the same type recursively).
+static constexpr int kMutexTypeMax = 20;
+SANITIZER_WEAK_ATTRIBUTE MutexMeta mutex_meta[kMutexTypeMax] = {};
+SANITIZER_WEAK_ATTRIBUTE void PrintMutexPC(uptr pc) {}
+static StaticSpinMutex mutex_meta_mtx;
+static int mutex_type_count = -1;
+// Adjacency matrix of what mutexes can be locked under what mutexes.
+static bool mutex_can_lock[kMutexTypeMax][kMutexTypeMax];
+// Mutex types with MutexMulti mark.
+static bool mutex_multi[kMutexTypeMax];
+
+void DebugMutexInit() {
+ // Build adjacency matrix.
+ bool leaf[kMutexTypeMax];
+ internal_memset(&leaf, 0, sizeof(leaf));
+ int cnt[kMutexTypeMax] = {};
+ internal_memset(&cnt, 0, sizeof(cnt));
+ for (int t = 0; t < kMutexTypeMax; t++) {
+ mutex_type_count = t;
+ if (!mutex_meta[t].name)
+ break;
+ CHECK_EQ(t, mutex_meta[t].type);
+ for (uptr j = 0; j < ARRAY_SIZE(mutex_meta[t].can_lock); j++) {
+ MutexType z = mutex_meta[t].can_lock[j];
+ if (z == MutexInvalid)
+ break;
+ if (z == MutexLeaf) {
+ CHECK(!leaf[t]);
+ leaf[t] = true;
+ continue;
+ }
+ if (z == MutexMulti) {
+ mutex_multi[t] = true;
+ continue;
+ }
+ CHECK_LT(z, kMutexTypeMax);
+ CHECK(!mutex_can_lock[t][z]);
+ mutex_can_lock[t][z] = true;
+ cnt[t]++;
+ }
+ }
+ // Indicates the array is not properly terminated.
+ CHECK_LT(mutex_type_count, kMutexTypeMax);
+ // Add leaf mutexes.
+ for (int t = 0; t < mutex_type_count; t++) {
+ if (!leaf[t])
+ continue;
+ CHECK_EQ(cnt[t], 0);
+ for (int z = 0; z < mutex_type_count; z++) {
+ if (z == MutexInvalid || t == z || leaf[z])
+ continue;
+ CHECK(!mutex_can_lock[z][t]);
+ mutex_can_lock[z][t] = true;
+ }
+ }
+ // Build the transitive closure and check that the graph is acyclic.
+ u32 trans[kMutexTypeMax];
+ static_assert(sizeof(trans[0]) * 8 >= kMutexTypeMax,
+ "kMutexTypeMax does not fit into u32, switch to u64");
+ internal_memset(&trans, 0, sizeof(trans));
+ for (int i = 0; i < mutex_type_count; i++) {
+ for (int j = 0; j < mutex_type_count; j++)
+ if (mutex_can_lock[i][j])
+ trans[i] |= 1 << j;
+ }
+ for (int k = 0; k < mutex_type_count; k++) {
+ for (int i = 0; i < mutex_type_count; i++) {
+ if (trans[i] & (1 << k))
+ trans[i] |= trans[k];
+ }
+ }
+ for (int i = 0; i < mutex_type_count; i++) {
+ if (trans[i] & (1 << i)) {
+ Printf("Mutex %s participates in a cycle\n", mutex_meta[i].name);
+ Die();
+ }
+ }
+}
+
+struct InternalDeadlockDetector {
+ struct LockDesc {
+ u64 seq;
+ uptr pc;
+ int recursion;
+ };
+ int initialized;
+ u64 sequence;
+ LockDesc locked[kMutexTypeMax];
+
+ void Lock(MutexType type, uptr pc) {
+ if (!Initialize(type))
+ return;
+ CHECK_LT(type, mutex_type_count);
+ // Find the last locked mutex type.
+ // This is the type we will use for hierarchy checks.
+ u64 max_seq = 0;
+ MutexType max_idx = MutexInvalid;
+ for (int i = 0; i != mutex_type_count; i++) {
+ if (locked[i].seq == 0)
+ continue;
+ CHECK_NE(locked[i].seq, max_seq);
+ if (max_seq < locked[i].seq) {
+ max_seq = locked[i].seq;
+ max_idx = (MutexType)i;
+ }
+ }
+ if (max_idx == type && mutex_multi[type]) {
+ // Recursive lock of the same type.
+ CHECK_EQ(locked[type].seq, max_seq);
+ CHECK(locked[type].pc);
+ locked[type].recursion++;
+ return;
+ }
+ if (max_idx != MutexInvalid && !mutex_can_lock[max_idx][type]) {
+ Printf("%s: internal deadlock: can't lock %s under %s mutex\n", SanitizerToolName,
+ mutex_meta[type].name, mutex_meta[max_idx].name);
+ PrintMutexPC(pc);
+ CHECK(0);
+ }
+ locked[type].seq = ++sequence;
+ locked[type].pc = pc;
+ locked[type].recursion = 1;
+ }
+
+ void Unlock(MutexType type) {
+ if (!Initialize(type))
+ return;
+ CHECK_LT(type, mutex_type_count);
+ CHECK(locked[type].seq);
+ CHECK_GT(locked[type].recursion, 0);
+ if (--locked[type].recursion)
+ return;
+ locked[type].seq = 0;
+ locked[type].pc = 0;
+ }
+
+ void CheckNoLocks() {
+ for (int i = 0; i < mutex_type_count; i++) CHECK_EQ(locked[i].recursion, 0);
+ }
+
+ bool Initialize(MutexType type) {
+ if (type == MutexUnchecked || type == MutexInvalid)
+ return false;
+ CHECK_GT(type, MutexInvalid);
+ if (initialized != 0)
+ return initialized > 0;
+ initialized = -1;
+ SpinMutexLock lock(&mutex_meta_mtx);
+ if (mutex_type_count < 0)
+ DebugMutexInit();
+ initialized = mutex_type_count ? 1 : -1;
+ return initialized > 0;
+ }
+};
+
+static THREADLOCAL InternalDeadlockDetector deadlock_detector;
+
+void CheckedMutex::LockImpl(uptr pc) { deadlock_detector.Lock(type_, pc); }
+
+void CheckedMutex::UnlockImpl() { deadlock_detector.Unlock(type_); }
+
+void CheckedMutex::CheckNoLocksImpl() { deadlock_detector.CheckNoLocks(); }
+#endif
+
+} // namespace __sanitizer
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h b/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h
index 40a65914299e..cbd1c25eb69f 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h
@@ -16,67 +16,335 @@
#include "sanitizer_atomic.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
+#include "sanitizer_thread_safety.h"
namespace __sanitizer {
-class StaticSpinMutex {
+class MUTEX StaticSpinMutex {
public:
void Init() {
atomic_store(&state_, 0, memory_order_relaxed);
}
- void Lock() {
- if (TryLock())
+ void Lock() ACQUIRE() {
+ if (LIKELY(TryLock()))
return;
LockSlow();
}
- bool TryLock() {
+ bool TryLock() TRY_ACQUIRE(true) {
return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
}
- void Unlock() {
- atomic_store(&state_, 0, memory_order_release);
- }
+ void Unlock() RELEASE() { atomic_store(&state_, 0, memory_order_release); }
- void CheckLocked() {
+ void CheckLocked() const CHECK_LOCKED() {
CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
}
private:
atomic_uint8_t state_;
- void NOINLINE LockSlow() {
- for (int i = 0;; i++) {
- if (i < 10)
- proc_yield(10);
- else
- internal_sched_yield();
- if (atomic_load(&state_, memory_order_relaxed) == 0
- && atomic_exchange(&state_, 1, memory_order_acquire) == 0)
- return;
- }
- }
+ void LockSlow();
};
-class SpinMutex : public StaticSpinMutex {
+class MUTEX SpinMutex : public StaticSpinMutex {
public:
SpinMutex() {
Init();
}
+ SpinMutex(const SpinMutex &) = delete;
+ void operator=(const SpinMutex &) = delete;
+};
+
+// Semaphore provides an OS-dependent way to park/unpark threads.
+// The last thread to return from Wait can destroy the object
+// (destruction-safety).
+class Semaphore {
+ public:
+ constexpr Semaphore() {}
+ Semaphore(const Semaphore &) = delete;
+ void operator=(const Semaphore &) = delete;
+
+ void Wait();
+ void Post(u32 count = 1);
+
private:
- SpinMutex(const SpinMutex&);
- void operator=(const SpinMutex&);
+ atomic_uint32_t state_ = {0};
};
-class BlockingMutex {
+typedef int MutexType;
+
+enum {
+ // Used as a sentinel and to catch unassigned types
+ // (should not be used as a real Mutex type).
+ MutexInvalid = 0,
+ MutexThreadRegistry,
+ // Each tool's own mutexes must start at this number.
+ MutexLastCommon,
+ // Type for legacy mutexes that are not checked for deadlocks.
+ MutexUnchecked = -1,
+ // Special marks that can be used in MutexMeta::can_lock table.
+ // The leaf mutexes can be locked under any other non-leaf mutex,
+ // but no other mutex can be locked while under a leaf mutex.
+ MutexLeaf = -1,
+ // Multiple mutexes of this type can be locked at the same time.
+ MutexMulti = -3,
+};
+
+// The Go linker does not support THREADLOCAL variables,
+// so we can't use per-thread state.
+#define SANITIZER_CHECK_DEADLOCKS (SANITIZER_DEBUG && !SANITIZER_GO)
+
+#if SANITIZER_CHECK_DEADLOCKS
+struct MutexMeta {
+ MutexType type;
+ const char *name;
+ // The table specifies which mutexes can be locked under which mutexes.
+ // If the entry for MutexTypeFoo contains MutexTypeBar,
+ // then Bar mutex can be locked while under Foo mutex.
+ // Can also contain the special MutexLeaf/MutexMulti marks.
+ MutexType can_lock[10];
+};
+#endif
+
+class CheckedMutex {
+ public:
+ constexpr CheckedMutex(MutexType type)
+#if SANITIZER_CHECK_DEADLOCKS
+ : type_(type)
+#endif
+ {
+ }
+
+ ALWAYS_INLINE void Lock() {
+#if SANITIZER_CHECK_DEADLOCKS
+ LockImpl(GET_CALLER_PC());
+#endif
+ }
+
+ ALWAYS_INLINE void Unlock() {
+#if SANITIZER_CHECK_DEADLOCKS
+ UnlockImpl();
+#endif
+ }
+
+ // Checks that the current thread does not hold any mutexes
+ // (e.g. when returning from a runtime function to user code).
+ static void CheckNoLocks() {
+#if SANITIZER_CHECK_DEADLOCKS
+ CheckNoLocksImpl();
+#endif
+ }
+
+ private:
+#if SANITIZER_CHECK_DEADLOCKS
+ const MutexType type_;
+
+ void LockImpl(uptr pc);
+ void UnlockImpl();
+ static void CheckNoLocksImpl();
+#endif
+};
+
+// Reader-writer mutex.
+// Derive from CheckedMutex for the purposes of EBO.
+// We could make it a field marked with [[no_unique_address]],
+// but this attribute is not supported by some older compilers.
+class MUTEX Mutex : CheckedMutex {
+ public:
+ constexpr Mutex(MutexType type = MutexUnchecked) : CheckedMutex(type) {}
+
+ void Lock() ACQUIRE() {
+ CheckedMutex::Lock();
+ u64 reset_mask = ~0ull;
+ u64 state = atomic_load_relaxed(&state_);
+ const uptr kMaxSpinIters = 1500;
+ for (uptr spin_iters = 0;; spin_iters++) {
+ u64 new_state;
+ bool locked = (state & (kWriterLock | kReaderLockMask)) != 0;
+ if (LIKELY(!locked)) {
+ // The mutex is not read-/write-locked, try to lock.
+ new_state = (state | kWriterLock) & reset_mask;
+ } else if (spin_iters > kMaxSpinIters) {
+ // We've spun enough, increment waiting writers count and block.
+ // The counter will be decremented by whoever wakes us.
+ new_state = (state + kWaitingWriterInc) & reset_mask;
+ } else if ((state & kWriterSpinWait) == 0) {
+ // Active spinning, but denote our presence so that the unlocking
+ // thread does not wake up other threads.
+ new_state = state | kWriterSpinWait;
+ } else {
+ // Active spinning.
+ state = atomic_load(&state_, memory_order_relaxed);
+ continue;
+ }
+ if (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
+ memory_order_acquire)))
+ continue;
+ if (LIKELY(!locked))
+ return; // We've locked the mutex.
+ if (spin_iters > kMaxSpinIters) {
+ // We've incremented waiting writers, so now block.
+ writers_.Wait();
+ spin_iters = 0;
+ state = atomic_load(&state_, memory_order_relaxed);
+ DCHECK_NE(state & kWriterSpinWait, 0);
+ } else {
+ // We've set kWriterSpinWait, but we are still in active spinning.
+ }
+ // We either blocked and were unblocked,
+ // or we just spun but set kWriterSpinWait.
+ // Either way we need to reset kWriterSpinWait
+ // next time we take the lock or block again.
+ reset_mask = ~kWriterSpinWait;
+ }
+ }
+
+ void Unlock() RELEASE() {
+ CheckedMutex::Unlock();
+ bool wake_writer;
+ u64 wake_readers;
+ u64 new_state;
+ u64 state = atomic_load_relaxed(&state_);
+ do {
+ DCHECK_NE(state & kWriterLock, 0);
+ DCHECK_EQ(state & kReaderLockMask, 0);
+ new_state = state & ~kWriterLock;
+ wake_writer =
+ (state & kWriterSpinWait) == 0 && (state & kWaitingWriterMask) != 0;
+ if (wake_writer)
+ new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
+ wake_readers =
+ (state & (kWriterSpinWait | kWaitingWriterMask)) != 0
+ ? 0
+ : ((state & kWaitingReaderMask) >> kWaitingReaderShift);
+ if (wake_readers)
+ new_state = (new_state & ~kWaitingReaderMask) +
+ (wake_readers << kReaderLockShift);
+ } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
+ memory_order_release)));
+ if (UNLIKELY(wake_writer))
+ writers_.Post();
+ else if (UNLIKELY(wake_readers))
+ readers_.Post(wake_readers);
+ }
+
+ void ReadLock() ACQUIRE_SHARED() {
+ CheckedMutex::Lock();
+ bool locked;
+ u64 new_state;
+ u64 state = atomic_load_relaxed(&state_);
+ do {
+ locked =
+ (state & kReaderLockMask) == 0 &&
+ (state & (kWriterLock | kWriterSpinWait | kWaitingWriterMask)) != 0;
+ if (LIKELY(!locked))
+ new_state = state + kReaderLockInc;
+ else
+ new_state = state + kWaitingReaderInc;
+ } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
+ memory_order_acquire)));
+ if (UNLIKELY(locked))
+ readers_.Wait();
+ DCHECK_EQ(atomic_load_relaxed(&state_) & kWriterLock, 0);
+ DCHECK_NE(atomic_load_relaxed(&state_) & kReaderLockMask, 0);
+ }
+
+ void ReadUnlock() RELEASE_SHARED() {
+ CheckedMutex::Unlock();
+ bool wake;
+ u64 new_state;
+ u64 state = atomic_load_relaxed(&state_);
+ do {
+ DCHECK_NE(state & kReaderLockMask, 0);
+ DCHECK_EQ(state & (kWaitingReaderMask | kWriterLock), 0);
+ new_state = state - kReaderLockInc;
+ wake = (new_state & (kReaderLockMask | kWriterSpinWait)) == 0 &&
+ (new_state & kWaitingWriterMask) != 0;
+ if (wake)
+ new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
+ } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
+ memory_order_release)));
+ if (UNLIKELY(wake))
+ writers_.Post();
+ }
+
+ // This function does not guarantee an explicit check that the calling thread
+ // is the thread which owns the mutex. This behavior, while more strictly
+ // correct, causes problems in cases like StopTheWorld, where a parent thread
+ // owns the mutex but a child checks that it is locked. Rather than
+ // maintaining complex state to work around those situations, the check only
+ // checks that the mutex is owned.
+ void CheckWriteLocked() const CHECK_LOCKED() {
+ CHECK(atomic_load(&state_, memory_order_relaxed) & kWriterLock);
+ }
+
+ void CheckLocked() const CHECK_LOCKED() { CheckWriteLocked(); }
+
+ void CheckReadLocked() const CHECK_LOCKED() {
+ CHECK(atomic_load(&state_, memory_order_relaxed) & kReaderLockMask);
+ }
+
+ private:
+ atomic_uint64_t state_ = {0};
+ Semaphore writers_;
+ Semaphore readers_;
+
+ // The state has 3 counters:
+ // - number of readers holding the lock,
+ // if non zero, the mutex is read-locked
+ // - number of waiting readers,
+ // if non zero, the mutex is write-locked
+ // - number of waiting writers,
+ // if non zero, the mutex is read- or write-locked
+ // And 2 flags:
+ // - writer lock
+ // if set, the mutex is write-locked
+ // - a writer is awake and spin-waiting
+ // the flag is used to prevent thundering herd problem
+ // (new writers are not woken if this flag is set)
+ //
+ // Writers support active spinning, readers do not.
+ // But readers are more aggressive and always take the mutex
+ // if there are any other readers.
+ // Writers hand off the mutex to readers: after wake-up, readers
+ // already assume ownership of the mutex (they don't need to do any
+ // state updates). But the mutex is not handed off to writers:
+ // after wake-up, writers compete to lock the mutex again.
+ // This is needed to allow repeated write locks even in the presence
+ // of other blocked writers.
+ static constexpr u64 kCounterWidth = 20;
+ static constexpr u64 kReaderLockShift = 0;
+ static constexpr u64 kReaderLockInc = 1ull << kReaderLockShift;
+ static constexpr u64 kReaderLockMask = ((1ull << kCounterWidth) - 1)
+ << kReaderLockShift;
+ static constexpr u64 kWaitingReaderShift = kCounterWidth;
+ static constexpr u64 kWaitingReaderInc = 1ull << kWaitingReaderShift;
+ static constexpr u64 kWaitingReaderMask = ((1ull << kCounterWidth) - 1)
+ << kWaitingReaderShift;
+ static constexpr u64 kWaitingWriterShift = 2 * kCounterWidth;
+ static constexpr u64 kWaitingWriterInc = 1ull << kWaitingWriterShift;
+ static constexpr u64 kWaitingWriterMask = ((1ull << kCounterWidth) - 1)
+ << kWaitingWriterShift;
+ static constexpr u64 kWriterLock = 1ull << (3 * kCounterWidth);
+ static constexpr u64 kWriterSpinWait = 1ull << (3 * kCounterWidth + 1);
+
+ Mutex(const Mutex &) = delete;
+ void operator=(const Mutex &) = delete;
+};
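Editorial sketch (not part of this patch) that restates the packing above with plain integers, showing how the three counters and two flags would be extracted from a 64-bit state word:

#include <cstdint>

namespace mutex_state_demo {
constexpr uint64_t kCounterWidth = 20;
constexpr uint64_t kCounterMask = (1ull << kCounterWidth) - 1;

inline uint64_t Readers(uint64_t s) { return s & kCounterMask; }
inline uint64_t WaitingReaders(uint64_t s) { return (s >> kCounterWidth) & kCounterMask; }
inline uint64_t WaitingWriters(uint64_t s) { return (s >> (2 * kCounterWidth)) & kCounterMask; }
inline bool WriterLocked(uint64_t s) { return s & (1ull << (3 * kCounterWidth)); }
inline bool WriterSpinWaiting(uint64_t s) { return s & (1ull << (3 * kCounterWidth + 1)); }
}  // namespace mutex_state_demo

// Example: a state with 2 readers holding the lock and 1 waiting writer:
//   uint64_t s = 2 | (1ull << 40);
//   mutex_state_demo::Readers(s) == 2, WaitingWriters(s) == 1, WriterLocked(s) == false.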
+
+void FutexWait(atomic_uint32_t *p, u32 cmp);
+void FutexWake(atomic_uint32_t *p, u32 count);
+
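The per-OS implementations of these hooks live in other files of this patch; purely as a hedged illustration (standard futex(2) usage, not code from this change), a Linux-flavoured version could look roughly like:

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <atomic>
#include <cstdint>

// Sleep only while *p still equals cmp, so a concurrent Post/Wake cannot be lost.
void ExampleFutexWait(std::atomic<uint32_t> *p, uint32_t cmp) {
  syscall(SYS_futex, reinterpret_cast<uint32_t *>(p), FUTEX_WAIT_PRIVATE, cmp,
          nullptr, nullptr, 0);
}

// Wake up to `count` waiters blocked on p.
void ExampleFutexWake(std::atomic<uint32_t> *p, uint32_t count) {
  syscall(SYS_futex, reinterpret_cast<uint32_t *>(p), FUTEX_WAKE_PRIVATE, count,
          nullptr, nullptr, 0);
}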
+class MUTEX BlockingMutex {
public:
explicit constexpr BlockingMutex(LinkerInitialized)
: opaque_storage_ {0, }, owner_ {0} {}
BlockingMutex();
- void Lock();
- void Unlock();
+ void Lock() ACQUIRE();
+ void Unlock() RELEASE();
// This function does not guarantee an explicit check that the calling thread
// is the thread which owns the mutex. This behavior, while more strictly
@@ -85,7 +353,7 @@ class BlockingMutex {
// maintaining complex state to work around those situations, the check only
// checks that the mutex is owned, and assumes callers to be generally
// well-behaved.
- void CheckLocked();
+ void CheckLocked() const CHECK_LOCKED();
private:
// Solaris mutex_t has a member that requires 64-bit alignment.
@@ -94,7 +362,7 @@ class BlockingMutex {
};
// Reader-writer spin mutex.
-class RWMutex {
+class MUTEX RWMutex {
public:
RWMutex() {
atomic_store(&state_, kUnlocked, memory_order_relaxed);
@@ -104,7 +372,7 @@ class RWMutex {
CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
}
- void Lock() {
+ void Lock() ACQUIRE() {
u32 cmp = kUnlocked;
if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
memory_order_acquire))
@@ -112,27 +380,27 @@ class RWMutex {
LockSlow();
}
- void Unlock() {
+ void Unlock() RELEASE() {
u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
DCHECK_NE(prev & kWriteLock, 0);
(void)prev;
}
- void ReadLock() {
+ void ReadLock() ACQUIRE_SHARED() {
u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
if ((prev & kWriteLock) == 0)
return;
ReadLockSlow();
}
- void ReadUnlock() {
+ void ReadUnlock() RELEASE_SHARED() {
u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
DCHECK_EQ(prev & kWriteLock, 0);
DCHECK_GT(prev & ~kWriteLock, 0);
(void)prev;
}
- void CheckLocked() {
+ void CheckLocked() const CHECK_LOCKED() {
CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
}
@@ -171,52 +439,48 @@ class RWMutex {
}
}
- RWMutex(const RWMutex&);
- void operator = (const RWMutex&);
+ RWMutex(const RWMutex &) = delete;
+ void operator=(const RWMutex &) = delete;
};
-template<typename MutexType>
-class GenericScopedLock {
+template <typename MutexType>
+class SCOPED_LOCK GenericScopedLock {
public:
- explicit GenericScopedLock(MutexType *mu)
- : mu_(mu) {
+ explicit GenericScopedLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
mu_->Lock();
}
- ~GenericScopedLock() {
- mu_->Unlock();
- }
+ ~GenericScopedLock() RELEASE() { mu_->Unlock(); }
private:
MutexType *mu_;
- GenericScopedLock(const GenericScopedLock&);
- void operator=(const GenericScopedLock&);
+ GenericScopedLock(const GenericScopedLock &) = delete;
+ void operator=(const GenericScopedLock &) = delete;
};
-template<typename MutexType>
-class GenericScopedReadLock {
+template <typename MutexType>
+class SCOPED_LOCK GenericScopedReadLock {
public:
- explicit GenericScopedReadLock(MutexType *mu)
- : mu_(mu) {
+ explicit GenericScopedReadLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
mu_->ReadLock();
}
- ~GenericScopedReadLock() {
- mu_->ReadUnlock();
- }
+ ~GenericScopedReadLock() RELEASE() { mu_->ReadUnlock(); }
private:
MutexType *mu_;
- GenericScopedReadLock(const GenericScopedReadLock&);
- void operator=(const GenericScopedReadLock&);
+ GenericScopedReadLock(const GenericScopedReadLock &) = delete;
+ void operator=(const GenericScopedReadLock &) = delete;
};
typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
typedef GenericScopedLock<RWMutex> RWMutexLock;
typedef GenericScopedReadLock<RWMutex> RWMutexReadLock;
+typedef GenericScopedLock<Mutex> Lock;
+typedef GenericScopedReadLock<Mutex> ReadLock;
} // namespace __sanitizer
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_netbsd.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_netbsd.cpp
index 98ac7365da05..5e601bdcde1e 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_netbsd.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_netbsd.cpp
@@ -105,6 +105,12 @@ uptr internal_munmap(void *addr, uptr length) {
return _REAL(munmap, addr, length);
}
+uptr internal_mremap(void *old_address, uptr old_size, uptr new_size, int flags,
+ void *new_address) {
+ CHECK(false && "internal_mremap is unimplemented on NetBSD");
+ return 0;
+}
+
int internal_mprotect(void *addr, uptr length, int prot) {
DEFINE__REAL(int, mprotect, void *a, uptr b, int c);
return _REAL(mprotect, addr, length, prot);
@@ -209,15 +215,12 @@ void internal__exit(int exitcode) {
Die(); // Unreachable.
}
-unsigned int internal_sleep(unsigned int seconds) {
+void internal_usleep(u64 useconds) {
struct timespec ts;
- ts.tv_sec = seconds;
- ts.tv_nsec = 0;
+ ts.tv_sec = useconds / 1000000;
+ ts.tv_nsec = (useconds % 1000000) * 1000;
CHECK(&_sys___nanosleep50);
- int res = _sys___nanosleep50(&ts, &ts);
- if (res)
- return ts.tv_sec;
- return 0;
+ _sys___nanosleep50(&ts, &ts);
}
uptr internal_execve(const char *filename, char *const argv[],
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_platform.h b/compiler-rt/lib/sanitizer_common/sanitizer_platform.h
index 96c01bad870d..4d3c08893c11 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_platform.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_platform.h
@@ -13,10 +13,9 @@
#define SANITIZER_PLATFORM_H
#if !defined(__linux__) && !defined(__FreeBSD__) && !defined(__NetBSD__) && \
- !defined(__APPLE__) && !defined(_WIN32) && \
- !defined(__Fuchsia__) && !defined(__rtems__) && \
- !(defined(__sun__) && defined(__svr4__))
-# error "This operating system is not supported"
+ !defined(__APPLE__) && !defined(_WIN32) && !defined(__Fuchsia__) && \
+ !(defined(__sun__) && defined(__svr4__))
+# error "This operating system is not supported"
#endif
// Get __GLIBC__ on a glibc platform. Exclude Android: features.h includes C
@@ -59,6 +58,11 @@
#if defined(__APPLE__)
# define SANITIZER_MAC 1
# include <TargetConditionals.h>
+# if TARGET_OS_OSX
+# define SANITIZER_OSX 1
+# else
+# define SANITIZER_OSX 0
+# endif
# if TARGET_OS_IPHONE
# define SANITIZER_IOS 1
# else
@@ -73,6 +77,7 @@
# define SANITIZER_MAC 0
# define SANITIZER_IOS 0
# define SANITIZER_IOSSIM 0
+# define SANITIZER_OSX 0
#endif
#if defined(__APPLE__) && TARGET_OS_IPHONE && TARGET_OS_WATCH
@@ -111,12 +116,6 @@
# define SANITIZER_FUCHSIA 0
#endif
-#if defined(__rtems__)
-# define SANITIZER_RTEMS 1
-#else
-# define SANITIZER_RTEMS 0
-#endif
-
#define SANITIZER_POSIX \
(SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC || \
SANITIZER_NETBSD || SANITIZER_SOLARIS)
@@ -220,12 +219,6 @@
# define SANITIZER_SOLARIS32 0
#endif
-#if defined(__myriad2__)
-# define SANITIZER_MYRIAD2 1
-#else
-# define SANITIZER_MYRIAD2 0
-#endif
-
#if defined(__riscv) && (__riscv_xlen == 64)
#define SANITIZER_RISCV64 1
#else
@@ -260,8 +253,12 @@
#define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 38)
#elif defined(__aarch64__)
# if SANITIZER_MAC
-// Darwin iOS/ARM64 has a 36-bit VMA, 64GiB VM
-# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 36)
+# if SANITIZER_OSX || SANITIZER_IOSSIM
+# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
+# else
+ // Darwin iOS/ARM64 has a 36-bit VMA, 64GiB VM
+# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 36)
+# endif
# else
# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 48)
# endif
@@ -364,9 +361,9 @@
# define SANITIZER_CACHE_LINE_SIZE 64
#endif
-// Enable offline markup symbolizer for Fuchsia and RTEMS.
-#if SANITIZER_FUCHSIA || SANITIZER_RTEMS
-#define SANITIZER_SYMBOLIZER_MARKUP 1
+// Enable offline markup symbolizer for Fuchsia.
+#if SANITIZER_FUCHSIA
+# define SANITIZER_SYMBOLIZER_MARKUP 1
#else
#define SANITIZER_SYMBOLIZER_MARKUP 0
#endif
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h b/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h
index 7f7b38d4215b..5b710c23fd00 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h
@@ -114,12 +114,6 @@
#define SI_NOT_FUCHSIA 1
#endif
-#if SANITIZER_RTEMS
-#define SI_NOT_RTEMS 0
-#else
-#define SI_NOT_RTEMS 1
-#endif
-
#if SANITIZER_SOLARIS
#define SI_SOLARIS 1
#else
@@ -226,7 +220,7 @@
(SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_GETPWENT \
(SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
-#define SANITIZER_INTERCEPT_FGETGRENT_R (SI_FREEBSD || SI_GLIBC || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_FGETGRENT_R (SI_GLIBC || SI_SOLARIS)
#define SANITIZER_INTERCEPT_FGETPWENT SI_LINUX_NOT_ANDROID || SI_SOLARIS
#define SANITIZER_INTERCEPT_GETPWENT_R \
(SI_FREEBSD || SI_NETBSD || SI_GLIBC || SI_SOLARIS)
@@ -458,7 +452,7 @@
#define SANITIZER_INTERCEPT_CTERMID_R (SI_MAC || SI_FREEBSD || SI_SOLARIS)
#define SANITIZER_INTERCEPTOR_HOOKS \
- (SI_LINUX || SI_MAC || SI_WINDOWS || SI_NETBSD || SI_SOLARIS)
+ (SI_LINUX || SI_MAC || SI_WINDOWS || SI_FREEBSD || SI_NETBSD || SI_SOLARIS)
#define SANITIZER_INTERCEPT_RECV_RECVFROM SI_POSIX
#define SANITIZER_INTERCEPT_SEND_SENDTO SI_POSIX
#define SANITIZER_INTERCEPT_EVENTFD_READ_WRITE SI_LINUX
@@ -482,13 +476,12 @@
#define SANITIZER_INTERCEPT_MMAP SI_POSIX
#define SANITIZER_INTERCEPT_MMAP64 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO (SI_GLIBC || SI_ANDROID)
-#define SANITIZER_INTERCEPT_MEMALIGN \
- (!SI_FREEBSD && !SI_MAC && !SI_NETBSD && SI_NOT_RTEMS)
+#define SANITIZER_INTERCEPT_MEMALIGN (!SI_FREEBSD && !SI_MAC && !SI_NETBSD)
#define SANITIZER_INTERCEPT___LIBC_MEMALIGN SI_GLIBC
#define SANITIZER_INTERCEPT_PVALLOC (SI_GLIBC || SI_ANDROID)
-#define SANITIZER_INTERCEPT_CFREE SI_GLIBC
+#define SANITIZER_INTERCEPT_CFREE (SI_GLIBC && !SANITIZER_RISCV64)
#define SANITIZER_INTERCEPT_REALLOCARRAY SI_POSIX
-#define SANITIZER_INTERCEPT_ALIGNED_ALLOC (!SI_MAC && SI_NOT_RTEMS)
+#define SANITIZER_INTERCEPT_ALIGNED_ALLOC (!SI_MAC)
#define SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE (!SI_MAC && !SI_NETBSD)
#define SANITIZER_INTERCEPT_MCHECK_MPROBE SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_WCSCAT SI_POSIX
@@ -584,5 +577,27 @@
(SI_POSIX && !(SANITIZER_MAC && SANITIZER_I386))
#define SANITIZER_INTERCEPT_UNAME (SI_POSIX && !SI_FREEBSD)
#define SANITIZER_INTERCEPT___XUNAME SI_FREEBSD
+#define SANITIZER_INTERCEPT_FLOPEN SI_FREEBSD
+
+// This macro gives a way for downstream users to override the above
+// interceptor macros irrespective of the platform they are on. They have
+// to do two things:
+// 1. Build compiler-rt with -DSANITIZER_OVERRIDE_INTERCEPTORS.
+// 2. Provide a header file named sanitizer_intercept_overriders.h in the
+// include path for their compiler-rt build.
+// An example of an overrider for strlen interceptor that one can list in
+// sanitizer_intercept_overriders.h is as follows:
+//
+// #ifdef SANITIZER_INTERCEPT_STRLEN
+// #undef SANITIZER_INTERCEPT_STRLEN
+// #define SANITIZER_INTERCEPT_STRLEN <value of choice>
+// #endif
+//
+// This "feature" is useful for downstream users who do not want some of
+// their libc functions to be intercepted. They can selectively disable
+// interception of those functions.
+#ifdef SANITIZER_OVERRIDE_INTERCEPTORS
+#include <sanitizer_intercept_overriders.h>
+#endif
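To make the override mechanism above concrete, an entirely hypothetical sanitizer_intercept_overriders.h that a downstream build could place on its include path might be as small as:

// sanitizer_intercept_overriders.h (hypothetical downstream file).
// Disable interception of strlen and memalign for this build.
#ifdef SANITIZER_INTERCEPT_STRLEN
#  undef SANITIZER_INTERCEPT_STRLEN
#  define SANITIZER_INTERCEPT_STRLEN 0
#endif
#ifdef SANITIZER_INTERCEPT_MEMALIGN
#  undef SANITIZER_INTERCEPT_MEMALIGN
#  define SANITIZER_INTERCEPT_MEMALIGN 0
#endif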
#endif // #ifndef SANITIZER_PLATFORM_INTERCEPTORS_H
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cpp
index b1c15be58dea..b5a45ae72cd9 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cpp
@@ -35,7 +35,10 @@
#include <sys/stat.h>
#include <sys/statvfs.h>
#include <sys/time.h>
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-W#warnings"
#include <sys/timeb.h>
+#pragma clang diagnostic pop
#include <sys/times.h>
#include <sys/timespec.h>
#include <sys/types.h>
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp
index 12dd39e674ac..6e5c330b98ef 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp
@@ -143,7 +143,6 @@ typedef struct user_fpregs elf_fpregset_t;
# include <sys/procfs.h>
#endif
#include <sys/user.h>
-#include <linux/cyclades.h>
#include <linux/if_eql.h>
#include <linux/if_plip.h>
#include <linux/lp.h>
@@ -210,7 +209,8 @@ namespace __sanitizer {
#if SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_MAC
unsigned struct_fstab_sz = sizeof(struct fstab);
-#endif // SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_MAC
+#endif // SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD ||
+ // SANITIZER_MAC
#if !SANITIZER_ANDROID
unsigned struct_statfs_sz = sizeof(struct statfs);
unsigned struct_sockaddr_sz = sizeof(struct sockaddr);
@@ -459,7 +459,6 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
#if SANITIZER_GLIBC
unsigned struct_ax25_parms_struct_sz = sizeof(struct ax25_parms_struct);
- unsigned struct_cyclades_monitor_sz = sizeof(struct cyclades_monitor);
#if EV_VERSION > (0x010000)
unsigned struct_input_keymap_entry_sz = sizeof(struct input_keymap_entry);
#else
@@ -823,15 +822,6 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
#endif // SANITIZER_LINUX
#if SANITIZER_LINUX && !SANITIZER_ANDROID
- unsigned IOCTL_CYGETDEFTHRESH = CYGETDEFTHRESH;
- unsigned IOCTL_CYGETDEFTIMEOUT = CYGETDEFTIMEOUT;
- unsigned IOCTL_CYGETMON = CYGETMON;
- unsigned IOCTL_CYGETTHRESH = CYGETTHRESH;
- unsigned IOCTL_CYGETTIMEOUT = CYGETTIMEOUT;
- unsigned IOCTL_CYSETDEFTHRESH = CYSETDEFTHRESH;
- unsigned IOCTL_CYSETDEFTIMEOUT = CYSETDEFTIMEOUT;
- unsigned IOCTL_CYSETTHRESH = CYSETTHRESH;
- unsigned IOCTL_CYSETTIMEOUT = CYSETTIMEOUT;
unsigned IOCTL_EQL_EMANCIPATE = EQL_EMANCIPATE;
unsigned IOCTL_EQL_ENSLAVE = EQL_ENSLAVE;
unsigned IOCTL_EQL_GETMASTRCFG = EQL_GETMASTRCFG;
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h
index 836b178c131b..4dd27644ed11 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h
@@ -650,14 +650,14 @@ struct __sanitizer_sigaction {
#endif // !SANITIZER_ANDROID
#if defined(__mips__)
-struct __sanitizer_kernel_sigset_t {
- uptr sig[2];
-};
+#define __SANITIZER_KERNEL_NSIG 128
#else
+#define __SANITIZER_KERNEL_NSIG 64
+#endif
+
struct __sanitizer_kernel_sigset_t {
- u8 sig[8];
+ uptr sig[__SANITIZER_KERNEL_NSIG / (sizeof(uptr) * 8)];
};
-#endif
// Linux system headers define the 'sa_handler' and 'sa_sigaction' macros.
#if SANITIZER_MIPS
@@ -983,7 +983,6 @@ extern unsigned struct_vt_mode_sz;
#if SANITIZER_LINUX && !SANITIZER_ANDROID
extern unsigned struct_ax25_parms_struct_sz;
-extern unsigned struct_cyclades_monitor_sz;
extern unsigned struct_input_keymap_entry_sz;
extern unsigned struct_ipx_config_data_sz;
extern unsigned struct_kbdiacrs_sz;
@@ -1328,15 +1327,6 @@ extern unsigned IOCTL_VT_WAITACTIVE;
#endif // SANITIZER_LINUX
#if SANITIZER_LINUX && !SANITIZER_ANDROID
-extern unsigned IOCTL_CYGETDEFTHRESH;
-extern unsigned IOCTL_CYGETDEFTIMEOUT;
-extern unsigned IOCTL_CYGETMON;
-extern unsigned IOCTL_CYGETTHRESH;
-extern unsigned IOCTL_CYGETTIMEOUT;
-extern unsigned IOCTL_CYSETDEFTHRESH;
-extern unsigned IOCTL_CYSETDEFTIMEOUT;
-extern unsigned IOCTL_CYSETTHRESH;
-extern unsigned IOCTL_CYSETTIMEOUT;
extern unsigned IOCTL_EQL_EMANCIPATE;
extern unsigned IOCTL_EQL_ENSLAVE;
extern unsigned IOCTL_EQL_GETMASTRCFG;
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_posix.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_posix.cpp
index 2e080098283f..f8457a6aac41 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_posix.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_posix.cpp
@@ -275,8 +275,8 @@ void ReportFile::Write(const char *buffer, uptr length) {
bool GetCodeRangeForFile(const char *module, uptr *start, uptr *end) {
MemoryMappingLayout proc_maps(/*cache_enabled*/false);
- InternalScopedString buff(kMaxPathLength);
- MemoryMappedSegment segment(buff.data(), kMaxPathLength);
+ InternalMmapVector<char> buff(kMaxPathLength);
+ MemoryMappedSegment segment(buff.data(), buff.size());
while (proc_maps.Next(&segment)) {
if (segment.IsExecutable() &&
internal_strcmp(module, segment.filename) == 0) {
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_posix.h b/compiler-rt/lib/sanitizer_common/sanitizer_posix.h
index e1a2b48e5cd8..b65dae644767 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_posix.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_posix.h
@@ -40,6 +40,10 @@ uptr internal_write(fd_t fd, const void *buf, uptr count);
uptr internal_mmap(void *addr, uptr length, int prot, int flags,
int fd, u64 offset);
uptr internal_munmap(void *addr, uptr length);
+#if SANITIZER_LINUX
+uptr internal_mremap(void *old_address, uptr old_size, uptr new_size, int flags,
+ void *new_address);
+#endif
int internal_mprotect(void *addr, uptr length, int prot);
int internal_madvise(uptr addr, uptr length, int advice);
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_posix_libcdep.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_posix_libcdep.cpp
index d29438cf9dbd..ddf6844bed13 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_posix_libcdep.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_posix_libcdep.cpp
@@ -128,14 +128,6 @@ void SetAddressSpaceUnlimited() {
CHECK(AddressSpaceIsUnlimited());
}
-void SleepForSeconds(int seconds) {
- sleep(seconds);
-}
-
-void SleepForMillis(int millis) {
- usleep(millis * 1000);
-}
-
void Abort() {
#if !SANITIZER_GO
// If we are handling SIGABRT, unhandle it first.
@@ -143,7 +135,7 @@ void Abort() {
if (GetHandleSignalMode(SIGABRT) != kHandleSignalNo) {
struct sigaction sigact;
internal_memset(&sigact, 0, sizeof(sigact));
- sigact.sa_sigaction = (sa_sigaction_t)SIG_DFL;
+ sigact.sa_handler = SIG_DFL;
internal_sigaction(SIGABRT, &sigact, nullptr);
}
#endif
@@ -165,7 +157,12 @@ bool SupportsColoredOutput(fd_t fd) {
#if !SANITIZER_GO
// TODO(glider): different tools may require different altstack size.
-static const uptr kAltStackSize = SIGSTKSZ * 4; // SIGSTKSZ is not enough.
+static uptr GetAltStackSize() {
+ // Note: since GLIBC_2.31, SIGSTKSZ may be a function call, so this may be
+ // more costly than you think. However, GetAltStackSize is only called 2-3
+ // times per thread, so don't cache the evaluation.
+ return SIGSTKSZ * 4;
+}
void SetAlternateSignalStack() {
stack_t altstack, oldstack;
@@ -176,10 +173,9 @@ void SetAlternateSignalStack() {
// TODO(glider): the mapped stack should have the MAP_STACK flag in the
// future. It is not required by man 2 sigaltstack now (they're using
// malloc()).
- void* base = MmapOrDie(kAltStackSize, __func__);
- altstack.ss_sp = (char*) base;
+ altstack.ss_size = GetAltStackSize();
+ altstack.ss_sp = (char *)MmapOrDie(altstack.ss_size, __func__);
altstack.ss_flags = 0;
- altstack.ss_size = kAltStackSize;
CHECK_EQ(0, sigaltstack(&altstack, nullptr));
}
@@ -187,7 +183,7 @@ void UnsetAlternateSignalStack() {
stack_t altstack, oldstack;
altstack.ss_sp = nullptr;
altstack.ss_flags = SS_DISABLE;
- altstack.ss_size = kAltStackSize; // Some sane value required on Darwin.
+ altstack.ss_size = GetAltStackSize(); // Some sane value required on Darwin.
CHECK_EQ(0, sigaltstack(&altstack, &oldstack));
UnmapOrDie(oldstack.ss_sp, oldstack.ss_size);
}
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_printf.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_printf.cpp
index a032787114bb..b913c92e16f1 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_printf.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_printf.cpp
@@ -20,6 +20,10 @@
#include <stdio.h>
#include <stdarg.h>
+#if defined(__x86_64__)
+# include <emmintrin.h>
+#endif
+
#if SANITIZER_WINDOWS && defined(_MSC_VER) && _MSC_VER < 1800 && \
!defined(va_copy)
# define va_copy(dst, src) ((dst) = (src))
@@ -128,7 +132,7 @@ static int AppendPointer(char **buff, const char *buff_end, u64 ptr_value) {
int VSNPrintf(char *buff, int buff_length,
const char *format, va_list args) {
static const char *kPrintfFormatsHelp =
- "Supported Printf formats: %([0-9]*)?(z|ll)?{d,u,x,X}; %p; "
+ "Supported Printf formats: %([0-9]*)?(z|ll)?{d,u,x,X,V}; %p; "
"%[-]([0-9]*)?(\\.\\*)?s; %c\n";
RAW_CHECK(format);
RAW_CHECK(buff_length > 0);
@@ -162,17 +166,15 @@ int VSNPrintf(char *buff, int buff_length,
cur += have_z;
bool have_ll = !have_z && (cur[0] == 'l' && cur[1] == 'l');
cur += have_ll * 2;
- s64 dval;
- u64 uval;
const bool have_length = have_z || have_ll;
const bool have_flags = have_width || have_length;
// At the moment only %s supports precision and left-justification.
CHECK(!((precision >= 0 || left_justified) && *cur != 's'));
switch (*cur) {
case 'd': {
- dval = have_ll ? va_arg(args, s64)
- : have_z ? va_arg(args, sptr)
- : va_arg(args, int);
+ s64 dval = have_ll ? va_arg(args, s64)
+ : have_z ? va_arg(args, sptr)
+ : va_arg(args, int);
result += AppendSignedDecimal(&buff, buff_end, dval, width,
pad_with_zero);
break;
@@ -180,14 +182,21 @@ int VSNPrintf(char *buff, int buff_length,
case 'u':
case 'x':
case 'X': {
- uval = have_ll ? va_arg(args, u64)
- : have_z ? va_arg(args, uptr)
- : va_arg(args, unsigned);
+ u64 uval = have_ll ? va_arg(args, u64)
+ : have_z ? va_arg(args, uptr)
+ : va_arg(args, unsigned);
bool uppercase = (*cur == 'X');
result += AppendUnsigned(&buff, buff_end, uval, (*cur == 'u') ? 10 : 16,
width, pad_with_zero, uppercase);
break;
}
+ case 'V': {
+ for (uptr i = 0; i < 16; i++) {
+ unsigned x = va_arg(args, unsigned);
+ result += AppendUnsigned(&buff, buff_end, x, 16, 2, true, false);
+ }
+ break;
+ }
case 'p': {
RAW_CHECK_MSG(!have_flags, kPrintfFormatsHelp);
result += AppendPointer(&buff, buff_end, va_arg(args, uptr));
@@ -249,26 +258,21 @@ static void NOINLINE SharedPrintfCodeNoBuffer(bool append_pid,
va_list args) {
va_list args2;
va_copy(args2, args);
- const int kLen = 16 * 1024;
- int needed_length;
+ InternalMmapVector<char> v;
+ int needed_length = 0;
char *buffer = local_buffer;
// First try to print a message using a local buffer, and then fall back to
// mmaped buffer.
- for (int use_mmap = 0; use_mmap < 2; use_mmap++) {
+ for (int use_mmap = 0;; use_mmap++) {
if (use_mmap) {
va_end(args);
va_copy(args, args2);
- buffer = (char*)MmapOrDie(kLen, "Report");
- buffer_size = kLen;
+ v.resize(needed_length + 1);
+ buffer_size = v.capacity();
+ v.resize(buffer_size);
+ buffer = &v[0];
}
needed_length = 0;
- // Check that data fits into the current buffer.
-# define CHECK_NEEDED_LENGTH \
- if (needed_length >= buffer_size) { \
- if (!use_mmap) continue; \
- RAW_CHECK_MSG(needed_length < kLen, \
- "Buffer in Report is too short!\n"); \
- }
// Fuchsia's logging infrastructure always keeps track of the logging
// process, thread, and timestamp, so never prepend such information.
if (!SANITIZER_FUCHSIA && append_pid) {
@@ -277,18 +281,20 @@ static void NOINLINE SharedPrintfCodeNoBuffer(bool append_pid,
if (common_flags()->log_exe_name && exe_name) {
needed_length += internal_snprintf(buffer, buffer_size,
"==%s", exe_name);
- CHECK_NEEDED_LENGTH
+ if (needed_length >= buffer_size)
+ continue;
}
needed_length += internal_snprintf(
buffer + needed_length, buffer_size - needed_length, "==%d==", pid);
- CHECK_NEEDED_LENGTH
+ if (needed_length >= buffer_size)
+ continue;
}
needed_length += VSNPrintf(buffer + needed_length,
buffer_size - needed_length, format, args);
- CHECK_NEEDED_LENGTH
+ if (needed_length >= buffer_size)
+ continue;
// If the message fit into the buffer, print it and exit.
break;
-# undef CHECK_NEEDED_LENGTH
}
RawWrite(buffer);
@@ -297,9 +303,6 @@ static void NOINLINE SharedPrintfCodeNoBuffer(bool append_pid,
CallPrintfAndReportCallback(buffer);
LogMessageOnPrintf(buffer);
- // If we had mapped any memory, clean up.
- if (buffer != local_buffer)
- UnmapOrDie((void *)buffer, buffer_size);
va_end(args2);
}
@@ -346,13 +349,24 @@ int internal_snprintf(char *buffer, uptr length, const char *format, ...) {
FORMAT(2, 3)
void InternalScopedString::append(const char *format, ...) {
- CHECK_LT(length_, size());
- va_list args;
- va_start(args, format);
- VSNPrintf(data() + length_, size() - length_, format, args);
- va_end(args);
- length_ += internal_strlen(data() + length_);
- CHECK_LT(length_, size());
+ uptr prev_len = length();
+
+ while (true) {
+ buffer_.resize(buffer_.capacity());
+
+ va_list args;
+ va_start(args, format);
+ uptr sz = VSNPrintf(buffer_.data() + prev_len, buffer_.size() - prev_len,
+ format, args);
+ va_end(args);
+ if (sz < buffer_.size() - prev_len) {
+ buffer_.resize(prev_len + sz + 1);
+ break;
+ }
+
+ buffer_.reserve(buffer_.capacity() * 2);
+ }
+ CHECK_EQ(buffer_[length()], '\0');
}
} // namespace __sanitizer
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_common.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_common.cpp
index f2cfcffaf476..1b7dd46d8de4 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_common.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_common.cpp
@@ -120,7 +120,7 @@ void MemoryMappingLayout::LoadFromCache() {
void MemoryMappingLayout::DumpListOfModules(
InternalMmapVectorNoCtor<LoadedModule> *modules) {
Reset();
- InternalScopedString module_name(kMaxPathLength);
+ InternalMmapVector<char> module_name(kMaxPathLength);
MemoryMappedSegment segment(module_name.data(), module_name.size());
for (uptr i = 0; Next(&segment); i++) {
const char *cur_name = segment.filename;
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_mac.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_mac.cpp
index d02afcfe87ae..1f53e3e46d8f 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_mac.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_mac.cpp
@@ -354,8 +354,8 @@ bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
void MemoryMappingLayout::DumpListOfModules(
InternalMmapVectorNoCtor<LoadedModule> *modules) {
Reset();
- InternalScopedString module_name(kMaxPathLength);
- MemoryMappedSegment segment(module_name.data(), kMaxPathLength);
+ InternalMmapVector<char> module_name(kMaxPathLength);
+ MemoryMappedSegment segment(module_name.data(), module_name.size());
MemoryMappedSegmentData data;
segment.data_ = &data;
while (Next(&segment)) {
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_ptrauth.h b/compiler-rt/lib/sanitizer_common/sanitizer_ptrauth.h
index a288068bf943..520035469485 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_ptrauth.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_ptrauth.h
@@ -11,6 +11,24 @@
#if __has_feature(ptrauth_calls)
#include <ptrauth.h>
+#elif defined(__ARM_FEATURE_PAC_DEFAULT) && !defined(__APPLE__)
+inline unsigned long ptrauth_strip(void* __value, unsigned int __key) {
+ // On the stack the link register is protected with Pointer
+ // Authentication Code when compiled with -mbranch-protection.
+ // Strip the PAC unconditionally because xpaclri is in the NOP space,
+ // so it does nothing when PAC is not enabled or not available.
+ unsigned long ret;
+ asm volatile(
+ "mov x30, %1\n\t"
+ "hint #7\n\t" // xpaclri
+ "mov %0, x30\n\t"
+ : "=r"(ret)
+ : "r"(__value)
+ : "x30");
+ return ret;
+}
+#define ptrauth_auth_data(__value, __old_key, __old_data) __value
+#define ptrauth_string_discriminator(__string) ((int)0)
#else
// Copied from <ptrauth.h>
#define ptrauth_strip(__value, __key) __value
@@ -18,6 +36,6 @@
#define ptrauth_string_discriminator(__string) ((int)0)
#endif
-#define STRIP_PC(pc) ((uptr)ptrauth_strip(pc, 0))
+#define STRIP_PAC_PC(pc) ((uptr)ptrauth_strip(pc, 0))
#endif // SANITIZER_PTRAUTH_H
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_quarantine.h b/compiler-rt/lib/sanitizer_common/sanitizer_quarantine.h
index 992f23152c6a..1a074d2bb700 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_quarantine.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_quarantine.h
@@ -149,7 +149,8 @@ class Quarantine {
Cache cache_;
char pad2_[kCacheLineSize];
- void NOINLINE Recycle(uptr min_size, Callback cb) {
+ void NOINLINE Recycle(uptr min_size, Callback cb) REQUIRES(recycle_mutex_)
+ RELEASE(recycle_mutex_) {
Cache tmp;
{
SpinMutexLock l(&cache_mutex_);
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_rtems.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_rtems.cpp
deleted file mode 100644
index d58bd08fb1a8..000000000000
--- a/compiler-rt/lib/sanitizer_common/sanitizer_rtems.cpp
+++ /dev/null
@@ -1,281 +0,0 @@
-//===-- sanitizer_rtems.cpp -----------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is shared between various sanitizers' runtime libraries and
-// implements RTEMS-specific functions.
-//===----------------------------------------------------------------------===//
-
-#include "sanitizer_rtems.h"
-#if SANITIZER_RTEMS
-
-#define posix_memalign __real_posix_memalign
-#define free __real_free
-#define memset __real_memset
-
-#include "sanitizer_file.h"
-#include "sanitizer_symbolizer.h"
-#include <errno.h>
-#include <fcntl.h>
-#include <pthread.h>
-#include <sched.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-
-// There is no mmap on RTEMS. Use memalign, etc.
-#define __mmap_alloc_aligned posix_memalign
-#define __mmap_free free
-#define __mmap_memset memset
-
-namespace __sanitizer {
-
-#include "sanitizer_syscall_generic.inc"
-
-void NORETURN internal__exit(int exitcode) {
- _exit(exitcode);
-}
-
-uptr internal_sched_yield() {
- return sched_yield();
-}
-
-uptr internal_getpid() {
- return getpid();
-}
-
-int internal_dlinfo(void *handle, int request, void *p) {
- UNIMPLEMENTED();
-}
-
-bool FileExists(const char *filename) {
- struct stat st;
- if (stat(filename, &st))
- return false;
- // Sanity check: filename is a regular file.
- return S_ISREG(st.st_mode);
-}
-
-uptr GetThreadSelf() { return static_cast<uptr>(pthread_self()); }
-
-tid_t GetTid() { return GetThreadSelf(); }
-
-void Abort() { abort(); }
-
-int Atexit(void (*function)(void)) { return atexit(function); }
-
-void SleepForSeconds(int seconds) { sleep(seconds); }
-
-void SleepForMillis(int millis) { usleep(millis * 1000); }
-
-bool SupportsColoredOutput(fd_t fd) { return false; }
-
-void GetThreadStackTopAndBottom(bool at_initialization,
- uptr *stack_top, uptr *stack_bottom) {
- pthread_attr_t attr;
- pthread_attr_init(&attr);
- CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
- void *base = nullptr;
- size_t size = 0;
- CHECK_EQ(pthread_attr_getstack(&attr, &base, &size), 0);
- CHECK_EQ(pthread_attr_destroy(&attr), 0);
-
- *stack_bottom = reinterpret_cast<uptr>(base);
- *stack_top = *stack_bottom + size;
-}
-
-void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
- uptr *tls_addr, uptr *tls_size) {
- uptr stack_top, stack_bottom;
- GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
- *stk_addr = stack_bottom;
- *stk_size = stack_top - stack_bottom;
- *tls_addr = *tls_size = 0;
-}
-
-void InitializePlatformEarly() {}
-void MaybeReexec() {}
-void CheckASLR() {}
-void CheckMPROTECT() {}
-void DisableCoreDumperIfNecessary() {}
-void InstallDeadlySignalHandlers(SignalHandlerType handler) {}
-void SetAlternateSignalStack() {}
-void UnsetAlternateSignalStack() {}
-void InitTlsSize() {}
-
-void SignalContext::DumpAllRegisters(void *context) {}
-const char *DescribeSignalOrException(int signo) { UNIMPLEMENTED(); }
-
-enum MutexState { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };
-
-BlockingMutex::BlockingMutex() {
- internal_memset(this, 0, sizeof(*this));
-}
-
-void BlockingMutex::Lock() {
- CHECK_EQ(owner_, 0);
- atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
- if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
- return;
- while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) {
- internal_sched_yield();
- }
-}
-
-void BlockingMutex::Unlock() {
- atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
- u32 v = atomic_exchange(m, MtxUnlocked, memory_order_release);
- CHECK_NE(v, MtxUnlocked);
-}
-
-void BlockingMutex::CheckLocked() {
- atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
- CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
-}
-
-uptr GetPageSize() { return getpagesize(); }
-
-uptr GetMmapGranularity() { return GetPageSize(); }
-
-uptr GetMaxVirtualAddress() {
- return (1ULL << 32) - 1; // 0xffffffff
-}
-
-void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
- void* ptr = 0;
- int res = __mmap_alloc_aligned(&ptr, GetPageSize(), size);
- if (UNLIKELY(res))
- ReportMmapFailureAndDie(size, mem_type, "allocate", res, raw_report);
- __mmap_memset(ptr, 0, size);
- IncreaseTotalMmap(size);
- return ptr;
-}
-
-void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
- void* ptr = 0;
- int res = __mmap_alloc_aligned(&ptr, GetPageSize(), size);
- if (UNLIKELY(res)) {
- if (res == ENOMEM)
- return nullptr;
- ReportMmapFailureAndDie(size, mem_type, "allocate", false);
- }
- __mmap_memset(ptr, 0, size);
- IncreaseTotalMmap(size);
- return ptr;
-}
-
-void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
- const char *mem_type) {
- CHECK(IsPowerOfTwo(size));
- CHECK(IsPowerOfTwo(alignment));
- void* ptr = 0;
- int res = __mmap_alloc_aligned(&ptr, alignment, size);
- if (res)
- ReportMmapFailureAndDie(size, mem_type, "align allocate", res, false);
- __mmap_memset(ptr, 0, size);
- IncreaseTotalMmap(size);
- return ptr;
-}
-
-void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
- return MmapOrDie(size, mem_type, false);
-}
-
-void UnmapOrDie(void *addr, uptr size) {
- if (!addr || !size) return;
- __mmap_free(addr);
- DecreaseTotalMmap(size);
-}
-
-fd_t OpenFile(const char *filename, FileAccessMode mode, error_t *errno_p) {
- int flags;
- switch (mode) {
- case RdOnly: flags = O_RDONLY; break;
- case WrOnly: flags = O_WRONLY | O_CREAT | O_TRUNC; break;
- case RdWr: flags = O_RDWR | O_CREAT; break;
- }
- fd_t res = open(filename, flags, 0660);
- if (internal_iserror(res, errno_p))
- return kInvalidFd;
- return res;
-}
-
-void CloseFile(fd_t fd) {
- close(fd);
-}
-
-bool ReadFromFile(fd_t fd, void *buff, uptr buff_size, uptr *bytes_read,
- error_t *error_p) {
- uptr res = read(fd, buff, buff_size);
- if (internal_iserror(res, error_p))
- return false;
- if (bytes_read)
- *bytes_read = res;
- return true;
-}
-
-bool WriteToFile(fd_t fd, const void *buff, uptr buff_size, uptr *bytes_written,
- error_t *error_p) {
- uptr res = write(fd, buff, buff_size);
- if (internal_iserror(res, error_p))
- return false;
- if (bytes_written)
- *bytes_written = res;
- return true;
-}
-
-void ReleaseMemoryPagesToOS(uptr beg, uptr end) {}
-void DumpProcessMap() {}
-
-// There is no page protection so everything is "accessible."
-bool IsAccessibleMemoryRange(uptr beg, uptr size) {
- return true;
-}
-
-char **GetArgv() { return nullptr; }
-char **GetEnviron() { return nullptr; }
-
-const char *GetEnv(const char *name) {
- return getenv(name);
-}
-
-uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
- internal_strncpy(buf, "StubBinaryName", buf_len);
- return internal_strlen(buf);
-}
-
-uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len) {
- internal_strncpy(buf, "StubProcessName", buf_len);
- return internal_strlen(buf);
-}
-
-bool IsPathSeparator(const char c) {
- return c == '/';
-}
-
-bool IsAbsolutePath(const char *path) {
- return path != nullptr && IsPathSeparator(path[0]);
-}
-
-void ReportFile::Write(const char *buffer, uptr length) {
- SpinMutexLock l(mu);
- static const char *kWriteError =
- "ReportFile::Write() can't output requested buffer!\n";
- ReopenIfNecessary();
- if (length != write(fd, buffer, length)) {
- write(fd, kWriteError, internal_strlen(kWriteError));
- Die();
- }
-}
-
-uptr MainThreadStackBase, MainThreadStackSize;
-uptr MainThreadTlsBase, MainThreadTlsSize;
-
-} // namespace __sanitizer
-
-#endif // SANITIZER_RTEMS
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_rtems.h b/compiler-rt/lib/sanitizer_common/sanitizer_rtems.h
deleted file mode 100644
index e8adfd500dff..000000000000
--- a/compiler-rt/lib/sanitizer_common/sanitizer_rtems.h
+++ /dev/null
@@ -1,20 +0,0 @@
-//===-- sanitizer_rtems.h ---------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is shared between various sanitizers' runtime libraries and
-// provides definitions for RTEMS-specific functions.
-//===----------------------------------------------------------------------===//
-#ifndef SANITIZER_RTEMS_H
-#define SANITIZER_RTEMS_H
-
-#include "sanitizer_platform.h"
-#if SANITIZER_RTEMS
-#include "sanitizer_common.h"
-
-#endif // SANITIZER_RTEMS
-#endif // SANITIZER_RTEMS_H
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_solaris.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_solaris.cpp
index 8789dcd10a95..cb53eab8da15 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_solaris.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_solaris.cpp
@@ -160,6 +160,13 @@ DECLARE__REAL_AND_INTERNAL(uptr, sched_yield, void) {
return sched_yield();
}
+DECLARE__REAL_AND_INTERNAL(void, usleep, u64 useconds) {
+ struct timespec ts;
+ ts.tv_sec = useconds / 1000000;
+ ts.tv_nsec = (useconds % 1000000) * 1000;
+ nanosleep(&ts, nullptr);
+}
+
DECLARE__REAL_AND_INTERNAL(uptr, execve, const char *filename,
char *const argv[], char *const envp[]) {
return _REAL(execve)(filename, argv, envp);
@@ -211,6 +218,13 @@ uptr internal_clock_gettime(__sanitizer_clockid_t clk_id, void *tp) {
}
// ----------------- sanitizer_common.h
+void FutexWait(atomic_uint32_t *p, u32 cmp) {
+ // FIXME: implement actual blocking.
+ sched_yield();
+}
+
+void FutexWake(atomic_uint32_t *p, u32 count) {}
+
BlockingMutex::BlockingMutex() {
CHECK(sizeof(mutex_t) <= sizeof(opaque_storage_));
internal_memset(this, 0, sizeof(*this));
@@ -231,9 +245,7 @@ void BlockingMutex::Unlock() {
CHECK_EQ(mutex_unlock((mutex_t *)&opaque_storage_), 0);
}
-void BlockingMutex::CheckLocked() {
- CHECK_EQ((uptr)thr_self(), owner_);
-}
+void BlockingMutex::CheckLocked() const { CHECK_EQ((uptr)thr_self(), owner_); }
} // namespace __sanitizer
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.cpp
index b0487d8987db..07e4409f4a5d 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.cpp
@@ -15,6 +15,7 @@
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
#include "sanitizer_platform.h"
+#include "sanitizer_ptrauth.h"
namespace __sanitizer {
@@ -122,7 +123,7 @@ void BufferedStackTrace::UnwindFast(uptr pc, uptr bp, uptr stack_top,
// frame[-1] contains the return address
uhwptr pc1 = frame[-1];
#else
- uhwptr pc1 = frame[1];
+ uhwptr pc1 = STRIP_PAC_PC((void *)frame[1]);
#endif
// Let's assume that any pointer in the 0th page (i.e. <0x1000 on i386 and
// x86_64) is invalid and stop unwinding here. If we're adding support for
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h b/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h
index 15616f899d01..ea330f36f7d7 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h
@@ -12,6 +12,7 @@
#ifndef SANITIZER_STACKTRACE_H
#define SANITIZER_STACKTRACE_H
+#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_platform.h"
@@ -32,8 +33,8 @@ static const u32 kStackTraceMax = 256;
// Fast unwind is the only option on Mac for now; we will need to
// revisit this macro when slow unwind works on Mac, see
// https://github.com/google/sanitizers/issues/137
-#if SANITIZER_MAC || SANITIZER_RTEMS
-# define SANITIZER_CAN_SLOW_UNWIND 0
+#if SANITIZER_MAC
+# define SANITIZER_CAN_SLOW_UNWIND 0
#else
# define SANITIZER_CAN_SLOW_UNWIND 1
#endif
@@ -56,6 +57,16 @@ struct StackTrace {
// Prints a symbolized stacktrace, followed by an empty line.
void Print() const;
+ // Prints a symbolized stacktrace to the output string, followed by an empty
+ // line.
+ void PrintTo(InternalScopedString *output) const;
+
+ // Prints a symbolized stacktrace to the output buffer, followed by an empty
+ // line. Returns the number of characters that should have been written to
+ // the buffer (not including the trailing '\0'). Thus, the string is
+ // truncated iff the return value is not less than "out_buf_size".
+ uptr PrintTo(char *out_buf, uptr out_buf_size) const;
+
static bool WillUseFastUnwind(bool request_fast_unwind) {
if (!SANITIZER_CAN_FAST_UNWIND)
return false;
@@ -185,5 +196,26 @@ static inline bool IsValidFrame(uptr frame, uptr stack_top, uptr stack_bottom) {
uptr local_stack; \
uptr sp = (uptr)&local_stack
+// GET_CURRENT_PC() is equivalent to StackTrace::GetCurrentPc().
+// Optimized x86 version is faster than GetCurrentPc because
+// it does not involve a function call, instead it reads RIP register.
+// Reads of RIP by an instruction return RIP pointing to the next
+// instruction, which is exactly what we want here, thus 0 offset.
+// It needs to be a macro because otherwise we will get the name
+// of this function on the top of most stacks. Attribute artificial
+// does not do what it claims to do, unfortunatley. And attribute
+// __nodebug__ is clang-only. If we would have an attribute that
+// would remove this function from debug info, we could simply make
+// StackTrace::GetCurrentPc() faster.
+#if defined(__x86_64__)
+# define GET_CURRENT_PC() \
+ ({ \
+ uptr pc; \
+ asm("lea 0(%%rip), %0" : "=r"(pc)); \
+ pc; \
+ })
+#else
+# define GET_CURRENT_PC() StackTrace::GetCurrentPc()
+#endif
#endif // SANITIZER_STACKTRACE_H
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_libcdep.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_libcdep.cpp
index 7808ba9b0f57..f60ea7731748 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_libcdep.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_libcdep.cpp
@@ -18,46 +18,119 @@
namespace __sanitizer {
-void StackTrace::Print() const {
+namespace {
+
+class StackTraceTextPrinter {
+ public:
+ StackTraceTextPrinter(const char *stack_trace_fmt, char frame_delimiter,
+ InternalScopedString *output,
+ InternalScopedString *dedup_token)
+ : stack_trace_fmt_(stack_trace_fmt),
+ frame_delimiter_(frame_delimiter),
+ output_(output),
+ dedup_token_(dedup_token),
+ symbolize_(RenderNeedsSymbolization(stack_trace_fmt)) {}
+
+ bool ProcessAddressFrames(uptr pc) {
+ SymbolizedStack *frames = symbolize_
+ ? Symbolizer::GetOrInit()->SymbolizePC(pc)
+ : SymbolizedStack::New(pc);
+ if (!frames)
+ return false;
+
+ for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
+ uptr prev_len = output_->length();
+ RenderFrame(output_, stack_trace_fmt_, frame_num_++, cur->info.address,
+ symbolize_ ? &cur->info : nullptr,
+ common_flags()->symbolize_vs_style,
+ common_flags()->strip_path_prefix);
+
+ if (prev_len != output_->length())
+ output_->append("%c", frame_delimiter_);
+
+ ExtendDedupToken(cur);
+ }
+ frames->ClearAll();
+ return true;
+ }
+
+ private:
+ // Extend the dedup token by appending a new frame.
+ void ExtendDedupToken(SymbolizedStack *stack) {
+ if (!dedup_token_)
+ return;
+
+ if (dedup_frames_-- > 0) {
+ if (dedup_token_->length())
+ dedup_token_->append("--");
+ if (stack->info.function != nullptr)
+ dedup_token_->append(stack->info.function);
+ }
+ }
+
+ const char *stack_trace_fmt_;
+ const char frame_delimiter_;
+ int dedup_frames_ = common_flags()->dedup_token_length;
+ uptr frame_num_ = 0;
+ InternalScopedString *output_;
+ InternalScopedString *dedup_token_;
+ const bool symbolize_ = false;
+};
+
+static void CopyStringToBuffer(const InternalScopedString &str, char *out_buf,
+ uptr out_buf_size) {
+ if (!out_buf_size)
+ return;
+
+ CHECK_GT(out_buf_size, 0);
+ uptr copy_size = Min(str.length(), out_buf_size - 1);
+ internal_memcpy(out_buf, str.data(), copy_size);
+ out_buf[copy_size] = '\0';
+}
+
+} // namespace
+
+void StackTrace::PrintTo(InternalScopedString *output) const {
+ CHECK(output);
+
+ InternalScopedString dedup_token;
+ StackTraceTextPrinter printer(common_flags()->stack_trace_format, '\n',
+ output, &dedup_token);
+
if (trace == nullptr || size == 0) {
- Printf(" <empty stack>\n\n");
+ output->append(" <empty stack>\n\n");
return;
}
- InternalScopedString frame_desc(GetPageSizeCached() * 2);
- InternalScopedString dedup_token(GetPageSizeCached());
- int dedup_frames = common_flags()->dedup_token_length;
- bool symbolize = RenderNeedsSymbolization(common_flags()->stack_trace_format);
- uptr frame_num = 0;
+
for (uptr i = 0; i < size && trace[i]; i++) {
// PCs in stack traces are actually the return addresses, that is,
// addresses of the next instructions after the call.
uptr pc = GetPreviousInstructionPc(trace[i]);
- SymbolizedStack *frames;
- if (symbolize)
- frames = Symbolizer::GetOrInit()->SymbolizePC(pc);
- else
- frames = SymbolizedStack::New(pc);
- CHECK(frames);
- for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
- frame_desc.clear();
- RenderFrame(&frame_desc, common_flags()->stack_trace_format, frame_num++,
- cur->info.address, symbolize ? &cur->info : nullptr,
- common_flags()->symbolize_vs_style,
- common_flags()->strip_path_prefix);
- Printf("%s\n", frame_desc.data());
- if (dedup_frames-- > 0) {
- if (dedup_token.length())
- dedup_token.append("--");
- if (cur->info.function != nullptr)
- dedup_token.append(cur->info.function);
- }
- }
- frames->ClearAll();
+ CHECK(printer.ProcessAddressFrames(pc));
}
- // Always print a trailing empty line after stack trace.
- Printf("\n");
+
+ // Always add a trailing empty line after stack trace.
+ output->append("\n");
+
+ // Append deduplication token, if non-empty.
if (dedup_token.length())
- Printf("DEDUP_TOKEN: %s\n", dedup_token.data());
+ output->append("DEDUP_TOKEN: %s\n", dedup_token.data());
+}
+
+uptr StackTrace::PrintTo(char *out_buf, uptr out_buf_size) const {
+ CHECK(out_buf);
+
+ InternalScopedString output;
+ PrintTo(&output);
+ CopyStringToBuffer(output, out_buf, out_buf_size);
+
+ return output.length();
+}
+
+void StackTrace::Print() const {
+ InternalScopedString output;
+ PrintTo(&output);
+ Printf("%s", output.data());
}
void BufferedStackTrace::Unwind(u32 max_depth, uptr pc, uptr bp, void *context,
@@ -82,12 +155,15 @@ void BufferedStackTrace::Unwind(u32 max_depth, uptr pc, uptr bp, void *context,
UnwindSlow(pc, context, max_depth);
else
UnwindSlow(pc, max_depth);
+    // If there are too few frames, the program may have been built with
+    // -fno-asynchronous-unwind-tables. Fall back to the fast unwinder below.
+ if (size > 2 || size >= max_depth)
+ return;
#else
UNREACHABLE("slow unwind requested but not available");
#endif
- } else {
- UnwindFast(pc, bp, stack_top, stack_bottom, max_depth);
}
+ UnwindFast(pc, bp, stack_top, stack_bottom, max_depth);
}
static int GetModuleAndOffsetForPc(uptr pc, char *module_name,
@@ -112,41 +188,18 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_symbolize_pc(uptr pc, const char *fmt, char *out_buf,
uptr out_buf_size) {
- if (!out_buf_size) return;
- pc = StackTrace::GetPreviousInstructionPc(pc);
- SymbolizedStack *frame;
- bool symbolize = RenderNeedsSymbolization(fmt);
- if (symbolize)
- frame = Symbolizer::GetOrInit()->SymbolizePC(pc);
- else
- frame = SymbolizedStack::New(pc);
- if (!frame) {
- internal_strncpy(out_buf, "<can't symbolize>", out_buf_size);
- out_buf[out_buf_size - 1] = 0;
+ if (!out_buf_size)
return;
+
+ pc = StackTrace::GetPreviousInstructionPc(pc);
+
+ InternalScopedString output;
+ StackTraceTextPrinter printer(fmt, '\0', &output, nullptr);
+ if (!printer.ProcessAddressFrames(pc)) {
+ output.clear();
+ output.append("<can't symbolize>");
}
- InternalScopedString frame_desc(GetPageSizeCached());
- uptr frame_num = 0;
- // Reserve one byte for the final 0.
- char *out_end = out_buf + out_buf_size - 1;
- for (SymbolizedStack *cur = frame; cur && out_buf < out_end;
- cur = cur->next) {
- frame_desc.clear();
- RenderFrame(&frame_desc, fmt, frame_num++, cur->info.address,
- symbolize ? &cur->info : nullptr,
- common_flags()->symbolize_vs_style,
- common_flags()->strip_path_prefix);
- if (!frame_desc.length())
- continue;
- // Reserve one byte for the terminating 0.
- uptr n = out_end - out_buf - 1;
- internal_strncpy(out_buf, frame_desc.data(), n);
- out_buf += __sanitizer::Min<uptr>(n, frame_desc.length());
- *out_buf++ = 0;
- }
- CHECK(out_buf <= out_end);
- *out_buf = 0;
- frame->ClearAll();
+ CopyStringToBuffer(output, out_buf, out_buf_size);
}
SANITIZER_INTERFACE_ATTRIBUTE
@@ -156,7 +209,7 @@ void __sanitizer_symbolize_global(uptr data_addr, const char *fmt,
out_buf[0] = 0;
DataInfo DI;
if (!Symbolizer::GetOrInit()->SymbolizeData(data_addr, &DI)) return;
- InternalScopedString data_desc(GetPageSizeCached());
+ InternalScopedString data_desc;
RenderData(&data_desc, fmt, &DI, common_flags()->strip_path_prefix);
internal_strncpy(out_buf, data_desc.data(), out_buf_size);
out_buf[out_buf_size - 1] = 0;
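
The new PrintTo(char *, uptr) follows a snprintf-style contract. A standalone analogue, with invented names and std::string in place of InternalScopedString, might look like:

#include <algorithm>
#include <cstdio>
#include <cstring>
#include <string>

// Returns the length the output wants, excluding the trailing '\0'; the
// destination is truncated iff the return value is >= out_size.
static size_t CopyToBuffer(const std::string &s, char *out, size_t out_size) {
  if (out_size) {
    size_t n = std::min(s.size(), out_size - 1);
    std::memcpy(out, s.data(), n);
    out[n] = '\0';
  }
  return s.size();
}

int main() {
  char buf[16];
  size_t need = CopyToBuffer("#0 0x401234 in main demo.cpp:5\n", buf, sizeof(buf));
  if (need >= sizeof(buf))
    printf("truncated to \"%s\" (needed %zu characters)\n", buf, need);
  return 0;
}
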
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_suppressions.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_suppressions.cpp
index 44c83a66c5fe..a674034b8e29 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_suppressions.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_suppressions.cpp
@@ -34,7 +34,7 @@ SuppressionContext::SuppressionContext(const char *suppression_types[],
static bool GetPathAssumingFileIsRelativeToExec(const char *file_path,
/*out*/char *new_file_path,
uptr new_file_path_size) {
- InternalScopedString exec(kMaxPathLength);
+ InternalMmapVector<char> exec(kMaxPathLength);
if (ReadBinaryNameCached(exec.data(), exec.size())) {
const char *file_name_pos = StripModuleName(exec.data());
uptr path_to_exec_len = file_name_pos - exec.data();
@@ -69,7 +69,7 @@ void SuppressionContext::ParseFromFile(const char *filename) {
if (filename[0] == '\0')
return;
- InternalScopedString new_file_path(kMaxPathLength);
+ InternalMmapVector<char> new_file_path(kMaxPathLength);
filename = FindFile(filename, new_file_path.data(), new_file_path.size());
// Read the file.
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cpp
index 710da4c1cecd..98418b426c37 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cpp
@@ -356,7 +356,7 @@ void ParseSymbolizePCOutput(const char *str, SymbolizedStack *res) {
InternalFree(info->function);
info->function = 0;
}
- if (0 == internal_strcmp(info->file, "??")) {
+ if (info->file && 0 == internal_strcmp(info->file, "??")) {
InternalFree(info->file);
info->file = 0;
}
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup.cpp
index 30cba08ed539..9a5b4a8c54c7 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup.cpp
@@ -16,14 +16,13 @@
#if SANITIZER_FUCHSIA
#include "sanitizer_symbolizer_fuchsia.h"
-#elif SANITIZER_RTEMS
-#include "sanitizer_symbolizer_rtems.h"
-#endif
-#include "sanitizer_stacktrace.h"
-#include "sanitizer_symbolizer.h"
+# endif
-#include <limits.h>
-#include <unwind.h>
+# include <limits.h>
+# include <unwind.h>
+
+# include "sanitizer_stacktrace.h"
+# include "sanitizer_symbolizer.h"
namespace __sanitizer {
@@ -54,6 +53,10 @@ bool Symbolizer::GetModuleNameAndOffsetForPC(uptr pc, const char **module_name,
return false;
}
+// This is mainly used by hwasan for online symbolization. It isn't needed
+// here, since hwasan can always just dump stack frames for offline
+// symbolization.
+bool Symbolizer::SymbolizeFrame(uptr addr, FrameInfo *info) { return false; }
+
// This is used in some places for suppression checking, which we
// don't really support for Fuchsia. It's also used in UBSan to
// identify a PC location to a function name, so we always fill in
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp
index 4dd5cc3ad7cb..4cd4b4636f0a 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp
@@ -400,11 +400,20 @@ const char *Symbolizer::PlatformDemangle(const char *name) {
static SymbolizerTool *ChooseExternalSymbolizer(LowLevelAllocator *allocator) {
const char *path = common_flags()->external_symbolizer_path;
+
+ if (path && internal_strchr(path, '%')) {
+ char *new_path = (char *)InternalAlloc(kMaxPathLength);
+ SubstituteForFlagValue(path, new_path, kMaxPathLength);
+ path = new_path;
+ }
+
const char *binary_name = path ? StripModuleName(path) : "";
+ static const char kLLVMSymbolizerPrefix[] = "llvm-symbolizer";
if (path && path[0] == '\0') {
VReport(2, "External symbolizer is explicitly disabled.\n");
return nullptr;
- } else if (!internal_strcmp(binary_name, "llvm-symbolizer")) {
+ } else if (!internal_strncmp(binary_name, kLLVMSymbolizerPrefix,
+ internal_strlen(kLLVMSymbolizerPrefix))) {
VReport(2, "Using llvm-symbolizer at user-specified path: %s\n", path);
return new(*allocator) LLVMSymbolizer(path, allocator);
} else if (!internal_strcmp(binary_name, "atos")) {
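
The switch from internal_strcmp to a prefix match means suffixed binaries still select the LLVM symbolizer. A hedged standalone sketch of that check (helper name and driver invented):

#include <cstdio>
#include <cstring>

static bool LooksLikeLLVMSymbolizer(const char *binary_name) {
  static const char kPrefix[] = "llvm-symbolizer";
  return strncmp(binary_name, kPrefix, strlen(kPrefix)) == 0;
}

int main() {
  const char *candidates[] = {"llvm-symbolizer", "llvm-symbolizer-12",
                              "llvm-symbolizer.exe", "atos", "addr2line"};
  for (const char *name : candidates)
    printf("%-22s -> %s\n", name,
           LooksLikeLLVMSymbolizer(name) ? "llvm-symbolizer" : "other tool");
  return 0;
}
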
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_report.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_report.cpp
index c99a6ceaa562..f330ed36640a 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_report.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_report.cpp
@@ -31,7 +31,7 @@ namespace __sanitizer {
void ReportErrorSummary(const char *error_type, const AddressInfo &info,
const char *alt_tool_name) {
if (!common_flags()->print_summary) return;
- InternalScopedString buff(kMaxSummaryLength);
+ InternalScopedString buff;
buff.append("%s ", error_type);
RenderFrame(&buff, "%L %F", 0, info.address, &info,
common_flags()->symbolize_vs_style,
@@ -120,7 +120,7 @@ void ReportMmapWriteExec(int prot) {
#endif
}
-#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS && !SANITIZER_GO
+#if !SANITIZER_FUCHSIA && !SANITIZER_GO
void StartReportDeadlySignal() {
// Write the first message using fd=2, just in case.
// It may actually fail to write in case stderr is closed.
@@ -150,7 +150,7 @@ static void PrintMemoryByte(InternalScopedString *str, const char *before,
static void MaybeDumpInstructionBytes(uptr pc) {
if (!common_flags()->dump_instruction_bytes || (pc < GetPageSizeCached()))
return;
- InternalScopedString str(1024);
+ InternalScopedString str;
str.append("First 16 instruction bytes at pc: ");
if (IsAccessibleMemoryRange(pc, 16)) {
for (int i = 0; i < 16; ++i) {
@@ -250,17 +250,17 @@ void HandleDeadlySignal(void *siginfo, void *context, u32 tid,
#endif // !SANITIZER_FUCHSIA && !SANITIZER_GO
-static atomic_uintptr_t reporting_thread = {0};
-static StaticSpinMutex CommonSanitizerReportMutex;
+atomic_uintptr_t ScopedErrorReportLock::reporting_thread_ = {0};
+StaticSpinMutex ScopedErrorReportLock::mutex_;
-ScopedErrorReportLock::ScopedErrorReportLock() {
+void ScopedErrorReportLock::Lock() {
uptr current = GetThreadSelf();
for (;;) {
uptr expected = 0;
- if (atomic_compare_exchange_strong(&reporting_thread, &expected, current,
+ if (atomic_compare_exchange_strong(&reporting_thread_, &expected, current,
memory_order_relaxed)) {
// We've claimed reporting_thread so proceed.
- CommonSanitizerReportMutex.Lock();
+ mutex_.Lock();
return;
}
@@ -282,13 +282,11 @@ ScopedErrorReportLock::ScopedErrorReportLock() {
}
}
-ScopedErrorReportLock::~ScopedErrorReportLock() {
- CommonSanitizerReportMutex.Unlock();
- atomic_store_relaxed(&reporting_thread, 0);
+void ScopedErrorReportLock::Unlock() {
+ mutex_.Unlock();
+ atomic_store_relaxed(&reporting_thread_, 0);
}
-void ScopedErrorReportLock::CheckLocked() {
- CommonSanitizerReportMutex.CheckLocked();
-}
+void ScopedErrorReportLock::CheckLocked() { mutex_.CheckLocked(); }
} // namespace __sanitizer
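
The ScopedErrorReportLock change above keeps the same claim-then-lock scheme behind static members. A minimal standalone sketch using std::atomic and std::mutex (class and helper names invented; the recursion handling of the real code is omitted):

#include <atomic>
#include <cstdint>
#include <cstdio>
#include <functional>
#include <mutex>
#include <thread>

class ReportLock {
 public:
  void Lock() {
    const uintptr_t self = SelfId();
    for (;;) {
      uintptr_t expected = 0;
      if (reporting_thread_.compare_exchange_strong(
              expected, self, std::memory_order_relaxed)) {
        mutex_.lock();  // We won the claim; serialize the report itself.
        return;
      }
      std::this_thread::yield();  // Another thread is reporting; wait.
    }
  }
  void Unlock() {
    mutex_.unlock();
    reporting_thread_.store(0, std::memory_order_relaxed);
  }

 private:
  static uintptr_t SelfId() {
    // Nonzero id for the current thread (0 means "unclaimed").
    return std::hash<std::thread::id>()(std::this_thread::get_id()) | 1;
  }
  std::atomic<uintptr_t> reporting_thread_{0};
  std::mutex mutex_;
};

int main() {
  ReportLock lock;
  lock.Lock();
  puts("reporting...");
  lock.Unlock();
  return 0;
}
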
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_rtems.h b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_rtems.h
deleted file mode 100644
index 3371092e0687..000000000000
--- a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_rtems.h
+++ /dev/null
@@ -1,40 +0,0 @@
-//===-- sanitizer_symbolizer_rtems.h -----------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is shared between various sanitizers' runtime libraries.
-//
-// Define RTEMS's string formats and limits for the markup symbolizer.
-//===----------------------------------------------------------------------===//
-#ifndef SANITIZER_SYMBOLIZER_RTEMS_H
-#define SANITIZER_SYMBOLIZER_RTEMS_H
-
-#include "sanitizer_internal_defs.h"
-
-namespace __sanitizer {
-
-// The Myriad RTEMS symbolizer currently only parses backtrace lines,
-// so use a format that the symbolizer understands. For other
-// markups, keep them the same as the Fuchsia's.
-
-// This is used by UBSan for type names, and by ASan for global variable names.
-constexpr const char *kFormatDemangle = "{{{symbol:%s}}}";
-constexpr uptr kFormatDemangleMax = 1024; // Arbitrary.
-
-// Function name or equivalent from PC location.
-constexpr const char *kFormatFunction = "{{{pc:%p}}}";
-constexpr uptr kFormatFunctionMax = 64; // More than big enough for 64-bit hex.
-
-// Global variable name or equivalent from data memory address.
-constexpr const char *kFormatData = "{{{data:%p}}}";
-
-// One frame in a backtrace (printed on a line by itself).
-constexpr const char *kFormatFrame = " [%u] IP: %p";
-
-} // namespace __sanitizer
-
-#endif // SANITIZER_SYMBOLIZER_RTEMS_H
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_win.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_win.cpp
index 7db7d3b0eb9d..702d901353db 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_win.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_win.cpp
@@ -133,16 +133,13 @@ void InitializeDbgHelpIfNeeded() {
}
}
-#ifdef __clang__
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wframe-larger-than="
-#endif
bool WinSymbolizerTool::SymbolizePC(uptr addr, SymbolizedStack *frame) {
InitializeDbgHelpIfNeeded();
// See https://docs.microsoft.com/en-us/windows/win32/debug/retrieving-symbol-information-by-address
- char buffer[sizeof(SYMBOL_INFO) + MAX_SYM_NAME * sizeof(CHAR)];
- PSYMBOL_INFO symbol = (PSYMBOL_INFO)buffer;
+ InternalMmapVector<char> buffer(sizeof(SYMBOL_INFO) +
+ MAX_SYM_NAME * sizeof(CHAR));
+ PSYMBOL_INFO symbol = (PSYMBOL_INFO)&buffer[0];
symbol->SizeOfStruct = sizeof(SYMBOL_INFO);
symbol->MaxNameLen = MAX_SYM_NAME;
DWORD64 offset = 0;
@@ -166,9 +163,6 @@ bool WinSymbolizerTool::SymbolizePC(uptr addr, SymbolizedStack *frame) {
// Otherwise, try llvm-symbolizer.
return got_fileline;
}
-#ifdef __clang__
-#pragma clang diagnostic pop
-#endif
const char *WinSymbolizerTool::Demangle(const char *name) {
CHECK(is_dbghelp_initialized);
@@ -230,7 +224,7 @@ bool SymbolizerProcess::StartSymbolizerSubprocess() {
// Compute the command line. Wrap double quotes around everything.
const char *argv[kArgVMax];
GetArgV(path_, argv);
- InternalScopedString command_line(kMaxPathLength * 3);
+ InternalScopedString command_line;
for (int i = 0; argv[i]; i++) {
const char *arg = argv[i];
int arglen = internal_strlen(arg);
@@ -288,8 +282,15 @@ static void ChooseSymbolizerTools(IntrusiveList<SymbolizerTool> *list,
return;
}
- // Add llvm-symbolizer in case the binary has dwarf.
+ // Add llvm-symbolizer.
const char *user_path = common_flags()->external_symbolizer_path;
+
+ if (user_path && internal_strchr(user_path, '%')) {
+ char *new_path = (char *)InternalAlloc(kMaxPathLength);
+ SubstituteForFlagValue(user_path, new_path, kMaxPathLength);
+ user_path = new_path;
+ }
+
const char *path =
user_path ? user_path : FindPathToBinary("llvm-symbolizer.exe");
if (path) {
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_termination.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_termination.cpp
index 84be6fc32342..6a54734353c5 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_termination.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_termination.cpp
@@ -59,26 +59,31 @@ void NORETURN Die() {
internal__exit(common_flags()->exitcode);
}
-static CheckFailedCallbackType CheckFailedCallback;
-void SetCheckFailedCallback(CheckFailedCallbackType callback) {
- CheckFailedCallback = callback;
+static void (*CheckUnwindCallback)();
+void SetCheckUnwindCallback(void (*callback)()) {
+ CheckUnwindCallback = callback;
}
-const int kSecondsToSleepWhenRecursiveCheckFailed = 2;
-
void NORETURN CheckFailed(const char *file, int line, const char *cond,
u64 v1, u64 v2) {
- static atomic_uint32_t num_calls;
- if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) > 10) {
- SleepForSeconds(kSecondsToSleepWhenRecursiveCheckFailed);
+ u32 tid = GetTid();
+ Printf("%s: CHECK failed: %s:%d \"%s\" (0x%zx, 0x%zx) (tid=%u)\n",
+ SanitizerToolName, StripModuleName(file), line, cond, (uptr)v1,
+ (uptr)v2, tid);
+ static atomic_uint32_t first_tid;
+ u32 cmp = 0;
+ if (!atomic_compare_exchange_strong(&first_tid, &cmp, tid,
+ memory_order_relaxed)) {
+ if (cmp == tid) {
+ // Recursing into CheckFailed.
+ } else {
+      // Another thread is already failing; let it print the stack and
+      // terminate.
+ SleepForSeconds(2);
+ }
Trap();
}
-
- if (CheckFailedCallback) {
- CheckFailedCallback(file, line, cond, v1, v2);
- }
- Report("Sanitizer CHECK failed: %s:%d %s (%lld, %lld)\n", file, line, cond,
- v1, v2);
+ if (CheckUnwindCallback)
+ CheckUnwindCallback();
Die();
}
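
The rewritten CheckFailed arbitrates between the first failing thread, a recursing reporter, and any other failing thread. A compressed standalone sketch of that logic (names invented, assumes nonzero tids):

#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>

static std::atomic<unsigned> first_failing_tid{0};

[[noreturn]] static void CheckFailedSketch(unsigned tid) {  // tid must be nonzero
  fprintf(stderr, "CHECK failed (tid=%u)\n", tid);
  unsigned expected = 0;
  if (!first_failing_tid.compare_exchange_strong(expected, tid)) {
    if (expected != tid) {
      // Another thread is already reporting; give it time to finish.
      std::this_thread::sleep_for(std::chrono::seconds(2));
    }
    // Either we are recursing into CheckFailed or we waited long enough.
    __builtin_trap();
  }
  // First failure in the process: print the stack here, then die.
  __builtin_trap();
}

int main() {
  CheckFailedSketch(1);  // Traps after printing, as the real CheckFailed dies.
}
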
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.cpp
index f2c6f2799315..745fbf76b01f 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.cpp
@@ -85,7 +85,7 @@ void ThreadContextBase::SetCreated(uptr _user_id, u64 _unique_id,
unique_id = _unique_id;
detached = _detached;
// Parent tid makes no sense for the main thread.
- if (tid != 0)
+ if (tid != kMainTid)
parent_tid = _parent_tid;
OnCreated(arg);
}
@@ -99,7 +99,8 @@ void ThreadContextBase::Reset() {
// ThreadRegistry implementation.
-const u32 ThreadRegistry::kUnknownTid = ~0U;
+ThreadRegistry::ThreadRegistry(ThreadContextFactory factory)
+ : ThreadRegistry(factory, UINT32_MAX, UINT32_MAX, 0) {}
ThreadRegistry::ThreadRegistry(ThreadContextFactory factory, u32 max_threads,
u32 thread_quarantine_size, u32 max_reuse)
@@ -108,13 +109,10 @@ ThreadRegistry::ThreadRegistry(ThreadContextFactory factory, u32 max_threads,
thread_quarantine_size_(thread_quarantine_size),
max_reuse_(max_reuse),
mtx_(),
- n_contexts_(0),
total_threads_(0),
alive_threads_(0),
max_alive_threads_(0),
running_threads_(0) {
- threads_ = (ThreadContextBase **)MmapOrDie(max_threads_ * sizeof(threads_[0]),
- "ThreadRegistry");
dead_threads_.clear();
invalid_threads_.clear();
}
@@ -122,7 +120,8 @@ ThreadRegistry::ThreadRegistry(ThreadContextFactory factory, u32 max_threads,
void ThreadRegistry::GetNumberOfThreads(uptr *total, uptr *running,
uptr *alive) {
BlockingMutexLock l(&mtx_);
- if (total) *total = n_contexts_;
+ if (total)
+ *total = threads_.size();
if (running) *running = running_threads_;
if (alive) *alive = alive_threads_;
}
@@ -135,15 +134,15 @@ uptr ThreadRegistry::GetMaxAliveThreads() {
u32 ThreadRegistry::CreateThread(uptr user_id, bool detached, u32 parent_tid,
void *arg) {
BlockingMutexLock l(&mtx_);
- u32 tid = kUnknownTid;
+ u32 tid = kInvalidTid;
ThreadContextBase *tctx = QuarantinePop();
if (tctx) {
tid = tctx->tid;
- } else if (n_contexts_ < max_threads_) {
+ } else if (threads_.size() < max_threads_) {
// Allocate new thread context and tid.
- tid = n_contexts_++;
+ tid = threads_.size();
tctx = context_factory_(tid);
- threads_[tid] = tctx;
+ threads_.push_back(tctx);
} else {
#if !SANITIZER_GO
Report("%s: Thread limit (%u threads) exceeded. Dying.\n",
@@ -155,7 +154,7 @@ u32 ThreadRegistry::CreateThread(uptr user_id, bool detached, u32 parent_tid,
Die();
}
CHECK_NE(tctx, 0);
- CHECK_NE(tid, kUnknownTid);
+ CHECK_NE(tid, kInvalidTid);
CHECK_LT(tid, max_threads_);
CHECK_EQ(tctx->status, ThreadStatusInvalid);
alive_threads_++;
@@ -171,7 +170,7 @@ u32 ThreadRegistry::CreateThread(uptr user_id, bool detached, u32 parent_tid,
void ThreadRegistry::RunCallbackForEachThreadLocked(ThreadCallback cb,
void *arg) {
CheckLocked();
- for (u32 tid = 0; tid < n_contexts_; tid++) {
+ for (u32 tid = 0; tid < threads_.size(); tid++) {
ThreadContextBase *tctx = threads_[tid];
if (tctx == 0)
continue;
@@ -181,18 +180,18 @@ void ThreadRegistry::RunCallbackForEachThreadLocked(ThreadCallback cb,
u32 ThreadRegistry::FindThread(FindThreadCallback cb, void *arg) {
BlockingMutexLock l(&mtx_);
- for (u32 tid = 0; tid < n_contexts_; tid++) {
+ for (u32 tid = 0; tid < threads_.size(); tid++) {
ThreadContextBase *tctx = threads_[tid];
if (tctx != 0 && cb(tctx, arg))
return tctx->tid;
}
- return kUnknownTid;
+ return kInvalidTid;
}
ThreadContextBase *
ThreadRegistry::FindThreadContextLocked(FindThreadCallback cb, void *arg) {
CheckLocked();
- for (u32 tid = 0; tid < n_contexts_; tid++) {
+ for (u32 tid = 0; tid < threads_.size(); tid++) {
ThreadContextBase *tctx = threads_[tid];
if (tctx != 0 && cb(tctx, arg))
return tctx;
@@ -213,7 +212,6 @@ ThreadContextBase *ThreadRegistry::FindThreadContextByOsIDLocked(tid_t os_id) {
void ThreadRegistry::SetThreadName(u32 tid, const char *name) {
BlockingMutexLock l(&mtx_);
- CHECK_LT(tid, n_contexts_);
ThreadContextBase *tctx = threads_[tid];
CHECK_NE(tctx, 0);
CHECK_EQ(SANITIZER_FUCHSIA ? ThreadStatusCreated : ThreadStatusRunning,
@@ -223,7 +221,7 @@ void ThreadRegistry::SetThreadName(u32 tid, const char *name) {
void ThreadRegistry::SetThreadNameByUserId(uptr user_id, const char *name) {
BlockingMutexLock l(&mtx_);
- for (u32 tid = 0; tid < n_contexts_; tid++) {
+ for (u32 tid = 0; tid < threads_.size(); tid++) {
ThreadContextBase *tctx = threads_[tid];
if (tctx != 0 && tctx->user_id == user_id &&
tctx->status != ThreadStatusInvalid) {
@@ -235,7 +233,6 @@ void ThreadRegistry::SetThreadNameByUserId(uptr user_id, const char *name) {
void ThreadRegistry::DetachThread(u32 tid, void *arg) {
BlockingMutexLock l(&mtx_);
- CHECK_LT(tid, n_contexts_);
ThreadContextBase *tctx = threads_[tid];
CHECK_NE(tctx, 0);
if (tctx->status == ThreadStatusInvalid) {
@@ -256,7 +253,6 @@ void ThreadRegistry::JoinThread(u32 tid, void *arg) {
do {
{
BlockingMutexLock l(&mtx_);
- CHECK_LT(tid, n_contexts_);
ThreadContextBase *tctx = threads_[tid];
CHECK_NE(tctx, 0);
if (tctx->status == ThreadStatusInvalid) {
@@ -278,14 +274,14 @@ void ThreadRegistry::JoinThread(u32 tid, void *arg) {
// really started. We just did CreateThread for a prospective new
// thread before trying to create it, and then failed to actually
// create it, and so never called StartThread.
-void ThreadRegistry::FinishThread(u32 tid) {
+ThreadStatus ThreadRegistry::FinishThread(u32 tid) {
BlockingMutexLock l(&mtx_);
CHECK_GT(alive_threads_, 0);
alive_threads_--;
- CHECK_LT(tid, n_contexts_);
ThreadContextBase *tctx = threads_[tid];
CHECK_NE(tctx, 0);
bool dead = tctx->detached;
+ ThreadStatus prev_status = tctx->status;
if (tctx->status == ThreadStatusRunning) {
CHECK_GT(running_threads_, 0);
running_threads_--;
@@ -300,13 +296,13 @@ void ThreadRegistry::FinishThread(u32 tid) {
QuarantinePush(tctx);
}
tctx->SetDestroyed();
+ return prev_status;
}
void ThreadRegistry::StartThread(u32 tid, tid_t os_id, ThreadType thread_type,
void *arg) {
BlockingMutexLock l(&mtx_);
running_threads_++;
- CHECK_LT(tid, n_contexts_);
ThreadContextBase *tctx = threads_[tid];
CHECK_NE(tctx, 0);
CHECK_EQ(ThreadStatusCreated, tctx->status);
@@ -339,7 +335,6 @@ ThreadContextBase *ThreadRegistry::QuarantinePop() {
void ThreadRegistry::SetThreadUserId(u32 tid, uptr user_id) {
BlockingMutexLock l(&mtx_);
- CHECK_LT(tid, n_contexts_);
ThreadContextBase *tctx = threads_[tid];
CHECK_NE(tctx, 0);
CHECK_NE(tctx->status, ThreadStatusInvalid);
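
With n_contexts_ gone, a tid is simply the index of its context in a growable vector, and finished contexts can be recycled. A toy standalone model of that scheme (names invented, std::vector standing in for InternalMmapVector):

#include <cstdio>
#include <deque>
#include <vector>

struct Ctx {
  unsigned tid = 0;
  bool dead = false;
};

class MiniRegistry {
 public:
  unsigned CreateThread() {
    if (!quarantine_.empty()) {  // Recycle a finished context and its tid.
      unsigned tid = quarantine_.front();
      quarantine_.pop_front();
      threads_[tid].dead = false;
      return tid;
    }
    unsigned tid = static_cast<unsigned>(threads_.size());  // Fresh tid.
    threads_.push_back(Ctx{tid, false});
    return tid;
  }
  void FinishThread(unsigned tid) {
    threads_[tid].dead = true;
    quarantine_.push_back(tid);  // Eligible for reuse by a later thread.
  }

 private:
  std::vector<Ctx> threads_;  // Grows on demand, like InternalMmapVector.
  std::deque<unsigned> quarantine_;
};

int main() {
  MiniRegistry r;
  unsigned a = r.CreateThread();
  unsigned b = r.CreateThread();
  r.FinishThread(a);
  printf("a=%u b=%u reused=%u\n", a, b, r.CreateThread());  // reused == a
}
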
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h b/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h
index 85c522a31cac..0b28bbe6ddf6 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h
@@ -85,24 +85,22 @@ class ThreadContextBase {
typedef ThreadContextBase* (*ThreadContextFactory)(u32 tid);
-class ThreadRegistry {
+class MUTEX ThreadRegistry {
public:
- static const u32 kUnknownTid;
-
+ ThreadRegistry(ThreadContextFactory factory);
ThreadRegistry(ThreadContextFactory factory, u32 max_threads,
- u32 thread_quarantine_size, u32 max_reuse = 0);
+ u32 thread_quarantine_size, u32 max_reuse);
void GetNumberOfThreads(uptr *total = nullptr, uptr *running = nullptr,
uptr *alive = nullptr);
uptr GetMaxAliveThreads();
- void Lock() { mtx_.Lock(); }
- void CheckLocked() { mtx_.CheckLocked(); }
- void Unlock() { mtx_.Unlock(); }
+ void Lock() ACQUIRE() { mtx_.Lock(); }
+ void CheckLocked() const CHECK_LOCKED() { mtx_.CheckLocked(); }
+ void Unlock() RELEASE() { mtx_.Unlock(); }
// Should be guarded by ThreadRegistryLock.
ThreadContextBase *GetThreadLocked(u32 tid) {
- DCHECK_LT(tid, n_contexts_);
- return threads_[tid];
+ return threads_.empty() ? nullptr : threads_[tid];
}
u32 CreateThread(uptr user_id, bool detached, u32 parent_tid, void *arg);
@@ -113,7 +111,7 @@ class ThreadRegistry {
void RunCallbackForEachThreadLocked(ThreadCallback cb, void *arg);
typedef bool (*FindThreadCallback)(ThreadContextBase *tctx, void *arg);
- // Finds a thread using the provided callback. Returns kUnknownTid if no
+ // Finds a thread using the provided callback. Returns kInvalidTid if no
// thread is found.
u32 FindThread(FindThreadCallback cb, void *arg);
// Should be guarded by ThreadRegistryLock. Return 0 if no thread
@@ -126,7 +124,8 @@ class ThreadRegistry {
void SetThreadNameByUserId(uptr user_id, const char *name);
void DetachThread(u32 tid, void *arg);
void JoinThread(u32 tid, void *arg);
- void FinishThread(u32 tid);
+ // Finishes thread and returns previous status.
+ ThreadStatus FinishThread(u32 tid);
void StartThread(u32 tid, tid_t os_id, ThreadType thread_type, void *arg);
void SetThreadUserId(u32 tid, uptr user_id);
@@ -138,15 +137,13 @@ class ThreadRegistry {
BlockingMutex mtx_;
- u32 n_contexts_; // Number of created thread contexts,
- // at most max_threads_.
u64 total_threads_; // Total number of created threads. May be greater than
// max_threads_ if contexts were reused.
uptr alive_threads_; // Created or running.
uptr max_alive_threads_;
uptr running_threads_;
- ThreadContextBase **threads_; // Array of thread contexts is leaked.
+ InternalMmapVector<ThreadContextBase *> threads_;
IntrusiveList<ThreadContextBase> dead_threads_;
IntrusiveList<ThreadContextBase> invalid_threads_;
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_thread_safety.h b/compiler-rt/lib/sanitizer_common/sanitizer_thread_safety.h
new file mode 100644
index 000000000000..52b25edaa7a3
--- /dev/null
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_thread_safety.h
@@ -0,0 +1,42 @@
+//===-- sanitizer_thread_safety.h -------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between sanitizer tools.
+//
+// Wrappers around thread safety annotations.
+// https://clang.llvm.org/docs/ThreadSafetyAnalysis.html
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_THREAD_SAFETY_H
+#define SANITIZER_THREAD_SAFETY_H
+
+#if defined(__clang__)
+# define THREAD_ANNOTATION(x) __attribute__((x))
+#else
+# define THREAD_ANNOTATION(x)
+#endif
+
+#define MUTEX THREAD_ANNOTATION(capability("mutex"))
+#define SCOPED_LOCK THREAD_ANNOTATION(scoped_lockable)
+#define GUARDED_BY(x) THREAD_ANNOTATION(guarded_by(x))
+#define PT_GUARDED_BY(x) THREAD_ANNOTATION(pt_guarded_by(x))
+#define REQUIRES(...) THREAD_ANNOTATION(requires_capability(__VA_ARGS__))
+#define REQUIRES_SHARED(...) \
+ THREAD_ANNOTATION(requires_shared_capability(__VA_ARGS__))
+#define ACQUIRE(...) THREAD_ANNOTATION(acquire_capability(__VA_ARGS__))
+#define ACQUIRE_SHARED(...) \
+ THREAD_ANNOTATION(acquire_shared_capability(__VA_ARGS__))
+#define TRY_ACQUIRE(...) THREAD_ANNOTATION(try_acquire_capability(__VA_ARGS__))
+#define RELEASE(...) THREAD_ANNOTATION(release_capability(__VA_ARGS__))
+#define RELEASE_SHARED(...) \
+ THREAD_ANNOTATION(release_shared_capability(__VA_ARGS__))
+#define EXCLUDES(...) THREAD_ANNOTATION(locks_excluded(__VA_ARGS__))
+#define CHECK_LOCKED(...) THREAD_ANNOTATION(assert_capability(__VA_ARGS__))
+#define NO_THREAD_SAFETY_ANALYSIS THREAD_ANNOTATION(no_thread_safety_analysis)
+
+#endif
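
Usage sketch for these wrappers: with clang and -Wthread-safety, code that touches a guarded field without holding the annotated mutex is diagnosed at compile time. The macros are redefined below only to keep the example self-contained; the class names are invented:

#if defined(__clang__)
#  define TS(x) __attribute__((x))
#else
#  define TS(x)
#endif
#define MUTEX TS(capability("mutex"))
#define GUARDED_BY(x) TS(guarded_by(x))
#define ACQUIRE(...) TS(acquire_capability(__VA_ARGS__))
#define RELEASE(...) TS(release_capability(__VA_ARGS__))

#include <mutex>

class MUTEX Mu {
 public:
  void Lock() ACQUIRE() { m_.lock(); }
  void Unlock() RELEASE() { m_.unlock(); }

 private:
  std::mutex m_;
};

class Counter {
 public:
  void Inc() {
    mu_.Lock();
    ++value_;  // OK: mu_ is held; without the Lock() call clang would warn.
    mu_.Unlock();
  }

 private:
  Mu mu_;
  int value_ GUARDED_BY(mu_) = 0;
};

int main() {
  Counter c;
  c.Inc();
  return 0;
}
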
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp
index 63c90785f270..dddd885a45dd 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp
@@ -44,6 +44,9 @@ TRACELOGGING_DEFINE_PROVIDER(g_asan_provider, "AddressSanitizerLoggingProvider",
#define TraceLoggingUnregister(x)
#endif
+// For WaitOnAddress
+# pragma comment(lib, "synchronization.lib")
+
// A macro to tell the compiler that this part of the code cannot be reached,
// if the compiler supports this feature. Since we're using this in
// code that is called when terminating the process, the expansion of the
@@ -334,8 +337,12 @@ bool MprotectNoAccess(uptr addr, uptr size) {
}
void ReleaseMemoryPagesToOS(uptr beg, uptr end) {
- // This is almost useless on 32-bits.
- // FIXME: add madvise-analog when we move to 64-bits.
+ uptr beg_aligned = RoundDownTo(beg, GetPageSizeCached()),
+ end_aligned = RoundDownTo(end, GetPageSizeCached());
+ CHECK(beg < end); // make sure the region is sane
+  if (beg_aligned == end_aligned)  // make sure we're freeing at least 1 page
+ return;
+ UnmapOrDie((void *)beg, end_aligned - beg_aligned);
}
void SetShadowRegionHugePageMode(uptr addr, uptr size) {
@@ -386,6 +393,12 @@ uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
return 0;
}
+uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,
+ uptr num_aliases, uptr ring_buffer_size) {
+ CHECK(false && "HWASan aliasing is unimplemented on Windows");
+ return 0;
+}
+
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
MEMORY_BASIC_INFORMATION mbi;
CHECK(VirtualQuery((void *)range_start, &mbi, sizeof(mbi)));
@@ -531,13 +544,7 @@ bool IsAbsolutePath(const char *path) {
IsPathSeparator(path[2]);
}
-void SleepForSeconds(int seconds) {
- Sleep(seconds * 1000);
-}
-
-void SleepForMillis(int millis) {
- Sleep(millis);
-}
+void internal_usleep(u64 useconds) { Sleep(useconds / 1000); }
u64 NanoTime() {
static LARGE_INTEGER frequency = {};
@@ -564,7 +571,7 @@ void Abort() {
// load the image at this address. Therefore, we call it the preferred base. Any
// addresses in the DWARF typically assume that the object has been loaded at
// this address.
-static uptr GetPreferredBase(const char *modname) {
+static uptr GetPreferredBase(const char *modname, char *buf, size_t buf_size) {
fd_t fd = OpenFile(modname, RdOnly, nullptr);
if (fd == kInvalidFd)
return 0;
@@ -586,12 +593,10 @@ static uptr GetPreferredBase(const char *modname) {
// IMAGE_FILE_HEADER
// IMAGE_OPTIONAL_HEADER
// Seek to e_lfanew and read all that data.
- char buf[4 + sizeof(IMAGE_FILE_HEADER) + sizeof(IMAGE_OPTIONAL_HEADER)];
if (::SetFilePointer(fd, dos_header.e_lfanew, nullptr, FILE_BEGIN) ==
INVALID_SET_FILE_POINTER)
return 0;
- if (!ReadFromFile(fd, &buf[0], sizeof(buf), &bytes_read) ||
- bytes_read != sizeof(buf))
+ if (!ReadFromFile(fd, buf, buf_size, &bytes_read) || bytes_read != buf_size)
return 0;
// Check for "PE\0\0" before the PE header.
@@ -611,10 +616,6 @@ static uptr GetPreferredBase(const char *modname) {
return (uptr)pe_header->ImageBase;
}
-#ifdef __clang__
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wframe-larger-than="
-#endif
void ListOfModules::init() {
clearOrInit();
HANDLE cur_process = GetCurrentProcess();
@@ -637,6 +638,10 @@ void ListOfModules::init() {
}
}
+ InternalMmapVector<char> buf(4 + sizeof(IMAGE_FILE_HEADER) +
+ sizeof(IMAGE_OPTIONAL_HEADER));
+ InternalMmapVector<wchar_t> modname_utf16(kMaxPathLength);
+ InternalMmapVector<char> module_name(kMaxPathLength);
// |num_modules| is the number of modules actually present,
size_t num_modules = bytes_required / sizeof(HMODULE);
for (size_t i = 0; i < num_modules; ++i) {
@@ -646,15 +651,13 @@ void ListOfModules::init() {
continue;
// Get the UTF-16 path and convert to UTF-8.
- wchar_t modname_utf16[kMaxPathLength];
int modname_utf16_len =
- GetModuleFileNameW(handle, modname_utf16, kMaxPathLength);
+ GetModuleFileNameW(handle, &modname_utf16[0], kMaxPathLength);
if (modname_utf16_len == 0)
modname_utf16[0] = '\0';
- char module_name[kMaxPathLength];
- int module_name_len =
- ::WideCharToMultiByte(CP_UTF8, 0, modname_utf16, modname_utf16_len + 1,
- &module_name[0], kMaxPathLength, NULL, NULL);
+ int module_name_len = ::WideCharToMultiByte(
+ CP_UTF8, 0, &modname_utf16[0], modname_utf16_len + 1, &module_name[0],
+ kMaxPathLength, NULL, NULL);
module_name[module_name_len] = '\0';
uptr base_address = (uptr)mi.lpBaseOfDll;
@@ -664,21 +667,19 @@ void ListOfModules::init() {
// RVA when computing the module offset. This helps llvm-symbolizer find the
// right DWARF CU. In the common case that the image is loaded at it's
// preferred address, we will now print normal virtual addresses.
- uptr preferred_base = GetPreferredBase(&module_name[0]);
+ uptr preferred_base =
+ GetPreferredBase(&module_name[0], &buf[0], buf.size());
uptr adjusted_base = base_address - preferred_base;
- LoadedModule cur_module;
- cur_module.set(module_name, adjusted_base);
+ modules_.push_back(LoadedModule());
+ LoadedModule &cur_module = modules_.back();
+ cur_module.set(&module_name[0], adjusted_base);
// We add the whole module as one single address range.
cur_module.addAddressRange(base_address, end_address, /*executable*/ true,
/*writable*/ true);
- modules_.push_back(cur_module);
}
UnmapOrDie(hmodules, modules_buffer_size);
}
-#ifdef __clang__
-#pragma clang diagnostic pop
-#endif
void ListOfModules::fallbackInit() { clear(); }
@@ -815,6 +816,17 @@ uptr GetRSS() {
void *internal_start_thread(void *(*func)(void *arg), void *arg) { return 0; }
void internal_join_thread(void *th) { }
+void FutexWait(atomic_uint32_t *p, u32 cmp) {
+ WaitOnAddress(p, &cmp, sizeof(cmp), INFINITE);
+}
+
+void FutexWake(atomic_uint32_t *p, u32 count) {
+ if (count == 1)
+ WakeByAddressSingle(p);
+ else
+ WakeByAddressAll(p);
+}
+
// ---------------------- BlockingMutex ---------------- {{{1
BlockingMutex::BlockingMutex() {
@@ -834,9 +846,7 @@ void BlockingMutex::Unlock() {
ReleaseSRWLockExclusive((PSRWLOCK)opaque_storage_);
}
-void BlockingMutex::CheckLocked() {
- CHECK_EQ(owner_, GetThreadSelf());
-}
+void BlockingMutex::CheckLocked() const { CHECK_EQ(owner_, GetThreadSelf()); }
uptr GetTlsSize() {
return 0;
@@ -1049,10 +1059,24 @@ const char *SignalContext::Describe() const {
}
uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
- // FIXME: Actually implement this function.
- CHECK_GT(buf_len, 0);
- buf[0] = 0;
- return 0;
+ if (buf_len == 0)
+ return 0;
+
+ // Get the UTF-16 path and convert to UTF-8.
+ InternalMmapVector<wchar_t> binname_utf16(kMaxPathLength);
+ int binname_utf16_len =
+ GetModuleFileNameW(NULL, &binname_utf16[0], kMaxPathLength);
+ if (binname_utf16_len == 0) {
+ buf[0] = '\0';
+ return 0;
+ }
+ int binary_name_len =
+ ::WideCharToMultiByte(CP_UTF8, 0, &binname_utf16[0], binname_utf16_len,
+ buf, buf_len, NULL, NULL);
+ if ((unsigned)binary_name_len == buf_len)
+ --binary_name_len;
+ buf[binary_name_len] = '\0';
+ return binary_name_len;
}
uptr ReadLongProcessName(/*out*/char *buf, uptr buf_len) {
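
The new FutexWait/FutexWake rely on WaitOnAddress and WakeByAddress*, which need Windows 8+ and synchronization.lib. A standalone sketch of the same pattern (MinGW users would link -lsynchronization instead of relying on the pragma):

#define _WIN32_WINNT 0x0602  // Windows 8+, needed for WaitOnAddress.
#include <windows.h>

#include <atomic>
#include <cstdio>
#include <thread>

#pragma comment(lib, "synchronization.lib")

static std::atomic<unsigned> flag{0};

int main() {
  std::thread waiter([] {
    unsigned expected = 0;
    // WaitOnAddress returns when the value at &flag no longer equals
    // `expected` (or on a spurious wake, hence the loop).
    while (flag.load() == expected)
      WaitOnAddress(&flag, &expected, sizeof(expected), INFINITE);
    std::puts("woken");
  });
  Sleep(100);  // Let the waiter block first (not required, just tidy).
  flag.store(1);
  WakeByAddressSingle(&flag);  // WakeByAddressAll would wake every waiter.
  waiter.join();
  return 0;
}
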
diff --git a/compiler-rt/lib/sanitizer_common/symbolizer/sanitizer_symbolize.cpp b/compiler-rt/lib/sanitizer_common/symbolizer/sanitizer_symbolize.cpp
index 4902be0bf51e..3809880d50b4 100644
--- a/compiler-rt/lib/sanitizer_common/symbolizer/sanitizer_symbolize.cpp
+++ b/compiler-rt/lib/sanitizer_common/symbolizer/sanitizer_symbolize.cpp
@@ -23,6 +23,16 @@ static llvm::symbolize::LLVMSymbolizer *getDefaultSymbolizer() {
return DefaultSymbolizer;
}
+static llvm::symbolize::PrinterConfig getDefaultPrinterConfig() {
+ llvm::symbolize::PrinterConfig Config;
+ Config.Pretty = false;
+ Config.Verbose = false;
+ Config.PrintFunctions = true;
+ Config.PrintAddress = false;
+ Config.SourceContextLines = 0;
+ return Config;
+}
+
namespace __sanitizer {
int internal_snprintf(char *buffer, unsigned long length, const char *format,
...);
@@ -38,19 +48,24 @@ bool __sanitizer_symbolize_code(const char *ModuleName, uint64_t ModuleOffset,
std::string Result;
{
llvm::raw_string_ostream OS(Result);
- llvm::symbolize::DIPrinter Printer(OS);
+ llvm::symbolize::PrinterConfig Config = getDefaultPrinterConfig();
+ llvm::symbolize::Request Request{ModuleName, ModuleOffset};
+ auto Printer =
+ std::make_unique<llvm::symbolize::LLVMPrinter>(OS, OS, Config);
+
    // TODO: it is necessary to set a proper SectionIndex here.
    // object::SectionedAddress::UndefSection works only for absolute addresses.
if (SymbolizeInlineFrames) {
auto ResOrErr = getDefaultSymbolizer()->symbolizeInlinedCode(
ModuleName,
{ModuleOffset, llvm::object::SectionedAddress::UndefSection});
- Printer << (ResOrErr ? ResOrErr.get() : llvm::DIInliningInfo());
+ Printer->print(Request,
+ ResOrErr ? ResOrErr.get() : llvm::DIInliningInfo());
} else {
auto ResOrErr = getDefaultSymbolizer()->symbolizeCode(
ModuleName,
{ModuleOffset, llvm::object::SectionedAddress::UndefSection});
- Printer << (ResOrErr ? ResOrErr.get() : llvm::DILineInfo());
+ Printer->print(Request, ResOrErr ? ResOrErr.get() : llvm::DILineInfo());
}
}
return __sanitizer::internal_snprintf(Buffer, MaxLength, "%s",
@@ -61,14 +76,18 @@ bool __sanitizer_symbolize_data(const char *ModuleName, uint64_t ModuleOffset,
char *Buffer, int MaxLength) {
std::string Result;
{
+ llvm::symbolize::PrinterConfig Config = getDefaultPrinterConfig();
llvm::raw_string_ostream OS(Result);
- llvm::symbolize::DIPrinter Printer(OS);
+ llvm::symbolize::Request Request{ModuleName, ModuleOffset};
+ auto Printer =
+ std::make_unique<llvm::symbolize::LLVMPrinter>(OS, OS, Config);
+
    // TODO: it is necessary to set a proper SectionIndex here.
    // object::SectionedAddress::UndefSection works only for absolute addresses.
auto ResOrErr = getDefaultSymbolizer()->symbolizeData(
ModuleName,
{ModuleOffset, llvm::object::SectionedAddress::UndefSection});
- Printer << (ResOrErr ? ResOrErr.get() : llvm::DIGlobal());
+ Printer->print(Request, ResOrErr ? ResOrErr.get() : llvm::DIGlobal());
}
return __sanitizer::internal_snprintf(Buffer, MaxLength, "%s",
Result.c_str()) < MaxLength;
@@ -86,4 +105,32 @@ int __sanitizer_symbolize_demangle(const char *Name, char *Buffer,
: 0;
}
+// Override __cxa_atexit and ignore callbacks.
+// This prevents crashes in a configuration where the symbolizer
+// is built into sanitizer runtime and consequently into the test process.
+// LLVM libraries have some global objects destroyed during exit,
+// so if the test process triggers any bugs after that, the symbolizer crashes.
+// An example stack trace of such crash:
+//
+// #1 __cxa_throw
+// #2 std::__u::__throw_system_error
+// #3 std::__u::recursive_mutex::lock
+// #4 __sanitizer_llvm::ManagedStaticBase::RegisterManagedStatic
+// #5 __sanitizer_llvm::errorToErrorCode
+// #6 __sanitizer_llvm::getFileAux
+// #7 __sanitizer_llvm::MemoryBuffer::getFileOrSTDIN
+// #10 __sanitizer_llvm::symbolize::LLVMSymbolizer::getOrCreateModuleInfo
+// #13 __sanitizer::Symbolizer::SymbolizeData
+// #14 __tsan::SymbolizeData
+// #16 __tsan::ReportRace
+// #18 __tsan_write4
+// #19 race() () at test/tsan/atexit4.cpp
+// #20 cxa_at_exit_wrapper
+// #21 __cxa_finalize
+// #22 __do_fini
+//
+// For the standalone llvm-symbolizer this does not hurt; we just don't
+// destroy a few global objects on exit.
+int __cxa_atexit(void (*f)(void *a), void *arg, void *dso) { return 0; }
+
} // extern "C"
diff --git a/compiler-rt/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh b/compiler-rt/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh
index 5b6433011a09..c793875db099 100755
--- a/compiler-rt/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh
+++ b/compiler-rt/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh
@@ -123,7 +123,7 @@ cd ${LIBCXX_BUILD}
ninja cxx cxxabi
FLAGS="${FLAGS} -fno-rtti -fno-exceptions"
-LLVM_FLAGS="${FLAGS} -nostdinc++ -I${ZLIB_BUILD} -I${LIBCXX_BUILD}/include/c++/v1"
+LLVM_FLAGS="${FLAGS} -nostdinc++ -I${ZLIB_BUILD} -I${LIBCXX_BUILD}/include/c++/v1 -Wno-error=global-constructors"
# Build LLVM.
if [[ ! -d ${LLVM_BUILD} ]]; then
diff --git a/compiler-rt/lib/scudo/scudo_allocator.cpp b/compiler-rt/lib/scudo/scudo_allocator.cpp
index 82864405dfb0..172353fadb1f 100644
--- a/compiler-rt/lib/scudo/scudo_allocator.cpp
+++ b/compiler-rt/lib/scudo/scudo_allocator.cpp
@@ -300,16 +300,9 @@ struct Allocator {
// Allocates a chunk.
void *allocate(uptr Size, uptr Alignment, AllocType Type,
- bool ForceZeroContents = false) {
+ bool ForceZeroContents = false) NO_THREAD_SAFETY_ANALYSIS {
initThreadMaybe();
-#ifdef GWP_ASAN_HOOKS
- if (UNLIKELY(GuardedAlloc.shouldSample())) {
- if (void *Ptr = GuardedAlloc.allocate(Size))
- return Ptr;
- }
-#endif // GWP_ASAN_HOOKS
-
if (UNLIKELY(Alignment > MaxAlignment)) {
if (AllocatorMayReturnNull())
return nullptr;
@@ -318,6 +311,16 @@ struct Allocator {
if (UNLIKELY(Alignment < MinAlignment))
Alignment = MinAlignment;
+#ifdef GWP_ASAN_HOOKS
+ if (UNLIKELY(GuardedAlloc.shouldSample())) {
+ if (void *Ptr = GuardedAlloc.allocate(Size, Alignment)) {
+ if (SCUDO_CAN_USE_HOOKS && &__sanitizer_malloc_hook)
+ __sanitizer_malloc_hook(Ptr, Size);
+ return Ptr;
+ }
+ }
+#endif // GWP_ASAN_HOOKS
+
const uptr NeededSize = RoundUpTo(Size ? Size : 1, MinAlignment) +
Chunk::getHeaderSize();
const uptr AlignedSize = (Alignment > MinAlignment) ?
@@ -402,7 +405,7 @@ struct Allocator {
// a zero-sized quarantine, or if the size of the chunk is greater than the
// quarantine chunk size threshold.
void quarantineOrDeallocateChunk(void *Ptr, UnpackedHeader *Header,
- uptr Size) {
+ uptr Size) NO_THREAD_SAFETY_ANALYSIS {
const bool BypassQuarantine = !Size || (Size > QuarantineChunksUpToSize);
if (BypassQuarantine) {
UnpackedHeader NewHeader = *Header;
diff --git a/compiler-rt/lib/scudo/scudo_termination.cpp b/compiler-rt/lib/scudo/scudo_termination.cpp
index 6c7c0abc6d36..5f1337efaca0 100644
--- a/compiler-rt/lib/scudo/scudo_termination.cpp
+++ b/compiler-rt/lib/scudo/scudo_termination.cpp
@@ -30,7 +30,7 @@ void NORETURN Die() {
internal__exit(common_flags()->exitcode);
}
-void SetCheckFailedCallback(CheckFailedCallbackType callback) {}
+void SetCheckUnwindCallback(void (*callback)()) {}
void NORETURN CheckFailed(const char *File, int Line, const char *Condition,
u64 Value1, u64 Value2) {
diff --git a/compiler-rt/lib/scudo/scudo_tsd.h b/compiler-rt/lib/scudo/scudo_tsd.h
index ec8dabc1f8a7..e1310974db45 100644
--- a/compiler-rt/lib/scudo/scudo_tsd.h
+++ b/compiler-rt/lib/scudo/scudo_tsd.h
@@ -29,7 +29,7 @@ struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) ScudoTSD {
void init();
void commitBack();
- inline bool tryLock() {
+ inline bool tryLock() TRY_ACQUIRE(true, Mutex) {
if (Mutex.TryLock()) {
atomic_store_relaxed(&Precedence, 0);
return true;
@@ -40,12 +40,12 @@ struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) ScudoTSD {
return false;
}
- inline void lock() {
+ inline void lock() ACQUIRE(Mutex) {
atomic_store_relaxed(&Precedence, 0);
Mutex.Lock();
}
- inline void unlock() { Mutex.Unlock(); }
+ inline void unlock() RELEASE(Mutex) { Mutex.Unlock(); }
inline uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }
diff --git a/compiler-rt/lib/scudo/scudo_tsd_exclusive.inc b/compiler-rt/lib/scudo/scudo_tsd_exclusive.inc
index 08e4d3af7316..29db8a2eff1a 100644
--- a/compiler-rt/lib/scudo/scudo_tsd_exclusive.inc
+++ b/compiler-rt/lib/scudo/scudo_tsd_exclusive.inc
@@ -11,8 +11,8 @@
//===----------------------------------------------------------------------===//
#ifndef SCUDO_TSD_H_
-# error "This file must be included inside scudo_tsd.h."
-#endif // SCUDO_TSD_H_
+#error "This file must be included inside scudo_tsd.h."
+#endif // SCUDO_TSD_H_
#if SCUDO_TSD_EXCLUSIVE
@@ -21,10 +21,9 @@ enum ThreadState : u8 {
ThreadInitialized,
ThreadTornDown,
};
-__attribute__((tls_model("initial-exec")))
-extern THREADLOCAL ThreadState ScudoThreadState;
-__attribute__((tls_model("initial-exec")))
-extern THREADLOCAL ScudoTSD TSD;
+__attribute__((
+ tls_model("initial-exec"))) extern THREADLOCAL ThreadState ScudoThreadState;
+__attribute__((tls_model("initial-exec"))) extern THREADLOCAL ScudoTSD TSD;
extern ScudoTSD FallbackTSD;
@@ -34,7 +33,8 @@ ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
initThread(MinimalInit);
}
-ALWAYS_INLINE ScudoTSD *getTSDAndLock(bool *UnlockRequired) {
+ALWAYS_INLINE ScudoTSD *
+getTSDAndLock(bool *UnlockRequired) NO_THREAD_SAFETY_ANALYSIS {
if (UNLIKELY(ScudoThreadState != ThreadInitialized)) {
FallbackTSD.lock();
*UnlockRequired = true;
@@ -44,4 +44,4 @@ ALWAYS_INLINE ScudoTSD *getTSDAndLock(bool *UnlockRequired) {
return &TSD;
}
-#endif // SCUDO_TSD_EXCLUSIVE
+#endif // SCUDO_TSD_EXCLUSIVE
diff --git a/compiler-rt/lib/scudo/scudo_tsd_shared.cpp b/compiler-rt/lib/scudo/scudo_tsd_shared.cpp
index 59ad2549998c..fd85a7c4017f 100644
--- a/compiler-rt/lib/scudo/scudo_tsd_shared.cpp
+++ b/compiler-rt/lib/scudo/scudo_tsd_shared.cpp
@@ -64,7 +64,7 @@ void initThread(bool MinimalInit) {
setCurrentTSD(&TSDs[Index % NumberOfTSDs]);
}
-ScudoTSD *getTSDAndLockSlow(ScudoTSD *TSD) {
+ScudoTSD *getTSDAndLockSlow(ScudoTSD *TSD) NO_THREAD_SAFETY_ANALYSIS {
if (NumberOfTSDs > 1) {
// Use the Precedence of the current TSD as our random seed. Since we are in
// the slow path, it means that tryLock failed, and as a result it's very
diff --git a/compiler-rt/lib/scudo/scudo_tsd_shared.inc b/compiler-rt/lib/scudo/scudo_tsd_shared.inc
index 8f3362dd3d71..e46b044a81f8 100644
--- a/compiler-rt/lib/scudo/scudo_tsd_shared.inc
+++ b/compiler-rt/lib/scudo/scudo_tsd_shared.inc
@@ -41,7 +41,8 @@ ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
ScudoTSD *getTSDAndLockSlow(ScudoTSD *TSD);
-ALWAYS_INLINE ScudoTSD *getTSDAndLock(bool *UnlockRequired) {
+ALWAYS_INLINE ScudoTSD *
+getTSDAndLock(bool *UnlockRequired) NO_THREAD_SAFETY_ANALYSIS {
ScudoTSD *TSD = getCurrentTSD();
DCHECK(TSD && "No TSD associated with the current thread!");
*UnlockRequired = true;
diff --git a/compiler-rt/lib/scudo/standalone/allocator_config.h b/compiler-rt/lib/scudo/standalone/allocator_config.h
index 12daaa2f6b44..e6f46b511dbf 100644
--- a/compiler-rt/lib/scudo/standalone/allocator_config.h
+++ b/compiler-rt/lib/scudo/standalone/allocator_config.h
@@ -21,24 +21,65 @@
namespace scudo {
+// The combined allocator uses a structure as a template argument that
+// specifies the configuration options for the various subcomponents of the
+// allocator.
+//
+// struct ExampleConfig {
+//   // SizeClassMap to use with the Primary.
+// using SizeClassMap = DefaultSizeClassMap;
+// // Indicates possible support for Memory Tagging.
+// static const bool MaySupportMemoryTagging = false;
+// // Defines the Primary allocator to use.
+// typedef SizeClassAllocator64<ExampleConfig> Primary;
+// // Log2 of the size of a size class region, as used by the Primary.
+// static const uptr PrimaryRegionSizeLog = 30U;
+// // Defines the type and scale of a compact pointer. A compact pointer can
+// // be understood as the offset of a pointer within the region it belongs
+// // to, in increments of a power-of-2 scale.
+//   // e.g.: Ptr = Base + (CompactPtr << Scale).
+// typedef u32 PrimaryCompactPtrT;
+// static const uptr PrimaryCompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
+// // Indicates support for offsetting the start of a region by
+// // a random number of pages. Only used with primary64.
+// static const bool PrimaryEnableRandomOffset = true;
+//   // With primary64 only: map() calls for user memory are made with at
+//   // least this size.
+// static const uptr PrimaryMapSizeIncrement = 1UL << 18;
+// // Defines the minimal & maximal release interval that can be set.
+// static const s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
+// static const s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
+// // Defines the type of cache used by the Secondary. Some additional
+// // configuration entries can be necessary depending on the Cache.
+// typedef MapAllocatorNoCache SecondaryCache;
+// // Thread-Specific Data Registry used, shared or exclusive.
+// template <class A> using TSDRegistryT = TSDRegistrySharedT<A, 8U, 4U>;
+// };
+
// Default configurations for various platforms.
struct DefaultConfig {
using SizeClassMap = DefaultSizeClassMap;
- static const bool MaySupportMemoryTagging = false;
+ static const bool MaySupportMemoryTagging = true;
#if SCUDO_CAN_USE_PRIMARY64
typedef SizeClassAllocator64<DefaultConfig> Primary;
- static const uptr PrimaryRegionSizeLog = 30U;
+ static const uptr PrimaryRegionSizeLog = 32U;
+ typedef uptr PrimaryCompactPtrT;
+ static const uptr PrimaryCompactPtrScale = 0;
+ static const bool PrimaryEnableRandomOffset = true;
+ static const uptr PrimaryMapSizeIncrement = 1UL << 18;
#else
typedef SizeClassAllocator32<DefaultConfig> Primary;
static const uptr PrimaryRegionSizeLog = 19U;
+ typedef uptr PrimaryCompactPtrT;
#endif
static const s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
static const s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
typedef MapAllocatorCache<DefaultConfig> SecondaryCache;
static const u32 SecondaryCacheEntriesArraySize = 32U;
+ static const u32 SecondaryCacheQuarantineSize = 0U;
static const u32 SecondaryCacheDefaultMaxEntriesCount = 32U;
static const uptr SecondaryCacheDefaultMaxEntrySize = 1UL << 19;
static const s32 SecondaryCacheMinReleaseToOsIntervalMs = INT32_MIN;
@@ -46,7 +87,6 @@ struct DefaultConfig {
template <class A> using TSDRegistryT = TSDRegistryExT<A>; // Exclusive
};
-
struct AndroidConfig {
using SizeClassMap = AndroidSizeClassMap;
static const bool MaySupportMemoryTagging = true;
@@ -54,15 +94,21 @@ struct AndroidConfig {
#if SCUDO_CAN_USE_PRIMARY64
typedef SizeClassAllocator64<AndroidConfig> Primary;
static const uptr PrimaryRegionSizeLog = 28U;
+ typedef u32 PrimaryCompactPtrT;
+ static const uptr PrimaryCompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
+ static const bool PrimaryEnableRandomOffset = true;
+ static const uptr PrimaryMapSizeIncrement = 1UL << 18;
#else
typedef SizeClassAllocator32<AndroidConfig> Primary;
static const uptr PrimaryRegionSizeLog = 18U;
+ typedef uptr PrimaryCompactPtrT;
#endif
static const s32 PrimaryMinReleaseToOsIntervalMs = 1000;
static const s32 PrimaryMaxReleaseToOsIntervalMs = 1000;
typedef MapAllocatorCache<AndroidConfig> SecondaryCache;
static const u32 SecondaryCacheEntriesArraySize = 256U;
+ static const u32 SecondaryCacheQuarantineSize = 32U;
static const u32 SecondaryCacheDefaultMaxEntriesCount = 32U;
static const uptr SecondaryCacheDefaultMaxEntrySize = 2UL << 20;
static const s32 SecondaryCacheMinReleaseToOsIntervalMs = 0;
@@ -79,15 +125,21 @@ struct AndroidSvelteConfig {
#if SCUDO_CAN_USE_PRIMARY64
typedef SizeClassAllocator64<AndroidSvelteConfig> Primary;
static const uptr PrimaryRegionSizeLog = 27U;
+ typedef u32 PrimaryCompactPtrT;
+ static const uptr PrimaryCompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
+ static const bool PrimaryEnableRandomOffset = true;
+ static const uptr PrimaryMapSizeIncrement = 1UL << 18;
#else
typedef SizeClassAllocator32<AndroidSvelteConfig> Primary;
static const uptr PrimaryRegionSizeLog = 16U;
+ typedef uptr PrimaryCompactPtrT;
#endif
static const s32 PrimaryMinReleaseToOsIntervalMs = 1000;
static const s32 PrimaryMaxReleaseToOsIntervalMs = 1000;
typedef MapAllocatorCache<AndroidSvelteConfig> SecondaryCache;
static const u32 SecondaryCacheEntriesArraySize = 16U;
+ static const u32 SecondaryCacheQuarantineSize = 32U;
static const u32 SecondaryCacheDefaultMaxEntriesCount = 4U;
static const uptr SecondaryCacheDefaultMaxEntrySize = 1UL << 18;
static const s32 SecondaryCacheMinReleaseToOsIntervalMs = 0;
@@ -99,11 +151,15 @@ struct AndroidSvelteConfig {
#if SCUDO_CAN_USE_PRIMARY64
struct FuchsiaConfig {
- using SizeClassMap = DefaultSizeClassMap;
+ using SizeClassMap = FuchsiaSizeClassMap;
static const bool MaySupportMemoryTagging = false;
typedef SizeClassAllocator64<FuchsiaConfig> Primary;
static const uptr PrimaryRegionSizeLog = 30U;
+ typedef u32 PrimaryCompactPtrT;
+ static const bool PrimaryEnableRandomOffset = true;
+ static const uptr PrimaryMapSizeIncrement = 1UL << 18;
+ static const uptr PrimaryCompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
static const s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
static const s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
@@ -111,12 +167,34 @@ struct FuchsiaConfig {
template <class A>
using TSDRegistryT = TSDRegistrySharedT<A, 8U, 4U>; // Shared, max 8 TSDs.
};
+
+struct TrustyConfig {
+ using SizeClassMap = TrustySizeClassMap;
+ static const bool MaySupportMemoryTagging = false;
+
+ typedef SizeClassAllocator64<TrustyConfig> Primary;
+ // Some apps have 1 page of heap total so small regions are necessary.
+ static const uptr PrimaryRegionSizeLog = 10U;
+ typedef u32 PrimaryCompactPtrT;
+ static const bool PrimaryEnableRandomOffset = false;
+ // Trusty is extremely memory-constrained so minimally round up map calls.
+ static const uptr PrimaryMapSizeIncrement = 1UL << 4;
+ static const uptr PrimaryCompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
+ static const s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
+ static const s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
+
+ typedef MapAllocatorNoCache SecondaryCache;
+ template <class A>
+ using TSDRegistryT = TSDRegistrySharedT<A, 1U, 1U>; // Shared, max 1 TSD.
+};
#endif
#if SCUDO_ANDROID
typedef AndroidConfig Config;
#elif SCUDO_FUCHSIA
typedef FuchsiaConfig Config;
+#elif SCUDO_TRUSTY
+typedef TrustyConfig Config;
#else
typedef DefaultConfig Config;
#endif
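The new PrimaryCompactPtrT / PrimaryCompactPtrScale knobs let the 64-bit primary keep free-list pointers as 32-bit values encoded as scaled offsets from a region base. A minimal sketch of that encoding, assuming a 64-bit target and a scale of 4 (SCUDO_MIN_ALIGNMENT_LOG on typical 64-bit configs); the names Base/compact/decompact are illustrative, not Scudo's actual helpers:

    #include <cassert>
    #include <cstdint>

    constexpr unsigned kCompactPtrScale = 4; // illustrative, e.g. SCUDO_MIN_ALIGNMENT_LOG

    uint32_t compact(uintptr_t Base, uintptr_t Ptr) {
      // Blocks are at least (1 << kCompactPtrScale)-aligned, so the low bits are
      // redundant and the offset fits in 32 bits for regions up to 64 GiB.
      return static_cast<uint32_t>((Ptr - Base) >> kCompactPtrScale);
    }

    uintptr_t decompact(uintptr_t Base, uint32_t C) {
      return Base + (static_cast<uintptr_t>(C) << kCompactPtrScale);
    }

    int main() {
      const uintptr_t Base = 0x7f0000000000u;
      const uintptr_t Block = Base + 0x12340u;
      assert(decompact(Base, compact(Base, Block)) == Block);
      return 0;
    }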
diff --git a/compiler-rt/lib/scudo/standalone/bytemap.h b/compiler-rt/lib/scudo/standalone/bytemap.h
index e0d54f4e5971..248e096d07b6 100644
--- a/compiler-rt/lib/scudo/standalone/bytemap.h
+++ b/compiler-rt/lib/scudo/standalone/bytemap.h
@@ -17,10 +17,9 @@ namespace scudo {
template <uptr Size> class FlatByteMap {
public:
- void initLinkerInitialized() {}
- void init() { memset(Map, 0, sizeof(Map)); }
+ void init() { DCHECK(Size == 0 || Map[0] == 0); }
- void unmapTestOnly() {}
+ void unmapTestOnly() { memset(Map, 0, Size); }
void set(uptr Index, u8 Value) {
DCHECK_LT(Index, Size);
@@ -36,7 +35,7 @@ public:
void enable() {}
private:
- u8 Map[Size];
+ u8 Map[Size] = {};
};
} // namespace scudo
diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
index 0df7a652ffa5..fd5360ce0f55 100644
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -28,7 +28,6 @@
#ifdef GWP_ASAN_HOOKS
#include "gwp_asan/guarded_pool_allocator.h"
#include "gwp_asan/optional/backtrace.h"
-#include "gwp_asan/optional/options_parser.h"
#include "gwp_asan/optional/segv_handler.h"
#endif // GWP_ASAN_HOOKS
@@ -52,8 +51,7 @@ public:
typedef typename Params::template TSDRegistryT<ThisT> TSDRegistryT;
void callPostInitCallback() {
- static pthread_once_t OnceControl = PTHREAD_ONCE_INIT;
- pthread_once(&OnceControl, PostInitCallback);
+ pthread_once(&PostInitNonce, PostInitCallback);
}
struct QuarantineCallback {
@@ -72,12 +70,10 @@ public:
NewHeader.State = Chunk::State::Available;
Chunk::compareExchangeHeader(Allocator.Cookie, Ptr, &NewHeader, &Header);
+ if (allocatorSupportsMemoryTagging<Params>())
+ Ptr = untagPointer(Ptr);
void *BlockBegin = Allocator::getBlockBegin(Ptr, &NewHeader);
- const uptr ClassId = NewHeader.ClassId;
- if (LIKELY(ClassId))
- Cache.deallocate(ClassId, BlockBegin);
- else
- Allocator.Secondary.deallocate(BlockBegin);
+ Cache.deallocate(NewHeader.ClassId, BlockBegin);
}
// We take a shortcut when allocating a quarantine batch by working with the
@@ -136,7 +132,7 @@ public:
typedef GlobalQuarantine<QuarantineCallback, void> QuarantineT;
typedef typename QuarantineT::CacheT QuarantineCacheT;
- void initLinkerInitialized() {
+ void init() {
performSanityChecks();
// Check if hardware CRC32 is supported in the binary and by the platform,
@@ -170,11 +166,10 @@ public:
QuarantineMaxChunkSize =
static_cast<u32>(getFlags()->quarantine_max_chunk_size);
- Stats.initLinkerInitialized();
+ Stats.init();
const s32 ReleaseToOsIntervalMs = getFlags()->release_to_os_interval_ms;
- Primary.initLinkerInitialized(ReleaseToOsIntervalMs);
- Secondary.initLinkerInitialized(&Stats, ReleaseToOsIntervalMs);
-
+ Primary.init(ReleaseToOsIntervalMs);
+ Secondary.init(&Stats, ReleaseToOsIntervalMs);
Quarantine.init(
static_cast<uptr>(getFlags()->quarantine_size_kb << 10),
static_cast<uptr>(getFlags()->thread_local_quarantine_size_kb << 10));
@@ -184,12 +179,12 @@ public:
// be functional, best called from PostInitCallback.
void initGwpAsan() {
#ifdef GWP_ASAN_HOOKS
- // Bear in mind - Scudo has its own alignment guarantees that are strictly
- // enforced. Scudo exposes the same allocation function for everything from
- // malloc() to posix_memalign, so in general this flag goes unused, as Scudo
- // will always ask GWP-ASan for an aligned amount of bytes.
- gwp_asan::options::initOptions(getEnv("GWP_ASAN_OPTIONS"), Printf);
- gwp_asan::options::Options Opt = gwp_asan::options::getOptions();
+ gwp_asan::options::Options Opt;
+ Opt.Enabled = getFlags()->GWP_ASAN_Enabled;
+ Opt.MaxSimultaneousAllocations =
+ getFlags()->GWP_ASAN_MaxSimultaneousAllocations;
+ Opt.SampleRate = getFlags()->GWP_ASAN_SampleRate;
+ Opt.InstallSignalHandlers = getFlags()->GWP_ASAN_InstallSignalHandlers;
// Embedded GWP-ASan is locked through the Scudo atfork handler (via
// Allocator::disable calling GWPASan.disable). Disable GWP-ASan's atfork
// handler.
@@ -202,6 +197,11 @@ public:
&GuardedAlloc, Printf,
gwp_asan::backtrace::getPrintBacktraceFunction(),
gwp_asan::backtrace::getSegvBacktraceFunction());
+
+ GuardedAllocSlotSize =
+ GuardedAlloc.getAllocatorState()->maximumAllocationSize();
+ Stats.add(StatFree, static_cast<uptr>(Opt.MaxSimultaneousAllocations) *
+ GuardedAllocSlotSize);
#endif // GWP_ASAN_HOOKS
}
@@ -209,11 +209,10 @@ public:
TSDRegistry.initThreadMaybe(this, MinimalInit);
}
- void reset() { memset(this, 0, sizeof(*this)); }
-
void unmapTestOnly() {
- TSDRegistry.unmapTestOnly();
+ TSDRegistry.unmapTestOnly(this);
Primary.unmapTestOnly();
+ Secondary.unmapTestOnly();
#ifdef GWP_ASAN_HOOKS
if (getFlags()->GWP_ASAN_InstallSignalHandlers)
gwp_asan::segv_handler::uninstallSignalHandlers();
@@ -224,9 +223,7 @@ public:
TSDRegistryT *getTSDRegistry() { return &TSDRegistry; }
// The Cache must be provided zero-initialized.
- void initCache(CacheT *Cache) {
- Cache->initLinkerInitialized(&Stats, &Primary);
- }
+ void initCache(CacheT *Cache) { Cache->init(&Stats, &Primary); }
// Release the resources used by a TSD, which involves:
// - draining the local quarantine cache to the global quarantine;
@@ -239,11 +236,26 @@ public:
TSD->Cache.destroy(&Stats);
}
- ALWAYS_INLINE void *untagPointerMaybe(void *Ptr) {
- if (allocatorSupportsMemoryTagging<Params>())
- return reinterpret_cast<void *>(
- untagPointer(reinterpret_cast<uptr>(Ptr)));
- return Ptr;
+ ALWAYS_INLINE void *getHeaderTaggedPointer(void *Ptr) {
+ if (!allocatorSupportsMemoryTagging<Params>())
+ return Ptr;
+ auto UntaggedPtr = untagPointer(Ptr);
+ if (UntaggedPtr != Ptr)
+ return UntaggedPtr;
+ // Secondary, or pointer allocated while memory tagging is unsupported or
+ // disabled. The tag mismatch is okay in the latter case because tags will
+ // not be checked.
+ return addHeaderTag(Ptr);
+ }
+
+ ALWAYS_INLINE uptr addHeaderTag(uptr Ptr) {
+ if (!allocatorSupportsMemoryTagging<Params>())
+ return Ptr;
+ return addFixedTag(Ptr, 2);
+ }
+
+ ALWAYS_INLINE void *addHeaderTag(void *Ptr) {
+ return reinterpret_cast<void *>(addHeaderTag(reinterpret_cast<uptr>(Ptr)));
}
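getHeaderTaggedPointer/addHeaderTag revolve around a fixed tag of 2 being applied to the pointer used for header accesses. A rough sketch of what fixed-tag manipulation can look like, under the assumption of the AArch64 MTE convention of a 4-bit logical tag in address bits 56-59 (the real helpers live in Scudo's memtag code, not here):

    #include <cstdint>

    constexpr uintptr_t kTagShift = 56;
    constexpr uintptr_t kTagMask = 0xfULL << kTagShift;

    uintptr_t untagSketch(uintptr_t Ptr) { return Ptr & ~kTagMask; }

    uintptr_t addFixedTagSketch(uintptr_t Ptr, uintptr_t Tag) {
      // Replace whatever tag the pointer carried with the fixed tag.
      return untagSketch(Ptr) | (Tag << kTagShift);
    }

    // Header accesses go through a pointer carrying tag 2; tagged primary chunks
    // keep a zero-tagged header granule instead, which is why other code in this
    // patch has to "try both" when probing a block for a header.
    uintptr_t headerTaggedSketch(uintptr_t Ptr) { return addFixedTagSketch(Ptr, 2); }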
NOINLINE u32 collectStackTrace() {
@@ -260,7 +272,8 @@ public:
#endif
}
- uptr computeOddEvenMaskForPointerMaybe(Options Options, uptr Ptr, uptr Size) {
+ uptr computeOddEvenMaskForPointerMaybe(Options Options, uptr Ptr,
+ uptr ClassId) {
if (!Options.get(OptionBit::UseOddEvenTags))
return 0;
@@ -269,8 +282,7 @@ public:
// Size to Ptr will flip the least significant set bit of Size in Ptr, so
// that bit will have the pattern 010101... for consecutive blocks, which we
// can use to determine which tag mask to use.
- return (Ptr & (1ULL << getLeastSignificantSetBitIndex(Size))) ? 0xaaaa
- : 0x5555;
+ return 0x5555U << ((Ptr >> SizeClassMap::getSizeLSBByClassId(ClassId)) & 1);
}
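The rewritten mask computation relies on the least significant bit of the class size alternating in the block address of consecutive blocks. A worked example, assuming (as the surrounding code suggests) that the result is used as a tag exclusion mask, so 0x5555 rules out even tags and 0xaaaa rules out odd ones:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const unsigned SizeLSB = 5; // e.g. a 32-byte size class
      for (uintptr_t Block = 0x1000; Block < 0x1000 + 4 * 32; Block += 32) {
        // Bit 5 of the block address flips between neighbouring blocks, so the
        // mask alternates 0x5555 / 0xaaaa and adjacent blocks draw their tags
        // from disjoint odd/even sets.
        const uintptr_t Mask = 0x5555u << ((Block >> SizeLSB) & 1);
        printf("block 0x%zx -> exclude mask 0x%zx\n", (size_t)Block, (size_t)Mask);
      }
      return 0;
    }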
NOINLINE void *allocate(uptr Size, Chunk::Origin Origin,
@@ -278,27 +290,34 @@ public:
bool ZeroContents = false) {
initThreadMaybe();
+ const Options Options = Primary.Options.load();
+ if (UNLIKELY(Alignment > MaxAlignment)) {
+ if (Options.get(OptionBit::MayReturnNull))
+ return nullptr;
+ reportAlignmentTooBig(Alignment, MaxAlignment);
+ }
+ if (Alignment < MinAlignment)
+ Alignment = MinAlignment;
+
#ifdef GWP_ASAN_HOOKS
if (UNLIKELY(GuardedAlloc.shouldSample())) {
- if (void *Ptr = GuardedAlloc.allocate(roundUpTo(Size, Alignment)))
+ if (void *Ptr = GuardedAlloc.allocate(Size, Alignment)) {
+ if (UNLIKELY(&__scudo_allocate_hook))
+ __scudo_allocate_hook(Ptr, Size);
+ Stats.lock();
+ Stats.add(StatAllocated, GuardedAllocSlotSize);
+ Stats.sub(StatFree, GuardedAllocSlotSize);
+ Stats.unlock();
return Ptr;
+ }
}
#endif // GWP_ASAN_HOOKS
- const Options Options = Primary.Options.load();
const FillContentsMode FillContents = ZeroContents ? ZeroFill
: TSDRegistry.getDisableMemInit()
? NoFill
: Options.getFillContentsMode();
- if (UNLIKELY(Alignment > MaxAlignment)) {
- if (Options.get(OptionBit::MayReturnNull))
- return nullptr;
- reportAlignmentTooBig(Alignment, MaxAlignment);
- }
- if (Alignment < MinAlignment)
- Alignment = MinAlignment;
-
// If the requested size happens to be 0 (more common than you might think),
// allocate MinAlignment bytes on top of the header. Then add the extra
// bytes required to fulfill the alignment requirements: we allocate enough
@@ -340,7 +359,7 @@ public:
TSD->unlock();
}
if (UNLIKELY(ClassId == 0))
- Block = Secondary.allocate(NeededSize, Alignment, &SecondaryBlockEnd,
+ Block = Secondary.allocate(Options, Size, Alignment, &SecondaryBlockEnd,
FillContents);
if (UNLIKELY(!Block)) {
@@ -414,7 +433,7 @@ public:
if (NextPage < PrevEnd && loadTag(NextPage) != NextPage)
PrevEnd = NextPage;
TaggedPtr = reinterpret_cast<void *>(TaggedUserPtr);
- resizeTaggedChunk(PrevEnd, TaggedUserPtr + Size, BlockEnd);
+ resizeTaggedChunk(PrevEnd, TaggedUserPtr + Size, Size, BlockEnd);
if (UNLIKELY(FillContents != NoFill && !Header.OriginOrWasZeroed)) {
// If an allocation needs to be zeroed (i.e. calloc) we can normally
// avoid zeroing the memory now since we can rely on memory having
@@ -432,15 +451,26 @@ public:
}
} else {
const uptr OddEvenMask =
- computeOddEvenMaskForPointerMaybe(Options, BlockUptr, BlockSize);
+ computeOddEvenMaskForPointerMaybe(Options, BlockUptr, ClassId);
TaggedPtr = prepareTaggedChunk(Ptr, Size, OddEvenMask, BlockEnd);
}
- storeAllocationStackMaybe(Options, Ptr);
- } else if (UNLIKELY(FillContents != NoFill)) {
- // This condition is not necessarily unlikely, but since memset is
- // costly, we might as well mark it as such.
- memset(Block, FillContents == ZeroFill ? 0 : PatternFillByte,
- PrimaryT::getSizeByClassId(ClassId));
+ storePrimaryAllocationStackMaybe(Options, Ptr);
+ } else {
+ Block = addHeaderTag(Block);
+ Ptr = addHeaderTag(Ptr);
+ if (UNLIKELY(FillContents != NoFill)) {
+ // This condition is not necessarily unlikely, but since memset is
+ // costly, we might as well mark it as such.
+ memset(Block, FillContents == ZeroFill ? 0 : PatternFillByte,
+ PrimaryT::getSizeByClassId(ClassId));
+ }
+ }
+ } else {
+ Block = addHeaderTag(Block);
+ Ptr = addHeaderTag(Ptr);
+ if (UNLIKELY(useMemoryTagging<Params>(Options))) {
+ storeTags(reinterpret_cast<uptr>(Block), reinterpret_cast<uptr>(Ptr));
+ storeSecondaryAllocationStackMaybe(Options, Ptr, Size);
}
}
@@ -480,22 +510,28 @@ public:
// being destroyed properly. Any other heap operation will do a full init.
initThreadMaybe(/*MinimalInit=*/true);
+ if (UNLIKELY(&__scudo_deallocate_hook))
+ __scudo_deallocate_hook(Ptr);
+
+ if (UNLIKELY(!Ptr))
+ return;
+
#ifdef GWP_ASAN_HOOKS
if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr))) {
GuardedAlloc.deallocate(Ptr);
+ Stats.lock();
+ Stats.add(StatFree, GuardedAllocSlotSize);
+ Stats.sub(StatAllocated, GuardedAllocSlotSize);
+ Stats.unlock();
return;
}
#endif // GWP_ASAN_HOOKS
- if (UNLIKELY(&__scudo_deallocate_hook))
- __scudo_deallocate_hook(Ptr);
-
- if (UNLIKELY(!Ptr))
- return;
if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment)))
reportMisalignedPointer(AllocatorAction::Deallocating, Ptr);
- Ptr = untagPointerMaybe(Ptr);
+ void *TaggedPtr = Ptr;
+ Ptr = getHeaderTaggedPointer(Ptr);
Chunk::UnpackedHeader Header;
Chunk::loadHeader(Cookie, Ptr, &Header);
@@ -520,7 +556,7 @@ public:
reportDeleteSizeMismatch(Ptr, DeleteSize, Size);
}
- quarantineOrDeallocateChunk(Options, Ptr, &Header, Size);
+ quarantineOrDeallocateChunk(Options, TaggedPtr, &Header, Size);
}
void *reallocate(void *OldPtr, uptr NewSize, uptr Alignment = MinAlignment) {
@@ -533,9 +569,6 @@ public:
reportAllocationSizeTooBig(NewSize, 0, MaxAllowedMallocSize);
}
- void *OldTaggedPtr = OldPtr;
- OldPtr = untagPointerMaybe(OldPtr);
-
// The following cases are handled by the C wrappers.
DCHECK_NE(OldPtr, nullptr);
DCHECK_NE(NewSize, 0);
@@ -547,10 +580,17 @@ public:
if (NewPtr)
memcpy(NewPtr, OldPtr, (NewSize < OldSize) ? NewSize : OldSize);
GuardedAlloc.deallocate(OldPtr);
+ Stats.lock();
+ Stats.add(StatFree, GuardedAllocSlotSize);
+ Stats.sub(StatAllocated, GuardedAllocSlotSize);
+ Stats.unlock();
return NewPtr;
}
#endif // GWP_ASAN_HOOKS
+ void *OldTaggedPtr = OldPtr;
+ OldPtr = getHeaderTaggedPointer(OldPtr);
+
if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(OldPtr), MinAlignment)))
reportMisalignedPointer(AllocatorAction::Reallocating, OldPtr);
@@ -570,7 +610,7 @@ public:
Chunk::Origin::Malloc);
}
- void *BlockBegin = getBlockBegin(OldPtr, &OldHeader);
+ void *BlockBegin = getBlockBegin(OldTaggedPtr, &OldHeader);
uptr BlockEnd;
uptr OldSize;
const uptr ClassId = OldHeader.ClassId;
@@ -580,25 +620,30 @@ public:
OldSize = OldHeader.SizeOrUnusedBytes;
} else {
BlockEnd = SecondaryT::getBlockEnd(BlockBegin);
- OldSize = BlockEnd -
- (reinterpret_cast<uptr>(OldPtr) + OldHeader.SizeOrUnusedBytes);
+ OldSize = BlockEnd - (reinterpret_cast<uptr>(OldTaggedPtr) +
+ OldHeader.SizeOrUnusedBytes);
}
// If the new chunk still fits in the previously allocated block (with a
// reasonable delta), we just keep the old block, and update the chunk
// header to reflect the size change.
- if (reinterpret_cast<uptr>(OldPtr) + NewSize <= BlockEnd) {
+ if (reinterpret_cast<uptr>(OldTaggedPtr) + NewSize <= BlockEnd) {
if (NewSize > OldSize || (OldSize - NewSize) < getPageSizeCached()) {
Chunk::UnpackedHeader NewHeader = OldHeader;
NewHeader.SizeOrUnusedBytes =
(ClassId ? NewSize
- : BlockEnd - (reinterpret_cast<uptr>(OldPtr) + NewSize)) &
+ : BlockEnd -
+ (reinterpret_cast<uptr>(OldTaggedPtr) + NewSize)) &
Chunk::SizeOrUnusedBytesMask;
Chunk::compareExchangeHeader(Cookie, OldPtr, &NewHeader, &OldHeader);
- if (UNLIKELY(ClassId && useMemoryTagging<Params>(Options))) {
- resizeTaggedChunk(reinterpret_cast<uptr>(OldTaggedPtr) + OldSize,
- reinterpret_cast<uptr>(OldTaggedPtr) + NewSize,
- BlockEnd);
- storeAllocationStackMaybe(Options, OldPtr);
+ if (UNLIKELY(useMemoryTagging<Params>(Options))) {
+ if (ClassId) {
+ resizeTaggedChunk(reinterpret_cast<uptr>(OldTaggedPtr) + OldSize,
+ reinterpret_cast<uptr>(OldTaggedPtr) + NewSize,
+ NewSize, untagPointer(BlockEnd));
+ storePrimaryAllocationStackMaybe(Options, OldPtr);
+ } else {
+ storeSecondaryAllocationStackMaybe(Options, OldPtr, NewSize);
+ }
}
return OldTaggedPtr;
}
@@ -611,7 +656,7 @@ public:
void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
if (LIKELY(NewPtr)) {
memcpy(NewPtr, OldTaggedPtr, Min(NewSize, OldSize));
- quarantineOrDeallocateChunk(Options, OldPtr, &OldHeader, OldSize);
+ quarantineOrDeallocateChunk(Options, OldTaggedPtr, &OldHeader, OldSize);
}
return NewPtr;
}
@@ -649,7 +694,7 @@ public:
// function. This can be called with a null buffer or zero size for buffer
// sizing purposes.
uptr getStats(char *Buffer, uptr Size) {
- ScopedString Str(1024);
+ ScopedString Str;
disable();
const uptr Length = getStats(&Str) + 1;
enable();
@@ -663,7 +708,7 @@ public:
}
void printStats() {
- ScopedString Str(1024);
+ ScopedString Str;
disable();
getStats(&Str);
enable();
@@ -682,16 +727,34 @@ public:
void iterateOverChunks(uptr Base, uptr Size, iterate_callback Callback,
void *Arg) {
initThreadMaybe();
+ if (archSupportsMemoryTagging())
+ Base = untagPointer(Base);
const uptr From = Base;
const uptr To = Base + Size;
- auto Lambda = [this, From, To, Callback, Arg](uptr Block) {
+ bool MayHaveTaggedPrimary = allocatorSupportsMemoryTagging<Params>() &&
+ systemSupportsMemoryTagging();
+ auto Lambda = [this, From, To, MayHaveTaggedPrimary, Callback,
+ Arg](uptr Block) {
if (Block < From || Block >= To)
return;
uptr Chunk;
Chunk::UnpackedHeader Header;
- if (getChunkFromBlock(Block, &Chunk, &Header) &&
- Header.State == Chunk::State::Allocated) {
+ if (MayHaveTaggedPrimary) {
+ // A chunk header can either have a zero tag (tagged primary) or the
+ // header tag (secondary, or untagged primary). We don't know which so
+ // try both.
+ ScopedDisableMemoryTagChecks x;
+ if (!getChunkFromBlock(Block, &Chunk, &Header) &&
+ !getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
+ return;
+ } else {
+ if (!getChunkFromBlock(addHeaderTag(Block), &Chunk, &Header))
+ return;
+ }
+ if (Header.State == Chunk::State::Allocated) {
uptr TaggedChunk = Chunk;
+ if (allocatorSupportsMemoryTagging<Params>())
+ TaggedChunk = untagPointer(TaggedChunk);
if (useMemoryTagging<Params>(Primary.Options.load()))
TaggedChunk = loadTag(Chunk);
Callback(TaggedChunk, getSize(reinterpret_cast<void *>(Chunk), &Header),
@@ -752,7 +815,7 @@ public:
return GuardedAlloc.getSize(Ptr);
#endif // GWP_ASAN_HOOKS
- Ptr = untagPointerMaybe(const_cast<void *>(Ptr));
+ Ptr = getHeaderTaggedPointer(const_cast<void *>(Ptr));
Chunk::UnpackedHeader Header;
Chunk::loadHeader(Cookie, Ptr, &Header);
// Getting the usable size of a chunk only makes sense if it's allocated.
@@ -777,7 +840,7 @@ public:
#endif // GWP_ASAN_HOOKS
if (!Ptr || !isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment))
return false;
- Ptr = untagPointerMaybe(const_cast<void *>(Ptr));
+ Ptr = getHeaderTaggedPointer(const_cast<void *>(Ptr));
Chunk::UnpackedHeader Header;
return Chunk::isValid(Cookie, Ptr, &Header) &&
Header.State == Chunk::State::Allocated;
@@ -787,8 +850,17 @@ public:
return useMemoryTagging<Params>(Primary.Options.load());
}
void disableMemoryTagging() {
- if (allocatorSupportsMemoryTagging<Params>())
+ // If we haven't been initialized yet, we need to initialize now in order to
+ // prevent a future call to initThreadMaybe() from enabling memory tagging
+ // based on feature detection. But don't call initThreadMaybe() because it
+ // may end up calling the allocator (via pthread_atfork, via the post-init
+ // callback), which may cause mappings to be created with memory tagging
+ // enabled.
+ TSDRegistry.initOnceMaybe(this);
+ if (allocatorSupportsMemoryTagging<Params>()) {
+ Secondary.disableMemoryTagging();
Primary.Options.clear(OptionBit::UseMemoryTagging);
+ }
}
void setTrackAllocationStacks(bool Track) {
@@ -804,6 +876,14 @@ public:
Primary.Options.setFillContentsMode(FillContents);
}
+ void setAddLargeAllocationSlack(bool AddSlack) {
+ initThreadMaybe();
+ if (AddSlack)
+ Primary.Options.set(OptionBit::AddLargeAllocationSlack);
+ else
+ Primary.Options.clear(OptionBit::AddLargeAllocationSlack);
+ }
+
const char *getStackDepotAddress() const {
return reinterpret_cast<const char *>(&Depot);
}
@@ -816,116 +896,54 @@ public:
return PrimaryT::getRegionInfoArraySize();
}
+ const char *getRingBufferAddress() const {
+ return reinterpret_cast<const char *>(&RingBuffer);
+ }
+
+ static uptr getRingBufferSize() { return sizeof(RingBuffer); }
+
+ static const uptr MaxTraceSize = 64;
+
+ static void collectTraceMaybe(const StackDepot *Depot,
+ uintptr_t (&Trace)[MaxTraceSize], u32 Hash) {
+ uptr RingPos, Size;
+ if (!Depot->find(Hash, &RingPos, &Size))
+ return;
+ for (unsigned I = 0; I != Size && I != MaxTraceSize; ++I)
+ Trace[I] = (*Depot)[RingPos + I];
+ }
+
static void getErrorInfo(struct scudo_error_info *ErrorInfo,
uintptr_t FaultAddr, const char *DepotPtr,
- const char *RegionInfoPtr, const char *Memory,
- const char *MemoryTags, uintptr_t MemoryAddr,
- size_t MemorySize) {
+ const char *RegionInfoPtr, const char *RingBufferPtr,
+ const char *Memory, const char *MemoryTags,
+ uintptr_t MemoryAddr, size_t MemorySize) {
*ErrorInfo = {};
if (!allocatorSupportsMemoryTagging<Params>() ||
MemoryAddr + MemorySize < MemoryAddr)
return;
- uptr UntaggedFaultAddr = untagPointer(FaultAddr);
- u8 FaultAddrTag = extractTag(FaultAddr);
- BlockInfo Info =
- PrimaryT::findNearestBlock(RegionInfoPtr, UntaggedFaultAddr);
-
- auto GetGranule = [&](uptr Addr, const char **Data, uint8_t *Tag) -> bool {
- if (Addr < MemoryAddr || Addr + archMemoryTagGranuleSize() < Addr ||
- Addr + archMemoryTagGranuleSize() > MemoryAddr + MemorySize)
- return false;
- *Data = &Memory[Addr - MemoryAddr];
- *Tag = static_cast<u8>(
- MemoryTags[(Addr - MemoryAddr) / archMemoryTagGranuleSize()]);
- return true;
- };
-
- auto ReadBlock = [&](uptr Addr, uptr *ChunkAddr,
- Chunk::UnpackedHeader *Header, const u32 **Data,
- u8 *Tag) {
- const char *BlockBegin;
- u8 BlockBeginTag;
- if (!GetGranule(Addr, &BlockBegin, &BlockBeginTag))
- return false;
- uptr ChunkOffset = getChunkOffsetFromBlock(BlockBegin);
- *ChunkAddr = Addr + ChunkOffset;
-
- const char *ChunkBegin;
- if (!GetGranule(*ChunkAddr, &ChunkBegin, Tag))
- return false;
- *Header = *reinterpret_cast<const Chunk::UnpackedHeader *>(
- ChunkBegin - Chunk::getHeaderSize());
- *Data = reinterpret_cast<const u32 *>(ChunkBegin);
- return true;
- };
-
auto *Depot = reinterpret_cast<const StackDepot *>(DepotPtr);
-
- auto MaybeCollectTrace = [&](uintptr_t(&Trace)[MaxTraceSize], u32 Hash) {
- uptr RingPos, Size;
- if (!Depot->find(Hash, &RingPos, &Size))
- return;
- for (unsigned I = 0; I != Size && I != MaxTraceSize; ++I)
- Trace[I] = (*Depot)[RingPos + I];
- };
-
size_t NextErrorReport = 0;
- // First, check for UAF.
- {
- uptr ChunkAddr;
- Chunk::UnpackedHeader Header;
- const u32 *Data;
- uint8_t Tag;
- if (ReadBlock(Info.BlockBegin, &ChunkAddr, &Header, &Data, &Tag) &&
- Header.State != Chunk::State::Allocated &&
- Data[MemTagPrevTagIndex] == FaultAddrTag) {
- auto *R = &ErrorInfo->reports[NextErrorReport++];
- R->error_type = USE_AFTER_FREE;
- R->allocation_address = ChunkAddr;
- R->allocation_size = Header.SizeOrUnusedBytes;
- MaybeCollectTrace(R->allocation_trace,
- Data[MemTagAllocationTraceIndex]);
- R->allocation_tid = Data[MemTagAllocationTidIndex];
- MaybeCollectTrace(R->deallocation_trace,
- Data[MemTagDeallocationTraceIndex]);
- R->deallocation_tid = Data[MemTagDeallocationTidIndex];
- }
- }
-
- auto CheckOOB = [&](uptr BlockAddr) {
- if (BlockAddr < Info.RegionBegin || BlockAddr >= Info.RegionEnd)
- return false;
-
- uptr ChunkAddr;
- Chunk::UnpackedHeader Header;
- const u32 *Data;
- uint8_t Tag;
- if (!ReadBlock(BlockAddr, &ChunkAddr, &Header, &Data, &Tag) ||
- Header.State != Chunk::State::Allocated || Tag != FaultAddrTag)
- return false;
-
- auto *R = &ErrorInfo->reports[NextErrorReport++];
- R->error_type =
- UntaggedFaultAddr < ChunkAddr ? BUFFER_UNDERFLOW : BUFFER_OVERFLOW;
- R->allocation_address = ChunkAddr;
- R->allocation_size = Header.SizeOrUnusedBytes;
- MaybeCollectTrace(R->allocation_trace, Data[MemTagAllocationTraceIndex]);
- R->allocation_tid = Data[MemTagAllocationTidIndex];
- return NextErrorReport ==
- sizeof(ErrorInfo->reports) / sizeof(ErrorInfo->reports[0]);
- };
-
- if (CheckOOB(Info.BlockBegin))
- return;
-
- // Check for OOB in the 30 surrounding blocks. Beyond that we are likely to
- // hit false positives.
- for (int I = 1; I != 16; ++I)
- if (CheckOOB(Info.BlockBegin + I * Info.BlockSize) ||
- CheckOOB(Info.BlockBegin - I * Info.BlockSize))
- return;
+ // Check for OOB in the current block and the two surrounding blocks. Beyond
+ // that, UAF is more likely.
+ if (extractTag(FaultAddr) != 0)
+ getInlineErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
+ RegionInfoPtr, Memory, MemoryTags, MemoryAddr,
+ MemorySize, 0, 2);
+
+ // Check the ring buffer. For primary allocations this will only find UAF;
+ // for secondary allocations we can find either UAF or OOB.
+ getRingBufferErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
+ RingBufferPtr);
+
+ // Check for OOB in the 28 blocks surrounding the 3 we checked earlier.
+ // Beyond that we are likely to hit false positives.
+ if (extractTag(FaultAddr) != 0)
+ getInlineErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
+ RegionInfoPtr, Memory, MemoryTags, MemoryAddr,
+ MemorySize, 2, 16);
}
private:
@@ -949,39 +967,51 @@ private:
// These are indexes into an "array" of 32-bit values that store information
// inline with a chunk that is relevant to diagnosing memory tag faults, where
- // 0 corresponds to the address of the user memory. This means that negative
- // indexes may be used to store information about allocations, while positive
- // indexes may only be used to store information about deallocations, because
- // the user memory is in use until it has been deallocated. The smallest index
- // that may be used is -2, which corresponds to 8 bytes before the user
- // memory, because the chunk header size is 8 bytes and in allocators that
- // support memory tagging the minimum alignment is at least the tag granule
- // size (16 on aarch64), and the largest index that may be used is 3 because
- // we are only guaranteed to have at least a granule's worth of space in the
- // user memory.
+ // 0 corresponds to the address of the user memory. This means that only
+ // negative indexes may be used. The smallest index that may be used is -2,
+ // which corresponds to 8 bytes before the user memory, because the chunk
+ // header size is 8 bytes and in allocators that support memory tagging the
+ // minimum alignment is at least the tag granule size (16 on aarch64).
static const sptr MemTagAllocationTraceIndex = -2;
static const sptr MemTagAllocationTidIndex = -1;
- static const sptr MemTagDeallocationTraceIndex = 0;
- static const sptr MemTagDeallocationTidIndex = 1;
- static const sptr MemTagPrevTagIndex = 2;
-
- static const uptr MaxTraceSize = 64;
- u32 Cookie;
- u32 QuarantineMaxChunkSize;
+ u32 Cookie = 0;
+ u32 QuarantineMaxChunkSize = 0;
GlobalStats Stats;
PrimaryT Primary;
SecondaryT Secondary;
QuarantineT Quarantine;
TSDRegistryT TSDRegistry;
+ pthread_once_t PostInitNonce = PTHREAD_ONCE_INIT;
#ifdef GWP_ASAN_HOOKS
gwp_asan::GuardedPoolAllocator GuardedAlloc;
+ uptr GuardedAllocSlotSize = 0;
#endif // GWP_ASAN_HOOKS
StackDepot Depot;
+ struct AllocationRingBuffer {
+ struct Entry {
+ atomic_uptr Ptr;
+ atomic_uptr AllocationSize;
+ atomic_u32 AllocationTrace;
+ atomic_u32 AllocationTid;
+ atomic_u32 DeallocationTrace;
+ atomic_u32 DeallocationTid;
+ };
+
+ atomic_uptr Pos;
+#ifdef SCUDO_FUZZ
+ static const uptr NumEntries = 2;
+#else
+ static const uptr NumEntries = 32768;
+#endif
+ Entry Entries[NumEntries];
+ };
+ AllocationRingBuffer RingBuffer = {};
+
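For scale, the ring buffer is modest: each entry is two pointer-sized fields plus four u32s. Assuming a 64-bit uptr, that is 32 bytes per entry and about 1 MiB for the 32768-entry non-fuzzing configuration:

    #include <cstdint>

    struct EntrySketch {
      uint64_t Ptr, AllocationSize;                // atomic_uptr in the patch
      uint32_t AllocationTrace, AllocationTid;     // atomic_u32
      uint32_t DeallocationTrace, DeallocationTid; // atomic_u32
    };
    static_assert(sizeof(EntrySketch) == 32, "32 bytes per entry");
    static_assert(sizeof(EntrySketch) * 32768 == (1u << 20), "about 1 MiB of entries");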
// The following might get optimized out by the compiler.
NOINLINE void performSanityChecks() {
// Verify that the header offset field can hold the maximum offset. In the
@@ -1029,36 +1059,50 @@ private:
const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
if (LIKELY(Header->ClassId))
return SizeOrUnusedBytes;
+ if (allocatorSupportsMemoryTagging<Params>())
+ Ptr = untagPointer(const_cast<void *>(Ptr));
return SecondaryT::getBlockEnd(getBlockBegin(Ptr, Header)) -
reinterpret_cast<uptr>(Ptr) - SizeOrUnusedBytes;
}
- void quarantineOrDeallocateChunk(Options Options, void *Ptr,
+ void quarantineOrDeallocateChunk(Options Options, void *TaggedPtr,
Chunk::UnpackedHeader *Header, uptr Size) {
+ void *Ptr = getHeaderTaggedPointer(TaggedPtr);
Chunk::UnpackedHeader NewHeader = *Header;
- if (UNLIKELY(NewHeader.ClassId && useMemoryTagging<Params>(Options))) {
- u8 PrevTag = extractTag(loadTag(reinterpret_cast<uptr>(Ptr)));
- if (!TSDRegistry.getDisableMemInit()) {
- uptr TaggedBegin, TaggedEnd;
- const uptr OddEvenMask = computeOddEvenMaskForPointerMaybe(
- Options, reinterpret_cast<uptr>(getBlockBegin(Ptr, &NewHeader)),
- SizeClassMap::getSizeByClassId(NewHeader.ClassId));
- // Exclude the previous tag so that immediate use after free is detected
- // 100% of the time.
- setRandomTag(Ptr, Size, OddEvenMask | (1UL << PrevTag), &TaggedBegin,
- &TaggedEnd);
- }
- NewHeader.OriginOrWasZeroed = !TSDRegistry.getDisableMemInit();
- storeDeallocationStackMaybe(Options, Ptr, PrevTag);
- }
// If the quarantine is disabled, or if the actual size of a chunk is 0 or
// larger than the maximum allowed, we return a chunk directly to the backend.
// This purposefully underflows for Size == 0.
- const bool BypassQuarantine =
- !Quarantine.getCacheSize() || ((Size - 1) >= QuarantineMaxChunkSize);
- if (BypassQuarantine) {
+ const bool BypassQuarantine = !Quarantine.getCacheSize() ||
+ ((Size - 1) >= QuarantineMaxChunkSize) ||
+ !NewHeader.ClassId;
+ if (BypassQuarantine)
NewHeader.State = Chunk::State::Available;
- Chunk::compareExchangeHeader(Cookie, Ptr, &NewHeader, Header);
+ else
+ NewHeader.State = Chunk::State::Quarantined;
+ NewHeader.OriginOrWasZeroed = useMemoryTagging<Params>(Options) &&
+ NewHeader.ClassId &&
+ !TSDRegistry.getDisableMemInit();
+ Chunk::compareExchangeHeader(Cookie, Ptr, &NewHeader, Header);
+
+ if (UNLIKELY(useMemoryTagging<Params>(Options))) {
+ u8 PrevTag = extractTag(reinterpret_cast<uptr>(TaggedPtr));
+ storeDeallocationStackMaybe(Options, Ptr, PrevTag, Size);
+ if (NewHeader.ClassId) {
+ if (!TSDRegistry.getDisableMemInit()) {
+ uptr TaggedBegin, TaggedEnd;
+ const uptr OddEvenMask = computeOddEvenMaskForPointerMaybe(
+ Options, reinterpret_cast<uptr>(getBlockBegin(Ptr, &NewHeader)),
+ NewHeader.ClassId);
+ // Exclude the previous tag so that immediate use after free is
+ // detected 100% of the time.
+ setRandomTag(Ptr, Size, OddEvenMask | (1UL << PrevTag), &TaggedBegin,
+ &TaggedEnd);
+ }
+ }
+ }
+ if (BypassQuarantine) {
+ if (allocatorSupportsMemoryTagging<Params>())
+ Ptr = untagPointer(Ptr);
void *BlockBegin = getBlockBegin(Ptr, &NewHeader);
const uptr ClassId = NewHeader.ClassId;
if (LIKELY(ClassId)) {
@@ -1068,11 +1112,12 @@ private:
if (UnlockRequired)
TSD->unlock();
} else {
- Secondary.deallocate(BlockBegin);
+ if (UNLIKELY(useMemoryTagging<Params>(Options)))
+ storeTags(reinterpret_cast<uptr>(BlockBegin),
+ reinterpret_cast<uptr>(Ptr));
+ Secondary.deallocate(Options, BlockBegin);
}
} else {
- NewHeader.State = Chunk::State::Quarantined;
- Chunk::compareExchangeHeader(Cookie, Ptr, &NewHeader, Header);
bool UnlockRequired;
auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
Quarantine.put(&TSD->QuarantineCache,
@@ -1096,7 +1141,62 @@ private:
return Offset + Chunk::getHeaderSize();
}
- void storeAllocationStackMaybe(Options Options, void *Ptr) {
+ // Set the tag of the granule past the end of the allocation to 0, to catch
+ // linear overflows even if a previous larger allocation used the same block
+ // and tag. Only do this if the granule past the end is in our block, because
+ // this would otherwise lead to a SEGV if the allocation covers the entire
+ // block and our block is at the end of a mapping. The tag of the next block's
+ // header granule will be set to 0, so it will serve the purpose of catching
+ // linear overflows in this case.
+ //
+ // For allocations of size 0 we do not end up storing the address tag to the
+ // memory tag space, which getInlineErrorInfo() normally relies on to match
+ // address tags against chunks. To allow matching in this case we store the
+ // address tag in the first byte of the chunk.
+ void storeEndMarker(uptr End, uptr Size, uptr BlockEnd) {
+ DCHECK_EQ(BlockEnd, untagPointer(BlockEnd));
+ uptr UntaggedEnd = untagPointer(End);
+ if (UntaggedEnd != BlockEnd) {
+ storeTag(UntaggedEnd);
+ if (Size == 0)
+ *reinterpret_cast<u8 *>(UntaggedEnd) = extractTag(End);
+ }
+ }
+
+ void *prepareTaggedChunk(void *Ptr, uptr Size, uptr ExcludeMask,
+ uptr BlockEnd) {
+ // Prepare the granule before the chunk to store the chunk header by setting
+ // its tag to 0. Normally its tag will already be 0, but in the case where a
+ // chunk holding a low alignment allocation is reused for a higher alignment
+ // allocation, the chunk may already have a non-zero tag from the previous
+ // allocation.
+ storeTag(reinterpret_cast<uptr>(Ptr) - archMemoryTagGranuleSize());
+
+ uptr TaggedBegin, TaggedEnd;
+ setRandomTag(Ptr, Size, ExcludeMask, &TaggedBegin, &TaggedEnd);
+
+ storeEndMarker(TaggedEnd, Size, BlockEnd);
+ return reinterpret_cast<void *>(TaggedBegin);
+ }
+
+ void resizeTaggedChunk(uptr OldPtr, uptr NewPtr, uptr NewSize,
+ uptr BlockEnd) {
+ uptr RoundOldPtr = roundUpTo(OldPtr, archMemoryTagGranuleSize());
+ uptr RoundNewPtr;
+ if (RoundOldPtr >= NewPtr) {
+ // If the allocation is shrinking we just need to set the tag past the end
+ // of the allocation to 0. See explanation in storeEndMarker() above.
+ RoundNewPtr = roundUpTo(NewPtr, archMemoryTagGranuleSize());
+ } else {
+ // Set the memory tag of the region
+ // [RoundOldPtr, roundUpTo(NewPtr, archMemoryTagGranuleSize()))
+ // to the pointer tag stored in OldPtr.
+ RoundNewPtr = storeTags(RoundOldPtr, NewPtr);
+ }
+ storeEndMarker(RoundNewPtr, NewSize, BlockEnd);
+ }
+
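resizeTaggedChunk and storeEndMarker work at tag-granule resolution. A small worked example of the rounding involved, assuming the 16-byte granule of AArch64 MTE and user data starting on a granule boundary:

    #include <cstdint>

    constexpr uintptr_t kGranule = 16;

    constexpr uintptr_t roundUpToGranule(uintptr_t X) {
      return (X + kGranule - 1) & ~(kGranule - 1);
    }

    // Growing a chunk from 20 to 40 bytes: granules in [32, 48) are retagged to
    // the chunk's tag, then the granule at 48 becomes the zero-tagged end marker,
    // unless 48 is already the block end, in which case the next block's
    // zero-tagged header granule plays that role.
    static_assert(roundUpToGranule(20) == 32, "old end rounds up to 32");
    static_assert(roundUpToGranule(40) == 48, "new end rounds up to 48");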
+ void storePrimaryAllocationStackMaybe(Options Options, void *Ptr) {
if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
return;
auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
@@ -1104,18 +1204,217 @@ private:
Ptr32[MemTagAllocationTidIndex] = getThreadID();
}
- void storeDeallocationStackMaybe(Options Options, void *Ptr,
- uint8_t PrevTag) {
+ void storeRingBufferEntry(void *Ptr, u32 AllocationTrace, u32 AllocationTid,
+ uptr AllocationSize, u32 DeallocationTrace,
+ u32 DeallocationTid) {
+ uptr Pos = atomic_fetch_add(&RingBuffer.Pos, 1, memory_order_relaxed);
+ typename AllocationRingBuffer::Entry *Entry =
+ &RingBuffer.Entries[Pos % AllocationRingBuffer::NumEntries];
+
+ // First invalidate our entry so that we don't attempt to interpret a
+ // partially written state in getRingBufferErrorInfo(). The fences below
+ // ensure that the compiler does not move the stores to Ptr in between the
+ // stores to the other fields.
+ atomic_store_relaxed(&Entry->Ptr, 0);
+
+ __atomic_signal_fence(__ATOMIC_SEQ_CST);
+ atomic_store_relaxed(&Entry->AllocationTrace, AllocationTrace);
+ atomic_store_relaxed(&Entry->AllocationTid, AllocationTid);
+ atomic_store_relaxed(&Entry->AllocationSize, AllocationSize);
+ atomic_store_relaxed(&Entry->DeallocationTrace, DeallocationTrace);
+ atomic_store_relaxed(&Entry->DeallocationTid, DeallocationTid);
+ __atomic_signal_fence(__ATOMIC_SEQ_CST);
+
+ atomic_store_relaxed(&Entry->Ptr, reinterpret_cast<uptr>(Ptr));
+ }
+
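The publishing discipline in storeRingBufferEntry is: invalidate the pointer, write the payload, then re-publish the pointer, with compiler-only (signal) fences in between. That only constrains the compiler, which is consistent with the reader examining a memory snapshot rather than racing live threads (my reading; the patch itself only states the ordering requirement). A stripped-down sketch of the same pattern:

    #include <atomic>
    #include <cstdint>

    struct EntrySketch {
      std::atomic<uintptr_t> Ptr{0};
      std::atomic<uint32_t> Trace{0};
      std::atomic<uint32_t> Tid{0};
    };

    void publish(EntrySketch &E, uintptr_t P, uint32_t Trace, uint32_t Tid) {
      // 1. Invalidate, so a reader never pairs the old pointer with new fields.
      E.Ptr.store(0, std::memory_order_relaxed);
      // 2. Compiler-only fence: keep the payload stores between the Ptr stores.
      std::atomic_signal_fence(std::memory_order_seq_cst);
      E.Trace.store(Trace, std::memory_order_relaxed);
      E.Tid.store(Tid, std::memory_order_relaxed);
      std::atomic_signal_fence(std::memory_order_seq_cst);
      // 3. Re-publish last; a zero Ptr means "ignore this entry".
      E.Ptr.store(P, std::memory_order_relaxed);
    }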
+ void storeSecondaryAllocationStackMaybe(Options Options, void *Ptr,
+ uptr Size) {
+ if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
+ return;
+
+ u32 Trace = collectStackTrace();
+ u32 Tid = getThreadID();
+
+ auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
+ Ptr32[MemTagAllocationTraceIndex] = Trace;
+ Ptr32[MemTagAllocationTidIndex] = Tid;
+
+ storeRingBufferEntry(untagPointer(Ptr), Trace, Tid, Size, 0, 0);
+ }
+
+ void storeDeallocationStackMaybe(Options Options, void *Ptr, u8 PrevTag,
+ uptr Size) {
if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
return;
- // Disable tag checks here so that we don't need to worry about zero sized
- // allocations.
- ScopedDisableMemoryTagChecks x;
auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
- Ptr32[MemTagDeallocationTraceIndex] = collectStackTrace();
- Ptr32[MemTagDeallocationTidIndex] = getThreadID();
- Ptr32[MemTagPrevTagIndex] = PrevTag;
+ u32 AllocationTrace = Ptr32[MemTagAllocationTraceIndex];
+ u32 AllocationTid = Ptr32[MemTagAllocationTidIndex];
+
+ u32 DeallocationTrace = collectStackTrace();
+ u32 DeallocationTid = getThreadID();
+
+ storeRingBufferEntry(addFixedTag(untagPointer(Ptr), PrevTag),
+ AllocationTrace, AllocationTid, Size,
+ DeallocationTrace, DeallocationTid);
+ }
+
+ static const size_t NumErrorReports =
+ sizeof(((scudo_error_info *)0)->reports) /
+ sizeof(((scudo_error_info *)0)->reports[0]);
+
+ static void getInlineErrorInfo(struct scudo_error_info *ErrorInfo,
+ size_t &NextErrorReport, uintptr_t FaultAddr,
+ const StackDepot *Depot,
+ const char *RegionInfoPtr, const char *Memory,
+ const char *MemoryTags, uintptr_t MemoryAddr,
+ size_t MemorySize, size_t MinDistance,
+ size_t MaxDistance) {
+ uptr UntaggedFaultAddr = untagPointer(FaultAddr);
+ u8 FaultAddrTag = extractTag(FaultAddr);
+ BlockInfo Info =
+ PrimaryT::findNearestBlock(RegionInfoPtr, UntaggedFaultAddr);
+
+ auto GetGranule = [&](uptr Addr, const char **Data, uint8_t *Tag) -> bool {
+ if (Addr < MemoryAddr || Addr + archMemoryTagGranuleSize() < Addr ||
+ Addr + archMemoryTagGranuleSize() > MemoryAddr + MemorySize)
+ return false;
+ *Data = &Memory[Addr - MemoryAddr];
+ *Tag = static_cast<u8>(
+ MemoryTags[(Addr - MemoryAddr) / archMemoryTagGranuleSize()]);
+ return true;
+ };
+
+ auto ReadBlock = [&](uptr Addr, uptr *ChunkAddr,
+ Chunk::UnpackedHeader *Header, const u32 **Data,
+ u8 *Tag) {
+ const char *BlockBegin;
+ u8 BlockBeginTag;
+ if (!GetGranule(Addr, &BlockBegin, &BlockBeginTag))
+ return false;
+ uptr ChunkOffset = getChunkOffsetFromBlock(BlockBegin);
+ *ChunkAddr = Addr + ChunkOffset;
+
+ const char *ChunkBegin;
+ if (!GetGranule(*ChunkAddr, &ChunkBegin, Tag))
+ return false;
+ *Header = *reinterpret_cast<const Chunk::UnpackedHeader *>(
+ ChunkBegin - Chunk::getHeaderSize());
+ *Data = reinterpret_cast<const u32 *>(ChunkBegin);
+
+ // Allocations of size 0 will have stashed the tag in the first byte of
+ // the chunk, see storeEndMarker().
+ if (Header->SizeOrUnusedBytes == 0)
+ *Tag = static_cast<u8>(*ChunkBegin);
+
+ return true;
+ };
+
+ if (NextErrorReport == NumErrorReports)
+ return;
+
+ auto CheckOOB = [&](uptr BlockAddr) {
+ if (BlockAddr < Info.RegionBegin || BlockAddr >= Info.RegionEnd)
+ return false;
+
+ uptr ChunkAddr;
+ Chunk::UnpackedHeader Header;
+ const u32 *Data;
+ uint8_t Tag;
+ if (!ReadBlock(BlockAddr, &ChunkAddr, &Header, &Data, &Tag) ||
+ Header.State != Chunk::State::Allocated || Tag != FaultAddrTag)
+ return false;
+
+ auto *R = &ErrorInfo->reports[NextErrorReport++];
+ R->error_type =
+ UntaggedFaultAddr < ChunkAddr ? BUFFER_UNDERFLOW : BUFFER_OVERFLOW;
+ R->allocation_address = ChunkAddr;
+ R->allocation_size = Header.SizeOrUnusedBytes;
+ collectTraceMaybe(Depot, R->allocation_trace,
+ Data[MemTagAllocationTraceIndex]);
+ R->allocation_tid = Data[MemTagAllocationTidIndex];
+ return NextErrorReport == NumErrorReports;
+ };
+
+ if (MinDistance == 0 && CheckOOB(Info.BlockBegin))
+ return;
+
+ for (size_t I = Max<size_t>(MinDistance, 1); I != MaxDistance; ++I)
+ if (CheckOOB(Info.BlockBegin + I * Info.BlockSize) ||
+ CheckOOB(Info.BlockBegin - I * Info.BlockSize))
+ return;
+ }
+
+ static void getRingBufferErrorInfo(struct scudo_error_info *ErrorInfo,
+ size_t &NextErrorReport,
+ uintptr_t FaultAddr,
+ const StackDepot *Depot,
+ const char *RingBufferPtr) {
+ auto *RingBuffer =
+ reinterpret_cast<const AllocationRingBuffer *>(RingBufferPtr);
+ uptr Pos = atomic_load_relaxed(&RingBuffer->Pos);
+
+ for (uptr I = Pos - 1; I != Pos - 1 - AllocationRingBuffer::NumEntries &&
+ NextErrorReport != NumErrorReports;
+ --I) {
+ auto *Entry = &RingBuffer->Entries[I % AllocationRingBuffer::NumEntries];
+ uptr EntryPtr = atomic_load_relaxed(&Entry->Ptr);
+ if (!EntryPtr)
+ continue;
+
+ uptr UntaggedEntryPtr = untagPointer(EntryPtr);
+ uptr EntrySize = atomic_load_relaxed(&Entry->AllocationSize);
+ u32 AllocationTrace = atomic_load_relaxed(&Entry->AllocationTrace);
+ u32 AllocationTid = atomic_load_relaxed(&Entry->AllocationTid);
+ u32 DeallocationTrace = atomic_load_relaxed(&Entry->DeallocationTrace);
+ u32 DeallocationTid = atomic_load_relaxed(&Entry->DeallocationTid);
+
+ if (DeallocationTid) {
+ // For UAF we only consider in-bounds fault addresses because
+ // out-of-bounds UAF is rare and attempting to detect it is very likely
+ // to result in false positives.
+ if (FaultAddr < EntryPtr || FaultAddr >= EntryPtr + EntrySize)
+ continue;
+ } else {
+ // Ring buffer OOB is only possible with secondary allocations. In this
+ // case we are guaranteed a guard region of at least a page on either
+ // side of the allocation (guard page on the right, guard page + tagged
+ // region on the left), so ignore any faults outside of that range.
+ if (FaultAddr < EntryPtr - getPageSizeCached() ||
+ FaultAddr >= EntryPtr + EntrySize + getPageSizeCached())
+ continue;
+
+ // For UAF the ring buffer will contain two entries, one for the
+ // allocation and another for the deallocation. Don't report buffer
+ // overflow/underflow using the allocation entry if we have already
+ // collected a report from the deallocation entry.
+ bool Found = false;
+ for (uptr J = 0; J != NextErrorReport; ++J) {
+ if (ErrorInfo->reports[J].allocation_address == UntaggedEntryPtr) {
+ Found = true;
+ break;
+ }
+ }
+ if (Found)
+ continue;
+ }
+
+ auto *R = &ErrorInfo->reports[NextErrorReport++];
+ if (DeallocationTid)
+ R->error_type = USE_AFTER_FREE;
+ else if (FaultAddr < EntryPtr)
+ R->error_type = BUFFER_UNDERFLOW;
+ else
+ R->error_type = BUFFER_OVERFLOW;
+
+ R->allocation_address = UntaggedEntryPtr;
+ R->allocation_size = EntrySize;
+ collectTraceMaybe(Depot, R->allocation_trace, AllocationTrace);
+ R->allocation_tid = AllocationTid;
+ collectTraceMaybe(Depot, R->deallocation_trace, DeallocationTrace);
+ R->deallocation_tid = DeallocationTid;
+ }
}
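The backwards scan in getRingBufferErrorInfo leans on unsigned wrap-around: it starts at Pos - 1, stops after exactly NumEntries steps, and reduces each index modulo NumEntries; never-written slots are skipped by the Ptr == 0 check. A tiny standalone illustration with made-up values:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t NumEntries = 8;
      const uint64_t Pos = 3; // entries ever written; may exceed NumEntries
      // Visits the NumEntries most recent slot indices, newest first:
      // 2, 1, 0, 7, 6, 5, 4, 3.
      for (uint64_t I = Pos - 1; I != Pos - 1 - NumEntries; --I)
        printf("slot %llu\n", (unsigned long long)(I % NumEntries));
      return 0;
    }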
uptr getStats(ScopedString *Str) {
diff --git a/compiler-rt/lib/scudo/standalone/common.cpp b/compiler-rt/lib/scudo/standalone/common.cpp
index d93bfc59b3ca..666f95400c7e 100644
--- a/compiler-rt/lib/scudo/standalone/common.cpp
+++ b/compiler-rt/lib/scudo/standalone/common.cpp
@@ -8,6 +8,7 @@
#include "common.h"
#include "atomic_helpers.h"
+#include "string_utils.h"
namespace scudo {
@@ -21,11 +22,16 @@ uptr getPageSizeSlow() {
}
// Fatal internal map() or unmap() error (potentially OOM related).
-void NORETURN dieOnMapUnmapError(bool OutOfMemory) {
- outputRaw("Scudo ERROR: internal map or unmap failure");
- if (OutOfMemory)
- outputRaw(" (OOM)");
- outputRaw("\n");
+void NORETURN dieOnMapUnmapError(uptr SizeIfOOM) {
+ char Error[128] = "Scudo ERROR: internal map or unmap failure\n";
+ if (SizeIfOOM) {
+ formatString(
+ Error, sizeof(Error),
+ "Scudo ERROR: internal map failure (NO MEMORY) requesting %zuKB\n",
+ SizeIfOOM >> 10);
+ }
+ outputRaw(Error);
+ setAbortMessage(Error);
die();
}
diff --git a/compiler-rt/lib/scudo/standalone/common.h b/compiler-rt/lib/scudo/standalone/common.h
index 662b733050bb..bc3dfec6dbba 100644
--- a/compiler-rt/lib/scudo/standalone/common.h
+++ b/compiler-rt/lib/scudo/standalone/common.h
@@ -13,6 +13,7 @@
#include "fuchsia.h"
#include "linux.h"
+#include "trusty.h"
#include <stddef.h>
#include <string.h>
@@ -165,11 +166,15 @@ void *map(void *Addr, uptr Size, const char *Name, uptr Flags = 0,
void unmap(void *Addr, uptr Size, uptr Flags = 0,
MapPlatformData *Data = nullptr);
+void setMemoryPermission(uptr Addr, uptr Size, uptr Flags,
+ MapPlatformData *Data = nullptr);
+
void releasePagesToOS(uptr BaseAddress, uptr Offset, uptr Size,
MapPlatformData *Data = nullptr);
-// Internal map & unmap fatal error. This must not call map().
-void NORETURN dieOnMapUnmapError(bool OutOfMemory = false);
+// Internal map & unmap fatal error. This must not call map(). SizeIfOOM shall
+// hold the requested size on an out-of-memory error, 0 otherwise.
+void NORETURN dieOnMapUnmapError(uptr SizeIfOOM = 0);
// Logging related functions.
diff --git a/compiler-rt/lib/scudo/standalone/flags.cpp b/compiler-rt/lib/scudo/standalone/flags.cpp
index 285143a5d6bb..de5153b288b1 100644
--- a/compiler-rt/lib/scudo/standalone/flags.cpp
+++ b/compiler-rt/lib/scudo/standalone/flags.cpp
@@ -23,6 +23,13 @@ void Flags::setDefaults() {
#define SCUDO_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "flags.inc"
#undef SCUDO_FLAG
+
+#ifdef GWP_ASAN_HOOKS
+#define GWP_ASAN_OPTION(Type, Name, DefaultValue, Description) \
+ GWP_ASAN_##Name = DefaultValue;
+#include "gwp_asan/options.inc"
+#undef GWP_ASAN_OPTION
+#endif // GWP_ASAN_HOOKS
}
void registerFlags(FlagParser *Parser, Flags *F) {
@@ -31,6 +38,14 @@ void registerFlags(FlagParser *Parser, Flags *F) {
reinterpret_cast<void *>(&F->Name));
#include "flags.inc"
#undef SCUDO_FLAG
+
+#ifdef GWP_ASAN_HOOKS
+#define GWP_ASAN_OPTION(Type, Name, DefaultValue, Description) \
+ Parser->registerFlag("GWP_ASAN_" #Name, Description, FlagType::FT_##Type, \
+ reinterpret_cast<void *>(&F->GWP_ASAN_##Name));
+#include "gwp_asan/options.inc"
+#undef GWP_ASAN_OPTION
+#endif // GWP_ASAN_HOOKS
}
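The GWP_ASAN_OPTION additions reuse the same trick as SCUDO_FLAG: define the macro, include the option list, undefine it, once per use site. A self-contained sketch of the technique with an invented option list (the names below are neither Scudo's nor GWP-ASan's, and the list is a macro here rather than a re-included .inc file, but the expansion idea is the same):

    #include <cstdio>

    // Stand-in for an options.inc file: one OPTION(Type, Name, Default, Desc) per entry.
    #define SKETCH_OPTIONS(OPTION)                                      \
      OPTION(bool, Enabled, true, "Whether the feature is on.")         \
      OPTION(int, SampleRate, 5000, "1-in-N allocations are sampled.")

    struct FlagsSketch {
    #define SKETCH_DECLARE(Type, Name, Default, Desc) Type Name;
      SKETCH_OPTIONS(SKETCH_DECLARE)
    #undef SKETCH_DECLARE

      void setDefaults() {
    #define SKETCH_SET(Type, Name, Default, Desc) Name = Default;
        SKETCH_OPTIONS(SKETCH_SET)
    #undef SKETCH_SET
      }
    };

    int main() {
      FlagsSketch F;
      F.setDefaults();
      printf("Enabled=%d SampleRate=%d\n", F.Enabled, F.SampleRate);
      return 0;
    }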
static const char *getCompileDefinitionScudoDefaultOptions() {
diff --git a/compiler-rt/lib/scudo/standalone/flags.inc b/compiler-rt/lib/scudo/standalone/flags.inc
index b5cab4734166..690d889b8cee 100644
--- a/compiler-rt/lib/scudo/standalone/flags.inc
+++ b/compiler-rt/lib/scudo/standalone/flags.inc
@@ -37,12 +37,6 @@ SCUDO_FLAG(bool, zero_contents, false, "Zero chunk contents on allocation.")
SCUDO_FLAG(bool, pattern_fill_contents, false,
"Pattern fill chunk contents on allocation.")
-SCUDO_FLAG(int, rss_limit_mb, -1,
- "Enforce an upper limit (in megabytes) to the process RSS. The "
- "allocator will terminate or return NULL when allocations are "
- "attempted past that limit (depending on may_return_null). Negative "
- "values disable the feature.")
-
SCUDO_FLAG(bool, may_return_null, true,
"Indicate whether the allocator should terminate instead of "
"returning NULL in otherwise non-fatal error scenarios, eg: OOM, "
diff --git a/compiler-rt/lib/scudo/standalone/flags_parser.h b/compiler-rt/lib/scudo/standalone/flags_parser.h
index 32511f768c66..ba832adbd909 100644
--- a/compiler-rt/lib/scudo/standalone/flags_parser.h
+++ b/compiler-rt/lib/scudo/standalone/flags_parser.h
@@ -29,7 +29,7 @@ public:
void printFlagDescriptions();
private:
- static const u32 MaxFlags = 16;
+ static const u32 MaxFlags = 20;
struct Flag {
const char *Name;
const char *Desc;
diff --git a/compiler-rt/lib/scudo/standalone/fuchsia.cpp b/compiler-rt/lib/scudo/standalone/fuchsia.cpp
index d4ea33277941..3b473bc9e22d 100644
--- a/compiler-rt/lib/scudo/standalone/fuchsia.cpp
+++ b/compiler-rt/lib/scudo/standalone/fuchsia.cpp
@@ -15,7 +15,6 @@
#include "string_utils.h"
#include <lib/sync/mutex.h> // for sync_mutex_t
-#include <limits.h> // for PAGE_SIZE
#include <stdlib.h> // for getenv()
#include <zircon/compiler.h>
#include <zircon/sanitizer.h>
@@ -23,7 +22,7 @@
namespace scudo {
-uptr getPageSize() { return PAGE_SIZE; }
+uptr getPageSize() { return _zx_system_get_page_size(); }
void NORETURN die() { __builtin_trap(); }
@@ -42,7 +41,7 @@ static void *allocateVmar(uptr Size, MapPlatformData *Data, bool AllowNoMem) {
Size, &Data->Vmar, &Data->VmarBase);
if (UNLIKELY(Status != ZX_OK)) {
if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
- dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY);
+ dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY ? Size : 0);
return nullptr;
}
return reinterpret_cast<void *>(Data->VmarBase);
@@ -50,7 +49,7 @@ static void *allocateVmar(uptr Size, MapPlatformData *Data, bool AllowNoMem) {
void *map(void *Addr, uptr Size, const char *Name, uptr Flags,
MapPlatformData *Data) {
- DCHECK_EQ(Size % PAGE_SIZE, 0);
+ DCHECK_EQ(Size % getPageSizeCached(), 0);
const bool AllowNoMem = !!(Flags & MAP_ALLOWNOMEM);
// For MAP_NOACCESS, just allocate a Vmar and return.
@@ -72,7 +71,7 @@ void *map(void *Addr, uptr Size, const char *Name, uptr Flags,
Status = _zx_vmo_set_size(Vmo, VmoSize + Size);
if (Status != ZX_OK) {
if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
- dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY);
+ dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY ? Size : 0);
return nullptr;
}
} else {
@@ -80,7 +79,7 @@ void *map(void *Addr, uptr Size, const char *Name, uptr Flags,
Status = _zx_vmo_create(Size, ZX_VMO_RESIZABLE, &Vmo);
if (UNLIKELY(Status != ZX_OK)) {
if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
- dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY);
+ dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY ? Size : 0);
return nullptr;
}
_zx_object_set_property(Vmo, ZX_PROP_NAME, Name, strlen(Name));
@@ -97,14 +96,16 @@ void *map(void *Addr, uptr Size, const char *Name, uptr Flags,
// No need to track the Vmo if we don't intend on resizing it. Close it.
if (Flags & MAP_RESIZABLE) {
DCHECK(Data);
- DCHECK_EQ(Data->Vmo, ZX_HANDLE_INVALID);
- Data->Vmo = Vmo;
+ if (Data->Vmo == ZX_HANDLE_INVALID)
+ Data->Vmo = Vmo;
+ else
+ DCHECK_EQ(Data->Vmo, Vmo);
} else {
CHECK_EQ(_zx_handle_close(Vmo), ZX_OK);
}
if (UNLIKELY(Status != ZX_OK)) {
if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
- dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY);
+ dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY ? Size : 0);
return nullptr;
}
if (Data)
@@ -135,6 +136,16 @@ void unmap(void *Addr, uptr Size, uptr Flags, MapPlatformData *Data) {
}
}
+void setMemoryPermission(UNUSED uptr Addr, UNUSED uptr Size, UNUSED uptr Flags,
+ UNUSED MapPlatformData *Data) {
+ const zx_vm_option_t Prot =
+ (Flags & MAP_NOACCESS) ? 0 : (ZX_VM_PERM_READ | ZX_VM_PERM_WRITE);
+ DCHECK(Data);
+ DCHECK_NE(Data->Vmar, ZX_HANDLE_INVALID);
+ if (_zx_vmar_protect(Data->Vmar, Prot, Addr, Size) != ZX_OK)
+ dieOnMapUnmapError();
+}
+
void releasePagesToOS(UNUSED uptr BaseAddress, uptr Offset, uptr Size,
MapPlatformData *Data) {
DCHECK(Data);
diff --git a/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp b/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp
index f20a8a84a010..078e44b0dfc8 100644
--- a/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp
+++ b/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp
@@ -37,16 +37,24 @@ extern "C" int LLVMFuzzerTestOneInput(uint8_t *Data, size_t Size) {
StackDepot[i] = StackDepotBytes[i];
}
- std::string RegionInfoBytes = FDP.ConsumeRemainingBytesAsString();
+ std::string RegionInfoBytes =
+ FDP.ConsumeRandomLengthString(FDP.remaining_bytes());
std::vector<char> RegionInfo(AllocatorT::getRegionInfoArraySize(), 0);
for (size_t i = 0; i < RegionInfoBytes.length() && i < RegionInfo.size();
++i) {
RegionInfo[i] = RegionInfoBytes[i];
}
+ std::string RingBufferBytes = FDP.ConsumeRemainingBytesAsString();
+ std::vector<char> RingBuffer(AllocatorT::getRingBufferSize(), 0);
+ for (size_t i = 0; i < RingBufferBytes.length() && i < RingBuffer.size();
+ ++i) {
+ RingBuffer[i] = RingBufferBytes[i];
+ }
+
scudo_error_info ErrorInfo;
AllocatorT::getErrorInfo(&ErrorInfo, FaultAddr, StackDepot.data(),
- RegionInfo.data(), Memory, MemoryTags, MemoryAddr,
- MemorySize);
+ RegionInfo.data(), RingBuffer.data(), Memory,
+ MemoryTags, MemoryAddr, MemorySize);
return 0;
}
diff --git a/compiler-rt/lib/scudo/standalone/include/scudo/interface.h b/compiler-rt/lib/scudo/standalone/include/scudo/interface.h
index 68029e4857a3..9b9a84623c51 100644
--- a/compiler-rt/lib/scudo/standalone/include/scudo/interface.h
+++ b/compiler-rt/lib/scudo/standalone/include/scudo/interface.h
@@ -73,9 +73,9 @@ typedef void (*iterate_callback)(uintptr_t base, size_t size, void *arg);
// pointer.
void __scudo_get_error_info(struct scudo_error_info *error_info,
uintptr_t fault_addr, const char *stack_depot,
- const char *region_info, const char *memory,
- const char *memory_tags, uintptr_t memory_addr,
- size_t memory_size);
+ const char *region_info, const char *ring_buffer,
+ const char *memory, const char *memory_tags,
+ uintptr_t memory_addr, size_t memory_size);
enum scudo_error_type {
UNKNOWN,
@@ -107,6 +107,9 @@ size_t __scudo_get_stack_depot_size();
const char *__scudo_get_region_info_addr();
size_t __scudo_get_region_info_size();
+const char *__scudo_get_ring_buffer_addr();
+size_t __scudo_get_ring_buffer_size();
+
#ifndef M_DECAY_TIME
#define M_DECAY_TIME -100
#endif
@@ -117,7 +120,7 @@ size_t __scudo_get_region_info_size();
// Tune the allocator's choice of memory tags to make it more likely that
// a certain class of memory errors will be detected. The value argument should
-// be one of the enumerators of the scudo_memtag_tuning enum below.
+// be one of the M_MEMTAG_TUNING_* constants below.
#ifndef M_MEMTAG_TUNING
#define M_MEMTAG_TUNING -102
#endif
@@ -142,13 +145,15 @@ size_t __scudo_get_region_info_size();
#define M_TSDS_COUNT_MAX -202
#endif
-enum scudo_memtag_tuning {
- // Tune for buffer overflows.
- M_MEMTAG_TUNING_BUFFER_OVERFLOW,
+// Tune for buffer overflows.
+#ifndef M_MEMTAG_TUNING_BUFFER_OVERFLOW
+#define M_MEMTAG_TUNING_BUFFER_OVERFLOW 0
+#endif
- // Tune for use-after-free.
- M_MEMTAG_TUNING_UAF,
-};
+// Tune for use-after-free.
+#ifndef M_MEMTAG_TUNING_UAF
+#define M_MEMTAG_TUNING_UAF 1
+#endif
} // extern "C"
diff --git a/compiler-rt/lib/scudo/standalone/internal_defs.h b/compiler-rt/lib/scudo/standalone/internal_defs.h
index 0babbbe3c11b..c9ffad136b78 100644
--- a/compiler-rt/lib/scudo/standalone/internal_defs.h
+++ b/compiler-rt/lib/scudo/standalone/internal_defs.h
@@ -48,6 +48,34 @@
#define USED __attribute__((used))
#define NOEXCEPT noexcept
+// This check is only available on Clang. This is essentially an alias of
+// C++20's 'constinit' specifier which will take care of this when (if?) we can
+// ask all libc's that use Scudo to compile us with C++20. Dynamic
+// initialization is bad; Scudo is designed to be lazy-initialized on the
+// first call to malloc/free (and friends), and this generally happens in the
+// loader somewhere in libdl's init. After the loader is done, control is
+// transferred to libc's initialization, and the dynamic initializers are run.
+// If there's a dynamic initializer for Scudo, then it will clobber the
+// already-initialized Scudo, and re-initialize all its members back to default
+// values, causing various explosions. Unfortunately, marking
+// scudo::Allocator<>'s constructor as 'constexpr' isn't sufficient to prevent
+// dynamic initialization, as default initialization is fine under 'constexpr'
+// (but not 'constinit'). Clang at -O0, and gcc at all opt levels will emit a
+// dynamic initializer for any constant-initialized variables if there is a mix
+// of default-initialized and constant-initialized variables.
+//
+// If you're looking at this because your build failed, you probably introduced
+// a new member to scudo::Allocator<> (possibly transiently) that didn't have an
+// initializer. The fix is easy - just add one.
+#if defined(__has_attribute)
+#if __has_attribute(require_constant_initialization)
+#define SCUDO_REQUIRE_CONSTANT_INITIALIZATION \
+ __attribute__((__require_constant_initialization__))
+#else
+#define SCUDO_REQUIRE_CONSTANT_INITIALIZATION
+#endif
+#endif
+
namespace scudo {
typedef unsigned long uptr;
@@ -77,14 +105,11 @@ void NORETURN die();
void NORETURN reportCheckFailed(const char *File, int Line,
const char *Condition, u64 Value1, u64 Value2);
-
#define CHECK_IMPL(C1, Op, C2) \
do { \
- scudo::u64 V1 = (scudo::u64)(C1); \
- scudo::u64 V2 = (scudo::u64)(C2); \
- if (UNLIKELY(!(V1 Op V2))) { \
- scudo::reportCheckFailed(__FILE__, __LINE__, \
- "(" #C1 ") " #Op " (" #C2 ")", V1, V2); \
+ if (UNLIKELY(!(C1 Op C2))) { \
+ scudo::reportCheckFailed(__FILE__, __LINE__, #C1 " " #Op " " #C2, \
+ (scudo::u64)C1, (scudo::u64)C2); \
scudo::die(); \
} \
} while (false)
@@ -106,13 +131,27 @@ void NORETURN reportCheckFailed(const char *File, int Line,
#define DCHECK_GT(A, B) CHECK_GT(A, B)
#define DCHECK_GE(A, B) CHECK_GE(A, B)
#else
-#define DCHECK(A)
-#define DCHECK_EQ(A, B)
-#define DCHECK_NE(A, B)
-#define DCHECK_LT(A, B)
-#define DCHECK_LE(A, B)
-#define DCHECK_GT(A, B)
-#define DCHECK_GE(A, B)
+#define DCHECK(A) \
+ do { \
+ } while (false)
+#define DCHECK_EQ(A, B) \
+ do { \
+ } while (false)
+#define DCHECK_NE(A, B) \
+ do { \
+ } while (false)
+#define DCHECK_LT(A, B) \
+ do { \
+ } while (false)
+#define DCHECK_LE(A, B) \
+ do { \
+ } while (false)
+#define DCHECK_GT(A, B) \
+ do { \
+ } while (false)
+#define DCHECK_GE(A, B) \
+ do { \
+ } while (false)
#endif
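The non-debug DCHECKs now expand to do { } while (false) instead of nothing. A plausible reason (my reading, not stated in the patch): an empty expansion leaves a bare ';' behind at the call site, which can trip warnings such as -Wempty-body when the macro is the sole body of an if. Sketch with a macro name of my own:

    // With an empty macro, `if (C) DCHECK_SKETCH(x);` becomes `if (C) ;`.
    // Expanding to a do/while keeps the call site one well-formed statement.
    #define DCHECK_SKETCH(A)                                                   \
      do {                                                                     \
      } while (false)

    int clampSketch(int X) {
      if (X < 0)
        DCHECK_SKETCH(X != -1); // still a single real statement
      else
        X += 1;
      return X;
    }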
// The superfluous die() call effectively makes this macro NORETURN.
diff --git a/compiler-rt/lib/scudo/standalone/linux.cpp b/compiler-rt/lib/scudo/standalone/linux.cpp
index d2464677b279..c77c1bb600d9 100644
--- a/compiler-rt/lib/scudo/standalone/linux.cpp
+++ b/compiler-rt/lib/scudo/standalone/linux.cpp
@@ -50,27 +50,24 @@ void *map(void *Addr, uptr Size, UNUSED const char *Name, uptr Flags,
MmapProt = PROT_NONE;
} else {
MmapProt = PROT_READ | PROT_WRITE;
+ }
#if defined(__aarch64__)
#ifndef PROT_MTE
#define PROT_MTE 0x20
#endif
- if (Flags & MAP_MEMTAG)
- MmapProt |= PROT_MTE;
+ if (Flags & MAP_MEMTAG)
+ MmapProt |= PROT_MTE;
#endif
- }
- if (Addr) {
- // Currently no scenario for a noaccess mapping with a fixed address.
- DCHECK_EQ(Flags & MAP_NOACCESS, 0);
+ if (Addr)
MmapFlags |= MAP_FIXED;
- }
void *P = mmap(Addr, Size, MmapProt, MmapFlags, -1, 0);
if (P == MAP_FAILED) {
if (!(Flags & MAP_ALLOWNOMEM) || errno != ENOMEM)
- dieOnMapUnmapError(errno == ENOMEM);
+ dieOnMapUnmapError(errno == ENOMEM ? Size : 0);
return nullptr;
}
#if SCUDO_ANDROID
- if (!(Flags & MAP_NOACCESS))
+ if (Name)
prctl(ANDROID_PR_SET_VMA, ANDROID_PR_SET_VMA_ANON_NAME, P, Size, Name);
#endif
return P;
@@ -82,9 +79,17 @@ void unmap(void *Addr, uptr Size, UNUSED uptr Flags,
dieOnMapUnmapError();
}
+void setMemoryPermission(uptr Addr, uptr Size, uptr Flags,
+ UNUSED MapPlatformData *Data) {
+ int Prot = (Flags & MAP_NOACCESS) ? PROT_NONE : (PROT_READ | PROT_WRITE);
+ if (mprotect(reinterpret_cast<void *>(Addr), Size, Prot) != 0)
+ dieOnMapUnmapError();
+}
+
void releasePagesToOS(uptr BaseAddress, uptr Offset, uptr Size,
UNUSED MapPlatformData *Data) {
void *Addr = reinterpret_cast<void *>(BaseAddress + Offset);
+
while (madvise(Addr, Size, MADV_DONTNEED) == -1 && errno == EAGAIN) {
}
}
diff --git a/compiler-rt/lib/scudo/standalone/list.h b/compiler-rt/lib/scudo/standalone/list.h
index c3b898a328ca..1ac93c2f65d7 100644
--- a/compiler-rt/lib/scudo/standalone/list.h
+++ b/compiler-rt/lib/scudo/standalone/list.h
@@ -57,9 +57,9 @@ template <class T> struct IntrusiveList {
void checkConsistency() const;
protected:
- uptr Size;
- T *First;
- T *Last;
+ uptr Size = 0;
+ T *First = nullptr;
+ T *Last = nullptr;
};
template <class T> void IntrusiveList<T>::checkConsistency() const {
diff --git a/compiler-rt/lib/scudo/standalone/local_cache.h b/compiler-rt/lib/scudo/standalone/local_cache.h
index 089aeb939627..f46645f9badf 100644
--- a/compiler-rt/lib/scudo/standalone/local_cache.h
+++ b/compiler-rt/lib/scudo/standalone/local_cache.h
@@ -17,24 +17,25 @@ namespace scudo {
template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
typedef typename SizeClassAllocator::SizeClassMap SizeClassMap;
+ typedef typename SizeClassAllocator::CompactPtrT CompactPtrT;
struct TransferBatch {
static const u32 MaxNumCached = SizeClassMap::MaxNumCachedHint;
- void setFromArray(void **Array, u32 N) {
+ void setFromArray(CompactPtrT *Array, u32 N) {
DCHECK_LE(N, MaxNumCached);
Count = N;
- memcpy(Batch, Array, sizeof(void *) * Count);
+ memcpy(Batch, Array, sizeof(Batch[0]) * Count);
}
void clear() { Count = 0; }
- void add(void *P) {
+ void add(CompactPtrT P) {
DCHECK_LT(Count, MaxNumCached);
Batch[Count++] = P;
}
- void copyToArray(void **Array) const {
- memcpy(Array, Batch, sizeof(void *) * Count);
+ void copyToArray(CompactPtrT *Array) const {
+ memcpy(Array, Batch, sizeof(Batch[0]) * Count);
}
u32 getCount() const { return Count; }
- void *get(u32 I) const {
+ CompactPtrT get(u32 I) const {
DCHECK_LE(I, Count);
return Batch[I];
}
@@ -45,21 +46,17 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
private:
u32 Count;
- void *Batch[MaxNumCached];
+ CompactPtrT Batch[MaxNumCached];
};
- void initLinkerInitialized(GlobalStats *S, SizeClassAllocator *A) {
- Stats.initLinkerInitialized();
+ void init(GlobalStats *S, SizeClassAllocator *A) {
+ DCHECK(isEmpty());
+ Stats.init();
if (LIKELY(S))
S->link(&Stats);
Allocator = A;
}
- void init(GlobalStats *S, SizeClassAllocator *A) {
- memset(this, 0, sizeof(*this));
- initLinkerInitialized(S, A);
- }
-
void destroy(GlobalStats *S) {
drain();
if (LIKELY(S))
@@ -78,13 +75,10 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
// Count, while Chunks might be further off (depending on Count). That keeps
// the memory accesses in close quarters.
const uptr ClassSize = C->ClassSize;
- void *P = C->Chunks[--C->Count];
- // The jury is still out as to whether any kind of PREFETCH here increases
- // performance. It definitely decreases performance on Android though.
- // if (!SCUDO_ANDROID) PREFETCH(P);
+ CompactPtrT CompactP = C->Chunks[--C->Count];
Stats.add(StatAllocated, ClassSize);
Stats.sub(StatFree, ClassSize);
- return P;
+ return Allocator->decompactPtr(ClassId, CompactP);
}
void deallocate(uptr ClassId, void *P) {
@@ -97,22 +91,35 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
drain(C, ClassId);
// See comment in allocate() about memory accesses.
const uptr ClassSize = C->ClassSize;
- C->Chunks[C->Count++] = P;
+ C->Chunks[C->Count++] =
+ Allocator->compactPtr(ClassId, reinterpret_cast<uptr>(P));
Stats.sub(StatAllocated, ClassSize);
Stats.add(StatFree, ClassSize);
}
+ bool isEmpty() const {
+ for (uptr I = 0; I < NumClasses; ++I)
+ if (PerClassArray[I].Count)
+ return false;
+ return true;
+ }
+
void drain() {
- for (uptr I = 0; I < NumClasses; I++) {
- PerClass *C = &PerClassArray[I];
- while (C->Count > 0)
- drain(C, I);
+ // Drain BatchClassId last as createBatch can refill it.
+ for (uptr I = 0; I < NumClasses; ++I) {
+ if (I == BatchClassId)
+ continue;
+ while (PerClassArray[I].Count > 0)
+ drain(&PerClassArray[I], I);
}
+ while (PerClassArray[BatchClassId].Count > 0)
+ drain(&PerClassArray[BatchClassId], BatchClassId);
+ DCHECK(isEmpty());
}
TransferBatch *createBatch(uptr ClassId, void *B) {
- if (ClassId != SizeClassMap::BatchClassId)
- B = allocate(SizeClassMap::BatchClassId);
+ if (ClassId != BatchClassId)
+ B = allocate(BatchClassId);
return reinterpret_cast<TransferBatch *>(B);
}
@@ -120,15 +127,17 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
private:
static const uptr NumClasses = SizeClassMap::NumClasses;
+ static const uptr BatchClassId = SizeClassMap::BatchClassId;
struct PerClass {
u32 Count;
u32 MaxCount;
+ // Note: ClassSize is zero for the transfer batch.
uptr ClassSize;
- void *Chunks[2 * TransferBatch::MaxNumCached];
+ CompactPtrT Chunks[2 * TransferBatch::MaxNumCached];
};
- PerClass PerClassArray[NumClasses];
+ PerClass PerClassArray[NumClasses] = {};
LocalStats Stats;
- SizeClassAllocator *Allocator;
+ SizeClassAllocator *Allocator = nullptr;
ALWAYS_INLINE void initCacheMaybe(PerClass *C) {
if (LIKELY(C->MaxCount))
@@ -142,13 +151,19 @@ private:
PerClass *P = &PerClassArray[I];
const uptr Size = SizeClassAllocator::getSizeByClassId(I);
P->MaxCount = 2 * TransferBatch::getMaxCached(Size);
- P->ClassSize = Size;
+ if (I != BatchClassId) {
+ P->ClassSize = Size;
+ } else {
+ // ClassSize in this struct is only used for malloc/free stats, which
+ // should only track user allocations, not internal movements.
+ P->ClassSize = 0;
+ }
}
}
void destroyBatch(uptr ClassId, void *B) {
- if (ClassId != SizeClassMap::BatchClassId)
- deallocate(SizeClassMap::BatchClassId, B);
+ if (ClassId != BatchClassId)
+ deallocate(BatchClassId, B);
}
NOINLINE bool refill(PerClass *C, uptr ClassId) {
@@ -166,10 +181,10 @@ private:
NOINLINE void drain(PerClass *C, uptr ClassId) {
const u32 Count = Min(C->MaxCount / 2, C->Count);
- TransferBatch *B = createBatch(ClassId, C->Chunks[0]);
+ TransferBatch *B =
+ createBatch(ClassId, Allocator->decompactPtr(ClassId, C->Chunks[0]));
if (UNLIKELY(!B))
- reportOutOfMemory(
- SizeClassAllocator::getSizeByClassId(SizeClassMap::BatchClassId));
+ reportOutOfMemory(SizeClassAllocator::getSizeByClassId(BatchClassId));
B->setFromArray(&C->Chunks[0], Count);
C->Count -= Count;
for (uptr I = 0; I < C->Count; I++)
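
The cache now stores compact pointers instead of raw void pointers. Below is a standalone sketch of the round trip it performs on every allocate()/deallocate(), with an assumed 32-bit compact type, a 4-byte scale and an arbitrary base; the real values come from the allocator Config.

#include <cassert>
#include <cstdint>

using uptr = uintptr_t;
using CompactPtrT = uint32_t;       // assumed narrow compact type
constexpr uptr CompactPtrScale = 2; // assumed 4-byte granularity

CompactPtrT compactPtr(uptr Base, uptr Ptr) {
  return static_cast<CompactPtrT>((Ptr - Base) >> CompactPtrScale);
}

uptr decompactPtr(uptr Base, CompactPtrT C) {
  return Base + (static_cast<uptr>(C) << CompactPtrScale);
}

int main() {
  const uptr RegionBeg = 0x10000000; // hypothetical region base
  const uptr Block = RegionBeg + 0x40;
  assert(decompactPtr(RegionBeg, compactPtr(RegionBeg, Block)) == Block);
  return 0;
}
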
diff --git a/compiler-rt/lib/scudo/standalone/memtag.h b/compiler-rt/lib/scudo/standalone/memtag.h
index b1b62065ed72..c48e228fbe44 100644
--- a/compiler-rt/lib/scudo/standalone/memtag.h
+++ b/compiler-rt/lib/scudo/standalone/memtag.h
@@ -18,12 +18,17 @@
namespace scudo {
-void setRandomTag(void *Ptr, uptr Size, uptr ExcludeMask, uptr *TaggedBegin,
- uptr *TaggedEnd);
-
-#if defined(__aarch64__) || defined(SCUDO_FUZZ)
+#if (__clang_major__ >= 12 && defined(__aarch64__)) || defined(SCUDO_FUZZ)
+// We assume that Top-Byte Ignore is enabled if the architecture supports memory
+// tagging. Not all operating systems enable TBI, so we only claim architectural
+// support for memory tagging if the operating system enables TBI.
+#if SCUDO_LINUX && !defined(SCUDO_DISABLE_TBI)
inline constexpr bool archSupportsMemoryTagging() { return true; }
+#else
+inline constexpr bool archSupportsMemoryTagging() { return false; }
+#endif
+
inline constexpr uptr archMemoryTagGranuleSize() { return 16; }
inline uptr untagPointer(uptr Ptr) { return Ptr & ((1ULL << 56) - 1); }
@@ -50,7 +55,7 @@ inline uint8_t extractTag(uptr Ptr) {
#endif
-#if defined(__aarch64__)
+#if __clang_major__ >= 12 && defined(__aarch64__)
#if SCUDO_LINUX
@@ -62,15 +67,27 @@ inline bool systemSupportsMemoryTagging() {
}
inline bool systemDetectsMemoryTagFaultsTestOnly() {
+#ifndef PR_SET_TAGGED_ADDR_CTRL
+#define PR_SET_TAGGED_ADDR_CTRL 54
+#endif
#ifndef PR_GET_TAGGED_ADDR_CTRL
#define PR_GET_TAGGED_ADDR_CTRL 56
#endif
+#ifndef PR_TAGGED_ADDR_ENABLE
+#define PR_TAGGED_ADDR_ENABLE (1UL << 0)
+#endif
#ifndef PR_MTE_TCF_SHIFT
#define PR_MTE_TCF_SHIFT 1
#endif
+#ifndef PR_MTE_TAG_SHIFT
+#define PR_MTE_TAG_SHIFT 3
+#endif
#ifndef PR_MTE_TCF_NONE
#define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT)
#endif
+#ifndef PR_MTE_TCF_SYNC
+#define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT)
+#endif
#ifndef PR_MTE_TCF_MASK
#define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT)
#endif
@@ -79,139 +96,157 @@ inline bool systemDetectsMemoryTagFaultsTestOnly() {
PR_MTE_TCF_MASK) != PR_MTE_TCF_NONE;
}
+inline void enableSystemMemoryTaggingTestOnly() {
+ prctl(PR_SET_TAGGED_ADDR_CTRL,
+ PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC | (0xfffe << PR_MTE_TAG_SHIFT),
+ 0, 0, 0);
+}
+
#else // !SCUDO_LINUX
inline bool systemSupportsMemoryTagging() { return false; }
-inline bool systemDetectsMemoryTagFaultsTestOnly() { return false; }
-
-#endif // SCUDO_LINUX
-
-inline void disableMemoryTagChecksTestOnly() {
- __asm__ __volatile__(".arch_extension mte; msr tco, #1");
+inline bool systemDetectsMemoryTagFaultsTestOnly() {
+ UNREACHABLE("memory tagging not supported");
}
-inline void enableMemoryTagChecksTestOnly() {
- __asm__ __volatile__(".arch_extension mte; msr tco, #0");
+inline void enableSystemMemoryTaggingTestOnly() {
+ UNREACHABLE("memory tagging not supported");
}
+#endif // SCUDO_LINUX
+
class ScopedDisableMemoryTagChecks {
- size_t PrevTCO;
+ uptr PrevTCO;
public:
ScopedDisableMemoryTagChecks() {
- __asm__ __volatile__(".arch_extension mte; mrs %0, tco; msr tco, #1"
- : "=r"(PrevTCO));
+ __asm__ __volatile__(
+ R"(
+ .arch_extension memtag
+ mrs %0, tco
+ msr tco, #1
+ )"
+ : "=r"(PrevTCO));
}
~ScopedDisableMemoryTagChecks() {
- __asm__ __volatile__(".arch_extension mte; msr tco, %0" : : "r"(PrevTCO));
+ __asm__ __volatile__(
+ R"(
+ .arch_extension memtag
+ msr tco, %0
+ )"
+ :
+ : "r"(PrevTCO));
}
};
inline uptr selectRandomTag(uptr Ptr, uptr ExcludeMask) {
+ ExcludeMask |= 1; // Always exclude Tag 0.
uptr TaggedPtr;
__asm__ __volatile__(
- ".arch_extension mte; irg %[TaggedPtr], %[Ptr], %[ExcludeMask]"
+ R"(
+ .arch_extension memtag
+ irg %[TaggedPtr], %[Ptr], %[ExcludeMask]
+ )"
: [TaggedPtr] "=r"(TaggedPtr)
: [Ptr] "r"(Ptr), [ExcludeMask] "r"(ExcludeMask));
return TaggedPtr;
}
-inline uptr storeTags(uptr Begin, uptr End) {
- DCHECK(Begin % 16 == 0);
- if (Begin != End) {
- __asm__ __volatile__(
- R"(
- .arch_extension mte
-
- 1:
- stzg %[Cur], [%[Cur]], #16
- cmp %[Cur], %[End]
- b.lt 1b
- )"
- : [Cur] "+&r"(Begin)
- : [End] "r"(End)
- : "memory");
- }
- return Begin;
-}
-
-inline void *prepareTaggedChunk(void *Ptr, uptr Size, uptr ExcludeMask,
- uptr BlockEnd) {
- // Prepare the granule before the chunk to store the chunk header by setting
- // its tag to 0. Normally its tag will already be 0, but in the case where a
- // chunk holding a low alignment allocation is reused for a higher alignment
- // allocation, the chunk may already have a non-zero tag from the previous
- // allocation.
- __asm__ __volatile__(".arch_extension mte; stg %0, [%0, #-16]"
- :
- : "r"(Ptr)
- : "memory");
-
- uptr TaggedBegin, TaggedEnd;
- setRandomTag(Ptr, Size, ExcludeMask, &TaggedBegin, &TaggedEnd);
-
- // Finally, set the tag of the granule past the end of the allocation to 0,
- // to catch linear overflows even if a previous larger allocation used the
- // same block and tag. Only do this if the granule past the end is in our
- // block, because this would otherwise lead to a SEGV if the allocation
- // covers the entire block and our block is at the end of a mapping. The tag
- // of the next block's header granule will be set to 0, so it will serve the
- // purpose of catching linear overflows in this case.
- uptr UntaggedEnd = untagPointer(TaggedEnd);
- if (UntaggedEnd != BlockEnd)
- __asm__ __volatile__(".arch_extension mte; stg %0, [%0]"
- :
- : "r"(UntaggedEnd)
- : "memory");
- return reinterpret_cast<void *>(TaggedBegin);
+inline uptr addFixedTag(uptr Ptr, uptr Tag) {
+ DCHECK_LT(Tag, 16);
+ DCHECK_EQ(untagPointer(Ptr), Ptr);
+ return Ptr | (Tag << 56);
}
-inline void resizeTaggedChunk(uptr OldPtr, uptr NewPtr, uptr BlockEnd) {
- uptr RoundOldPtr = roundUpTo(OldPtr, 16);
- if (RoundOldPtr >= NewPtr) {
- // If the allocation is shrinking we just need to set the tag past the end
- // of the allocation to 0. See explanation in prepareTaggedChunk above.
- uptr RoundNewPtr = untagPointer(roundUpTo(NewPtr, 16));
- if (RoundNewPtr != BlockEnd)
- __asm__ __volatile__(".arch_extension mte; stg %0, [%0]"
- :
- : "r"(RoundNewPtr)
- : "memory");
- return;
- }
-
- __asm__ __volatile__(R"(
- .arch_extension mte
-
- // Set the memory tag of the region
- // [roundUpTo(OldPtr, 16), roundUpTo(NewPtr, 16))
- // to the pointer tag stored in OldPtr.
+inline uptr storeTags(uptr Begin, uptr End) {
+ DCHECK_EQ(0, Begin % 16);
+ uptr LineSize, Next, Tmp;
+ __asm__ __volatile__(
+ R"(
+ .arch_extension memtag
+
+ // Compute the cache line size in bytes (DCZID_EL0 stores it as the log2
+ // of the number of 4-byte words) and bail out to the slow path if DCZID_EL0
+ // indicates that the DC instructions are unavailable.
+ DCZID .req %[Tmp]
+ mrs DCZID, dczid_el0
+ tbnz DCZID, #4, 3f
+ and DCZID, DCZID, #15
+ mov %[LineSize], #4
+ lsl %[LineSize], %[LineSize], DCZID
+ .unreq DCZID
+
+ // Our main loop doesn't handle the case where we don't need to perform any
+ // DC GZVA operations. If the size of our tagged region is less than
+ // twice the cache line size, bail out to the slow path since it's not
+ // guaranteed that we'll be able to do a DC GZVA.
+ Size .req %[Tmp]
+ sub Size, %[End], %[Cur]
+ cmp Size, %[LineSize], lsl #1
+ b.lt 3f
+ .unreq Size
+
+ LineMask .req %[Tmp]
+ sub LineMask, %[LineSize], #1
+
+ // STZG until the start of the next cache line.
+ orr %[Next], %[Cur], LineMask
1:
stzg %[Cur], [%[Cur]], #16
- cmp %[Cur], %[End]
+ cmp %[Cur], %[Next]
b.lt 1b
- // Finally, set the tag of the granule past the end of the allocation to 0.
- and %[Cur], %[Cur], #(1 << 56) - 1
- cmp %[Cur], %[BlockEnd]
- b.eq 2f
- stg %[Cur], [%[Cur]]
-
+ // DC GZVA cache lines until we have no more full cache lines.
+ bic %[Next], %[End], LineMask
+ .unreq LineMask
2:
+ dc gzva, %[Cur]
+ add %[Cur], %[Cur], %[LineSize]
+ cmp %[Cur], %[Next]
+ b.lt 2b
+
+ // STZG until the end of the tagged region. This loop is also used to handle
+ // slow path cases.
+ 3:
+ cmp %[Cur], %[End]
+ b.ge 4f
+ stzg %[Cur], [%[Cur]], #16
+ b 3b
+
+ 4:
)"
- : [Cur] "+&r"(RoundOldPtr), [End] "+&r"(NewPtr)
- : [BlockEnd] "r"(BlockEnd)
+ : [Cur] "+&r"(Begin), [LineSize] "=&r"(LineSize), [Next] "=&r"(Next),
+ [Tmp] "=&r"(Tmp)
+ : [End] "r"(End)
+ : "memory");
+ DCHECK_EQ(0, Begin % 16);
+ return Begin;
+}
+
+inline void storeTag(uptr Ptr) {
+ DCHECK_EQ(0, Ptr % 16);
+ __asm__ __volatile__(R"(
+ .arch_extension memtag
+ stg %0, [%0]
+ )"
+ :
+ : "r"(Ptr)
: "memory");
}
inline uptr loadTag(uptr Ptr) {
+ DCHECK_EQ(0, Ptr % 16);
uptr TaggedPtr = Ptr;
- __asm__ __volatile__(".arch_extension mte; ldg %0, [%0]"
- : "+r"(TaggedPtr)
- :
- : "memory");
+ __asm__ __volatile__(
+ R"(
+ .arch_extension memtag
+ ldg %0, [%0]
+ )"
+ : "+r"(TaggedPtr)
+ :
+ : "memory");
return TaggedPtr;
}
@@ -225,11 +260,7 @@ inline bool systemDetectsMemoryTagFaultsTestOnly() {
UNREACHABLE("memory tagging not supported");
}
-inline void disableMemoryTagChecksTestOnly() {
- UNREACHABLE("memory tagging not supported");
-}
-
-inline void enableMemoryTagChecksTestOnly() {
+inline void enableSystemMemoryTaggingTestOnly() {
UNREACHABLE("memory tagging not supported");
}
@@ -243,25 +274,20 @@ inline uptr selectRandomTag(uptr Ptr, uptr ExcludeMask) {
UNREACHABLE("memory tagging not supported");
}
+inline uptr addFixedTag(uptr Ptr, uptr Tag) {
+ (void)Ptr;
+ (void)Tag;
+ UNREACHABLE("memory tagging not supported");
+}
+
inline uptr storeTags(uptr Begin, uptr End) {
(void)Begin;
(void)End;
UNREACHABLE("memory tagging not supported");
}
-inline void *prepareTaggedChunk(void *Ptr, uptr Size, uptr ExcludeMask,
- uptr BlockEnd) {
+inline void storeTag(uptr Ptr) {
(void)Ptr;
- (void)Size;
- (void)ExcludeMask;
- (void)BlockEnd;
- UNREACHABLE("memory tagging not supported");
-}
-
-inline void resizeTaggedChunk(uptr OldPtr, uptr NewPtr, uptr BlockEnd) {
- (void)OldPtr;
- (void)NewPtr;
- (void)BlockEnd;
UNREACHABLE("memory tagging not supported");
}
@@ -278,9 +304,23 @@ inline void setRandomTag(void *Ptr, uptr Size, uptr ExcludeMask,
*TaggedEnd = storeTags(*TaggedBegin, *TaggedBegin + Size);
}
+inline void *untagPointer(void *Ptr) {
+ return reinterpret_cast<void *>(untagPointer(reinterpret_cast<uptr>(Ptr)));
+}
+
+inline void *loadTag(void *Ptr) {
+ return reinterpret_cast<void *>(loadTag(reinterpret_cast<uptr>(Ptr)));
+}
+
+inline void *addFixedTag(void *Ptr, uptr Tag) {
+ return reinterpret_cast<void *>(
+ addFixedTag(reinterpret_cast<uptr>(Ptr), Tag));
+}
+
template <typename Config>
inline constexpr bool allocatorSupportsMemoryTagging() {
- return archSupportsMemoryTagging() && Config::MaySupportMemoryTagging;
+ return archSupportsMemoryTagging() && Config::MaySupportMemoryTagging &&
+ (1 << SCUDO_MIN_ALIGNMENT_LOG) >= archMemoryTagGranuleSize();
}
} // namespace scudo
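
Aside from the assembly, the new addFixedTag() helper is plain bit arithmetic on the pointer's top byte. A standalone sketch of how the tag bits relate to untagPointer(), using pure integer math and no MTE instructions; extractTag() here is an assumed helper mirroring the one declared in the header.

#include <cassert>
#include <cstdint>

using uptr = uint64_t; // 64-bit address arithmetic, independent of the host

uptr untagPointer(uptr Ptr) { return Ptr & ((1ULL << 56) - 1); }
uptr addFixedTag(uptr Ptr, uptr Tag) { return Ptr | (Tag << 56); }
uint8_t extractTag(uptr Ptr) { return static_cast<uint8_t>((Ptr >> 56) & 0xf); }

int main() {
  const uptr Untagged = 0x0000700000001000ULL;
  const uptr Tagged = addFixedTag(Untagged, 1); // a fixed, nonzero tag
  assert(extractTag(Tagged) == 1);
  assert(untagPointer(Tagged) == Untagged);
  return 0;
}
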
diff --git a/compiler-rt/lib/scudo/standalone/mutex.h b/compiler-rt/lib/scudo/standalone/mutex.h
index d6e6a5b33aae..c8504c040914 100644
--- a/compiler-rt/lib/scudo/standalone/mutex.h
+++ b/compiler-rt/lib/scudo/standalone/mutex.h
@@ -22,7 +22,6 @@ namespace scudo {
class HybridMutex {
public:
- void init() { M = {}; }
bool tryLock();
NOINLINE void lock() {
if (LIKELY(tryLock()))
@@ -48,9 +47,9 @@ private:
static constexpr u8 NumberOfYields = 8U;
#if SCUDO_LINUX
- atomic_u32 M;
+ atomic_u32 M = {};
#elif SCUDO_FUCHSIA
- sync_mutex_t M;
+ sync_mutex_t M = {};
#endif
void lockSlow();
diff --git a/compiler-rt/lib/scudo/standalone/options.h b/compiler-rt/lib/scudo/standalone/options.h
index 91301bf5ec9c..4e6786513334 100644
--- a/compiler-rt/lib/scudo/standalone/options.h
+++ b/compiler-rt/lib/scudo/standalone/options.h
@@ -24,6 +24,7 @@ enum class OptionBit {
TrackAllocationStacks,
UseOddEvenTags,
UseMemoryTagging,
+ AddLargeAllocationSlack,
};
struct Options {
@@ -43,9 +44,8 @@ template <typename Config> bool useMemoryTagging(Options Options) {
}
struct AtomicOptions {
- atomic_u32 Val;
+ atomic_u32 Val = {};
-public:
Options load() const { return Options{atomic_load_relaxed(&Val)}; }
void clear(OptionBit Opt) {
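
The new AddLargeAllocationSlack entry is one more bit in the option word guarded by AtomicOptions. A standalone sketch of the pattern, with a made-up OptionBit list and simplified accessors; the real set/clear/get helpers live in options.h.

#include <atomic>
#include <cassert>
#include <cstdint>

// Made-up option list for the sketch.
enum class OptionBit : uint32_t { OptionA, OptionB, AddLargeAllocationSlack };

struct AtomicOptions {
  std::atomic<uint32_t> Val{0};
  bool get(OptionBit Opt) const {
    return Val.load(std::memory_order_relaxed) &
           (1U << static_cast<uint32_t>(Opt));
  }
  void set(OptionBit Opt) {
    Val.fetch_or(1U << static_cast<uint32_t>(Opt), std::memory_order_relaxed);
  }
  void clear(OptionBit Opt) {
    Val.fetch_and(~(1U << static_cast<uint32_t>(Opt)),
                  std::memory_order_relaxed);
  }
};

int main() {
  AtomicOptions O;
  O.set(OptionBit::AddLargeAllocationSlack);
  assert(O.get(OptionBit::AddLargeAllocationSlack));
  O.clear(OptionBit::AddLargeAllocationSlack);
  assert(!O.get(OptionBit::AddLargeAllocationSlack));
  return 0;
}
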
diff --git a/compiler-rt/lib/scudo/standalone/platform.h b/compiler-rt/lib/scudo/standalone/platform.h
index a4c2a0b26603..36378d14d844 100644
--- a/compiler-rt/lib/scudo/standalone/platform.h
+++ b/compiler-rt/lib/scudo/standalone/platform.h
@@ -12,7 +12,7 @@
// Transitive includes of stdint.h specify some of the defines checked below.
#include <stdint.h>
-#if defined(__linux__)
+#if defined(__linux__) && !defined(__TRUSTY__)
#define SCUDO_LINUX 1
#else
#define SCUDO_LINUX 0
@@ -31,6 +31,12 @@
#define SCUDO_FUCHSIA 0
#endif
+#if defined(__TRUSTY__)
+#define SCUDO_TRUSTY 1
+#else
+#define SCUDO_TRUSTY 0
+#endif
+
#if __LP64__
#define SCUDO_WORDSIZE 64U
#else
diff --git a/compiler-rt/lib/scudo/standalone/primary32.h b/compiler-rt/lib/scudo/standalone/primary32.h
index a88a2a67e951..326c10a32a85 100644
--- a/compiler-rt/lib/scudo/standalone/primary32.h
+++ b/compiler-rt/lib/scudo/standalone/primary32.h
@@ -41,6 +41,7 @@ namespace scudo {
template <typename Config> class SizeClassAllocator32 {
public:
+ typedef typename Config::PrimaryCompactPtrT CompactPtrT;
typedef typename Config::SizeClassMap SizeClassMap;
// The bytemap can only track UINT8_MAX - 1 classes.
static_assert(SizeClassMap::LargestClassId <= (UINT8_MAX - 1), "");
@@ -59,15 +60,18 @@ public:
static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }
- void initLinkerInitialized(s32 ReleaseToOsInterval) {
+ void init(s32 ReleaseToOsInterval) {
if (SCUDO_FUCHSIA)
reportError("SizeClassAllocator32 is not supported on Fuchsia");
- PossibleRegions.initLinkerInitialized();
+ if (SCUDO_TRUSTY)
+ reportError("SizeClassAllocator32 is not supported on Trusty");
+ DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
+ PossibleRegions.init();
u32 Seed;
const u64 Time = getMonotonicTime();
- if (UNLIKELY(!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed))))
+ if (!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed)))
Seed = static_cast<u32>(
Time ^ (reinterpret_cast<uptr>(SizeClassInfoArray) >> 6));
for (uptr I = 0; I < NumClasses; I++) {
@@ -79,10 +83,6 @@ public:
}
setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
}
- void init(s32 ReleaseToOsInterval) {
- memset(this, 0, sizeof(*this));
- initLinkerInitialized(ReleaseToOsInterval);
- }
void unmapTestOnly() {
while (NumberOfStashedRegions > 0)
@@ -95,6 +95,7 @@ public:
MinRegionIndex = Sci->MinRegionIndex;
if (Sci->MaxRegionIndex > MaxRegionIndex)
MaxRegionIndex = Sci->MaxRegionIndex;
+ *Sci = {};
}
for (uptr I = MinRegionIndex; I < MaxRegionIndex; I++)
if (PossibleRegions[I])
@@ -102,6 +103,14 @@ public:
PossibleRegions.unmapTestOnly();
}
+ CompactPtrT compactPtr(UNUSED uptr ClassId, uptr Ptr) const {
+ return static_cast<CompactPtrT>(Ptr);
+ }
+
+ void *decompactPtr(UNUSED uptr ClassId, CompactPtrT CompactPtr) const {
+ return reinterpret_cast<void *>(static_cast<uptr>(CompactPtr));
+ }
+
TransferBatch *popBatch(CacheT *C, uptr ClassId) {
DCHECK_LT(ClassId, NumClasses);
SizeClassInfo *Sci = getSizeClassInfo(ClassId);
@@ -359,17 +368,18 @@ private:
// Fill the transfer batches and put them in the size-class freelist. We
// need to randomize the blocks for security purposes, so we first fill a
// local array that we then shuffle before populating the batches.
- void *ShuffleArray[ShuffleArraySize];
+ CompactPtrT ShuffleArray[ShuffleArraySize];
DCHECK_LE(NumberOfBlocks, ShuffleArraySize);
uptr P = Region + Offset;
for (u32 I = 0; I < NumberOfBlocks; I++, P += Size)
- ShuffleArray[I] = reinterpret_cast<void *>(P);
+ ShuffleArray[I] = reinterpret_cast<CompactPtrT>(P);
// No need to shuffle the batches size class.
if (ClassId != SizeClassMap::BatchClassId)
shuffle(ShuffleArray, NumberOfBlocks, &Sci->RandState);
for (u32 I = 0; I < NumberOfBlocks;) {
- TransferBatch *B = C->createBatch(ClassId, ShuffleArray[I]);
+ TransferBatch *B =
+ C->createBatch(ClassId, reinterpret_cast<void *>(ShuffleArray[I]));
if (UNLIKELY(!B))
return nullptr;
const u32 N = Min(MaxCount, NumberOfBlocks - I);
@@ -435,7 +445,7 @@ private:
if (BlockSize < PageSize / 16U) {
if (!Force && BytesPushed < Sci->AllocatedUser / 16U)
return 0;
- // We want 8x% to 9x% free bytes (the larger the bock, the lower the %).
+ // We want 8x% to 9x% free bytes (the larger the block, the lower the %).
if ((BytesInFreeList * 100U) / Sci->AllocatedUser <
(100U - 1U - BlockSize / 16U))
return 0;
@@ -463,8 +473,11 @@ private:
auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
};
- releaseFreeMemoryToOS(Sci->FreeList, Base, RegionSize, NumberOfRegions,
- BlockSize, &Recorder, SkipRegion);
+ auto DecompactPtr = [](CompactPtrT CompactPtr) {
+ return reinterpret_cast<uptr>(CompactPtr);
+ };
+ releaseFreeMemoryToOS(Sci->FreeList, RegionSize, NumberOfRegions, BlockSize,
+ &Recorder, DecompactPtr, SkipRegion);
if (Recorder.getReleasedRangesCount() > 0) {
Sci->ReleaseInfo.PushedBlocksAtLastRelease = Sci->Stats.PushedBlocks;
Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
@@ -476,17 +489,17 @@ private:
return TotalReleasedBytes;
}
- SizeClassInfo SizeClassInfoArray[NumClasses];
+ SizeClassInfo SizeClassInfoArray[NumClasses] = {};
// Track the regions in use, 0 is unused, otherwise store ClassId + 1.
- ByteMap PossibleRegions;
- atomic_s32 ReleaseToOsIntervalMs;
+ ByteMap PossibleRegions = {};
+ atomic_s32 ReleaseToOsIntervalMs = {};
// Unless several threads request regions simultaneously from different size
// classes, the stash rarely contains more than 1 entry.
static constexpr uptr MaxStashedRegions = 4;
HybridMutex RegionsStashMutex;
- uptr NumberOfStashedRegions;
- uptr RegionsStash[MaxStashedRegions];
+ uptr NumberOfStashedRegions = 0;
+ uptr RegionsStash[MaxStashedRegions] = {};
};
} // namespace scudo
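
For the 32-bit primary, compaction is an identity conversion, and the release path now passes that conversion to releaseFreeMemoryToOS() as a functor. A standalone sketch of that caller-side contract, with simplified types:

#include <cassert>
#include <cstdint>

using uptr = uintptr_t;
using CompactPtrT = uptr; // 32-bit primary: compact pointers are plain uptrs

// Stand-in for the release logic: turn a free-list entry back into an address
// through whatever functor the caller provides.
template <typename DecompactPtrT>
uptr firstBlockAddress(const CompactPtrT *FreeList, DecompactPtrT DecompactPtr) {
  return DecompactPtr(FreeList[0]);
}

int main() {
  unsigned Block = 0;
  const CompactPtrT FreeList[1] = {reinterpret_cast<uptr>(&Block)};
  auto DecompactPtr = [](CompactPtrT C) { return static_cast<uptr>(C); };
  assert(firstBlockAddress(FreeList, DecompactPtr) ==
         reinterpret_cast<uptr>(&Block));
  return 0;
}
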
diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
index 2724a2529f75..13420bf3d222 100644
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -25,8 +25,9 @@ namespace scudo {
//
// It starts by reserving NumClasses * 2^RegionSizeLog bytes, equally divided in
// Regions, specific to each size class. Note that the base of that mapping is
-// random (based to the platform specific map() capabilities), and that each
-// Region actually starts at a random offset from its base.
+// random (based on the platform specific map() capabilities). If
+// PrimaryEnableRandomOffset is set, each Region actually starts at a random
+// offset from its base.
//
// Regions are mapped incrementally on demand to fulfill allocation requests,
// those mappings being split into equally sized Blocks based on the size class
@@ -42,6 +43,8 @@ namespace scudo {
template <typename Config> class SizeClassAllocator64 {
public:
+ typedef typename Config::PrimaryCompactPtrT CompactPtrT;
+ static const uptr CompactPtrScale = Config::PrimaryCompactPtrScale;
typedef typename Config::SizeClassMap SizeClassMap;
typedef SizeClassAllocator64<Config> ThisT;
typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
@@ -49,39 +52,45 @@ public:
static uptr getSizeByClassId(uptr ClassId) {
return (ClassId == SizeClassMap::BatchClassId)
- ? sizeof(TransferBatch)
+ ? roundUpTo(sizeof(TransferBatch), 1U << CompactPtrScale)
: SizeClassMap::getSizeByClassId(ClassId);
}
static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }
- void initLinkerInitialized(s32 ReleaseToOsInterval) {
+ void init(s32 ReleaseToOsInterval) {
+ DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
+ DCHECK_EQ(PrimaryBase, 0U);
// Reserve the space required for the Primary.
PrimaryBase = reinterpret_cast<uptr>(
- map(nullptr, PrimarySize, "scudo:primary", MAP_NOACCESS, &Data));
+ map(nullptr, PrimarySize, nullptr, MAP_NOACCESS, &Data));
u32 Seed;
const u64 Time = getMonotonicTime();
- if (UNLIKELY(!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed))))
+ if (!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed)))
Seed = static_cast<u32>(Time ^ (PrimaryBase >> 12));
const uptr PageSize = getPageSizeCached();
for (uptr I = 0; I < NumClasses; I++) {
RegionInfo *Region = getRegionInfo(I);
- // The actual start of a region is offseted by a random number of pages.
- Region->RegionBeg =
- getRegionBaseByClassId(I) + (getRandomModN(&Seed, 16) + 1) * PageSize;
+ // The actual start of a region is offset by a random number of pages
+ // when PrimaryEnableRandomOffset is set.
+ Region->RegionBeg = getRegionBaseByClassId(I) +
+ (Config::PrimaryEnableRandomOffset
+ ? ((getRandomModN(&Seed, 16) + 1) * PageSize)
+ : 0);
Region->RandState = getRandomU32(&Seed);
Region->ReleaseInfo.LastReleaseAtNs = Time;
}
setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
}
- void init(s32 ReleaseToOsInterval) {
- memset(this, 0, sizeof(*this));
- initLinkerInitialized(ReleaseToOsInterval);
- }
void unmapTestOnly() {
+ for (uptr I = 0; I < NumClasses; I++) {
+ RegionInfo *Region = getRegionInfo(I);
+ *Region = {};
+ }
unmap(reinterpret_cast<void *>(PrimaryBase), PrimarySize, UNMAP_ALL, &Data);
+ PrimaryBase = 0U;
}
TransferBatch *popBatch(CacheT *C, uptr ClassId) {
@@ -194,6 +203,24 @@ public:
static uptr getRegionInfoArraySize() { return sizeof(RegionInfoArray); }
+ uptr getCompactPtrBaseByClassId(uptr ClassId) {
+ // If we are not compacting pointers, base everything off of 0.
+ if (sizeof(CompactPtrT) == sizeof(uptr) && CompactPtrScale == 0)
+ return 0;
+ return getRegionInfo(ClassId)->RegionBeg;
+ }
+
+ CompactPtrT compactPtr(uptr ClassId, uptr Ptr) {
+ DCHECK_LE(ClassId, SizeClassMap::LargestClassId);
+ return compactPtrInternal(getCompactPtrBaseByClassId(ClassId), Ptr);
+ }
+
+ void *decompactPtr(uptr ClassId, CompactPtrT CompactPtr) {
+ DCHECK_LE(ClassId, SizeClassMap::LargestClassId);
+ return reinterpret_cast<void *>(
+ decompactPtrInternal(getCompactPtrBaseByClassId(ClassId), CompactPtr));
+ }
+
static BlockInfo findNearestBlock(const char *RegionInfoData, uptr Ptr) {
const RegionInfo *RegionInfoArray =
reinterpret_cast<const RegionInfo *>(RegionInfoData);
@@ -245,8 +272,7 @@ private:
static const uptr NumClasses = SizeClassMap::NumClasses;
static const uptr PrimarySize = RegionSize * NumClasses;
- // Call map for user memory with at least this size.
- static const uptr MapSizeIncrement = 1UL << 18;
+ static const uptr MapSizeIncrement = Config::PrimaryMapSizeIncrement;
// Fill at most this number of batches from the newly map'd memory.
static const u32 MaxNumBatches = SCUDO_ANDROID ? 4U : 8U;
@@ -265,24 +291,24 @@ private:
struct UnpaddedRegionInfo {
HybridMutex Mutex;
SinglyLinkedList<TransferBatch> FreeList;
- RegionStats Stats;
- bool Exhausted;
- u32 RandState;
- uptr RegionBeg;
- uptr MappedUser; // Bytes mapped for user memory.
- uptr AllocatedUser; // Bytes allocated for user memory.
- MapPlatformData Data;
- ReleaseToOsInfo ReleaseInfo;
+ uptr RegionBeg = 0;
+ RegionStats Stats = {};
+ u32 RandState = 0;
+ uptr MappedUser = 0; // Bytes mapped for user memory.
+ uptr AllocatedUser = 0; // Bytes allocated for user memory.
+ MapPlatformData Data = {};
+ ReleaseToOsInfo ReleaseInfo = {};
+ bool Exhausted = false;
};
struct RegionInfo : UnpaddedRegionInfo {
char Padding[SCUDO_CACHE_LINE_SIZE -
- (sizeof(UnpaddedRegionInfo) % SCUDO_CACHE_LINE_SIZE)];
+ (sizeof(UnpaddedRegionInfo) % SCUDO_CACHE_LINE_SIZE)] = {};
};
static_assert(sizeof(RegionInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");
- uptr PrimaryBase;
- MapPlatformData Data;
- atomic_s32 ReleaseToOsIntervalMs;
+ uptr PrimaryBase = 0;
+ MapPlatformData Data = {};
+ atomic_s32 ReleaseToOsIntervalMs = {};
alignas(SCUDO_CACHE_LINE_SIZE) RegionInfo RegionInfoArray[NumClasses];
RegionInfo *getRegionInfo(uptr ClassId) {
@@ -294,6 +320,14 @@ private:
return PrimaryBase + (ClassId << Config::PrimaryRegionSizeLog);
}
+ static CompactPtrT compactPtrInternal(uptr Base, uptr Ptr) {
+ return static_cast<CompactPtrT>((Ptr - Base) >> CompactPtrScale);
+ }
+
+ static uptr decompactPtrInternal(uptr Base, CompactPtrT CompactPtr) {
+ return Base + (static_cast<uptr>(CompactPtr) << CompactPtrScale);
+ }
+
NOINLINE TransferBatch *populateFreeList(CacheT *C, uptr ClassId,
RegionInfo *Region) {
const uptr Size = getSizeByClassId(ClassId);
@@ -303,15 +337,15 @@ private:
const uptr MappedUser = Region->MappedUser;
const uptr TotalUserBytes = Region->AllocatedUser + MaxCount * Size;
// Map more space for blocks, if necessary.
- if (UNLIKELY(TotalUserBytes > MappedUser)) {
+ if (TotalUserBytes > MappedUser) {
// Do the mmap for the user memory.
- const uptr UserMapSize =
+ const uptr MapSize =
roundUpTo(TotalUserBytes - MappedUser, MapSizeIncrement);
const uptr RegionBase = RegionBeg - getRegionBaseByClassId(ClassId);
- if (RegionBase + MappedUser + UserMapSize > RegionSize) {
+ if (UNLIKELY(RegionBase + MappedUser + MapSize > RegionSize)) {
if (!Region->Exhausted) {
Region->Exhausted = true;
- ScopedString Str(1024);
+ ScopedString Str;
getStats(&Str);
Str.append(
"Scudo OOM: The process has exhausted %zuM for size class %zu.\n",
@@ -322,14 +356,15 @@ private:
}
if (MappedUser == 0)
Region->Data = Data;
- if (!map(reinterpret_cast<void *>(RegionBeg + MappedUser), UserMapSize,
- "scudo:primary",
- MAP_ALLOWNOMEM | MAP_RESIZABLE |
- (useMemoryTagging<Config>(Options.load()) ? MAP_MEMTAG : 0),
- &Region->Data))
+ if (UNLIKELY(!map(
+ reinterpret_cast<void *>(RegionBeg + MappedUser), MapSize,
+ "scudo:primary",
+ MAP_ALLOWNOMEM | MAP_RESIZABLE |
+ (useMemoryTagging<Config>(Options.load()) ? MAP_MEMTAG : 0),
+ &Region->Data)))
return nullptr;
- Region->MappedUser += UserMapSize;
- C->getStats().add(StatMapped, UserMapSize);
+ Region->MappedUser += MapSize;
+ C->getStats().add(StatMapped, MapSize);
}
const u32 NumberOfBlocks = Min(
@@ -339,17 +374,20 @@ private:
constexpr u32 ShuffleArraySize =
MaxNumBatches * TransferBatch::MaxNumCached;
- void *ShuffleArray[ShuffleArraySize];
+ CompactPtrT ShuffleArray[ShuffleArraySize];
DCHECK_LE(NumberOfBlocks, ShuffleArraySize);
+ const uptr CompactPtrBase = getCompactPtrBaseByClassId(ClassId);
uptr P = RegionBeg + Region->AllocatedUser;
for (u32 I = 0; I < NumberOfBlocks; I++, P += Size)
- ShuffleArray[I] = reinterpret_cast<void *>(P);
+ ShuffleArray[I] = compactPtrInternal(CompactPtrBase, P);
// No need to shuffle the batches size class.
if (ClassId != SizeClassMap::BatchClassId)
shuffle(ShuffleArray, NumberOfBlocks, &Region->RandState);
for (u32 I = 0; I < NumberOfBlocks;) {
- TransferBatch *B = C->createBatch(ClassId, ShuffleArray[I]);
+ TransferBatch *B =
+ C->createBatch(ClassId, reinterpret_cast<void *>(decompactPtrInternal(
+ CompactPtrBase, ShuffleArray[I])));
if (UNLIKELY(!B))
return nullptr;
const u32 N = Min(MaxCount, NumberOfBlocks - I);
@@ -409,7 +447,7 @@ private:
if (BlockSize < PageSize / 16U) {
if (!Force && BytesPushed < Region->AllocatedUser / 16U)
return 0;
- // We want 8x% to 9x% free bytes (the larger the bock, the lower the %).
+ // We want 8x% to 9x% free bytes (the larger the block, the lower the %).
if ((BytesInFreeList * 100U) / Region->AllocatedUser <
(100U - 1U - BlockSize / 16U))
return 0;
@@ -426,11 +464,14 @@ private:
}
}
- auto SkipRegion = [](UNUSED uptr RegionIndex) { return false; };
ReleaseRecorder Recorder(Region->RegionBeg, &Region->Data);
- releaseFreeMemoryToOS(Region->FreeList, Region->RegionBeg,
- Region->AllocatedUser, 1U, BlockSize, &Recorder,
- SkipRegion);
+ const uptr CompactPtrBase = getCompactPtrBaseByClassId(ClassId);
+ auto DecompactPtr = [CompactPtrBase](CompactPtrT CompactPtr) {
+ return decompactPtrInternal(CompactPtrBase, CompactPtr);
+ };
+ auto SkipRegion = [](UNUSED uptr RegionIndex) { return false; };
+ releaseFreeMemoryToOS(Region->FreeList, Region->AllocatedUser, 1U,
+ BlockSize, &Recorder, DecompactPtr, SkipRegion);
if (Recorder.getReleasedRangesCount() > 0) {
Region->ReleaseInfo.PushedBlocksAtLastRelease =
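
A standalone sketch of the base selection that compactPtr()/decompactPtr() in the 64-bit primary rely on: when the configured compact type is pointer-sized and unscaled, compaction is a no-op based off 0; otherwise offsets are taken from the region base and scaled down so they fit a narrower integer. Types and addresses below are illustrative.

#include <cassert>
#include <cstdint>

using uptr = uint64_t;

template <typename CompactPtrT, uptr Scale>
uptr compactPtrBase(uptr RegionBeg) {
  if (sizeof(CompactPtrT) == sizeof(uptr) && Scale == 0)
    return 0; // full-width pointers: no compaction needed
  return RegionBeg;
}

int main() {
  const uptr RegionBeg = 0x600000000000ULL; // hypothetical region start
  assert((compactPtrBase<uint64_t, 0>(RegionBeg)) == 0);
  assert((compactPtrBase<uint32_t, 4>(RegionBeg)) == RegionBeg);
  return 0;
}
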
diff --git a/compiler-rt/lib/scudo/standalone/quarantine.h b/compiler-rt/lib/scudo/standalone/quarantine.h
index 27aa4bfec91a..2d231c3a28db 100644
--- a/compiler-rt/lib/scudo/standalone/quarantine.h
+++ b/compiler-rt/lib/scudo/standalone/quarantine.h
@@ -64,11 +64,7 @@ static_assert(sizeof(QuarantineBatch) <= (1U << 13), ""); // 8Kb.
// Per-thread cache of memory blocks.
template <typename Callback> class QuarantineCache {
public:
- void initLinkerInitialized() {}
- void init() {
- memset(this, 0, sizeof(*this));
- initLinkerInitialized();
- }
+ void init() { DCHECK_EQ(atomic_load_relaxed(&Size), 0U); }
// Total memory used, including internal accounting.
uptr getSize() const { return atomic_load_relaxed(&Size); }
@@ -161,7 +157,7 @@ public:
private:
SinglyLinkedList<QuarantineBatch> List;
- atomic_uptr Size;
+ atomic_uptr Size = {};
void addToSize(uptr add) { atomic_store_relaxed(&Size, getSize() + add); }
void subFromSize(uptr sub) { atomic_store_relaxed(&Size, getSize() - sub); }
@@ -174,8 +170,13 @@ private:
template <typename Callback, typename Node> class GlobalQuarantine {
public:
typedef QuarantineCache<Callback> CacheT;
+ using ThisT = GlobalQuarantine<Callback, Node>;
- void initLinkerInitialized(uptr Size, uptr CacheSize) {
+ void init(uptr Size, uptr CacheSize) {
+ DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
+ DCHECK_EQ(atomic_load_relaxed(&MaxSize), 0U);
+ DCHECK_EQ(atomic_load_relaxed(&MinSize), 0U);
+ DCHECK_EQ(atomic_load_relaxed(&MaxCacheSize), 0U);
// Thread local quarantine size can be zero only when global quarantine size
// is zero (it allows us to perform just one atomic read per put() call).
CHECK((Size == 0 && CacheSize == 0) || CacheSize != 0);
@@ -184,16 +185,7 @@ public:
atomic_store_relaxed(&MinSize, Size / 10 * 9); // 90% of max size.
atomic_store_relaxed(&MaxCacheSize, CacheSize);
- Cache.initLinkerInitialized();
- }
- void init(uptr Size, uptr CacheSize) {
- CacheMutex.init();
Cache.init();
- RecycleMutex.init();
- MinSize = {};
- MaxSize = {};
- MaxCacheSize = {};
- initLinkerInitialized(Size, CacheSize);
}
uptr getMaxSize() const { return atomic_load_relaxed(&MaxSize); }
@@ -246,9 +238,9 @@ private:
alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex CacheMutex;
CacheT Cache;
alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex RecycleMutex;
- atomic_uptr MinSize;
- atomic_uptr MaxSize;
- alignas(SCUDO_CACHE_LINE_SIZE) atomic_uptr MaxCacheSize;
+ atomic_uptr MinSize = {};
+ atomic_uptr MaxSize = {};
+ alignas(SCUDO_CACHE_LINE_SIZE) atomic_uptr MaxCacheSize = {};
void NOINLINE recycle(uptr MinSize, Callback Cb) {
CacheT Tmp;
diff --git a/compiler-rt/lib/scudo/standalone/release.h b/compiler-rt/lib/scudo/standalone/release.h
index 5c11da2200e9..293a8bc27bab 100644
--- a/compiler-rt/lib/scudo/standalone/release.h
+++ b/compiler-rt/lib/scudo/standalone/release.h
@@ -17,17 +17,19 @@ namespace scudo {
class ReleaseRecorder {
public:
- ReleaseRecorder(uptr BaseAddress, MapPlatformData *Data = nullptr)
- : BaseAddress(BaseAddress), Data(Data) {}
+ ReleaseRecorder(uptr Base, MapPlatformData *Data = nullptr)
+ : Base(Base), Data(Data) {}
uptr getReleasedRangesCount() const { return ReleasedRangesCount; }
uptr getReleasedBytes() const { return ReleasedBytes; }
+ uptr getBase() const { return Base; }
+
// Releases [From, To) range of pages back to OS.
void releasePageRangeToOS(uptr From, uptr To) {
const uptr Size = To - From;
- releasePagesToOS(BaseAddress, From, Size, Data);
+ releasePagesToOS(Base, From, Size, Data);
ReleasedRangesCount++;
ReleasedBytes += Size;
}
@@ -35,7 +37,7 @@ public:
private:
uptr ReleasedRangesCount = 0;
uptr ReleasedBytes = 0;
- uptr BaseAddress = 0;
+ uptr Base = 0;
MapPlatformData *Data = nullptr;
};
@@ -79,7 +81,8 @@ public:
memset(Buffer, 0, BufferSize);
} else {
Buffer = reinterpret_cast<uptr *>(
- map(nullptr, BufferSize, "scudo:counters", MAP_ALLOWNOMEM));
+ map(nullptr, roundUpTo(BufferSize, getPageSizeCached()),
+ "scudo:counters", MAP_ALLOWNOMEM));
}
}
~PackedCounterArray() {
@@ -88,7 +91,8 @@ public:
if (Buffer == &StaticBuffer[0])
Mutex.unlock();
else
- unmap(reinterpret_cast<void *>(Buffer), BufferSize);
+ unmap(reinterpret_cast<void *>(Buffer),
+ roundUpTo(BufferSize, getPageSizeCached()));
}
bool isAllocated() const { return !!Buffer; }
@@ -179,11 +183,13 @@ private:
uptr CurrentRangeStatePage = 0;
};
-template <class TransferBatchT, class ReleaseRecorderT, typename SkipRegionT>
+template <class TransferBatchT, class ReleaseRecorderT, typename DecompactPtrT,
+ typename SkipRegionT>
NOINLINE void
-releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> &FreeList, uptr Base,
+releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> &FreeList,
uptr RegionSize, uptr NumberOfRegions, uptr BlockSize,
- ReleaseRecorderT *Recorder, SkipRegionT SkipRegion) {
+ ReleaseRecorderT *Recorder, DecompactPtrT DecompactPtr,
+ SkipRegionT SkipRegion) {
const uptr PageSize = getPageSizeCached();
// Figure out the number of chunks per page and whether we can take a fast
@@ -236,9 +242,8 @@ releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> &FreeList, uptr Base,
// Each chunk affects one page only.
for (const auto &It : FreeList) {
for (u32 I = 0; I < It.getCount(); I++) {
- const uptr P = reinterpret_cast<uptr>(It.get(I)) - Base;
- // This takes care of P < Base and P >= Base + RoundedSize.
- if (UNLIKELY(P >= RoundedSize))
+ const uptr P = DecompactPtr(It.get(I)) - Recorder->getBase();
+ if (P >= RoundedSize)
continue;
const uptr RegionIndex = NumberOfRegions == 1U ? 0 : P / RegionSize;
const uptr PInRegion = P - RegionIndex * RegionSize;
@@ -251,9 +256,8 @@ releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> &FreeList, uptr Base,
const uptr LastBlockInRegion = ((RegionSize / BlockSize) - 1U) * BlockSize;
for (const auto &It : FreeList) {
for (u32 I = 0; I < It.getCount(); I++) {
- const uptr P = reinterpret_cast<uptr>(It.get(I)) - Base;
- // This takes care of P < Base and P >= Base + RoundedSize.
- if (UNLIKELY(P >= RoundedSize))
+ const uptr P = DecompactPtr(It.get(I)) - Recorder->getBase();
+ if (P >= RoundedSize)
continue;
const uptr RegionIndex = NumberOfRegions == 1U ? 0 : P / RegionSize;
uptr PInRegion = P - RegionIndex * RegionSize;
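
The loops above now rebase each decompacted free-list entry against the recorder's base before bucketing it into a region. A standalone arithmetic sketch of that bookkeeping, with made-up sizes:

#include <cassert>
#include <cstdint>

using uptr = uint64_t;

int main() {
  const uptr Base = 0x10000;  // Recorder->getBase()
  const uptr RegionSize = 0x4000;
  const uptr NumberOfRegions = 4;
  const uptr RoundedSize = NumberOfRegions * RegionSize;

  const uptr DecompactedPtr = Base + 0x4010; // DecompactPtr(It.get(I))
  const uptr P = DecompactedPtr - Base;
  assert(P < RoundedSize); // out-of-range entries are simply skipped
  const uptr RegionIndex = NumberOfRegions == 1U ? 0 : P / RegionSize;
  const uptr PInRegion = P - RegionIndex * RegionSize;
  assert(RegionIndex == 1);
  assert(PInRegion == 0x10);
  return 0;
}
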
diff --git a/compiler-rt/lib/scudo/standalone/report.cpp b/compiler-rt/lib/scudo/standalone/report.cpp
index 80cc6eda2af9..561c7c51f4e1 100644
--- a/compiler-rt/lib/scudo/standalone/report.cpp
+++ b/compiler-rt/lib/scudo/standalone/report.cpp
@@ -17,7 +17,7 @@ namespace scudo {
class ScopedErrorReport {
public:
- ScopedErrorReport() : Message(512) { Message.append("Scudo ERROR: "); }
+ ScopedErrorReport() : Message() { Message.append("Scudo ERROR: "); }
void append(const char *Format, ...) {
va_list Args;
va_start(Args, Format);
@@ -45,8 +45,8 @@ void NORETURN reportCheckFailed(const char *File, int Line,
trap();
}
ScopedErrorReport Report;
- Report.append("CHECK failed @ %s:%d %s (%llu, %llu)\n", File, Line, Condition,
- Value1, Value2);
+ Report.append("CHECK failed @ %s:%d %s ((u64)op1=%llu, (u64)op2=%llu)\n",
+ File, Line, Condition, Value1, Value2);
}
// Generic string fatal error message.
diff --git a/compiler-rt/lib/scudo/standalone/secondary.h b/compiler-rt/lib/scudo/standalone/secondary.h
index 063640106abb..630e64d46edf 100644
--- a/compiler-rt/lib/scudo/standalone/secondary.h
+++ b/compiler-rt/lib/scudo/standalone/secondary.h
@@ -9,9 +9,12 @@
#ifndef SCUDO_SECONDARY_H_
#define SCUDO_SECONDARY_H_
+#include "chunk.h"
#include "common.h"
#include "list.h"
+#include "memtag.h"
#include "mutex.h"
+#include "options.h"
#include "stats.h"
#include "string_utils.h"
@@ -25,42 +28,61 @@ namespace scudo {
namespace LargeBlock {
-struct Header {
+struct alignas(Max<uptr>(archSupportsMemoryTagging()
+ ? archMemoryTagGranuleSize()
+ : 1,
+ 1U << SCUDO_MIN_ALIGNMENT_LOG)) Header {
LargeBlock::Header *Prev;
LargeBlock::Header *Next;
- uptr BlockEnd;
+ uptr CommitBase;
+ uptr CommitSize;
uptr MapBase;
uptr MapSize;
[[no_unique_address]] MapPlatformData Data;
};
-constexpr uptr getHeaderSize() {
- return roundUpTo(sizeof(Header), 1U << SCUDO_MIN_ALIGNMENT_LOG);
+static_assert(sizeof(Header) % (1U << SCUDO_MIN_ALIGNMENT_LOG) == 0, "");
+static_assert(!archSupportsMemoryTagging() ||
+ sizeof(Header) % archMemoryTagGranuleSize() == 0,
+ "");
+
+constexpr uptr getHeaderSize() { return sizeof(Header); }
+
+template <typename Config> static uptr addHeaderTag(uptr Ptr) {
+ if (allocatorSupportsMemoryTagging<Config>())
+ return addFixedTag(Ptr, 1);
+ return Ptr;
}
-static Header *getHeader(uptr Ptr) {
- return reinterpret_cast<Header *>(Ptr - getHeaderSize());
+template <typename Config> static Header *getHeader(uptr Ptr) {
+ return reinterpret_cast<Header *>(addHeaderTag<Config>(Ptr)) - 1;
}
-static Header *getHeader(const void *Ptr) {
- return getHeader(reinterpret_cast<uptr>(Ptr));
+template <typename Config> static Header *getHeader(const void *Ptr) {
+ return getHeader<Config>(reinterpret_cast<uptr>(Ptr));
}
} // namespace LargeBlock
+static void unmap(LargeBlock::Header *H) {
+ MapPlatformData Data = H->Data;
+ unmap(reinterpret_cast<void *>(H->MapBase), H->MapSize, UNMAP_ALL, &Data);
+}
+
class MapAllocatorNoCache {
public:
- void initLinkerInitialized(UNUSED s32 ReleaseToOsInterval) {}
void init(UNUSED s32 ReleaseToOsInterval) {}
- bool retrieve(UNUSED uptr Size, UNUSED LargeBlock::Header **H,
- UNUSED bool *Zeroed) {
+ bool retrieve(UNUSED Options Options, UNUSED uptr Size, UNUSED uptr Alignment,
+ UNUSED LargeBlock::Header **H, UNUSED bool *Zeroed) {
return false;
}
- bool store(UNUSED LargeBlock::Header *H) { return false; }
+ void store(UNUSED Options Options, LargeBlock::Header *H) { unmap(H); }
bool canCache(UNUSED uptr Size) { return false; }
void disable() {}
void enable() {}
void releaseToOS() {}
+ void disableMemoryTagging() {}
+ void unmapTestOnly() {}
bool setOption(Option O, UNUSED sptr Value) {
if (O == Option::ReleaseInterval || O == Option::MaxCacheEntriesCount ||
O == Option::MaxCacheEntrySize)
@@ -70,6 +92,27 @@ public:
}
};
+static const uptr MaxUnusedCachePages = 4U;
+
+template <typename Config>
+void mapSecondary(Options Options, uptr CommitBase, uptr CommitSize,
+ uptr AllocPos, uptr Flags, MapPlatformData *Data) {
+ const uptr MaxUnusedCacheBytes = MaxUnusedCachePages * getPageSizeCached();
+ if (useMemoryTagging<Config>(Options) && CommitSize > MaxUnusedCacheBytes) {
+ const uptr UntaggedPos = Max(AllocPos, CommitBase + MaxUnusedCacheBytes);
+ map(reinterpret_cast<void *>(CommitBase), UntaggedPos - CommitBase,
+ "scudo:secondary", MAP_RESIZABLE | MAP_MEMTAG | Flags, Data);
+ map(reinterpret_cast<void *>(UntaggedPos),
+ CommitBase + CommitSize - UntaggedPos, "scudo:secondary",
+ MAP_RESIZABLE | Flags, Data);
+ } else {
+ map(reinterpret_cast<void *>(CommitBase), CommitSize, "scudo:secondary",
+ MAP_RESIZABLE | (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) |
+ Flags,
+ Data);
+ }
+}
+
template <typename Config> class MapAllocatorCache {
public:
// Ensure the default maximum specified fits the array.
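
mapSecondary() above splits the committed range around UntaggedPos = Max(AllocPos, CommitBase + MaxUnusedCacheBytes): the range below UntaggedPos is mapped with MAP_MEMTAG, the remainder without it. An arithmetic-only sketch of that split, with illustrative sizes:

#include <algorithm>
#include <cassert>
#include <cstdint>

using uptr = uint64_t;

int main() {
  const uptr PageSize = 0x1000;
  const uptr MaxUnusedCachePages = 4;
  const uptr MaxUnusedCacheBytes = MaxUnusedCachePages * PageSize;

  const uptr CommitBase = 0x100000;
  const uptr CommitSize = 0x20000;             // 128 KiB committed
  const uptr AllocPos = CommitBase + 0x18000;  // user block starts here

  // [CommitBase, UntaggedPos) is mapped with MAP_MEMTAG,
  // [UntaggedPos, CommitBase + CommitSize) without it.
  const uptr UntaggedPos = std::max(AllocPos, CommitBase + MaxUnusedCacheBytes);
  const uptr TaggedBytes = UntaggedPos - CommitBase;
  const uptr UntaggedBytes = CommitBase + CommitSize - UntaggedPos;
  assert(TaggedBytes + UntaggedBytes == CommitSize);
  assert(TaggedBytes == 0x18000); // AllocPos dominates in this example
  return 0;
}
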
@@ -77,79 +120,155 @@ public:
Config::SecondaryCacheEntriesArraySize,
"");
- void initLinkerInitialized(s32 ReleaseToOsInterval) {
+ void init(s32 ReleaseToOsInterval) {
+ DCHECK_EQ(EntriesCount, 0U);
setOption(Option::MaxCacheEntriesCount,
static_cast<sptr>(Config::SecondaryCacheDefaultMaxEntriesCount));
setOption(Option::MaxCacheEntrySize,
static_cast<sptr>(Config::SecondaryCacheDefaultMaxEntrySize));
setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
}
- void init(s32 ReleaseToOsInterval) {
- memset(this, 0, sizeof(*this));
- initLinkerInitialized(ReleaseToOsInterval);
- }
- bool store(LargeBlock::Header *H) {
+ void store(Options Options, LargeBlock::Header *H) {
+ if (!canCache(H->CommitSize))
+ return unmap(H);
+
bool EntryCached = false;
bool EmptyCache = false;
+ const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
const u64 Time = getMonotonicTime();
const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
- {
+ CachedBlock Entry;
+ Entry.CommitBase = H->CommitBase;
+ Entry.CommitSize = H->CommitSize;
+ Entry.MapBase = H->MapBase;
+ Entry.MapSize = H->MapSize;
+ Entry.BlockBegin = reinterpret_cast<uptr>(H + 1);
+ Entry.Data = H->Data;
+ Entry.Time = Time;
+ if (useMemoryTagging<Config>(Options)) {
+ if (Interval == 0 && !SCUDO_FUCHSIA) {
+ // Release the memory and make it inaccessible at the same time by
+ // creating a new MAP_NOACCESS mapping on top of the existing mapping.
+ // Fuchsia does not support replacing mappings by creating a new mapping
+ // on top so we just do the two syscalls there.
+ Entry.Time = 0;
+ mapSecondary<Config>(Options, Entry.CommitBase, Entry.CommitSize,
+ Entry.CommitBase, MAP_NOACCESS, &Entry.Data);
+ } else {
+ setMemoryPermission(Entry.CommitBase, Entry.CommitSize, MAP_NOACCESS,
+ &Entry.Data);
+ }
+ } else if (Interval == 0) {
+ releasePagesToOS(Entry.CommitBase, 0, Entry.CommitSize, &Entry.Data);
+ Entry.Time = 0;
+ }
+ do {
ScopedLock L(Mutex);
+ if (useMemoryTagging<Config>(Options) && QuarantinePos == -1U) {
+ // If we get here then memory tagging was disabled in between when we
+ // read Options and when we locked Mutex. We can't insert our entry into
+ // the quarantine or the cache because the permissions would be wrong so
+ // just unmap it.
+ break;
+ }
+ if (Config::SecondaryCacheQuarantineSize &&
+ useMemoryTagging<Config>(Options)) {
+ QuarantinePos =
+ (QuarantinePos + 1) % Max(Config::SecondaryCacheQuarantineSize, 1u);
+ if (!Quarantine[QuarantinePos].CommitBase) {
+ Quarantine[QuarantinePos] = Entry;
+ return;
+ }
+ CachedBlock PrevEntry = Quarantine[QuarantinePos];
+ Quarantine[QuarantinePos] = Entry;
+ if (OldestTime == 0)
+ OldestTime = Entry.Time;
+ Entry = PrevEntry;
+ }
if (EntriesCount >= MaxCount) {
if (IsFullEvents++ == 4U)
EmptyCache = true;
} else {
for (u32 I = 0; I < MaxCount; I++) {
- if (Entries[I].Block)
+ if (Entries[I].CommitBase)
continue;
if (I != 0)
Entries[I] = Entries[0];
- Entries[0].Block = reinterpret_cast<uptr>(H);
- Entries[0].BlockEnd = H->BlockEnd;
- Entries[0].MapBase = H->MapBase;
- Entries[0].MapSize = H->MapSize;
- Entries[0].Data = H->Data;
- Entries[0].Time = Time;
+ Entries[0] = Entry;
EntriesCount++;
+ if (OldestTime == 0)
+ OldestTime = Entry.Time;
EntryCached = true;
break;
}
}
- }
- s32 Interval;
+ } while (0);
if (EmptyCache)
empty();
- else if ((Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs)) >= 0)
+ else if (Interval >= 0)
releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
- return EntryCached;
+ if (!EntryCached)
+ unmap(reinterpret_cast<void *>(Entry.MapBase), Entry.MapSize, UNMAP_ALL,
+ &Entry.Data);
}
- bool retrieve(uptr Size, LargeBlock::Header **H, bool *Zeroed) {
+ bool retrieve(Options Options, uptr Size, uptr Alignment,
+ LargeBlock::Header **H, bool *Zeroed) {
const uptr PageSize = getPageSizeCached();
const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
- ScopedLock L(Mutex);
- if (EntriesCount == 0)
- return false;
- for (u32 I = 0; I < MaxCount; I++) {
- if (!Entries[I].Block)
- continue;
- const uptr BlockSize = Entries[I].BlockEnd - Entries[I].Block;
- if (Size > BlockSize)
- continue;
- if (Size < BlockSize - PageSize * 4U)
- continue;
- *H = reinterpret_cast<LargeBlock::Header *>(Entries[I].Block);
- *Zeroed = Entries[I].Time == 0;
- Entries[I].Block = 0;
- (*H)->BlockEnd = Entries[I].BlockEnd;
- (*H)->MapBase = Entries[I].MapBase;
- (*H)->MapSize = Entries[I].MapSize;
- (*H)->Data = Entries[I].Data;
+ bool Found = false;
+ CachedBlock Entry;
+ uptr HeaderPos;
+ {
+ ScopedLock L(Mutex);
+ if (EntriesCount == 0)
+ return false;
+ for (u32 I = 0; I < MaxCount; I++) {
+ const uptr CommitBase = Entries[I].CommitBase;
+ if (!CommitBase)
+ continue;
+ const uptr CommitSize = Entries[I].CommitSize;
+ const uptr AllocPos =
+ roundDownTo(CommitBase + CommitSize - Size, Alignment);
+ HeaderPos =
+ AllocPos - Chunk::getHeaderSize() - LargeBlock::getHeaderSize();
+ if (HeaderPos > CommitBase + CommitSize)
+ continue;
+ if (HeaderPos < CommitBase ||
+ AllocPos > CommitBase + PageSize * MaxUnusedCachePages)
+ continue;
+ Found = true;
+ Entry = Entries[I];
+ Entries[I].CommitBase = 0;
+ break;
+ }
+ }
+ if (Found) {
+ *H = reinterpret_cast<LargeBlock::Header *>(
+ LargeBlock::addHeaderTag<Config>(HeaderPos));
+ *Zeroed = Entry.Time == 0;
+ if (useMemoryTagging<Config>(Options))
+ setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0, &Entry.Data);
+ uptr NewBlockBegin = reinterpret_cast<uptr>(*H + 1);
+ if (useMemoryTagging<Config>(Options)) {
+ if (*Zeroed)
+ storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
+ NewBlockBegin);
+ else if (Entry.BlockBegin < NewBlockBegin)
+ storeTags(Entry.BlockBegin, NewBlockBegin);
+ else
+ storeTags(untagPointer(NewBlockBegin),
+ untagPointer(Entry.BlockBegin));
+ }
+ (*H)->CommitBase = Entry.CommitBase;
+ (*H)->CommitSize = Entry.CommitSize;
+ (*H)->MapBase = Entry.MapBase;
+ (*H)->MapSize = Entry.MapSize;
+ (*H)->Data = Entry.Data;
EntriesCount--;
- return true;
}
- return false;
+ return Found;
}
bool canCache(uptr Size) {
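
retrieve() above decides whether a cached entry can service a request by placing the user block as high as possible in the committed range and checking where the headers land. An arithmetic-only sketch of that fit test, with illustrative sizes and a stand-in for the combined Chunk::getHeaderSize() + LargeBlock::getHeaderSize():

#include <cassert>
#include <cstdint>

using uptr = uint64_t;

uptr roundDownTo(uptr X, uptr Boundary) { return X & ~(Boundary - 1); }

int main() {
  const uptr PageSize = 0x1000;
  const uptr MaxUnusedCachePages = 4;
  const uptr HeaderSize = 0x40; // stand-in for the combined headers

  const uptr CommitBase = 0x200000;
  const uptr CommitSize = 0x8000;
  const uptr Size = 0x6000;      // requested user size
  const uptr Alignment = 0x1000;

  const uptr AllocPos = roundDownTo(CommitBase + CommitSize - Size, Alignment);
  const uptr HeaderPos = AllocPos - HeaderSize;
  // Reject entries whose headers fall outside the commit, or whose leading
  // slack would exceed MaxUnusedCachePages.
  const bool Usable = HeaderPos <= CommitBase + CommitSize &&
                      HeaderPos >= CommitBase &&
                      AllocPos <= CommitBase + PageSize * MaxUnusedCachePages;
  assert(AllocPos == 0x202000);
  assert(Usable);
  return 0;
}
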
@@ -165,13 +284,15 @@ public:
Config::SecondaryCacheMinReleaseToOsIntervalMs);
atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
return true;
- } else if (O == Option::MaxCacheEntriesCount) {
+ }
+ if (O == Option::MaxCacheEntriesCount) {
const u32 MaxCount = static_cast<u32>(Value);
if (MaxCount > Config::SecondaryCacheEntriesArraySize)
return false;
atomic_store_relaxed(&MaxEntriesCount, MaxCount);
return true;
- } else if (O == Option::MaxCacheEntrySize) {
+ }
+ if (O == Option::MaxCacheEntrySize) {
atomic_store_relaxed(&MaxEntrySize, static_cast<uptr>(Value));
return true;
}
@@ -181,10 +302,29 @@ public:
void releaseToOS() { releaseOlderThan(UINT64_MAX); }
+ void disableMemoryTagging() {
+ ScopedLock L(Mutex);
+ for (u32 I = 0; I != Config::SecondaryCacheQuarantineSize; ++I) {
+ if (Quarantine[I].CommitBase) {
+ unmap(reinterpret_cast<void *>(Quarantine[I].MapBase),
+ Quarantine[I].MapSize, UNMAP_ALL, &Quarantine[I].Data);
+ Quarantine[I].CommitBase = 0;
+ }
+ }
+ const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
+ for (u32 I = 0; I < MaxCount; I++)
+ if (Entries[I].CommitBase)
+ setMemoryPermission(Entries[I].CommitBase, Entries[I].CommitSize, 0,
+ &Entries[I].Data);
+ QuarantinePos = -1U;
+ }
+
void disable() { Mutex.lock(); }
void enable() { Mutex.unlock(); }
+ void unmapTestOnly() { empty(); }
+
private:
void empty() {
struct {
@@ -196,12 +336,12 @@ private:
{
ScopedLock L(Mutex);
for (uptr I = 0; I < Config::SecondaryCacheEntriesArraySize; I++) {
- if (!Entries[I].Block)
+ if (!Entries[I].CommitBase)
continue;
MapInfo[N].MapBase = reinterpret_cast<void *>(Entries[I].MapBase);
MapInfo[N].MapSize = Entries[I].MapSize;
MapInfo[N].Data = Entries[I].Data;
- Entries[I].Block = 0;
+ Entries[I].CommitBase = 0;
N++;
}
EntriesCount = 0;
@@ -212,59 +352,72 @@ private:
&MapInfo[I].Data);
}
- void releaseOlderThan(u64 Time) {
- ScopedLock L(Mutex);
- if (!EntriesCount)
- return;
- for (uptr I = 0; I < Config::SecondaryCacheEntriesArraySize; I++) {
- if (!Entries[I].Block || !Entries[I].Time || Entries[I].Time > Time)
- continue;
- releasePagesToOS(Entries[I].Block, 0,
- Entries[I].BlockEnd - Entries[I].Block,
- &Entries[I].Data);
- Entries[I].Time = 0;
- }
- }
-
struct CachedBlock {
- uptr Block;
- uptr BlockEnd;
+ uptr CommitBase;
+ uptr CommitSize;
uptr MapBase;
uptr MapSize;
+ uptr BlockBegin;
[[no_unique_address]] MapPlatformData Data;
u64 Time;
};
+ void releaseIfOlderThan(CachedBlock &Entry, u64 Time) {
+ if (!Entry.CommitBase || !Entry.Time)
+ return;
+ if (Entry.Time > Time) {
+ if (OldestTime == 0 || Entry.Time < OldestTime)
+ OldestTime = Entry.Time;
+ return;
+ }
+ releasePagesToOS(Entry.CommitBase, 0, Entry.CommitSize, &Entry.Data);
+ Entry.Time = 0;
+ }
+
+ void releaseOlderThan(u64 Time) {
+ ScopedLock L(Mutex);
+ if (!EntriesCount || OldestTime == 0 || OldestTime > Time)
+ return;
+ OldestTime = 0;
+ for (uptr I = 0; I < Config::SecondaryCacheQuarantineSize; I++)
+ releaseIfOlderThan(Quarantine[I], Time);
+ for (uptr I = 0; I < Config::SecondaryCacheEntriesArraySize; I++)
+ releaseIfOlderThan(Entries[I], Time);
+ }
+
HybridMutex Mutex;
- CachedBlock Entries[Config::SecondaryCacheEntriesArraySize];
- u32 EntriesCount;
- atomic_u32 MaxEntriesCount;
- atomic_uptr MaxEntrySize;
- uptr LargestSize;
- u32 IsFullEvents;
- atomic_s32 ReleaseToOsIntervalMs;
+ u32 EntriesCount = 0;
+ u32 QuarantinePos = 0;
+ atomic_u32 MaxEntriesCount = {};
+ atomic_uptr MaxEntrySize = {};
+ u64 OldestTime = 0;
+ u32 IsFullEvents = 0;
+ atomic_s32 ReleaseToOsIntervalMs = {};
+
+ CachedBlock Entries[Config::SecondaryCacheEntriesArraySize] = {};
+ CachedBlock Quarantine[Config::SecondaryCacheQuarantineSize] = {};
};
template <typename Config> class MapAllocator {
public:
- void initLinkerInitialized(GlobalStats *S, s32 ReleaseToOsInterval = -1) {
- Cache.initLinkerInitialized(ReleaseToOsInterval);
- Stats.initLinkerInitialized();
+ void init(GlobalStats *S, s32 ReleaseToOsInterval = -1) {
+ DCHECK_EQ(AllocatedBytes, 0U);
+ DCHECK_EQ(FreedBytes, 0U);
+ Cache.init(ReleaseToOsInterval);
+ Stats.init();
if (LIKELY(S))
S->link(&Stats);
}
- void init(GlobalStats *S, s32 ReleaseToOsInterval = -1) {
- memset(this, 0, sizeof(*this));
- initLinkerInitialized(S, ReleaseToOsInterval);
- }
- void *allocate(uptr Size, uptr AlignmentHint = 0, uptr *BlockEnd = nullptr,
+ void *allocate(Options Options, uptr Size, uptr AlignmentHint = 0,
+ uptr *BlockEnd = nullptr,
FillContentsMode FillContents = NoFill);
- void deallocate(void *Ptr);
+ void deallocate(Options Options, void *Ptr);
static uptr getBlockEnd(void *Ptr) {
- return LargeBlock::getHeader(Ptr)->BlockEnd;
+ auto *B = LargeBlock::getHeader<Config>(Ptr);
+ return B->CommitBase + B->CommitSize;
}
static uptr getBlockSize(void *Ptr) {
@@ -284,8 +437,12 @@ public:
}
template <typename F> void iterateOverBlocks(F Callback) const {
- for (const auto &H : InUseBlocks)
- Callback(reinterpret_cast<uptr>(&H) + LargeBlock::getHeaderSize());
+ for (const auto &H : InUseBlocks) {
+ uptr Ptr = reinterpret_cast<uptr>(&H) + LargeBlock::getHeaderSize();
+ if (allocatorSupportsMemoryTagging<Config>())
+ Ptr = untagPointer(Ptr);
+ Callback(Ptr);
+ }
}
uptr canCache(uptr Size) { return Cache.canCache(Size); }
@@ -294,16 +451,20 @@ public:
void releaseToOS() { Cache.releaseToOS(); }
+ void disableMemoryTagging() { Cache.disableMemoryTagging(); }
+
+ void unmapTestOnly() { Cache.unmapTestOnly(); }
+
private:
typename Config::SecondaryCache Cache;
HybridMutex Mutex;
DoublyLinkedList<LargeBlock::Header> InUseBlocks;
- uptr AllocatedBytes;
- uptr FreedBytes;
- uptr LargestSize;
- u32 NumberOfAllocs;
- u32 NumberOfFrees;
+ uptr AllocatedBytes = 0;
+ uptr FreedBytes = 0;
+ uptr LargestSize = 0;
+ u32 NumberOfAllocs = 0;
+ u32 NumberOfFrees = 0;
LocalStats Stats;
};
@@ -319,26 +480,36 @@ private:
// the committed memory will amount to something close to Size - AlignmentHint
// (pending rounding and headers).
template <typename Config>
-void *MapAllocator<Config>::allocate(uptr Size, uptr AlignmentHint,
- uptr *BlockEnd,
+void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
+ uptr *BlockEndPtr,
FillContentsMode FillContents) {
- DCHECK_GE(Size, AlignmentHint);
+ if (Options.get(OptionBit::AddLargeAllocationSlack))
+ Size += 1UL << SCUDO_MIN_ALIGNMENT_LOG;
+ Alignment = Max(Alignment, 1UL << SCUDO_MIN_ALIGNMENT_LOG);
const uptr PageSize = getPageSizeCached();
- const uptr RoundedSize =
- roundUpTo(Size + LargeBlock::getHeaderSize(), PageSize);
-
- if (AlignmentHint < PageSize && Cache.canCache(RoundedSize)) {
+ uptr RoundedSize =
+ roundUpTo(roundUpTo(Size, Alignment) + LargeBlock::getHeaderSize() +
+ Chunk::getHeaderSize(),
+ PageSize);
+ if (Alignment > PageSize)
+ RoundedSize += Alignment - PageSize;
+
+ if (Alignment < PageSize && Cache.canCache(RoundedSize)) {
LargeBlock::Header *H;
bool Zeroed;
- if (Cache.retrieve(RoundedSize, &H, &Zeroed)) {
- if (BlockEnd)
- *BlockEnd = H->BlockEnd;
- void *Ptr = reinterpret_cast<void *>(reinterpret_cast<uptr>(H) +
- LargeBlock::getHeaderSize());
+ if (Cache.retrieve(Options, Size, Alignment, &H, &Zeroed)) {
+ const uptr BlockEnd = H->CommitBase + H->CommitSize;
+ if (BlockEndPtr)
+ *BlockEndPtr = BlockEnd;
+ uptr HInt = reinterpret_cast<uptr>(H);
+ if (allocatorSupportsMemoryTagging<Config>())
+ HInt = untagPointer(HInt);
+ const uptr PtrInt = HInt + LargeBlock::getHeaderSize();
+ void *Ptr = reinterpret_cast<void *>(PtrInt);
if (FillContents && !Zeroed)
memset(Ptr, FillContents == ZeroFill ? 0 : PatternFillByte,
- H->BlockEnd - reinterpret_cast<uptr>(Ptr));
- const uptr BlockSize = H->BlockEnd - reinterpret_cast<uptr>(H);
+ BlockEnd - PtrInt);
+ const uptr BlockSize = BlockEnd - HInt;
{
ScopedLock L(Mutex);
InUseBlocks.push_back(H);
@@ -353,9 +524,8 @@ void *MapAllocator<Config>::allocate(uptr Size, uptr AlignmentHint,
MapPlatformData Data = {};
const uptr MapSize = RoundedSize + 2 * PageSize;
- uptr MapBase =
- reinterpret_cast<uptr>(map(nullptr, MapSize, "scudo:secondary",
- MAP_NOACCESS | MAP_ALLOWNOMEM, &Data));
+ uptr MapBase = reinterpret_cast<uptr>(
+ map(nullptr, MapSize, nullptr, MAP_NOACCESS | MAP_ALLOWNOMEM, &Data));
if (UNLIKELY(!MapBase))
return nullptr;
uptr CommitBase = MapBase + PageSize;
@@ -363,11 +533,11 @@ void *MapAllocator<Config>::allocate(uptr Size, uptr AlignmentHint,
// In the unlikely event of alignments larger than a page, adjust the amount
// of memory we want to commit, and trim the extra memory.
- if (UNLIKELY(AlignmentHint >= PageSize)) {
+ if (UNLIKELY(Alignment >= PageSize)) {
// For alignments greater than or equal to a page, the user pointer (e.g. the
// pointer that is returned by the C or C++ allocation APIs) ends up on a
// page boundary, and our headers will live in the preceding page.
- CommitBase = roundUpTo(MapBase + PageSize + 1, AlignmentHint) - PageSize;
+ CommitBase = roundUpTo(MapBase + PageSize + 1, Alignment) - PageSize;
const uptr NewMapBase = CommitBase - PageSize;
DCHECK_GE(NewMapBase, MapBase);
// We only trim the extra memory on 32-bit platforms: 64-bit platforms
@@ -376,9 +546,8 @@ void *MapAllocator<Config>::allocate(uptr Size, uptr AlignmentHint,
unmap(reinterpret_cast<void *>(MapBase), NewMapBase - MapBase, 0, &Data);
MapBase = NewMapBase;
}
- const uptr NewMapEnd = CommitBase + PageSize +
- roundUpTo((Size - AlignmentHint), PageSize) +
- PageSize;
+ const uptr NewMapEnd =
+ CommitBase + PageSize + roundUpTo(Size, PageSize) + PageSize;
DCHECK_LE(NewMapEnd, MapEnd);
if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
unmap(reinterpret_cast<void *>(NewMapEnd), MapEnd - NewMapEnd, 0, &Data);
@@ -387,16 +556,22 @@ void *MapAllocator<Config>::allocate(uptr Size, uptr AlignmentHint,
}
const uptr CommitSize = MapEnd - PageSize - CommitBase;
- const uptr Ptr = reinterpret_cast<uptr>(
- map(reinterpret_cast<void *>(CommitBase), CommitSize, "scudo:secondary",
- MAP_RESIZABLE, &Data));
- LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(Ptr);
+ const uptr AllocPos = roundDownTo(CommitBase + CommitSize - Size, Alignment);
+ mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0, &Data);
+ const uptr HeaderPos =
+ AllocPos - Chunk::getHeaderSize() - LargeBlock::getHeaderSize();
+ LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(
+ LargeBlock::addHeaderTag<Config>(HeaderPos));
+ if (useMemoryTagging<Config>(Options))
+ storeTags(LargeBlock::addHeaderTag<Config>(CommitBase),
+ reinterpret_cast<uptr>(H + 1));
H->MapBase = MapBase;
H->MapSize = MapEnd - MapBase;
- H->BlockEnd = CommitBase + CommitSize;
+ H->CommitBase = CommitBase;
+ H->CommitSize = CommitSize;
H->Data = Data;
- if (BlockEnd)
- *BlockEnd = CommitBase + CommitSize;
+ if (BlockEndPtr)
+ *BlockEndPtr = CommitBase + CommitSize;
{
ScopedLock L(Mutex);
InUseBlocks.push_back(H);
@@ -407,13 +582,13 @@ void *MapAllocator<Config>::allocate(uptr Size, uptr AlignmentHint,
Stats.add(StatAllocated, CommitSize);
Stats.add(StatMapped, H->MapSize);
}
- return reinterpret_cast<void *>(Ptr + LargeBlock::getHeaderSize());
+ return reinterpret_cast<void *>(HeaderPos + LargeBlock::getHeaderSize());
}
-template <typename Config> void MapAllocator<Config>::deallocate(void *Ptr) {
- LargeBlock::Header *H = LargeBlock::getHeader(Ptr);
- const uptr Block = reinterpret_cast<uptr>(H);
- const uptr CommitSize = H->BlockEnd - Block;
+template <typename Config>
+void MapAllocator<Config>::deallocate(Options Options, void *Ptr) {
+ LargeBlock::Header *H = LargeBlock::getHeader<Config>(Ptr);
+ const uptr CommitSize = H->CommitSize;
{
ScopedLock L(Mutex);
InUseBlocks.remove(H);
@@ -422,12 +597,7 @@ template <typename Config> void MapAllocator<Config>::deallocate(void *Ptr) {
Stats.sub(StatAllocated, CommitSize);
Stats.sub(StatMapped, H->MapSize);
}
- if (Cache.canCache(CommitSize) && Cache.store(H))
- return;
- void *Addr = reinterpret_cast<void *>(H->MapBase);
- const uptr Size = H->MapSize;
- MapPlatformData Data = H->Data;
- unmap(Addr, Size, UNMAP_ALL, &Data);
+ Cache.store(Options, H);
}
template <typename Config>
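The time-based release path added above only walks the quarantine and entry arrays once the oldest cached timestamp has actually passed the cutoff, and it rebuilds OldestTime from whatever it keeps. A minimal standalone sketch of that pattern (a plain array and a dummy release call stand in for the real Scudo types):

    #include <array>
    #include <cstdint>

    struct Entry {
      uint64_t CommitBase = 0;  // Non-zero while the slot holds a cached mapping.
      uint64_t Time = 0;        // Non-zero while its pages are still committed.
    };

    struct Cache {
      std::array<Entry, 16> Entries{};
      uint64_t OldestTime = 0;

      void releasePages(Entry &E) { E.Time = 0; /* releasePagesToOS() in Scudo */ }

      void releaseOlderThan(uint64_t Time) {
        if (OldestTime == 0 || OldestTime > Time)
          return;               // Nothing is old enough yet; skip the scan.
        OldestTime = 0;
        for (Entry &E : Entries) {
          if (!E.CommitBase || !E.Time)
            continue;
          if (E.Time > Time) {  // Too recent: keep it, but remember the oldest
            if (OldestTime == 0 || E.Time < OldestTime)
              OldestTime = E.Time;  // survivor for the next pass.
            continue;
          }
          releasePages(E);
        }
      }
    };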
diff --git a/compiler-rt/lib/scudo/standalone/size_class_map.h b/compiler-rt/lib/scudo/standalone/size_class_map.h
index 5ed8e2845b38..ba0f78453bcb 100644
--- a/compiler-rt/lib/scudo/standalone/size_class_map.h
+++ b/compiler-rt/lib/scudo/standalone/size_class_map.h
@@ -64,12 +64,10 @@ class FixedSizeClassMap : public SizeClassMapBase<Config> {
static const u8 S = Config::NumBits - 1;
static const uptr M = (1UL << S) - 1;
- static const uptr SizeDelta = Chunk::getHeaderSize();
-
public:
static const u32 MaxNumCachedHint = Config::MaxNumCachedHint;
- static const uptr MaxSize = (1UL << Config::MaxSizeLog) + SizeDelta;
+ static const uptr MaxSize = (1UL << Config::MaxSizeLog) + Config::SizeDelta;
static const uptr NumClasses =
MidClass + ((Config::MaxSizeLog - Config::MidSizeLog) << S) + 1;
static_assert(NumClasses <= 256, "");
@@ -79,16 +77,22 @@ public:
static uptr getSizeByClassId(uptr ClassId) {
DCHECK_NE(ClassId, BatchClassId);
if (ClassId <= MidClass)
- return (ClassId << Config::MinSizeLog) + SizeDelta;
+ return (ClassId << Config::MinSizeLog) + Config::SizeDelta;
ClassId -= MidClass;
const uptr T = MidSize << (ClassId >> S);
- return T + (T >> S) * (ClassId & M) + SizeDelta;
+ return T + (T >> S) * (ClassId & M) + Config::SizeDelta;
+ }
+
+ static u8 getSizeLSBByClassId(uptr ClassId) {
+ return u8(getLeastSignificantSetBitIndex(getSizeByClassId(ClassId)));
}
+ static constexpr bool usesCompressedLSBFormat() { return false; }
+
static uptr getClassIdBySize(uptr Size) {
- if (Size <= SizeDelta + (1 << Config::MinSizeLog))
+ if (Size <= Config::SizeDelta + (1 << Config::MinSizeLog))
return 1;
- Size -= SizeDelta;
+ Size -= Config::SizeDelta;
DCHECK_LE(Size, MaxSize);
if (Size <= MidSize)
return (Size + MinSize - 1) >> Config::MinSizeLog;
@@ -137,7 +141,41 @@ class TableSizeClassMap : public SizeClassMapBase<Config> {
u8 Tab[getTableSize()] = {};
};
- static constexpr SizeTable Table = {};
+ static constexpr SizeTable SzTable = {};
+
+ struct LSBTable {
+ constexpr LSBTable() {
+ u8 Min = 255, Max = 0;
+ for (uptr I = 0; I != ClassesSize; ++I) {
+ for (u8 Bit = 0; Bit != 64; ++Bit) {
+ if (Config::Classes[I] & (1 << Bit)) {
+ Tab[I] = Bit;
+ if (Bit < Min)
+ Min = Bit;
+ if (Bit > Max)
+ Max = Bit;
+ break;
+ }
+ }
+ }
+
+ if (Max - Min > 3 || ClassesSize > 32)
+ return;
+
+ UseCompressedFormat = true;
+ CompressedMin = Min;
+ for (uptr I = 0; I != ClassesSize; ++I)
+ CompressedValue |= u64(Tab[I] - Min) << (I * 2);
+ }
+
+ u8 Tab[ClassesSize] = {};
+
+ bool UseCompressedFormat = false;
+ u8 CompressedMin = 0;
+ u64 CompressedValue = 0;
+ };
+
+ static constexpr LSBTable LTable = {};
public:
static const u32 MaxNumCachedHint = Config::MaxNumCachedHint;
@@ -152,6 +190,18 @@ public:
return Config::Classes[ClassId - 1];
}
+ static u8 getSizeLSBByClassId(uptr ClassId) {
+ if (LTable.UseCompressedFormat)
+ return ((LTable.CompressedValue >> ((ClassId - 1) * 2)) & 3) +
+ LTable.CompressedMin;
+ else
+ return LTable.Tab[ClassId - 1];
+ }
+
+ static constexpr bool usesCompressedLSBFormat() {
+ return LTable.UseCompressedFormat;
+ }
+
static uptr getClassIdBySize(uptr Size) {
if (Size <= Config::Classes[0])
return 1;
@@ -159,7 +209,7 @@ public:
DCHECK_LE(Size, MaxSize);
if (Size <= (1 << Config::MidSizeLog))
return ((Size - 1) >> Config::MinSizeLog) + 1;
- return Table.Tab[scaledLog2(Size - 1, Config::MidSizeLog, S)];
+ return SzTable.Tab[scaledLog2(Size - 1, Config::MidSizeLog, S)];
}
static u32 getMaxCachedHint(uptr Size) {
@@ -168,13 +218,37 @@ public:
}
};
+struct DefaultSizeClassConfig {
+ static const uptr NumBits = 3;
+ static const uptr MinSizeLog = 5;
+ static const uptr MidSizeLog = 8;
+ static const uptr MaxSizeLog = 17;
+ static const u32 MaxNumCachedHint = 14;
+ static const uptr MaxBytesCachedLog = 10;
+ static const uptr SizeDelta = 0;
+};
+
+typedef FixedSizeClassMap<DefaultSizeClassConfig> DefaultSizeClassMap;
+
+struct FuchsiaSizeClassConfig {
+ static const uptr NumBits = 3;
+ static const uptr MinSizeLog = 5;
+ static const uptr MidSizeLog = 8;
+ static const uptr MaxSizeLog = 17;
+ static const u32 MaxNumCachedHint = 10;
+ static const uptr MaxBytesCachedLog = 10;
+ static const uptr SizeDelta = Chunk::getHeaderSize();
+};
+
+typedef FixedSizeClassMap<FuchsiaSizeClassConfig> FuchsiaSizeClassMap;
+
struct AndroidSizeClassConfig {
#if SCUDO_WORDSIZE == 64U
static const uptr NumBits = 7;
static const uptr MinSizeLog = 4;
static const uptr MidSizeLog = 6;
static const uptr MaxSizeLog = 16;
- static const u32 MaxNumCachedHint = 14;
+ static const u32 MaxNumCachedHint = 13;
static const uptr MaxBytesCachedLog = 13;
static constexpr u32 Classes[] = {
@@ -208,16 +282,9 @@ struct AndroidSizeClassConfig {
typedef TableSizeClassMap<AndroidSizeClassConfig> AndroidSizeClassMap;
-struct DefaultSizeClassConfig {
- static const uptr NumBits = 3;
- static const uptr MinSizeLog = 5;
- static const uptr MidSizeLog = 8;
- static const uptr MaxSizeLog = 17;
- static const u32 MaxNumCachedHint = 8;
- static const uptr MaxBytesCachedLog = 10;
-};
-
-typedef FixedSizeClassMap<DefaultSizeClassConfig> DefaultSizeClassMap;
+#if SCUDO_WORDSIZE == 64U && defined(__clang__)
+static_assert(AndroidSizeClassMap::usesCompressedLSBFormat(), "");
+#endif
struct SvelteSizeClassConfig {
#if SCUDO_WORDSIZE == 64U
@@ -225,22 +292,38 @@ struct SvelteSizeClassConfig {
static const uptr MinSizeLog = 4;
static const uptr MidSizeLog = 8;
static const uptr MaxSizeLog = 14;
- static const u32 MaxNumCachedHint = 4;
+ static const u32 MaxNumCachedHint = 13;
static const uptr MaxBytesCachedLog = 10;
+ static const uptr SizeDelta = Chunk::getHeaderSize();
#else
static const uptr NumBits = 4;
static const uptr MinSizeLog = 3;
static const uptr MidSizeLog = 7;
static const uptr MaxSizeLog = 14;
- static const u32 MaxNumCachedHint = 5;
+ static const u32 MaxNumCachedHint = 14;
static const uptr MaxBytesCachedLog = 10;
+ static const uptr SizeDelta = Chunk::getHeaderSize();
#endif
};
typedef FixedSizeClassMap<SvelteSizeClassConfig> SvelteSizeClassMap;
+// Trusty is configured to only have one region containing blocks of size
+// 2^7 bytes.
+struct TrustySizeClassConfig {
+ static const uptr NumBits = 1;
+ static const uptr MinSizeLog = 7;
+ static const uptr MidSizeLog = 7;
+ static const uptr MaxSizeLog = 7;
+ static const u32 MaxNumCachedHint = 8;
+ static const uptr MaxBytesCachedLog = 10;
+ static const uptr SizeDelta = 0;
+};
+
+typedef FixedSizeClassMap<TrustySizeClassConfig> TrustySizeClassMap;
+
template <typename SCMap> inline void printMap() {
- ScopedString Buffer(1024);
+ ScopedString Buffer;
uptr PrevS = 0;
uptr TotalCached = 0;
for (uptr I = 0; I < SCMap::NumClasses; I++) {
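The constexpr LSBTable above records, for each class, the index of the least-significant set bit of its size, and packs those indices into two bits per class in a single u64 whenever the indices span at most four distinct values and there are at most 32 classes; getSizeLSBByClassId then decodes that form. A small self-contained sketch of the same encode/decode round trip (the class sizes here are made up):

    #include <cassert>
    #include <cstdint>

    int main() {
      // Hypothetical class sizes; their least-significant set bits are 4, 4, 5, 6, 7.
      const uint64_t Classes[] = {0x30, 0x50, 0x60, 0x40, 0x80};
      const int N = 5;

      uint8_t Lsb[N], Min = 255, Max = 0;
      for (int I = 0; I != N; ++I) {
        Lsb[I] = static_cast<uint8_t>(__builtin_ctzll(Classes[I]));
        if (Lsb[I] < Min) Min = Lsb[I];
        if (Lsb[I] > Max) Max = Lsb[I];
      }
      assert(Max - Min <= 3 && N <= 32);  // Otherwise the uncompressed table is used.

      uint64_t Compressed = 0;
      for (int I = 0; I != N; ++I)
        Compressed |= static_cast<uint64_t>(Lsb[I] - Min) << (I * 2);

      // Decode: the same expression as getSizeLSBByClassId (which is 1-based).
      for (int I = 0; I != N; ++I)
        assert(((Compressed >> (I * 2)) & 3) + Min == Lsb[I]);
      return 0;
    }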
diff --git a/compiler-rt/lib/scudo/standalone/stack_depot.h b/compiler-rt/lib/scudo/standalone/stack_depot.h
index 7968f7efff7c..458198fcb7aa 100644
--- a/compiler-rt/lib/scudo/standalone/stack_depot.h
+++ b/compiler-rt/lib/scudo/standalone/stack_depot.h
@@ -40,7 +40,7 @@ public:
class StackDepot {
HybridMutex RingEndMu;
- u32 RingEnd;
+ u32 RingEnd = 0;
// This data structure stores a stack trace for each allocation and
// deallocation when stack trace recording is enabled, that may be looked up
@@ -70,7 +70,7 @@ class StackDepot {
#endif
static const uptr TabSize = 1 << TabBits;
static const uptr TabMask = TabSize - 1;
- atomic_u32 Tab[TabSize];
+ atomic_u32 Tab[TabSize] = {};
#ifdef SCUDO_FUZZ
static const uptr RingBits = 4;
@@ -79,7 +79,7 @@ class StackDepot {
#endif
static const uptr RingSize = 1 << RingBits;
static const uptr RingMask = RingSize - 1;
- atomic_u64 Ring[RingSize];
+ atomic_u64 Ring[RingSize] = {};
public:
// Insert hash of the stack trace [Begin, End) into the stack depot, and
diff --git a/compiler-rt/lib/scudo/standalone/stats.h b/compiler-rt/lib/scudo/standalone/stats.h
index d76b904949ea..be5bf2d3720a 100644
--- a/compiler-rt/lib/scudo/standalone/stats.h
+++ b/compiler-rt/lib/scudo/standalone/stats.h
@@ -29,8 +29,10 @@ typedef uptr StatCounters[StatCount];
// LocalStats::add'ing, this is OK, we will still get a meaningful number.
class LocalStats {
public:
- void initLinkerInitialized() {}
- void init() { memset(this, 0, sizeof(*this)); }
+ void init() {
+ for (uptr I = 0; I < StatCount; I++)
+ DCHECK_EQ(get(static_cast<StatType>(I)), 0U);
+ }
void add(StatType I, uptr V) {
V += atomic_load_relaxed(&StatsArray[I]);
@@ -46,23 +48,17 @@ public:
uptr get(StatType I) const { return atomic_load_relaxed(&StatsArray[I]); }
- LocalStats *Next;
- LocalStats *Prev;
+ LocalStats *Next = nullptr;
+ LocalStats *Prev = nullptr;
private:
- atomic_uptr StatsArray[StatCount];
+ atomic_uptr StatsArray[StatCount] = {};
};
// Global stats, used for aggregation and querying.
class GlobalStats : public LocalStats {
public:
- void initLinkerInitialized() {}
- void init() {
- LocalStats::init();
- Mutex.init();
- StatsList = {};
- initLinkerInitialized();
- }
+ void init() { LocalStats::init(); }
void link(LocalStats *S) {
ScopedLock L(Mutex);
@@ -89,8 +85,11 @@ public:
S[I] = static_cast<sptr>(S[I]) >= 0 ? S[I] : 0;
}
- void disable() { Mutex.lock(); }
- void enable() { Mutex.unlock(); }
+ void lock() { Mutex.lock(); }
+ void unlock() { Mutex.unlock(); }
+
+ void disable() { lock(); }
+ void enable() { unlock(); }
private:
mutable HybridMutex Mutex;
diff --git a/compiler-rt/lib/scudo/standalone/string_utils.cpp b/compiler-rt/lib/scudo/standalone/string_utils.cpp
index f304491019b2..acf85889fcff 100644
--- a/compiler-rt/lib/scudo/standalone/string_utils.cpp
+++ b/compiler-rt/lib/scudo/standalone/string_utils.cpp
@@ -115,8 +115,8 @@ static int appendPointer(char **Buffer, const char *BufferEnd, u64 ptr_value) {
return Res;
}
-int formatString(char *Buffer, uptr BufferLength, const char *Format,
- va_list Args) {
+static int formatString(char *Buffer, uptr BufferLength, const char *Format,
+ va_list Args) {
static const char *PrintfFormatsHelp =
"Supported formatString formats: %([0-9]*)?(z|ll)?{d,u,x,X}; %p; "
"%[-]([0-9]*)?(\\.\\*)?s; %c\n";
@@ -210,8 +210,15 @@ int formatString(char *Buffer, uptr BufferLength, const char *Format,
return Res;
}
+int formatString(char *Buffer, uptr BufferLength, const char *Format, ...) {
+ va_list Args;
+ va_start(Args, Format);
+ int Res = formatString(Buffer, BufferLength, Format, Args);
+ va_end(Args);
+ return Res;
+}
+
void ScopedString::append(const char *Format, va_list Args) {
- DCHECK_LT(Length, String.size());
va_list ArgsCopy;
va_copy(ArgsCopy, Args);
// formatString doesn't currently support a null buffer or zero buffer length,
@@ -220,11 +227,13 @@ void ScopedString::append(const char *Format, va_list Args) {
char C[1];
const uptr AdditionalLength =
static_cast<uptr>(formatString(C, sizeof(C), Format, Args)) + 1;
+ const uptr Length = length();
String.resize(Length + AdditionalLength);
- formatString(String.data() + Length, AdditionalLength, Format, ArgsCopy);
+ const uptr FormattedLength = static_cast<uptr>(formatString(
+ String.data() + Length, String.size() - Length, Format, ArgsCopy));
+ RAW_CHECK(data()[length()] == '\0');
+ RAW_CHECK(FormattedLength + 1 == AdditionalLength);
va_end(ArgsCopy);
- Length = strlen(String.data());
- CHECK_LT(Length, String.size());
}
FORMAT(2, 3)
@@ -239,7 +248,7 @@ FORMAT(1, 2)
void Printf(const char *Format, ...) {
va_list Args;
va_start(Args, Format);
- ScopedString Msg(1024);
+ ScopedString Msg;
Msg.append(Format, Args);
outputRaw(Msg.data());
va_end(Args);
diff --git a/compiler-rt/lib/scudo/standalone/string_utils.h b/compiler-rt/lib/scudo/standalone/string_utils.h
index acd60bda9d8d..06d23d42246d 100644
--- a/compiler-rt/lib/scudo/standalone/string_utils.h
+++ b/compiler-rt/lib/scudo/standalone/string_utils.h
@@ -18,14 +18,12 @@ namespace scudo {
class ScopedString {
public:
- explicit ScopedString(uptr MaxLength) : String(MaxLength), Length(0) {
- String[0] = '\0';
- }
- uptr length() { return Length; }
+ explicit ScopedString() { String.push_back('\0'); }
+ uptr length() { return String.size() - 1; }
const char *data() { return String.data(); }
void clear() {
- String[0] = '\0';
- Length = 0;
+ String.clear();
+ String.push_back('\0');
}
void append(const char *Format, va_list Args);
void append(const char *Format, ...);
@@ -33,9 +31,9 @@ public:
private:
Vector<char> String;
- uptr Length;
};
+int formatString(char *Buffer, uptr BufferLength, const char *Format, ...);
void Printf(const char *Format, ...);
} // namespace scudo
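After this change ScopedString keeps no separate Length member: the backing vector always holds the formatted characters plus one trailing NUL, so length() is simply String.size() - 1 and data() is always a valid C string. A tiny sketch of that invariant, using std::vector in place of scudo::Vector (purely illustrative):

    #include <cassert>
    #include <cstring>
    #include <vector>

    struct MiniString {
      std::vector<char> Buf{'\0'};            // Always NUL-terminated.
      size_t length() const { return Buf.size() - 1; }
      const char *data() const { return Buf.data(); }
      void append(const char *S) {
        Buf.insert(Buf.end() - 1, S, S + strlen(S));  // Insert before the NUL.
      }
    };

    int main() {
      MiniString S;
      S.append("scudo");
      S.append(":secondary");
      assert(S.length() == strlen("scudo:secondary"));
      assert(S.data()[S.length()] == '\0');
      return 0;
    }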
diff --git a/compiler-rt/lib/scudo/standalone/trusty.cpp b/compiler-rt/lib/scudo/standalone/trusty.cpp
new file mode 100644
index 000000000000..81d6bc585f09
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/trusty.cpp
@@ -0,0 +1,100 @@
+//===-- trusty.cpp ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "platform.h"
+
+#if SCUDO_TRUSTY
+
+#include "common.h"
+#include "mutex.h"
+#include "string_utils.h"
+#include "trusty.h"
+
+#include <errno.h> // for errno
+#include <stdio.h> // for printf()
+#include <stdlib.h> // for getenv()
+#include <sys/auxv.h> // for getauxval()
+#include <time.h> // for clock_gettime()
+#include <trusty_syscalls.h> // for _trusty_brk()
+
+#define SBRK_ALIGN 32
+
+namespace scudo {
+
+uptr getPageSize() { return getauxval(AT_PAGESZ); }
+
+void NORETURN die() { abort(); }
+
+void *map(UNUSED void *Addr, uptr Size, UNUSED const char *Name, uptr Flags,
+ UNUSED MapPlatformData *Data) {
+ // Calling _trusty_brk(0) returns the current program break.
+ uptr ProgramBreak = reinterpret_cast<uptr>(_trusty_brk(0));
+ uptr Start;
+ uptr End;
+
+ Start = roundUpTo(ProgramBreak, SBRK_ALIGN);
+ // Don't actually extend the heap if the MAP_NOACCESS flag is set, since this
+ // is the case where Scudo tries to reserve a memory region without mapping
+ // physical pages.
+ if (Flags & MAP_NOACCESS)
+ return reinterpret_cast<void *>(Start);
+
+ // Attempt to extend the heap by Size bytes using _trusty_brk.
+ End = roundUpTo(Start + Size, SBRK_ALIGN);
+ ProgramBreak =
+ reinterpret_cast<uptr>(_trusty_brk(reinterpret_cast<void *>(End)));
+ if (ProgramBreak < End) {
+ errno = ENOMEM;
+ dieOnMapUnmapError(Size);
+ return nullptr;
+ }
+ return reinterpret_cast<void *>(Start); // Base of new reserved region.
+}
+
+// Unmap is a no-op since Trusty uses sbrk instead of memory mapping.
+void unmap(UNUSED void *Addr, UNUSED uptr Size, UNUSED uptr Flags,
+ UNUSED MapPlatformData *Data) {}
+
+void setMemoryPermission(UNUSED uptr Addr, UNUSED uptr Size, UNUSED uptr Flags,
+ UNUSED MapPlatformData *Data) {}
+
+void releasePagesToOS(UNUSED uptr BaseAddress, UNUSED uptr Offset,
+ UNUSED uptr Size, UNUSED MapPlatformData *Data) {}
+
+const char *getEnv(const char *Name) { return getenv(Name); }
+
+// All mutex operations are a no-op since Trusty doesn't currently support
+// threads.
+bool HybridMutex::tryLock() { return true; }
+
+void HybridMutex::lockSlow() {}
+
+void HybridMutex::unlock() {}
+
+u64 getMonotonicTime() {
+ timespec TS;
+ clock_gettime(CLOCK_MONOTONIC, &TS);
+ return static_cast<u64>(TS.tv_sec) * (1000ULL * 1000 * 1000) +
+ static_cast<u64>(TS.tv_nsec);
+}
+
+u32 getNumberOfCPUs() { return 0; }
+
+u32 getThreadID() { return 0; }
+
+bool getRandom(UNUSED void *Buffer, UNUSED uptr Length, UNUSED bool Blocking) {
+ return false;
+}
+
+void outputRaw(const char *Buffer) { printf("%s", Buffer); }
+
+void setAbortMessage(UNUSED const char *Message) {}
+
+} // namespace scudo
+
+#endif // SCUDO_TRUSTY
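Since Trusty has no mmap, map() above grows the heap with _trusty_brk and treats MAP_NOACCESS as a pure reservation: it just returns the rounded-up current break without moving it, and the allocator's later committing map() call extends the break for real. A rough sketch of that reserve-then-commit flow (the break variable stands in for the Trusty syscall):

    #include <cstdint>

    // Stand-in for the current program break (_trusty_brk(0) in the real code).
    static uintptr_t Break = 0x100000;

    constexpr uintptr_t SbrkAlign = 32;
    static uintptr_t roundUp(uintptr_t X) { return (X + SbrkAlign - 1) & ~(SbrkAlign - 1); }

    // MAP_NOACCESS path: report where the region would start, break unchanged.
    uintptr_t reserve() { return roundUp(Break); }

    // Committing path: actually extend the break far enough to cover Size bytes.
    uintptr_t commit(uintptr_t Size) {
      const uintptr_t Start = roundUp(Break);
      const uintptr_t End = roundUp(Start + Size);
      Break = End;              // A real brk call could fail here with ENOMEM.
      return Start;
    }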
diff --git a/compiler-rt/lib/scudo/standalone/trusty.h b/compiler-rt/lib/scudo/standalone/trusty.h
new file mode 100644
index 000000000000..50edd1c6fe63
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/trusty.h
@@ -0,0 +1,24 @@
+//===-- trusty.h -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_TRUSTY_H_
+#define SCUDO_TRUSTY_H_
+
+#include "platform.h"
+
+#if SCUDO_TRUSTY
+
+namespace scudo {
+// MapPlatformData is unused on Trusty; define it as a minimally sized
+// structure.
+struct MapPlatformData {};
+} // namespace scudo
+
+#endif // SCUDO_TRUSTY
+
+#endif // SCUDO_TRUSTY_H_
diff --git a/compiler-rt/lib/scudo/standalone/tsd.h b/compiler-rt/lib/scudo/standalone/tsd.h
index b3701c63f8a9..b400a3b56da9 100644
--- a/compiler-rt/lib/scudo/standalone/tsd.h
+++ b/compiler-rt/lib/scudo/standalone/tsd.h
@@ -26,16 +26,15 @@ namespace scudo {
template <class Allocator> struct alignas(SCUDO_CACHE_LINE_SIZE) TSD {
typename Allocator::CacheT Cache;
typename Allocator::QuarantineCacheT QuarantineCache;
- u8 DestructorIterations;
+ using ThisT = TSD<Allocator>;
+ u8 DestructorIterations = 0;
- void initLinkerInitialized(Allocator *Instance) {
+ void init(Allocator *Instance) {
+ DCHECK_EQ(DestructorIterations, 0U);
+ DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
Instance->initCache(&Cache);
DestructorIterations = PTHREAD_DESTRUCTOR_ITERATIONS;
}
- void init(Allocator *Instance) {
- memset(this, 0, sizeof(*this));
- initLinkerInitialized(Instance);
- }
void commitBack(Allocator *Instance) { Instance->commitBack(this); }
@@ -59,7 +58,7 @@ template <class Allocator> struct alignas(SCUDO_CACHE_LINE_SIZE) TSD {
private:
HybridMutex Mutex;
- atomic_uptr Precedence;
+ atomic_uptr Precedence = {};
};
} // namespace scudo
diff --git a/compiler-rt/lib/scudo/standalone/tsd_exclusive.h b/compiler-rt/lib/scudo/standalone/tsd_exclusive.h
index 1704c8cf80d8..bba0c277c6a7 100644
--- a/compiler-rt/lib/scudo/standalone/tsd_exclusive.h
+++ b/compiler-rt/lib/scudo/standalone/tsd_exclusive.h
@@ -25,18 +25,36 @@ struct ThreadState {
template <class Allocator> void teardownThread(void *Ptr);
template <class Allocator> struct TSDRegistryExT {
- void initLinkerInitialized(Allocator *Instance) {
- Instance->initLinkerInitialized();
+ void init(Allocator *Instance) {
+ DCHECK(!Initialized);
+ Instance->init();
CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread<Allocator>), 0);
- FallbackTSD.initLinkerInitialized(Instance);
+ FallbackTSD.init(Instance);
Initialized = true;
}
- void init(Allocator *Instance) {
- memset(this, 0, sizeof(*this));
- initLinkerInitialized(Instance);
+
+ void initOnceMaybe(Allocator *Instance) {
+ ScopedLock L(Mutex);
+ if (LIKELY(Initialized))
+ return;
+ init(Instance); // Sets Initialized.
}
- void unmapTestOnly() {}
+ void unmapTestOnly(Allocator *Instance) {
+ DCHECK(Instance);
+ if (reinterpret_cast<Allocator *>(pthread_getspecific(PThreadKey))) {
+ DCHECK_EQ(reinterpret_cast<Allocator *>(pthread_getspecific(PThreadKey)),
+ Instance);
+ ThreadTSD.commitBack(Instance);
+ ThreadTSD = {};
+ }
+ CHECK_EQ(pthread_key_delete(PThreadKey), 0);
+ PThreadKey = {};
+ FallbackTSD.commitBack(Instance);
+ FallbackTSD = {};
+ State = {};
+ Initialized = false;
+ }
ALWAYS_INLINE void initThreadMaybe(Allocator *Instance, bool MinimalInit) {
if (LIKELY(State.InitState != ThreadState::NotInitialized))
@@ -80,13 +98,6 @@ template <class Allocator> struct TSDRegistryExT {
bool getDisableMemInit() { return State.DisableMemInit; }
private:
- void initOnceMaybe(Allocator *Instance) {
- ScopedLock L(Mutex);
- if (LIKELY(Initialized))
- return;
- initLinkerInitialized(Instance); // Sets Initialized.
- }
-
// Using minimal initialization allows for global initialization while keeping
// the thread specific structure untouched. The fallback structure will be
// used instead.
@@ -96,14 +107,14 @@ private:
return;
CHECK_EQ(
pthread_setspecific(PThreadKey, reinterpret_cast<void *>(Instance)), 0);
- ThreadTSD.initLinkerInitialized(Instance);
+ ThreadTSD.init(Instance);
State.InitState = ThreadState::Initialized;
Instance->callPostInitCallback();
}
- pthread_key_t PThreadKey;
- bool Initialized;
- atomic_u8 Disabled;
+ pthread_key_t PThreadKey = {};
+ bool Initialized = false;
+ atomic_u8 Disabled = {};
TSD<Allocator> FallbackTSD;
HybridMutex Mutex;
static thread_local ThreadState State;
diff --git a/compiler-rt/lib/scudo/standalone/tsd_shared.h b/compiler-rt/lib/scudo/standalone/tsd_shared.h
index 6a68b3ef5453..1c2a880416b9 100644
--- a/compiler-rt/lib/scudo/standalone/tsd_shared.h
+++ b/compiler-rt/lib/scudo/standalone/tsd_shared.h
@@ -24,21 +24,32 @@ namespace scudo {
template <class Allocator, u32 TSDsArraySize, u32 DefaultTSDCount>
struct TSDRegistrySharedT {
- void initLinkerInitialized(Allocator *Instance) {
- Instance->initLinkerInitialized();
+ void init(Allocator *Instance) {
+ DCHECK(!Initialized);
+ Instance->init();
for (u32 I = 0; I < TSDsArraySize; I++)
- TSDs[I].initLinkerInitialized(Instance);
+ TSDs[I].init(Instance);
const u32 NumberOfCPUs = getNumberOfCPUs();
setNumberOfTSDs((NumberOfCPUs == 0) ? DefaultTSDCount
: Min(NumberOfCPUs, DefaultTSDCount));
Initialized = true;
}
- void init(Allocator *Instance) {
- memset(this, 0, sizeof(*this));
- initLinkerInitialized(Instance);
+
+ void initOnceMaybe(Allocator *Instance) {
+ ScopedLock L(Mutex);
+ if (LIKELY(Initialized))
+ return;
+ init(Instance); // Sets Initialized.
}
- void unmapTestOnly() { setCurrentTSD(nullptr); }
+ void unmapTestOnly(Allocator *Instance) {
+ for (u32 I = 0; I < TSDsArraySize; I++) {
+ TSDs[I].commitBack(Instance);
+ TSDs[I] = {};
+ }
+ setCurrentTSD(nullptr);
+ Initialized = false;
+ }
ALWAYS_INLINE void initThreadMaybe(Allocator *Instance,
UNUSED bool MinimalInit) {
@@ -139,13 +150,6 @@ private:
*getTlsPtr() |= B;
}
- void initOnceMaybe(Allocator *Instance) {
- ScopedLock L(Mutex);
- if (LIKELY(Initialized))
- return;
- initLinkerInitialized(Instance); // Sets Initialized.
- }
-
NOINLINE void initThread(Allocator *Instance) {
initOnceMaybe(Instance);
// Initial context assignment is done in a plain round-robin fashion.
@@ -197,11 +201,11 @@ private:
return CurrentTSD;
}
- atomic_u32 CurrentIndex;
- u32 NumberOfTSDs;
- u32 NumberOfCoPrimes;
- u32 CoPrimes[TSDsArraySize];
- bool Initialized;
+ atomic_u32 CurrentIndex = {};
+ u32 NumberOfTSDs = 0;
+ u32 NumberOfCoPrimes = 0;
+ u32 CoPrimes[TSDsArraySize] = {};
+ bool Initialized = false;
HybridMutex Mutex;
HybridMutex MutexTSDs;
TSD<Allocator> TSDs[TSDsArraySize];
diff --git a/compiler-rt/lib/scudo/standalone/vector.h b/compiler-rt/lib/scudo/standalone/vector.h
index 6ca350a25771..2c9a6e2aa655 100644
--- a/compiler-rt/lib/scudo/standalone/vector.h
+++ b/compiler-rt/lib/scudo/standalone/vector.h
@@ -19,14 +19,13 @@ namespace scudo {
// small vectors. The current implementation supports only POD types.
template <typename T> class VectorNoCtor {
public:
- void init(uptr InitialCapacity) {
- CapacityBytes = 0;
- Size = 0;
- Data = nullptr;
+ void init(uptr InitialCapacity = 0) {
+ Data = reinterpret_cast<T *>(&LocalData[0]);
+ CapacityBytes = sizeof(LocalData);
reserve(InitialCapacity);
}
void destroy() {
- if (Data)
+ if (Data != reinterpret_cast<T *>(&LocalData[0]))
unmap(Data, CapacityBytes);
}
T &operator[](uptr I) {
@@ -82,26 +81,24 @@ private:
void reallocate(uptr NewCapacity) {
DCHECK_GT(NewCapacity, 0);
DCHECK_LE(Size, NewCapacity);
- const uptr NewCapacityBytes =
- roundUpTo(NewCapacity * sizeof(T), getPageSizeCached());
+ NewCapacity = roundUpTo(NewCapacity * sizeof(T), getPageSizeCached());
T *NewData =
- reinterpret_cast<T *>(map(nullptr, NewCapacityBytes, "scudo:vector"));
- if (Data) {
- memcpy(NewData, Data, Size * sizeof(T));
- unmap(Data, CapacityBytes);
- }
+ reinterpret_cast<T *>(map(nullptr, NewCapacity, "scudo:vector"));
+ memcpy(NewData, Data, Size * sizeof(T));
+ destroy();
Data = NewData;
- CapacityBytes = NewCapacityBytes;
+ CapacityBytes = NewCapacity;
}
- T *Data;
- uptr CapacityBytes;
- uptr Size;
+ T *Data = nullptr;
+ u8 LocalData[256] = {};
+ uptr CapacityBytes = 0;
+ uptr Size = 0;
};
template <typename T> class Vector : public VectorNoCtor<T> {
public:
- Vector() { VectorNoCtor<T>::init(1); }
+ Vector() { VectorNoCtor<T>::init(); }
explicit Vector(uptr Count) {
VectorNoCtor<T>::init(Count);
this->resize(Count);
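VectorNoCtor now starts out pointing at a 256-byte inline buffer, so small vectors never call map()/unmap(), and destroy() only unmaps once the data has grown off that local buffer. The same small-buffer idea in a standalone sketch for trivially copyable element types, matching the POD-only restriction noted above (malloc/free stand in for the Scudo map/unmap primitives):

    #include <cstdlib>
    #include <cstring>

    template <typename T> class SmallVec {
      alignas(T) unsigned char Local[256];
      T *Data = reinterpret_cast<T *>(Local);
      size_t CapacityBytes = sizeof(Local);
      size_t Size = 0;

    public:
      ~SmallVec() {
        if (Data != reinterpret_cast<T *>(Local))  // Heap storage only if we ever grew.
          free(Data);
      }
      void push_back(const T &V) {
        if ((Size + 1) * sizeof(T) > CapacityBytes) {
          CapacityBytes *= 2;                      // Scudo rounds to a page instead.
          T *NewData = static_cast<T *>(malloc(CapacityBytes));
          memcpy(NewData, Data, Size * sizeof(T));
          if (Data != reinterpret_cast<T *>(Local))
            free(Data);
          Data = NewData;
        }
        Data[Size++] = V;
      }
      T &operator[](size_t I) { return Data[I]; }
      size_t size() const { return Size; }
    };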
diff --git a/compiler-rt/lib/scudo/standalone/wrappers_c.cpp b/compiler-rt/lib/scudo/standalone/wrappers_c.cpp
index 098cc089a1ca..81c7dd60ee33 100644
--- a/compiler-rt/lib/scudo/standalone/wrappers_c.cpp
+++ b/compiler-rt/lib/scudo/standalone/wrappers_c.cpp
@@ -26,6 +26,7 @@ extern "C" void SCUDO_PREFIX(malloc_postinit)();
// Export the static allocator so that the C++ wrappers can access it.
// Technically we could have a completely separated heap for C & C++ but in
// reality the amount of cross pollination between the two is staggering.
+SCUDO_REQUIRE_CONSTANT_INITIALIZATION
scudo::Allocator<scudo::Config, SCUDO_PREFIX(malloc_postinit)> SCUDO_ALLOCATOR;
#include "wrappers_c.inc"
diff --git a/compiler-rt/lib/scudo/standalone/wrappers_c.inc b/compiler-rt/lib/scudo/standalone/wrappers_c.inc
index 9d640038d8e2..43efb02cb860 100644
--- a/compiler-rt/lib/scudo/standalone/wrappers_c.inc
+++ b/compiler-rt/lib/scudo/standalone/wrappers_c.inc
@@ -260,4 +260,12 @@ SCUDO_PREFIX(malloc_set_pattern_fill_contents)(int pattern_fill_contents) {
pattern_fill_contents ? scudo::PatternOrZeroFill : scudo::NoFill);
}
+// Sets whether scudo adds a small amount of slack at the end of large
+// allocations, before the guard page. This can be enabled to work around buggy
+// applications that read a few bytes past the end of their allocation.
+INTERFACE WEAK void
+SCUDO_PREFIX(malloc_set_add_large_allocation_slack)(int add_slack) {
+ SCUDO_ALLOCATOR.setAddLargeAllocationSlack(add_slack);
+}
+
} // extern "C"
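Which symbol this weak hook exports depends on SCUDO_PREFIX; in the standalone C wrappers it typically comes out as scudo_malloc_set_add_large_allocation_slack (an assumption here, the actual prefix is set by the build). A hedged example of toggling it around a component known to over-read:

    // Assumed exported name; the real name depends on SCUDO_PREFIX(...).
    extern "C" void scudo_malloc_set_add_large_allocation_slack(int add_slack);

    void runLegacyComponent();  // Hypothetical code that reads a few bytes too far.

    void runWithSlack() {
      scudo_malloc_set_add_large_allocation_slack(1);  // Slack before the guard page.
      runLegacyComponent();
      scudo_malloc_set_add_large_allocation_slack(0);  // Back to strict behaviour.
    }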
diff --git a/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp b/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp
index 4298e69b5774..18c3bf2c0edf 100644
--- a/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp
+++ b/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp
@@ -23,6 +23,7 @@
#define SCUDO_ALLOCATOR Allocator
extern "C" void SCUDO_PREFIX(malloc_postinit)();
+SCUDO_REQUIRE_CONSTANT_INITIALIZATION
static scudo::Allocator<scudo::AndroidConfig, SCUDO_PREFIX(malloc_postinit)>
SCUDO_ALLOCATOR;
@@ -36,6 +37,7 @@ static scudo::Allocator<scudo::AndroidConfig, SCUDO_PREFIX(malloc_postinit)>
#define SCUDO_ALLOCATOR SvelteAllocator
extern "C" void SCUDO_PREFIX(malloc_postinit)();
+SCUDO_REQUIRE_CONSTANT_INITIALIZATION
static scudo::Allocator<scudo::AndroidSvelteConfig,
SCUDO_PREFIX(malloc_postinit)>
SCUDO_ALLOCATOR;
@@ -48,12 +50,15 @@ static scudo::Allocator<scudo::AndroidSvelteConfig,
// TODO(kostyak): support both allocators.
INTERFACE void __scudo_print_stats(void) { Allocator.printStats(); }
-INTERFACE void __scudo_get_error_info(
- struct scudo_error_info *error_info, uintptr_t fault_addr,
- const char *stack_depot, const char *region_info, const char *memory,
- const char *memory_tags, uintptr_t memory_addr, size_t memory_size) {
+INTERFACE void
+__scudo_get_error_info(struct scudo_error_info *error_info,
+ uintptr_t fault_addr, const char *stack_depot,
+ const char *region_info, const char *ring_buffer,
+ const char *memory, const char *memory_tags,
+ uintptr_t memory_addr, size_t memory_size) {
Allocator.getErrorInfo(error_info, fault_addr, stack_depot, region_info,
- memory, memory_tags, memory_addr, memory_size);
+ ring_buffer, memory, memory_tags, memory_addr,
+ memory_size);
}
INTERFACE const char *__scudo_get_stack_depot_addr() {
@@ -72,4 +77,12 @@ INTERFACE size_t __scudo_get_region_info_size() {
return Allocator.getRegionInfoArraySize();
}
+INTERFACE const char *__scudo_get_ring_buffer_addr() {
+ return Allocator.getRingBufferAddress();
+}
+
+INTERFACE size_t __scudo_get_ring_buffer_size() {
+ return Allocator.getRingBufferSize();
+}
+
#endif // SCUDO_ANDROID && _BIONIC
diff --git a/compiler-rt/lib/tsan/rtl/tsan_clock.cpp b/compiler-rt/lib/tsan/rtl/tsan_clock.cpp
index c91b29cb22b4..61848c21d162 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_clock.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_clock.cpp
@@ -80,14 +80,6 @@
// release-store operation by the thread with release_store_tid_ index.
// release_store_reused_ - reuse count of release_store_tid_.
-// We don't have ThreadState in these methods, so this is an ugly hack that
-// works only in C++.
-#if !SANITIZER_GO
-# define CPP_STAT_INC(typ) StatInc(cur_thread(), typ)
-#else
-# define CPP_STAT_INC(typ) (void)0
-#endif
-
namespace __tsan {
static atomic_uint32_t *ref_ptr(ClockBlock *cb) {
@@ -138,19 +130,16 @@ void ThreadClock::ResetCached(ClockCache *c) {
void ThreadClock::acquire(ClockCache *c, SyncClock *src) {
DCHECK_LE(nclk_, kMaxTid);
DCHECK_LE(src->size_, kMaxTid);
- CPP_STAT_INC(StatClockAcquire);
// Check if it's empty -> no need to do anything.
const uptr nclk = src->size_;
- if (nclk == 0) {
- CPP_STAT_INC(StatClockAcquireEmpty);
+ if (nclk == 0)
return;
- }
bool acquired = false;
for (unsigned i = 0; i < kDirtyTids; i++) {
SyncClock::Dirty dirty = src->dirty_[i];
- unsigned tid = dirty.tid;
+ unsigned tid = dirty.tid();
if (tid != kInvalidTid) {
if (clk_[tid] < dirty.epoch) {
clk_[tid] = dirty.epoch;
@@ -162,7 +151,6 @@ void ThreadClock::acquire(ClockCache *c, SyncClock *src) {
// Check if we've already acquired src after the last release operation on src
if (tid_ >= nclk || src->elem(tid_).reused != reused_) {
// O(N) acquire.
- CPP_STAT_INC(StatClockAcquireFull);
nclk_ = max(nclk_, nclk);
u64 *dst_pos = &clk_[0];
for (ClockElem &src_elem : *src) {
@@ -180,7 +168,6 @@ void ThreadClock::acquire(ClockCache *c, SyncClock *src) {
}
if (acquired) {
- CPP_STAT_INC(StatClockAcquiredSomething);
last_acquire_ = clk_[tid_];
ResetCached(c);
}
@@ -223,7 +210,6 @@ void ThreadClock::releaseStoreAcquire(ClockCache *c, SyncClock *sc) {
sc->release_store_reused_ = 0;
if (acquired) {
- CPP_STAT_INC(StatClockAcquiredSomething);
last_acquire_ = clk_[tid_];
ResetCached(c);
}
@@ -240,7 +226,6 @@ void ThreadClock::release(ClockCache *c, SyncClock *dst) {
return;
}
- CPP_STAT_INC(StatClockRelease);
// Check if we need to resize dst.
if (dst->size_ < nclk_)
dst->Resize(c, nclk_);
@@ -257,12 +242,9 @@ void ThreadClock::release(ClockCache *c, SyncClock *dst) {
}
// O(N) release.
- CPP_STAT_INC(StatClockReleaseFull);
dst->Unshare(c);
// First, remember whether we've acquired dst.
bool acquired = IsAlreadyAcquired(dst);
- if (acquired)
- CPP_STAT_INC(StatClockReleaseAcquired);
// Update dst->clk_.
dst->FlushDirty();
uptr i = 0;
@@ -272,8 +254,6 @@ void ThreadClock::release(ClockCache *c, SyncClock *dst) {
i++;
}
// Clear 'acquired' flag in the remaining elements.
- if (nclk_ < dst->size_)
- CPP_STAT_INC(StatClockReleaseClearTail);
dst->release_store_tid_ = kInvalidTid;
dst->release_store_reused_ = 0;
// If we've acquired dst, remember this fact,
@@ -285,7 +265,6 @@ void ThreadClock::release(ClockCache *c, SyncClock *dst) {
void ThreadClock::ReleaseStore(ClockCache *c, SyncClock *dst) {
DCHECK_LE(nclk_, kMaxTid);
DCHECK_LE(dst->size_, kMaxTid);
- CPP_STAT_INC(StatClockStore);
if (dst->size_ == 0 && cached_idx_ != 0) {
// Reuse the cached clock.
@@ -299,10 +278,10 @@ void ThreadClock::ReleaseStore(ClockCache *c, SyncClock *dst) {
dst->tab_idx_ = cached_idx_;
dst->size_ = cached_size_;
dst->blocks_ = cached_blocks_;
- CHECK_EQ(dst->dirty_[0].tid, kInvalidTid);
+ CHECK_EQ(dst->dirty_[0].tid(), kInvalidTid);
// The cached clock is shared (immutable),
// so this is where we store the current clock.
- dst->dirty_[0].tid = tid_;
+ dst->dirty_[0].set_tid(tid_);
dst->dirty_[0].epoch = clk_[tid_];
dst->release_store_tid_ = tid_;
dst->release_store_reused_ = reused_;
@@ -320,13 +299,11 @@ void ThreadClock::ReleaseStore(ClockCache *c, SyncClock *dst) {
if (dst->release_store_tid_ == tid_ &&
dst->release_store_reused_ == reused_ &&
!HasAcquiredAfterRelease(dst)) {
- CPP_STAT_INC(StatClockStoreFast);
UpdateCurrentThread(c, dst);
return;
}
// O(N) release-store.
- CPP_STAT_INC(StatClockStoreFull);
dst->Unshare(c);
// Note: dst can be larger than this ThreadClock.
// This is fine since clk_ beyond size is all zeros.
@@ -336,8 +313,7 @@ void ThreadClock::ReleaseStore(ClockCache *c, SyncClock *dst) {
ce.reused = 0;
i++;
}
- for (uptr i = 0; i < kDirtyTids; i++)
- dst->dirty_[i].tid = kInvalidTid;
+ for (uptr i = 0; i < kDirtyTids; i++) dst->dirty_[i].set_tid(kInvalidTid);
dst->release_store_tid_ = tid_;
dst->release_store_reused_ = reused_;
// Remember that we don't need to acquire it in the future.
@@ -359,7 +335,6 @@ void ThreadClock::ReleaseStore(ClockCache *c, SyncClock *dst) {
}
void ThreadClock::acq_rel(ClockCache *c, SyncClock *dst) {
- CPP_STAT_INC(StatClockAcquireRelease);
acquire(c, dst);
ReleaseStore(c, dst);
}
@@ -369,10 +344,9 @@ void ThreadClock::UpdateCurrentThread(ClockCache *c, SyncClock *dst) const {
// Update the threads time, but preserve 'acquired' flag.
for (unsigned i = 0; i < kDirtyTids; i++) {
SyncClock::Dirty *dirty = &dst->dirty_[i];
- const unsigned tid = dirty->tid;
+ const unsigned tid = dirty->tid();
if (tid == tid_ || tid == kInvalidTid) {
- CPP_STAT_INC(StatClockReleaseFast);
- dirty->tid = tid_;
+ dirty->set_tid(tid_);
dirty->epoch = clk_[tid_];
return;
}
@@ -380,7 +354,6 @@ void ThreadClock::UpdateCurrentThread(ClockCache *c, SyncClock *dst) const {
// Reset all 'acquired' flags, O(N).
// We are going to touch dst elements, so we need to unshare it.
dst->Unshare(c);
- CPP_STAT_INC(StatClockReleaseSlow);
dst->elem(tid_).epoch = clk_[tid_];
for (uptr i = 0; i < dst->size_; i++)
dst->elem(i).reused = 0;
@@ -393,8 +366,8 @@ bool ThreadClock::IsAlreadyAcquired(const SyncClock *src) const {
return false;
for (unsigned i = 0; i < kDirtyTids; i++) {
SyncClock::Dirty dirty = src->dirty_[i];
- if (dirty.tid != kInvalidTid) {
- if (clk_[dirty.tid] < dirty.epoch)
+ if (dirty.tid() != kInvalidTid) {
+ if (clk_[dirty.tid()] < dirty.epoch)
return false;
}
}
@@ -453,12 +426,10 @@ void SyncClock::ResetImpl() {
blocks_ = 0;
release_store_tid_ = kInvalidTid;
release_store_reused_ = 0;
- for (uptr i = 0; i < kDirtyTids; i++)
- dirty_[i].tid = kInvalidTid;
+ for (uptr i = 0; i < kDirtyTids; i++) dirty_[i].set_tid(kInvalidTid);
}
void SyncClock::Resize(ClockCache *c, uptr nclk) {
- CPP_STAT_INC(StatClockReleaseResize);
Unshare(c);
if (nclk <= capacity()) {
// Memory is already allocated, just increase the size.
@@ -503,10 +474,10 @@ void SyncClock::Resize(ClockCache *c, uptr nclk) {
void SyncClock::FlushDirty() {
for (unsigned i = 0; i < kDirtyTids; i++) {
Dirty *dirty = &dirty_[i];
- if (dirty->tid != kInvalidTid) {
- CHECK_LT(dirty->tid, size_);
- elem(dirty->tid).epoch = dirty->epoch;
- dirty->tid = kInvalidTid;
+ if (dirty->tid() != kInvalidTid) {
+ CHECK_LT(dirty->tid(), size_);
+ elem(dirty->tid()).epoch = dirty->epoch;
+ dirty->set_tid(kInvalidTid);
}
}
}
@@ -559,7 +530,7 @@ ALWAYS_INLINE bool SyncClock::Cachable() const {
if (size_ == 0)
return false;
for (unsigned i = 0; i < kDirtyTids; i++) {
- if (dirty_[i].tid != kInvalidTid)
+ if (dirty_[i].tid() != kInvalidTid)
return false;
}
return atomic_load_relaxed(ref_ptr(tab_)) == 1;
@@ -606,7 +577,7 @@ ALWAYS_INLINE void SyncClock::append_block(u32 idx) {
u64 SyncClock::get(unsigned tid) const {
for (unsigned i = 0; i < kDirtyTids; i++) {
Dirty dirty = dirty_[i];
- if (dirty.tid == tid)
+ if (dirty.tid() == tid)
return dirty.epoch;
}
return elem(tid).epoch;
@@ -625,9 +596,8 @@ void SyncClock::DebugDump(int(*printf)(const char *s, ...)) {
for (uptr i = 0; i < size_; i++)
printf("%s%llu", i == 0 ? "" : ",", elem(i).reused);
printf("] release_store_tid=%d/%d dirty_tids=%d[%llu]/%d[%llu]",
- release_store_tid_, release_store_reused_,
- dirty_[0].tid, dirty_[0].epoch,
- dirty_[1].tid, dirty_[1].epoch);
+ release_store_tid_, release_store_reused_, dirty_[0].tid(),
+ dirty_[0].epoch, dirty_[1].tid(), dirty_[1].epoch);
}
void SyncClock::Iter::Next() {
diff --git a/compiler-rt/lib/tsan/rtl/tsan_clock.h b/compiler-rt/lib/tsan/rtl/tsan_clock.h
index 736cdae06ba2..31376a1bc9e2 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_clock.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_clock.h
@@ -17,7 +17,7 @@
namespace __tsan {
-typedef DenseSlabAlloc<ClockBlock, 1<<16, 1<<10> ClockAlloc;
+typedef DenseSlabAlloc<ClockBlock, 1 << 22, 1 << 10> ClockAlloc;
typedef DenseSlabAllocCache ClockCache;
// The clock that lives in sync variables (mutexes, atomics, etc).
@@ -65,10 +65,20 @@ class SyncClock {
static const uptr kDirtyTids = 2;
struct Dirty {
- u64 epoch : kClkBits;
- u64 tid : 64 - kClkBits; // kInvalidId if not active
+ u32 tid() const { return tid_ == kShortInvalidTid ? kInvalidTid : tid_; }
+ void set_tid(u32 tid) {
+ tid_ = tid == kInvalidTid ? kShortInvalidTid : tid;
+ }
+ u64 epoch : kClkBits;
+
+ private:
+ // Full kInvalidTid won't fit into Dirty::tid.
+ static const u64 kShortInvalidTid = (1ull << (64 - kClkBits)) - 1;
+ u64 tid_ : 64 - kClkBits; // kInvalidTid if not active
};
+ static_assert(sizeof(Dirty) == 8, "Dirty is not 64bit");
+
unsigned release_store_tid_;
unsigned release_store_reused_;
Dirty dirty_[kDirtyTids];
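Dirty::tid_ is now a bitfield of 64 - kClkBits bits, so the full 32-bit kInvalidTid no longer fits; set_tid()/tid() translate it to and from an all-ones in-field sentinel. A standalone sketch of the same round trip (the 22-bit width and the sentinel value are stand-ins, not taken from the header):

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t kInvalidTid = ~0u;      // Assumed full-width sentinel.
    constexpr unsigned kTidBits = 22;          // Stand-in for 64 - kClkBits.
    constexpr uint64_t kShortInvalidTid = (1ull << kTidBits) - 1;

    struct Dirty {
      uint64_t epoch : 64 - kTidBits;
      uint64_t tid_ : kTidBits;

      uint32_t tid() const { return tid_ == kShortInvalidTid ? kInvalidTid : tid_; }
      void set_tid(uint32_t tid) { tid_ = tid == kInvalidTid ? kShortInvalidTid : tid; }
    };
    static_assert(sizeof(Dirty) == 8, "Dirty is not 64bit");

    int main() {
      Dirty d{};
      d.set_tid(kInvalidTid);
      assert(d.tid() == kInvalidTid);          // Round-trips through the short form.
      d.set_tid(42);
      assert(d.tid() == 42);
      return 0;
    }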
diff --git a/compiler-rt/lib/tsan/rtl/tsan_defs.h b/compiler-rt/lib/tsan/rtl/tsan_defs.h
index 293d7deccc31..f2fb7b1a213f 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_defs.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_defs.h
@@ -15,7 +15,7 @@
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_libc.h"
-#include "tsan_stat.h"
+#include "sanitizer_common/sanitizer_mutex.h"
#include "ubsan/ubsan_platform.h"
// Setup defaults for compile definitions.
@@ -23,10 +23,6 @@
# define TSAN_NO_HISTORY 0
#endif
-#ifndef TSAN_COLLECT_STATS
-# define TSAN_COLLECT_STATS 0
-#endif
-
#ifndef TSAN_CONTAINS_UBSAN
# if CAN_SANITIZE_UB && !SANITIZER_GO
# define TSAN_CONTAINS_UBSAN 1
@@ -98,8 +94,6 @@ const bool kCollectHistory = false;
const bool kCollectHistory = true;
#endif
-const u16 kInvalidTid = kMaxTid + 1;
-
// The following "build consistency" machinery ensures that all source files
// are built in the same configuration. Inconsistent builds lead to
// hard to debug crashes.
@@ -109,23 +103,12 @@ void build_consistency_debug();
void build_consistency_release();
#endif
-#if TSAN_COLLECT_STATS
-void build_consistency_stats();
-#else
-void build_consistency_nostats();
-#endif
-
static inline void USED build_consistency() {
#if SANITIZER_DEBUG
build_consistency_debug();
#else
build_consistency_release();
#endif
-#if TSAN_COLLECT_STATS
- build_consistency_stats();
-#else
- build_consistency_nostats();
-#endif
}
template<typename T>
@@ -190,6 +173,17 @@ enum ExternalTag : uptr {
// as 16-bit values, see tsan_defs.h.
};
+enum MutexType {
+ MutexTypeTrace = MutexLastCommon,
+ MutexTypeReport,
+ MutexTypeSyncVar,
+ MutexTypeAnnotations,
+ MutexTypeAtExit,
+ MutexTypeFired,
+ MutexTypeRacy,
+ MutexTypeGlobalProc,
+};
+
} // namespace __tsan
#endif // TSAN_DEFS_H
diff --git a/compiler-rt/lib/tsan/rtl/tsan_dense_alloc.h b/compiler-rt/lib/tsan/rtl/tsan_dense_alloc.h
index 64fc50e95c25..68ded43c4f6b 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_dense_alloc.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_dense_alloc.h
@@ -20,7 +20,6 @@
#include "sanitizer_common/sanitizer_common.h"
#include "tsan_defs.h"
-#include "tsan_mutex.h"
namespace __tsan {
@@ -29,28 +28,40 @@ class DenseSlabAllocCache {
typedef u32 IndexT;
uptr pos;
IndexT cache[kSize];
- template<typename T, uptr kL1Size, uptr kL2Size> friend class DenseSlabAlloc;
+ template <typename, uptr, uptr, u64>
+ friend class DenseSlabAlloc;
};
-template<typename T, uptr kL1Size, uptr kL2Size>
+template <typename T, uptr kL1Size, uptr kL2Size, u64 kReserved = 0>
class DenseSlabAlloc {
public:
typedef DenseSlabAllocCache Cache;
typedef typename Cache::IndexT IndexT;
- explicit DenseSlabAlloc(const char *name) {
- // Check that kL1Size and kL2Size are sane.
- CHECK_EQ(kL1Size & (kL1Size - 1), 0);
- CHECK_EQ(kL2Size & (kL2Size - 1), 0);
- CHECK_GE(1ull << (sizeof(IndexT) * 8), kL1Size * kL2Size);
- // Check that it makes sense to use the dense alloc.
- CHECK_GE(sizeof(T), sizeof(IndexT));
- internal_memset(map_, 0, sizeof(map_));
+ static_assert((kL1Size & (kL1Size - 1)) == 0,
+ "kL1Size must be a power-of-two");
+ static_assert((kL2Size & (kL2Size - 1)) == 0,
+ "kL2Size must be a power-of-two");
+ static_assert((kL1Size * kL2Size) <= (1ull << (sizeof(IndexT) * 8)),
+ "kL1Size/kL2Size are too large");
+ static_assert(((kL1Size * kL2Size - 1) & kReserved) == 0,
+ "reserved bits don't fit");
+ static_assert(sizeof(T) > sizeof(IndexT),
+ "it doesn't make sense to use dense alloc");
+
+ explicit DenseSlabAlloc(LinkerInitialized, const char *name) {
freelist_ = 0;
fillpos_ = 0;
name_ = name;
}
+ explicit DenseSlabAlloc(const char *name)
+ : DenseSlabAlloc(LINKER_INITIALIZED, name) {
+ // map_ can be very large; don't page it in for linker-initialized
+ // objects.
+ internal_memset(map_, 0, sizeof(map_));
+ }
+
~DenseSlabAlloc() {
for (uptr i = 0; i < kL1Size; i++) {
if (map_[i] != 0)
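The runtime CHECKs in the old constructor became static_asserts, so a bad instantiation now fails at compile time instead of on first use. A minimal illustration of the same constraints applied to a hypothetical instantiation:

    #include <cstdint>

    template <typename T, uint64_t kL1Size, uint64_t kL2Size>
    struct DenseAllocChecks {
      using IndexT = uint32_t;
      static_assert((kL1Size & (kL1Size - 1)) == 0, "kL1Size must be a power-of-two");
      static_assert((kL2Size & (kL2Size - 1)) == 0, "kL2Size must be a power-of-two");
      static_assert(kL1Size * kL2Size <= (1ull << (sizeof(IndexT) * 8)),
                    "kL1Size/kL2Size are too large");
      static_assert(sizeof(T) > sizeof(IndexT),
                    "it doesn't make sense to use dense alloc");
    };

    struct Node { uint64_t A, B; };                            // 16 bytes > sizeof(IndexT).
    template struct DenseAllocChecks<Node, 1 << 22, 1 << 10>;  // Compiles fine.
    // template struct DenseAllocChecks<Node, 3, 1 << 10>;     // Would not: 3 isn't a power of two.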
diff --git a/compiler-rt/lib/tsan/rtl/tsan_external.cpp b/compiler-rt/lib/tsan/rtl/tsan_external.cpp
index 466b2bf0f66c..a87e12f2936f 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_external.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_external.cpp
@@ -111,12 +111,12 @@ void __tsan_external_assign_tag(void *addr, void *tag) {
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_external_read(void *addr, void *caller_pc, void *tag) {
- ExternalAccess(addr, STRIP_PC(caller_pc), tag, MemoryRead);
+ ExternalAccess(addr, STRIP_PAC_PC(caller_pc), tag, MemoryRead);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_external_write(void *addr, void *caller_pc, void *tag) {
- ExternalAccess(addr, STRIP_PC(caller_pc), tag, MemoryWrite);
+ ExternalAccess(addr, STRIP_PAC_PC(caller_pc), tag, MemoryWrite);
}
} // extern "C"
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interceptors.h b/compiler-rt/lib/tsan/rtl/tsan_interceptors.h
index 29576ea2d49a..c5716f53a323 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interceptors.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_interceptors.h
@@ -30,14 +30,14 @@ inline bool in_symbolizer() {
} // namespace __tsan
-#define SCOPED_INTERCEPTOR_RAW(func, ...) \
- cur_thread_init(); \
- ThreadState *thr = cur_thread(); \
- const uptr caller_pc = GET_CALLER_PC(); \
- ScopedInterceptor si(thr, #func, caller_pc); \
- const uptr pc = StackTrace::GetCurrentPc(); \
- (void)pc; \
-/**/
+#define SCOPED_INTERCEPTOR_RAW(func, ...) \
+ cur_thread_init(); \
+ ThreadState *thr = cur_thread(); \
+ const uptr caller_pc = GET_CALLER_PC(); \
+ ScopedInterceptor si(thr, #func, caller_pc); \
+ const uptr pc = GET_CURRENT_PC(); \
+ (void)pc; \
+ /**/
#define SCOPED_TSAN_INTERCEPTOR(func, ...) \
SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp b/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp
index ed10fccc980a..2d400c7e7098 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp
@@ -44,8 +44,9 @@ namespace __tsan {
// actually aliases of each other, and we cannot have different interceptors for
// them, because they're actually the same function. Thus, we have to stay
// conservative and treat the non-barrier versions as mo_acq_rel.
-static const morder kMacOrderBarrier = mo_acq_rel;
-static const morder kMacOrderNonBarrier = mo_acq_rel;
+static constexpr morder kMacOrderBarrier = mo_acq_rel;
+static constexpr morder kMacOrderNonBarrier = mo_acq_rel;
+static constexpr morder kMacFailureOrder = mo_relaxed;
#define OSATOMIC_INTERCEPTOR(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) { \
@@ -110,7 +111,7 @@ OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicXor, fetch_xor,
SCOPED_TSAN_INTERCEPTOR(f, old_value, new_value, ptr); \
return tsan_atomic_f##_compare_exchange_strong( \
(volatile tsan_t *)ptr, (tsan_t *)&old_value, (tsan_t)new_value, \
- kMacOrderNonBarrier, kMacOrderNonBarrier); \
+ kMacOrderNonBarrier, kMacFailureOrder); \
} \
\
TSAN_INTERCEPTOR(bool, f##Barrier, t old_value, t new_value, \
@@ -118,7 +119,7 @@ OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicXor, fetch_xor,
SCOPED_TSAN_INTERCEPTOR(f##Barrier, old_value, new_value, ptr); \
return tsan_atomic_f##_compare_exchange_strong( \
(volatile tsan_t *)ptr, (tsan_t *)&old_value, (tsan_t)new_value, \
- kMacOrderBarrier, kMacOrderNonBarrier); \
+ kMacOrderBarrier, kMacFailureOrder); \
}
OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapInt, __tsan_atomic32, a32, int)
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
index 6c49ccd6dd5b..dd2442842795 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
@@ -71,7 +71,8 @@ struct ucontext_t {
};
#endif
-#if defined(__x86_64__) || defined(__mips__) || SANITIZER_PPC64V1
+#if defined(__x86_64__) || defined(__mips__) || SANITIZER_PPC64V1 || \
+ defined(__s390x__)
#define PTHREAD_ABI_BASE "GLIBC_2.3.2"
#elif defined(__aarch64__) || SANITIZER_PPC64V2
#define PTHREAD_ABI_BASE "GLIBC_2.17"
@@ -81,6 +82,8 @@ extern "C" int pthread_attr_init(void *attr);
extern "C" int pthread_attr_destroy(void *attr);
DECLARE_REAL(int, pthread_attr_getdetachstate, void *, void *)
extern "C" int pthread_attr_setstacksize(void *attr, uptr stacksize);
+extern "C" int pthread_atfork(void (*prepare)(void), void (*parent)(void),
+ void (*child)(void));
extern "C" int pthread_key_create(unsigned *key, void (*destructor)(void* v));
extern "C" int pthread_setspecific(unsigned key, const void *v);
DECLARE_REAL(int, pthread_mutexattr_gettype, void *, void *)
@@ -193,12 +196,10 @@ struct InterceptorContext {
unsigned finalize_key;
#endif
- BlockingMutex atexit_mu;
+ Mutex atexit_mu;
Vector<struct AtExitCtx *> AtExitStack;
- InterceptorContext()
- : libignore(LINKER_INITIALIZED), AtExitStack() {
- }
+ InterceptorContext()
+     : libignore(LINKER_INITIALIZED), atexit_mu(MutexTypeAtExit),
+       AtExitStack() {}
};
static ALIGNED(64) char interceptor_placeholder[sizeof(InterceptorContext)];
@@ -264,7 +265,7 @@ ScopedInterceptor::~ScopedInterceptor() {
if (!thr_->ignore_interceptors) {
ProcessPendingSignals(thr_);
FuncExit(thr_);
- CheckNoLocks(thr_);
+ CheckedMutex::CheckNoLocks();
}
}
@@ -374,7 +375,7 @@ static void at_exit_wrapper() {
AtExitCtx *ctx;
{
// Ensure thread-safety.
- BlockingMutexLock l(&interceptor_ctx()->atexit_mu);
+ Lock l(&interceptor_ctx()->atexit_mu);
// Pop AtExitCtx from the top of the stack of callback functions
uptr element = interceptor_ctx()->AtExitStack.Size() - 1;
@@ -430,7 +431,10 @@ static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
// Store ctx in a local stack-like structure
// Ensure thread-safety.
- BlockingMutexLock l(&interceptor_ctx()->atexit_mu);
+ Lock l(&interceptor_ctx()->atexit_mu);
+ // __cxa_atexit calls calloc. If we don't ignore interceptors, we will fail
+ // because atexit_mu is still held on exit from the calloc interceptor.
+ ScopedIgnoreInterceptors ignore;
res = REAL(__cxa_atexit)((void (*)(void *a))at_exit_wrapper, 0, 0);
// Push AtExitCtx on the top of the stack of callback functions
@@ -655,8 +659,11 @@ TSAN_INTERCEPTOR(void*, malloc, uptr size) {
return p;
}
+// In glibc<2.25, dynamic TLS blocks are allocated by __libc_memalign. Intercept
+// __libc_memalign so that (1) we can detect races and (2) free will not be
+// called on blocks that libc allocated internally.
TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) {
- SCOPED_TSAN_INTERCEPTOR(__libc_memalign, align, sz);
+ SCOPED_INTERCEPTOR_RAW(__libc_memalign, align, sz);
return user_memalign(thr, pc, align, sz);
}
@@ -769,6 +776,11 @@ static void *mmap_interceptor(ThreadState *thr, uptr pc, Mmap real_mmap,
if (!fix_mmap_addr(&addr, sz, flags)) return MAP_FAILED;
void *res = real_mmap(addr, sz, prot, flags, fd, off);
if (res != MAP_FAILED) {
+ if (!IsAppMem((uptr)res) || !IsAppMem((uptr)res + sz - 1)) {
+ Report("ThreadSanitizer: mmap at bad address: addr=%p size=%p res=%p\n",
+ addr, (void*)sz, res);
+ Die();
+ }
if (fd > 0) FdAccess(thr, pc, fd);
MemoryRangeImitateWriteOrResetRange(thr, pc, (uptr)res, sz);
}
@@ -1968,7 +1980,8 @@ static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
// because in async signal processing case (when handler is called directly
// from rtl_generic_sighandler) we have not yet received the reraised
// signal; and it looks too fragile to intercept all ways to reraise a signal.
- if (flags()->report_bugs && !sync && sig != SIGTERM && errno != 99) {
+ if (ShouldReport(thr, ReportTypeErrnoInSignal) && !sync && sig != SIGTERM &&
+ errno != 99) {
VarSizeStackTrace stack;
// StackTrace::GetNestInstructionPc(pc) is used because return address is
// expected, OutputReport() will undo this.
@@ -2138,26 +2151,32 @@ TSAN_INTERCEPTOR(int, fork, int fake) {
if (in_symbolizer())
return REAL(fork)(fake);
SCOPED_INTERCEPTOR_RAW(fork, fake);
+ return REAL(fork)(fake);
+}
+
+void atfork_prepare() {
+ if (in_symbolizer())
+ return;
+ ThreadState *thr = cur_thread();
+ const uptr pc = StackTrace::GetCurrentPc();
ForkBefore(thr, pc);
- int pid;
- {
- // On OS X, REAL(fork) can call intercepted functions (OSSpinLockLock), and
- // we'll assert in CheckNoLocks() unless we ignore interceptors.
- ScopedIgnoreInterceptors ignore;
- pid = REAL(fork)(fake);
- }
- if (pid == 0) {
- // child
- ForkChildAfter(thr, pc);
- FdOnFork(thr, pc);
- } else if (pid > 0) {
- // parent
- ForkParentAfter(thr, pc);
- } else {
- // error
- ForkParentAfter(thr, pc);
- }
- return pid;
+}
+
+void atfork_parent() {
+ if (in_symbolizer())
+ return;
+ ThreadState *thr = cur_thread();
+ const uptr pc = StackTrace::GetCurrentPc();
+ ForkParentAfter(thr, pc);
+}
+
+void atfork_child() {
+ if (in_symbolizer())
+ return;
+ ThreadState *thr = cur_thread();
+ const uptr pc = StackTrace::GetCurrentPc();
+ ForkChildAfter(thr, pc);
+ FdOnFork(thr, pc);
}
TSAN_INTERCEPTOR(int, vfork, int fake) {
@@ -2253,11 +2272,14 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc,
#define NEED_TLS_GET_ADDR
#endif
#undef SANITIZER_INTERCEPT_TLS_GET_ADDR
+#define SANITIZER_INTERCEPT_TLS_GET_OFFSET 1
#undef SANITIZER_INTERCEPT_PTHREAD_SIGMASK
#define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)
#define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \
INTERCEPT_FUNCTION_VER(name, ver)
+#define COMMON_INTERCEPT_FUNCTION_VER_UNVERSIONED_FALLBACK(name, ver) \
+ (INTERCEPT_FUNCTION_VER(name, ver) || INTERCEPT_FUNCTION(name))
#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
MemoryAccessRange(((TsanInterceptorContext *)ctx)->thr, \
@@ -2401,6 +2423,10 @@ int sigaction_impl(int sig, const __sanitizer_sigaction *act,
// the signal handler through rtl_sigaction, very bad things will happen.
// The handler will run synchronously and corrupt tsan per-thread state.
SCOPED_INTERCEPTOR_RAW(sigaction, sig, act, old);
+ if (sig <= 0 || sig >= kSigCount) {
+ errno = errno_EINVAL;
+ return -1;
+ }
__sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
__sanitizer_sigaction old_stored;
if (old) internal_memcpy(&old_stored, &sigactions[sig], sizeof(old_stored));
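The added bounds check makes the interceptor reject out-of-range signal numbers with EINVAL before it touches the sigactions table, matching what POSIX sigaction itself does for invalid signals. A small stand-alone sketch of the behaviour being preserved (not part of this patch):

#include <cerrno>
#include <csignal>
#include <cstdio>

int main() {
  struct sigaction sa = {};
  sa.sa_handler = SIG_IGN;
  // 0 is not a valid signal number, so sigaction must fail with EINVAL.
  if (sigaction(0, &sa, nullptr) == -1 && errno == EINVAL)
    std::puts("invalid signal rejected with EINVAL");
  return 0;
}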
@@ -2508,13 +2534,10 @@ static USED void syscall_fd_release(uptr pc, int fd) {
FdRelease(thr, pc, fd);
}
-static void syscall_pre_fork(uptr pc) {
- TSAN_SYSCALL();
- ForkBefore(thr, pc);
-}
+static void syscall_pre_fork(uptr pc) { ForkBefore(cur_thread(), pc); }
static void syscall_post_fork(uptr pc, int pid) {
- TSAN_SYSCALL();
+ ThreadState *thr = cur_thread();
if (pid == 0) {
// child
ForkChildAfter(thr, pc);
@@ -2569,6 +2592,20 @@ static void syscall_post_fork(uptr pc, int pid) {
#include "sanitizer_common/sanitizer_syscalls_netbsd.inc"
#ifdef NEED_TLS_GET_ADDR
+
+static void handle_tls_addr(void *arg, void *res) {
+ ThreadState *thr = cur_thread();
+ if (!thr)
+ return;
+ DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, res, thr->tls_addr,
+ thr->tls_addr + thr->tls_size);
+ if (!dtv)
+ return;
+ // New DTLS block has been allocated.
+ MemoryResetRange(thr, 0, dtv->beg, dtv->size);
+}
+
+#if !SANITIZER_S390
// Define own interceptor instead of sanitizer_common's for three reasons:
// 1. It must not process pending signals.
// Signal handlers may contain MOVDQA instruction (see below).
@@ -2581,18 +2618,18 @@ static void syscall_post_fork(uptr pc, int pid) {
// execute MOVDQA with stack addresses.
TSAN_INTERCEPTOR(void *, __tls_get_addr, void *arg) {
void *res = REAL(__tls_get_addr)(arg);
- ThreadState *thr = cur_thread();
- if (!thr)
- return res;
- DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, res, thr->tls_addr,
- thr->tls_addr + thr->tls_size);
- if (!dtv)
- return res;
- // New DTLS block has been allocated.
- MemoryResetRange(thr, 0, dtv->beg, dtv->size);
+ handle_tls_addr(arg, res);
+ return res;
+}
+#else // SANITIZER_S390
+TSAN_INTERCEPTOR(uptr, __tls_get_addr_internal, void *arg) {
+ uptr res = __tls_get_offset_wrapper(arg, REAL(__tls_get_offset));
+ char *tp = static_cast<char *>(__builtin_thread_pointer());
+ handle_tls_addr(arg, res + tp);
return res;
}
#endif
+#endif
#if SANITIZER_NETBSD
TSAN_INTERCEPTOR(void, _lwp_exit) {
@@ -2814,7 +2851,12 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(_exit);
#ifdef NEED_TLS_GET_ADDR
+#if !SANITIZER_S390
TSAN_INTERCEPT(__tls_get_addr);
+#else
+ TSAN_INTERCEPT(__tls_get_addr_internal);
+ TSAN_INTERCEPT(__tls_get_offset);
+#endif
#endif
TSAN_MAYBE_INTERCEPT__LWP_EXIT;
@@ -2830,6 +2872,10 @@ void InitializeInterceptors() {
Printf("ThreadSanitizer: failed to setup atexit callback\n");
Die();
}
+ if (pthread_atfork(atfork_prepare, atfork_parent, atfork_child)) {
+ Printf("ThreadSanitizer: failed to setup atfork callbacks\n");
+ Die();
+ }
#if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
if (pthread_key_create(&interceptor_ctx()->finalize_key, &thread_finalize)) {
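The fork handling is moved out of the fork interceptor and into pthread_atfork callbacks registered here, so the same prepare/parent/child hooks also run for forks that do not go through the interceptor. A minimal sketch of the underlying registration pattern, outside of TSan and with hypothetical handler names:

#include <pthread.h>
#include <unistd.h>
#include <cstdio>

// Hypothetical handlers standing in for atfork_prepare/parent/child above.
static void prepare() { std::puts("before fork"); }
static void parent()  { std::puts("in parent after fork"); }
static void child()   { std::puts("in child after fork"); }

int main() {
  // pthread_atfork returns 0 on success, an error number otherwise.
  if (pthread_atfork(prepare, parent, child) != 0)
    return 1;
  pid_t pid = fork();  // prepare runs before, parent/child run after
  if (pid == 0)
    _exit(0);          // child exits immediately
  return 0;
}

Build with -pthread; the handlers fire on every fork() in the process, which is why the runtime registers them once during InitializeInterceptors().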
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface.cpp b/compiler-rt/lib/tsan/rtl/tsan_interface.cpp
index 55f1c9834f70..9bd0e8580b17 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface.cpp
@@ -40,13 +40,13 @@ void __tsan_write16(void *addr) {
}
void __tsan_read16_pc(void *addr, void *pc) {
- MemoryRead(cur_thread(), STRIP_PC(pc), (uptr)addr, kSizeLog8);
- MemoryRead(cur_thread(), STRIP_PC(pc), (uptr)addr + 8, kSizeLog8);
+ MemoryRead(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog8);
+ MemoryRead(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr + 8, kSizeLog8);
}
void __tsan_write16_pc(void *addr, void *pc) {
- MemoryWrite(cur_thread(), STRIP_PC(pc), (uptr)addr, kSizeLog8);
- MemoryWrite(cur_thread(), STRIP_PC(pc), (uptr)addr + 8, kSizeLog8);
+ MemoryWrite(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog8);
+ MemoryWrite(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr + 8, kSizeLog8);
}
// __tsan_unaligned_read/write calls are emitted by compiler.
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface.h b/compiler-rt/lib/tsan/rtl/tsan_interface.h
index 6d7286ca5b8a..124aa2fd2143 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface.h
@@ -196,7 +196,8 @@ typedef unsigned short a16;
typedef unsigned int a32;
typedef unsigned long long a64;
#if !SANITIZER_GO && (defined(__SIZEOF_INT128__) \
- || (__clang_major__ * 100 + __clang_minor__ >= 302)) && !defined(__mips64)
+ || (__clang_major__ * 100 + __clang_minor__ >= 302)) && \
+ !defined(__mips64) && !defined(__s390x__)
__extension__ typedef __int128 a128;
# define __TSAN_HAS_INT128 1
#else
@@ -204,7 +205,7 @@ __extension__ typedef __int128 a128;
#endif
// Part of ABI, do not change.
-// https://github.com/llvm/llvm-project/blob/master/libcxx/include/atomic
+// https://github.com/llvm/llvm-project/blob/main/libcxx/include/atomic
typedef enum {
mo_relaxed,
mo_consume,
@@ -415,6 +416,13 @@ void __tsan_go_atomic32_compare_exchange(ThreadState *thr, uptr cpc, uptr pc,
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_compare_exchange(ThreadState *thr, uptr cpc, uptr pc,
u8 *a);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_on_initialize();
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_on_finalize(int failed);
+
} // extern "C"
} // namespace __tsan
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp b/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp
index 99516d94bba3..47314f5ad812 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp
@@ -15,7 +15,6 @@
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_vector.h"
#include "tsan_interface_ann.h"
-#include "tsan_mutex.h"
#include "tsan_report.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
@@ -38,7 +37,7 @@ class ScopedAnnotation {
~ScopedAnnotation() {
FuncExit(thr_);
- CheckNoLocks(thr_);
+ CheckedMutex::CheckNoLocks();
}
private:
ThreadState *const thr_;
@@ -49,8 +48,6 @@ class ScopedAnnotation {
return ret; \
ThreadState *thr = cur_thread(); \
const uptr caller_pc = (uptr)__builtin_return_address(0); \
- StatInc(thr, StatAnnotation); \
- StatInc(thr, Stat##typ); \
ScopedAnnotation sa(thr, __func__, caller_pc); \
const uptr pc = StackTrace::GetCurrentPc(); \
(void)pc; \
@@ -77,9 +74,7 @@ struct DynamicAnnContext {
ExpectRace expect;
ExpectRace benign;
- DynamicAnnContext()
- : mtx(MutexTypeAnnotations, StatMtxAnnotations) {
- }
+ DynamicAnnContext() : mtx(MutexTypeAnnotations) {}
};
static DynamicAnnContext *dyn_ann_ctx;
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
index 3f459aff532c..89bb75394553 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
@@ -218,8 +218,9 @@ static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
}
#endif
-template<typename T>
-static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) {
+template <typename T>
+static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
+ morder mo) NO_THREAD_SAFETY_ANALYSIS {
CHECK(IsLoadOrder(mo));
// This fast-path is critical for performance.
// Assume the access is atomic.
@@ -254,9 +255,9 @@ static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
}
#endif
-template<typename T>
+template <typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
- morder mo) {
+ morder mo) NO_THREAD_SAFETY_ANALYSIS {
CHECK(IsStoreOrder(mo));
MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
// This fast-path is critical for performance.
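AtomicLoad and AtomicStore here (and several fork/mutex functions later in the patch) gain NO_THREAD_SAFETY_ANALYSIS, Clang's attribute for excluding a function from -Wthread-safety checking. A hedged sketch of how such a macro is commonly defined and applied, with hypothetical names and outside of this codebase:

#include <mutex>

// Typical definition: expands to Clang's attribute, and to nothing elsewhere.
#if defined(__clang__)
#define NO_THREAD_SAFETY_ANALYSIS __attribute__((no_thread_safety_analysis))
#else
#define NO_THREAD_SAFETY_ANALYSIS
#endif

static std::mutex mu;
static int counter = 0;

// Clang's thread-safety analysis skips this function entirely, so locking
// patterns it cannot model (e.g. conditional or split lock/unlock) do not
// produce -Wthread-safety warnings.
static void bump() NO_THREAD_SAFETY_ANALYSIS {
  mu.lock();
  ++counter;
  mu.unlock();
}

int main() {
  bump();
  return counter == 1 ? 0 : 1;
}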
@@ -277,8 +278,9 @@ static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
s->mtx.Unlock();
}
-template<typename T, T (*F)(volatile T *v, T op)>
-static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
+template <typename T, T (*F)(volatile T *v, T op)>
+static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v,
+ morder mo) NO_THREAD_SAFETY_ANALYSIS {
MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
SyncVar *s = 0;
if (mo != mo_relaxed) {
@@ -399,37 +401,48 @@ static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
return c;
}
-template<typename T>
-static bool AtomicCAS(ThreadState *thr, uptr pc,
- volatile T *a, T *c, T v, morder mo, morder fmo) {
- (void)fmo; // Unused because llvm does not pass it yet.
+template <typename T>
+static bool AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T *c, T v, morder mo,
+ morder fmo) NO_THREAD_SAFETY_ANALYSIS {
+ // 31.7.2.18: "The failure argument shall not be memory_order_release
+  // nor memory_order_acq_rel". LLVM (2021-05) falls back to Monotonic
+ // (mo_relaxed) when those are used.
+ CHECK(IsLoadOrder(fmo));
+
MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
SyncVar *s = 0;
- bool write_lock = mo != mo_acquire && mo != mo_consume;
- if (mo != mo_relaxed) {
+ bool write_lock = IsReleaseOrder(mo);
+
+ if (mo != mo_relaxed || fmo != mo_relaxed)
s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, write_lock);
+
+ T cc = *c;
+ T pr = func_cas(a, cc, v);
+ bool success = pr == cc;
+ if (!success) {
+ *c = pr;
+ mo = fmo;
+ }
+
+ if (s) {
thr->fast_state.IncrementEpoch();
// Can't increment epoch w/o writing to the trace as well.
TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
- if (IsAcqRelOrder(mo))
+
+ if (success && IsAcqRelOrder(mo))
AcquireReleaseImpl(thr, pc, &s->clock);
- else if (IsReleaseOrder(mo))
+ else if (success && IsReleaseOrder(mo))
ReleaseImpl(thr, pc, &s->clock);
else if (IsAcquireOrder(mo))
AcquireImpl(thr, pc, &s->clock);
- }
- T cc = *c;
- T pr = func_cas(a, cc, v);
- if (s) {
+
if (write_lock)
s->mtx.Unlock();
else
s->mtx.ReadUnlock();
}
- if (pr == cc)
- return true;
- *c = pr;
- return false;
+
+ return success;
}
template<typename T>
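The reworked AtomicCAS now checks that the failure order is a pure load order (per the C++ standard wording quoted in the comment) and applies release semantics only when the exchange actually succeeds. The user-visible contract it mirrors can be seen with std::atomic directly; a small illustration, not part of this patch:

#include <atomic>
#include <cstdio>

int main() {
  std::atomic<int> a{0};
  int expected = 1;
  // Success order acq_rel, failure order acquire; the failure order must not
  // be memory_order_release or memory_order_acq_rel.
  bool ok = a.compare_exchange_strong(expected, 2, std::memory_order_acq_rel,
                                      std::memory_order_acquire);
  // The CAS fails (a was 0, not 1), so 'expected' is updated to the old value.
  std::printf("ok=%d expected=%d value=%d\n", ok, expected, a.load());
  return 0;
}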
@@ -481,7 +494,6 @@ static morder convert_morder(morder mo) {
const uptr callpc = (uptr)__builtin_return_address(0); \
uptr pc = StackTrace::GetCurrentPc(); \
mo = convert_morder(mo); \
- AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
ScopedAtomic sa(thr, callpc, a, mo, __func__); \
return Atomic##func(thr, pc, __VA_ARGS__); \
/**/
@@ -502,22 +514,6 @@ class ScopedAtomic {
ThreadState *thr_;
};
-static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
- StatInc(thr, StatAtomic);
- StatInc(thr, t);
- StatInc(thr, size == 1 ? StatAtomic1
- : size == 2 ? StatAtomic2
- : size == 4 ? StatAtomic4
- : size == 8 ? StatAtomic8
- : StatAtomic16);
- StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
- : mo == mo_consume ? StatAtomicConsume
- : mo == mo_acquire ? StatAtomicAcquire
- : mo == mo_release ? StatAtomicRelease
- : mo == mo_acq_rel ? StatAtomicAcq_Rel
- : StatAtomicSeq_Cst);
-}
-
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_inl.h b/compiler-rt/lib/tsan/rtl/tsan_interface_inl.h
index f5d743c10772..5e77d4d3d288 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface_inl.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface_inl.h
@@ -51,35 +51,35 @@ void __tsan_write8(void *addr) {
}
void __tsan_read1_pc(void *addr, void *pc) {
- MemoryRead(cur_thread(), STRIP_PC(pc), (uptr)addr, kSizeLog1);
+ MemoryRead(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog1);
}
void __tsan_read2_pc(void *addr, void *pc) {
- MemoryRead(cur_thread(), STRIP_PC(pc), (uptr)addr, kSizeLog2);
+ MemoryRead(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog2);
}
void __tsan_read4_pc(void *addr, void *pc) {
- MemoryRead(cur_thread(), STRIP_PC(pc), (uptr)addr, kSizeLog4);
+ MemoryRead(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog4);
}
void __tsan_read8_pc(void *addr, void *pc) {
- MemoryRead(cur_thread(), STRIP_PC(pc), (uptr)addr, kSizeLog8);
+ MemoryRead(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog8);
}
void __tsan_write1_pc(void *addr, void *pc) {
- MemoryWrite(cur_thread(), STRIP_PC(pc), (uptr)addr, kSizeLog1);
+ MemoryWrite(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog1);
}
void __tsan_write2_pc(void *addr, void *pc) {
- MemoryWrite(cur_thread(), STRIP_PC(pc), (uptr)addr, kSizeLog2);
+ MemoryWrite(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog2);
}
void __tsan_write4_pc(void *addr, void *pc) {
- MemoryWrite(cur_thread(), STRIP_PC(pc), (uptr)addr, kSizeLog4);
+ MemoryWrite(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog4);
}
void __tsan_write8_pc(void *addr, void *pc) {
- MemoryWrite(cur_thread(), STRIP_PC(pc), (uptr)addr, kSizeLog8);
+ MemoryWrite(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog8);
}
void __tsan_vptr_update(void **vptr_p, void *new_val) {
@@ -101,7 +101,7 @@ void __tsan_vptr_read(void **vptr_p) {
}
void __tsan_func_entry(void *pc) {
- FuncEntry(cur_thread(), STRIP_PC(pc));
+ FuncEntry(cur_thread(), STRIP_PAC_PC(pc));
}
void __tsan_func_exit() {
@@ -125,9 +125,9 @@ void __tsan_write_range(void *addr, uptr size) {
}
void __tsan_read_range_pc(void *addr, uptr size, void *pc) {
- MemoryAccessRange(cur_thread(), STRIP_PC(pc), (uptr)addr, size, false);
+ MemoryAccessRange(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, size, false);
}
void __tsan_write_range_pc(void *addr, uptr size, void *pc) {
- MemoryAccessRange(cur_thread(), STRIP_PC(pc), (uptr)addr, size, true);
+ MemoryAccessRange(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, size, true);
}
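The STRIP_PC to STRIP_PAC_PC rename reflects that, on targets with ARM pointer authentication, the pc argument may carry a PAC in its unused top bits which must be removed before the value is used as an address. The real macro is target-specific; the following is only an illustrative, hypothetical sketch that masks to a 48-bit virtual address:

#include <cstdint>
#include <cstdio>

// Hypothetical illustration only: assumes a 48-bit virtual address space and
// clears everything above it. Real implementations derive the mask (or use a
// dedicated instruction/intrinsic) from the target configuration.
static uintptr_t strip_pac(uintptr_t pc) {
  const uintptr_t kVaMask = (uintptr_t(1) << 48) - 1;
  return pc & kVaMask;
}

int main() {
  uintptr_t signed_pc = 0x002b000000401234ull;  // made-up PAC bits + address
  std::printf("0x%llx\n", (unsigned long long)strip_pac(signed_pc));
  return 0;
}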
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_java.cpp b/compiler-rt/lib/tsan/rtl/tsan_interface_java.cpp
index 081c6ff1022e..6aa8a7b1d6a7 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface_java.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface_java.cpp
@@ -12,7 +12,6 @@
#include "tsan_interface_java.h"
#include "tsan_rtl.h"
-#include "tsan_mutex.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
diff --git a/compiler-rt/lib/tsan/rtl/tsan_mman.cpp b/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
index 743e67bf2f7d..7765bc070522 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
@@ -70,10 +70,7 @@ struct GlobalProc {
Mutex mtx;
Processor *proc;
- GlobalProc()
- : mtx(MutexTypeGlobalProc, StatMtxGlobalProc)
- , proc(ProcCreate()) {
- }
+ GlobalProc() : mtx(MutexTypeGlobalProc), proc(ProcCreate()) {}
};
static char global_proc_placeholder[sizeof(GlobalProc)] ALIGNED(64);
@@ -145,7 +142,7 @@ void AllocatorPrintStats() {
static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
if (atomic_load_relaxed(&thr->in_signal_handler) == 0 ||
- !flags()->report_signal_unsafe)
+ !ShouldReport(thr, ReportTypeSignalUnsafe))
return;
VarSizeStackTrace stack;
ObtainCurrentStack(thr, pc, &stack);
diff --git a/compiler-rt/lib/tsan/rtl/tsan_mutex.cpp b/compiler-rt/lib/tsan/rtl/tsan_mutex.cpp
deleted file mode 100644
index 7a0918f2a2c0..000000000000
--- a/compiler-rt/lib/tsan/rtl/tsan_mutex.cpp
+++ /dev/null
@@ -1,289 +0,0 @@
-//===-- tsan_mutex.cpp ----------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of ThreadSanitizer (TSan), a race detector.
-//
-//===----------------------------------------------------------------------===//
-#include "sanitizer_common/sanitizer_libc.h"
-#include "tsan_mutex.h"
-#include "tsan_platform.h"
-#include "tsan_rtl.h"
-
-namespace __tsan {
-
-// Simple reader-writer spin-mutex. Optimized for not-so-contended case.
-// Readers have preference, can possibly starvate writers.
-
-// The table fixes what mutexes can be locked under what mutexes.
-// E.g. if the row for MutexTypeThreads contains MutexTypeReport,
-// then Report mutex can be locked while under Threads mutex.
-// The leaf mutexes can be locked under any other mutexes.
-// Recursive locking is not supported.
-#if SANITIZER_DEBUG && !SANITIZER_GO
-const MutexType MutexTypeLeaf = (MutexType)-1;
-static MutexType CanLockTab[MutexTypeCount][MutexTypeCount] = {
- /*0 MutexTypeInvalid*/ {},
- /*1 MutexTypeTrace*/ {MutexTypeLeaf},
- /*2 MutexTypeThreads*/ {MutexTypeReport},
- /*3 MutexTypeReport*/ {MutexTypeSyncVar,
- MutexTypeMBlock, MutexTypeJavaMBlock},
- /*4 MutexTypeSyncVar*/ {MutexTypeDDetector},
- /*5 MutexTypeSyncTab*/ {}, // unused
- /*6 MutexTypeSlab*/ {MutexTypeLeaf},
- /*7 MutexTypeAnnotations*/ {},
- /*8 MutexTypeAtExit*/ {MutexTypeSyncVar},
- /*9 MutexTypeMBlock*/ {MutexTypeSyncVar},
- /*10 MutexTypeJavaMBlock*/ {MutexTypeSyncVar},
- /*11 MutexTypeDDetector*/ {},
- /*12 MutexTypeFired*/ {MutexTypeLeaf},
- /*13 MutexTypeRacy*/ {MutexTypeLeaf},
- /*14 MutexTypeGlobalProc*/ {},
-};
-
-static bool CanLockAdj[MutexTypeCount][MutexTypeCount];
-#endif
-
-void InitializeMutex() {
-#if SANITIZER_DEBUG && !SANITIZER_GO
- // Build the "can lock" adjacency matrix.
- // If [i][j]==true, then one can lock mutex j while under mutex i.
- const int N = MutexTypeCount;
- int cnt[N] = {};
- bool leaf[N] = {};
- for (int i = 1; i < N; i++) {
- for (int j = 0; j < N; j++) {
- MutexType z = CanLockTab[i][j];
- if (z == MutexTypeInvalid)
- continue;
- if (z == MutexTypeLeaf) {
- CHECK(!leaf[i]);
- leaf[i] = true;
- continue;
- }
- CHECK(!CanLockAdj[i][(int)z]);
- CanLockAdj[i][(int)z] = true;
- cnt[i]++;
- }
- }
- for (int i = 0; i < N; i++) {
- CHECK(!leaf[i] || cnt[i] == 0);
- }
- // Add leaf mutexes.
- for (int i = 0; i < N; i++) {
- if (!leaf[i])
- continue;
- for (int j = 0; j < N; j++) {
- if (i == j || leaf[j] || j == MutexTypeInvalid)
- continue;
- CHECK(!CanLockAdj[j][i]);
- CanLockAdj[j][i] = true;
- }
- }
- // Build the transitive closure.
- bool CanLockAdj2[MutexTypeCount][MutexTypeCount];
- for (int i = 0; i < N; i++) {
- for (int j = 0; j < N; j++) {
- CanLockAdj2[i][j] = CanLockAdj[i][j];
- }
- }
- for (int k = 0; k < N; k++) {
- for (int i = 0; i < N; i++) {
- for (int j = 0; j < N; j++) {
- if (CanLockAdj2[i][k] && CanLockAdj2[k][j]) {
- CanLockAdj2[i][j] = true;
- }
- }
- }
- }
-#if 0
- Printf("Can lock graph:\n");
- for (int i = 0; i < N; i++) {
- for (int j = 0; j < N; j++) {
- Printf("%d ", CanLockAdj[i][j]);
- }
- Printf("\n");
- }
- Printf("Can lock graph closure:\n");
- for (int i = 0; i < N; i++) {
- for (int j = 0; j < N; j++) {
- Printf("%d ", CanLockAdj2[i][j]);
- }
- Printf("\n");
- }
-#endif
- // Verify that the graph is acyclic.
- for (int i = 0; i < N; i++) {
- if (CanLockAdj2[i][i]) {
- Printf("Mutex %d participates in a cycle\n", i);
- Die();
- }
- }
-#endif
-}
-
-InternalDeadlockDetector::InternalDeadlockDetector() {
- // Rely on zero initialization because some mutexes can be locked before ctor.
-}
-
-#if SANITIZER_DEBUG && !SANITIZER_GO
-void InternalDeadlockDetector::Lock(MutexType t) {
- // Printf("LOCK %d @%zu\n", t, seq_ + 1);
- CHECK_GT(t, MutexTypeInvalid);
- CHECK_LT(t, MutexTypeCount);
- u64 max_seq = 0;
- u64 max_idx = MutexTypeInvalid;
- for (int i = 0; i != MutexTypeCount; i++) {
- if (locked_[i] == 0)
- continue;
- CHECK_NE(locked_[i], max_seq);
- if (max_seq < locked_[i]) {
- max_seq = locked_[i];
- max_idx = i;
- }
- }
- locked_[t] = ++seq_;
- if (max_idx == MutexTypeInvalid)
- return;
- // Printf(" last %d @%zu\n", max_idx, max_seq);
- if (!CanLockAdj[max_idx][t]) {
- Printf("ThreadSanitizer: internal deadlock detected\n");
- Printf("ThreadSanitizer: can't lock %d while under %zu\n",
- t, (uptr)max_idx);
- CHECK(0);
- }
-}
-
-void InternalDeadlockDetector::Unlock(MutexType t) {
- // Printf("UNLO %d @%zu #%zu\n", t, seq_, locked_[t]);
- CHECK(locked_[t]);
- locked_[t] = 0;
-}
-
-void InternalDeadlockDetector::CheckNoLocks() {
- for (int i = 0; i != MutexTypeCount; i++) {
- CHECK_EQ(locked_[i], 0);
- }
-}
-#endif
-
-void CheckNoLocks(ThreadState *thr) {
-#if SANITIZER_DEBUG && !SANITIZER_GO
- thr->internal_deadlock_detector.CheckNoLocks();
-#endif
-}
-
-const uptr kUnlocked = 0;
-const uptr kWriteLock = 1;
-const uptr kReadLock = 2;
-
-class Backoff {
- public:
- Backoff()
- : iter_() {
- }
-
- bool Do() {
- if (iter_++ < kActiveSpinIters)
- proc_yield(kActiveSpinCnt);
- else
- internal_sched_yield();
- return true;
- }
-
- u64 Contention() const {
- u64 active = iter_ % kActiveSpinIters;
- u64 passive = iter_ - active;
- return active + 10 * passive;
- }
-
- private:
- int iter_;
- static const int kActiveSpinIters = 10;
- static const int kActiveSpinCnt = 20;
-};
-
-Mutex::Mutex(MutexType type, StatType stat_type) {
- CHECK_GT(type, MutexTypeInvalid);
- CHECK_LT(type, MutexTypeCount);
-#if SANITIZER_DEBUG
- type_ = type;
-#endif
-#if TSAN_COLLECT_STATS
- stat_type_ = stat_type;
-#endif
- atomic_store(&state_, kUnlocked, memory_order_relaxed);
-}
-
-Mutex::~Mutex() {
- CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
-}
-
-void Mutex::Lock() {
-#if SANITIZER_DEBUG && !SANITIZER_GO
- cur_thread()->internal_deadlock_detector.Lock(type_);
-#endif
- uptr cmp = kUnlocked;
- if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
- memory_order_acquire))
- return;
- for (Backoff backoff; backoff.Do();) {
- if (atomic_load(&state_, memory_order_relaxed) == kUnlocked) {
- cmp = kUnlocked;
- if (atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
- memory_order_acquire)) {
-#if TSAN_COLLECT_STATS && !SANITIZER_GO
- StatInc(cur_thread(), stat_type_, backoff.Contention());
-#endif
- return;
- }
- }
- }
-}
-
-void Mutex::Unlock() {
- uptr prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
- (void)prev;
- DCHECK_NE(prev & kWriteLock, 0);
-#if SANITIZER_DEBUG && !SANITIZER_GO
- cur_thread()->internal_deadlock_detector.Unlock(type_);
-#endif
-}
-
-void Mutex::ReadLock() {
-#if SANITIZER_DEBUG && !SANITIZER_GO
- cur_thread()->internal_deadlock_detector.Lock(type_);
-#endif
- uptr prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
- if ((prev & kWriteLock) == 0)
- return;
- for (Backoff backoff; backoff.Do();) {
- prev = atomic_load(&state_, memory_order_acquire);
- if ((prev & kWriteLock) == 0) {
-#if TSAN_COLLECT_STATS && !SANITIZER_GO
- StatInc(cur_thread(), stat_type_, backoff.Contention());
-#endif
- return;
- }
- }
-}
-
-void Mutex::ReadUnlock() {
- uptr prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
- (void)prev;
- DCHECK_EQ(prev & kWriteLock, 0);
- DCHECK_GT(prev & ~kWriteLock, 0);
-#if SANITIZER_DEBUG && !SANITIZER_GO
- cur_thread()->internal_deadlock_detector.Unlock(type_);
-#endif
-}
-
-void Mutex::CheckLocked() {
- CHECK_NE(atomic_load(&state_, memory_order_relaxed), 0);
-}
-
-} // namespace __tsan
diff --git a/compiler-rt/lib/tsan/rtl/tsan_mutex.h b/compiler-rt/lib/tsan/rtl/tsan_mutex.h
deleted file mode 100644
index 80fdc6ed57bb..000000000000
--- a/compiler-rt/lib/tsan/rtl/tsan_mutex.h
+++ /dev/null
@@ -1,90 +0,0 @@
-//===-- tsan_mutex.h --------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of ThreadSanitizer (TSan), a race detector.
-//
-//===----------------------------------------------------------------------===//
-#ifndef TSAN_MUTEX_H
-#define TSAN_MUTEX_H
-
-#include "sanitizer_common/sanitizer_atomic.h"
-#include "sanitizer_common/sanitizer_mutex.h"
-#include "tsan_defs.h"
-
-namespace __tsan {
-
-enum MutexType {
- MutexTypeInvalid,
- MutexTypeTrace,
- MutexTypeThreads,
- MutexTypeReport,
- MutexTypeSyncVar,
- MutexTypeSyncTab,
- MutexTypeSlab,
- MutexTypeAnnotations,
- MutexTypeAtExit,
- MutexTypeMBlock,
- MutexTypeJavaMBlock,
- MutexTypeDDetector,
- MutexTypeFired,
- MutexTypeRacy,
- MutexTypeGlobalProc,
-
- // This must be the last.
- MutexTypeCount
-};
-
-class Mutex {
- public:
- explicit Mutex(MutexType type, StatType stat_type);
- ~Mutex();
-
- void Lock();
- void Unlock();
-
- void ReadLock();
- void ReadUnlock();
-
- void CheckLocked();
-
- private:
- atomic_uintptr_t state_;
-#if SANITIZER_DEBUG
- MutexType type_;
-#endif
-#if TSAN_COLLECT_STATS
- StatType stat_type_;
-#endif
-
- Mutex(const Mutex&);
- void operator = (const Mutex&);
-};
-
-typedef GenericScopedLock<Mutex> Lock;
-typedef GenericScopedReadLock<Mutex> ReadLock;
-
-class InternalDeadlockDetector {
- public:
- InternalDeadlockDetector();
- void Lock(MutexType t);
- void Unlock(MutexType t);
- void CheckNoLocks();
- private:
- u64 seq_;
- u64 locked_[MutexTypeCount];
-};
-
-void InitializeMutex();
-
-// Checks that the current thread does not hold any runtime locks
-// (e.g. when returning from an interceptor).
-void CheckNoLocks(ThreadState *thr);
-
-} // namespace __tsan
-
-#endif // TSAN_MUTEX_H
diff --git a/compiler-rt/lib/tsan/rtl/tsan_platform.h b/compiler-rt/lib/tsan/rtl/tsan_platform.h
index 81d345dea756..8bd218e25fd6 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_platform.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_platform.h
@@ -23,9 +23,21 @@
namespace __tsan {
+#if defined(__x86_64__)
+#define HAS_48_BIT_ADDRESS_SPACE 1
+#elif SANITIZER_IOSSIM // arm64 iOS simulators (order of #if matters)
+#define HAS_48_BIT_ADDRESS_SPACE 1
+#elif SANITIZER_IOS // arm64 iOS devices (order of #if matters)
+#define HAS_48_BIT_ADDRESS_SPACE 0
+#elif SANITIZER_MAC // arm64 macOS (order of #if matters)
+#define HAS_48_BIT_ADDRESS_SPACE 1
+#else
+#define HAS_48_BIT_ADDRESS_SPACE 0
+#endif
+
#if !SANITIZER_GO
-#if defined(__x86_64__)
+#if HAS_48_BIT_ADDRESS_SPACE
/*
C/C++ on linux/x86_64 and freebsd/x86_64
0000 0000 1000 - 0080 0000 0000: main binary and/or MAP_32BIT mappings (512GB)
@@ -93,7 +105,7 @@ fe00 0000 00 - ff00 0000 00: heap (4 GB)
ff00 0000 00 - ff80 0000 00: - (2 GB)
ff80 0000 00 - ffff ffff ff: modules and main thread stack (<2 GB)
*/
-struct Mapping {
+struct Mapping40 {
static const uptr kMetaShadowBeg = 0x4000000000ull;
static const uptr kMetaShadowEnd = 0x5000000000ull;
static const uptr kTraceMemBeg = 0xb000000000ull;
@@ -114,6 +126,7 @@ struct Mapping {
};
#define TSAN_MID_APP_RANGE 1
+#define TSAN_RUNTIME_VMA 1
#elif defined(__aarch64__) && defined(__APPLE__)
/*
C/C++ on Darwin/iOS/ARM64 (36-bit VMA, 64 GB VM)
@@ -146,7 +159,7 @@ struct Mapping {
static const uptr kVdsoBeg = 0x7000000000000000ull;
};
-#elif defined(__aarch64__)
+#elif defined(__aarch64__) && !defined(__APPLE__)
// AArch64 supports multiple VMA which leads to multiple address transformation
// functions. To support these multiple VMAS transformations and mappings TSAN
// runtime for AArch64 uses an external memory read (vmaSize) to select which
@@ -352,9 +365,41 @@ struct Mapping47 {
// Indicates the runtime will define the memory regions at runtime.
#define TSAN_RUNTIME_VMA 1
+#elif defined(__s390x__)
+/*
+C/C++ on linux/s390x
+While the kernel provides a 64-bit address space, we have to restrict ourselves
+to 48 bits due to how e.g. SyncVar::GetId() works.
+0000 0000 1000 - 0e00 0000 0000: binary, modules, stacks - 14 TiB
+0e00 0000 0000 - 4000 0000 0000: -
+4000 0000 0000 - 8000 0000 0000: shadow - 64TiB (4 * app)
+8000 0000 0000 - 9000 0000 0000: -
+9000 0000 0000 - 9800 0000 0000: metainfo - 8TiB (0.5 * app)
+9800 0000 0000 - a000 0000 0000: -
+a000 0000 0000 - b000 0000 0000: traces - 16TiB (max history * 128k threads)
+b000 0000 0000 - be00 0000 0000: -
+be00 0000 0000 - c000 0000 0000: heap - 2TiB (max supported by the allocator)
+*/
+struct Mapping {
+ static const uptr kMetaShadowBeg = 0x900000000000ull;
+ static const uptr kMetaShadowEnd = 0x980000000000ull;
+ static const uptr kTraceMemBeg = 0xa00000000000ull;
+ static const uptr kTraceMemEnd = 0xb00000000000ull;
+ static const uptr kShadowBeg = 0x400000000000ull;
+ static const uptr kShadowEnd = 0x800000000000ull;
+ static const uptr kHeapMemBeg = 0xbe0000000000ull;
+ static const uptr kHeapMemEnd = 0xc00000000000ull;
+ static const uptr kLoAppMemBeg = 0x000000001000ull;
+ static const uptr kLoAppMemEnd = 0x0e0000000000ull;
+ static const uptr kHiAppMemBeg = 0xc00000004000ull;
+ static const uptr kHiAppMemEnd = 0xc00000004000ull;
+ static const uptr kAppMemMsk = 0xb00000000000ull;
+ static const uptr kAppMemXor = 0x100000000000ull;
+ static const uptr kVdsoBeg = 0xfffffffff000ull;
+};
#endif
-#elif SANITIZER_GO && !SANITIZER_WINDOWS && defined(__x86_64__)
+#elif SANITIZER_GO && !SANITIZER_WINDOWS && HAS_48_BIT_ADDRESS_SPACE
/* Go on linux, darwin and freebsd on x86_64
0000 0000 1000 - 0000 1000 0000: executable
@@ -502,7 +547,7 @@ Go on linux/mips64 (47-bit VMA)
6000 0000 0000 - 6200 0000 0000: traces
6200 0000 0000 - 8000 0000 0000: -
*/
-struct Mapping {
+struct Mapping47 {
static const uptr kMetaShadowBeg = 0x300000000000ull;
static const uptr kMetaShadowEnd = 0x400000000000ull;
static const uptr kTraceMemBeg = 0x600000000000ull;
@@ -512,6 +557,31 @@ struct Mapping {
static const uptr kAppMemBeg = 0x000000001000ull;
static const uptr kAppMemEnd = 0x00e000000000ull;
};
+
+#define TSAN_RUNTIME_VMA 1
+
+#elif SANITIZER_GO && defined(__s390x__)
+/*
+Go on linux/s390x
+0000 0000 1000 - 1000 0000 0000: executable and heap - 16 TiB
+1000 0000 0000 - 4000 0000 0000: -
+4000 0000 0000 - 8000 0000 0000: shadow - 64TiB (4 * app)
+8000 0000 0000 - 9000 0000 0000: -
+9000 0000 0000 - 9800 0000 0000: metainfo - 8TiB (0.5 * app)
+9800 0000 0000 - a000 0000 0000: -
+a000 0000 0000 - b000 0000 0000: traces - 16TiB (max history * 128k threads)
+*/
+struct Mapping {
+ static const uptr kMetaShadowBeg = 0x900000000000ull;
+ static const uptr kMetaShadowEnd = 0x980000000000ull;
+ static const uptr kTraceMemBeg = 0xa00000000000ull;
+ static const uptr kTraceMemEnd = 0xb00000000000ull;
+ static const uptr kShadowBeg = 0x400000000000ull;
+ static const uptr kShadowEnd = 0x800000000000ull;
+ static const uptr kAppMemBeg = 0x000000001000ull;
+ static const uptr kAppMemEnd = 0x100000000000ull;
+};
+
#else
# error "Unknown platform"
#endif
@@ -592,6 +662,16 @@ uptr MappingArchImpl(void) {
}
DCHECK(0);
return 0;
+#elif defined(__mips64)
+ switch (vmaSize) {
+#if !SANITIZER_GO
+ case 40: return MappingImpl<Mapping40, Type>();
+#else
+ case 47: return MappingImpl<Mapping47, Type>();
+#endif
+ }
+ DCHECK(0);
+ return 0;
#else
return MappingImpl<Mapping, Type>();
#endif
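The mips64 cases added throughout this file follow the same idiom as the other TSAN_RUNTIME_VMA targets: the concrete Mapping struct is chosen at run time by switching on vmaSize and forwarding to a template instantiation. A stripped-down sketch of the idiom with hypothetical constants:

#include <cstdint>
#include <cstdio>

struct Mapping40 { static constexpr uint64_t kShadowBeg = 0x02000000000ull; };
struct Mapping47 { static constexpr uint64_t kShadowBeg = 0x10000000000ull; };

static int vma_size = 40;  // normally probed from the OS during early init

template <typename Mapping>
static uint64_t ShadowBegImpl() { return Mapping::kShadowBeg; }

static uint64_t ShadowBeg() {
  switch (vma_size) {
    case 40: return ShadowBegImpl<Mapping40>();
    case 47: return ShadowBegImpl<Mapping47>();
  }
  return 0;  // unsupported VMA size
}

int main() {
  std::printf("shadow begins at 0x%llx\n", (unsigned long long)ShadowBeg());
  return 0;
}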
@@ -749,6 +829,16 @@ bool IsAppMem(uptr mem) {
}
DCHECK(0);
return false;
+#elif defined(__mips64)
+ switch (vmaSize) {
+#if !SANITIZER_GO
+ case 40: return IsAppMemImpl<Mapping40>(mem);
+#else
+ case 47: return IsAppMemImpl<Mapping47>(mem);
+#endif
+ }
+ DCHECK(0);
+ return false;
#else
return IsAppMemImpl<Mapping>(mem);
#endif
@@ -780,6 +870,16 @@ bool IsShadowMem(uptr mem) {
}
DCHECK(0);
return false;
+#elif defined(__mips64)
+ switch (vmaSize) {
+#if !SANITIZER_GO
+ case 40: return IsShadowMemImpl<Mapping40>(mem);
+#else
+ case 47: return IsShadowMemImpl<Mapping47>(mem);
+#endif
+ }
+ DCHECK(0);
+ return false;
#else
return IsShadowMemImpl<Mapping>(mem);
#endif
@@ -811,6 +911,16 @@ bool IsMetaMem(uptr mem) {
}
DCHECK(0);
return false;
+#elif defined(__mips64)
+ switch (vmaSize) {
+#if !SANITIZER_GO
+ case 40: return IsMetaMemImpl<Mapping40>(mem);
+#else
+ case 47: return IsMetaMemImpl<Mapping47>(mem);
+#endif
+ }
+ DCHECK(0);
+ return false;
#else
return IsMetaMemImpl<Mapping>(mem);
#endif
@@ -852,6 +962,16 @@ uptr MemToShadow(uptr x) {
}
DCHECK(0);
return 0;
+#elif defined(__mips64)
+ switch (vmaSize) {
+#if !SANITIZER_GO
+ case 40: return MemToShadowImpl<Mapping40>(x);
+#else
+ case 47: return MemToShadowImpl<Mapping47>(x);
+#endif
+ }
+ DCHECK(0);
+ return 0;
#else
return MemToShadowImpl<Mapping>(x);
#endif
@@ -895,6 +1015,16 @@ u32 *MemToMeta(uptr x) {
}
DCHECK(0);
return 0;
+#elif defined(__mips64)
+ switch (vmaSize) {
+#if !SANITIZER_GO
+ case 40: return MemToMetaImpl<Mapping40>(x);
+#else
+ case 47: return MemToMetaImpl<Mapping47>(x);
+#endif
+ }
+ DCHECK(0);
+ return 0;
#else
return MemToMetaImpl<Mapping>(x);
#endif
@@ -951,6 +1081,16 @@ uptr ShadowToMem(uptr s) {
}
DCHECK(0);
return 0;
+#elif defined(__mips64)
+ switch (vmaSize) {
+#if !SANITIZER_GO
+ case 40: return ShadowToMemImpl<Mapping40>(s);
+#else
+ case 47: return ShadowToMemImpl<Mapping47>(s);
+#endif
+ }
+ DCHECK(0);
+ return 0;
#else
return ShadowToMemImpl<Mapping>(s);
#endif
@@ -990,6 +1130,16 @@ uptr GetThreadTrace(int tid) {
}
DCHECK(0);
return 0;
+#elif defined(__mips64)
+ switch (vmaSize) {
+#if !SANITIZER_GO
+ case 40: return GetThreadTraceImpl<Mapping40>(tid);
+#else
+ case 47: return GetThreadTraceImpl<Mapping47>(tid);
+#endif
+ }
+ DCHECK(0);
+ return 0;
#else
return GetThreadTraceImpl<Mapping>(tid);
#endif
@@ -1024,6 +1174,16 @@ uptr GetThreadTraceHeader(int tid) {
}
DCHECK(0);
return 0;
+#elif defined(__mips64)
+ switch (vmaSize) {
+#if !SANITIZER_GO
+ case 40: return GetThreadTraceHeaderImpl<Mapping40>(tid);
+#else
+ case 47: return GetThreadTraceHeaderImpl<Mapping47>(tid);
+#endif
+ }
+ DCHECK(0);
+ return 0;
#else
return GetThreadTraceHeaderImpl<Mapping>(tid);
#endif
diff --git a/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp b/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp
index 5e8879de26a7..cfe597e5380e 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp
@@ -250,6 +250,20 @@ void InitializePlatformEarly() {
Die();
}
# endif
+#elif defined(__mips64)
+# if !SANITIZER_GO
+ if (vmaSize != 40) {
+ Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
+ Printf("FATAL: Found %zd - Supported 40\n", vmaSize);
+ Die();
+ }
+# else
+ if (vmaSize != 47) {
+ Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
+ Printf("FATAL: Found %zd - Supported 47\n", vmaSize);
+ Die();
+ }
+# endif
#endif
#endif
}
@@ -377,6 +391,10 @@ static uptr UnmangleLongJmpSp(uptr mangled_sp) {
return mangled_sp ^ xor_key;
#elif defined(__mips__)
return mangled_sp;
+#elif defined(__s390x__)
+ // tcbhead_t.stack_guard
+ uptr xor_key = ((uptr *)__builtin_thread_pointer())[5];
+ return mangled_sp ^ xor_key;
#else
#error "Unknown platform"
#endif
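On s390x the saved stack pointer in a jmp_buf is demangled by XORing with the per-thread glibc stack guard, just as the other architectures in this function XOR with their own keys. The round trip is symmetric; a simplified stand-alone sketch with a made-up guard value:

#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t guard = 0x5a5a5a5a5a5a5a5aull;  // stand-in for the TLS guard
  uintptr_t sp = 0x7ffddeadbee0ull;
  uintptr_t mangled = sp ^ guard;         // what setjmp-side mangling stores
  uintptr_t unmangled = mangled ^ guard;  // what UnmangleLongJmpSp recovers
  std::printf("round trip ok: %d\n", sp == unmangled);
  return 0;
}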
@@ -397,6 +415,8 @@ static uptr UnmangleLongJmpSp(uptr mangled_sp) {
# define LONG_JMP_SP_ENV_SLOT 13
# elif defined(__mips64)
# define LONG_JMP_SP_ENV_SLOT 1
+# elif defined(__s390x__)
+# define LONG_JMP_SP_ENV_SLOT 9
# else
# define LONG_JMP_SP_ENV_SLOT 6
# endif
@@ -483,7 +503,7 @@ ThreadState *cur_thread() {
dead_thread_state->fast_state.SetIgnoreBit();
dead_thread_state->ignore_interceptors = 1;
dead_thread_state->is_dead = true;
- *const_cast<int*>(&dead_thread_state->tid) = -1;
+ *const_cast<u32*>(&dead_thread_state->tid) = -1;
CHECK_EQ(0, internal_mprotect(dead_thread_state, sizeof(ThreadState),
PROT_READ));
}
diff --git a/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp b/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp
index 0740805822de..d9719a136b21 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp
@@ -234,7 +234,7 @@ static void my_pthread_introspection_hook(unsigned int event, pthread_t thread,
#endif
void InitializePlatformEarly() {
-#if !SANITIZER_GO && defined(__aarch64__)
+#if !SANITIZER_GO && !HAS_48_BIT_ADDRESS_SPACE
uptr max_vm = GetMaxUserVirtualAddress() + 1;
if (max_vm != Mapping::kHiAppMemEnd) {
Printf("ThreadSanitizer: unsupported vm address limit %p, expected %p.\n",
diff --git a/compiler-rt/lib/tsan/rtl/tsan_platform_posix.cpp b/compiler-rt/lib/tsan/rtl/tsan_platform_posix.cpp
index d56b6c3b9c54..1c6198cefcd7 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_platform_posix.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_platform_posix.cpp
@@ -72,11 +72,15 @@ void InitializeShadowMemory() {
InitializeShadowMemoryPlatform();
}
-static void ProtectRange(uptr beg, uptr end) {
+static bool TryProtectRange(uptr beg, uptr end) {
CHECK_LE(beg, end);
if (beg == end)
- return;
- if (beg != (uptr)MmapFixedNoAccess(beg, end - beg)) {
+ return true;
+ return beg == (uptr)MmapFixedNoAccess(beg, end - beg);
+}
+
+static void ProtectRange(uptr beg, uptr end) {
+ if (!TryProtectRange(beg, end)) {
Printf("FATAL: ThreadSanitizer can not protect [%zx,%zx]\n", beg, end);
Printf("FATAL: Make sure you are not using unlimited stack\n");
Die();
@@ -99,7 +103,7 @@ void CheckAndProtect() {
Die();
}
-#if defined(__aarch64__) && defined(__APPLE__)
+#if defined(__aarch64__) && defined(__APPLE__) && !HAS_48_BIT_ADDRESS_SPACE
ProtectRange(HeapMemEnd(), ShadowBeg());
ProtectRange(ShadowEnd(), MetaShadowBeg());
ProtectRange(MetaShadowEnd(), TraceMemBeg());
@@ -118,6 +122,16 @@ void CheckAndProtect() {
ProtectRange(TraceMemEnd(), HeapMemBeg());
ProtectRange(HeapEnd(), HiAppMemBeg());
#endif
+
+#if defined(__s390x__)
+ // Protect the rest of the address space.
+ const uptr user_addr_max_l4 = 0x0020000000000000ull;
+ const uptr user_addr_max_l5 = 0xfffffffffffff000ull;
+ // All the maintained s390x kernels support at least 4-level page tables.
+ ProtectRange(HiAppMemEnd(), user_addr_max_l4);
+ // Older s390x kernels may not support 5-level page tables.
+ TryProtectRange(user_addr_max_l4, user_addr_max_l5);
+#endif
}
#endif
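TryProtectRange lets the s390x code attempt to reserve the 5-level page-table range without aborting on kernels that cannot map it, while ProtectRange keeps its fatal behaviour. A hedged sketch of what reserving an inaccessible fixed range typically looks like on Linux (assuming kernel >= 4.17 for MAP_FIXED_NOREPLACE; MmapFixedNoAccess itself is sanitizer-internal):

#include <sys/mman.h>
#include <cstdint>
#include <cstdio>

// Try to reserve [beg, end) as inaccessible memory; false if the kernel
// cannot provide that range (e.g. it is beyond the supported address space).
static bool try_protect_range(uintptr_t beg, uintptr_t end) {
  if (beg == end) return true;
  void *res = mmap(reinterpret_cast<void *>(beg), end - beg, PROT_NONE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE |
                       MAP_FIXED_NOREPLACE,
                   -1, 0);
  return res == reinterpret_cast<void *>(beg);
}

int main() {
  // A high range that may or may not exist on this kernel configuration.
  bool ok = try_protect_range(0x0020000000000000ull, 0x0020000000010000ull);
  std::printf("reserved: %d\n", ok);
  return 0;
}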
diff --git a/compiler-rt/lib/tsan/rtl/tsan_report.cpp b/compiler-rt/lib/tsan/rtl/tsan_report.cpp
index 968c7b97553c..8ef9f0cd4fe8 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_report.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_report.cpp
@@ -69,7 +69,7 @@ ReportDesc::~ReportDesc() {
const int kThreadBufSize = 32;
const char *thread_name(char *buf, int tid) {
- if (tid == 0)
+ if (tid == kMainTid)
return "main thread";
internal_snprintf(buf, kThreadBufSize, "thread T%d", tid);
return buf;
@@ -127,7 +127,7 @@ void PrintStack(const ReportStack *ent) {
}
SymbolizedStack *frame = ent->frames;
for (int i = 0; frame && frame->info.address; frame = frame->next, i++) {
- InternalScopedString res(2 * GetPageSizeCached());
+ InternalScopedString res;
RenderFrame(&res, common_flags()->stack_trace_format, i,
frame->info.address, &frame->info,
common_flags()->symbolize_vs_style,
@@ -250,7 +250,7 @@ static void PrintMutex(const ReportMutex *rm) {
static void PrintThread(const ReportThread *rt) {
Decorator d;
- if (rt->id == 0) // Little sense in describing the main thread.
+ if (rt->id == kMainTid) // Little sense in describing the main thread.
return;
Printf("%s", d.ThreadDescription());
Printf(" Thread T%d", rt->id);
@@ -394,7 +394,7 @@ void PrintReport(const ReportDesc *rep) {
#else // #if !SANITIZER_GO
-const int kMainThreadId = 1;
+const u32 kMainGoroutineId = 1;
void PrintStack(const ReportStack *ent) {
if (ent == 0 || ent->frames == 0) {
@@ -415,7 +415,7 @@ static void PrintMop(const ReportMop *mop, bool first) {
Printf("%s at %p by ",
(first ? (mop->write ? "Write" : "Read")
: (mop->write ? "Previous write" : "Previous read")), mop->addr);
- if (mop->tid == kMainThreadId)
+ if (mop->tid == kMainGoroutineId)
Printf("main goroutine:\n");
else
Printf("goroutine %d:\n", mop->tid);
@@ -428,7 +428,7 @@ static void PrintLocation(const ReportLocation *loc) {
Printf("\n");
Printf("Heap block of size %zu at %p allocated by ",
loc->heap_chunk_size, loc->heap_chunk_start);
- if (loc->tid == kMainThreadId)
+ if (loc->tid == kMainGoroutineId)
Printf("main goroutine:\n");
else
Printf("goroutine %d:\n", loc->tid);
@@ -448,7 +448,7 @@ static void PrintLocation(const ReportLocation *loc) {
}
static void PrintThread(const ReportThread *rt) {
- if (rt->id == kMainThreadId)
+ if (rt->id == kMainGoroutineId)
return;
Printf("\n");
Printf("Goroutine %d (%s) created at:\n",
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
index 3d721eb95a2c..a21da9c81c6f 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
@@ -11,17 +11,19 @@
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//
+#include "tsan_rtl.h"
+
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_libc.h"
-#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
-#include "tsan_platform.h"
-#include "tsan_rtl.h"
+#include "tsan_interface.h"
#include "tsan_mman.h"
+#include "tsan_platform.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "ubsan/ubsan_init.h"
@@ -56,15 +58,26 @@ Context *ctx;
bool OnFinalize(bool failed);
void OnInitialize();
#else
+#include <dlfcn.h>
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnFinalize(bool failed) {
+#if !SANITIZER_GO
+ if (auto *ptr = dlsym(RTLD_DEFAULT, "__tsan_on_finalize"))
+ return reinterpret_cast<decltype(&__tsan_on_finalize)>(ptr)(failed);
+#endif
return failed;
}
SANITIZER_WEAK_CXX_DEFAULT_IMPL
-void OnInitialize() {}
+void OnInitialize() {
+#if !SANITIZER_GO
+ if (auto *ptr = dlsym(RTLD_DEFAULT, "__tsan_on_initialize")) {
+ return reinterpret_cast<decltype(&__tsan_on_initialize)>(ptr)();
+ }
+#endif
+}
#endif
-static char thread_registry_placeholder[sizeof(ThreadRegistry)];
+static ALIGNED(64) char thread_registry_placeholder[sizeof(ThreadRegistry)];
static ThreadContextBase *CreateThreadContext(u32 tid) {
// Map thread trace when context is created.
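With the dlsym lookup above, a program built with -fsanitize=thread can provide the __tsan_on_initialize/__tsan_on_finalize entry points declared earlier in this patch (tsan_interface.h) to run code when the runtime starts and finishes. A minimal sketch of such user-side hooks:

#include <cstdio>

extern "C" void __tsan_on_initialize() {
  std::puts("TSan runtime initialized");
}

extern "C" int __tsan_on_finalize(int failed) {
  std::puts(failed ? "TSan reported issues" : "TSan reported no issues");
  return failed;  // pass the verdict back so the exit-code logic is unchanged
}

int main() { return 0; }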
@@ -77,12 +90,19 @@ static ThreadContextBase *CreateThreadContext(u32 tid) {
new((void*)hdr) Trace();
// We are going to use only a small part of the trace with the default
// value of history_size. However, the constructor writes to the whole trace.
- // Unmap the unused part.
+ // Release the unused part.
uptr hdr_end = hdr + sizeof(Trace);
hdr_end -= sizeof(TraceHeader) * (kTraceParts - TraceParts());
hdr_end = RoundUp(hdr_end, GetPageSizeCached());
- if (hdr_end < hdr + sizeof(Trace))
- UnmapOrDie((void*)hdr_end, hdr + sizeof(Trace) - hdr_end);
+ if (hdr_end < hdr + sizeof(Trace)) {
+ ReleaseMemoryPagesToOS(hdr_end, hdr + sizeof(Trace));
+ uptr unused = hdr + sizeof(Trace) - hdr_end;
+ if (hdr_end != (uptr)MmapFixedNoAccess(hdr_end, unused)) {
+ Report("ThreadSanitizer: failed to mprotect(%p, %p)\n",
+ hdr_end, unused);
+ CHECK("unable to mprotect" && 0);
+ }
+ }
void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
return new(mem) ThreadContext(tid);
}
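Instead of unmapping the unused tail of the per-thread trace, the new code releases its pages back to the OS and then remaps the range as inaccessible, so the reserved address range stays intact. On Linux the release step is typically an madvise(MADV_DONTNEED); a hedged stand-alone sketch of that primitive (ReleaseMemoryPagesToOS itself is sanitizer-internal):

#include <sys/mman.h>
#include <cstdio>

int main() {
  const size_t len = 1 << 20;
  void *p = mmap(nullptr, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) return 1;
  // The mapping stays valid, but the kernel may reclaim the backing pages;
  // touching them again yields fresh zero pages.
  if (madvise(p, len, MADV_DONTNEED) != 0)
    std::perror("madvise");
  munmap(p, len);
  return 0;
}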
@@ -94,42 +114,45 @@ static const u32 kThreadQuarantineSize = 64;
#endif
Context::Context()
- : initialized()
- , report_mtx(MutexTypeReport, StatMtxReport)
- , nreported()
- , nmissed_expected()
- , thread_registry(new(thread_registry_placeholder) ThreadRegistry(
- CreateThreadContext, kMaxTid, kThreadQuarantineSize, kMaxTidReuse))
- , racy_mtx(MutexTypeRacy, StatMtxRacy)
- , racy_stacks()
- , racy_addresses()
- , fired_suppressions_mtx(MutexTypeFired, StatMtxFired)
- , clock_alloc("clock allocator") {
+ : initialized(),
+ report_mtx(MutexTypeReport),
+ nreported(),
+ nmissed_expected(),
+ thread_registry(new (thread_registry_placeholder) ThreadRegistry(
+ CreateThreadContext, kMaxTid, kThreadQuarantineSize, kMaxTidReuse)),
+ racy_mtx(MutexTypeRacy),
+ racy_stacks(),
+ racy_addresses(),
+ fired_suppressions_mtx(MutexTypeFired),
+ clock_alloc(LINKER_INITIALIZED, "clock allocator") {
fired_suppressions.reserve(8);
}
// The objects are allocated in TLS, so one may rely on zero-initialization.
-ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
- unsigned reuse_count,
- uptr stk_addr, uptr stk_size,
+ThreadState::ThreadState(Context *ctx, u32 tid, int unique_id, u64 epoch,
+ unsigned reuse_count, uptr stk_addr, uptr stk_size,
uptr tls_addr, uptr tls_size)
- : fast_state(tid, epoch)
- // Do not touch these, rely on zero initialization,
- // they may be accessed before the ctor.
- // , ignore_reads_and_writes()
- // , ignore_interceptors()
- , clock(tid, reuse_count)
+ : fast_state(tid, epoch)
+ // Do not touch these, rely on zero initialization,
+ // they may be accessed before the ctor.
+ // , ignore_reads_and_writes()
+ // , ignore_interceptors()
+ ,
+ clock(tid, reuse_count)
#if !SANITIZER_GO
- , jmp_bufs()
+ ,
+ jmp_bufs()
#endif
- , tid(tid)
- , unique_id(unique_id)
- , stk_addr(stk_addr)
- , stk_size(stk_size)
- , tls_addr(tls_addr)
- , tls_size(tls_size)
+ ,
+ tid(tid),
+ unique_id(unique_id),
+ stk_addr(stk_addr),
+ stk_size(stk_size),
+ tls_addr(tls_addr),
+ tls_size(tls_size)
#if !SANITIZER_GO
- , last_sleep_clock(tid)
+ ,
+ last_sleep_clock(tid)
#endif
{
}
@@ -160,12 +183,12 @@ static void *BackgroundThread(void *arg) {
} else if (internal_strcmp(flags()->profile_memory, "stderr") == 0) {
mprof_fd = 2;
} else {
- InternalScopedString filename(kMaxPathLength);
+ InternalScopedString filename;
filename.append("%s.%d", flags()->profile_memory, (int)internal_getpid());
fd_t fd = OpenFile(filename.data(), WrOnly);
if (fd == kInvalidFd) {
Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
- &filename[0]);
+ filename.data());
} else {
mprof_fd = fd;
}
@@ -351,6 +374,18 @@ static void TsanOnDeadlySignal(int signo, void *siginfo, void *context) {
}
#endif
+void CheckUnwind() {
+  // There is a high probability that interceptors will check-fail as well;
+  // on the other hand, there is no point in processing interceptors
+  // since we are going to die soon.
+ ScopedIgnoreInterceptors ignore;
+#if !SANITIZER_GO
+ cur_thread()->ignore_sync++;
+ cur_thread()->ignore_reads_and_writes++;
+#endif
+ PrintCurrentStackSlow(StackTrace::GetCurrentPc());
+}
+
void Initialize(ThreadState *thr) {
// Thread safe because done before all threads exist.
static bool is_initialized = false;
@@ -361,7 +396,7 @@ void Initialize(ThreadState *thr) {
ScopedIgnoreInterceptors ignore;
SanitizerToolName = "ThreadSanitizer";
// Install tool-specific callbacks in sanitizer_common.
- SetCheckFailedCallback(TsanCheckFailed);
+ SetCheckUnwindCallback(CheckUnwind);
ctx = new(ctx_placeholder) Context;
const char *env_name = SANITIZER_GO ? "GORACE" : "TSAN_OPTIONS";
@@ -387,7 +422,6 @@ void Initialize(ThreadState *thr) {
InitializeInterceptors();
CheckShadowMapping();
InitializePlatform();
- InitializeMutex();
InitializeDynamicAnnotations();
#if !SANITIZER_GO
InitializeShadowMemory();
@@ -487,35 +521,37 @@ int Finalize(ThreadState *thr) {
failed = OnFinalize(failed);
-#if TSAN_COLLECT_STATS
- StatAggregate(ctx->stat, thr->stat);
- StatOutput(ctx->stat);
-#endif
-
return failed ? common_flags()->exitcode : 0;
}
#if !SANITIZER_GO
-void ForkBefore(ThreadState *thr, uptr pc) {
+void ForkBefore(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
ctx->thread_registry->Lock();
ctx->report_mtx.Lock();
- // Ignore memory accesses in the pthread_atfork callbacks.
- // If any of them triggers a data race we will deadlock
- // on the report_mtx.
- // We could ignore interceptors and sync operations as well,
+ ScopedErrorReportLock::Lock();
+ // Suppress all reports in the pthread_atfork callbacks.
+ // Reports will deadlock on the report_mtx.
+ // We could ignore sync operations as well,
// but so far it's unclear if it will do more good or harm.
// Unnecessarily ignoring things can lead to false positives later.
- ThreadIgnoreBegin(thr, pc);
+ thr->suppress_reports++;
+ // On OS X, REAL(fork) can call intercepted functions (OSSpinLockLock), and
+ // we'll assert in CheckNoLocks() unless we ignore interceptors.
+ thr->ignore_interceptors++;
}
-void ForkParentAfter(ThreadState *thr, uptr pc) {
- ThreadIgnoreEnd(thr, pc); // Begin is in ForkBefore.
+void ForkParentAfter(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
+ thr->suppress_reports--; // Enabled in ForkBefore.
+ thr->ignore_interceptors--;
+ ScopedErrorReportLock::Unlock();
ctx->report_mtx.Unlock();
ctx->thread_registry->Unlock();
}
-void ForkChildAfter(ThreadState *thr, uptr pc) {
- ThreadIgnoreEnd(thr, pc); // Begin is in ForkBefore.
+void ForkChildAfter(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
+ thr->suppress_reports--; // Enabled in ForkBefore.
+ thr->ignore_interceptors--;
+ ScopedErrorReportLock::Unlock();
ctx->report_mtx.Unlock();
ctx->thread_registry->Unlock();
@@ -654,9 +690,6 @@ ALWAYS_INLINE
void MemoryAccessImpl1(ThreadState *thr, uptr addr,
int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
u64 *shadow_mem, Shadow cur) {
- StatInc(thr, StatMop);
- StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
- StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
// This potentially can live in an MMX/SSE scratch register.
// The required intrinsics are:
@@ -713,7 +746,6 @@ void MemoryAccessImpl1(ThreadState *thr, uptr addr,
return;
// choose a random candidate slot and replace it
StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
- StatInc(thr, StatShadowReplace);
return;
RACE:
HandleRace(thr, shadow_mem, cur, old);
@@ -852,19 +884,11 @@ void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
if (!SANITIZER_GO && !kAccessIsWrite && *shadow_mem == kShadowRodata) {
// Access to .rodata section, no races here.
// Measurements show that it can be 10-20% of all memory accesses.
- StatInc(thr, StatMop);
- StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
- StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
- StatInc(thr, StatMopRodata);
return;
}
FastState fast_state = thr->fast_state;
if (UNLIKELY(fast_state.GetIgnoreBit())) {
- StatInc(thr, StatMop);
- StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
- StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
- StatInc(thr, StatMopIgnored);
return;
}
@@ -875,10 +899,6 @@ void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
thr->fast_synch_epoch, kAccessIsWrite))) {
- StatInc(thr, StatMop);
- StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
- StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
- StatInc(thr, StatMopSame);
return;
}
@@ -900,10 +920,6 @@ void MemoryAccessImpl(ThreadState *thr, uptr addr,
u64 *shadow_mem, Shadow cur) {
if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
thr->fast_synch_epoch, kAccessIsWrite))) {
- StatInc(thr, StatMop);
- StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
- StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
- StatInc(thr, StatMopSame);
return;
}
@@ -960,7 +976,6 @@ static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
// Reset middle part.
u64 *p1 = p;
p = RoundDown(end, kPageSize);
- UnmapOrDie((void*)p1, (uptr)p - (uptr)p1);
if (!MmapFixedSuperNoReserve((uptr)p1, (uptr)p - (uptr)p1))
Die();
// Set the ending.
@@ -1020,7 +1035,6 @@ void MemoryRangeImitateWriteOrResetRange(ThreadState *thr, uptr pc, uptr addr,
ALWAYS_INLINE USED
void FuncEntry(ThreadState *thr, uptr pc) {
- StatInc(thr, StatFuncEnter);
DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
if (kCollectHistory) {
thr->fast_state.IncrementEpoch();
@@ -1042,7 +1056,6 @@ void FuncEntry(ThreadState *thr, uptr pc) {
ALWAYS_INLINE USED
void FuncExit(ThreadState *thr) {
- StatInc(thr, StatFuncExit);
DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
if (kCollectHistory) {
thr->fast_state.IncrementEpoch();
@@ -1117,15 +1130,30 @@ void build_consistency_debug() {}
void build_consistency_release() {}
#endif
-#if TSAN_COLLECT_STATS
-void build_consistency_stats() {}
-#else
-void build_consistency_nostats() {}
-#endif
-
} // namespace __tsan
+#if SANITIZER_CHECK_DEADLOCKS
+namespace __sanitizer {
+using namespace __tsan;
+MutexMeta mutex_meta[] = {
+ {MutexInvalid, "Invalid", {}},
+ {MutexThreadRegistry, "ThreadRegistry", {}},
+ {MutexTypeTrace, "Trace", {MutexLeaf}},
+ {MutexTypeReport, "Report", {MutexTypeSyncVar}},
+ {MutexTypeSyncVar, "SyncVar", {}},
+ {MutexTypeAnnotations, "Annotations", {}},
+ {MutexTypeAtExit, "AtExit", {MutexTypeSyncVar}},
+ {MutexTypeFired, "Fired", {MutexLeaf}},
+ {MutexTypeRacy, "Racy", {MutexLeaf}},
+ {MutexTypeGlobalProc, "GlobalProc", {}},
+ {},
+};
+
+void PrintMutexPC(uptr pc) { StackTrace(&pc, 1).Print(); }
+} // namespace __sanitizer
+#endif
+
#if !SANITIZER_GO
// Must be included in this file to make sure everything is inlined.
-#include "tsan_interface_inl.h"
+# include "tsan_interface_inl.h"
#endif
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.h b/compiler-rt/lib/tsan/rtl/tsan_rtl.h
index 04d474e044e1..8567d0ade877 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.h
@@ -84,9 +84,6 @@ typedef Allocator::AllocatorCache AllocatorCache;
Allocator *allocator();
#endif
-void TsanCheckFailed(const char *file, int line, const char *cond,
- u64 v1, u64 v2);
-
const u64 kShadowRodata = (u64)-1; // .rodata shadow marker
// FastState (from most significant bit):
@@ -403,10 +400,7 @@ struct ThreadState {
Vector<JmpBuf> jmp_bufs;
int ignore_interceptors;
#endif
-#if TSAN_COLLECT_STATS
- u64 stat[StatCnt];
-#endif
- const int tid;
+ const u32 tid;
const int unique_id;
bool in_symbolizer;
bool in_ignored_lib;
@@ -420,9 +414,6 @@ struct ThreadState {
const uptr tls_size;
ThreadContext *tctx;
-#if SANITIZER_DEBUG && !SANITIZER_GO
- InternalDeadlockDetector internal_deadlock_detector;
-#endif
DDLogicalThread *dd_lt;
// Current wired Processor, or nullptr. Required to handle any events.
@@ -447,9 +438,8 @@ struct ThreadState {
const ReportDesc *current_report;
- explicit ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
- unsigned reuse_count,
- uptr stk_addr, uptr stk_size,
+ explicit ThreadState(Context *ctx, u32 tid, int unique_id, u64 epoch,
+ unsigned reuse_count, uptr stk_addr, uptr stk_size,
uptr tls_addr, uptr tls_size);
};
@@ -554,7 +544,6 @@ struct Context {
Flags flags;
- u64 stat[StatCnt];
u64 int_alloc_cnt[MBlockTypeCount];
u64 int_alloc_siz[MBlockTypeCount];
};
@@ -624,6 +613,7 @@ class ScopedReport : public ScopedReportBase {
ScopedErrorReportLock lock_;
};
+bool ShouldReport(ThreadState *thr, ReportType typ);
ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack);
void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
MutexSet *mset, uptr *tag = nullptr);
@@ -661,22 +651,6 @@ void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack,
ObtainCurrentStack(thr, pc, &stack); \
stack.ReverseOrder();
-#if TSAN_COLLECT_STATS
-void StatAggregate(u64 *dst, u64 *src);
-void StatOutput(u64 *stat);
-#endif
-
-void ALWAYS_INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) {
-#if TSAN_COLLECT_STATS
- thr->stat[typ] += n;
-#endif
-}
-void ALWAYS_INLINE StatSet(ThreadState *thr, StatType typ, u64 n) {
-#if TSAN_COLLECT_STATS
- thr->stat[typ] = n;
-#endif
-}
-
void MapShadow(uptr addr, uptr size);
void MapThreadTrace(uptr addr, uptr size, const char *name);
void DontNeedShadowFor(uptr addr, uptr size);
@@ -857,7 +831,6 @@ void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
DCHECK_GE((int)typ, 0);
DCHECK_LE((int)typ, 7);
DCHECK_EQ(GetLsb(addr, kEventPCBits), addr);
- StatInc(thr, StatEvents);
u64 pos = fs.GetTracePos();
if (UNLIKELY((pos % kTracePartSize) == 0)) {
#if !SANITIZER_GO
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
index 27897f0592b0..27ae279d6304 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
@@ -51,6 +51,8 @@ static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
// or false positives (e.g. unlock in a different thread).
if (SANITIZER_GO)
return;
+ if (!ShouldReport(thr, typ))
+ return;
ThreadRegistryLock l(ctx->thread_registry);
ScopedReport rep(typ);
rep.AddMutex(mid);
@@ -61,9 +63,8 @@ static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
OutputReport(thr, rep);
}
-void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
+void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) NO_THREAD_SAFETY_ANALYSIS {
DPrintf("#%d: MutexCreate %zx flagz=0x%x\n", thr->tid, addr, flagz);
- StatInc(thr, StatMutexCreate);
if (!(flagz & MutexFlagLinkerInit) && IsAppMem(addr)) {
CHECK(!thr->is_freeing);
thr->is_freeing = true;
@@ -77,9 +78,8 @@ void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
s->mtx.Unlock();
}
-void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
+void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) NO_THREAD_SAFETY_ANALYSIS {
DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
- StatInc(thr, StatMutexDestroy);
SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
if (s == 0)
return;
@@ -96,9 +96,8 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
ctx->dd->MutexInit(&cb, &s->dd);
}
bool unlock_locked = false;
- if (flags()->report_destroy_locked
- && s->owner_tid != SyncVar::kInvalidTid
- && !s->IsFlagSet(MutexFlagBroken)) {
+ if (flags()->report_destroy_locked && s->owner_tid != kInvalidTid &&
+ !s->IsFlagSet(MutexFlagBroken)) {
s->SetFlags(MutexFlagBroken);
unlock_locked = true;
}
@@ -107,7 +106,7 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
if (!unlock_locked)
s->Reset(thr->proc()); // must not reset it before the report is printed
s->mtx.Unlock();
- if (unlock_locked) {
+ if (unlock_locked && ShouldReport(thr, ReportTypeMutexDestroyLocked)) {
ThreadRegistryLock l(ctx->thread_registry);
ScopedReport rep(ReportTypeMutexDestroyLocked);
rep.AddMutex(mid);
@@ -139,7 +138,7 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
// s will be destroyed and freed in MetaMap::FreeBlock.
}
-void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
+void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) NO_THREAD_SAFETY_ANALYSIS {
DPrintf("#%d: MutexPreLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
@@ -155,7 +154,8 @@ void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
}
}
-void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) {
+void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz,
+ int rec) NO_THREAD_SAFETY_ANALYSIS {
DPrintf("#%d: MutexPostLock %zx flag=0x%x rec=%d\n",
thr->tid, addr, flagz, rec);
if (flagz & MutexFlagRecursiveLock)
@@ -169,7 +169,7 @@ void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) {
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
bool report_double_lock = false;
- if (s->owner_tid == SyncVar::kInvalidTid) {
+ if (s->owner_tid == kInvalidTid) {
CHECK_EQ(s->recursion, 0);
s->owner_tid = thr->tid;
s->last_lock = thr->fast_state.raw();
@@ -182,11 +182,9 @@ void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) {
const bool first = s->recursion == 0;
s->recursion += rec;
if (first) {
- StatInc(thr, StatMutexLock);
AcquireImpl(thr, pc, &s->clock);
AcquireImpl(thr, pc, &s->read_clock);
} else if (!s->IsFlagSet(MutexFlagWriteReentrant)) {
- StatInc(thr, StatMutexRecLock);
}
thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
bool pre_lock = false;
@@ -210,7 +208,7 @@ void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) {
}
}
-int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
+int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) NO_THREAD_SAFETY_ANALYSIS {
DPrintf("#%d: MutexUnlock %zx flagz=0x%x\n", thr->tid, addr, flagz);
if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1);
@@ -228,11 +226,9 @@ int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
rec = (flagz & MutexFlagRecursiveUnlock) ? s->recursion : 1;
s->recursion -= rec;
if (s->recursion == 0) {
- StatInc(thr, StatMutexUnlock);
- s->owner_tid = SyncVar::kInvalidTid;
+ s->owner_tid = kInvalidTid;
ReleaseStoreImpl(thr, pc, &s->clock);
} else {
- StatInc(thr, StatMutexRecUnlock);
}
}
thr->mset.Del(s->GetId(), true);
@@ -253,7 +249,7 @@ int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
return rec;
}
-void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
+void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) NO_THREAD_SAFETY_ANALYSIS {
DPrintf("#%d: MutexPreReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
@@ -265,9 +261,8 @@ void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
}
}
-void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
+void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) NO_THREAD_SAFETY_ANALYSIS {
DPrintf("#%d: MutexPostReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
- StatInc(thr, StatMutexReadLock);
if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1);
SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
@@ -275,7 +270,7 @@ void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
bool report_bad_lock = false;
- if (s->owner_tid != SyncVar::kInvalidTid) {
+ if (s->owner_tid != kInvalidTid) {
if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
s->SetFlags(MutexFlagBroken);
report_bad_lock = true;
@@ -305,16 +300,15 @@ void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
}
}
-void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
+void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
- StatInc(thr, StatMutexReadUnlock);
if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1);
SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
bool report_bad_unlock = false;
- if (s->owner_tid != SyncVar::kInvalidTid) {
+ if (s->owner_tid != kInvalidTid) {
if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
s->SetFlags(MutexFlagBroken);
report_bad_unlock = true;
@@ -337,17 +331,16 @@ void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
}
}
-void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
+void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1);
SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
bool write = true;
bool report_bad_unlock = false;
- if (s->owner_tid == SyncVar::kInvalidTid) {
+ if (s->owner_tid == kInvalidTid) {
// Seems to be read unlock.
write = false;
- StatInc(thr, StatMutexReadUnlock);
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
ReleaseImpl(thr, pc, &s->read_clock);
@@ -358,11 +351,9 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
CHECK_GT(s->recursion, 0);
s->recursion--;
if (s->recursion == 0) {
- StatInc(thr, StatMutexUnlock);
- s->owner_tid = SyncVar::kInvalidTid;
+ s->owner_tid = kInvalidTid;
ReleaseStoreImpl(thr, pc, &s->clock);
} else {
- StatInc(thr, StatMutexRecUnlock);
}
} else if (!s->IsFlagSet(MutexFlagBroken)) {
s->SetFlags(MutexFlagBroken);
@@ -384,15 +375,15 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
}
}
-void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
+void MutexRepair(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
- s->owner_tid = SyncVar::kInvalidTid;
+ s->owner_tid = kInvalidTid;
s->recursion = 0;
s->mtx.Unlock();
}
-void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) {
+void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
DPrintf("#%d: MutexInvalidAccess %zx\n", thr->tid, addr);
SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
u64 mid = s->GetId();
@@ -400,7 +391,7 @@ void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) {
ReportMutexMisuse(thr, pc, ReportTypeMutexInvalidAccess, addr, mid);
}
-void Acquire(ThreadState *thr, uptr pc, uptr addr) {
+void Acquire(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
if (thr->ignore_sync)
return;
@@ -431,7 +422,7 @@ void AcquireGlobal(ThreadState *thr, uptr pc) {
UpdateClockCallback, thr);
}
-void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr) {
+void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
DPrintf("#%d: ReleaseStoreAcquire %zx\n", thr->tid, addr);
if (thr->ignore_sync)
return;
@@ -443,7 +434,7 @@ void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr) {
s->mtx.Unlock();
}
-void Release(ThreadState *thr, uptr pc, uptr addr) {
+void Release(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
DPrintf("#%d: Release %zx\n", thr->tid, addr);
if (thr->ignore_sync)
return;
@@ -455,7 +446,7 @@ void Release(ThreadState *thr, uptr pc, uptr addr) {
s->mtx.Unlock();
}
-void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
+void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
if (thr->ignore_sync)
return;
@@ -493,7 +484,6 @@ void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
return;
thr->clock.set(thr->fast_state.epoch());
thr->clock.acquire(&thr->proc()->clock_cache, c);
- StatInc(thr, StatSyncAcquire);
}
void ReleaseStoreAcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
@@ -502,7 +492,6 @@ void ReleaseStoreAcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
thr->clock.set(thr->fast_state.epoch());
thr->fast_synch_epoch = thr->fast_state.epoch();
thr->clock.releaseStoreAcquire(&thr->proc()->clock_cache, c);
- StatInc(thr, StatSyncReleaseStoreAcquire);
}
void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
@@ -511,7 +500,6 @@ void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
thr->clock.set(thr->fast_state.epoch());
thr->fast_synch_epoch = thr->fast_state.epoch();
thr->clock.release(&thr->proc()->clock_cache, c);
- StatInc(thr, StatSyncRelease);
}
void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
@@ -520,7 +508,6 @@ void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
thr->clock.set(thr->fast_state.epoch());
thr->fast_synch_epoch = thr->fast_state.epoch();
thr->clock.ReleaseStore(&thr->proc()->clock_cache, c);
- StatInc(thr, StatSyncRelease);
}
void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
@@ -529,12 +516,10 @@ void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
thr->clock.set(thr->fast_state.epoch());
thr->fast_synch_epoch = thr->fast_state.epoch();
thr->clock.acq_rel(&thr->proc()->clock_cache, c);
- StatInc(thr, StatSyncAcquire);
- StatInc(thr, StatSyncRelease);
}
void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
- if (r == 0)
+ if (r == 0 || !ShouldReport(thr, ReportTypeDeadlock))
return;
ThreadRegistryLock l(ctx->thread_registry);
ScopedReport rep(ReportTypeDeadlock);
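The recurring change in this file is the early bail-out: each misuse/deadlock path now consults ShouldReport() before it locks ctx->thread_registry and constructs a ScopedReport, so disabled report types never take the heavy report locks. A minimal sketch of that ordering, using plain std::mutex and hypothetical names (g_registry_mu, ReportMisuse) rather than the runtime's own types:

    #include <mutex>

    enum ReportType { kMutexMisuse, kDeadlock };

    struct Flags { bool report_mutex_bugs = true; };
    Flags g_flags;
    std::mutex g_registry_mu;  // stands in for ctx->thread_registry's lock

    // Cheap, lock-free gate consulted first, mirroring ShouldReport().
    bool ShouldReport(ReportType typ) {
      switch (typ) {
        case kMutexMisuse: return g_flags.report_mutex_bugs;
        default:           return true;
      }
    }

    void ReportMisuse(ReportType typ) {
      if (!ShouldReport(typ))  // bail out before any lock is taken
        return;
      std::lock_guard<std::mutex> lock(g_registry_mu);  // only now lock the registry
      // ... collect mutexes, threads and stacks, then print the report ...
    }

    int main() { ReportMisuse(kMutexMisuse); }

In the runtime, ShouldReport() also asserts that no internal locks are held yet, which is why it has to run before ThreadRegistryLock is taken.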
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
index 208d0df44df7..3e809e653c70 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
@@ -31,23 +31,6 @@ using namespace __sanitizer;
static ReportStack *SymbolizeStack(StackTrace trace);
-void TsanCheckFailed(const char *file, int line, const char *cond,
- u64 v1, u64 v2) {
- // There is high probability that interceptors will check-fail as well,
- // on the other hand there is no sense in processing interceptors
- // since we are going to die soon.
- ScopedIgnoreInterceptors ignore;
-#if !SANITIZER_GO
- cur_thread()->ignore_sync++;
- cur_thread()->ignore_reads_and_writes++;
-#endif
- Printf("FATAL: ThreadSanitizer CHECK failed: "
- "%s:%d \"%s\" (0x%zx, 0x%zx)\n",
- file, line, cond, (uptr)v1, (uptr)v2);
- PrintCurrentStackSlow(StackTrace::GetCurrentPc());
- Die();
-}
-
// Can be overridden by an application/test to intercept reports.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnReport(const ReportDesc *rep, bool suppressed);
@@ -142,6 +125,34 @@ static ReportStack *SymbolizeStack(StackTrace trace) {
return stack;
}
+bool ShouldReport(ThreadState *thr, ReportType typ) {
+ // We set thr->suppress_reports in the fork context.
+ // Taking any locks in the fork context can lead to deadlocks.
+ // If any locks are already taken, it's too late to do this check.
+ CheckedMutex::CheckNoLocks();
+ // For the same reason check we didn't lock thread_registry yet.
+ if (SANITIZER_DEBUG)
+ ThreadRegistryLock l(ctx->thread_registry);
+ if (!flags()->report_bugs || thr->suppress_reports)
+ return false;
+ switch (typ) {
+ case ReportTypeSignalUnsafe:
+ return flags()->report_signal_unsafe;
+ case ReportTypeThreadLeak:
+#if !SANITIZER_GO
+ // It's impossible to join phantom threads
+ // in the child after fork.
+ if (ctx->after_multithreaded_fork)
+ return false;
+#endif
+ return flags()->report_thread_leaks;
+ case ReportTypeMutexDestroyLocked:
+ return flags()->report_destroy_locked;
+ default:
+ return true;
+ }
+}
+
ScopedReportBase::ScopedReportBase(ReportType typ, uptr tag) {
ctx->thread_registry->CheckLocked();
void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
@@ -274,7 +285,7 @@ void ScopedReportBase::AddMutex(const SyncVar *s) {
rm->stack = SymbolizeStackId(s->creation_stack_id);
}
-u64 ScopedReportBase::AddMutex(u64 id) {
+u64 ScopedReportBase::AddMutex(u64 id) NO_THREAD_SAFETY_ANALYSIS {
u64 uid = 0;
u64 mid = id;
uptr addr = SyncVar::SplitId(id, &uid);
@@ -497,8 +508,10 @@ static bool HandleRacyAddress(ThreadState *thr, uptr addr_min, uptr addr_max) {
}
bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
- if (!flags()->report_bugs || thr->suppress_reports)
- return false;
+ // These should have been checked in ShouldReport.
+ // It's too late to check them here; we have already taken locks.
+ CHECK(flags()->report_bugs);
+ CHECK(!thr->suppress_reports);
atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime());
const ReportDesc *rep = srep.GetReport();
CHECK_EQ(thr->current_report, nullptr);
@@ -583,13 +596,13 @@ static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
}
void ReportRace(ThreadState *thr) {
- CheckNoLocks(thr);
+ CheckedMutex::CheckNoLocks();
// Symbolizer makes lots of intercepted calls. If we try to process them,
// at best it will cause deadlocks on internal mutexes.
ScopedIgnoreInterceptors ignore;
- if (!flags()->report_bugs)
+ if (!ShouldReport(thr, ReportTypeRace))
return;
if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
return;
@@ -722,8 +735,7 @@ void PrintCurrentStack(ThreadState *thr, uptr pc) {
// However, this solution is not reliable enough, please see dvyukov's comment
// http://reviews.llvm.org/D19148#406208
// Also see PR27280 comment 2 and 3 for breaking examples and analysis.
-ALWAYS_INLINE
-void PrintCurrentStackSlow(uptr pc) {
+ALWAYS_INLINE USED void PrintCurrentStackSlow(uptr pc) {
#if !SANITIZER_GO
uptr bp = GET_CURRENT_FRAME();
BufferedStackTrace *ptrace =
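Several functions in this patch (AddMutex here, and most of the Mutex* entry points in tsan_rtl_mutex.cpp) gain NO_THREAD_SAFETY_ANALYSIS, presumably because they acquire SyncVar::mtx on one path and release it on another, a shape Clang's -Wthread-safety analysis cannot follow. A small standalone sketch of that situation, with a plain std::mutex and hypothetical names, showing where the Clang attribute goes:

    #include <mutex>

    std::mutex g_sync_mu;  // stands in for SyncVar::mtx
    bool g_broken = false;

    // Takes the lock but may hand it back to the caller still held; Clang's
    // analysis cannot model that, so the function opts out.
    __attribute__((no_thread_safety_analysis))
    bool LockUnlessBroken() {
      g_sync_mu.lock();
      if (g_broken) {   // error path: release immediately and report failure
        g_sync_mu.unlock();
        return false;
      }
      return true;      // success path: the caller must unlock g_sync_mu
    }

    int main() {
      if (LockUnlessBroken())
        g_sync_mu.unlock();
    }

The attribute only disables the static analysis for the annotated function; the locking behaviour itself is unchanged.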
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_s390x.S b/compiler-rt/lib/tsan/rtl/tsan_rtl_s390x.S
new file mode 100644
index 000000000000..fcff35fbc7e0
--- /dev/null
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_s390x.S
@@ -0,0 +1,47 @@
+#include "sanitizer_common/sanitizer_asm.h"
+
+#define CFA_OFFSET 160
+#define R2_REL_OFFSET 16
+#define R3_REL_OFFSET 24
+#define R14_REL_OFFSET 112
+#define R15_REL_OFFSET 120
+#define FRAME_SIZE 160
+
+.text
+
+ASM_HIDDEN(__tsan_setjmp)
+
+.macro intercept symbol, real
+.comm \real, 8, 8
+.globl ASM_SYMBOL_INTERCEPTOR(\symbol)
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(\symbol))
+ASM_SYMBOL_INTERCEPTOR(\symbol):
+ CFI_STARTPROC
+ stmg %r2, %r3, R2_REL_OFFSET(%r15)
+ CFI_REL_OFFSET(%r2, R2_REL_OFFSET)
+ CFI_REL_OFFSET(%r3, R3_REL_OFFSET)
+ stmg %r14, %r15, R14_REL_OFFSET(%r15)
+ CFI_REL_OFFSET(%r14, R14_REL_OFFSET)
+ CFI_REL_OFFSET(%r15, R15_REL_OFFSET)
+ aghi %r15, -FRAME_SIZE
+ CFI_ADJUST_CFA_OFFSET(FRAME_SIZE)
+ la %r2, FRAME_SIZE(%r15)
+ brasl %r14, ASM_SYMBOL(__tsan_setjmp)
+ lmg %r14, %r15, FRAME_SIZE + R14_REL_OFFSET(%r15)
+ CFI_RESTORE(%r14)
+ CFI_RESTORE(%r15)
+ CFI_DEF_CFA_OFFSET(CFA_OFFSET)
+ lmg %r2, %r3, R2_REL_OFFSET(%r15)
+ CFI_RESTORE(%r2)
+ CFI_RESTORE(%r3)
+ larl %r1, \real
+ lg %r1, 0(%r1)
+ br %r1
+ CFI_ENDPROC
+ ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(\symbol))
+.endm
+
+intercept setjmp, _ZN14__interception11real_setjmpE
+intercept _setjmp, _ZN14__interception12real__setjmpE
+intercept sigsetjmp, _ZN14__interception14real_sigsetjmpE
+intercept __sigsetjmp, _ZN14__interception16real___sigsetjmpE
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp
index d80146735ea7..cdb6e60ebbd0 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp
@@ -51,7 +51,7 @@ struct OnCreatedArgs {
void ThreadContext::OnCreated(void *arg) {
thr = 0;
- if (tid == 0)
+ if (tid == kMainTid)
return;
OnCreatedArgs *args = static_cast<OnCreatedArgs *>(arg);
if (!args->thr) // GCD workers don't have a parent thread.
@@ -61,8 +61,6 @@ void ThreadContext::OnCreated(void *arg) {
TraceAddEvent(args->thr, args->thr->fast_state, EventTypeMop, 0);
ReleaseImpl(args->thr, 0, &sync);
creation_stack_id = CurrentStackId(args->thr, args->pc);
- if (reuse_count == 0)
- StatInc(args->thr, StatThreadMaxTid);
}
void ThreadContext::OnReset() {
@@ -115,7 +113,6 @@ void ThreadContext::OnStarted(void *arg) {
thr->fast_synch_epoch = epoch0;
AcquireImpl(thr, 0, &sync);
- StatInc(thr, StatSyncAcquire);
sync.Reset(&thr->proc()->clock_cache);
thr->is_inited = true;
DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
@@ -149,9 +146,6 @@ void ThreadContext::OnFinished() {
PlatformCleanUpThreadState(thr);
#endif
thr->~ThreadState();
-#if TSAN_COLLECT_STATS
- StatAggregate(ctx->stat, thr->stat);
-#endif
thr = 0;
}
@@ -179,7 +173,7 @@ static void MaybeReportThreadLeak(ThreadContextBase *tctx_base, void *arg) {
#if !SANITIZER_GO
static void ReportIgnoresEnabled(ThreadContext *tctx, IgnoreSet *set) {
- if (tctx->tid == 0) {
+ if (tctx->tid == kMainTid) {
Printf("ThreadSanitizer: main thread finished with ignores enabled\n");
} else {
Printf("ThreadSanitizer: thread T%d %s finished with ignores enabled,"
@@ -210,7 +204,7 @@ static void ThreadCheckIgnore(ThreadState *thr) {}
void ThreadFinalize(ThreadState *thr) {
ThreadCheckIgnore(thr);
#if !SANITIZER_GO
- if (!flags()->report_thread_leaks)
+ if (!ShouldReport(thr, ReportTypeThreadLeak))
return;
ThreadRegistryLock l(ctx->thread_registry);
Vector<ThreadLeak> leaks;
@@ -232,13 +226,11 @@ int ThreadCount(ThreadState *thr) {
}
int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
- StatInc(thr, StatThreadCreate);
OnCreatedArgs args = { thr, pc };
u32 parent_tid = thr ? thr->tid : kInvalidTid; // No parent for GCD workers.
int tid =
ctx->thread_registry->CreateThread(uid, detached, parent_tid, &args);
DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", parent_tid, tid, uid);
- StatSet(thr, StatThreadMaxAlive, ctx->thread_registry->GetMaxAliveThreads());
return tid;
}
@@ -250,9 +242,10 @@ void ThreadStart(ThreadState *thr, int tid, tid_t os_id,
uptr tls_size = 0;
#if !SANITIZER_GO
if (thread_type != ThreadType::Fiber)
- GetThreadStackAndTls(tid == 0, &stk_addr, &stk_size, &tls_addr, &tls_size);
+ GetThreadStackAndTls(tid == kMainTid, &stk_addr, &stk_size, &tls_addr,
+ &tls_size);
- if (tid) {
+ if (tid != kMainTid) {
if (stk_addr && stk_size)
MemoryRangeImitateWrite(thr, /*pc=*/ 1, stk_addr, stk_size);
@@ -279,7 +272,6 @@ void ThreadStart(ThreadState *thr, int tid, tid_t os_id,
void ThreadFinish(ThreadState *thr) {
ThreadCheckIgnore(thr);
- StatInc(thr, StatThreadFinish);
if (thr->stk_addr && thr->stk_size)
DontNeedShadowFor(thr->stk_addr, thr->stk_size);
if (thr->tls_addr && thr->tls_size)
@@ -313,7 +305,7 @@ static bool ConsumeThreadByUid(ThreadContextBase *tctx, void *arg) {
int ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid) {
ConsumeThreadContext findCtx = {uid, nullptr};
ctx->thread_registry->FindThread(ConsumeThreadByUid, &findCtx);
- int tid = findCtx.tctx ? findCtx.tctx->tid : ThreadRegistry::kUnknownTid;
+ int tid = findCtx.tctx ? findCtx.tctx->tid : kInvalidTid;
DPrintf("#%d: ThreadTid uid=%zu tid=%d\n", thr->tid, uid, tid);
return tid;
}
@@ -371,13 +363,10 @@ void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
}
#endif
- StatInc(thr, StatMopRange);
-
if (*shadow_mem == kShadowRodata) {
DCHECK(!is_write);
// Access to .rodata section, no races here.
// Measurements show that it can be 10-20% of all memory accesses.
- StatInc(thr, StatMopRangeRodata);
return;
}
diff --git a/compiler-rt/lib/tsan/rtl/tsan_stack_trace.cpp b/compiler-rt/lib/tsan/rtl/tsan_stack_trace.cpp
index 403a21ae4ae3..6c703d7f2b10 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_stack_trace.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_stack_trace.cpp
@@ -54,10 +54,8 @@ void __sanitizer::BufferedStackTrace::UnwindImpl(
uptr pc, uptr bp, void *context, bool request_fast, u32 max_depth) {
uptr top = 0;
uptr bottom = 0;
- if (StackTrace::WillUseFastUnwind(request_fast)) {
- GetThreadStackTopAndBottom(false, &top, &bottom);
- Unwind(max_depth, pc, bp, nullptr, top, bottom, true);
- } else
- Unwind(max_depth, pc, 0, context, 0, 0, false);
+ GetThreadStackTopAndBottom(false, &top, &bottom);
+ bool fast = StackTrace::WillUseFastUnwind(request_fast);
+ Unwind(max_depth, pc, bp, context, top, bottom, fast);
}
#endif // SANITIZER_GO
diff --git a/compiler-rt/lib/tsan/rtl/tsan_stat.cpp b/compiler-rt/lib/tsan/rtl/tsan_stat.cpp
deleted file mode 100644
index 78f3cce91384..000000000000
--- a/compiler-rt/lib/tsan/rtl/tsan_stat.cpp
+++ /dev/null
@@ -1,186 +0,0 @@
-//===-- tsan_stat.cpp -----------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of ThreadSanitizer (TSan), a race detector.
-//
-//===----------------------------------------------------------------------===//
-#include "tsan_stat.h"
-#include "tsan_rtl.h"
-
-namespace __tsan {
-
-#if TSAN_COLLECT_STATS
-
-void StatAggregate(u64 *dst, u64 *src) {
- for (int i = 0; i < StatCnt; i++)
- dst[i] += src[i];
-}
-
-void StatOutput(u64 *stat) {
- stat[StatShadowNonZero] = stat[StatShadowProcessed] - stat[StatShadowZero];
-
- static const char *name[StatCnt] = {};
- name[StatMop] = "Memory accesses ";
- name[StatMopRead] = " Including reads ";
- name[StatMopWrite] = " writes ";
- name[StatMop1] = " Including size 1 ";
- name[StatMop2] = " size 2 ";
- name[StatMop4] = " size 4 ";
- name[StatMop8] = " size 8 ";
- name[StatMopSame] = " Including same ";
- name[StatMopIgnored] = " Including ignored ";
- name[StatMopRange] = " Including range ";
- name[StatMopRodata] = " Including .rodata ";
- name[StatMopRangeRodata] = " Including .rodata range ";
- name[StatShadowProcessed] = "Shadow processed ";
- name[StatShadowZero] = " Including empty ";
- name[StatShadowNonZero] = " Including non empty ";
- name[StatShadowSameSize] = " Including same size ";
- name[StatShadowIntersect] = " intersect ";
- name[StatShadowNotIntersect] = " not intersect ";
- name[StatShadowSameThread] = " Including same thread ";
- name[StatShadowAnotherThread] = " another thread ";
- name[StatShadowReplace] = " Including evicted ";
-
- name[StatFuncEnter] = "Function entries ";
- name[StatFuncExit] = "Function exits ";
- name[StatEvents] = "Events collected ";
-
- name[StatThreadCreate] = "Total threads created ";
- name[StatThreadFinish] = " threads finished ";
- name[StatThreadReuse] = " threads reused ";
- name[StatThreadMaxTid] = " max tid ";
- name[StatThreadMaxAlive] = " max alive threads ";
-
- name[StatMutexCreate] = "Mutexes created ";
- name[StatMutexDestroy] = " destroyed ";
- name[StatMutexLock] = " lock ";
- name[StatMutexUnlock] = " unlock ";
- name[StatMutexRecLock] = " recursive lock ";
- name[StatMutexRecUnlock] = " recursive unlock ";
- name[StatMutexReadLock] = " read lock ";
- name[StatMutexReadUnlock] = " read unlock ";
-
- name[StatSyncCreated] = "Sync objects created ";
- name[StatSyncDestroyed] = " destroyed ";
- name[StatSyncAcquire] = " acquired ";
- name[StatSyncRelease] = " released ";
-
- name[StatClockAcquire] = "Clock acquire ";
- name[StatClockAcquireEmpty] = " empty clock ";
- name[StatClockAcquireFastRelease] = " fast from release-store ";
- name[StatClockAcquireFull] = " full (slow) ";
- name[StatClockAcquiredSomething] = " acquired something ";
- name[StatClockRelease] = "Clock release ";
- name[StatClockReleaseResize] = " resize ";
- name[StatClockReleaseFast] = " fast ";
- name[StatClockReleaseSlow] = " dirty overflow (slow) ";
- name[StatClockReleaseFull] = " full (slow) ";
- name[StatClockReleaseAcquired] = " was acquired ";
- name[StatClockReleaseClearTail] = " clear tail ";
- name[StatClockStore] = "Clock release store ";
- name[StatClockStoreResize] = " resize ";
- name[StatClockStoreFast] = " fast ";
- name[StatClockStoreFull] = " slow ";
- name[StatClockStoreTail] = " clear tail ";
- name[StatClockAcquireRelease] = "Clock acquire-release ";
-
- name[StatAtomic] = "Atomic operations ";
- name[StatAtomicLoad] = " Including load ";
- name[StatAtomicStore] = " store ";
- name[StatAtomicExchange] = " exchange ";
- name[StatAtomicFetchAdd] = " fetch_add ";
- name[StatAtomicFetchSub] = " fetch_sub ";
- name[StatAtomicFetchAnd] = " fetch_and ";
- name[StatAtomicFetchOr] = " fetch_or ";
- name[StatAtomicFetchXor] = " fetch_xor ";
- name[StatAtomicFetchNand] = " fetch_nand ";
- name[StatAtomicCAS] = " compare_exchange ";
- name[StatAtomicFence] = " fence ";
- name[StatAtomicRelaxed] = " Including relaxed ";
- name[StatAtomicConsume] = " consume ";
- name[StatAtomicAcquire] = " acquire ";
- name[StatAtomicRelease] = " release ";
- name[StatAtomicAcq_Rel] = " acq_rel ";
- name[StatAtomicSeq_Cst] = " seq_cst ";
- name[StatAtomic1] = " Including size 1 ";
- name[StatAtomic2] = " size 2 ";
- name[StatAtomic4] = " size 4 ";
- name[StatAtomic8] = " size 8 ";
- name[StatAtomic16] = " size 16 ";
-
- name[StatAnnotation] = "Dynamic annotations ";
- name[StatAnnotateHappensBefore] = " HappensBefore ";
- name[StatAnnotateHappensAfter] = " HappensAfter ";
- name[StatAnnotateCondVarSignal] = " CondVarSignal ";
- name[StatAnnotateCondVarSignalAll] = " CondVarSignalAll ";
- name[StatAnnotateMutexIsNotPHB] = " MutexIsNotPHB ";
- name[StatAnnotateCondVarWait] = " CondVarWait ";
- name[StatAnnotateRWLockCreate] = " RWLockCreate ";
- name[StatAnnotateRWLockCreateStatic] = " StatAnnotateRWLockCreateStatic ";
- name[StatAnnotateRWLockDestroy] = " RWLockDestroy ";
- name[StatAnnotateRWLockAcquired] = " RWLockAcquired ";
- name[StatAnnotateRWLockReleased] = " RWLockReleased ";
- name[StatAnnotateTraceMemory] = " TraceMemory ";
- name[StatAnnotateFlushState] = " FlushState ";
- name[StatAnnotateNewMemory] = " NewMemory ";
- name[StatAnnotateNoOp] = " NoOp ";
- name[StatAnnotateFlushExpectedRaces] = " FlushExpectedRaces ";
- name[StatAnnotateEnableRaceDetection] = " EnableRaceDetection ";
- name[StatAnnotateMutexIsUsedAsCondVar] = " MutexIsUsedAsCondVar ";
- name[StatAnnotatePCQGet] = " PCQGet ";
- name[StatAnnotatePCQPut] = " PCQPut ";
- name[StatAnnotatePCQDestroy] = " PCQDestroy ";
- name[StatAnnotatePCQCreate] = " PCQCreate ";
- name[StatAnnotateExpectRace] = " ExpectRace ";
- name[StatAnnotateBenignRaceSized] = " BenignRaceSized ";
- name[StatAnnotateBenignRace] = " BenignRace ";
- name[StatAnnotateIgnoreReadsBegin] = " IgnoreReadsBegin ";
- name[StatAnnotateIgnoreReadsEnd] = " IgnoreReadsEnd ";
- name[StatAnnotateIgnoreWritesBegin] = " IgnoreWritesBegin ";
- name[StatAnnotateIgnoreWritesEnd] = " IgnoreWritesEnd ";
- name[StatAnnotateIgnoreSyncBegin] = " IgnoreSyncBegin ";
- name[StatAnnotateIgnoreSyncEnd] = " IgnoreSyncEnd ";
- name[StatAnnotatePublishMemoryRange] = " PublishMemoryRange ";
- name[StatAnnotateUnpublishMemoryRange] = " UnpublishMemoryRange ";
- name[StatAnnotateThreadName] = " ThreadName ";
- name[Stat__tsan_mutex_create] = " __tsan_mutex_create ";
- name[Stat__tsan_mutex_destroy] = " __tsan_mutex_destroy ";
- name[Stat__tsan_mutex_pre_lock] = " __tsan_mutex_pre_lock ";
- name[Stat__tsan_mutex_post_lock] = " __tsan_mutex_post_lock ";
- name[Stat__tsan_mutex_pre_unlock] = " __tsan_mutex_pre_unlock ";
- name[Stat__tsan_mutex_post_unlock] = " __tsan_mutex_post_unlock ";
- name[Stat__tsan_mutex_pre_signal] = " __tsan_mutex_pre_signal ";
- name[Stat__tsan_mutex_post_signal] = " __tsan_mutex_post_signal ";
- name[Stat__tsan_mutex_pre_divert] = " __tsan_mutex_pre_divert ";
- name[Stat__tsan_mutex_post_divert] = " __tsan_mutex_post_divert ";
-
- name[StatMtxTotal] = "Contentionz ";
- name[StatMtxTrace] = " Trace ";
- name[StatMtxThreads] = " Threads ";
- name[StatMtxReport] = " Report ";
- name[StatMtxSyncVar] = " SyncVar ";
- name[StatMtxSyncTab] = " SyncTab ";
- name[StatMtxSlab] = " Slab ";
- name[StatMtxAtExit] = " Atexit ";
- name[StatMtxAnnotations] = " Annotations ";
- name[StatMtxMBlock] = " MBlock ";
- name[StatMtxDeadlockDetector] = " DeadlockDetector ";
- name[StatMtxFired] = " FiredSuppressions ";
- name[StatMtxRacy] = " RacyStacks ";
- name[StatMtxFD] = " FD ";
- name[StatMtxGlobalProc] = " GlobalProc ";
-
- Printf("Statistics:\n");
- for (int i = 0; i < StatCnt; i++)
- Printf("%s: %16zu\n", name[i], (uptr)stat[i]);
-}
-
-#endif
-
-} // namespace __tsan
diff --git a/compiler-rt/lib/tsan/rtl/tsan_stat.h b/compiler-rt/lib/tsan/rtl/tsan_stat.h
deleted file mode 100644
index 8b26a59bb2ed..000000000000
--- a/compiler-rt/lib/tsan/rtl/tsan_stat.h
+++ /dev/null
@@ -1,191 +0,0 @@
-//===-- tsan_stat.h ---------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of ThreadSanitizer (TSan), a race detector.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef TSAN_STAT_H
-#define TSAN_STAT_H
-
-namespace __tsan {
-
-enum StatType {
- // Memory access processing related stuff.
- StatMop,
- StatMopRead,
- StatMopWrite,
- StatMop1, // These must be consequtive.
- StatMop2,
- StatMop4,
- StatMop8,
- StatMopSame,
- StatMopIgnored,
- StatMopRange,
- StatMopRodata,
- StatMopRangeRodata,
- StatShadowProcessed,
- StatShadowZero,
- StatShadowNonZero, // Derived.
- StatShadowSameSize,
- StatShadowIntersect,
- StatShadowNotIntersect,
- StatShadowSameThread,
- StatShadowAnotherThread,
- StatShadowReplace,
-
- // Func processing.
- StatFuncEnter,
- StatFuncExit,
-
- // Trace processing.
- StatEvents,
-
- // Threads.
- StatThreadCreate,
- StatThreadFinish,
- StatThreadReuse,
- StatThreadMaxTid,
- StatThreadMaxAlive,
-
- // Mutexes.
- StatMutexCreate,
- StatMutexDestroy,
- StatMutexLock,
- StatMutexUnlock,
- StatMutexRecLock,
- StatMutexRecUnlock,
- StatMutexReadLock,
- StatMutexReadUnlock,
-
- // Synchronization.
- StatSyncCreated,
- StatSyncDestroyed,
- StatSyncAcquire,
- StatSyncRelease,
- StatSyncReleaseStoreAcquire,
-
- // Clocks - acquire.
- StatClockAcquire,
- StatClockAcquireEmpty,
- StatClockAcquireFastRelease,
- StatClockAcquireFull,
- StatClockAcquiredSomething,
- // Clocks - release.
- StatClockRelease,
- StatClockReleaseResize,
- StatClockReleaseFast,
- StatClockReleaseSlow,
- StatClockReleaseFull,
- StatClockReleaseAcquired,
- StatClockReleaseClearTail,
- // Clocks - release store.
- StatClockStore,
- StatClockStoreResize,
- StatClockStoreFast,
- StatClockStoreFull,
- StatClockStoreTail,
- // Clocks - acquire-release.
- StatClockAcquireRelease,
-
- // Atomics.
- StatAtomic,
- StatAtomicLoad,
- StatAtomicStore,
- StatAtomicExchange,
- StatAtomicFetchAdd,
- StatAtomicFetchSub,
- StatAtomicFetchAnd,
- StatAtomicFetchOr,
- StatAtomicFetchXor,
- StatAtomicFetchNand,
- StatAtomicCAS,
- StatAtomicFence,
- StatAtomicRelaxed,
- StatAtomicConsume,
- StatAtomicAcquire,
- StatAtomicRelease,
- StatAtomicAcq_Rel,
- StatAtomicSeq_Cst,
- StatAtomic1,
- StatAtomic2,
- StatAtomic4,
- StatAtomic8,
- StatAtomic16,
-
- // Dynamic annotations.
- StatAnnotation,
- StatAnnotateHappensBefore,
- StatAnnotateHappensAfter,
- StatAnnotateCondVarSignal,
- StatAnnotateCondVarSignalAll,
- StatAnnotateMutexIsNotPHB,
- StatAnnotateCondVarWait,
- StatAnnotateRWLockCreate,
- StatAnnotateRWLockCreateStatic,
- StatAnnotateRWLockDestroy,
- StatAnnotateRWLockAcquired,
- StatAnnotateRWLockReleased,
- StatAnnotateTraceMemory,
- StatAnnotateFlushState,
- StatAnnotateNewMemory,
- StatAnnotateNoOp,
- StatAnnotateFlushExpectedRaces,
- StatAnnotateEnableRaceDetection,
- StatAnnotateMutexIsUsedAsCondVar,
- StatAnnotatePCQGet,
- StatAnnotatePCQPut,
- StatAnnotatePCQDestroy,
- StatAnnotatePCQCreate,
- StatAnnotateExpectRace,
- StatAnnotateBenignRaceSized,
- StatAnnotateBenignRace,
- StatAnnotateIgnoreReadsBegin,
- StatAnnotateIgnoreReadsEnd,
- StatAnnotateIgnoreWritesBegin,
- StatAnnotateIgnoreWritesEnd,
- StatAnnotateIgnoreSyncBegin,
- StatAnnotateIgnoreSyncEnd,
- StatAnnotatePublishMemoryRange,
- StatAnnotateUnpublishMemoryRange,
- StatAnnotateThreadName,
- Stat__tsan_mutex_create,
- Stat__tsan_mutex_destroy,
- Stat__tsan_mutex_pre_lock,
- Stat__tsan_mutex_post_lock,
- Stat__tsan_mutex_pre_unlock,
- Stat__tsan_mutex_post_unlock,
- Stat__tsan_mutex_pre_signal,
- Stat__tsan_mutex_post_signal,
- Stat__tsan_mutex_pre_divert,
- Stat__tsan_mutex_post_divert,
-
- // Internal mutex contentionz.
- StatMtxTotal,
- StatMtxTrace,
- StatMtxThreads,
- StatMtxReport,
- StatMtxSyncVar,
- StatMtxSyncTab,
- StatMtxSlab,
- StatMtxAnnotations,
- StatMtxAtExit,
- StatMtxMBlock,
- StatMtxDeadlockDetector,
- StatMtxFired,
- StatMtxRacy,
- StatMtxFD,
- StatMtxGlobalProc,
-
- // This must be the last.
- StatCnt
-};
-
-} // namespace __tsan
-
-#endif // TSAN_STAT_H
diff --git a/compiler-rt/lib/tsan/rtl/tsan_sync.cpp b/compiler-rt/lib/tsan/rtl/tsan_sync.cpp
index 17ddd50f1284..5e226b2d12b1 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_sync.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_sync.cpp
@@ -18,10 +18,7 @@ namespace __tsan {
void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s);
-SyncVar::SyncVar()
- : mtx(MutexTypeSyncVar, StatMtxSyncVar) {
- Reset(0);
-}
+SyncVar::SyncVar() : mtx(MutexTypeSyncVar) { Reset(0); }
void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) {
this->addr = addr;
@@ -53,8 +50,8 @@ void SyncVar::Reset(Processor *proc) {
}
MetaMap::MetaMap()
- : block_alloc_("heap block allocator")
- , sync_alloc_("sync allocator") {
+ : block_alloc_(LINKER_INITIALIZED, "heap block allocator"),
+ sync_alloc_(LINKER_INITIALIZED, "sync allocator") {
atomic_store(&uid_gen_, 0, memory_order_relaxed);
}
@@ -202,8 +199,8 @@ SyncVar* MetaMap::GetIfExistsAndLock(uptr addr, bool write_lock) {
return GetAndLock(0, 0, addr, write_lock, false);
}
-SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc,
- uptr addr, bool write_lock, bool create) {
+SyncVar *MetaMap::GetAndLock(ThreadState *thr, uptr pc, uptr addr, bool write_lock,
+ bool create) NO_THREAD_SAFETY_ANALYSIS {
u32 *meta = MemToMeta(addr);
u32 idx0 = *meta;
u32 myidx = 0;
diff --git a/compiler-rt/lib/tsan/rtl/tsan_sync.h b/compiler-rt/lib/tsan/rtl/tsan_sync.h
index 47f2739d8de5..324aa1b0cea1 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_sync.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_sync.h
@@ -17,7 +17,6 @@
#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
#include "tsan_defs.h"
#include "tsan_clock.h"
-#include "tsan_mutex.h"
#include "tsan_dense_alloc.h"
namespace __tsan {
@@ -50,13 +49,11 @@ enum MutexFlags {
struct SyncVar {
SyncVar();
- static const int kInvalidTid = -1;
-
uptr addr; // overwritten by DenseSlabAlloc freelist
Mutex mtx;
u64 uid; // Globally unique id.
u32 creation_stack_id;
- int owner_tid; // Set only by exclusive owners.
+ u32 owner_tid; // Set only by exclusive owners.
u64 last_lock;
int recursion;
atomic_uint32_t flags;
@@ -130,8 +127,8 @@ class MetaMap {
static const u32 kFlagMask = 3u << 30;
static const u32 kFlagBlock = 1u << 30;
static const u32 kFlagSync = 2u << 30;
- typedef DenseSlabAlloc<MBlock, 1<<16, 1<<12> BlockAlloc;
- typedef DenseSlabAlloc<SyncVar, 1<<16, 1<<10> SyncAlloc;
+ typedef DenseSlabAlloc<MBlock, 1 << 18, 1 << 12, kFlagMask> BlockAlloc;
+ typedef DenseSlabAlloc<SyncVar, 1 << 20, 1 << 10, kFlagMask> SyncAlloc;
BlockAlloc block_alloc_;
SyncAlloc sync_alloc_;
atomic_uint64_t uid_gen_;
diff --git a/compiler-rt/lib/tsan/rtl/tsan_trace.h b/compiler-rt/lib/tsan/rtl/tsan_trace.h
index fbd0f72db6e7..f5e0c407cda8 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_trace.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_trace.h
@@ -13,7 +13,6 @@
#define TSAN_TRACE_H
#include "tsan_defs.h"
-#include "tsan_mutex.h"
#include "tsan_stack_trace.h"
#include "tsan_mutexset.h"
@@ -65,9 +64,7 @@ struct Trace {
// CreateThreadContext.
TraceHeader headers[kTraceParts];
- Trace()
- : mtx(MutexTypeTrace, StatMtxTrace) {
- }
+ Trace() : mtx(MutexTypeTrace) {}
};
} // namespace __tsan
diff --git a/compiler-rt/lib/tsan/rtl/tsan_update_shadow_word_inl.h b/compiler-rt/lib/tsan/rtl/tsan_update_shadow_word_inl.h
index 056c3aa20320..d23dfb0ba061 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_update_shadow_word_inl.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_update_shadow_word_inl.h
@@ -13,12 +13,10 @@
// produce slightly less efficient code.
//===----------------------------------------------------------------------===//
do {
- StatInc(thr, StatShadowProcessed);
const unsigned kAccessSize = 1 << kAccessSizeLog;
u64 *sp = &shadow_mem[idx];
old = LoadShadow(sp);
if (LIKELY(old.IsZero())) {
- StatInc(thr, StatShadowZero);
if (!stored) {
StoreIfNotYetStored(sp, &store_word);
stored = true;
@@ -27,17 +25,14 @@ do {
}
// is the memory access equal to the previous?
if (LIKELY(Shadow::Addr0AndSizeAreEqual(cur, old))) {
- StatInc(thr, StatShadowSameSize);
// same thread?
if (LIKELY(Shadow::TidsAreEqual(old, cur))) {
- StatInc(thr, StatShadowSameThread);
if (LIKELY(old.IsRWWeakerOrEqual(kAccessIsWrite, kIsAtomic))) {
StoreIfNotYetStored(sp, &store_word);
stored = true;
}
break;
}
- StatInc(thr, StatShadowAnotherThread);
if (HappensBefore(old, thr)) {
if (old.IsRWWeakerOrEqual(kAccessIsWrite, kIsAtomic)) {
StoreIfNotYetStored(sp, &store_word);
@@ -51,12 +46,8 @@ do {
}
// Do the memory access intersect?
if (Shadow::TwoRangesIntersect(old, cur, kAccessSize)) {
- StatInc(thr, StatShadowIntersect);
- if (Shadow::TidsAreEqual(old, cur)) {
- StatInc(thr, StatShadowSameThread);
+ if (Shadow::TidsAreEqual(old, cur))
break;
- }
- StatInc(thr, StatShadowAnotherThread);
if (old.IsBothReadsOrAtomic(kAccessIsWrite, kIsAtomic))
break;
if (LIKELY(HappensBefore(old, thr)))
@@ -64,6 +55,5 @@ do {
goto RACE;
}
// The accesses do not intersect.
- StatInc(thr, StatShadowNotIntersect);
break;
} while (0);
diff --git a/compiler-rt/lib/ubsan/ubsan_diag.cpp b/compiler-rt/lib/ubsan/ubsan_diag.cpp
index 1b2828d236d6..ef2e495cac8e 100644
--- a/compiler-rt/lib/ubsan/ubsan_diag.cpp
+++ b/compiler-rt/lib/ubsan/ubsan_diag.cpp
@@ -278,7 +278,7 @@ static void PrintMemorySnippet(const Decorator &Decor, MemoryLocation Loc,
}
// Emit data.
- InternalScopedString Buffer(1024);
+ InternalScopedString Buffer;
for (uptr P = Min; P != Max; ++P) {
unsigned char C = *reinterpret_cast<const unsigned char*>(P);
Buffer.append("%s%02x", (P % 8 == 0) ? " " : " ", C);
@@ -346,7 +346,7 @@ Diag::~Diag() {
// All diagnostics should be printed under report mutex.
ScopedReport::CheckLocked();
Decorator Decor;
- InternalScopedString Buffer(1024);
+ InternalScopedString Buffer;
// Prepare a report that a monitor process can inspect.
if (Level == DL_Error) {
@@ -388,6 +388,10 @@ ScopedReport::ScopedReport(ReportOptions Opts, Location SummaryLoc,
ScopedReport::~ScopedReport() {
MaybePrintStackTrace(Opts.pc, Opts.bp);
MaybeReportErrorSummary(SummaryLoc, Type);
+
+ if (common_flags()->print_module_map >= 2)
+ DumpProcessMap();
+
if (flags()->halt_on_error)
Die();
}
diff --git a/compiler-rt/lib/ubsan/ubsan_diag_standalone.cpp b/compiler-rt/lib/ubsan/ubsan_diag_standalone.cpp
index 300179adae28..5526ae051650 100644
--- a/compiler-rt/lib/ubsan/ubsan_diag_standalone.cpp
+++ b/compiler-rt/lib/ubsan/ubsan_diag_standalone.cpp
@@ -20,11 +20,9 @@ void __sanitizer::BufferedStackTrace::UnwindImpl(
uptr pc, uptr bp, void *context, bool request_fast, u32 max_depth) {
uptr top = 0;
uptr bottom = 0;
- if (StackTrace::WillUseFastUnwind(request_fast)) {
- GetThreadStackTopAndBottom(false, &top, &bottom);
- Unwind(max_depth, pc, bp, nullptr, top, bottom, true);
- } else
- Unwind(max_depth, pc, bp, context, 0, 0, false);
+ GetThreadStackTopAndBottom(false, &top, &bottom);
+ bool fast = StackTrace::WillUseFastUnwind(request_fast);
+ Unwind(max_depth, pc, bp, context, top, bottom, fast);
}
extern "C" {
diff --git a/compiler-rt/lib/ubsan/ubsan_init.cpp b/compiler-rt/lib/ubsan/ubsan_init.cpp
index e0be5a72ec42..9931d85bf40c 100644
--- a/compiler-rt/lib/ubsan/ubsan_init.cpp
+++ b/compiler-rt/lib/ubsan/ubsan_init.cpp
@@ -33,6 +33,11 @@ static void CommonInit() {
InitializeSuppressions();
}
+static void UbsanDie() {
+ if (common_flags()->print_module_map >= 1)
+ DumpProcessMap();
+}
+
static void CommonStandaloneInit() {
SanitizerToolName = GetSanititizerToolName();
CacheBinaryName();
@@ -42,6 +47,10 @@ static void CommonStandaloneInit() {
AndroidLogInit();
InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);
CommonInit();
+
+ // Only add die callback when running in standalone mode to avoid printing
+ // the same information from multiple sanitizers' output
+ AddDieCallback(UbsanDie);
Symbolizer::LateInitialize();
}
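The standalone runtime now registers UbsanDie() as a die callback so the module map is dumped once, right before a fatal exit, rather than by every sanitizer that happens to be linked in. A rough sketch of that callback mechanism; AddDieCallback and Die below are simplified re-implementations for illustration, not sanitizer_common's own:

    #include <cstdio>
    #include <cstdlib>

    // Simplified die-callback list; callbacks run once, just before exit.
    using DieCallback = void (*)();
    DieCallback g_die_callbacks[4];
    int g_num_die_callbacks = 0;

    void AddDieCallback(DieCallback cb) { g_die_callbacks[g_num_die_callbacks++] = cb; }

    [[noreturn]] void Die() {
      for (int i = g_num_die_callbacks - 1; i >= 0; i--)  // newest first in this sketch
        g_die_callbacks[i]();
      std::exit(1);  // the real runtime terminates the process here
    }

    void UbsanDie() { std::puts("(module map would be dumped here)"); }

    int main() {
      AddDieCallback(UbsanDie);  // done once, in standalone init only
      Die();                     // on a fatal error
    }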
diff --git a/compiler-rt/lib/ubsan/ubsan_monitor.cpp b/compiler-rt/lib/ubsan/ubsan_monitor.cpp
index d064e95f76f7..69dd986f9bdf 100644
--- a/compiler-rt/lib/ubsan/ubsan_monitor.cpp
+++ b/compiler-rt/lib/ubsan/ubsan_monitor.cpp
@@ -17,7 +17,7 @@ using namespace __ubsan;
UndefinedBehaviorReport::UndefinedBehaviorReport(const char *IssueKind,
Location &Loc,
InternalScopedString &Msg)
- : IssueKind(IssueKind), Loc(Loc), Buffer(Msg.length() + 1) {
+ : IssueKind(IssueKind), Loc(Loc) {
// We have the common sanitizer reporting lock, so it's safe to register a
// new UB report.
RegisterUndefinedBehaviorReport(this);
@@ -52,9 +52,9 @@ void __ubsan::__ubsan_get_current_report_data(const char **OutIssueKind,
// Ensure that the first character of the diagnostic text isn't a lowercase
// letter.
- char FirstChar = Buf.data()[0];
+ char FirstChar = *Buf.data();
if (FirstChar >= 'a' && FirstChar <= 'z')
- Buf.data()[0] = FirstChar - 'a' + 'A';
+ *Buf.data() += 'A' - 'a';
*OutIssueKind = CurrentUBR->IssueKind;
*OutMessage = Buf.data();
diff --git a/compiler-rt/lib/ubsan/ubsan_platform.h b/compiler-rt/lib/ubsan/ubsan_platform.h
index 32d949d75b9c..d2cc2e10bd2f 100644
--- a/compiler-rt/lib/ubsan/ubsan_platform.h
+++ b/compiler-rt/lib/ubsan/ubsan_platform.h
@@ -14,10 +14,10 @@
// Other platforms should be easy to add, and probably work as-is.
#if defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__) || \
- defined(__NetBSD__) || \
- (defined(__sun__) && defined(__svr4__)) || \
- defined(_WIN32) || defined(__Fuchsia__) || defined(__rtems__)
-# define CAN_SANITIZE_UB 1
+ defined(__NetBSD__) || defined(__DragonFly__) || \
+ (defined(__sun__) && defined(__svr4__)) || defined(_WIN32) || \
+ defined(__Fuchsia__)
+#define CAN_SANITIZE_UB 1
#else
# define CAN_SANITIZE_UB 0
#endif
diff --git a/compiler-rt/lib/ubsan/ubsan_value.cpp b/compiler-rt/lib/ubsan/ubsan_value.cpp
index 79c3ba991d39..40042bf3a903 100644
--- a/compiler-rt/lib/ubsan/ubsan_value.cpp
+++ b/compiler-rt/lib/ubsan/ubsan_value.cpp
@@ -74,7 +74,7 @@ SIntMax Value::getSIntValue() const {
// to SIntMax.
const unsigned ExtraBits =
sizeof(SIntMax) * 8 - getType().getIntegerBitWidth();
- return SIntMax(Val) << ExtraBits >> ExtraBits;
+ return SIntMax(UIntMax(Val) << ExtraBits) >> ExtraBits;
}
if (getType().getIntegerBitWidth() == 64)
return *reinterpret_cast<s64*>(Val);
diff --git a/compiler-rt/lib/xray/xray_fdr_logging.cpp b/compiler-rt/lib/xray/xray_fdr_logging.cpp
index 16ce483502f0..799814f437f9 100644
--- a/compiler-rt/lib/xray/xray_fdr_logging.cpp
+++ b/compiler-rt/lib/xray/xray_fdr_logging.cpp
@@ -284,13 +284,12 @@ XRayLogFlushStatus fdrLoggingFlush() XRAY_NEVER_INSTRUMENT {
return XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING;
}
- s32 Result = XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING;
- if (!atomic_compare_exchange_strong(&LogFlushStatus, &Result,
- XRayLogFlushStatus::XRAY_LOG_FLUSHING,
- memory_order_release)) {
+ if (atomic_exchange(&LogFlushStatus, XRayLogFlushStatus::XRAY_LOG_FLUSHING,
+ memory_order_release) ==
+ XRayLogFlushStatus::XRAY_LOG_FLUSHING) {
if (Verbosity())
- Report("Not flushing log, implementation is still finalizing.\n");
- return static_cast<XRayLogFlushStatus>(Result);
+ Report("Not flushing log, implementation is still flushing.\n");
+ return XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING;
}
if (BQ == nullptr) {
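fdrLoggingFlush() now claims the flushing state with a single atomic_exchange instead of a compare-exchange seeded with NOT_FLUSHING: whichever caller gets there first wins, and everyone else observes the previous FLUSHING value and backs off. A reduced sketch of the idiom using std::atomic and only two states (the runtime uses its own atomic wrappers and a richer state machine):

    #include <atomic>
    #include <cstdio>

    enum FlushStatus { kNotFlushing, kFlushing };
    std::atomic<int> g_flush_status{kNotFlushing};

    // exchange() reads the previous state and marks the log as flushing in one
    // atomic step, so a concurrent second caller sees kFlushing and backs off.
    bool TryStartFlush() {
      if (g_flush_status.exchange(kFlushing, std::memory_order_acq_rel) == kFlushing) {
        std::puts("Not flushing log, another flush is already in progress.");
        return false;
      }
      return true;
    }

    int main() {
      if (TryStartFlush()) {
        // ... write the buffers out ...
        g_flush_status.store(kNotFlushing, std::memory_order_release);
      }
    }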
diff --git a/compiler-rt/lib/xray/xray_trampoline_x86_64.S b/compiler-rt/lib/xray/xray_trampoline_x86_64.S
index 12c5a6ccd9a4..02cf69f766c4 100644
--- a/compiler-rt/lib/xray/xray_trampoline_x86_64.S
+++ b/compiler-rt/lib/xray/xray_trampoline_x86_64.S
@@ -15,12 +15,37 @@
#include "../builtins/assembly.h"
#include "../sanitizer_common/sanitizer_asm.h"
+// XRay trampolines which are not produced by intrinsics are not System V AMD64
+// ABI compliant because they are called with a stack that is always misaligned
+// by 8 bytes with respect to a 16 bytes alignment. This is because they are
+// called immediately after the call to, or immediately before returning from,
+// the function being instrumented. This saves space in the patch point, but
+// misaligns the stack by 8 bytes.
+
+.macro ALIGN_STACK_16B
+#if defined(__APPLE__)
+ subq $$8, %rsp
+#else
+ subq $8, %rsp
+#endif
+ CFI_ADJUST_CFA_OFFSET(8)
+.endm
+.macro RESTORE_STACK_ALIGNMENT
+#if defined(__APPLE__)
+ addq $$8, %rsp
+#else
+ addq $8, %rsp
+#endif
+ CFI_ADJUST_CFA_OFFSET(-8)
+.endm
+// This macro should keep the stack aligned to 16 bytes.
.macro SAVE_REGISTERS
pushfq
+ CFI_ADJUST_CFA_OFFSET(8)
subq $240, %rsp
- CFI_DEF_CFA_OFFSET(248)
+ CFI_ADJUST_CFA_OFFSET(240)
movq %rbp, 232(%rsp)
movupd %xmm0, 216(%rsp)
movupd %xmm1, 200(%rsp)
@@ -45,6 +70,7 @@
movq %r15, 0(%rsp)
.endm
+// This macro should keep the stack aligned to 16 bytes.
.macro RESTORE_REGISTERS
movq 232(%rsp), %rbp
movupd 216(%rsp), %xmm0
@@ -69,22 +95,9 @@
movq 8(%rsp), %r14
movq 0(%rsp), %r15
addq $240, %rsp
+ CFI_ADJUST_CFA_OFFSET(-240)
popfq
- CFI_DEF_CFA_OFFSET(8)
-.endm
-
-.macro ALIGNED_CALL_RAX
- // Call the logging handler, after aligning the stack to a 16-byte boundary.
- // The approach we're taking here uses additional stack space to stash the
- // stack pointer twice before aligning the pointer to 16-bytes. If the stack
- // was 8-byte aligned, it will become 16-byte aligned -- when restoring the
- // pointer, we can always look -8 bytes from the current position to get
- // either of the values we've stashed in the first place.
- pushq %rsp
- pushq (%rsp)
- andq $-0x10, %rsp
- callq *%rax
- movq 8(%rsp), %rsp
+ CFI_ADJUST_CFA_OFFSET(-8)
.endm
.text
@@ -104,6 +117,7 @@
# LLVM-MCA-BEGIN __xray_FunctionEntry
ASM_SYMBOL(__xray_FunctionEntry):
CFI_STARTPROC
+ ALIGN_STACK_16B
SAVE_REGISTERS
// This load has to be atomic, it's concurrent with __xray_patch().
@@ -115,10 +129,11 @@ ASM_SYMBOL(__xray_FunctionEntry):
// The patched function prologue puts its xray_instr_map index into %r10d.
movl %r10d, %edi
xor %esi,%esi
- ALIGNED_CALL_RAX
+ callq *%rax
.Ltmp0:
RESTORE_REGISTERS
+ RESTORE_STACK_ALIGNMENT
retq
# LLVM-MCA-END
ASM_SIZE(__xray_FunctionEntry)
@@ -133,11 +148,13 @@ ASM_SYMBOL(__xray_FunctionEntry):
# LLVM-MCA-BEGIN __xray_FunctionExit
ASM_SYMBOL(__xray_FunctionExit):
CFI_STARTPROC
+ ALIGN_STACK_16B
+
// Save the important registers first. Since we're assuming that this
// function is only jumped into, we only preserve the registers for
// returning.
- subq $56, %rsp
- CFI_DEF_CFA_OFFSET(64)
+ subq $64, %rsp
+ CFI_ADJUST_CFA_OFFSET(64)
movq %rbp, 48(%rsp)
movupd %xmm0, 32(%rsp)
movupd %xmm1, 16(%rsp)
@@ -149,7 +166,7 @@ ASM_SYMBOL(__xray_FunctionExit):
movl %r10d, %edi
movl $1, %esi
- ALIGNED_CALL_RAX
+ callq *%rax
.Ltmp2:
// Restore the important registers.
@@ -158,8 +175,10 @@ ASM_SYMBOL(__xray_FunctionExit):
movupd 16(%rsp), %xmm1
movq 8(%rsp), %rax
movq 0(%rsp), %rdx
- addq $56, %rsp
- CFI_DEF_CFA_OFFSET(8)
+ addq $64, %rsp
+ CFI_ADJUST_CFA_OFFSET(-64)
+
+ RESTORE_STACK_ALIGNMENT
retq
# LLVM-MCA-END
ASM_SIZE(__xray_FunctionExit)
@@ -174,6 +193,7 @@ ASM_SYMBOL(__xray_FunctionExit):
# LLVM-MCA-BEGIN __xray_FunctionTailExit
ASM_SYMBOL(__xray_FunctionTailExit):
CFI_STARTPROC
+ ALIGN_STACK_16B
SAVE_REGISTERS
movq ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
@@ -182,11 +202,11 @@ ASM_SYMBOL(__xray_FunctionTailExit):
movl %r10d, %edi
movl $2, %esi
-
- ALIGNED_CALL_RAX
+ callq *%rax
.Ltmp4:
RESTORE_REGISTERS
+ RESTORE_STACK_ALIGNMENT
retq
# LLVM-MCA-END
ASM_SIZE(__xray_FunctionTailExit)
@@ -201,6 +221,7 @@ ASM_SYMBOL(__xray_FunctionTailExit):
# LLVM-MCA-BEGIN __xray_ArgLoggerEntry
ASM_SYMBOL(__xray_ArgLoggerEntry):
CFI_STARTPROC
+ ALIGN_STACK_16B
SAVE_REGISTERS
// Again, these function pointer loads must be atomic; MOV is fine.
@@ -223,10 +244,12 @@ ASM_SYMBOL(__xray_ArgLoggerEntry):
// 32-bit function ID becomes the first
movl %r10d, %edi
- ALIGNED_CALL_RAX
+
+ callq *%rax
.Larg1entryFail:
RESTORE_REGISTERS
+ RESTORE_STACK_ALIGNMENT
retq
# LLVM-MCA-END
ASM_SIZE(__xray_ArgLoggerEntry)
@@ -249,7 +272,7 @@ ASM_SYMBOL(__xray_CustomEvent):
testq %rax,%rax
je .LcustomEventCleanup
- ALIGNED_CALL_RAX
+ callq *%rax
.LcustomEventCleanup:
RESTORE_REGISTERS
@@ -275,7 +298,7 @@ ASM_SYMBOL(__xray_TypedEvent):
testq %rax,%rax
je .LtypedEventCleanup
- ALIGNED_CALL_RAX
+ callq *%rax
.LtypedEventCleanup:
RESTORE_REGISTERS
diff --git a/compiler-rt/lib/xray/xray_utils.cpp b/compiler-rt/lib/xray/xray_utils.cpp
index 4c8ad5b92be7..befbabfe4532 100644
--- a/compiler-rt/lib/xray/xray_utils.cpp
+++ b/compiler-rt/lib/xray/xray_utils.cpp
@@ -20,6 +20,7 @@
#include <errno.h>
#include <fcntl.h>
#include <iterator>
+#include <new>
#include <stdlib.h>
#include <sys/types.h>
#include <tuple>