Diffstat (limited to 'contrib/llvm-project/compiler-rt/lib')
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_activation.cpp | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_allocator.cpp | 292
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_allocator.h | 94
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_debugging.cpp | 9
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_descriptions.cpp | 77
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_errors.cpp | 96
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_errors.h | 73
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_fake_stack.cpp | 62
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_flags.cpp | 8
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_flags.inc | 11
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_fuchsia.cpp | 69
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_globals.cpp | 94
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors.cpp | 442
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors.h | 75
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors_memintrinsics.cpp | 63
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors_memintrinsics.h | 127
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors_vfork.S | 1
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_interface.inc | 8
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_interface_internal.h | 20
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_internal.h | 46
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_linux.cpp | 199
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_lock.h | 0
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_mac.cpp | 65
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_malloc_linux.cpp | 120
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_malloc_mac.cpp | 82
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_malloc_win.cpp | 18
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_mapping.h | 217
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_mapping_sparc64.h | 28
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_memory_profile.cpp | 19
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_new_delete.cpp | 21
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_poisoning.cpp | 363
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_poisoning.h | 11
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_posix.cpp | 86
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_premap_shadow.cpp | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_report.cpp | 53
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_report.h | 7
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_rtl.cpp | 159
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_rtl_static.cpp | 36
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_rtl_x86_64.S | 150
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_shadow_setup.cpp | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_stack.cpp | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_stack.h | 32
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_stats.cpp | 18
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_suppressions.cpp | 7
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_thread.cpp | 234
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_thread.h | 42
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_win.cpp | 76
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/asan_win_dll_thunk.cpp | 13
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan/weak_symbols.txt | 7
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan_abi/asan_abi.cpp | 89
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan_abi/asan_abi.h | 91
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan_abi/asan_abi_shim.cpp | 481
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/asan_abi/asan_abi_tbd.txt | 22
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/README.txt | 41
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/aarch64/fp_mode.c | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/aarch64/lse.S | 40
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/aarch64/sme-abi-init.c | 52
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/aarch64/sme-abi.S | 176
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/aarch64/sme-libc-routines.c | 87
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/absvdi2.c | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/absvsi2.c | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/absvti2.c | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/addtf3.c | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/apple_versioning.c | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/arm/aeabi_cdcmp.S | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/arm/aeabi_cfcmp.S | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/arm/divsi3.S | 3
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/arm/fp_mode.c | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/arm/sync-ops.h | 22
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/arm/truncdfsf2vfp.S | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/arm/udivsi3.S | 5
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/ashldi3.c | 3
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/ashlti3.c | 5
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/ashrdi3.c | 3
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/ashrti3.c | 5
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/assembly.h | 13
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/atomic.c | 20
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/avr/divmodhi4.S | 57
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/avr/divmodqi4.S | 44
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/avr/exit.S | 18
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/avr/mulhi3.S | 71
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/avr/mulqi3.S | 53
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/avr/udivmodhi4.S | 49
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/avr/udivmodqi4.S | 39
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/clear_cache.c | 40
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/comparetf2.c | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64.c | 152
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/fmv/android.inc | 36
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/fmv/apple.inc | 71
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/fmv/freebsd.inc | 27
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/fmv/fuchsia.inc | 51
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/fmv/mrs.inc | 198
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/fmv/sysauxv.inc | 21
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/fmv/unimplemented.inc | 8
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/hwcap.inc | 183
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/lse_atomics/android.inc | 28
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/lse_atomics/freebsd.inc | 5
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/lse_atomics/fuchsia.inc | 12
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/lse_atomics/sysauxv.inc | 6
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/cpu_model.h | 41
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/x86.c (renamed from contrib/llvm-project/compiler-rt/lib/builtins/cpu_model.c) | 324
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/crtbegin.c (renamed from contrib/llvm-project/compiler-rt/lib/crt/crtbegin.c) | 52
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/crtend.c (renamed from contrib/llvm-project/compiler-rt/lib/crt/crtend.c) | 0
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/divmoddi4.c | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/divmodsi4.c | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/divmodti4.c | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/divtc3.c | 51
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/divtf3.c | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/divxc3.c | 8
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/emutls.c | 31
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/eprintf.c | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/extenddftf2.c | 6
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/extendhftf2.c | 7
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/extendsftf2.c | 6
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/extendxftf2.c | 24
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/fixdfdi.c | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/fixsfdi.c | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/fixtfdi.c | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/fixtfsi.c | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/fixtfti.c | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/fixunsdfdi.c | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/fixunssfdi.c | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/fixunstfdi.c | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/fixunstfsi.c | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/fixunstfti.c | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/fixunsxfdi.c | 6
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/fixunsxfsi.c | 6
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/fixunsxfti.c | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/fixxfdi.c | 6
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/fixxfti.c | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/floatdidf.c | 56
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/floatdisf.c | 55
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/floatditf.c | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/floatdixf.c | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/floatsidf.c | 7
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/floatsisf.c | 13
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/floatsitf.c | 10
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/floattidf.c | 52
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/floattisf.c | 51
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/floattitf.c | 55
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/floattixf.c | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/floatundidf.c | 53
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/floatundisf.c | 52
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/floatunditf.c | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/floatundixf.c | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/floatunsisf.c | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/floatunsitf.c | 6
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/floatuntidf.c | 49
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/floatuntisf.c | 48
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/floatuntitf.c | 52
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/floatuntixf.c | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/fp_compare_impl.inc | 3
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/fp_extend.h | 105
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/fp_extend_impl.inc | 83
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/fp_fixint_impl.inc | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/fp_lib.h | 94
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/fp_mode.c | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/fp_mode.h | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/fp_trunc.h | 91
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/fp_trunc_impl.inc | 119
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/gcc_personality_v0.c | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/i386/chkstk.S | 22
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/i386/chkstk2.S | 41
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/i386/floatdixf.S | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/i386/floatundixf.S | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/i386/fp_mode.c | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/int_endianness.h | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/int_lib.h | 12
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/int_math.h | 10
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/int_mulo_impl.inc | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/int_mulv_impl.inc | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/int_to_fp.h | 82
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/int_to_fp_impl.inc | 72
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/int_types.h | 88
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/loongarch/fp_mode.c | 59
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/lshrti3.c | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/mingw_fixfloat.c | 34
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/mulodi4.c | 1
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/mulosi4.c | 1
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/muloti4.c | 1
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/multc3.c | 53
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/multf3.c | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/mulvdi3.c | 1
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/mulvsi3.c | 1
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/mulvti3.c | 1
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/mulxc3.c | 12
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/negdi2.c | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/negti2.c | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/negvdi2.c | 3
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/negvsi2.c | 3
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/negvti2.c | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/os_version_check.c | 12
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/powitf2.c | 6
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/powixf2.c | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/riscv/fp_mode.c | 42
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/riscv/restore.S | 52
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/riscv/save.S | 44
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/subtf3.c | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/trampoline_setup.c | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/truncdfbf2.c | 13
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/truncsfbf2.c | 13
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/trunctfdf2.c | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/trunctfhf2.c | 7
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/trunctfsf2.c | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/trunctfxf2.c | 23
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/udivmoddi4.c | 6
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/x86_64/chkstk2.S | 43
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/x86_64/floatdixf.c | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/builtins/x86_64/floatundixf.S | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/cfi/cfi.cpp | 20
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/dfsan/dfsan.cpp | 413
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/dfsan/dfsan.h | 12
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_allocator.cpp | 47
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_chained_origin_depot.cpp | 6
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_chained_origin_depot.h | 3
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_custom.cpp | 916
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_interceptors.cpp | 65
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_platform.h | 31
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_thread.cpp | 30
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_thread.h | 9
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/dfsan/done_abilist.txt | 37
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/dfsan/libc_ubuntu1404_abilist.txt | 6
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerBuiltinsMsvc.h | 3
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerCommand.h | 15
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerCorpus.h | 35
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDataFlowTrace.cpp | 34
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDataFlowTrace.h | 16
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDefs.h | 24
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDictionary.h | 11
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDriver.cpp | 88
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerExtraCounters.cpp | 8
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerExtraCountersDarwin.cpp | 22
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerExtraCountersWindows.cpp | 80
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerFlags.def | 11
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerFork.cpp | 126
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerFork.h | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIO.cpp | 25
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIO.h | 13
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIOPosix.cpp | 3
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIOWindows.cpp | 8
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerInternal.h | 24
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerLoop.cpp | 60
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerMerge.cpp | 219
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerMerge.h | 38
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerMutate.cpp | 8
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerMutate.h | 18
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerOptions.h | 1
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerTracePC.cpp | 23
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerTracePC.h | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtil.cpp | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtil.h | 9
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilDarwin.cpp | 5
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilFuchsia.cpp | 216
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilLinux.cpp | 12
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilPosix.cpp | 5
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilWindows.cpp | 16
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/gwp_asan/common.cpp | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/gwp_asan/common.h | 68
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/gwp_asan/crash_handler.cpp | 26
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/gwp_asan/crash_handler.h | 16
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp | 151
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h | 22
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/backtrace_sanitizer_common.cpp | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/segv_handler.h | 3
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/segv_handler_fuchsia.cpp | 3
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/segv_handler_posix.cpp | 106
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/gwp_asan/options.inc | 10
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/guarded_pool_allocator_posix.cpp | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan.cpp | 236
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan.h | 73
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocation_functions.cpp | 103
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocator.cpp | 416
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocator.h | 72
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_checks.h | 114
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_exceptions.cpp | 9
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_flags.inc | 12
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_fuchsia.cpp | 36
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_globals.cpp | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_globals.h | 1
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_interceptors.cpp | 520
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_interface_internal.h | 112
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_linux.cpp | 392
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_memintrinsics.cpp | 32
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_new_delete.cpp | 87
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_platform_interceptors.h | 1001
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_poisoning.cpp | 8
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_preinit.cpp | 23
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_registers.h | 56
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_report.cpp | 978
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_report.h | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_setjmp_aarch64.S (renamed from contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_setjmp.S) | 54
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_setjmp_riscv64.S | 92
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_setjmp_x86_64.S | 79
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_tag_mismatch_aarch64.S | 8
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_tag_mismatch_riscv64.S | 132
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread.cpp | 128
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread.h | 24
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread_list.cpp | 19
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread_list.h | 50
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_type_test.cpp | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/interception/interception.h | 223
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/interception/interception_linux.cpp | 16
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/interception/interception_linux.h | 18
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/interception/interception_mac.cpp | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/interception/interception_mac.h | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/interception/interception_type_test.cpp | 6
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/interception/interception_win.cpp | 176
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/interception/interception_win.h | 5
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/lsan/lsan.cpp | 14
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/lsan/lsan.h | 27
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/lsan/lsan_allocator.cpp | 89
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/lsan/lsan_allocator.h | 45
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.cpp | 780
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.h | 220
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_fuchsia.cpp | 31
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_linux.cpp | 5
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_mac.cpp | 120
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/lsan/lsan_fuchsia.cpp | 20
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/lsan/lsan_interceptors.cpp | 195
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/lsan/lsan_linux.cpp | 9
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/lsan/lsan_mac.cpp | 27
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/lsan/lsan_malloc_mac.cpp | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/lsan/lsan_posix.cpp | 46
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/lsan/lsan_thread.cpp | 100
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/lsan/lsan_thread.h | 21
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/memprof/memprof_allocator.cpp | 441
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/memprof/memprof_allocator.h | 5
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/memprof/memprof_descriptions.cpp | 8
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/memprof/memprof_flags.inc | 14
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors.cpp | 41
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors.h | 10
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors_memintrinsics.cpp | 63
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors_memintrinsics.h | 39
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/memprof/memprof_interface_internal.h | 1
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/memprof/memprof_internal.h | 18
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/memprof/memprof_linux.cpp | 7
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/memprof/memprof_malloc_linux.cpp | 103
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/memprof/memprof_mibmap.cpp | 37
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/memprof/memprof_mibmap.h | 27
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/memprof/memprof_rawprofile.cpp | 252
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/memprof/memprof_rawprofile.h | 15
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/memprof/memprof_rtl.cpp | 41
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/memprof/memprof_stats.cpp | 14
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/memprof/memprof_thread.cpp | 11
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/memprof/tests/driver.cpp | 14
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/memprof/tests/rawprofile.cpp | 170
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/msan/msan.cpp | 146
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/msan/msan.h | 294
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/msan/msan_allocator.cpp | 154
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/msan/msan_allocator.h | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/msan/msan_chained_origin_depot.cpp | 10
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/msan/msan_chained_origin_depot.h | 6
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/msan/msan_dl.cpp | 65
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/msan/msan_dl.h | 30
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/msan/msan_flags.inc | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/msan/msan_interceptors.cpp | 493
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/msan/msan_interface_internal.h | 11
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/msan/msan_linux.cpp | 133
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/msan/msan_new_delete.cpp | 26
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/msan/msan_poisoning.cpp | 11
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/msan/msan_report.cpp | 64
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/msan/msan_report.h | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/msan/msan_thread.cpp | 3
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/msan/msan_thread.h | 3
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/adt.h | 54
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/bitmask_enum.h | 151
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/c_api.h | 208
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/coff_platform.cpp | 775
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/coff_platform.h | 39
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/coff_platform.per_jd.cpp | 31
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/common.h | 6
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/compiler.h | 11
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/debug.cpp | 83
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/debug.h | 56
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/dlfcn_wrapper.cpp | 52
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/elfnix_platform.cpp | 624
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/elfnix_platform.h | 131
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/elfnix_tls.aarch64.S | 94
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/elfnix_tls.ppc64.S | 33
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/elfnix_tls.x86-64.S | 64
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/error.h | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/executor_address.h | 225
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/executor_symbol_def.h | 151
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/interval_map.h | 168
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/interval_set.h | 87
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/macho_platform.cpp | 1533
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/macho_platform.h | 91
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/macho_tlv.arm64.S | 92
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/macho_tlv.x86-64.S | 5
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/run_program_wrapper.cpp | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/simple_packed_serialization.h | 122
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/stl_extras.h | 41
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/string_pool.h | 172
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/tests/tools/orc-rt-executor.cpp | 49
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/tests/unit/adt_test.cpp | 50
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/tests/unit/bitmask_enum_test.cpp | 143
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/tests/unit/c_api_test.cpp | 200
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/tests/unit/endian_test.cpp | 174
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/tests/unit/error_test.cpp | 295
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/tests/unit/executor_address_test.cpp | 115
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/tests/unit/executor_symbol_def_test.cpp | 19
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/tests/unit/extensible_rtti_test.cpp | 54
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/tests/unit/interval_map_test.cpp | 204
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/tests/unit/interval_set_test.cpp | 121
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/tests/unit/orc_unit_test_main.cpp (renamed from contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_persistent_allocator.cpp) | 18
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/tests/unit/simple_packed_serialization_test.cpp | 184
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/tests/unit/simple_packed_serialization_utils.h | 34
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/tests/unit/string_pool_test.cpp | 66
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/tests/unit/wrapper_function_utils_test.cpp | 184
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/orc/wrapper_function_utils.h | 268
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/profile/GCDAProfiling.c | 25
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/profile/InstrProfiling.c | 35
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/profile/InstrProfiling.h | 144
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingBuffer.c | 104
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingFile.c | 607
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingInternal.c | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingInternal.h | 25
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingMerge.c | 165
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingNameVar.c | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformAIX.c | 215
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformDarwin.c | 18
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformFuchsia.c | 19
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformLinux.c | 108
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformOther.c | 30
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformWindows.c | 28
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingRuntime.cpp | 22
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingUtil.c | 26
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingValue.c | 18
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingVersionVar.c | 3
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingWriter.c | 87
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/profile/WindowsMMap.c | 3
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/profile/WindowsMMap.h | 6
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/safestack/safestack_platform.h | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sancov_flags.inc | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_addrhashmap.h | 46
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp | 97
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h | 18
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_bytemap.h | 107
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h | 18
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_dlsym.h | 79
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_interface.h | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_internal.h | 3
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h | 23
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h | 34
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_report.cpp | 3
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h | 18
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_size_class_map.h | 8
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_stats.h | 27
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_array_ref.h | 123
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_asm.h | 86
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang.h | 17
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_mips.h | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_chained_origin_depot.cpp | 90
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_chained_origin_depot.h | 50
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common.cpp | 83
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common.h | 161
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc | 1651
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_format.inc | 41
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc | 10
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_memintrinsics.inc | 244
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_netbsd_compat.inc | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_aarch64.inc.S | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_arm.inc.S | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_i386.inc.S | 5
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_loongarch64.inc.S | 57
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_riscv64.inc.S | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_x86_64.inc.S | 7
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interface.inc | 10
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interface_posix.inc | 3
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp | 103
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_nolibc.cpp | 3
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_syscalls.inc | 1579
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_coverage_fuchsia.cpp | 11
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_coverage_interface.inc | 10
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cpp | 104
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_deadlock_detector.h | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_dense_map.h | 705
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_dense_map_info.h | 282
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_dl.cpp | 37
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_dl.h | 26
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_errno.h | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_errno_codes.h | 1
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_file.cpp | 20
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_file.h | 7
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.cpp | 11
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.h | 8
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flags.cpp | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flags.inc | 33
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flat_map.h | 166
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_freebsd.h | 137
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp | 210
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_hash.h | 24
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_interceptors_ioctl_netbsd.inc | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_interface_internal.h | 211
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_internal_defs.h | 105
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_leb128.h | 87
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_libc.cpp | 43
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_libc.h | 32
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_libignore.cpp | 12
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_libignore.h | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp | 1992
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux.h | 137
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp | 552
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux_s390.cpp | 176
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_local_address_space_view.h | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_lzw.h | 159
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp | 439
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mac.h | 21
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mac_libcdep.cpp | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mallinfo.h | 42
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_malloc_mac.inc | 24
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mutex.cpp | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h | 281
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_openbsd.cpp | 0
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_persistent_allocator.h | 71
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform.h | 420
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h | 98
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cpp | 40
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.h | 240
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_linux.cpp | 63
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.cpp | 5
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.h | 5
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_openbsd.cpp | 0
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_openbsd.h | 0
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp | 154
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h | 153
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_solaris.cpp | 3
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_solaris.h | 3
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix.cpp | 30
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix.h | 12
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix_libcdep.cpp | 10
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_printf.cpp | 46
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps.h | 42
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_bsd.cpp | 27
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_common.cpp | 28
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_mac.cpp | 108
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_solaris.cpp | 50
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_quarantine.h | 29
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_range.cpp | 62
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_range.h | 40
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_redefine_builtins.h | 56
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_ring_buffer.h | 17
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_signal_interceptors.inc | 15
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_solaris.cpp | 22
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_solaris.h | 56
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp | 384
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h | 121
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp | 263
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.h | 43
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stackdepotbase.h | 202
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.cpp | 16
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h | 19
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_libcdep.cpp | 55
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.cpp | 188
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.h | 134
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_sparc.cpp | 8
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp | 20
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_mac.cpp | 19
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_netbsd_libcdep.cpp | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_win.cpp | 175
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_suppressions.cpp | 6
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer.cpp | 25
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer.h | 40
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_internal.h | 26
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libbacktrace.cpp | 6
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cpp | 111
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.cpp | 86
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.h | 5
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup.cpp | 228
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup.h | 79
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup_constants.h (renamed from contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_fuchsia.h) | 19
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup_fuchsia.cpp | 85
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp | 180
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_report.cpp | 93
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_report_fuchsia.cpp | 33
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_win.cpp | 24
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_syscall_generic.inc | 5
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_syscall_linux_hexagon.inc | 131
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_syscall_linux_loongarch64.inc | 171
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_syscalls_netbsd.inc | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_arg_retval.cpp | 109
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_arg_retval.h | 117
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.cpp | 80
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h | 20
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_safety.h | 45
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.cpp | 53
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.h | 26
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_type_traits.h | 79
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_unwind_fuchsia.cpp | 66
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_unwind_linux_libcdep.cpp | 8
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_unwind_win.cpp | 27
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_vector.h | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp | 98
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_win_dll_thunk.h | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/sanitizer_symbolize.cpp | 113
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/sanitizer_wrappers.cpp | 11
-rwxr-xr-x  contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/scripts/ar_to_bc.sh | 39
-rwxr-xr-x  contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh | 188
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt | 51
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/sanitizer_common/weak_symbols.txt | 3
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/scudo_allocator.cpp | 830
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/scudo_allocator.h | 125
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/scudo_allocator_combined.h | 75
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/scudo_allocator_secondary.h | 192
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/scudo_crc32.cpp | 24
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/scudo_crc32.h | 100
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/scudo_errors.cpp | 77
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/scudo_errors.h | 34
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/scudo_flags.cpp | 136
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/scudo_flags.h | 32
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/scudo_flags.inc | 48
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/scudo_interface_internal.h | 32
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/scudo_malloc.cpp | 84
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/scudo_new_delete.cpp | 107
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/scudo_platform.h | 93
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/scudo_termination.cpp | 41
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd.h | 65
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd_exclusive.cpp | 67
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd_exclusive.inc | 47
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd_shared.cpp | 107
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd_shared.inc | 56
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/scudo_utils.cpp | 145
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/scudo_utils.h | 36
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/allocator_common.h | 85
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/allocator_config.h | 318
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/atomic_helpers.h | 8
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/checksum.cpp | 1
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/checksum.h | 9
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/chunk.h | 20
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/combined.h | 370
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/common.cpp | 14
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/common.h | 69
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/condition_variable.h | 60
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/condition_variable_base.h | 56
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/condition_variable_linux.cpp | 52
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/condition_variable_linux.h | 38
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/crc32_hw.cpp | 5
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags.cpp | 3
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags.inc | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags_parser.cpp | 30
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags_parser.h | 3
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/fuchsia.cpp | 55
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/fuchsia.h | 3
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp | 10
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/include/scudo/interface.h | 40
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/internal_defs.h | 34
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/linux.cpp | 34
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/list.h | 12
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/local_cache.h | 137
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map.cpp | 84
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map.h | 92
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_base.h | 129
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_fuchsia.cpp | 252
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_fuchsia.h | 75
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_linux.cpp | 153
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_linux.h | 67
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/memtag.h | 53
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/mutex.h | 47
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/options.h | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/platform.h | 26
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/primary32.h | 921
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/primary64.h | 1563
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/quarantine.h | 30
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/release.cpp | 5
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/release.h | 644
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/report.cpp | 28
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/report.h | 7
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/report_linux.cpp | 58
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/report_linux.h | 34
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/secondary.h | 463
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/size_class_map.h | 65
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/stack_depot.h | 5
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/stats.h | 17
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/string_utils.cpp | 30
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/string_utils.h | 10
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/thread_annotations.h | 70
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/timing.cpp | 29
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/timing.h | 221
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/trusty.cpp | 68
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd.h | 46
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd_exclusive.h | 46
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd_shared.h | 62
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/vector.h | 57
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.cpp | 5
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.h | 18
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.inc | 145
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp | 36
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c_checks.h | 11
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_cpp.cpp | 84
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/dd/dd_interceptors.cpp | 3
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/dd/dd_rtl.cpp | 6
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/dd/dd_rtl.h | 6
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/go/test.c | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/go/tsan_go.cpp | 41
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan.syms.extra | 4
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_clock.cpp | 625
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_clock.h | 293
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_debugging.cpp | 16
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_defs.h | 100
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_dense_alloc.h | 134
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_dispatch_defs.h | 7
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_external.cpp | 40
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_fd.cpp | 113
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_fd.h | 3
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_flags.cpp | 19
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_flags.inc | 25
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_ignoreset.cpp | 12
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_ignoreset.h | 13
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_ilist.h | 189
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors.h | 110
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_libdispatch.cpp | 6
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp | 11
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_memintrinsics.cpp | 43
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp | 1108
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface.cpp | 99
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface.h | 30
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface.inc | 190
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp | 191
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp | 360
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface_inl.h | 133
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface_java.cpp | 294
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_malloc_mac.cpp | 30
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_mman.cpp | 135
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_mman.h | 53
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_mutexset.cpp | 48
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_mutexset.h | 60
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform.h | 1526
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp | 343
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp | 284
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform_posix.cpp | 92
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform_windows.cpp | 6
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_report.cpp | 142
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_report.h | 46
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp | 1350
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl.h | 805
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_aarch64.S | 37
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp | 744
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_amd64.S | 164
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_loongarch64.S | 196
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp | 707
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_proc.cpp | 1
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp | 678
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_riscv64.S | 203
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_s390x.S | 2
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp | 453
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_shadow.h | 193
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_spinlock_defs_mac.h | 45
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_stack_trace.cpp | 12
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_suppressions.cpp | 6
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_symbolize.cpp | 3
-rw-r--r--  contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_sync.cpp | 132
-rw-r--r--contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_sync.h80
-rw-r--r--contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_trace.h217
-rw-r--r--contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_update_shadow_word_inl.h59
-rw-r--r--contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_vector_clock.cpp126
-rw-r--r--contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_vector_clock.h51
-rw-r--r--contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_diag.cpp102
-rw-r--r--contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_diag.h20
-rw-r--r--contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers.cpp37
-rw-r--r--contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers.h11
-rw-r--r--contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers_cxx.cpp46
-rw-r--r--contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers_cxx.h18
-rw-r--r--contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_init.cpp7
-rw-r--r--contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_interface.inc4
-rw-r--r--contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_monitor.cpp3
-rw-r--r--contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_signals_standalone.cpp5
-rw-r--r--contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_type_hash_itanium.cpp23
-rw-r--r--contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_value.cpp6
-rw-r--r--contrib/llvm-project/compiler-rt/lib/ubsan_minimal/ubsan_minimal_handlers.cpp41
-rw-r--r--contrib/llvm-project/compiler-rt/lib/xray/xray_AArch64.cpp38
-rw-r--r--contrib/llvm-project/compiler-rt/lib/xray/xray_allocator.h16
-rw-r--r--contrib/llvm-project/compiler-rt/lib/xray/xray_basic_flags.h2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/xray/xray_basic_logging.cpp6
-rw-r--r--contrib/llvm-project/compiler-rt/lib/xray/xray_buffer_queue.cpp2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/xray/xray_fdr_logging.cpp15
-rw-r--r--contrib/llvm-project/compiler-rt/lib/xray/xray_flags.h2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/xray/xray_hexagon.cpp168
-rw-r--r--contrib/llvm-project/compiler-rt/lib/xray/xray_init.cpp2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/xray/xray_interface.cpp58
-rw-r--r--contrib/llvm-project/compiler-rt/lib/xray/xray_interface_internal.h16
-rw-r--r--contrib/llvm-project/compiler-rt/lib/xray/xray_loongarch64.cpp160
-rw-r--r--contrib/llvm-project/compiler-rt/lib/xray/xray_powerpc64.inc15
-rw-r--r--contrib/llvm-project/compiler-rt/lib/xray/xray_profiling.cpp4
-rw-r--r--contrib/llvm-project/compiler-rt/lib/xray/xray_trampoline_AArch64.S286
-rw-r--r--contrib/llvm-project/compiler-rt/lib/xray/xray_trampoline_hexagon.S99
-rw-r--r--contrib/llvm-project/compiler-rt/lib/xray/xray_trampoline_loongarch64.S124
-rw-r--r--contrib/llvm-project/compiler-rt/lib/xray/xray_trampoline_x86_64.S28
-rw-r--r--contrib/llvm-project/compiler-rt/lib/xray/xray_tsc.h3
-rw-r--r--contrib/llvm-project/compiler-rt/lib/xray/xray_utils.cpp2
-rw-r--r--contrib/llvm-project/compiler-rt/lib/xray/xray_x86_64.cpp48
790 files changed, 48300 insertions, 23826 deletions
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_activation.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_activation.cpp
index 795df95a5414..1757838600ca 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_activation.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_activation.cpp
@@ -112,7 +112,7 @@ void AsanDeactivate() {
disabled.quarantine_size_mb = 0;
disabled.thread_local_quarantine_size_kb = 0;
// Redzone must be at least Max(16, granularity) bytes long.
- disabled.min_redzone = Max(16, (int)SHADOW_GRANULARITY);
+ disabled.min_redzone = Max(16, (int)ASAN_SHADOW_GRANULARITY);
disabled.max_redzone = disabled.min_redzone;
disabled.alloc_dealloc_mismatch = false;
disabled.may_return_null = true;
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_allocator.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_allocator.cpp
index 414fba3b427d..22dcf6132707 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_allocator.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_allocator.cpp
@@ -16,6 +16,7 @@
#include "asan_allocator.h"
+#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
@@ -24,6 +25,7 @@
#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
@@ -102,19 +104,18 @@ class ChunkHeader {
public:
uptr UsedSize() const {
- uptr R = user_requested_size_lo;
- if (sizeof(uptr) > sizeof(user_requested_size_lo))
- R += (uptr)user_requested_size_hi << (8 * sizeof(user_requested_size_lo));
- return R;
+ static_assert(sizeof(user_requested_size_lo) == 4,
+ "Expression below requires this");
+ return FIRST_32_SECOND_64(0, ((uptr)user_requested_size_hi << 32)) +
+ user_requested_size_lo;
}
void SetUsedSize(uptr size) {
user_requested_size_lo = size;
- if (sizeof(uptr) > sizeof(user_requested_size_lo)) {
- size >>= (8 * sizeof(user_requested_size_lo));
- user_requested_size_hi = size;
- CHECK_EQ(user_requested_size_hi, size);
- }
+ static_assert(sizeof(user_requested_size_lo) == 4,
+ "Expression below requires this");
+ user_requested_size_hi = FIRST_32_SECOND_64(0, size >> 32);
+ CHECK_EQ(UsedSize(), size);
}
void SetAllocContext(u32 tid, u32 stack) {
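The two helpers above pack the 64-bit requested size into a 32-bit low word
(user_requested_size_lo) plus a 32-bit high word (user_requested_size_hi);
FIRST_32_SECOND_64 keeps the high word zero on 32-bit targets. A minimal
standalone sketch of the same packing, with hypothetical names:

    // Standalone sketch of the 32+32 size packing; names are hypothetical.
    #include <cassert>
    #include <cstdint>

    struct PackedSize {
      uint32_t lo = 0;  // low 32 bits of the requested size
      uint32_t hi = 0;  // high 32 bits; stays 0 on 32-bit targets

      void Set(uint64_t size) {
        lo = static_cast<uint32_t>(size);
        hi = static_cast<uint32_t>(size >> 32);
        assert(Get() == size);  // mirrors the CHECK_EQ(UsedSize(), size) above
      }
      uint64_t Get() const { return (static_cast<uint64_t>(hi) << 32) | lo; }
    };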
@@ -191,29 +192,56 @@ class LargeChunkHeader {
}
};
+static void FillChunk(AsanChunk *m) {
+ // FIXME: Use ReleaseMemoryPagesToOS.
+ Flags &fl = *flags();
+
+ if (fl.max_free_fill_size > 0) {
+ // We have to skip the chunk header, it contains free_context_id.
+ uptr scribble_start = (uptr)m + kChunkHeaderSize + kChunkHeader2Size;
+ if (m->UsedSize() >= kChunkHeader2Size) { // Skip Header2 in user area.
+ uptr size_to_fill = m->UsedSize() - kChunkHeader2Size;
+ size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size);
+ REAL(memset)((void *)scribble_start, fl.free_fill_byte, size_to_fill);
+ }
+ }
+}
+
struct QuarantineCallback {
QuarantineCallback(AllocatorCache *cache, BufferedStackTrace *stack)
: cache_(cache),
stack_(stack) {
}
- void Recycle(AsanChunk *m) {
+ void PreQuarantine(AsanChunk *m) const {
+ FillChunk(m);
+ // Poison the region.
+ PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), ASAN_SHADOW_GRANULARITY),
+ kAsanHeapFreeMagic);
+ }
+
+ void Recycle(AsanChunk *m) const {
void *p = get_allocator().GetBlockBegin(m);
- if (p != m) {
- // Clear the magic value, as allocator internals may overwrite the
- // contents of deallocated chunk, confusing GetAsanChunk lookup.
- reinterpret_cast<LargeChunkHeader *>(p)->Set(nullptr);
- }
- u8 old_chunk_state = CHUNK_QUARANTINE;
- if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
- CHUNK_INVALID, memory_order_acquire)) {
- CHECK_EQ(old_chunk_state, CHUNK_QUARANTINE);
- }
+    // The secondary allocator immediately unpoisons and unmaps the memory on
+    // deallocation, so the work below is only needed for the primary.
+ if (get_allocator().FromPrimary(p)) {
+ if (p != m) {
+ // Clear the magic value, as allocator internals may overwrite the
+ // contents of deallocated chunk, confusing GetAsanChunk lookup.
+ reinterpret_cast<LargeChunkHeader *>(p)->Set(nullptr);
+ }
+
+ u8 old_chunk_state = CHUNK_QUARANTINE;
+ if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
+ CHUNK_INVALID,
+ memory_order_acquire)) {
+ CHECK_EQ(old_chunk_state, CHUNK_QUARANTINE);
+ }
- PoisonShadow(m->Beg(),
- RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
- kAsanHeapLeftRedzoneMagic);
+ PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), ASAN_SHADOW_GRANULARITY),
+ kAsanHeapLeftRedzoneMagic);
+ }
// Statistics.
AsanStats &thread_stats = GetCurrentThreadStats();
@@ -223,7 +251,17 @@ struct QuarantineCallback {
get_allocator().Deallocate(cache_, p);
}
- void *Allocate(uptr size) {
+ void RecyclePassThrough(AsanChunk *m) const {
+    // For the secondary, Recycle will immediately unpoison and unmap the
+    // memory, so quarantine preparation is unnecessary.
+ if (get_allocator().FromPrimary(m)) {
+ // The primary allocation may need pattern fill if enabled.
+ FillChunk(m);
+ }
+ Recycle(m);
+ }
+
+ void *Allocate(uptr size) const {
void *res = get_allocator().Allocate(cache_, size, 1);
// TODO(alekseys): Consider making quarantine OOM-friendly.
if (UNLIKELY(!res))
@@ -231,9 +269,7 @@ struct QuarantineCallback {
return res;
}
- void Deallocate(void *p) {
- get_allocator().Deallocate(cache_, p);
- }
+ void Deallocate(void *p) const { get_allocator().Deallocate(cache_, p); }
private:
AllocatorCache* const cache_;
@@ -250,6 +286,22 @@ void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
thread_stats.mmaps++;
thread_stats.mmaped += size;
}
+
+void AsanMapUnmapCallback::OnMapSecondary(uptr p, uptr size, uptr user_begin,
+ uptr user_size) const {
+ uptr user_end = RoundDownTo(user_begin + user_size, ASAN_SHADOW_GRANULARITY);
+ user_begin = RoundUpTo(user_begin, ASAN_SHADOW_GRANULARITY);
+  // The secondary mapping will be returned to the user right away, so there
+  // is no value in poisoning it with a non-zero pattern just before Allocate()
+  // unpoisons it. Just poison the head and tail, which stay invisible to
+  // Allocate().
+ PoisonShadow(p, user_begin - p, kAsanHeapLeftRedzoneMagic);
+ PoisonShadow(user_end, size - (user_end - p), kAsanHeapLeftRedzoneMagic);
+ // Statistics.
+ AsanStats &thread_stats = GetCurrentThreadStats();
+ thread_stats.mmaps++;
+ thread_stats.mmaped += size;
+}
+
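OnMapSecondary leaves the user range for Allocate() to unpoison and only
poisons the slack around it. A hedged sketch of the rounding, assuming an
8-byte shadow granularity (the real value is ASAN_SHADOW_GRANULARITY):

    #include <cstdint>
    using uptr = uintptr_t;

    constexpr uptr kGran = 8;  // stand-in for ASAN_SHADOW_GRANULARITY
    constexpr uptr RoundUp(uptr x) { return (x + kGran - 1) & ~(kGran - 1); }
    constexpr uptr RoundDown(uptr x) { return x & ~(kGran - 1); }

    // For a mapping [p, p + size) holding user data at
    // [user_begin, user_begin + user_size), only two ranges get poisoned:
    //   head: [p, RoundUp(user_begin))
    //   tail: [RoundDown(user_begin + user_size), p + size)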
void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
PoisonShadow(p, size, 0);
// We are about to unmap a chunk of user memory.
@@ -306,7 +358,6 @@ struct Allocator {
QuarantineCache fallback_quarantine_cache;
uptr max_user_defined_malloc_size;
- atomic_uint8_t rss_limit_exceeded;
// ------------------- Options --------------------------
atomic_uint16_t min_redzone;
@@ -346,14 +397,6 @@ struct Allocator {
: kMaxAllowedMallocSize;
}
- bool RssLimitExceeded() {
- return atomic_load(&rss_limit_exceeded, memory_order_relaxed);
- }
-
- void SetRssLimitExceeded(bool limit_exceeded) {
- atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed);
- }
-
void RePoisonChunk(uptr chunk) {
// This could be a user-facing chunk (with redzones), or some internal
// housekeeping chunk, like TransferBatch. Start by assuming the former.
@@ -367,7 +410,7 @@ struct Allocator {
if (chunk < beg && beg < end && end <= chunk_end) {
// Looks like a valid AsanChunk in use, poison redzones only.
PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
- uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY);
+ uptr end_aligned_down = RoundDownTo(end, ASAN_SHADOW_GRANULARITY);
FastPoisonShadowPartialRightRedzone(
end_aligned_down, end - end_aligned_down,
chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
@@ -398,8 +441,9 @@ struct Allocator {
}
void GetOptions(AllocatorOptions *options) const {
- options->quarantine_size_mb = quarantine.GetSize() >> 20;
- options->thread_local_quarantine_size_kb = quarantine.GetCacheSize() >> 10;
+ options->quarantine_size_mb = quarantine.GetMaxSize() >> 20;
+ options->thread_local_quarantine_size_kb =
+ quarantine.GetMaxCacheSize() >> 10;
options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
options->may_return_null = AllocatorMayReturnNull();
@@ -483,16 +527,16 @@ struct Allocator {
// -------------------- Allocation/Deallocation routines ---------------
void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
AllocType alloc_type, bool can_fill) {
- if (UNLIKELY(!asan_inited))
+ if (UNLIKELY(!AsanInited()))
AsanInitFromRtl();
- if (RssLimitExceeded()) {
+ if (UNLIKELY(IsRssLimitExceeded())) {
if (AllocatorMayReturnNull())
return nullptr;
ReportRssLimitExceeded(stack);
}
Flags &fl = *flags();
CHECK(stack);
- const uptr min_alignment = SHADOW_GRANULARITY;
+ const uptr min_alignment = ASAN_SHADOW_GRANULARITY;
const uptr user_requested_alignment_log =
ComputeUserRequestedAlignmentLog(alignment);
if (alignment < min_alignment)
@@ -513,16 +557,17 @@ struct Allocator {
uptr needed_size = rounded_size + rz_size;
if (alignment > min_alignment)
needed_size += alignment;
+ bool from_primary = PrimaryAllocator::CanAllocate(needed_size, alignment);
// If we are allocating from the secondary allocator, there will be no
// automatic right redzone, so add the right redzone manually.
- if (!PrimaryAllocator::CanAllocate(needed_size, alignment))
+ if (!from_primary)
needed_size += rz_size;
CHECK(IsAligned(needed_size, min_alignment));
if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize ||
size > max_user_defined_malloc_size) {
if (AllocatorMayReturnNull()) {
Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
- (void*)size);
+ size);
return nullptr;
}
uptr malloc_limit =
@@ -547,15 +592,6 @@ struct Allocator {
ReportOutOfMemory(size, stack);
}
- if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
- // Heap poisoning is enabled, but the allocator provides an unpoisoned
- // chunk. This is possible if CanPoisonMemory() was false for some
- // time, for example, due to flags()->start_disabled.
- // Anyway, poison the block before using it for anything else.
- uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
- PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
- }
-
uptr alloc_beg = reinterpret_cast<uptr>(allocated);
uptr alloc_end = alloc_beg + needed_size;
uptr user_beg = alloc_beg + rz_size;
@@ -572,8 +608,19 @@ struct Allocator {
m->SetAllocContext(t ? t->tid() : kMainTid, StackDepotPut(*stack));
+ if (!from_primary || *(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0) {
+ // The allocator provides an unpoisoned chunk. This is possible for the
+ // secondary allocator, or if CanPoisonMemory() was false for some time,
+ // for example, due to flags()->start_disabled. Anyway, poison left and
+ // right of the block before using it for anything else.
+ uptr tail_beg = RoundUpTo(user_end, ASAN_SHADOW_GRANULARITY);
+ uptr tail_end = alloc_beg + allocator.GetActuallyAllocatedSize(allocated);
+ PoisonShadow(alloc_beg, user_beg - alloc_beg, kAsanHeapLeftRedzoneMagic);
+ PoisonShadow(tail_beg, tail_end - tail_beg, kAsanHeapLeftRedzoneMagic);
+ }
+
uptr size_rounded_down_to_granularity =
- RoundDownTo(size, SHADOW_GRANULARITY);
+ RoundDownTo(size, ASAN_SHADOW_GRANULARITY);
// Unpoison the bulk of the memory region.
if (size_rounded_down_to_granularity)
PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
@@ -581,7 +628,7 @@ struct Allocator {
if (size != size_rounded_down_to_granularity && CanPoisonMemory()) {
u8 *shadow =
(u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity);
- *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
+ *shadow = fl.poison_partial ? (size & (ASAN_SHADOW_GRANULARITY - 1)) : 0;
}
AsanStats &thread_stats = GetCurrentThreadStats();
@@ -608,7 +655,7 @@ struct Allocator {
CHECK_LE(alloc_beg + sizeof(LargeChunkHeader), chunk_beg);
reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Set(m);
}
- ASAN_MALLOC_HOOK(res, size);
+ RunMallocHooks(res, size);
return res;
}
@@ -639,26 +686,6 @@ struct Allocator {
AsanThread *t = GetCurrentThread();
m->SetFreeContext(t ? t->tid() : 0, StackDepotPut(*stack));
- Flags &fl = *flags();
- if (fl.max_free_fill_size > 0) {
- // We have to skip the chunk header, it contains free_context_id.
- uptr scribble_start = (uptr)m + kChunkHeaderSize + kChunkHeader2Size;
- if (m->UsedSize() >= kChunkHeader2Size) { // Skip Header2 in user area.
- uptr size_to_fill = m->UsedSize() - kChunkHeader2Size;
- size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size);
- REAL(memset)((void *)scribble_start, fl.free_fill_byte, size_to_fill);
- }
- }
-
- // Poison the region.
- PoisonShadow(m->Beg(),
- RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
- kAsanHeapFreeMagic);
-
- AsanStats &thread_stats = GetCurrentThreadStats();
- thread_stats.frees++;
- thread_stats.freed += m->UsedSize();
-
// Push into quarantine.
if (t) {
AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
@@ -690,7 +717,7 @@ struct Allocator {
return;
}
- ASAN_FREE_HOOK(ptr);
+ RunFreeHooks(ptr);
// Must mark the chunk as quarantined before any changes to its metadata.
// Do not quarantine given chunk if we failed to set CHUNK_QUARANTINE flag.
@@ -711,6 +738,10 @@ struct Allocator {
}
}
+ AsanStats &thread_stats = GetCurrentThreadStats();
+ thread_stats.frees++;
+ thread_stats.freed += m->UsedSize();
+
QuarantineChunk(m, ptr, stack);
}
@@ -810,13 +841,17 @@ struct Allocator {
return m->UsedSize();
}
+ uptr AllocationSizeFast(uptr p) {
+ return reinterpret_cast<AsanChunk *>(p - kChunkHeaderSize)->UsedSize();
+ }
+
AsanChunkView FindHeapChunkByAddress(uptr addr) {
AsanChunk *m1 = GetAsanChunkByAddr(addr);
sptr offset = 0;
if (!m1 || AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
// The address is in the chunk's left redzone, so maybe it is actually
- // a right buffer overflow from the other chunk to the left.
- // Search a bit to the left to see if there is another chunk.
+    // a right buffer overflow from the preceding chunk.
+    // Search a bit earlier to see if there is another chunk.
AsanChunk *m2 = nullptr;
for (uptr l = 1; l < GetPageSizeCached(); l++) {
m2 = GetAsanChunkByAddr(addr - l);
@@ -852,12 +887,12 @@ struct Allocator {
quarantine.PrintStats();
}
- void ForceLock() ACQUIRE(fallback_mutex) {
+ void ForceLock() SANITIZER_ACQUIRE(fallback_mutex) {
allocator.ForceLock();
fallback_mutex.Lock();
}
- void ForceUnlock() RELEASE(fallback_mutex) {
+ void ForceUnlock() SANITIZER_RELEASE(fallback_mutex) {
fallback_mutex.Unlock();
allocator.ForceUnlock();
}
@@ -908,13 +943,6 @@ AllocType AsanChunkView::GetAllocType() const {
return (AllocType)chunk_->alloc_type;
}
-static StackTrace GetStackTraceFromId(u32 id) {
- CHECK(id);
- StackTrace res = StackDepotGet(id);
- CHECK(res.trace);
- return res;
-}
-
u32 AsanChunkView::GetAllocStackId() const {
u32 tid = 0;
u32 stack = 0;
@@ -931,14 +959,6 @@ u32 AsanChunkView::GetFreeStackId() const {
return stack;
}
-StackTrace AsanChunkView::GetAllocStack() const {
- return GetStackTraceFromId(GetAllocStackId());
-}
-
-StackTrace AsanChunkView::GetFreeStack() const {
- return GetStackTraceFromId(GetFreeStackId());
-}
-
void InitializeAllocator(const AllocatorOptions &options) {
instance.InitLinkerInitialized(options);
}
@@ -1081,14 +1101,12 @@ uptr asan_mz_size(const void *ptr) {
return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
}
-void asan_mz_force_lock() NO_THREAD_SAFETY_ANALYSIS { instance.ForceLock(); }
-
-void asan_mz_force_unlock() NO_THREAD_SAFETY_ANALYSIS {
- instance.ForceUnlock();
+void asan_mz_force_lock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
+ instance.ForceLock();
}
-void AsanSoftRssLimitExceededCallback(bool limit_exceeded) {
- instance.SetRssLimitExceeded(limit_exceeded);
+void asan_mz_force_unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
+ instance.ForceUnlock();
}
} // namespace __asan
@@ -1123,10 +1141,16 @@ uptr PointsIntoChunk(void *p) {
}
uptr GetUserBegin(uptr chunk) {
+  // FIXME: All use cases provide the chunk address; GetAsanChunkByAddrFastLocked is
+ // not needed.
__asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
return m ? m->Beg() : 0;
}
+uptr GetUserAddr(uptr chunk) {
+ return chunk;
+}
+
LsanMetadata::LsanMetadata(uptr chunk) {
metadata_ = chunk ? reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize)
: nullptr;
@@ -1167,7 +1191,7 @@ void ForEachChunk(ForEachChunkCallback callback, void *arg) {
__asan::get_allocator().ForEachChunk(callback, arg);
}
-IgnoreObjectResult IgnoreObjectLocked(const void *p) {
+IgnoreObjectResult IgnoreObject(const void *p) {
uptr addr = reinterpret_cast<uptr>(p);
__asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
if (!m ||
@@ -1182,38 +1206,22 @@ IgnoreObjectResult IgnoreObjectLocked(const void *p) {
return kIgnoreObjectSuccess;
}
-void GetAdditionalThreadContextPtrs(ThreadContextBase *tctx, void *ptrs) {
- // Look for the arg pointer of threads that have been created or are running.
- // This is necessary to prevent false positive leaks due to the AsanThread
- // holding the only live reference to a heap object. This can happen because
- // the `pthread_create()` interceptor doesn't wait for the child thread to
- // start before returning and thus loosing the the only live reference to the
- // heap object on the stack.
-
- __asan::AsanThreadContext *atctx =
- reinterpret_cast<__asan::AsanThreadContext *>(tctx);
- __asan::AsanThread *asan_thread = atctx->thread;
-
- // Note ThreadStatusRunning is required because there is a small window where
- // the thread status switches to `ThreadStatusRunning` but the `arg` pointer
- // still isn't on the stack yet.
- if (atctx->status != ThreadStatusCreated &&
- atctx->status != ThreadStatusRunning)
- return;
-
- uptr thread_arg = reinterpret_cast<uptr>(asan_thread->get_arg());
- if (!thread_arg)
- return;
-
- auto ptrsVec = reinterpret_cast<InternalMmapVector<uptr> *>(ptrs);
- ptrsVec->push_back(thread_arg);
-}
-
} // namespace __lsan
// ---------------------- Interface ---------------- {{{1
using namespace __asan;
+static const void *AllocationBegin(const void *p) {
+ AsanChunk *m = __asan::instance.GetAsanChunkByAddr((uptr)p);
+ if (!m)
+ return nullptr;
+ if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
+ return nullptr;
+ if (m->UsedSize() == 0)
+ return nullptr;
+ return (const void *)(m->Beg());
+}
+
// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc here.
uptr __sanitizer_get_estimated_allocated_size(uptr size) {
@@ -1237,6 +1245,17 @@ uptr __sanitizer_get_allocated_size(const void *p) {
return allocated_size;
}
+uptr __sanitizer_get_allocated_size_fast(const void *p) {
+ DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
+ uptr ret = instance.AllocationSizeFast(reinterpret_cast<uptr>(p));
+ DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
+ return ret;
+}
+
+const void *__sanitizer_get_allocated_begin(const void *p) {
+ return AllocationBegin(p);
+}
+
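A short usage sketch of the two interface functions added above (declared in
<sanitizer/allocator_interface.h>); the fast variant is only valid for the
exact allocation start, which is what the DCHECKs enforce. Build with
-fsanitize=address:

    #include <sanitizer/allocator_interface.h>
    #include <cstdlib>

    int main() {
      char *p = static_cast<char *>(malloc(100));
      // An interior pointer maps back to the allocation start...
      const void *beg = __sanitizer_get_allocated_begin(p + 42);  // == p
      // ...and the fast size query requires that exact start.
      size_t n = beg ? __sanitizer_get_allocated_size_fast(beg) : 0;  // == 100
      free(p);
      return static_cast<int>(n);
    }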
void __sanitizer_purge_allocator() {
GET_STACK_TRACE_MALLOC;
instance.Purge(&stack);
@@ -1246,16 +1265,3 @@ int __asan_update_allocation_context(void* addr) {
GET_STACK_TRACE_MALLOC;
return instance.UpdateAllocationStack((uptr)addr, &stack);
}
-
-#if !SANITIZER_SUPPORTS_WEAK_HOOKS
-// Provide default (no-op) implementation of malloc hooks.
-SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook,
- void *ptr, uptr size) {
- (void)ptr;
- (void)size;
-}
-
-SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *ptr) {
- (void)ptr;
-}
-#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_allocator.h b/contrib/llvm-project/compiler-rt/lib/asan/asan_allocator.h
index 2963e979b55c..c3c4fae85b12 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_allocator.h
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_allocator.h
@@ -64,8 +64,6 @@ class AsanChunkView {
bool Eq(const AsanChunkView &c) const { return chunk_ == c.chunk_; }
u32 GetAllocStackId() const;
u32 GetFreeStackId() const;
- StackTrace GetAllocStack() const;
- StackTrace GetFreeStack() const;
AllocType GetAllocType() const;
bool AddrIsInside(uptr addr, uptr access_size, sptr *offset) const {
if (addr >= Beg() && (addr + access_size) <= End()) {
@@ -116,46 +114,110 @@ class AsanChunkFifoList: public IntrusiveList<AsanChunk> {
struct AsanMapUnmapCallback {
void OnMap(uptr p, uptr size) const;
+ void OnMapSecondary(uptr p, uptr size, uptr user_begin, uptr user_size) const;
void OnUnmap(uptr p, uptr size) const;
};
#if SANITIZER_CAN_USE_ALLOCATOR64
# if SANITIZER_FUCHSIA
+// This is a sentinel indicating we do not want the primary allocator arena to
+// be placed at a fixed address. It will be anonymously mmap'd.
const uptr kAllocatorSpace = ~(uptr)0;
-const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
+# if SANITIZER_RISCV64
+
+// These are sanitizer tunings that allow all bringup tests for RISCV-64 Sv39 +
+// Fuchsia to run asan-instrumented. That is, we can run bringup, e2e,
+// libc, and scudo tests with this configuration.
+//
+// TODO: This is specifically tuned for Sv39. 48/57 will likely require other
+// tunings, or possibly use the same tunings Fuchsia uses for other archs. The
+// VMA size isn't technically tied to the Fuchsia System ABI, so once 48/57 is
+// supported, we'd need a way of dynamically checking what the VMA size is and
+// determining optimal configuration.
+
+// This indicates the total amount of space dedicated to the primary allocator
+// during initialization. This is roughly proportional to the size set by the
+// FuchsiaConfig for scudo (~11.25GB == ~2^33.49). Requesting any more could
+// lead to some failures in sanitized bringup tests where we can't allocate new
+// vmars because there wouldn't be enough contiguous space. We could try 2^34 if
+// we re-evaluate the SizeClassMap settings.
+const uptr kAllocatorSize = UINT64_C(1) << 33; // 8GB
+
+// This is roughly equivalent to the configuration for the VeryDenseSizeClassMap
+// but has fewer size classes (ideally at most 32). Fewer size classes mean a
+// larger region for each class, and thus fewer chances of running out of
+// space for each region. The main differences are the MidSizeLog (which is
+// smaller) and the MaxSizeLog (which is larger).
+//
+// - The MaxSizeLog is higher to allow some of the largest allocations I've
+// observed to be placed in the primary allocator's arena as opposed to being
+// mmap'd by the secondary allocator. This helps reduce fragmentation from
+//   large classes. A huge example of this is the scudo allocator tests (and its
+// testing infrastructure) which malloc's/new's objects on the order of
+// hundreds of kilobytes which normally would not be in the primary allocator
+// arena with the default VeryDenseSizeClassMap.
+// - The MidSizeLog is reduced to help shrink the number of size classes and
+// increase region size. Without this, we'd see ASan complain many times about
+// a region running out of available space.
+//
+// This differs a bit from the Fuchsia config in scudo, mainly in the NumBits,
+// MaxSizeLog, and NumCachedHintT. This should place the number of size classes
+// for scudo at 45 and some large objects allocated by this config would be
+// placed in the arena whereas scudo would mmap them. The asan allocator needs
+// to have a number of classes that are a power of 2 for various internal things
+// to work, so we can't match the scudo settings to a tee. The sanitizer
+// allocator is slightly slower than scudo's but this is enough to get
+// memory-intensive scudo tests to run with asan instrumentation.
+typedef SizeClassMap</*kNumBits=*/2,
+ /*kMinSizeLog=*/5,
+ /*kMidSizeLog=*/8,
+ /*kMaxSizeLog=*/18,
+ /*kNumCachedHintT=*/8,
+ /*kMaxBytesCachedLog=*/10>
+ SizeClassMap;
+static_assert(SizeClassMap::kNumClassesRounded <= 32,
+ "The above tunings were specifically selected to ensure there "
+ "would be at most 32 size classes. This restriction could be "
+ "loosened to 64 size classes if we can find a configuration of "
+ "allocator size and SizeClassMap tunings that allows us to "
+ "reliably run all bringup tests in a sanitized environment.");
+
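Reading those tunings back (the linear-spacing note follows from the general
SizeClassMap scheme in sanitizer_common and is stated here as an assumption):

    // kMinSizeLog=5  -> smallest class is 32 bytes
    // kMidSizeLog=8  -> classes up to 256 bytes are linearly spaced
    // kMaxSizeLog=18 -> largest primary-served size is 256 KiB
    static_assert((1u << 5) == 32 && (1u << 8) == 256 &&
                  (1u << 18) == 256 * 1024, "powers of two above");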
+# else
+// These are the default allocator tunings for non-RISCV environments where the
+// VMA is usually 48 bits and we have lots of space.
+const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
typedef DefaultSizeClassMap SizeClassMap;
-# elif defined(__powerpc64__)
+# endif
+# elif defined(__powerpc64__)
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
typedef DefaultSizeClassMap SizeClassMap;
-# elif defined(__aarch64__) && SANITIZER_ANDROID
+# elif defined(__aarch64__) && SANITIZER_ANDROID
// Android needs to support 39, 42 and 48 bit VMA.
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x2000000000ULL; // 128G.
typedef VeryCompactSizeClassMap SizeClassMap;
-#elif SANITIZER_RISCV64
+# elif SANITIZER_RISCV64
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x2000000000ULL; // 128G.
typedef VeryDenseSizeClassMap SizeClassMap;
-# elif defined(__aarch64__)
-// AArch64/SANITIZER_CAN_USE_ALLOCATOR64 is only for 42-bit VMA
-// so no need to different values for different VMA.
-const uptr kAllocatorSpace = 0x10000000000ULL;
-const uptr kAllocatorSize = 0x10000000000ULL; // 3T.
-typedef DefaultSizeClassMap SizeClassMap;
-#elif defined(__sparc__)
+# elif defined(__sparc__)
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
typedef DefaultSizeClassMap SizeClassMap;
-# elif SANITIZER_WINDOWS
+# elif SANITIZER_WINDOWS
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x8000000000ULL; // 500G
typedef DefaultSizeClassMap SizeClassMap;
-# else
+# elif SANITIZER_APPLE
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
typedef DefaultSizeClassMap SizeClassMap;
-# endif
+# else
+const uptr kAllocatorSpace = 0x500000000000ULL;
+const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
+typedef DefaultSizeClassMap SizeClassMap;
+# endif
template <typename AddressSpaceViewTy>
struct AP64 { // Allocator64 parameters. Deliberately using a short name.
static const uptr kSpaceBeg = kAllocatorSpace;
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_debugging.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_debugging.cpp
index c01360b52fc9..f078f1041a87 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_debugging.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_debugging.cpp
@@ -19,6 +19,7 @@
#include "asan_mapping.h"
#include "asan_report.h"
#include "asan_thread.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
namespace {
using namespace __asan;
@@ -54,11 +55,11 @@ uptr AsanGetStack(uptr addr, uptr *trace, u32 size, u32 *thread_id,
StackTrace stack(nullptr, 0);
if (alloc_stack) {
if (chunk.AllocTid() == kInvalidTid) return 0;
- stack = chunk.GetAllocStack();
+ stack = StackDepotGet(chunk.GetAllocStackId());
if (thread_id) *thread_id = chunk.AllocTid();
} else {
if (chunk.FreeTid() == kInvalidTid) return 0;
- stack = chunk.GetFreeStack();
+ stack = StackDepotGet(chunk.GetFreeStackId());
if (thread_id) *thread_id = chunk.FreeTid();
}
@@ -140,7 +141,7 @@ uptr __asan_get_free_stack(uptr addr, uptr *trace, uptr size, u32 *thread_id) {
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_get_shadow_mapping(uptr *shadow_scale, uptr *shadow_offset) {
if (shadow_scale)
- *shadow_scale = SHADOW_SCALE;
+ *shadow_scale = ASAN_SHADOW_SCALE;
if (shadow_offset)
- *shadow_offset = SHADOW_OFFSET;
+ *shadow_offset = ASAN_SHADOW_OFFSET;
}
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_descriptions.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_descriptions.cpp
index 2ba8a02f8410..ef6f3e0a096f 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_descriptions.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_descriptions.cpp
@@ -49,14 +49,14 @@ void DescribeThread(AsanThreadContext *context) {
}
context->announced = true;
InternalScopedString str;
- str.append("Thread %s", AsanThreadIdAndName(context).c_str());
+ str.AppendF("Thread %s", AsanThreadIdAndName(context).c_str());
if (context->parent_tid == kInvalidTid) {
- str.append(" created by unknown thread\n");
+ str.Append(" created by unknown thread\n");
Printf("%s", str.data());
return;
}
- str.append(" created by %s here:\n",
- AsanThreadIdAndName(context->parent_tid).c_str());
+ str.AppendF(" created by %s here:\n",
+ AsanThreadIdAndName(context->parent_tid).c_str());
Printf("%s", str.data());
StackDepotGet(context->stack_id).Print();
// Recursively described parent thread if needed.
@@ -126,29 +126,29 @@ static void GetAccessToHeapChunkInformation(ChunkAccess *descr,
static void PrintHeapChunkAccess(uptr addr, const ChunkAccess &descr) {
Decorator d;
InternalScopedString str;
- str.append("%s", d.Location());
+ str.Append(d.Location());
switch (descr.access_type) {
case kAccessTypeLeft:
- str.append("%p is located %zd bytes to the left of",
- (void *)descr.bad_addr, descr.offset);
+ str.AppendF("%p is located %zd bytes before", (void *)descr.bad_addr,
+ descr.offset);
break;
case kAccessTypeRight:
- str.append("%p is located %zd bytes to the right of",
- (void *)descr.bad_addr, descr.offset);
+ str.AppendF("%p is located %zd bytes after", (void *)descr.bad_addr,
+ descr.offset);
break;
case kAccessTypeInside:
- str.append("%p is located %zd bytes inside of", (void *)descr.bad_addr,
- descr.offset);
+ str.AppendF("%p is located %zd bytes inside of", (void *)descr.bad_addr,
+ descr.offset);
break;
case kAccessTypeUnknown:
- str.append(
+ str.AppendF(
"%p is located somewhere around (this is AddressSanitizer bug!)",
(void *)descr.bad_addr);
}
- str.append(" %zu-byte region [%p,%p)\n", descr.chunk_size,
- (void *)descr.chunk_begin,
- (void *)(descr.chunk_begin + descr.chunk_size));
- str.append("%s", d.Default());
+ str.AppendF(" %zu-byte region [%p,%p)\n", descr.chunk_size,
+ (void *)descr.chunk_begin,
+ (void *)(descr.chunk_begin + descr.chunk_size));
+ str.Append(d.Default());
Printf("%s", str.data());
}
@@ -243,24 +243,24 @@ static void PrintAccessAndVarIntersection(const StackVarDescr &var, uptr addr,
pos_descr = "underflows";
}
InternalScopedString str;
- str.append(" [%zd, %zd)", var.beg, var_end);
+ str.AppendF(" [%zd, %zd)", var.beg, var_end);
// Render variable name.
- str.append(" '");
+ str.AppendF(" '");
for (uptr i = 0; i < var.name_len; ++i) {
- str.append("%c", var.name_pos[i]);
+ str.AppendF("%c", var.name_pos[i]);
}
- str.append("'");
+ str.AppendF("'");
if (var.line > 0) {
- str.append(" (line %d)", var.line);
+ str.AppendF(" (line %zd)", var.line);
}
if (pos_descr) {
Decorator d;
// FIXME: we may want to also print the size of the access here,
// but in case of accesses generated by memset it may be confusing.
- str.append("%s <== Memory access at offset %zd %s this variable%s\n",
- d.Location(), addr, pos_descr, d.Default());
+ str.AppendF("%s <== Memory access at offset %zd %s this variable%s\n",
+ d.Location(), addr, pos_descr, d.Default());
} else {
- str.append("\n");
+ str.AppendF("\n");
}
Printf("%s", str.data());
}
@@ -277,23 +277,23 @@ static void DescribeAddressRelativeToGlobal(uptr addr, uptr access_size,
const __asan_global &g) {
InternalScopedString str;
Decorator d;
- str.append("%s", d.Location());
+ str.Append(d.Location());
if (addr < g.beg) {
- str.append("%p is located %zd bytes to the left", (void *)addr,
- g.beg - addr);
+ str.AppendF("%p is located %zd bytes before", (void *)addr, g.beg - addr);
} else if (addr + access_size > g.beg + g.size) {
if (addr < g.beg + g.size) addr = g.beg + g.size;
- str.append("%p is located %zd bytes to the right", (void *)addr,
- addr - (g.beg + g.size));
+ str.AppendF("%p is located %zd bytes after", (void *)addr,
+ addr - (g.beg + g.size));
} else {
// Can it happen?
- str.append("%p is located %zd bytes inside", (void *)addr, addr - g.beg);
+ str.AppendF("%p is located %zd bytes inside of", (void *)addr,
+ addr - g.beg);
}
- str.append(" of global variable '%s' defined in '",
- MaybeDemangleGlobalName(g.name));
- PrintGlobalLocation(&str, g);
- str.append("' (0x%zx) of size %zu\n", g.beg, g.size);
- str.append("%s", d.Default());
+ str.AppendF(" global variable '%s' defined in '",
+ MaybeDemangleGlobalName(g.name));
+ PrintGlobalLocation(&str, g, /*print_module_name=*/false);
+ str.AppendF("' (0x%zx) of size %zu\n", g.beg, g.size);
+ str.Append(d.Default());
PrintGlobalNameIfASCII(&str, g);
Printf("%s", str.data());
}
@@ -318,7 +318,8 @@ bool DescribeAddressIfGlobal(uptr addr, uptr access_size,
}
void ShadowAddressDescription::Print() const {
- Printf("Address %p is located in the %s area.\n", addr, ShadowNames[kind]);
+ Printf("Address %p is located in the %s area.\n", (void *)addr,
+ ShadowNames[kind]);
}
void GlobalAddressDescription::Print(const char *bug_type) const {
@@ -356,7 +357,7 @@ bool GlobalAddressDescription::PointsInsideTheSameVariable(
void StackAddressDescription::Print() const {
Decorator d;
Printf("%s", d.Location());
- Printf("Address %p is located in stack of thread %s", addr,
+ Printf("Address %p is located in stack of thread %s", (void *)addr,
AsanThreadIdAndName(tid).c_str());
if (!frame_descr) {
@@ -469,7 +470,7 @@ AddressDescription::AddressDescription(uptr addr, uptr access_size,
void WildAddressDescription::Print() const {
Printf("Address %p is a wild pointer inside of access range of size %p.\n",
- addr, access_size);
+ (void *)addr, (void *)access_size);
}
void PrintAddressDescription(uptr addr, uptr access_size,
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_errors.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_errors.cpp
index 45166c064877..3f2d13e31464 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_errors.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_errors.cpp
@@ -46,10 +46,9 @@ void ErrorDeadlySignal::Print() {
void ErrorDoubleFree::Print() {
Decorator d;
Printf("%s", d.Error());
- Report(
- "ERROR: AddressSanitizer: attempting %s on %p in thread %s:\n",
- scariness.GetDescription(), addr_description.addr,
- AsanThreadIdAndName(tid).c_str());
+ Report("ERROR: AddressSanitizer: attempting %s on %p in thread %s:\n",
+ scariness.GetDescription(), (void *)addr_description.addr,
+ AsanThreadIdAndName(tid).c_str());
Printf("%s", d.Default());
scariness.Print();
GET_STACK_TRACE_FATAL(second_free_stack->trace[0],
@@ -62,10 +61,9 @@ void ErrorDoubleFree::Print() {
void ErrorNewDeleteTypeMismatch::Print() {
Decorator d;
Printf("%s", d.Error());
- Report(
- "ERROR: AddressSanitizer: %s on %p in thread %s:\n",
- scariness.GetDescription(), addr_description.addr,
- AsanThreadIdAndName(tid).c_str());
+ Report("ERROR: AddressSanitizer: %s on %p in thread %s:\n",
+ scariness.GetDescription(), (void *)addr_description.addr,
+ AsanThreadIdAndName(tid).c_str());
Printf("%s object passed to delete has wrong type:\n", d.Default());
if (delete_size != 0) {
Printf(
@@ -106,7 +104,7 @@ void ErrorFreeNotMalloced::Print() {
Report(
"ERROR: AddressSanitizer: attempting free on address "
"which was not malloc()-ed: %p in thread %s\n",
- addr_description.Address(), AsanThreadIdAndName(tid).c_str());
+ (void *)addr_description.Address(), AsanThreadIdAndName(tid).c_str());
Printf("%s", d.Default());
CHECK_GT(free_stack->size, 0);
scariness.Print();
@@ -126,7 +124,7 @@ void ErrorAllocTypeMismatch::Print() {
Printf("%s", d.Error());
Report("ERROR: AddressSanitizer: %s (%s vs %s) on %p\n",
scariness.GetDescription(), alloc_names[alloc_type],
- dealloc_names[dealloc_type], addr_description.Address());
+ dealloc_names[dealloc_type], (void *)addr_description.Address());
Printf("%s", d.Default());
CHECK_GT(dealloc_stack->size, 0);
scariness.Print();
@@ -145,7 +143,7 @@ void ErrorMallocUsableSizeNotOwned::Print() {
Report(
"ERROR: AddressSanitizer: attempting to call malloc_usable_size() for "
"pointer which is not owned: %p\n",
- addr_description.Address());
+ (void *)addr_description.Address());
Printf("%s", d.Default());
stack->Print();
addr_description.Print();
@@ -158,7 +156,7 @@ void ErrorSanitizerGetAllocatedSizeNotOwned::Print() {
Report(
"ERROR: AddressSanitizer: attempting to call "
"__sanitizer_get_allocated_size() for pointer which is not owned: %p\n",
- addr_description.Address());
+ (void *)addr_description.Address());
Printf("%s", d.Default());
stack->Print();
addr_description.Print();
@@ -281,9 +279,7 @@ void ErrorRssLimitExceeded::Print() {
void ErrorOutOfMemory::Print() {
Decorator d;
Printf("%s", d.Error());
- Report(
- "ERROR: AddressSanitizer: allocator is out of memory trying to allocate "
- "0x%zx bytes\n", requested_size);
+ ERROR_OOM("allocator is trying to allocate 0x%zx bytes\n", requested_size);
Printf("%s", d.Default());
stack->Print();
PrintHintAllocatorCannotReturnNull();
@@ -298,9 +294,10 @@ void ErrorStringFunctionMemoryRangesOverlap::Print() {
Report(
"ERROR: AddressSanitizer: %s: memory ranges [%p,%p) and [%p, %p) "
"overlap\n",
- bug_type, addr1_description.Address(),
- addr1_description.Address() + length1, addr2_description.Address(),
- addr2_description.Address() + length2);
+ bug_type, (void *)addr1_description.Address(),
+ (void *)(addr1_description.Address() + length1),
+ (void *)addr2_description.Address(),
+ (void *)(addr2_description.Address() + length2));
Printf("%s", d.Default());
scariness.Print();
stack->Print();
@@ -329,10 +326,30 @@ void ErrorBadParamsToAnnotateContiguousContainer::Print() {
" end : %p\n"
" old_mid : %p\n"
" new_mid : %p\n",
- beg, end, old_mid, new_mid);
- uptr granularity = SHADOW_GRANULARITY;
+ (void *)beg, (void *)end, (void *)old_mid, (void *)new_mid);
+ uptr granularity = ASAN_SHADOW_GRANULARITY;
if (!IsAligned(beg, granularity))
- Report("ERROR: beg is not aligned by %d\n", granularity);
+ Report("ERROR: beg is not aligned by %zu\n", granularity);
+ stack->Print();
+ ReportErrorSummary(scariness.GetDescription(), stack);
+}
+
+void ErrorBadParamsToAnnotateDoubleEndedContiguousContainer::Print() {
+ Report(
+ "ERROR: AddressSanitizer: bad parameters to "
+ "__sanitizer_annotate_double_ended_contiguous_container:\n"
+ " storage_beg : %p\n"
+ " storage_end : %p\n"
+ " old_container_beg : %p\n"
+ " old_container_end : %p\n"
+ " new_container_beg : %p\n"
+ " new_container_end : %p\n",
+ (void *)storage_beg, (void *)storage_end, (void *)old_container_beg,
+ (void *)old_container_end, (void *)new_container_beg,
+ (void *)new_container_end);
+ uptr granularity = ASAN_SHADOW_GRANULARITY;
+ if (!IsAligned(storage_beg, granularity))
+ Report("ERROR: storage_beg is not aligned by %zu\n", granularity);
stack->Print();
ReportErrorSummary(scariness.GetDescription(), stack);
}
@@ -341,12 +358,12 @@ void ErrorODRViolation::Print() {
Decorator d;
Printf("%s", d.Error());
Report("ERROR: AddressSanitizer: %s (%p):\n", scariness.GetDescription(),
- global1.beg);
+ (void *)global1.beg);
Printf("%s", d.Default());
InternalScopedString g1_loc;
InternalScopedString g2_loc;
- PrintGlobalLocation(&g1_loc, global1);
- PrintGlobalLocation(&g2_loc, global2);
+ PrintGlobalLocation(&g1_loc, global1, /*print_module_name=*/true);
+ PrintGlobalLocation(&g2_loc, global2, /*print_module_name=*/true);
Printf(" [1] size=%zd '%s' %s\n", global1.size,
MaybeDemangleGlobalName(global1.name), g1_loc.data());
Printf(" [2] size=%zd '%s' %s\n", global2.size,
@@ -362,8 +379,8 @@ void ErrorODRViolation::Print() {
"HINT: if you don't care about these errors you may set "
"ASAN_OPTIONS=detect_odr_violation=0\n");
InternalScopedString error_msg;
- error_msg.append("%s: global '%s' at %s", scariness.GetDescription(),
- MaybeDemangleGlobalName(global1.name), g1_loc.data());
+ error_msg.AppendF("%s: global '%s' at %s", scariness.GetDescription(),
+ MaybeDemangleGlobalName(global1.name), g1_loc.data());
ReportErrorSummary(error_msg.data());
}
@@ -371,7 +388,8 @@ void ErrorInvalidPointerPair::Print() {
Decorator d;
Printf("%s", d.Error());
Report("ERROR: AddressSanitizer: %s: %p %p\n", scariness.GetDescription(),
- addr1_description.Address(), addr2_description.Address());
+ (void *)addr1_description.Address(),
+ (void *)addr2_description.Address());
Printf("%s", d.Default());
GET_STACK_TRACE_FATAL(pc, bp);
stack.Print();
@@ -410,7 +428,8 @@ ErrorGeneric::ErrorGeneric(u32 tid, uptr pc_, uptr bp_, uptr sp_, uptr addr,
if (AddrIsInMem(addr)) {
u8 *shadow_addr = (u8 *)MemToShadow(addr);
// If we are accessing 16 bytes, look at the second shadow byte.
- if (*shadow_addr == 0 && access_size > SHADOW_GRANULARITY) shadow_addr++;
+ if (*shadow_addr == 0 && access_size > ASAN_SHADOW_GRANULARITY)
+ shadow_addr++;
// If we are in the partial right redzone, look at the next shadow byte.
if (*shadow_addr > 0 && *shadow_addr < 128) shadow_addr++;
bool far_from_bounds = false;
@@ -498,14 +517,15 @@ static void PrintShadowByte(InternalScopedString *str, const char *before,
}
static void PrintLegend(InternalScopedString *str) {
- str->append(
+ str->AppendF(
"Shadow byte legend (one shadow byte represents %d "
"application bytes):\n",
- (int)SHADOW_GRANULARITY);
+ (int)ASAN_SHADOW_GRANULARITY);
PrintShadowByte(str, " Addressable: ", 0);
- str->append(" Partially addressable: ");
- for (u8 i = 1; i < SHADOW_GRANULARITY; i++) PrintShadowByte(str, "", i, " ");
- str->append("\n");
+ str->AppendF(" Partially addressable: ");
+ for (u8 i = 1; i < ASAN_SHADOW_GRANULARITY; i++)
+ PrintShadowByte(str, "", i, " ");
+ str->AppendF("\n");
PrintShadowByte(str, " Heap left redzone: ",
kAsanHeapLeftRedzoneMagic);
PrintShadowByte(str, " Freed heap region: ", kAsanHeapFreeMagic);
@@ -538,7 +558,9 @@ static void PrintLegend(InternalScopedString *str) {
static void PrintShadowBytes(InternalScopedString *str, const char *before,
u8 *bytes, u8 *guilty, uptr n) {
Decorator d;
- if (before) str->append("%s%p:", before, bytes);
+ if (before)
+ str->AppendF("%s%p:", before,
+ (void *)ShadowToMem(reinterpret_cast<uptr>(bytes)));
for (uptr i = 0; i < n; i++) {
u8 *p = bytes + i;
const char *before =
@@ -546,7 +568,7 @@ static void PrintShadowBytes(InternalScopedString *str, const char *before,
const char *after = p == guilty ? "]" : "";
PrintShadowByte(str, before, *p, after);
}
- str->append("\n");
+ str->AppendF("\n");
}
static void PrintShadowMemoryForAddress(uptr addr) {
@@ -555,7 +577,7 @@ static void PrintShadowMemoryForAddress(uptr addr) {
const uptr n_bytes_per_row = 16;
uptr aligned_shadow = shadow_addr & ~(n_bytes_per_row - 1);
InternalScopedString str;
- str.append("Shadow bytes around the buggy address:\n");
+ str.AppendF("Shadow bytes around the buggy address:\n");
for (int i = -5; i <= 5; i++) {
uptr row_shadow_addr = aligned_shadow + i * n_bytes_per_row;
// Skip rows that would be outside the shadow range. This can happen when
@@ -575,7 +597,7 @@ void ErrorGeneric::Print() {
Printf("%s", d.Error());
uptr addr = addr_description.Address();
Report("ERROR: AddressSanitizer: %s on address %p at pc %p bp %p sp %p\n",
- bug_descr, (void *)addr, pc, bp, sp);
+ bug_descr, (void *)addr, (void *)pc, (void *)bp, (void *)sp);
Printf("%s", d.Default());
Printf("%s%s of size %zu at %p thread %s%s\n", d.Access(),
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_errors.h b/contrib/llvm-project/compiler-rt/lib/asan/asan_errors.h
index a7fda2fd9f5d..634f6da54435 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_errors.h
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_errors.h
@@ -53,9 +53,9 @@ struct ErrorDeadlySignal : ErrorBase {
scariness.Scare(10, "null-deref");
} else if (signal.addr == signal.pc) {
scariness.Scare(60, "wild-jump");
- } else if (signal.write_flag == SignalContext::WRITE) {
+ } else if (signal.write_flag == SignalContext::Write) {
scariness.Scare(30, "wild-addr-write");
- } else if (signal.write_flag == SignalContext::READ) {
+ } else if (signal.write_flag == SignalContext::Read) {
scariness.Scare(20, "wild-addr-read");
} else {
scariness.Scare(25, "wild-addr");
@@ -331,6 +331,28 @@ struct ErrorBadParamsToAnnotateContiguousContainer : ErrorBase {
void Print();
};
+struct ErrorBadParamsToAnnotateDoubleEndedContiguousContainer : ErrorBase {
+ const BufferedStackTrace *stack;
+ uptr storage_beg, storage_end, old_container_beg, old_container_end,
+ new_container_beg, new_container_end;
+
+ ErrorBadParamsToAnnotateDoubleEndedContiguousContainer() = default; // (*)
+ ErrorBadParamsToAnnotateDoubleEndedContiguousContainer(
+ u32 tid, BufferedStackTrace *stack_, uptr storage_beg_, uptr storage_end_,
+ uptr old_container_beg_, uptr old_container_end_, uptr new_container_beg_,
+ uptr new_container_end_)
+ : ErrorBase(tid, 10,
+ "bad-__sanitizer_annotate_double_ended_contiguous_container"),
+ stack(stack_),
+ storage_beg(storage_beg_),
+ storage_end(storage_end_),
+ old_container_beg(old_container_beg_),
+ old_container_end(old_container_end_),
+ new_container_beg(new_container_beg_),
+ new_container_end(new_container_end_) {}
+ void Print();
+};
+
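This error fires when the arguments passed to the double-ended container
annotation are inconsistent. A hedged usage sketch of the annotation itself
(declared in <sanitizer/common_interface_defs.h>), for a deque-like buffer
releasing one slot at the front; all names are hypothetical:

    #include <sanitizer/common_interface_defs.h>

    // storage_[beg,end) is the whole buffer; [beg, end) is the live range.
    void PopFront(char *storage_beg, char *storage_end, char *&beg, char *end) {
      __sanitizer_annotate_double_ended_contiguous_container(
          storage_beg, storage_end,
          /*old_container_beg=*/beg, /*old_container_end=*/end,
          /*new_container_beg=*/beg + 1, /*new_container_end=*/end);
      ++beg;  // the vacated slot is now poisoned; touching it reports an error
    }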
struct ErrorODRViolation : ErrorBase {
__asan_global global1, global2;
u32 stack_id1, stack_id2;
@@ -372,34 +394,35 @@ struct ErrorGeneric : ErrorBase {
u8 shadow_val;
ErrorGeneric() = default; // (*)
- ErrorGeneric(u32 tid, uptr addr, uptr pc_, uptr bp_, uptr sp_, bool is_write_,
+ ErrorGeneric(u32 tid, uptr pc_, uptr bp_, uptr sp_, uptr addr, bool is_write_,
uptr access_size_);
void Print();
};
// clang-format off
-#define ASAN_FOR_EACH_ERROR_KIND(macro) \
- macro(DeadlySignal) \
- macro(DoubleFree) \
- macro(NewDeleteTypeMismatch) \
- macro(FreeNotMalloced) \
- macro(AllocTypeMismatch) \
- macro(MallocUsableSizeNotOwned) \
- macro(SanitizerGetAllocatedSizeNotOwned) \
- macro(CallocOverflow) \
- macro(ReallocArrayOverflow) \
- macro(PvallocOverflow) \
- macro(InvalidAllocationAlignment) \
- macro(InvalidAlignedAllocAlignment) \
- macro(InvalidPosixMemalignAlignment) \
- macro(AllocationSizeTooBig) \
- macro(RssLimitExceeded) \
- macro(OutOfMemory) \
- macro(StringFunctionMemoryRangesOverlap) \
- macro(StringFunctionSizeOverflow) \
- macro(BadParamsToAnnotateContiguousContainer) \
- macro(ODRViolation) \
- macro(InvalidPointerPair) \
+#define ASAN_FOR_EACH_ERROR_KIND(macro) \
+ macro(DeadlySignal) \
+ macro(DoubleFree) \
+ macro(NewDeleteTypeMismatch) \
+ macro(FreeNotMalloced) \
+ macro(AllocTypeMismatch) \
+ macro(MallocUsableSizeNotOwned) \
+ macro(SanitizerGetAllocatedSizeNotOwned) \
+ macro(CallocOverflow) \
+ macro(ReallocArrayOverflow) \
+ macro(PvallocOverflow) \
+ macro(InvalidAllocationAlignment) \
+ macro(InvalidAlignedAllocAlignment) \
+ macro(InvalidPosixMemalignAlignment) \
+ macro(AllocationSizeTooBig) \
+ macro(RssLimitExceeded) \
+ macro(OutOfMemory) \
+ macro(StringFunctionMemoryRangesOverlap) \
+ macro(StringFunctionSizeOverflow) \
+ macro(BadParamsToAnnotateContiguousContainer) \
+ macro(BadParamsToAnnotateDoubleEndedContiguousContainer) \
+ macro(ODRViolation) \
+ macro(InvalidPointerPair) \
macro(Generic)
// clang-format on
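ASAN_FOR_EACH_ERROR_KIND is an X-macro: each consumer supplies a one-argument
macro and gets one expansion per kind. An illustrative consumer (the real ones
in this file use different names):

    #define DEFINE_ERROR_ENUMERATOR(name) kError##name,
    enum IllustrativeErrorKind {
      ASAN_FOR_EACH_ERROR_KIND(DEFINE_ERROR_ENUMERATOR)  // one enumerator each
      kErrorCount
    };
    #undef DEFINE_ERROR_ENUMERATOR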
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_fake_stack.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_fake_stack.cpp
index bf5c342ee59d..7443ff166984 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_fake_stack.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_fake_stack.cpp
@@ -28,8 +28,8 @@ static const u64 kAllocaRedzoneMask = 31UL;
// For small size classes inline PoisonShadow for better performance.
ALWAYS_INLINE void SetShadow(uptr ptr, uptr size, uptr class_id, u64 magic) {
u64 *shadow = reinterpret_cast<u64*>(MemToShadow(ptr));
- if (SHADOW_SCALE == 3 && class_id <= 6) {
- // This code expects SHADOW_SCALE=3.
+ if (ASAN_SHADOW_SCALE == 3 && class_id <= 6) {
+ // This code expects ASAN_SHADOW_SCALE=3.
for (uptr i = 0; i < (((uptr)1) << class_id); i++) {
shadow[i] = magic;
// Make sure this does not become memset.
@@ -54,10 +54,11 @@ FakeStack *FakeStack::Create(uptr stack_size_log) {
: MmapOrDie(size, "FakeStack"));
res->stack_size_log_ = stack_size_log;
u8 *p = reinterpret_cast<u8 *>(res);
- VReport(1, "T%d: FakeStack created: %p -- %p stack_size_log: %zd; "
+ VReport(1,
+ "T%d: FakeStack created: %p -- %p stack_size_log: %zd; "
"mmapped %zdK, noreserve=%d \n",
- GetCurrentTidOrInvalid(), p,
- p + FakeStack::RequiredSize(stack_size_log), stack_size_log,
+ GetCurrentTidOrInvalid(), (void *)p,
+ (void *)(p + FakeStack::RequiredSize(stack_size_log)), stack_size_log,
size >> 10, flags()->uar_noreserve);
return res;
}
@@ -67,8 +68,8 @@ void FakeStack::Destroy(int tid) {
if (Verbosity() >= 2) {
InternalScopedString str;
for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++)
- str.append("%zd: %zd/%zd; ", class_id, hint_position_[class_id],
- NumberOfFrames(stack_size_log(), class_id));
+ str.AppendF("%zd: %zd/%zd; ", class_id, hint_position_[class_id],
+ NumberOfFrames(stack_size_log(), class_id));
Report("T%d: FakeStack destroyed: %s\n", tid, str.data());
}
uptr size = RequiredSize(stack_size_log_);
@@ -132,6 +133,12 @@ void FakeStack::HandleNoReturn() {
needs_gc_ = true;
}
+// Hack: The statement below is not true if we take into account sigaltstack or
+// makecontext. It should be possible to make the GC discard wrong stack
+// frames when these tools are used. For now, support the simplest case and let
+// the GC discard only frames from the default stack, assuming there is no
+// buffer on the stack which is used for makecontext or sigaltstack.
+//
// When throw, longjmp or some such happens we don't call OnFree() and
// as the result may leak one or more fake frames, but the good news is that
// we are notified about all such events by HandleNoReturn().
@@ -139,7 +146,14 @@ void FakeStack::HandleNoReturn() {
// We do it based on their 'real_stack' values -- everything that is lower
// than the current real_stack is garbage.
NOINLINE void FakeStack::GC(uptr real_stack) {
- uptr collected = 0;
+ AsanThread *curr_thread = GetCurrentThread();
+ if (!curr_thread)
+ return; // Try again when we have a thread.
+ auto top = curr_thread->stack_top();
+ auto bottom = curr_thread->stack_bottom();
+ if (real_stack < bottom || real_stack > top)
+ return; // Not the default stack.
+
for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
u8 *flags = GetFlags(stack_size_log(), class_id);
for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n;
@@ -147,9 +161,12 @@ NOINLINE void FakeStack::GC(uptr real_stack) {
if (flags[i] == 0) continue; // not allocated.
FakeFrame *ff = reinterpret_cast<FakeFrame *>(
GetFrame(stack_size_log(), class_id, i));
- if (ff->real_stack < real_stack) {
+ // GC only on the default stack.
+ if (bottom < ff->real_stack && ff->real_stack < real_stack) {
flags[i] = 0;
- collected++;
+      // Poison the frame so that any access will be reported as a UAR.
+ SetShadow(reinterpret_cast<uptr>(ff), BytesInSizeClass(class_id),
+ class_id, kMagic8);
}
}
}
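
For readers following the new GC guard, here is a minimal standalone sketch of the discard predicate (IsCollectableFrame and its parameters are hypothetical simplifications of the FakeFrame and AsanThread fields used above):

#include <cstdint>

// Hedged model of the GC guard above, not the runtime's code: a fake frame
// is garbage only when the current sp is on the default stack and the
// frame's recorded real_stack lies strictly between the stack bottom and
// the current sp. Frames on sigaltstack/makecontext stacks are left alone.
static bool IsCollectableFrame(uintptr_t frame_real_stack,
                               uintptr_t current_real_stack,
                               uintptr_t stack_bottom, uintptr_t stack_top) {
  if (current_real_stack < stack_bottom || current_real_stack > stack_top)
    return false;  // Current sp is off the default stack; skip GC entirely.
  return stack_bottom < frame_real_stack &&
         frame_real_stack < current_real_stack;
}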
@@ -206,11 +223,12 @@ static FakeStack *GetFakeStackFastAlways() {
static ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size) {
FakeStack *fs = GetFakeStackFast();
- if (!fs) return 0;
- uptr local_stack;
- uptr real_stack = reinterpret_cast<uptr>(&local_stack);
- FakeFrame *ff = fs->Allocate(fs->stack_size_log(), class_id, real_stack);
- if (!ff) return 0; // Out of fake stack.
+ if (!fs)
+ return 0;
+ FakeFrame *ff =
+ fs->Allocate(fs->stack_size_log(), class_id, GET_CURRENT_FRAME());
+ if (!ff)
+ return 0; // Out of fake stack.
uptr ptr = reinterpret_cast<uptr>(ff);
SetShadow(ptr, size, class_id, 0);
return ptr;
@@ -220,9 +238,8 @@ static ALWAYS_INLINE uptr OnMallocAlways(uptr class_id, uptr size) {
FakeStack *fs = GetFakeStackFastAlways();
if (!fs)
return 0;
- uptr local_stack;
- uptr real_stack = reinterpret_cast<uptr>(&local_stack);
- FakeFrame *ff = fs->Allocate(fs->stack_size_log(), class_id, real_stack);
+ FakeFrame *ff =
+ fs->Allocate(fs->stack_size_log(), class_id, GET_CURRENT_FRAME());
if (!ff)
return 0; // Out of fake stack.
uptr ptr = reinterpret_cast<uptr>(ff);
@@ -293,10 +310,10 @@ void __asan_alloca_poison(uptr addr, uptr size) {
uptr LeftRedzoneAddr = addr - kAllocaRedzoneSize;
uptr PartialRzAddr = addr + size;
uptr RightRzAddr = (PartialRzAddr + kAllocaRedzoneMask) & ~kAllocaRedzoneMask;
- uptr PartialRzAligned = PartialRzAddr & ~(SHADOW_GRANULARITY - 1);
+ uptr PartialRzAligned = PartialRzAddr & ~(ASAN_SHADOW_GRANULARITY - 1);
FastPoisonShadow(LeftRedzoneAddr, kAllocaRedzoneSize, kAsanAllocaLeftMagic);
FastPoisonShadowPartialRightRedzone(
- PartialRzAligned, PartialRzAddr % SHADOW_GRANULARITY,
+ PartialRzAligned, PartialRzAddr % ASAN_SHADOW_GRANULARITY,
RightRzAddr - PartialRzAligned, kAsanAllocaRightMagic);
FastPoisonShadow(RightRzAddr, kAllocaRedzoneSize, kAsanAllocaRightMagic);
}
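
The address arithmetic above is easier to follow with concrete numbers. A self-contained worked example, assuming the 8-byte shadow granularity and the 32-byte alloca redzone used in this file (the addresses are arbitrary):

#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t kGranularity = 8;     // ASAN_SHADOW_GRANULARITY
  const uintptr_t kRedzoneSize = 32;    // kAllocaRedzoneSize
  const uintptr_t kRedzoneMask = 31;    // kAllocaRedzoneMask
  uintptr_t addr = 0x1000, size = 13;   // 13 user bytes at 0x1000

  uintptr_t left_rz  = addr - kRedzoneSize;                       // 0x0fe0
  uintptr_t partial  = addr + size;                               // 0x100d
  uintptr_t right_rz = (partial + kRedzoneMask) & ~kRedzoneMask;  // 0x1020
  uintptr_t partial_aligned = partial & ~(kGranularity - 1);      // 0x1008

  printf("left redzone  at %#lx\n", (unsigned long)left_rz);
  printf("partial granule at %#lx, %lu addressable bytes\n",
         (unsigned long)partial_aligned,
         (unsigned long)(partial % kGranularity));                // 5
  printf("right redzone at %#lx\n", (unsigned long)right_rz);
}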
@@ -304,7 +321,8 @@ void __asan_alloca_poison(uptr addr, uptr size) {
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_allocas_unpoison(uptr top, uptr bottom) {
if ((!top) || (top > bottom)) return;
- REAL(memset)(reinterpret_cast<void*>(MemToShadow(top)), 0,
- (bottom - top) / SHADOW_GRANULARITY);
+ REAL(memset)
+ (reinterpret_cast<void *>(MemToShadow(top)), 0,
+ (bottom - top) / ASAN_SHADOW_GRANULARITY);
}
} // extern "C"
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_flags.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_flags.cpp
index c64e46470287..239898433232 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_flags.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_flags.cpp
@@ -87,7 +87,7 @@ void InitializeFlags() {
RegisterCommonFlags(&ubsan_parser);
#endif
- if (SANITIZER_MAC) {
+ if (SANITIZER_APPLE) {
// Support macOS MallocScribble and MallocPreScribble:
// <https://developer.apple.com/library/content/documentation/Performance/
// Conceptual/ManagingMemory/Articles/MallocDebug.html>
@@ -140,9 +140,9 @@ void InitializeFlags() {
SanitizerToolName);
Die();
}
- // Ensure that redzone is at least SHADOW_GRANULARITY.
- if (f->redzone < (int)SHADOW_GRANULARITY)
- f->redzone = SHADOW_GRANULARITY;
+ // Ensure that redzone is at least ASAN_SHADOW_GRANULARITY.
+ if (f->redzone < (int)ASAN_SHADOW_GRANULARITY)
+ f->redzone = ASAN_SHADOW_GRANULARITY;
// Make "strict_init_order" imply "check_initialization_order".
// TODO(samsonov): Use a single runtime flag for an init-order checker.
if (f->strict_init_order) {
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_flags.inc b/contrib/llvm-project/compiler-rt/lib/asan/asan_flags.inc
index 514b225c4073..fad1577d912a 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_flags.inc
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_flags.inc
@@ -49,9 +49,10 @@ ASAN_FLAG(
"to find more errors.")
ASAN_FLAG(bool, replace_intrin, true,
"If set, uses custom wrappers for memset/memcpy/memmove intrinsics.")
-ASAN_FLAG(bool, detect_stack_use_after_return, false,
+ASAN_FLAG(bool, detect_stack_use_after_return,
+ SANITIZER_LINUX && !SANITIZER_ANDROID,
"Enables stack-use-after-return checking at run-time.")
-ASAN_FLAG(int, min_uar_stack_size_log, 16, // We can't do smaller anyway.
+ASAN_FLAG(int, min_uar_stack_size_log, 16, // We can't do smaller anyway.
"Minimum fake stack size log.")
ASAN_FLAG(int, max_uar_stack_size_log,
20, // 1Mb per size class, i.e. ~11Mb per thread
@@ -82,6 +83,10 @@ ASAN_FLAG(
int, sleep_after_init, 0,
"Number of seconds to sleep after AddressSanitizer is initialized. "
"Useful for debugging purposes (e.g. when one needs to attach gdb).")
+ASAN_FLAG(
+ int, sleep_before_init, 0,
+ "Number of seconds to sleep before AddressSanitizer starts initializing. "
+ "Useful for debugging purposes (e.g. when one needs to attach gdb).")
ASAN_FLAG(bool, check_malloc_usable_size, true,
"Allows the users to work around the bug in Nvidia drivers prior to "
"295.*.")
@@ -117,7 +122,7 @@ ASAN_FLAG(bool, poison_array_cookie, true,
// https://github.com/google/sanitizers/issues/309
// TODO(glider,timurrrr): Fix known issues and enable this back.
ASAN_FLAG(bool, alloc_dealloc_mismatch,
- !SANITIZER_MAC && !SANITIZER_WINDOWS && !SANITIZER_ANDROID,
+ !SANITIZER_APPLE && !SANITIZER_WINDOWS && !SANITIZER_ANDROID,
"Report errors on malloc/delete, new/free, new/delete[], etc.")
ASAN_FLAG(bool, new_delete_type_mismatch, true,
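
These are runtime flags, so the new default and the new sleep_before_init option can be exercised without recompiling via the ASAN_OPTIONS environment variable. Defaults can also be baked into a binary through the __asan_default_options() hook; a minimal sketch using only the options touched in this hunk (the chosen values are arbitrary):

// Compiled into the instrumented binary; ASan queries this at startup.
extern "C" const char *__asan_default_options() {
  return "sleep_before_init=5:detect_stack_use_after_return=1";
}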
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_fuchsia.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_fuchsia.cpp
index b0c7255144ac..12625e9d7583 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_fuchsia.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_fuchsia.cpp
@@ -14,16 +14,17 @@
#include "sanitizer_common/sanitizer_fuchsia.h"
#if SANITIZER_FUCHSIA
-#include "asan_interceptors.h"
-#include "asan_internal.h"
-#include "asan_stack.h"
-#include "asan_thread.h"
-
#include <limits.h>
#include <zircon/sanitizer.h>
#include <zircon/syscalls.h>
#include <zircon/threads.h>
+# include "asan_interceptors.h"
+# include "asan_internal.h"
+# include "asan_stack.h"
+# include "asan_thread.h"
+# include "lsan/lsan_common.h"
+
namespace __asan {
// The system already set up the shadow memory for us.
@@ -31,7 +32,8 @@ namespace __asan {
// AsanInitInternal->InitializeHighMemEnd (asan_rtl.cpp).
// Just do some additional sanity checks here.
void InitializeShadowMemory() {
- if (Verbosity()) PrintAddressSpaceLayout();
+ if (Verbosity())
+ PrintAddressSpaceLayout();
// Make sure SHADOW_OFFSET doesn't use __asan_shadow_memory_dynamic_address.
__asan_shadow_memory_dynamic_address = kDefaultShadowSentinel;
@@ -62,7 +64,34 @@ void AsanOnDeadlySignal(int signo, void *siginfo, void *context) {
UNIMPLEMENTED();
}
-bool PlatformUnpoisonStacks() { return false; }
+bool PlatformUnpoisonStacks() {
+  // The current sp might not point to the default stack; for example, we
+  // could be running on a crash-handler stack set up by a fuzzer.
+  // Unpoison the default stack and the current stack page.
+ AsanThread *curr_thread = GetCurrentThread();
+ CHECK(curr_thread != nullptr);
+ uptr top = curr_thread->stack_top();
+ uptr bottom = curr_thread->stack_bottom();
+  // The default stack grows from top to bottom (bottom < top).
+
+ uptr local_stack = reinterpret_cast<uptr>(__builtin_frame_address(0));
+ if (local_stack >= bottom && local_stack <= top) {
+ // The current stack is the default stack.
+ // We only need to unpoison from where we are using until the end.
+ bottom = RoundDownTo(local_stack, GetPageSize());
+ UnpoisonStack(bottom, top, "default");
+ } else {
+ // The current stack is not the default stack.
+ // Unpoison the entire default stack and the current stack page.
+ UnpoisonStack(bottom, top, "default");
+ bottom = RoundDownTo(local_stack, GetPageSize());
+ top = bottom + GetPageSize();
+ UnpoisonStack(bottom, top, "unknown");
+ return true;
+ }
+
+ return false;
+}
// We can use a plain thread_local variable for TSD.
static thread_local void *per_thread;
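
A reduced model of the branch above (Range and the helper below are hypothetical; UnpoisonStack is the runtime's real helper): when sp is on the default stack, only the pages from sp upward need clearing; otherwise the whole default stack is cleared and the caller additionally clears one page around sp.

#include <cstdint>

struct Range { uintptr_t bottom, top; };

// Sketch of the range choice in PlatformUnpoisonStacks above.
static Range DefaultStackUnpoisonRange(uintptr_t sp, uintptr_t bottom,
                                       uintptr_t top, uintptr_t page_size) {
  if (sp >= bottom && sp <= top) {
    // On the default stack: unpoison from the page containing sp upward;
    // memory below that page may still hold live frames being executed.
    return Range{sp & ~(page_size - 1), top};
  }
  // Off the default stack (e.g. a crash-handler stack): unpoison all of it;
  // the caller also unpoisons one page around sp, labelled "unknown" above.
  return Range{bottom, top};
}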
@@ -90,14 +119,12 @@ struct AsanThread::InitOptions {
// Shared setup between thread creation and startup for the initial thread.
static AsanThread *CreateAsanThread(StackTrace *stack, u32 parent_tid,
- uptr user_id, bool detached,
- const char *name) {
+ bool detached, const char *name) {
// In lieu of AsanThread::Create.
AsanThread *thread = (AsanThread *)MmapOrDie(AsanThreadMmapSize(), __func__);
AsanThreadContext::CreateThreadContextArgs args = {thread, stack};
- u32 tid =
- asanThreadRegistry().CreateThread(user_id, detached, parent_tid, &args);
+ u32 tid = asanThreadRegistry().CreateThread(0, detached, parent_tid, &args);
asanThreadRegistry().SetThreadName(tid, name);
return thread;
@@ -124,7 +151,7 @@ AsanThread *CreateMainThread() {
CHECK_NE(__sanitizer::MainThreadStackBase, 0);
CHECK_GT(__sanitizer::MainThreadStackSize, 0);
AsanThread *t = CreateAsanThread(
- nullptr, 0, reinterpret_cast<uptr>(self), true,
+ nullptr, 0, true,
_zx_object_get_property(thrd_get_zx_handle(self), ZX_PROP_NAME, name,
sizeof(name)) == ZX_OK
? name
@@ -148,13 +175,13 @@ static void *BeforeThreadCreateHook(uptr user_id, bool detached,
uptr stack_size) {
EnsureMainThreadIDIsCorrect();
// Strict init-order checking is thread-hostile.
- if (flags()->strict_init_order) StopInitOrderChecking();
+ if (flags()->strict_init_order)
+ StopInitOrderChecking();
GET_STACK_TRACE_THREAD;
u32 parent_tid = GetCurrentTidOrInvalid();
- AsanThread *thread =
- CreateAsanThread(&stack, parent_tid, user_id, detached, name);
+ AsanThread *thread = CreateAsanThread(&stack, parent_tid, detached, name);
// On other systems, AsanThread::Init() is called from the new
// thread itself. But on Fuchsia we already know the stack address
@@ -209,8 +236,20 @@ void FlushUnneededASanShadowMemory(uptr p, uptr size) {
__sanitizer_fill_shadow(p, size, 0, 0);
}
+// On Fuchsia, leak detection is done by a special hook after atexit hooks.
+// So this doesn't install any atexit hook like on other platforms.
+void InstallAtExitCheckLeaks() {}
+
+void InstallAtForkHandler() {}
+
} // namespace __asan
+namespace __lsan {
+
+bool UseExitcodeOnLeak() { return __asan::flags()->halt_on_error; }
+
+} // namespace __lsan
+
// These are declared (in extern "C") by <zircon/sanitizer.h>.
// The system runtime will call our definitions directly.
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_globals.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_globals.cpp
index 9d7dbc6f264c..6ac64c4b776b 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_globals.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_globals.cpp
@@ -35,8 +35,7 @@ struct ListOfGlobals {
ListOfGlobals *next;
};
-static BlockingMutex mu_for_globals(LINKER_INITIALIZED);
-static LowLevelAllocator allocator_for_globals;
+static Mutex mu_for_globals;
static ListOfGlobals *list_of_all_globals;
static const int kDynamicInitGlobalsInitialCapacity = 512;
@@ -61,14 +60,13 @@ ALWAYS_INLINE void PoisonShadowForGlobal(const Global *g, u8 value) {
}
ALWAYS_INLINE void PoisonRedZones(const Global &g) {
- uptr aligned_size = RoundUpTo(g.size, SHADOW_GRANULARITY);
+ uptr aligned_size = RoundUpTo(g.size, ASAN_SHADOW_GRANULARITY);
FastPoisonShadow(g.beg + aligned_size, g.size_with_redzone - aligned_size,
kAsanGlobalRedzoneMagic);
if (g.size != aligned_size) {
FastPoisonShadowPartialRightRedzone(
- g.beg + RoundDownTo(g.size, SHADOW_GRANULARITY),
- g.size % SHADOW_GRANULARITY,
- SHADOW_GRANULARITY,
+ g.beg + RoundDownTo(g.size, ASAN_SHADOW_GRANULARITY),
+ g.size % ASAN_SHADOW_GRANULARITY, ASAN_SHADOW_GRANULARITY,
kAsanGlobalRedzoneMagic);
}
}
@@ -82,15 +80,21 @@ static bool IsAddressNearGlobal(uptr addr, const __asan_global &g) {
}
static void ReportGlobal(const Global &g, const char *prefix) {
+ DataInfo info;
+ bool symbolized = Symbolizer::GetOrInit()->SymbolizeData(g.beg, &info);
Report(
- "%s Global[%p]: beg=%p size=%zu/%zu name=%s module=%s dyn_init=%zu "
+ "%s Global[%p]: beg=%p size=%zu/%zu name=%s source=%s module=%s "
+ "dyn_init=%zu "
"odr_indicator=%p\n",
- prefix, &g, (void *)g.beg, g.size, g.size_with_redzone, g.name,
- g.module_name, g.has_dynamic_init, (void *)g.odr_indicator);
- if (g.location) {
- Report(" location (%p): name=%s[%p], %d %d\n", g.location,
- g.location->filename, g.location->filename, g.location->line_no,
- g.location->column_no);
+ prefix, (void *)&g, (void *)g.beg, g.size, g.size_with_redzone, g.name,
+ g.module_name, (symbolized ? info.module : "?"), g.has_dynamic_init,
+ (void *)g.odr_indicator);
+
+ if (symbolized && info.line != 0) {
+ Report(" location: name=%s, %d\n", info.file, static_cast<int>(info.line));
+ } else if (g.gcc_location != 0) {
+    // Fall back to Global::gcc_location.
+    Report("  location: name=%s, %d\n", g.gcc_location->filename,
+           g.gcc_location->line_no);
}
}
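
The reporting logic now prefers symbolizer data over the compiler-recorded location. A reduced sketch of the fallback chain (Loc and its fields are hypothetical stand-ins for DataInfo and __asan_global):

#include <cstdio>

struct Loc { const char *file; int line; };

// Preference order modeled from ReportGlobal above: the symbolizer's
// file:line first, then the GCC-emitted source location, then the bare
// module name.
static void PrintBestLocation(const Loc *symbolized, const Loc *gcc_loc,
                              const char *module_name) {
  if (symbolized && symbolized->line != 0)
    printf("%s:%d\n", symbolized->file, symbolized->line);
  else if (gcc_loc)
    printf("%s:%d\n", gcc_loc->file ? gcc_loc->file : module_name,
           gcc_loc->line);
  else
    printf("%s\n", module_name);
}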
@@ -108,7 +112,7 @@ static u32 FindRegistrationSite(const Global *g) {
int GetGlobalsForAddress(uptr addr, Global *globals, u32 *reg_sites,
int max_globals) {
if (!flags()->report_globals) return 0;
- BlockingMutexLock lock(&mu_for_globals);
+ Lock lock(&mu_for_globals);
int res = 0;
for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
const Global &g = *l->g;
@@ -195,7 +199,7 @@ static inline bool UseODRIndicator(const Global *g) {
// This function may be called more than once for every global
// so we store the globals in a map.
static void RegisterGlobal(const Global *g) {
- CHECK(asan_inited);
+ CHECK(AsanInited());
if (flags()->report_globals >= 2)
ReportGlobal(*g, "Added");
CHECK(flags()->report_globals);
@@ -221,13 +225,13 @@ static void RegisterGlobal(const Global *g) {
}
if (CanPoisonMemory())
PoisonRedZones(*g);
- ListOfGlobals *l = new(allocator_for_globals) ListOfGlobals;
+ ListOfGlobals *l = new (GetGlobalLowLevelAllocator()) ListOfGlobals;
l->g = g;
l->next = list_of_all_globals;
list_of_all_globals = l;
if (g->has_dynamic_init) {
if (!dynamic_init_globals) {
- dynamic_init_globals = new (allocator_for_globals) VectorOfGlobals;
+ dynamic_init_globals = new (GetGlobalLowLevelAllocator()) VectorOfGlobals;
dynamic_init_globals->reserve(kDynamicInitGlobalsInitialCapacity);
}
DynInitGlobal dyn_global = { *g, false };
@@ -236,7 +240,7 @@ static void RegisterGlobal(const Global *g) {
}
static void UnregisterGlobal(const Global *g) {
- CHECK(asan_inited);
+ CHECK(AsanInited());
if (flags()->report_globals >= 2)
ReportGlobal(*g, "Removed");
CHECK(flags()->report_globals);
@@ -257,7 +261,7 @@ static void UnregisterGlobal(const Global *g) {
}
void StopInitOrderChecking() {
- BlockingMutexLock lock(&mu_for_globals);
+ Lock lock(&mu_for_globals);
if (!flags()->check_initialization_order || !dynamic_init_globals)
return;
flags()->check_initialization_order = false;
@@ -292,23 +296,28 @@ void PrintGlobalNameIfASCII(InternalScopedString *str, const __asan_global &g) {
if (c == '\0' || !IsASCII(c)) return;
}
if (*(char *)(g.beg + g.size - 1) != '\0') return;
- str->append(" '%s' is ascii string '%s'\n", MaybeDemangleGlobalName(g.name),
- (char *)g.beg);
+ str->AppendF(" '%s' is ascii string '%s'\n", MaybeDemangleGlobalName(g.name),
+ (char *)g.beg);
}
-static const char *GlobalFilename(const __asan_global &g) {
- const char *res = g.module_name;
- // Prefer the filename from source location, if is available.
- if (g.location) res = g.location->filename;
- CHECK(res);
- return res;
-}
-
-void PrintGlobalLocation(InternalScopedString *str, const __asan_global &g) {
- str->append("%s", GlobalFilename(g));
- if (!g.location) return;
- if (g.location->line_no) str->append(":%d", g.location->line_no);
- if (g.location->column_no) str->append(":%d", g.location->column_no);
+void PrintGlobalLocation(InternalScopedString *str, const __asan_global &g,
+ bool print_module_name) {
+ DataInfo info;
+ if (Symbolizer::GetOrInit()->SymbolizeData(g.beg, &info) && info.line != 0) {
+ str->AppendF("%s:%d", info.file, static_cast<int>(info.line));
+ } else if (g.gcc_location != 0) {
+    // Fall back to Global::gcc_location.
+ str->AppendF("%s", g.gcc_location->filename ? g.gcc_location->filename
+ : g.module_name);
+ if (g.gcc_location->line_no)
+ str->AppendF(":%d", g.gcc_location->line_no);
+ if (g.gcc_location->column_no)
+ str->AppendF(":%d", g.gcc_location->column_no);
+ } else {
+ str->AppendF("%s", g.module_name);
+ }
+ if (print_module_name && info.module)
+ str->AppendF(" in %s", info.module);
}
} // namespace __asan
@@ -359,17 +368,18 @@ void __asan_register_globals(__asan_global *globals, uptr n) {
if (!flags()->report_globals) return;
GET_STACK_TRACE_MALLOC;
u32 stack_id = StackDepotPut(stack);
- BlockingMutexLock lock(&mu_for_globals);
+ Lock lock(&mu_for_globals);
if (!global_registration_site_vector) {
global_registration_site_vector =
- new (allocator_for_globals) GlobalRegistrationSiteVector;
+ new (GetGlobalLowLevelAllocator()) GlobalRegistrationSiteVector;
global_registration_site_vector->reserve(128);
}
GlobalRegistrationSite site = {stack_id, &globals[0], &globals[n - 1]};
global_registration_site_vector->push_back(site);
if (flags()->report_globals >= 2) {
PRINT_CURRENT_STACK();
- Printf("=== ID %d; %p %p\n", stack_id, &globals[0], &globals[n - 1]);
+ Printf("=== ID %d; %p %p\n", stack_id, (void *)&globals[0],
+ (void *)&globals[n - 1]);
}
for (uptr i = 0; i < n; i++) {
if (SANITIZER_WINDOWS && globals[i].beg == 0) {
@@ -398,7 +408,7 @@ void __asan_register_globals(__asan_global *globals, uptr n) {
// We must do this when a shared object gets dlclosed.
void __asan_unregister_globals(__asan_global *globals, uptr n) {
if (!flags()->report_globals) return;
- BlockingMutexLock lock(&mu_for_globals);
+ Lock lock(&mu_for_globals);
for (uptr i = 0; i < n; i++) {
if (SANITIZER_WINDOWS && globals[i].beg == 0) {
// Skip globals that look like padding from the MSVC incremental linker.
@@ -423,8 +433,8 @@ void __asan_before_dynamic_init(const char *module_name) {
return;
bool strict_init_order = flags()->strict_init_order;
CHECK(module_name);
- CHECK(asan_inited);
- BlockingMutexLock lock(&mu_for_globals);
+ CHECK(AsanInited());
+ Lock lock(&mu_for_globals);
if (flags()->report_globals >= 3)
Printf("DynInitPoison module: %s\n", module_name);
for (uptr i = 0, n = dynamic_init_globals->size(); i < n; ++i) {
@@ -447,8 +457,8 @@ void __asan_after_dynamic_init() {
!CanPoisonMemory() ||
!dynamic_init_globals)
return;
- CHECK(asan_inited);
- BlockingMutexLock lock(&mu_for_globals);
+ CHECK(AsanInited());
+ Lock lock(&mu_for_globals);
// FIXME: Optionally report that we're unpoisoning globals from a module.
for (uptr i = 0, n = dynamic_init_globals->size(); i < n; ++i) {
DynInitGlobal &dyn_g = (*dynamic_init_globals)[i];
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors.cpp
index d0a6dd48a748..4de2fa356374 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "asan_interceptors.h"
+
#include "asan_allocator.h"
#include "asan_internal.h"
#include "asan_mapping.h"
@@ -20,7 +21,10 @@
#include "asan_stack.h"
#include "asan_stats.h"
#include "asan_suppressions.h"
+#include "asan_thread.h"
#include "lsan/lsan_common.h"
+#include "sanitizer_common/sanitizer_errno.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_libc.h"
// There is no general interception at all on Fuchsia.
@@ -49,8 +53,8 @@ namespace __asan {
ASAN_READ_RANGE((ctx), (s), \
common_flags()->strict_string_checks ? (len) + 1 : (n))
-#define ASAN_READ_STRING(ctx, s, n) \
- ASAN_READ_STRING_OF_LEN((ctx), (s), REAL(strlen)(s), (n))
+# define ASAN_READ_STRING(ctx, s, n) \
+ ASAN_READ_STRING_OF_LEN((ctx), (s), internal_strlen(s), (n))
static inline uptr MaybeRealStrnlen(const char *s, uptr maxlen) {
#if SANITIZER_INTERCEPT_STRNLEN
@@ -84,12 +88,6 @@ using namespace __asan;
DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr)
DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
-#define ASAN_INTERCEPTOR_ENTER(ctx, func) \
- AsanInterceptorContext _ctx = {#func}; \
- ctx = (void *)&_ctx; \
- (void) ctx; \
-
-#define COMMON_INTERCEPT_FUNCTION(name) ASAN_INTERCEPT_FUNC(name)
#define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \
ASAN_INTERCEPT_FUNC_VER(name, ver)
#define COMMON_INTERCEPT_FUNCTION_VER_UNVERSIONED_FALLBACK(name, ver) \
@@ -98,15 +96,17 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
ASAN_WRITE_RANGE(ctx, ptr, size)
#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
ASAN_READ_RANGE(ctx, ptr, size)
-#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
- ASAN_INTERCEPTOR_ENTER(ctx, func); \
- do { \
- if (asan_init_is_running) \
- return REAL(func)(__VA_ARGS__); \
- if (SANITIZER_MAC && UNLIKELY(!asan_inited)) \
- return REAL(func)(__VA_ARGS__); \
- ENSURE_ASAN_INITED(); \
- } while (false)
+# define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
+ ASAN_INTERCEPTOR_ENTER(ctx, func); \
+ do { \
+ if constexpr (SANITIZER_APPLE) { \
+ if (UNLIKELY(!AsanInited())) \
+ return REAL(func)(__VA_ARGS__); \
+ } else { \
+ if (!TryAsanInitFromRtl()) \
+ return REAL(func)(__VA_ARGS__); \
+ } \
+ } while (false)
#define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
do { \
} while (false)
@@ -130,40 +130,65 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
#define COMMON_INTERCEPTOR_BLOCK_REAL(name) REAL(name)
// Strict init-order checking is dlopen-hostile:
// https://github.com/google/sanitizers/issues/178
-#define COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag) \
- do { \
- if (flags()->strict_init_order) \
- StopInitOrderChecking(); \
- CheckNoDeepBind(filename, flag); \
- } while (false)
-#define COMMON_INTERCEPTOR_ON_EXIT(ctx) OnExit()
-#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle)
-#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED()
-#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (!asan_inited)
-#define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \
- if (AsanThread *t = GetCurrentThread()) { \
- *begin = t->tls_begin(); \
- *end = t->tls_end(); \
- } else { \
- *begin = *end = 0; \
+# define COMMON_INTERCEPTOR_DLOPEN(filename, flag) \
+ ({ \
+ if (flags()->strict_init_order) \
+ StopInitOrderChecking(); \
+ CheckNoDeepBind(filename, flag); \
+ REAL(dlopen)(filename, flag); \
+ })
+# define COMMON_INTERCEPTOR_ON_EXIT(ctx) OnExit()
+# define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle)
+# define COMMON_INTERCEPTOR_LIBRARY_UNLOADED()
+# define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (!AsanInited())
+# define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \
+ if (AsanThread *t = GetCurrentThread()) { \
+ *begin = t->tls_begin(); \
+ *end = t->tls_end(); \
+ } else { \
+ *begin = *end = 0; \
+ }
+
+template <class Mmap>
+static void* mmap_interceptor(Mmap real_mmap, void *addr, SIZE_T length,
+ int prot, int flags, int fd, OFF64_T offset) {
+ void *res = real_mmap(addr, length, prot, flags, fd, offset);
+ if (length && res != (void *)-1) {
+ const uptr beg = reinterpret_cast<uptr>(res);
+ DCHECK(IsAligned(beg, GetPageSize()));
+ SIZE_T rounded_length = RoundUpTo(length, GetPageSize());
+    // Only unpoison the shadow if it's an ASan-managed address.
+    if (AddrIsInMem(beg) && AddrIsInMem(beg + rounded_length - 1))
+      PoisonShadow(beg, rounded_length, 0);
}
+ return res;
+}
-#define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size) \
- do { \
- ASAN_INTERCEPTOR_ENTER(ctx, memmove); \
- ASAN_MEMMOVE_IMPL(ctx, to, from, size); \
- } while (false)
+template <class Munmap>
+static int munmap_interceptor(Munmap real_munmap, void *addr, SIZE_T length) {
+  // We should not tag if munmap fails, but it's too late to tag after
+  // real_munmap, as the pages could be mmapped by another thread.
+ const uptr beg = reinterpret_cast<uptr>(addr);
+ if (length && IsAligned(beg, GetPageSize())) {
+ SIZE_T rounded_length = RoundUpTo(length, GetPageSize());
+ // Protect from unmapping the shadow.
+ if (AddrIsInMem(beg) && AddrIsInMem(beg + rounded_length - 1))
+ PoisonShadow(beg, rounded_length, 0);
+ }
+ return real_munmap(addr, length);
+}
-#define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size) \
- do { \
- ASAN_INTERCEPTOR_ENTER(ctx, memcpy); \
- ASAN_MEMCPY_IMPL(ctx, to, from, size); \
+# define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, length, prot, flags, \
+                                      fd, offset)                           \
+  do {                                                                      \
+    (void)(ctx);                                                            \
+    return mmap_interceptor(REAL(mmap), addr, length, prot, flags, fd,      \
+                            offset);                                        \
} while (false)
-#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size) \
- do { \
- ASAN_INTERCEPTOR_ENTER(ctx, memset); \
- ASAN_MEMSET_IMPL(ctx, block, c, size); \
+# define COMMON_INTERCEPTOR_MUNMAP_IMPL(ctx, addr, length) \
+ do { \
+ (void)(ctx); \
+    return munmap_interceptor(REAL(munmap), addr, length);                  \
} while (false)
#if CAN_SANITIZE_LEAKS
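
A self-contained sketch of the shadow bookkeeping in the two interceptors above (ClearPoison is a hypothetical stand-in for PoisonShadow(..., 0)): mmap hands back fresh, addressable pages, so stale poison left at that address range must be wiped after a successful call; for munmap the wipe must happen before the real call, because another thread may re-map the range the instant it is freed.

#include <cstddef>
#include <cstdint>

// Stub standing in for the runtime's PoisonShadow(beg, size, 0).
static void ClearPoison(uintptr_t beg, size_t size) { (void)beg; (void)size; }

// After a successful mmap: unpoison the page-rounded range.
static void OnMmapSuccess(uintptr_t beg, size_t length, size_t page) {
  size_t rounded = (length + page - 1) & ~(page - 1);
  ClearPoison(beg, rounded);
}

// Before the real munmap: clear first, since afterwards another thread may
// already own the range and poisoning it then would corrupt its shadow.
static int OnMunmap(uintptr_t beg, size_t length, size_t page,
                    int (*real_munmap)(uintptr_t, size_t)) {
  if (length && (beg & (page - 1)) == 0)
    ClearPoison(beg, (length + page - 1) & ~(page - 1));
  return real_munmap(beg, length);
}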
@@ -171,8 +196,13 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
__lsan::ScopedInterceptorDisabler disabler
#endif
-#include "sanitizer_common/sanitizer_common_interceptors.inc"
-#include "sanitizer_common/sanitizer_signal_interceptors.inc"
+# define SIGNAL_INTERCEPTOR_ENTER() \
+ do { \
+ AsanInitFromRtl(); \
+ } while (false)
+
+# include "sanitizer_common/sanitizer_common_interceptors.inc"
+# include "sanitizer_common/sanitizer_signal_interceptors.inc"
// Syscall interceptors don't have contexts, we don't support suppressions
// for them.
@@ -195,23 +225,44 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
AsanThread *t = (AsanThread *)arg;
SetCurrentThread(t);
- return t->ThreadStart(GetTid());
+ auto self = GetThreadSelf();
+ auto args = asanThreadArgRetval().GetArgs(self);
+ t->ThreadStart(GetTid());
+
+# if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
+ SANITIZER_SOLARIS
+ __sanitizer_sigset_t sigset;
+ t->GetStartData(sigset);
+ SetSigProcMask(&sigset, nullptr);
+# endif
+
+ thread_return_t retval = (*args.routine)(args.arg_retval);
+ asanThreadArgRetval().Finish(self, retval);
+ return retval;
}
-INTERCEPTOR(int, pthread_create, void *thread,
- void *attr, void *(*start_routine)(void*), void *arg) {
+INTERCEPTOR(int, pthread_create, void *thread, void *attr,
+ void *(*start_routine)(void *), void *arg) {
EnsureMainThreadIDIsCorrect();
// Strict init-order checking is thread-hostile.
if (flags()->strict_init_order)
StopInitOrderChecking();
GET_STACK_TRACE_THREAD;
- int detached = 0;
- if (attr)
- REAL(pthread_attr_getdetachstate)(attr, &detached);
+ bool detached = [attr]() {
+ int d = 0;
+ return attr && !REAL(pthread_attr_getdetachstate)(attr, &d) &&
+ IsStateDetached(d);
+ }();
u32 current_tid = GetCurrentTidOrInvalid();
- AsanThread *t =
- AsanThread::Create(start_routine, arg, current_tid, &stack, detached);
+
+ __sanitizer_sigset_t sigset = {};
+# if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
+ SANITIZER_SOLARIS
+ ScopedBlockSignals block(&sigset);
+# endif
+
+ AsanThread *t = AsanThread::Create(sigset, current_tid, &stack, detached);
int result;
{
@@ -219,10 +270,13 @@ INTERCEPTOR(int, pthread_create, void *thread,
// stored by pthread for future reuse even after thread destruction, and
// the linked list it's stored in doesn't even hold valid pointers to the
// objects, the latter are calculated by obscure pointer arithmetic.
-#if CAN_SANITIZE_LEAKS
+# if CAN_SANITIZE_LEAKS
__lsan::ScopedInterceptorDisabler disabler;
-#endif
- result = REAL(pthread_create)(thread, attr, asan_thread_start, t);
+# endif
+ asanThreadArgRetval().Create(detached, {start_routine, arg}, [&]() -> uptr {
+ result = REAL(pthread_create)(thread, attr, asan_thread_start, t);
+ return result ? 0 : *(uptr *)(thread);
+ });
}
if (result != 0) {
// If the thread didn't start delete the AsanThread to avoid leaking it.
@@ -233,24 +287,101 @@ INTERCEPTOR(int, pthread_create, void *thread,
return result;
}
-INTERCEPTOR(int, pthread_join, void *t, void **arg) {
- return real_pthread_join(t, arg);
+INTERCEPTOR(int, pthread_join, void *thread, void **retval) {
+ int result;
+ asanThreadArgRetval().Join((uptr)thread, [&]() {
+ result = REAL(pthread_join)(thread, retval);
+ return !result;
+ });
+ return result;
+}
+
+INTERCEPTOR(int, pthread_detach, void *thread) {
+ int result;
+ asanThreadArgRetval().Detach((uptr)thread, [&]() {
+ result = REAL(pthread_detach)(thread);
+ return !result;
+ });
+ return result;
+}
+
+INTERCEPTOR(void, pthread_exit, void *retval) {
+ asanThreadArgRetval().Finish(GetThreadSelf(), retval);
+ REAL(pthread_exit)(retval);
+}
+
+# if ASAN_INTERCEPT_TRYJOIN
+INTERCEPTOR(int, pthread_tryjoin_np, void *thread, void **ret) {
+ int result;
+ asanThreadArgRetval().Join((uptr)thread, [&]() {
+ result = REAL(pthread_tryjoin_np)(thread, ret);
+ return !result;
+ });
+ return result;
+}
+# endif
+
+# if ASAN_INTERCEPT_TIMEDJOIN
+INTERCEPTOR(int, pthread_timedjoin_np, void *thread, void **ret,
+ const struct timespec *abstime) {
+ int result;
+ asanThreadArgRetval().Join((uptr)thread, [&]() {
+ result = REAL(pthread_timedjoin_np)(thread, ret, abstime);
+ return !result;
+ });
+ return result;
}
+# endif
DEFINE_REAL_PTHREAD_FUNCTIONS
#endif // ASAN_INTERCEPT_PTHREAD_CREATE
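
The join/detach/exit interceptors above all share one pattern: the real pthread call runs inside a callback, and the registry commits its bookkeeping only if the callback reports success. A hypothetical minimal registry mirroring the asanThreadArgRetval() API shape (not the runtime's actual type):

#include <cstdint>
#include <functional>

struct ThreadRegistry {
  void Join(uintptr_t tid, const std::function<bool()> &call) {
    (void)tid;
    if (call()) {
      // Drop tid's saved {routine, arg, retval} record here; a failed
      // join leaves the record intact for a later retry.
    }
  }
};

static int InterceptJoin(ThreadRegistry &reg, void *thread, void **retval,
                         int (*real_join)(void *, void **)) {
  int result = 0;
  reg.Join(reinterpret_cast<uintptr_t>(thread), [&]() {
    result = real_join(thread, retval);
    return result == 0;  // commit only on a successful join
  });
  return result;
}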
#if ASAN_INTERCEPT_SWAPCONTEXT
static void ClearShadowMemoryForContextStack(uptr stack, uptr ssize) {
+ // Only clear if we know the stack. This should be true only for contexts
+ // created with makecontext().
+ if (!ssize)
+ return;
// Align to page size.
uptr PageSize = GetPageSizeCached();
- uptr bottom = stack & ~(PageSize - 1);
+ uptr bottom = RoundDownTo(stack, PageSize);
+ if (!AddrIsInMem(bottom))
+ return;
ssize += stack - bottom;
ssize = RoundUpTo(ssize, PageSize);
- static const uptr kMaxSaneContextStackSize = 1 << 22; // 4 Mb
- if (AddrIsInMem(bottom) && ssize && ssize <= kMaxSaneContextStackSize) {
- PoisonShadow(bottom, ssize, 0);
- }
+ PoisonShadow(bottom, ssize, 0);
+}
+
+INTERCEPTOR(void, makecontext, struct ucontext_t *ucp, void (*func)(), int argc,
+ ...) {
+ va_list ap;
+ uptr args[64];
+  // We don't know a better way to forward ... into the REAL function. We can
+  // increase the args size if necessary.
+ CHECK_LE(argc, ARRAY_SIZE(args));
+ internal_memset(args, 0, sizeof(args));
+ va_start(ap, argc);
+ for (int i = 0; i < argc; ++i) args[i] = va_arg(ap, uptr);
+ va_end(ap);
+
+# define ENUMERATE_ARRAY_4(start) \
+ args[start], args[start + 1], args[start + 2], args[start + 3]
+# define ENUMERATE_ARRAY_16(start) \
+ ENUMERATE_ARRAY_4(start), ENUMERATE_ARRAY_4(start + 4), \
+ ENUMERATE_ARRAY_4(start + 8), ENUMERATE_ARRAY_4(start + 12)
+# define ENUMERATE_ARRAY_64() \
+ ENUMERATE_ARRAY_16(0), ENUMERATE_ARRAY_16(16), ENUMERATE_ARRAY_16(32), \
+ ENUMERATE_ARRAY_16(48)
+
+ REAL(makecontext)
+ ((struct ucontext_t *)ucp, func, argc, ENUMERATE_ARRAY_64());
+
+# undef ENUMERATE_ARRAY_4
+# undef ENUMERATE_ARRAY_16
+# undef ENUMERATE_ARRAY_64
+
+ // Sign the stack so we can identify it for unpoisoning.
+ SignContextStack(ucp);
}
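
The ENUMERATE_ARRAY trick above deserves a standalone demonstration: copy up to N variadic arguments into a fixed array, then always pass all N slots positionally. Unused slots are zero, and the callee (like makecontext's func) only reads the first argc of them. A runnable 8-slot version (all names here are illustrative):

#include <cstdarg>
#include <cstdint>

static void Target(int argc, ...) {
  va_list ap;
  va_start(ap, argc);
  for (int i = 0; i < argc; ++i) (void)va_arg(ap, uintptr_t);
  va_end(ap);
}

#define PASS_4(a, s) (a)[s], (a)[s + 1], (a)[s + 2], (a)[s + 3]

static void Forward(int argc, ...) {
  uintptr_t args[8] = {};
  va_list ap;
  va_start(ap, argc);
  for (int i = 0; i < argc && i < 8; ++i) args[i] = va_arg(ap, uintptr_t);
  va_end(ap);
  Target(argc, PASS_4(args, 0), PASS_4(args, 4));  // always pass 8 slots
}

#undef PASS_4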
INTERCEPTOR(int, swapcontext, struct ucontext_t *oucp,
@@ -266,15 +397,15 @@ INTERCEPTOR(int, swapcontext, struct ucontext_t *oucp,
uptr stack, ssize;
ReadContextStack(ucp, &stack, &ssize);
ClearShadowMemoryForContextStack(stack, ssize);
-#if __has_attribute(__indirect_return__) && \
- (defined(__x86_64__) || defined(__i386__))
+
+# if __has_attribute(__indirect_return__) && \
+ (defined(__x86_64__) || defined(__i386__))
int (*real_swapcontext)(struct ucontext_t *, struct ucontext_t *)
- __attribute__((__indirect_return__))
- = REAL(swapcontext);
+ __attribute__((__indirect_return__)) = REAL(swapcontext);
int res = real_swapcontext(oucp, ucp);
-#else
+# else
int res = REAL(swapcontext)(oucp, ucp);
-#endif
+# endif
// swapcontext technically does not return, but the program may swap context
// to "oucp" later, which would look as if swapcontext() returned 0.
// We need to clear shadow for ucp once again, as it may be in arbitrary
@@ -352,9 +483,9 @@ INTERCEPTOR(_Unwind_Reason_Code, _Unwind_SjLj_RaiseException,
#if ASAN_INTERCEPT_INDEX
# if ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX
INTERCEPTOR(char*, index, const char *string, int c)
- ALIAS(WRAPPER_NAME(strchr));
+ ALIAS(WRAP(strchr));
# else
-# if SANITIZER_MAC
+# if SANITIZER_APPLE
DECLARE_REAL(char*, index, const char *string, int c)
OVERRIDE_FUNCTION(index, strchr);
# else
@@ -368,11 +499,11 @@ DEFINE_REAL(char*, index, const char *string, int c)
INTERCEPTOR(char *, strcat, char *to, const char *from) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, strcat);
- ENSURE_ASAN_INITED();
+ AsanInitFromRtl();
if (flags()->replace_str) {
- uptr from_length = REAL(strlen)(from);
+ uptr from_length = internal_strlen(from);
ASAN_READ_RANGE(ctx, from, from_length + 1);
- uptr to_length = REAL(strlen)(to);
+ uptr to_length = internal_strlen(to);
ASAN_READ_STRING_OF_LEN(ctx, to, to_length, to_length);
ASAN_WRITE_RANGE(ctx, to + to_length, from_length + 1);
// If the copying actually happens, the |from| string should not overlap
@@ -389,12 +520,12 @@ DEFINE_REAL(char*, index, const char *string, int c)
INTERCEPTOR(char*, strncat, char *to, const char *from, uptr size) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, strncat);
- ENSURE_ASAN_INITED();
+ AsanInitFromRtl();
if (flags()->replace_str) {
uptr from_length = MaybeRealStrnlen(from, size);
uptr copy_length = Min(size, from_length + 1);
ASAN_READ_RANGE(ctx, from, copy_length);
- uptr to_length = REAL(strlen)(to);
+ uptr to_length = internal_strlen(to);
ASAN_READ_STRING_OF_LEN(ctx, to, to_length, to_length);
ASAN_WRITE_RANGE(ctx, to + to_length, from_length + 1);
if (from_length > 0) {
@@ -408,18 +539,18 @@ INTERCEPTOR(char*, strncat, char *to, const char *from, uptr size) {
INTERCEPTOR(char *, strcpy, char *to, const char *from) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, strcpy);
-#if SANITIZER_MAC
- if (UNLIKELY(!asan_inited))
- return REAL(strcpy)(to, from);
-#endif
- // strcpy is called from malloc_default_purgeable_zone()
- // in __asan::ReplaceSystemAlloc() on Mac.
- if (asan_init_is_running) {
- return REAL(strcpy)(to, from);
+ if constexpr (SANITIZER_APPLE) {
+ // strcpy is called from malloc_default_purgeable_zone()
+ // in __asan::ReplaceSystemAlloc() on Mac.
+ if (UNLIKELY(!AsanInited()))
+ return REAL(strcpy)(to, from);
+ } else {
+ if (!TryAsanInitFromRtl())
+ return REAL(strcpy)(to, from);
}
- ENSURE_ASAN_INITED();
+
if (flags()->replace_str) {
- uptr from_size = REAL(strlen)(from) + 1;
+ uptr from_size = internal_strlen(from) + 1;
CHECK_RANGES_OVERLAP("strcpy", to, from_size, from, from_size);
ASAN_READ_RANGE(ctx, from, from_size);
ASAN_WRITE_RANGE(ctx, to, from_size);
@@ -430,15 +561,17 @@ INTERCEPTOR(char *, strcpy, char *to, const char *from) {
INTERCEPTOR(char*, strdup, const char *s) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, strdup);
- if (UNLIKELY(!asan_inited)) return internal_strdup(s);
- ENSURE_ASAN_INITED();
- uptr length = REAL(strlen)(s);
+ if (UNLIKELY(!TryAsanInitFromRtl()))
+ return internal_strdup(s);
+ uptr length = internal_strlen(s);
if (flags()->replace_str) {
ASAN_READ_RANGE(ctx, s, length + 1);
}
GET_STACK_TRACE_MALLOC;
void *new_mem = asan_malloc(length + 1, &stack);
- REAL(memcpy)(new_mem, s, length + 1);
+ if (new_mem) {
+ REAL(memcpy)(new_mem, s, length + 1);
+ }
return reinterpret_cast<char*>(new_mem);
}
@@ -446,15 +579,17 @@ INTERCEPTOR(char*, strdup, const char *s) {
INTERCEPTOR(char*, __strdup, const char *s) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, strdup);
- if (UNLIKELY(!asan_inited)) return internal_strdup(s);
- ENSURE_ASAN_INITED();
- uptr length = REAL(strlen)(s);
+ if (UNLIKELY(!TryAsanInitFromRtl()))
+ return internal_strdup(s);
+ uptr length = internal_strlen(s);
if (flags()->replace_str) {
ASAN_READ_RANGE(ctx, s, length + 1);
}
GET_STACK_TRACE_MALLOC;
void *new_mem = asan_malloc(length + 1, &stack);
- REAL(memcpy)(new_mem, s, length + 1);
+ if (new_mem) {
+ REAL(memcpy)(new_mem, s, length + 1);
+ }
return reinterpret_cast<char*>(new_mem);
}
#endif // ASAN_INTERCEPT___STRDUP
@@ -462,7 +597,7 @@ INTERCEPTOR(char*, __strdup, const char *s) {
INTERCEPTOR(char*, strncpy, char *to, const char *from, uptr size) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, strncpy);
- ENSURE_ASAN_INITED();
+ AsanInitFromRtl();
if (flags()->replace_str) {
uptr from_size = Min(size, MaybeRealStrnlen(from, size) + 1);
CHECK_RANGES_OVERLAP("strncpy", to, from_size, from, from_size);
@@ -472,26 +607,40 @@ INTERCEPTOR(char*, strncpy, char *to, const char *from, uptr size) {
return REAL(strncpy)(to, from, size);
}
-INTERCEPTOR(long, strtol, const char *nptr, char **endptr, int base) {
- void *ctx;
- ASAN_INTERCEPTOR_ENTER(ctx, strtol);
- ENSURE_ASAN_INITED();
- if (!flags()->replace_str) {
- return REAL(strtol)(nptr, endptr, base);
- }
+template <typename Fn>
+static ALWAYS_INLINE auto StrtolImpl(void *ctx, Fn real, const char *nptr,
+ char **endptr, int base)
+ -> decltype(real(nullptr, nullptr, 0)) {
+ if (!flags()->replace_str)
+ return real(nptr, endptr, base);
char *real_endptr;
- long result = REAL(strtol)(nptr, &real_endptr, base);
+ auto res = real(nptr, &real_endptr, base);
StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
- return result;
+ return res;
}
+# define INTERCEPTOR_STRTO_BASE(ret_type, func) \
+ INTERCEPTOR(ret_type, func, const char *nptr, char **endptr, int base) { \
+ void *ctx; \
+ ASAN_INTERCEPTOR_ENTER(ctx, func); \
+ AsanInitFromRtl(); \
+ return StrtolImpl(ctx, REAL(func), nptr, endptr, base); \
+ }
+
+INTERCEPTOR_STRTO_BASE(long, strtol)
+INTERCEPTOR_STRTO_BASE(long long, strtoll)
+
+# if SANITIZER_GLIBC
+INTERCEPTOR_STRTO_BASE(long, __isoc23_strtol)
+INTERCEPTOR_STRTO_BASE(long long, __isoc23_strtoll)
+# endif
+
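The StrtolImpl template above folds four nearly identical interceptors into one because the return type is deduced from the real function's own signature. A reduced, runnable version of the same idiom (CheckedStrto is an illustrative name; the range checks are elided):

#include <cstdlib>

template <typename Fn>
static auto CheckedStrto(Fn real, const char *nptr, char **endptr, int base)
    -> decltype(real(nullptr, nullptr, 0)) {
  char *real_endptr;
  auto res = real(nptr, &real_endptr, base);
  // Range checks against [nptr, real_endptr] would go here.
  if (endptr) *endptr = real_endptr;
  return res;
}

// Usage: long v       = CheckedStrto(strtol, "42", nullptr, 10);
//        long long w  = CheckedStrto(strtoll, "42", nullptr, 10);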
INTERCEPTOR(int, atoi, const char *nptr) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, atoi);
-#if SANITIZER_MAC
- if (UNLIKELY(!asan_inited)) return REAL(atoi)(nptr);
-#endif
- ENSURE_ASAN_INITED();
+ if (SANITIZER_APPLE && UNLIKELY(!AsanInited()))
+ return REAL(atoi)(nptr);
+ AsanInitFromRtl();
if (!flags()->replace_str) {
return REAL(atoi)(nptr);
}
@@ -509,10 +658,9 @@ INTERCEPTOR(int, atoi, const char *nptr) {
INTERCEPTOR(long, atol, const char *nptr) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, atol);
-#if SANITIZER_MAC
- if (UNLIKELY(!asan_inited)) return REAL(atol)(nptr);
-#endif
- ENSURE_ASAN_INITED();
+ if (SANITIZER_APPLE && UNLIKELY(!AsanInited()))
+ return REAL(atol)(nptr);
+ AsanInitFromRtl();
if (!flags()->replace_str) {
return REAL(atol)(nptr);
}
@@ -523,24 +671,10 @@ INTERCEPTOR(long, atol, const char *nptr) {
return result;
}
-#if ASAN_INTERCEPT_ATOLL_AND_STRTOLL
-INTERCEPTOR(long long, strtoll, const char *nptr, char **endptr, int base) {
- void *ctx;
- ASAN_INTERCEPTOR_ENTER(ctx, strtoll);
- ENSURE_ASAN_INITED();
- if (!flags()->replace_str) {
- return REAL(strtoll)(nptr, endptr, base);
- }
- char *real_endptr;
- long long result = REAL(strtoll)(nptr, &real_endptr, base);
- StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
- return result;
-}
-
INTERCEPTOR(long long, atoll, const char *nptr) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, atoll);
- ENSURE_ASAN_INITED();
+ AsanInitFromRtl();
if (!flags()->replace_str) {
return REAL(atoll)(nptr);
}
@@ -550,7 +684,6 @@ INTERCEPTOR(long long, atoll, const char *nptr) {
ASAN_READ_STRING(ctx, nptr, (real_endptr - nptr) + 1);
return result;
}
-#endif // ASAN_INTERCEPT_ATOLL_AND_STRTOLL
#if ASAN_INTERCEPT___CXA_ATEXIT || ASAN_INTERCEPT_ATEXIT
static void AtCxaAtexit(void *unused) {
@@ -562,11 +695,10 @@ static void AtCxaAtexit(void *unused) {
#if ASAN_INTERCEPT___CXA_ATEXIT
INTERCEPTOR(int, __cxa_atexit, void (*func)(void *), void *arg,
void *dso_handle) {
-#if SANITIZER_MAC
- if (UNLIKELY(!asan_inited)) return REAL(__cxa_atexit)(func, arg, dso_handle);
-#endif
- ENSURE_ASAN_INITED();
-#if CAN_SANITIZE_LEAKS
+ if (SANITIZER_APPLE && UNLIKELY(!AsanInited()))
+ return REAL(__cxa_atexit)(func, arg, dso_handle);
+ AsanInitFromRtl();
+# if CAN_SANITIZE_LEAKS
__lsan::ScopedInterceptorDisabler disabler;
#endif
int res = REAL(__cxa_atexit)(func, arg, dso_handle);
@@ -577,11 +709,11 @@ INTERCEPTOR(int, __cxa_atexit, void (*func)(void *), void *arg,
#if ASAN_INTERCEPT_ATEXIT
INTERCEPTOR(int, atexit, void (*func)()) {
- ENSURE_ASAN_INITED();
-#if CAN_SANITIZE_LEAKS
+ AsanInitFromRtl();
+# if CAN_SANITIZE_LEAKS
__lsan::ScopedInterceptorDisabler disabler;
#endif
- // Avoid calling real atexit as it is unrechable on at least on Linux.
+  // Avoid calling real atexit as it is unreachable, at least on Linux.
int res = REAL(__cxa_atexit)((void (*)(void *a))func, nullptr, nullptr);
REAL(__cxa_atexit)(AtCxaAtexit, nullptr, nullptr);
return res;
@@ -616,6 +748,7 @@ void InitializeAsanInterceptors() {
static bool was_called_once;
CHECK(!was_called_once);
was_called_once = true;
+ InitializePlatformInterceptors();
InitializeCommonInterceptors();
InitializeSignalInterceptors();
@@ -634,19 +767,22 @@ void InitializeAsanInterceptors() {
ASAN_INTERCEPT_FUNC(atoi);
ASAN_INTERCEPT_FUNC(atol);
- ASAN_INTERCEPT_FUNC(strtol);
-#if ASAN_INTERCEPT_ATOLL_AND_STRTOLL
ASAN_INTERCEPT_FUNC(atoll);
+ ASAN_INTERCEPT_FUNC(strtol);
ASAN_INTERCEPT_FUNC(strtoll);
-#endif
+# if SANITIZER_GLIBC
+ ASAN_INTERCEPT_FUNC(__isoc23_strtol);
+ ASAN_INTERCEPT_FUNC(__isoc23_strtoll);
+# endif
// Intercept jump-related functions.
ASAN_INTERCEPT_FUNC(longjmp);
-#if ASAN_INTERCEPT_SWAPCONTEXT
+# if ASAN_INTERCEPT_SWAPCONTEXT
ASAN_INTERCEPT_FUNC(swapcontext);
-#endif
-#if ASAN_INTERCEPT__LONGJMP
+ ASAN_INTERCEPT_FUNC(makecontext);
+# endif
+# if ASAN_INTERCEPT__LONGJMP
ASAN_INTERCEPT_FUNC(_longjmp);
#endif
#if ASAN_INTERCEPT___LONGJMP_CHK
@@ -665,11 +801,11 @@ void InitializeAsanInterceptors() {
#endif
// Indirectly intercept std::rethrow_exception.
#if ASAN_INTERCEPT__UNWIND_RAISEEXCEPTION
- INTERCEPT_FUNCTION(_Unwind_RaiseException);
+ ASAN_INTERCEPT_FUNC(_Unwind_RaiseException);
#endif
// Indirectly intercept std::rethrow_exception.
#if ASAN_INTERCEPT__UNWIND_SJLJ_RAISEEXCEPTION
- INTERCEPT_FUNCTION(_Unwind_SjLj_RaiseException);
+ ASAN_INTERCEPT_FUNC(_Unwind_SjLj_RaiseException);
#endif
// Intercept threading-related functions
@@ -681,6 +817,16 @@ void InitializeAsanInterceptors() {
ASAN_INTERCEPT_FUNC(pthread_create);
#endif
ASAN_INTERCEPT_FUNC(pthread_join);
+ ASAN_INTERCEPT_FUNC(pthread_detach);
+ ASAN_INTERCEPT_FUNC(pthread_exit);
+# endif
+
+# if ASAN_INTERCEPT_TIMEDJOIN
+ ASAN_INTERCEPT_FUNC(pthread_timedjoin_np);
+#endif
+
+#if ASAN_INTERCEPT_TRYJOIN
+ ASAN_INTERCEPT_FUNC(pthread_tryjoin_np);
#endif
// Intercept atexit function.
@@ -700,8 +846,6 @@ void InitializeAsanInterceptors() {
ASAN_INTERCEPT_FUNC(vfork);
#endif
- InitializePlatformInterceptors();
-
VReport(1, "AddressSanitizer: libc interceptors initialized\n");
}
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors.h b/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors.h
index a9249dea45b9..826b45f5ada8 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors.h
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors.h
@@ -24,14 +24,6 @@ namespace __asan {
void InitializeAsanInterceptors();
void InitializePlatformInterceptors();
-#define ENSURE_ASAN_INITED() \
- do { \
- CHECK(!asan_init_is_running); \
- if (UNLIKELY(!asan_inited)) { \
- AsanInitFromRtl(); \
- } \
- } while (0)
-
} // namespace __asan
// There is no general interception at all on Fuchsia.
@@ -42,12 +34,10 @@ void InitializePlatformInterceptors();
// Use macros to describe whether a specific function should be
// intercepted on a given platform.
#if !SANITIZER_WINDOWS
-# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 1
# define ASAN_INTERCEPT__LONGJMP 1
# define ASAN_INTERCEPT_INDEX 1
# define ASAN_INTERCEPT_PTHREAD_CREATE 1
#else
-# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 0
# define ASAN_INTERCEPT__LONGJMP 0
# define ASAN_INTERCEPT_INDEX 0
# define ASAN_INTERCEPT_PTHREAD_CREATE 0
@@ -78,8 +68,8 @@ void InitializePlatformInterceptors();
# define ASAN_INTERCEPT___LONGJMP_CHK 0
#endif
-#if ASAN_HAS_EXCEPTIONS && !SANITIZER_WINDOWS && !SANITIZER_SOLARIS && \
- !SANITIZER_NETBSD
+#if ASAN_HAS_EXCEPTIONS && !SANITIZER_SOLARIS && !SANITIZER_NETBSD && \
+ (!SANITIZER_WINDOWS || (defined(__MINGW32__) && defined(__i386__)))
# define ASAN_INTERCEPT___CXA_THROW 1
# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 1
# if defined(_GLIBCXX_SJLJ_EXCEPTIONS) || (SANITIZER_IOS && defined(__arm__))
@@ -112,9 +102,17 @@ void InitializePlatformInterceptors();
# define ASAN_INTERCEPT___STRDUP 0
#endif
+#if SANITIZER_GLIBC && ASAN_INTERCEPT_PTHREAD_CREATE
+# define ASAN_INTERCEPT_TIMEDJOIN 1
+# define ASAN_INTERCEPT_TRYJOIN 1
+#else
+# define ASAN_INTERCEPT_TIMEDJOIN 0
+# define ASAN_INTERCEPT_TRYJOIN 0
+#endif
+
#if SANITIZER_LINUX && \
(defined(__arm__) || defined(__aarch64__) || defined(__i386__) || \
- defined(__x86_64__) || SANITIZER_RISCV64)
+ defined(__x86_64__) || SANITIZER_RISCV64 || SANITIZER_LOONGARCH64)
# define ASAN_INTERCEPT_VFORK 1
#else
# define ASAN_INTERCEPT_VFORK 0
@@ -133,29 +131,36 @@ DECLARE_REAL(char*, strncpy, char *to, const char *from, uptr size)
DECLARE_REAL(uptr, strnlen, const char *s, uptr maxlen)
DECLARE_REAL(char*, strstr, const char *s1, const char *s2)
-#if !SANITIZER_MAC
-#define ASAN_INTERCEPT_FUNC(name) \
- do { \
- if (!INTERCEPT_FUNCTION(name)) \
- VReport(1, "AddressSanitizer: failed to intercept '%s'\n", #name); \
- } while (0)
-#define ASAN_INTERCEPT_FUNC_VER(name, ver) \
- do { \
- if (!INTERCEPT_FUNCTION_VER(name, ver)) \
- VReport(1, "AddressSanitizer: failed to intercept '%s@@%s'\n", #name, \
- #ver); \
- } while (0)
-#define ASAN_INTERCEPT_FUNC_VER_UNVERSIONED_FALLBACK(name, ver) \
- do { \
- if (!INTERCEPT_FUNCTION_VER(name, ver) && !INTERCEPT_FUNCTION(name)) \
- VReport(1, "AddressSanitizer: failed to intercept '%s@@%s' or '%s'\n", \
- #name, #ver, #name); \
- } while (0)
-
-#else
+# if !SANITIZER_APPLE
+# define ASAN_INTERCEPT_FUNC(name) \
+ do { \
+ if (!INTERCEPT_FUNCTION(name)) \
+ VReport(1, "AddressSanitizer: failed to intercept '%s'\n", #name); \
+ } while (0)
+# define ASAN_INTERCEPT_FUNC_VER(name, ver) \
+ do { \
+ if (!INTERCEPT_FUNCTION_VER(name, ver)) \
+ VReport(1, "AddressSanitizer: failed to intercept '%s@@%s'\n", \
+ #name, ver); \
+ } while (0)
+# define ASAN_INTERCEPT_FUNC_VER_UNVERSIONED_FALLBACK(name, ver) \
+ do { \
+ if (!INTERCEPT_FUNCTION_VER(name, ver) && !INTERCEPT_FUNCTION(name)) \
+ VReport(1, \
+ "AddressSanitizer: failed to intercept '%s@@%s' or '%s'\n", \
+ #name, ver, #name); \
+ } while (0)
+
+# else
// OS X interceptors don't need to be initialized with INTERCEPT_FUNCTION.
-#define ASAN_INTERCEPT_FUNC(name)
-#endif // SANITIZER_MAC
+# define ASAN_INTERCEPT_FUNC(name)
+# endif // SANITIZER_APPLE
+
+#define ASAN_INTERCEPTOR_ENTER(ctx, func) \
+ AsanInterceptorContext _ctx = {#func}; \
+ ctx = (void *)&_ctx; \
+ (void) ctx;
+#define COMMON_INTERCEPT_FUNCTION(name) ASAN_INTERCEPT_FUNC(name)
#endif // !SANITIZER_FUCHSIA
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors_memintrinsics.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors_memintrinsics.cpp
index 9c316bb95749..bdf328f89206 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors_memintrinsics.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors_memintrinsics.cpp
@@ -11,13 +11,54 @@
// ASan versions of memcpy, memmove, and memset.
//===---------------------------------------------------------------------===//
+#define SANITIZER_COMMON_NO_REDEFINE_BUILTINS
+
#include "asan_interceptors_memintrinsics.h"
+
+#include "asan_interceptors.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_suppressions.h"
using namespace __asan;
+// memcpy is called during __asan_init() from the internals of printf(...).
+// We do not treat memcpy with to==from as a bug.
+// See http://llvm.org/bugs/show_bug.cgi?id=11763.
+#define ASAN_MEMCPY_IMPL(ctx, to, from, size) \
+ do { \
+ if (LIKELY(replace_intrin_cached)) { \
+ if (LIKELY(to != from)) { \
+ CHECK_RANGES_OVERLAP("memcpy", to, size, from, size); \
+ } \
+ ASAN_READ_RANGE(ctx, from, size); \
+ ASAN_WRITE_RANGE(ctx, to, size); \
+ } else if (UNLIKELY(!AsanInited())) { \
+ return internal_memcpy(to, from, size); \
+ } \
+ return REAL(memcpy)(to, from, size); \
+ } while (0)
+
+// memset is called inside Printf.
+#define ASAN_MEMSET_IMPL(ctx, block, c, size) \
+ do { \
+ if (LIKELY(replace_intrin_cached)) { \
+ ASAN_WRITE_RANGE(ctx, block, size); \
+ } else if (UNLIKELY(!AsanInited())) { \
+ return internal_memset(block, c, size); \
+ } \
+ return REAL(memset)(block, c, size); \
+ } while (0)
+
+#define ASAN_MEMMOVE_IMPL(ctx, to, from, size) \
+ do { \
+ if (LIKELY(replace_intrin_cached)) { \
+ ASAN_READ_RANGE(ctx, from, size); \
+ ASAN_WRITE_RANGE(ctx, to, size); \
+ } \
+ return internal_memmove(to, from, size); \
+ } while (0)
+
void *__asan_memcpy(void *to, const void *from, uptr size) {
ASAN_MEMCPY_IMPL(nullptr, to, from, size);
}
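
The rewritten ASAN_MEMCPY_IMPL above is a three-way dispatch whose order matters: the common, fully initialized checking path is tested first, and the pre-initialization fallback is only reachable while checking is off. A reduced model with hypothetical stand-ins for the runtime's cached flag and init query:

#include <cstddef>
#include <cstring>

static bool replace_intrin_cached = true;  // cached flags()->replace_intrin
static bool AsanInitedStub() { return true; }

static void *SketchMemcpy(void *to, const void *from, size_t size) {
  if (replace_intrin_cached) {
    // Real code: CHECK_RANGES_OVERLAP plus read/write poison checks here.
  } else if (!AsanInitedStub()) {
    return memcpy(to, from, size);  // stands in for internal_memcpy
  }
  return memcpy(to, from, size);    // stands in for REAL(memcpy)
}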
@@ -40,4 +81,26 @@ extern "C" decltype(__asan_memcpy) memcpy[[gnu::alias("__asan_memcpy")]];
extern "C" decltype(__asan_memmove) memmove[[gnu::alias("__asan_memmove")]];
extern "C" decltype(__asan_memset) memset[[gnu::alias("__asan_memset")]];
+#else // SANITIZER_FUCHSIA
+
+#define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size) \
+ do { \
+ ASAN_INTERCEPTOR_ENTER(ctx, memmove); \
+ ASAN_MEMMOVE_IMPL(ctx, to, from, size); \
+ } while (false)
+
+#define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size) \
+ do { \
+ ASAN_INTERCEPTOR_ENTER(ctx, memcpy); \
+ ASAN_MEMCPY_IMPL(ctx, to, from, size); \
+ } while (false)
+
+#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size) \
+ do { \
+ ASAN_INTERCEPTOR_ENTER(ctx, memset); \
+ ASAN_MEMSET_IMPL(ctx, block, c, size); \
+ } while (false)
+
+#include "sanitizer_common/sanitizer_common_interceptors_memintrinsics.inc"
+
#endif // SANITIZER_FUCHSIA
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors_memintrinsics.h b/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors_memintrinsics.h
index 632f0515a9eb..eb44f8f2f729 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors_memintrinsics.h
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors_memintrinsics.h
@@ -18,26 +18,29 @@
#include "asan_mapping.h"
#include "interception/interception.h"
-DECLARE_REAL(void*, memcpy, void *to, const void *from, uptr size)
-DECLARE_REAL(void*, memset, void *block, int c, uptr size)
+DECLARE_REAL(void *, memcpy, void *to, const void *from, uptr size)
+DECLARE_REAL(void *, memset, void *block, int c, uptr size)
namespace __asan {
// Return true if we can quickly decide that the region is unpoisoned.
// We assume that a redzone is at least 16 bytes.
static inline bool QuickCheckForUnpoisonedRegion(uptr beg, uptr size) {
- if (size == 0) return true;
- if (size <= 32)
- return !AddressIsPoisoned(beg) &&
- !AddressIsPoisoned(beg + size - 1) &&
- !AddressIsPoisoned(beg + size / 2);
- if (size <= 64)
- return !AddressIsPoisoned(beg) &&
- !AddressIsPoisoned(beg + size / 4) &&
- !AddressIsPoisoned(beg + size - 1) &&
- !AddressIsPoisoned(beg + 3 * size / 4) &&
- !AddressIsPoisoned(beg + size / 2);
- return false;
+ if (UNLIKELY(size == 0 || size > sizeof(uptr) * ASAN_SHADOW_GRANULARITY))
+ return !size;
+
+ uptr last = beg + size - 1;
+ uptr shadow_first = MEM_TO_SHADOW(beg);
+ uptr shadow_last = MEM_TO_SHADOW(last);
+ uptr uptr_first = RoundDownTo(shadow_first, sizeof(uptr));
+ uptr uptr_last = RoundDownTo(shadow_last, sizeof(uptr));
+ if (LIKELY(((*reinterpret_cast<const uptr *>(uptr_first) |
+ *reinterpret_cast<const uptr *>(uptr_last)) == 0)))
+ return true;
+ u8 shadow = AddressIsPoisoned(last);
+ for (; shadow_first < shadow_last; ++shadow_first)
+ shadow |= *((u8 *)shadow_first);
+ return !shadow;
}
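
The rewritten fast path is subtle: for a region of at most sizeof(uptr) * granularity bytes, all covering shadow bytes fit inside two naturally aligned machine words, so two aligned loads can prove the whole region clean. A hedged model (it assumes shadow_first/shadow_last are valid shadow addresses, and unlike the real code, which checks the final partial granule with AddressIsPoisoned, it treats every shadow byte as all-or-nothing):

#include <cstdint>

static bool QuickShadowCheck(uintptr_t shadow_first, uintptr_t shadow_last) {
  uintptr_t word_first = shadow_first & ~(sizeof(uintptr_t) - 1);
  uintptr_t word_last = shadow_last & ~(sizeof(uintptr_t) - 1);
  if ((*reinterpret_cast<const uintptr_t *>(word_first) |
       *reinterpret_cast<const uintptr_t *>(word_last)) == 0)
    return true;  // every shadow byte covering the region is zero
  uint8_t acc = 0;
  for (uintptr_t s = shadow_first; s <= shadow_last; ++s)
    acc |= *reinterpret_cast<const uint8_t *>(s);  // slow byte-wise scan
  return acc == 0;
}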
struct AsanInterceptorContext {
@@ -49,75 +52,31 @@ struct AsanInterceptorContext {
// that no extra frames are created, and stack trace contains
// relevant information only.
// We check all shadow bytes.
-#define ACCESS_MEMORY_RANGE(ctx, offset, size, isWrite) do { \
- uptr __offset = (uptr)(offset); \
- uptr __size = (uptr)(size); \
- uptr __bad = 0; \
- if (__offset > __offset + __size) { \
- GET_STACK_TRACE_FATAL_HERE; \
- ReportStringFunctionSizeOverflow(__offset, __size, &stack); \
- } \
- if (!QuickCheckForUnpoisonedRegion(__offset, __size) && \
- (__bad = __asan_region_is_poisoned(__offset, __size))) { \
- AsanInterceptorContext *_ctx = (AsanInterceptorContext *)ctx; \
- bool suppressed = false; \
- if (_ctx) { \
- suppressed = IsInterceptorSuppressed(_ctx->interceptor_name); \
- if (!suppressed && HaveStackTraceBasedSuppressions()) { \
- GET_STACK_TRACE_FATAL_HERE; \
- suppressed = IsStackTraceSuppressed(&stack); \
- } \
- } \
- if (!suppressed) { \
- GET_CURRENT_PC_BP_SP; \
- ReportGenericError(pc, bp, sp, __bad, isWrite, __size, 0, false);\
- } \
- } \
- } while (0)
-
-// memcpy is called during __asan_init() from the internals of printf(...).
-// We do not treat memcpy with to==from as a bug.
-// See http://llvm.org/bugs/show_bug.cgi?id=11763.
-#define ASAN_MEMCPY_IMPL(ctx, to, from, size) \
- do { \
- if (UNLIKELY(!asan_inited)) return internal_memcpy(to, from, size); \
- if (asan_init_is_running) { \
- return REAL(memcpy)(to, from, size); \
- } \
- ENSURE_ASAN_INITED(); \
- if (flags()->replace_intrin) { \
- if (to != from) { \
- CHECK_RANGES_OVERLAP("memcpy", to, size, from, size); \
- } \
- ASAN_READ_RANGE(ctx, from, size); \
- ASAN_WRITE_RANGE(ctx, to, size); \
- } \
- return REAL(memcpy)(to, from, size); \
- } while (0)
-
-// memset is called inside Printf.
-#define ASAN_MEMSET_IMPL(ctx, block, c, size) \
- do { \
- if (UNLIKELY(!asan_inited)) return internal_memset(block, c, size); \
- if (asan_init_is_running) { \
- return REAL(memset)(block, c, size); \
- } \
- ENSURE_ASAN_INITED(); \
- if (flags()->replace_intrin) { \
- ASAN_WRITE_RANGE(ctx, block, size); \
- } \
- return REAL(memset)(block, c, size); \
- } while (0)
-
-#define ASAN_MEMMOVE_IMPL(ctx, to, from, size) \
- do { \
- if (UNLIKELY(!asan_inited)) return internal_memmove(to, from, size); \
- ENSURE_ASAN_INITED(); \
- if (flags()->replace_intrin) { \
- ASAN_READ_RANGE(ctx, from, size); \
- ASAN_WRITE_RANGE(ctx, to, size); \
- } \
- return internal_memmove(to, from, size); \
+#define ACCESS_MEMORY_RANGE(ctx, offset, size, isWrite) \
+ do { \
+ uptr __offset = (uptr)(offset); \
+ uptr __size = (uptr)(size); \
+ uptr __bad = 0; \
+ if (UNLIKELY(__offset > __offset + __size)) { \
+ GET_STACK_TRACE_FATAL_HERE; \
+ ReportStringFunctionSizeOverflow(__offset, __size, &stack); \
+ } \
+ if (UNLIKELY(!QuickCheckForUnpoisonedRegion(__offset, __size)) && \
+ (__bad = __asan_region_is_poisoned(__offset, __size))) { \
+ AsanInterceptorContext *_ctx = (AsanInterceptorContext *)ctx; \
+ bool suppressed = false; \
+ if (_ctx) { \
+ suppressed = IsInterceptorSuppressed(_ctx->interceptor_name); \
+ if (!suppressed && HaveStackTraceBasedSuppressions()) { \
+ GET_STACK_TRACE_FATAL_HERE; \
+ suppressed = IsStackTraceSuppressed(&stack); \
+ } \
+ } \
+ if (!suppressed) { \
+ GET_CURRENT_PC_BP_SP; \
+ ReportGenericError(pc, bp, sp, __bad, isWrite, __size, 0, false); \
+ } \
+ } \
} while (0)
#define ASAN_READ_RANGE(ctx, offset, size) \
@@ -136,7 +95,7 @@ static inline bool RangesOverlap(const char *offset1, uptr length1,
do { \
const char *offset1 = (const char *)_offset1; \
const char *offset2 = (const char *)_offset2; \
- if (RangesOverlap(offset1, length1, offset2, length2)) { \
+ if (UNLIKELY(RangesOverlap(offset1, length1, offset2, length2))) { \
GET_STACK_TRACE_FATAL_HERE; \
bool suppressed = IsInterceptorSuppressed(name); \
if (!suppressed && HaveStackTraceBasedSuppressions()) { \
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors_vfork.S b/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors_vfork.S
index 3ae5503e83cd..ec29adc7b132 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors_vfork.S
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_interceptors_vfork.S
@@ -6,6 +6,7 @@
#include "sanitizer_common/sanitizer_common_interceptors_vfork_aarch64.inc.S"
#include "sanitizer_common/sanitizer_common_interceptors_vfork_arm.inc.S"
#include "sanitizer_common/sanitizer_common_interceptors_vfork_i386.inc.S"
+#include "sanitizer_common/sanitizer_common_interceptors_vfork_loongarch64.inc.S"
#include "sanitizer_common/sanitizer_common_interceptors_vfork_riscv64.inc.S"
#include "sanitizer_common/sanitizer_common_interceptors_vfork_x86_64.inc.S"
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_interface.inc b/contrib/llvm-project/compiler-rt/lib/asan/asan_interface.inc
index ea28fc8ae87c..bfc44b461962 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_interface.inc
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_interface.inc
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
// Asan interface list.
//===----------------------------------------------------------------------===//
+
INTERFACE_FUNCTION(__asan_addr_is_in_fake_stack)
INTERFACE_FUNCTION(__asan_address_is_poisoned)
INTERFACE_FUNCTION(__asan_after_dynamic_init)
@@ -107,6 +108,13 @@ INTERFACE_FUNCTION(__asan_report_store_n_noabort)
INTERFACE_FUNCTION(__asan_set_death_callback)
INTERFACE_FUNCTION(__asan_set_error_report_callback)
INTERFACE_FUNCTION(__asan_set_shadow_00)
+INTERFACE_FUNCTION(__asan_set_shadow_01)
+INTERFACE_FUNCTION(__asan_set_shadow_02)
+INTERFACE_FUNCTION(__asan_set_shadow_03)
+INTERFACE_FUNCTION(__asan_set_shadow_04)
+INTERFACE_FUNCTION(__asan_set_shadow_05)
+INTERFACE_FUNCTION(__asan_set_shadow_06)
+INTERFACE_FUNCTION(__asan_set_shadow_07)
INTERFACE_FUNCTION(__asan_set_shadow_f1)
INTERFACE_FUNCTION(__asan_set_shadow_f2)
INTERFACE_FUNCTION(__asan_set_shadow_f3)
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_interface_internal.h b/contrib/llvm-project/compiler-rt/lib/asan/asan_interface_internal.h
index 3e6e66028874..a99826378022 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_interface_internal.h
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_interface_internal.h
@@ -53,8 +53,10 @@ extern "C" {
const char *module_name; // Module name as a C string. This pointer is a
// unique identifier of a module.
uptr has_dynamic_init; // Non-zero if the global has dynamic initializer.
- __asan_global_source_location *location; // Source location of a global,
- // or NULL if it is unknown.
+  __asan_global_source_location *gcc_location; // Source location of a global,
+                                               // used by the GCC compiler.
+                                               // LLVM uses llvm-symbolizer,
+                                               // which relies on DWARF
+                                               // debugging info.
uptr odr_indicator; // The address of the ODR indicator symbol.
};
@@ -89,6 +91,20 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_set_shadow_00(uptr addr, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_set_shadow_01(uptr addr, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_set_shadow_02(uptr addr, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_set_shadow_03(uptr addr, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_set_shadow_04(uptr addr, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_set_shadow_05(uptr addr, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_set_shadow_06(uptr addr, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_set_shadow_07(uptr addr, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE
void __asan_set_shadow_f1(uptr addr, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_set_shadow_f2(uptr addr, uptr size);
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_internal.h b/contrib/llvm-project/compiler-rt/lib/asan/asan_internal.h
index ad3320304d0d..2944ebe213b5 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_internal.h
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_internal.h
@@ -17,19 +17,19 @@
#include "asan_interface_internal.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
-#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
#if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__)
-# error "The AddressSanitizer run-time should not be"
- " instrumented by AddressSanitizer"
+# error \
+ "The AddressSanitizer run-time should not be instrumented by AddressSanitizer"
#endif
// Build-time configuration options.
// If set, asan will intercept C++ exception api call(s).
#ifndef ASAN_HAS_EXCEPTIONS
-# define ASAN_HAS_EXCEPTIONS 1
+# define ASAN_HAS_EXCEPTIONS 1
#endif
// If set, values like allocator chunk size, as well as defaults for some flags
@@ -43,11 +43,11 @@
#endif
#ifndef ASAN_DYNAMIC
-# ifdef PIC
-# define ASAN_DYNAMIC 1
-# else
-# define ASAN_DYNAMIC 0
-# endif
+# ifdef PIC
+# define ASAN_DYNAMIC 1
+# else
+# define ASAN_DYNAMIC 0
+# endif
#endif
// All internal functions in asan reside inside the __asan namespace
@@ -60,6 +60,7 @@ class AsanThread;
using __sanitizer::StackTrace;
void AsanInitFromRtl();
+bool TryAsanInitFromRtl();
// asan_win.cpp
void InitializePlatformExceptionHandlers();
@@ -105,6 +106,7 @@ void AsanApplyToGlobals(globals_op_fptr op, const void *needle);
void AsanOnDeadlySignal(int, void *siginfo, void *context);
+void SignContextStack(void *context);
void ReadContextStack(void *context, uptr *stack, uptr *ssize);
void StopInitOrderChecking();
@@ -123,26 +125,18 @@ void *AsanDlSymNext(const char *sym);
// `dlopen()` specific initialization inside this function.
bool HandleDlopenInit();
-// Add convenient macro for interface functions that may be represented as
-// weak hooks.
-#define ASAN_MALLOC_HOOK(ptr, size) \
- do { \
- if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(ptr, size); \
- RunMallocHooks(ptr, size); \
- } while (false)
-#define ASAN_FREE_HOOK(ptr) \
- do { \
- if (&__sanitizer_free_hook) __sanitizer_free_hook(ptr); \
- RunFreeHooks(ptr); \
- } while (false)
+void InstallAtExitCheckLeaks();
+void InstallAtForkHandler();
+
#define ASAN_ON_ERROR() \
- if (&__asan_on_error) __asan_on_error()
+ if (&__asan_on_error) \
+ __asan_on_error()
-extern int asan_inited;
-// Used to avoid infinite recursion in __asan_init().
-extern bool asan_init_is_running;
+bool AsanInited();
+extern bool replace_intrin_cached;
extern void (*death_callback)(void);
-// These magic values are written to shadow for better error reporting.
+// These magic values are written to shadow for better error
+// reporting.
const int kAsanHeapLeftRedzoneMagic = 0xfa;
const int kAsanHeapFreeMagic = 0xfd;
const int kAsanStackLeftRedzoneMagic = 0xf1;
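Replacing the extern asan_inited / asan_init_is_running pair with the AsanInited() and TryAsanInitFromRtl() accessors centralizes the init state behind functions. A minimal sketch of the idea, assuming an atomic three-state flag; the names and states here are illustrative, not the runtime's actual layout:

#include <atomic>

enum InitState : int { kUninitialized, kInitializing, kInitialized };
static std::atomic<int> init_state{kUninitialized};

bool AsanInitedSketch() {
  return init_state.load(std::memory_order_acquire) == kInitialized;
}

bool TryInitSketch() {
  int expected = kUninitialized;
  if (init_state.compare_exchange_strong(expected, kInitializing)) {
    // ... perform one-time initialization here ...
    init_state.store(kInitialized, std::memory_order_release);
    return true;
  }
  // Re-entered during init (e.g. from an intercepted allocation):
  // report not-ready so the caller can fall back to a bootstrap path.
  return expected == kInitialized;
}

This is the shape that lets DlsymAlloc::UseImpl() in asan_malloc_linux.cpp below decide between the bootstrap pool and the real allocator with a single call.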
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_linux.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_linux.cpp
index 4bcbe5d02e33..37d3bad1b1ec 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_linux.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_linux.cpp
@@ -15,55 +15,48 @@
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
SANITIZER_SOLARIS
-#include "asan_interceptors.h"
-#include "asan_internal.h"
-#include "asan_premap_shadow.h"
-#include "asan_thread.h"
-#include "sanitizer_common/sanitizer_flags.h"
-#include "sanitizer_common/sanitizer_freebsd.h"
-#include "sanitizer_common/sanitizer_libc.h"
-#include "sanitizer_common/sanitizer_procmaps.h"
-
-#include <sys/time.h>
-#include <sys/resource.h>
-#include <sys/mman.h>
-#include <sys/syscall.h>
-#include <sys/types.h>
-#include <dlfcn.h>
-#include <fcntl.h>
-#include <limits.h>
-#include <pthread.h>
-#include <stdio.h>
-#include <unistd.h>
-#include <unwind.h>
-
-#if SANITIZER_FREEBSD
-#include <sys/link_elf.h>
-#endif
-
-#if SANITIZER_SOLARIS
-#include <link.h>
-#endif
-
-#if SANITIZER_ANDROID || SANITIZER_FREEBSD || SANITIZER_SOLARIS
-#include <ucontext.h>
-extern "C" void* _DYNAMIC;
-#elif SANITIZER_NETBSD
-#include <link_elf.h>
-#include <ucontext.h>
+# include <dlfcn.h>
+# include <fcntl.h>
+# include <limits.h>
+# include <pthread.h>
+# include <stdio.h>
+# include <sys/mman.h>
+# include <sys/resource.h>
+# include <sys/syscall.h>
+# include <sys/time.h>
+# include <sys/types.h>
+# include <unistd.h>
+# include <unwind.h>
+
+# include "asan_interceptors.h"
+# include "asan_internal.h"
+# include "asan_premap_shadow.h"
+# include "asan_thread.h"
+# include "sanitizer_common/sanitizer_flags.h"
+# include "sanitizer_common/sanitizer_hash.h"
+# include "sanitizer_common/sanitizer_libc.h"
+# include "sanitizer_common/sanitizer_procmaps.h"
+
+# if SANITIZER_FREEBSD
+# include <sys/link_elf.h>
+# endif
+
+# if SANITIZER_SOLARIS
+# include <link.h>
+# endif
+
+# if SANITIZER_ANDROID || SANITIZER_FREEBSD || SANITIZER_SOLARIS
+# include <ucontext.h>
+extern "C" void *_DYNAMIC;
+# elif SANITIZER_NETBSD
+# include <link_elf.h>
+# include <ucontext.h>
extern Elf_Dyn _DYNAMIC;
-#else
-#include <sys/ucontext.h>
-#include <link.h>
+# else
+# include <link.h>
+# include <sys/ucontext.h>
extern ElfW(Dyn) _DYNAMIC[];
-#endif
-
-// x86-64 FreeBSD 9.2 and older define 'ucontext_t' incorrectly in
-// 32-bit mode.
-#if SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32) && \
- __FreeBSD_version <= 902001 // v9.2
-#define ucontext_t xucontext_t
-#endif
+# endif
typedef enum {
ASAN_RT_VERSION_UNDEFINED = 0,
@@ -74,21 +67,21 @@ typedef enum {
// FIXME: perhaps also store abi version here?
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
-asan_rt_version_t __asan_rt_version;
+asan_rt_version_t __asan_rt_version;
}
namespace __asan {
void InitializePlatformInterceptors() {}
void InitializePlatformExceptionHandlers() {}
-bool IsSystemHeapAddress (uptr addr) { return false; }
+bool IsSystemHeapAddress(uptr addr) { return false; }
void *AsanDoesNotSupportStaticLinkage() {
// This will fail to link with -static.
return &_DYNAMIC;
}
-#if ASAN_PREMAP_SHADOW
+# if ASAN_PREMAP_SHADOW
uptr FindPremappedShadowStart(uptr shadow_size_bytes) {
uptr granularity = GetMmapGranularity();
uptr shadow_start = reinterpret_cast<uptr>(&__asan_shadow);
@@ -98,16 +91,16 @@ uptr FindPremappedShadowStart(uptr shadow_size_bytes) {
UnmapFromTo(shadow_start + shadow_size, shadow_start + premap_shadow_size);
return shadow_start;
}
-#endif
+# endif
uptr FindDynamicShadowStart() {
uptr shadow_size_bytes = MemToShadowSize(kHighMemEnd);
-#if ASAN_PREMAP_SHADOW
+# if ASAN_PREMAP_SHADOW
if (!PremapShadowFailed())
return FindPremappedShadowStart(shadow_size_bytes);
-#endif
+# endif
- return MapDynamicShadow(shadow_size_bytes, SHADOW_SCALE,
+ return MapDynamicShadow(shadow_size_bytes, ASAN_SHADOW_SCALE,
/*min_shadow_base_alignment*/ 0, kHighMemEnd);
}
@@ -121,46 +114,45 @@ void FlushUnneededASanShadowMemory(uptr p, uptr size) {
ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
}
-#if SANITIZER_ANDROID
+# if SANITIZER_ANDROID
// FIXME: should we do anything for Android?
void AsanCheckDynamicRTPrereqs() {}
void AsanCheckIncompatibleRT() {}
-#else
+# else
static int FindFirstDSOCallback(struct dl_phdr_info *info, size_t size,
void *data) {
- VReport(2, "info->dlpi_name = %s\tinfo->dlpi_addr = %p\n",
- info->dlpi_name, info->dlpi_addr);
-
- // Continue until the first dynamic library is found
- if (!info->dlpi_name || info->dlpi_name[0] == 0)
- return 0;
+ VReport(2, "info->dlpi_name = %s\tinfo->dlpi_addr = %p\n", info->dlpi_name,
+ (void *)info->dlpi_addr);
- // Ignore vDSO
- if (internal_strncmp(info->dlpi_name, "linux-", sizeof("linux-") - 1) == 0)
- return 0;
+ const char **name = (const char **)data;
-#if SANITIZER_FREEBSD || SANITIZER_NETBSD
// Ignore first entry (the main program)
- char **p = (char **)data;
- if (!(*p)) {
- *p = (char *)-1;
+ if (!*name) {
+ *name = "";
return 0;
}
-#endif
-#if SANITIZER_SOLARIS
- // Ignore executable on Solaris
- if (info->dlpi_addr == 0)
+# if SANITIZER_LINUX
+ // Ignore vDSO. glibc versions earlier than 2.15 (and some patched
+ // by distributors) return an empty name for the vDSO entry, so
+ // detect this as well.
+ if (!info->dlpi_name[0] ||
+ internal_strncmp(info->dlpi_name, "linux-", sizeof("linux-") - 1) == 0)
return 0;
-#endif
+# endif
+# if SANITIZER_FREEBSD
+ // Ignore vDSO.
+ if (internal_strcmp(info->dlpi_name, "[vdso]") == 0)
+ return 0;
+# endif
- *(const char **)data = info->dlpi_name;
+ *name = info->dlpi_name;
return 1;
}
static bool IsDynamicRTName(const char *libname) {
return internal_strstr(libname, "libclang_rt.asan") ||
- internal_strstr(libname, "libasan.so");
+ internal_strstr(libname, "libasan.so");
}
static void ReportIncompatibleRT() {
@@ -175,10 +167,11 @@ void AsanCheckDynamicRTPrereqs() {
// Ensure that dynamic RT is the first DSO in the list
const char *first_dso_name = nullptr;
dl_iterate_phdr(FindFirstDSOCallback, &first_dso_name);
- if (first_dso_name && !IsDynamicRTName(first_dso_name)) {
- Report("ASan runtime does not come first in initial library list; "
- "you should either link runtime to your application or "
- "manually preload it with LD_PRELOAD.\n");
+ if (first_dso_name && first_dso_name[0] && !IsDynamicRTName(first_dso_name)) {
+ Report(
+ "ASan runtime does not come first in initial library list; "
+ "you should either link runtime to your application or "
+ "manually preload it with LD_PRELOAD.\n");
Die();
}
}
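The reworked FindFirstDSOCallback now skips the main program via the sentinel write and filters the vDSO by name on Linux and FreeBSD. The same scan can be reproduced standalone; this Linux-only sketch mirrors those skip rules:

#include <link.h>

#include <cstdio>
#include <cstring>

// Mirrors the patched callback: entry 0 is the main program, an empty
// name or a "linux-" prefix marks the vDSO, anything else is the first
// real DSO and ends the iteration.
static int FirstDso(struct dl_phdr_info *info, size_t, void *data) {
  const char **name = static_cast<const char **>(data);
  if (!*name) {  // main program
    *name = "";
    return 0;
  }
  if (!info->dlpi_name[0] ||
      !std::strncmp(info->dlpi_name, "linux-", sizeof("linux-") - 1))
    return 0;  // vDSO
  *name = info->dlpi_name;
  return 1;  // stop: found the first dynamic library
}

int main() {
  const char *name = nullptr;
  dl_iterate_phdr(FirstDso, &name);
  std::printf("first DSO: %s\n", name && name[0] ? name : "(none)");
}

Under LD_PRELOAD of the dynamic runtime, the expectation checked by AsanCheckDynamicRTPrereqs is that this scan yields a libclang_rt.asan or libasan.so path.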
@@ -196,13 +189,14 @@ void AsanCheckIncompatibleRT() {
// as early as possible, otherwise ASan interceptors could bind to
// the functions in dynamic ASan runtime instead of the functions in
// system libraries, causing crashes later in ASan initialization.
- MemoryMappingLayout proc_maps(/*cache_enabled*/true);
+ MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
char filename[PATH_MAX];
MemoryMappedSegment segment(filename, sizeof(filename));
while (proc_maps.Next(&segment)) {
if (IsDynamicRTName(segment.filename)) {
- Report("Your application is linked against "
- "incompatible ASan runtimes.\n");
+ Report(
+ "Your application is linked against "
+ "incompatible ASan runtimes.\n");
Die();
}
}
@@ -212,23 +206,36 @@ void AsanCheckIncompatibleRT() {
}
}
}
-#endif // SANITIZER_ANDROID
+# endif // SANITIZER_ANDROID
-#if !SANITIZER_ANDROID
-void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
- ucontext_t *ucp = (ucontext_t*)context;
- *stack = (uptr)ucp->uc_stack.ss_sp;
- *ssize = ucp->uc_stack.ss_size;
+# if ASAN_INTERCEPT_SWAPCONTEXT
+constexpr u32 kAsanContextStackFlagsMagic = 0x51260eea;
+
+static int HashContextStack(const ucontext_t &ucp) {
+ MurMur2Hash64Builder hash(kAsanContextStackFlagsMagic);
+ hash.add(reinterpret_cast<uptr>(ucp.uc_stack.ss_sp));
+ hash.add(ucp.uc_stack.ss_size);
+ return static_cast<int>(hash.get());
}
-#else
-void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
- UNIMPLEMENTED();
+
+void SignContextStack(void *context) {
+ ucontext_t *ucp = reinterpret_cast<ucontext_t *>(context);
+ ucp->uc_stack.ss_flags = HashContextStack(*ucp);
}
-#endif
-void *AsanDlSymNext(const char *sym) {
- return dlsym(RTLD_NEXT, sym);
+void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
+ const ucontext_t *ucp = reinterpret_cast<const ucontext_t *>(context);
+ if (HashContextStack(*ucp) == ucp->uc_stack.ss_flags) {
+ *stack = reinterpret_cast<uptr>(ucp->uc_stack.ss_sp);
+ *ssize = ucp->uc_stack.ss_size;
+ return;
+ }
+ *stack = 0;
+ *ssize = 0;
}
+# endif // ASAN_INTERCEPT_SWAPCONTEXT
+
+void *AsanDlSymNext(const char *sym) { return dlsym(RTLD_NEXT, sym); }
bool HandleDlopenInit() {
// Not supported on this platform.
@@ -237,7 +244,7 @@ bool HandleDlopenInit() {
return false;
}
-} // namespace __asan
+} // namespace __asan
#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD ||
// SANITIZER_SOLARIS
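The new sign-then-verify protocol for swapcontext is worth a standalone illustration: SignContextStack stamps ss_flags with a hash of the stack bounds, and ReadContextStack only trusts the bounds if the stamp still matches. The sketch below uses a simple 64-bit mixer as a stand-in for the runtime's MurMur2 builder; only the protocol matters, not the exact hash:

#include <ucontext.h>

#include <cstdint>
#include <cstdio>

// Stand-in hash: mixes ss_sp and ss_size down to an int for ss_flags.
static int HashStack(const ucontext_t &ucp) {
  uint64_t h = 0x51260eea;  // same magic seed as the patch
  h = (h ^ (uint64_t)(uintptr_t)ucp.uc_stack.ss_sp) * 0xff51afd7ed558ccdULL;
  h = (h ^ (uint64_t)ucp.uc_stack.ss_size) * 0xc4ceb9fe1a85ec53ULL;
  return (int)(h ^ (h >> 33));
}

int main() {
  ucontext_t uc = {};
  getcontext(&uc);
  uc.uc_stack.ss_sp = &uc;  // fake stack, for illustration only
  uc.uc_stack.ss_size = 4096;
  uc.uc_stack.ss_flags = HashStack(uc);  // what SignContextStack does
  // What ReadContextStack does before trusting the bounds:
  bool trusted = HashStack(uc) == uc.uc_stack.ss_flags;
  std::printf("stack bounds trusted: %d\n", trusted);
}

An unsigned context, for example one built by the application rather than by the intercepted swapcontext path, fails the check, and the runtime reports zeroed bounds instead of reading garbage.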
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_lock.h b/contrib/llvm-project/compiler-rt/lib/asan/asan_lock.h
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_lock.h
+++ /dev/null
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_mac.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_mac.cpp
index c6950547f089..1b0e9b3fe006 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_mac.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_mac.cpp
@@ -12,7 +12,7 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
#include "asan_interceptors.h"
#include "asan_internal.h"
@@ -55,7 +55,7 @@ void *AsanDoesNotSupportStaticLinkage() {
}
uptr FindDynamicShadowStart() {
- return MapDynamicShadow(MemToShadowSize(kHighMemEnd), SHADOW_SCALE,
+ return MapDynamicShadow(MemToShadowSize(kHighMemEnd), ASAN_SHADOW_SCALE,
/*min_shadow_base_alignment*/ 0, kHighMemEnd);
}
@@ -95,10 +95,6 @@ void FlushUnneededASanShadowMemory(uptr p, uptr size) {
ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
}
-void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
- UNIMPLEMENTED();
-}
-
// Support for the following functions from libdispatch on Mac OS:
// dispatch_async_f()
// dispatch_async()
@@ -134,6 +130,20 @@ typedef void* dispatch_source_t;
typedef u64 dispatch_time_t;
typedef void (*dispatch_function_t)(void *block);
typedef void* (*worker_t)(void *block);
+typedef unsigned long dispatch_mach_reason;
+typedef void *dispatch_mach_msg_t;
+typedef int mach_error_t;
+typedef void *dispatch_mach_t;
+
+typedef void (*dispatch_mach_handler_function_t)(void *context,
+ dispatch_mach_reason reason,
+ dispatch_mach_msg_t message,
+ mach_error_t error);
+# if !defined(MISSING_BLOCKS_SUPPORT)
+typedef void (^dispatch_mach_handler_t)(dispatch_mach_reason reason,
+ dispatch_mach_msg_t message,
+ mach_error_t error);
+# endif
// A wrapper for the ObjC blocks used to support libdispatch.
typedef struct {
@@ -146,8 +156,7 @@ ALWAYS_INLINE
void asan_register_worker_thread(int parent_tid, StackTrace *stack) {
AsanThread *t = GetCurrentThread();
if (!t) {
- t = AsanThread::Create(/* start_routine */ nullptr, /* arg */ nullptr,
- parent_tid, stack, /* detached */ true);
+ t = AsanThread::Create(parent_tid, stack, /* detached */ true);
t->Init();
asanThreadRegistry().StartThread(t->tid(), GetTid(), ThreadType::Worker,
nullptr);
@@ -164,7 +173,7 @@ void asan_dispatch_call_block_and_release(void *block) {
VReport(2,
"asan_dispatch_call_block_and_release(): "
"context: %p, pthread_self: %p\n",
- block, pthread_self());
+ block, (void*)pthread_self());
asan_register_worker_thread(context->parent_tid, &stack);
// Call the original dispatcher for the block.
context->func(context->block);
@@ -197,7 +206,7 @@ asan_block_context_t *alloc_asan_context(void *ctxt, dispatch_function_t func,
asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack); \
if (Verbosity() >= 2) { \
Report(#dispatch_x_f "(): context: %p, pthread_self: %p\n", \
- asan_ctxt, pthread_self()); \
+ (void*)asan_ctxt, (void*)pthread_self()); \
PRINT_CURRENT_STACK(); \
} \
return REAL(dispatch_x_f)(dq, (void*)asan_ctxt, \
@@ -214,7 +223,7 @@ INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when,
GET_STACK_TRACE_THREAD;
asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
if (Verbosity() >= 2) {
- Report("dispatch_after_f: %p\n", asan_ctxt);
+ Report("dispatch_after_f: %p\n", (void*)asan_ctxt);
PRINT_CURRENT_STACK();
}
return REAL(dispatch_after_f)(when, dq, (void*)asan_ctxt,
@@ -228,7 +237,7 @@ INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group,
asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
if (Verbosity() >= 2) {
Report("dispatch_group_async_f(): context: %p, pthread_self: %p\n",
- asan_ctxt, pthread_self());
+ (void*)asan_ctxt, (void*)pthread_self());
PRINT_CURRENT_STACK();
}
REAL(dispatch_group_async_f)(group, dq, (void*)asan_ctxt,
@@ -245,6 +254,8 @@ void dispatch_after(dispatch_time_t when, dispatch_queue_t queue,
void dispatch_source_set_cancel_handler(dispatch_source_t ds,
void(^work)(void));
void dispatch_source_set_event_handler(dispatch_source_t ds, void(^work)(void));
+dispatch_mach_t dispatch_mach_create(const char *label, dispatch_queue_t queue,
+ dispatch_mach_handler_t handler);
}
#define GET_ASAN_BLOCK(work) \
@@ -294,6 +305,34 @@ INTERCEPTOR(void, dispatch_source_set_event_handler,
GET_ASAN_BLOCK(work);
REAL(dispatch_source_set_event_handler)(ds, asan_block);
}
+
+INTERCEPTOR(void *, dispatch_mach_create, const char *label,
+ dispatch_queue_t dq, dispatch_mach_handler_t handler) {
+ int parent_tid = GetCurrentTidOrInvalid();
+ return REAL(dispatch_mach_create)(
+ label, dq,
+ ^(dispatch_mach_reason reason, dispatch_mach_msg_t message,
+ mach_error_t error) {
+ GET_STACK_TRACE_THREAD;
+ asan_register_worker_thread(parent_tid, &stack);
+ handler(reason, message, error);
+ });
+}
+
+INTERCEPTOR(void *, dispatch_mach_create_f, const char *label,
+ dispatch_queue_t dq, void *ctxt,
+ dispatch_mach_handler_function_t handler) {
+ int parent_tid = GetCurrentTidOrInvalid();
+ return REAL(dispatch_mach_create)(
+ label, dq,
+ ^(dispatch_mach_reason reason, dispatch_mach_msg_t message,
+ mach_error_t error) {
+ GET_STACK_TRACE_THREAD;
+ asan_register_worker_thread(parent_tid, &stack);
+ handler(ctxt, reason, message, error);
+ });
+}
+
#endif
-#endif // SANITIZER_MAC
+#endif // SANITIZER_APPLE
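Both dispatch_mach_create interceptors follow the same pattern as the older dispatch wrappers: capture the parent tid at creation time, then register the worker thread before forwarding each event to the user handler. A portable C++ sketch of that wrapping shape; the real code uses ObjC blocks and libdispatch types, so these names are illustrative:

#include <cstdio>
#include <functional>

using Handler = std::function<void(unsigned long reason)>;

static void RegisterWorkerThread(int parent_tid) {
  // Stands in for asan_register_worker_thread(parent_tid, &stack).
  std::printf("registering worker (parent tid %d)\n", parent_tid);
}

static Handler WrapHandler(int parent_tid, Handler user) {
  return [parent_tid, user](unsigned long reason) {
    RegisterWorkerThread(parent_tid);  // ensure the thread is known to asan
    user(reason);                      // forward to the original handler
  };
}

int main() {
  Handler h = WrapHandler(7, [](unsigned long r) {
    std::printf("handler got reason %lu\n", r);
  });
  h(1);
}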
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_malloc_linux.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_malloc_linux.cpp
index c6bec8551bc5..d426b923c94e 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_malloc_linux.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_malloc_linux.cpp
@@ -21,137 +21,71 @@
# include "asan_interceptors.h"
# include "asan_internal.h"
# include "asan_stack.h"
+# include "lsan/lsan_common.h"
# include "sanitizer_common/sanitizer_allocator_checks.h"
+# include "sanitizer_common/sanitizer_allocator_dlsym.h"
# include "sanitizer_common/sanitizer_errno.h"
# include "sanitizer_common/sanitizer_tls_get_addr.h"
// ---------------------- Replacement functions ---------------- {{{1
using namespace __asan;
-static uptr allocated_for_dlsym;
-static uptr last_dlsym_alloc_size_in_words;
-static const uptr kDlsymAllocPoolSize = 1024;
-static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize];
-
-static inline bool IsInDlsymAllocPool(const void *ptr) {
- uptr off = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
- return off < allocated_for_dlsym * sizeof(alloc_memory_for_dlsym[0]);
-}
-
-static void *AllocateFromLocalPool(uptr size_in_bytes) {
- uptr size_in_words = RoundUpTo(size_in_bytes, kWordSize) / kWordSize;
- void *mem = (void*)&alloc_memory_for_dlsym[allocated_for_dlsym];
- last_dlsym_alloc_size_in_words = size_in_words;
- allocated_for_dlsym += size_in_words;
- CHECK_LT(allocated_for_dlsym, kDlsymAllocPoolSize);
- return mem;
-}
-
-static void DeallocateFromLocalPool(const void *ptr) {
- // Hack: since glibc 2.27 dlsym no longer uses stack-allocated memory to store
- // error messages and instead uses malloc followed by free. To avoid pool
- // exhaustion due to long object filenames, handle that special case here.
- uptr prev_offset = allocated_for_dlsym - last_dlsym_alloc_size_in_words;
- void *prev_mem = (void*)&alloc_memory_for_dlsym[prev_offset];
- if (prev_mem == ptr) {
- REAL(memset)(prev_mem, 0, last_dlsym_alloc_size_in_words * kWordSize);
- allocated_for_dlsym = prev_offset;
- last_dlsym_alloc_size_in_words = 0;
+struct DlsymAlloc : public DlSymAllocator<DlsymAlloc> {
+ static bool UseImpl() { return !TryAsanInitFromRtl(); }
+ static void OnAllocate(const void *ptr, uptr size) {
+# if CAN_SANITIZE_LEAKS
+    // Suppress leaks from dlerror(). Previously, the dlsym hack's global
+    // array was registered with the leak sanitizer as a root region.
+ __lsan_register_root_region(ptr, size);
+# endif
}
-}
-
-static int PosixMemalignFromLocalPool(void **memptr, uptr alignment,
- uptr size_in_bytes) {
- if (UNLIKELY(!CheckPosixMemalignAlignment(alignment)))
- return errno_EINVAL;
-
- CHECK(alignment >= kWordSize);
-
- uptr addr = (uptr)&alloc_memory_for_dlsym[allocated_for_dlsym];
- uptr aligned_addr = RoundUpTo(addr, alignment);
- uptr aligned_size = RoundUpTo(size_in_bytes, kWordSize);
-
- uptr *end_mem = (uptr*)(aligned_addr + aligned_size);
- uptr allocated = end_mem - alloc_memory_for_dlsym;
- if (allocated >= kDlsymAllocPoolSize)
- return errno_ENOMEM;
-
- allocated_for_dlsym = allocated;
- *memptr = (void*)aligned_addr;
- return 0;
-}
-
-static inline bool MaybeInDlsym() {
- // Fuchsia doesn't use dlsym-based interceptors.
- return !SANITIZER_FUCHSIA && asan_init_is_running;
-}
-
-static inline bool UseLocalPool() { return MaybeInDlsym(); }
-
-static void *ReallocFromLocalPool(void *ptr, uptr size) {
- const uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
- const uptr copy_size = Min(size, kDlsymAllocPoolSize - offset);
- void *new_ptr;
- if (UNLIKELY(UseLocalPool())) {
- new_ptr = AllocateFromLocalPool(size);
- } else {
- ENSURE_ASAN_INITED();
- GET_STACK_TRACE_MALLOC;
- new_ptr = asan_malloc(size, &stack);
+ static void OnFree(const void *ptr, uptr size) {
+# if CAN_SANITIZE_LEAKS
+ __lsan_unregister_root_region(ptr, size);
+# endif
}
- internal_memcpy(new_ptr, ptr, copy_size);
- return new_ptr;
-}
+};
INTERCEPTOR(void, free, void *ptr) {
- if (UNLIKELY(IsInDlsymAllocPool(ptr))) {
- DeallocateFromLocalPool(ptr);
- return;
- }
+ if (DlsymAlloc::PointerIsMine(ptr))
+ return DlsymAlloc::Free(ptr);
GET_STACK_TRACE_FREE;
asan_free(ptr, &stack, FROM_MALLOC);
}
#if SANITIZER_INTERCEPT_CFREE
INTERCEPTOR(void, cfree, void *ptr) {
- if (UNLIKELY(IsInDlsymAllocPool(ptr)))
- return;
+ if (DlsymAlloc::PointerIsMine(ptr))
+ return DlsymAlloc::Free(ptr);
GET_STACK_TRACE_FREE;
asan_free(ptr, &stack, FROM_MALLOC);
}
#endif // SANITIZER_INTERCEPT_CFREE
INTERCEPTOR(void*, malloc, uptr size) {
- if (UNLIKELY(UseLocalPool()))
- // Hack: dlsym calls malloc before REAL(malloc) is retrieved from dlsym.
- return AllocateFromLocalPool(size);
- ENSURE_ASAN_INITED();
+ if (DlsymAlloc::Use())
+ return DlsymAlloc::Allocate(size);
GET_STACK_TRACE_MALLOC;
return asan_malloc(size, &stack);
}
INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
- if (UNLIKELY(UseLocalPool()))
- // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
- return AllocateFromLocalPool(nmemb * size);
- ENSURE_ASAN_INITED();
+ if (DlsymAlloc::Use())
+ return DlsymAlloc::Callocate(nmemb, size);
GET_STACK_TRACE_MALLOC;
return asan_calloc(nmemb, size, &stack);
}
INTERCEPTOR(void*, realloc, void *ptr, uptr size) {
- if (UNLIKELY(IsInDlsymAllocPool(ptr)))
- return ReallocFromLocalPool(ptr, size);
- if (UNLIKELY(UseLocalPool()))
- return AllocateFromLocalPool(size);
- ENSURE_ASAN_INITED();
+ if (DlsymAlloc::Use() || DlsymAlloc::PointerIsMine(ptr))
+ return DlsymAlloc::Realloc(ptr, size);
GET_STACK_TRACE_MALLOC;
return asan_realloc(ptr, size, &stack);
}
#if SANITIZER_INTERCEPT_REALLOCARRAY
INTERCEPTOR(void*, reallocarray, void *ptr, uptr nmemb, uptr size) {
- ENSURE_ASAN_INITED();
+ AsanInitFromRtl();
GET_STACK_TRACE_MALLOC;
return asan_reallocarray(ptr, nmemb, size, &stack);
}
@@ -205,8 +139,6 @@ INTERCEPTOR(int, mallopt, int cmd, int value) {
#endif // SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO
INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
- if (UNLIKELY(UseLocalPool()))
- return PosixMemalignFromLocalPool(memptr, alignment, size);
GET_STACK_TRACE_MALLOC;
return asan_posix_memalign(memptr, alignment, size, &stack);
}
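The DlsymAlloc rewrite replaces roughly a hundred lines of hand-rolled pool bookkeeping with the shared DlSymAllocator CRTP helper. Its essential ingredients are a static bump pool plus a PointerIsMine() test, so that early free()/realloc() calls can be routed away from the real allocator; a minimal sketch, with illustrative names rather than the sanitizer_common implementation:

#include <cstddef>
#include <cstdio>

// Bump allocator for pre-init (dlsym-time) allocations.
static unsigned char pool[1 << 14];
static size_t used;

static void *PoolAlloc(size_t n) {
  n = (n + 15) & ~size_t(15);  // keep 16-byte alignment
  if (used + n > sizeof(pool)) return nullptr;
  void *p = pool + used;
  used += n;
  return p;
}

// The key query the interceptors need before touching the real heap.
static bool PointerIsMine(const void *p) {
  const unsigned char *c = static_cast<const unsigned char *>(p);
  return c >= pool && c < pool + sizeof(pool);
}

int main() {
  void *p = PoolAlloc(100);
  std::printf("from pool: %d\n", PointerIsMine(p));   // 1
  int x;
  std::printf("stack ptr: %d\n", PointerIsMine(&x));  // 0
}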
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_malloc_mac.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_malloc_mac.cpp
index e8484685daed..f25d7e190153 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_malloc_mac.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_malloc_mac.cpp
@@ -12,7 +12,7 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
#include "asan_interceptors.h"
#include "asan_report.h"
@@ -22,46 +22,48 @@
using namespace __asan;
#define COMMON_MALLOC_ZONE_NAME "asan"
-#define COMMON_MALLOC_ENTER() ENSURE_ASAN_INITED()
-#define COMMON_MALLOC_SANITIZER_INITIALIZED asan_inited
-#define COMMON_MALLOC_FORCE_LOCK() asan_mz_force_lock()
-#define COMMON_MALLOC_FORCE_UNLOCK() asan_mz_force_unlock()
-#define COMMON_MALLOC_MEMALIGN(alignment, size) \
- GET_STACK_TRACE_MALLOC; \
- void *p = asan_memalign(alignment, size, &stack, FROM_MALLOC)
-#define COMMON_MALLOC_MALLOC(size) \
- GET_STACK_TRACE_MALLOC; \
- void *p = asan_malloc(size, &stack)
-#define COMMON_MALLOC_REALLOC(ptr, size) \
- GET_STACK_TRACE_MALLOC; \
- void *p = asan_realloc(ptr, size, &stack);
-#define COMMON_MALLOC_CALLOC(count, size) \
- GET_STACK_TRACE_MALLOC; \
- void *p = asan_calloc(count, size, &stack);
-#define COMMON_MALLOC_POSIX_MEMALIGN(memptr, alignment, size) \
- GET_STACK_TRACE_MALLOC; \
- int res = asan_posix_memalign(memptr, alignment, size, &stack);
-#define COMMON_MALLOC_VALLOC(size) \
- GET_STACK_TRACE_MALLOC; \
- void *p = asan_memalign(GetPageSizeCached(), size, &stack, FROM_MALLOC);
-#define COMMON_MALLOC_FREE(ptr) \
- GET_STACK_TRACE_FREE; \
- asan_free(ptr, &stack, FROM_MALLOC);
-#define COMMON_MALLOC_SIZE(ptr) \
- uptr size = asan_mz_size(ptr);
-#define COMMON_MALLOC_FILL_STATS(zone, stats) \
- AsanMallocStats malloc_stats; \
- FillMallocStatistics(&malloc_stats); \
- CHECK(sizeof(malloc_statistics_t) == sizeof(AsanMallocStats)); \
- internal_memcpy(stats, &malloc_stats, sizeof(malloc_statistics_t));
-#define COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name) \
- GET_STACK_TRACE_FREE; \
- ReportMacMzReallocUnknown((uptr)ptr, (uptr)zone_ptr, zone_name, &stack);
-#define COMMON_MALLOC_NAMESPACE __asan
-#define COMMON_MALLOC_HAS_ZONE_ENUMERATOR 0
-#define COMMON_MALLOC_HAS_EXTRA_INTROSPECTION_INIT 1
+# define COMMON_MALLOC_ENTER() \
+ do { \
+ AsanInitFromRtl(); \
+ } while (false)
+# define COMMON_MALLOC_SANITIZER_INITIALIZED AsanInited()
+# define COMMON_MALLOC_FORCE_LOCK() asan_mz_force_lock()
+# define COMMON_MALLOC_FORCE_UNLOCK() asan_mz_force_unlock()
+# define COMMON_MALLOC_MEMALIGN(alignment, size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = asan_memalign(alignment, size, &stack, FROM_MALLOC)
+# define COMMON_MALLOC_MALLOC(size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = asan_malloc(size, &stack)
+# define COMMON_MALLOC_REALLOC(ptr, size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = asan_realloc(ptr, size, &stack);
+# define COMMON_MALLOC_CALLOC(count, size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = asan_calloc(count, size, &stack);
+# define COMMON_MALLOC_POSIX_MEMALIGN(memptr, alignment, size) \
+ GET_STACK_TRACE_MALLOC; \
+ int res = asan_posix_memalign(memptr, alignment, size, &stack);
+# define COMMON_MALLOC_VALLOC(size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = asan_memalign(GetPageSizeCached(), size, &stack, FROM_MALLOC);
+# define COMMON_MALLOC_FREE(ptr) \
+ GET_STACK_TRACE_FREE; \
+ asan_free(ptr, &stack, FROM_MALLOC);
+# define COMMON_MALLOC_SIZE(ptr) uptr size = asan_mz_size(ptr);
+# define COMMON_MALLOC_FILL_STATS(zone, stats) \
+ AsanMallocStats malloc_stats; \
+ FillMallocStatistics(&malloc_stats); \
+ CHECK(sizeof(malloc_statistics_t) == sizeof(AsanMallocStats)); \
+ internal_memcpy(stats, &malloc_stats, sizeof(malloc_statistics_t));
+# define COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name) \
+ GET_STACK_TRACE_FREE; \
+ ReportMacMzReallocUnknown((uptr)ptr, (uptr)zone_ptr, zone_name, &stack);
+# define COMMON_MALLOC_NAMESPACE __asan
+# define COMMON_MALLOC_HAS_ZONE_ENUMERATOR 0
+# define COMMON_MALLOC_HAS_EXTRA_INTROSPECTION_INIT 1
-#include "sanitizer_common/sanitizer_malloc_mac.inc"
+# include "sanitizer_common/sanitizer_malloc_mac.inc"
namespace COMMON_MALLOC_NAMESPACE {
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_malloc_win.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_malloc_win.cpp
index 4b76d4ebd3eb..7e1d04c36dd5 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_malloc_win.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_malloc_win.cpp
@@ -211,7 +211,7 @@ INTERCEPTOR_WINAPI(size_t, HeapSize, HANDLE hHeap, DWORD dwFlags,
// interception takes place, so if it is not owned by the RTL heap we can
// pass it to the ASAN heap for inspection.
if (flags()->windows_hook_rtl_allocators) {
- if (!asan_inited || OWNED_BY_RTL(hHeap, lpMem))
+ if (!AsanInited() || OWNED_BY_RTL(hHeap, lpMem))
return REAL(HeapSize)(hHeap, dwFlags, lpMem);
} else {
CHECK(dwFlags == 0 && "unsupported heap flags");
@@ -226,7 +226,7 @@ INTERCEPTOR_WINAPI(LPVOID, HeapAlloc, HANDLE hHeap, DWORD dwFlags,
// If the ASAN runtime is not initialized, or we encounter an unsupported
// flag, fall back to the original allocator.
if (flags()->windows_hook_rtl_allocators) {
- if (UNLIKELY(!asan_inited ||
+ if (UNLIKELY(!AsanInited() ||
(dwFlags & HEAP_ALLOCATE_UNSUPPORTED_FLAGS) != 0)) {
return REAL(HeapAlloc)(hHeap, dwFlags, dwBytes);
}
@@ -297,7 +297,7 @@ void *SharedReAlloc(ReAllocFunction reallocFunc, SizeFunction heapSizeFunc,
  // If this heap block was allocated before the ASAN runtime came up,
  // fall back to the original allocation function.
- if (UNLIKELY(!asan_inited)) {
+ if (UNLIKELY(!AsanInited())) {
return reallocFunc(hHeap, dwFlags, lpMem, dwBytes);
}
bool only_asan_supported_flags =
@@ -420,7 +420,7 @@ size_t RtlSizeHeap(void* HeapHandle, DWORD Flags, void* BaseAddress);
INTERCEPTOR_WINAPI(size_t, RtlSizeHeap, HANDLE HeapHandle, DWORD Flags,
void* BaseAddress) {
if (!flags()->windows_hook_rtl_allocators ||
- UNLIKELY(!asan_inited || OWNED_BY_RTL(HeapHandle, BaseAddress))) {
+ UNLIKELY(!AsanInited() || OWNED_BY_RTL(HeapHandle, BaseAddress))) {
return REAL(RtlSizeHeap)(HeapHandle, Flags, BaseAddress);
}
GET_CURRENT_PC_BP_SP;
@@ -448,7 +448,7 @@ INTERCEPTOR_WINAPI(void*, RtlAllocateHeap, HANDLE HeapHandle, DWORD Flags,
// If the ASAN runtime is not initialized, or we encounter an unsupported
// flag, fall back to the original allocator.
if (!flags()->windows_hook_rtl_allocators ||
- UNLIKELY(!asan_inited ||
+ UNLIKELY(!AsanInited() ||
(Flags & HEAP_ALLOCATE_UNSUPPORTED_FLAGS) != 0)) {
return REAL(RtlAllocateHeap)(HeapHandle, Flags, Size);
}
@@ -508,10 +508,10 @@ void ReplaceSystemMalloc() {
TryToOverrideFunction("_expand_base", (uptr)_expand);
if (flags()->windows_hook_rtl_allocators) {
- INTERCEPT_FUNCTION(HeapSize);
- INTERCEPT_FUNCTION(HeapFree);
- INTERCEPT_FUNCTION(HeapReAlloc);
- INTERCEPT_FUNCTION(HeapAlloc);
+ ASAN_INTERCEPT_FUNC(HeapSize);
+ ASAN_INTERCEPT_FUNC(HeapFree);
+ ASAN_INTERCEPT_FUNC(HeapReAlloc);
+ ASAN_INTERCEPT_FUNC(HeapAlloc);
// Undocumented functions must be intercepted by name, not by symbol.
__interception::OverrideFunction("RtlSizeHeap", (uptr)WRAP(RtlSizeHeap),
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_mapping.h b/contrib/llvm-project/compiler-rt/lib/asan/asan_mapping.h
index e5a7f2007aea..c5f95c07a210 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_mapping.h
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_mapping.h
@@ -13,7 +13,7 @@
#ifndef ASAN_MAPPING_H
#define ASAN_MAPPING_H
-#include "asan_internal.h"
+#include "sanitizer_common/sanitizer_platform.h"
// The full explanation of the memory mapping could be found here:
// https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm
@@ -114,6 +114,13 @@
// || `[0x0080000000000, 0x008ffffffffff]` || LowShadow ||
// || `[0x0000000000000, 0x007ffffffffff]` || LowMem ||
//
+// Default Linux/LoongArch64 (47-bit VMA) mapping:
+// || `[0x500000000000, 0x7fffffffffff]` || HighMem ||
+// || `[0x4a0000000000, 0x4fffffffffff]` || HighShadow ||
+// || `[0x480000000000, 0x49ffffffffff]` || ShadowGap ||
+// || `[0x400000000000, 0x47ffffffffff]` || LowShadow ||
+// || `[0x000000000000, 0x3fffffffffff]` || LowMem ||
+//
// Shadow mapping on FreeBSD/x86-64 with SHADOW_OFFSET == 0x400000000000:
// || `[0x500000000000, 0x7fffffffffff]` || HighMem ||
// || `[0x4a0000000000, 0x4fffffffffff]` || HighShadow ||
@@ -151,149 +158,151 @@
// || `[0x30000000, 0x35ffffff]` || LowShadow ||
// || `[0x00000000, 0x2fffffff]` || LowMem ||
-#if defined(ASAN_SHADOW_SCALE)
-static const u64 kDefaultShadowScale = ASAN_SHADOW_SCALE;
-#else
-static const u64 kDefaultShadowScale = 3;
-#endif
-static const u64 kDefaultShadowSentinel = ~(uptr)0;
-static const u64 kDefaultShadowOffset32 = 1ULL << 29; // 0x20000000
-static const u64 kDefaultShadowOffset64 = 1ULL << 44;
-static const u64 kDefaultShort64bitShadowOffset =
- 0x7FFFFFFF & (~0xFFFULL << kDefaultShadowScale); // < 2G.
-static const u64 kAArch64_ShadowOffset64 = 1ULL << 36;
-static const u64 kRiscv64_ShadowOffset64 = 0xd55550000;
-static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000;
-static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37;
-static const u64 kPPC64_ShadowOffset64 = 1ULL << 44;
-static const u64 kSystemZ_ShadowOffset64 = 1ULL << 52;
-static const u64 kSPARC64_ShadowOffset64 = 1ULL << 43; // 0x80000000000
-static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000
-static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000
-static const u64 kNetBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000
-static const u64 kNetBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000
-static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000
-
-#define SHADOW_SCALE kDefaultShadowScale
+#define ASAN_SHADOW_SCALE 3
#if SANITIZER_FUCHSIA
-# define SHADOW_OFFSET (0)
+# define ASAN_SHADOW_OFFSET_CONST (0)
#elif SANITIZER_WORDSIZE == 32
# if SANITIZER_ANDROID
-# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
+# define ASAN_SHADOW_OFFSET_DYNAMIC
# elif defined(__mips__)
-# define SHADOW_OFFSET kMIPS32_ShadowOffset32
+# define ASAN_SHADOW_OFFSET_CONST 0x0aaa0000
# elif SANITIZER_FREEBSD
-# define SHADOW_OFFSET kFreeBSD_ShadowOffset32
+# define ASAN_SHADOW_OFFSET_CONST 0x40000000
# elif SANITIZER_NETBSD
-# define SHADOW_OFFSET kNetBSD_ShadowOffset32
+# define ASAN_SHADOW_OFFSET_CONST 0x40000000
# elif SANITIZER_WINDOWS
-# define SHADOW_OFFSET kWindowsShadowOffset32
+# define ASAN_SHADOW_OFFSET_CONST 0x30000000
# elif SANITIZER_IOS
-# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
+# define ASAN_SHADOW_OFFSET_DYNAMIC
# else
-# define SHADOW_OFFSET kDefaultShadowOffset32
+# define ASAN_SHADOW_OFFSET_CONST 0x20000000
# endif
#else
# if SANITIZER_IOS
-# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
-# elif SANITIZER_MAC && defined(__aarch64__)
-# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
-#elif SANITIZER_RISCV64
-#define SHADOW_OFFSET kRiscv64_ShadowOffset64
+# define ASAN_SHADOW_OFFSET_DYNAMIC
+# elif SANITIZER_APPLE && defined(__aarch64__)
+# define ASAN_SHADOW_OFFSET_DYNAMIC
+# elif SANITIZER_FREEBSD && defined(__aarch64__)
+# define ASAN_SHADOW_OFFSET_CONST 0x0000800000000000
+# elif SANITIZER_RISCV64
+# define ASAN_SHADOW_OFFSET_CONST 0x0000000d55550000
# elif defined(__aarch64__)
-# define SHADOW_OFFSET kAArch64_ShadowOffset64
+# define ASAN_SHADOW_OFFSET_CONST 0x0000001000000000
# elif defined(__powerpc64__)
-# define SHADOW_OFFSET kPPC64_ShadowOffset64
+# define ASAN_SHADOW_OFFSET_CONST 0x0000100000000000
# elif defined(__s390x__)
-# define SHADOW_OFFSET kSystemZ_ShadowOffset64
+# define ASAN_SHADOW_OFFSET_CONST 0x0010000000000000
# elif SANITIZER_FREEBSD
-# define SHADOW_OFFSET kFreeBSD_ShadowOffset64
+# define ASAN_SHADOW_OFFSET_CONST 0x0000400000000000
# elif SANITIZER_NETBSD
-# define SHADOW_OFFSET kNetBSD_ShadowOffset64
-# elif SANITIZER_MAC
-# define SHADOW_OFFSET kDefaultShadowOffset64
+# define ASAN_SHADOW_OFFSET_CONST 0x0000400000000000
+# elif SANITIZER_APPLE
+# define ASAN_SHADOW_OFFSET_CONST 0x0000100000000000
# elif defined(__mips64)
-# define SHADOW_OFFSET kMIPS64_ShadowOffset64
-#elif defined(__sparc__)
-#define SHADOW_OFFSET kSPARC64_ShadowOffset64
+# define ASAN_SHADOW_OFFSET_CONST 0x0000002000000000
+# elif defined(__sparc__)
+# define ASAN_SHADOW_OFFSET_CONST 0x0000080000000000
+# elif SANITIZER_LOONGARCH64
+# define ASAN_SHADOW_OFFSET_CONST 0x0000400000000000
# elif SANITIZER_WINDOWS64
-# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
+# define ASAN_SHADOW_OFFSET_DYNAMIC
# else
-# define SHADOW_OFFSET kDefaultShort64bitShadowOffset
+# if ASAN_SHADOW_SCALE != 3
+# error "Value below is based on shadow scale = 3."
+# error "Original formula was: 0x7FFFFFFF & (~0xFFFULL << SHADOW_SCALE)."
+# endif
+# define ASAN_SHADOW_OFFSET_CONST 0x000000007fff8000
# endif
#endif
-#if SANITIZER_ANDROID && defined(__arm__)
-# define ASAN_PREMAP_SHADOW 1
-#else
-# define ASAN_PREMAP_SHADOW 0
-#endif
+#if defined(__cplusplus)
+# include "asan_internal.h"
-#define SHADOW_GRANULARITY (1ULL << SHADOW_SCALE)
+static const u64 kDefaultShadowSentinel = ~(uptr)0;
-#define DO_ASAN_MAPPING_PROFILE 0 // Set to 1 to profile the functions below.
+# if defined(ASAN_SHADOW_OFFSET_CONST)
+static const u64 kConstShadowOffset = ASAN_SHADOW_OFFSET_CONST;
+# define ASAN_SHADOW_OFFSET kConstShadowOffset
+# elif defined(ASAN_SHADOW_OFFSET_DYNAMIC)
+# define ASAN_SHADOW_OFFSET __asan_shadow_memory_dynamic_address
+# else
+# error "ASAN_SHADOW_OFFSET can't be determined."
+# endif
-#if DO_ASAN_MAPPING_PROFILE
-# define PROFILE_ASAN_MAPPING() AsanMappingProfile[__LINE__]++;
-#else
-# define PROFILE_ASAN_MAPPING()
-#endif
+# if SANITIZER_ANDROID && defined(__arm__)
+# define ASAN_PREMAP_SHADOW 1
+# else
+# define ASAN_PREMAP_SHADOW 0
+# endif
+
+# define ASAN_SHADOW_GRANULARITY (1ULL << ASAN_SHADOW_SCALE)
+
+# define DO_ASAN_MAPPING_PROFILE 0 // Set to 1 to profile the functions below.
+
+# if DO_ASAN_MAPPING_PROFILE
+# define PROFILE_ASAN_MAPPING() AsanMappingProfile[__LINE__]++;
+# else
+# define PROFILE_ASAN_MAPPING()
+# endif
// If 1, all shadow boundaries are constants.
// Don't set to 1 other than for testing.
-#define ASAN_FIXED_MAPPING 0
+# define ASAN_FIXED_MAPPING 0
namespace __asan {
extern uptr AsanMappingProfile[];
-#if ASAN_FIXED_MAPPING
+# if ASAN_FIXED_MAPPING
// Fixed mapping for 64-bit Linux. Mostly used for performance comparison
// with non-fixed mapping. As of r175253 (Feb 2013) the performance
// difference between fixed and non-fixed mapping is below the noise level.
static uptr kHighMemEnd = 0x7fffffffffffULL;
-static uptr kMidMemBeg = 0x3000000000ULL;
-static uptr kMidMemEnd = 0x4fffffffffULL;
-#else
+static uptr kMidMemBeg = 0x3000000000ULL;
+static uptr kMidMemEnd = 0x4fffffffffULL;
+# else
extern uptr kHighMemEnd, kMidMemBeg, kMidMemEnd; // Initialized in __asan_init.
-#endif
+# endif
} // namespace __asan
-#if defined(__sparc__) && SANITIZER_WORDSIZE == 64
-# include "asan_mapping_sparc64.h"
-#else
-#define MEM_TO_SHADOW(mem) (((mem) >> SHADOW_SCALE) + (SHADOW_OFFSET))
+# if defined(__sparc__) && SANITIZER_WORDSIZE == 64
+# include "asan_mapping_sparc64.h"
+# else
+# define MEM_TO_SHADOW(mem) \
+ (((mem) >> ASAN_SHADOW_SCALE) + (ASAN_SHADOW_OFFSET))
+# define SHADOW_TO_MEM(mem) \
+ (((mem) - (ASAN_SHADOW_OFFSET)) << (ASAN_SHADOW_SCALE))
-#define kLowMemBeg 0
-#define kLowMemEnd (SHADOW_OFFSET ? SHADOW_OFFSET - 1 : 0)
+# define kLowMemBeg 0
+# define kLowMemEnd (ASAN_SHADOW_OFFSET ? ASAN_SHADOW_OFFSET - 1 : 0)
-#define kLowShadowBeg SHADOW_OFFSET
-#define kLowShadowEnd MEM_TO_SHADOW(kLowMemEnd)
+# define kLowShadowBeg ASAN_SHADOW_OFFSET
+# define kLowShadowEnd MEM_TO_SHADOW(kLowMemEnd)
-#define kHighMemBeg (MEM_TO_SHADOW(kHighMemEnd) + 1)
+# define kHighMemBeg (MEM_TO_SHADOW(kHighMemEnd) + 1)
-#define kHighShadowBeg MEM_TO_SHADOW(kHighMemBeg)
-#define kHighShadowEnd MEM_TO_SHADOW(kHighMemEnd)
+# define kHighShadowBeg MEM_TO_SHADOW(kHighMemBeg)
+# define kHighShadowEnd MEM_TO_SHADOW(kHighMemEnd)
-# define kMidShadowBeg MEM_TO_SHADOW(kMidMemBeg)
-# define kMidShadowEnd MEM_TO_SHADOW(kMidMemEnd)
+# define kMidShadowBeg MEM_TO_SHADOW(kMidMemBeg)
+# define kMidShadowEnd MEM_TO_SHADOW(kMidMemEnd)
// With the zero shadow base we can not actually map pages starting from 0.
// This constant is somewhat arbitrary.
-#define kZeroBaseShadowStart 0
-#define kZeroBaseMaxShadowStart (1 << 18)
+# define kZeroBaseShadowStart 0
+# define kZeroBaseMaxShadowStart (1 << 18)
-#define kShadowGapBeg (kLowShadowEnd ? kLowShadowEnd + 1 \
- : kZeroBaseShadowStart)
-#define kShadowGapEnd ((kMidMemBeg ? kMidShadowBeg : kHighShadowBeg) - 1)
+# define kShadowGapBeg \
+ (kLowShadowEnd ? kLowShadowEnd + 1 : kZeroBaseShadowStart)
+# define kShadowGapEnd ((kMidMemBeg ? kMidShadowBeg : kHighShadowBeg) - 1)
-#define kShadowGap2Beg (kMidMemBeg ? kMidShadowEnd + 1 : 0)
-#define kShadowGap2End (kMidMemBeg ? kMidMemBeg - 1 : 0)
+# define kShadowGap2Beg (kMidMemBeg ? kMidShadowEnd + 1 : 0)
+# define kShadowGap2End (kMidMemBeg ? kMidMemBeg - 1 : 0)
-#define kShadowGap3Beg (kMidMemBeg ? kMidMemEnd + 1 : 0)
-#define kShadowGap3End (kMidMemBeg ? kHighShadowBeg - 1 : 0)
+# define kShadowGap3Beg (kMidMemBeg ? kMidMemEnd + 1 : 0)
+# define kShadowGap3End (kMidMemBeg ? kHighShadowBeg - 1 : 0)
namespace __asan {
@@ -331,29 +340,31 @@ static inline bool AddrIsInShadowGap(uptr a) {
PROFILE_ASAN_MAPPING();
if (kMidMemBeg) {
if (a <= kShadowGapEnd)
- return SHADOW_OFFSET == 0 || a >= kShadowGapBeg;
+ return ASAN_SHADOW_OFFSET == 0 || a >= kShadowGapBeg;
return (a >= kShadowGap2Beg && a <= kShadowGap2End) ||
(a >= kShadowGap3Beg && a <= kShadowGap3End);
}
// In zero-based shadow mode we treat addresses near zero as addresses
// in shadow gap as well.
- if (SHADOW_OFFSET == 0)
+ if (ASAN_SHADOW_OFFSET == 0)
return a <= kShadowGapEnd;
return a >= kShadowGapBeg && a <= kShadowGapEnd;
}
} // namespace __asan
-#endif
+# endif
namespace __asan {
-static inline uptr MemToShadowSize(uptr size) { return size >> SHADOW_SCALE; }
+static inline uptr MemToShadowSize(uptr size) {
+ return size >> ASAN_SHADOW_SCALE;
+}
static inline bool AddrIsInMem(uptr a) {
PROFILE_ASAN_MAPPING();
return AddrIsInLowMem(a) || AddrIsInMidMem(a) || AddrIsInHighMem(a) ||
- (flags()->protect_shadow_gap == 0 && AddrIsInShadowGap(a));
+ (flags()->protect_shadow_gap == 0 && AddrIsInShadowGap(a));
}
static inline uptr MemToShadow(uptr p) {
@@ -367,19 +378,25 @@ static inline bool AddrIsInShadow(uptr a) {
return AddrIsInLowShadow(a) || AddrIsInMidShadow(a) || AddrIsInHighShadow(a);
}
+static inline uptr ShadowToMem(uptr p) {
+ PROFILE_ASAN_MAPPING();
+ CHECK(AddrIsInShadow(p));
+ return SHADOW_TO_MEM(p);
+}
+
static inline bool AddrIsAlignedByGranularity(uptr a) {
PROFILE_ASAN_MAPPING();
- return (a & (SHADOW_GRANULARITY - 1)) == 0;
+ return (a & (ASAN_SHADOW_GRANULARITY - 1)) == 0;
}
static inline bool AddressIsPoisoned(uptr a) {
PROFILE_ASAN_MAPPING();
const uptr kAccessSize = 1;
- u8 *shadow_address = (u8*)MEM_TO_SHADOW(a);
+ u8 *shadow_address = (u8 *)MEM_TO_SHADOW(a);
s8 shadow_value = *shadow_address;
if (shadow_value) {
- u8 last_accessed_byte = (a & (SHADOW_GRANULARITY - 1))
- + kAccessSize - 1;
+ u8 last_accessed_byte =
+ (a & (ASAN_SHADOW_GRANULARITY - 1)) + kAccessSize - 1;
return (last_accessed_byte >= shadow_value);
}
return false;
@@ -390,4 +407,6 @@ static const uptr kAsanMappingProfileSize = __LINE__;
} // namespace __asan
+#endif // __cplusplus
+
#endif // ASAN_MAPPING_H
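With the constants above, the default x86-64 Linux mapping reduces to plain arithmetic, and the new SHADOW_TO_MEM is its inverse up to granule alignment. A quick numeric check, assuming scale 3 and the 0x7fff8000 offset from the table:

#include <cstdint>
#include <cstdio>

using uptr = uint64_t;

constexpr uptr kScale = 3;            // ASAN_SHADOW_SCALE
constexpr uptr kOffset = 0x7fff8000;  // default x86-64 Linux offset

constexpr uptr MemToShadow(uptr mem) { return (mem >> kScale) + kOffset; }
constexpr uptr ShadowToMem(uptr sh) { return (sh - kOffset) << kScale; }

int main() {
  uptr addr = 0x602000000010;  // a typical heap address
  uptr sh = MemToShadow(addr);
  std::printf("shadow(0x%llx) = 0x%llx\n", (unsigned long long)addr,
              (unsigned long long)sh);
  // Round-trips exactly because the address is granule-aligned:
  std::printf("mem(0x%llx)    = 0x%llx\n", (unsigned long long)sh,
              (unsigned long long)ShadowToMem(sh));
}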
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_mapping_sparc64.h b/contrib/llvm-project/compiler-rt/lib/asan/asan_mapping_sparc64.h
index 432a1816f797..e310c12fe301 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_mapping_sparc64.h
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_mapping_sparc64.h
@@ -25,13 +25,15 @@
// The idea is to chop the high bits before doing the scaling, so the two
// parts become contiguous again and the usual scheme can be applied.
-#define MEM_TO_SHADOW(mem) \
- ((((mem) << HIGH_BITS) >> (HIGH_BITS + (SHADOW_SCALE))) + (SHADOW_OFFSET))
+#define MEM_TO_SHADOW(mem) \
+ ((((mem) << HIGH_BITS) >> (HIGH_BITS + (ASAN_SHADOW_SCALE))) + \
+ (ASAN_SHADOW_OFFSET))
+#define SHADOW_TO_MEM(ptr) (__asan::ShadowToMemSparc64(ptr))
#define kLowMemBeg 0
-#define kLowMemEnd (SHADOW_OFFSET - 1)
+#define kLowMemEnd (ASAN_SHADOW_OFFSET - 1)
-#define kLowShadowBeg SHADOW_OFFSET
+#define kLowShadowBeg ASAN_SHADOW_OFFSET
#define kLowShadowEnd MEM_TO_SHADOW(kLowMemEnd)
// But of course there is the huge hole between the high shadow memory,
@@ -96,6 +98,24 @@ static inline bool AddrIsInShadowGap(uptr a) {
return a >= kShadowGapBeg && a <= kShadowGapEnd;
}
+static inline constexpr uptr ShadowToMemSparc64(uptr p) {
+ PROFILE_ASAN_MAPPING();
+ p -= ASAN_SHADOW_OFFSET;
+ p <<= ASAN_SHADOW_SCALE;
+ if (p >= 0x8000000000000) {
+ p |= (~0ULL) << VMA_BITS;
+ }
+ return p;
+}
+
+static_assert(ShadowToMemSparc64(MEM_TO_SHADOW(0x0000000000000000)) ==
+ 0x0000000000000000);
+static_assert(ShadowToMemSparc64(MEM_TO_SHADOW(0xfff8000000000000)) ==
+ 0xfff8000000000000);
+// Gets aligned down.
+static_assert(ShadowToMemSparc64(MEM_TO_SHADOW(0x0007ffffffffffff)) ==
+ 0x0007fffffffffff8);
+
} // namespace __asan
#endif // ASAN_MAPPING_SPARC64_H
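SHADOW_TO_MEM on SPARC64 cannot be a simple shift because MEM_TO_SHADOW chops the high bits of the split 52-bit VMA; addresses from the upper half must be sign-extended after unscaling, which is exactly what the static_asserts above pin down. The tail of ShadowToMemSparc64 in isolation, assuming VMA_BITS of 52:

#include <cstdint>
#include <cstdio>

using uptr = uint64_t;

constexpr unsigned kVmaBits = 52;  // VMA_BITS on SPARC64

// After undoing the offset and scale, re-extend the sign bits that
// MEM_TO_SHADOW chopped off for upper-half addresses.
constexpr uptr SignExtend(uptr p) {
  if (p >= (1ULL << (kVmaBits - 1)))  // was in the high half
    p |= ~0ULL << kVmaBits;
  return p;
}

int main() {
  // 0x8000000000000 is 1 << 51, the start of the high half.
  std::printf("0x%llx\n", (unsigned long long)SignExtend(0x8000000000000ULL));
  // Prints 0xfff8000000000000, matching the static_assert above.
}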
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_memory_profile.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_memory_profile.cpp
index 4fcd5600ed1a..3396fc2bab94 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_memory_profile.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_memory_profile.cpp
@@ -11,12 +11,11 @@
// This file implements __sanitizer_print_memory_profile.
//===----------------------------------------------------------------------===//
+#include "asan/asan_allocator.h"
+#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
-#include "sanitizer_common/sanitizer_stoptheworld.h"
-#include "lsan/lsan_common.h"
-#include "asan/asan_allocator.h"
#if CAN_SANITIZE_LEAKS
@@ -100,17 +99,16 @@ static void ChunkCallback(uptr chunk, void *arg) {
FindHeapChunkByAllocBeg(chunk));
}
-static void MemoryProfileCB(const SuspendedThreadsList &suspended_threads_list,
- void *argument) {
+static void MemoryProfileCB(uptr top_percent, uptr max_number_of_contexts) {
HeapProfile hp;
+ __lsan::LockAllocator();
__lsan::ForEachChunk(ChunkCallback, &hp);
- uptr *Arg = reinterpret_cast<uptr*>(argument);
- hp.Print(Arg[0], Arg[1]);
+ __lsan::UnlockAllocator();
+ hp.Print(top_percent, max_number_of_contexts);
if (Verbosity())
__asan_print_accumulated_stats();
}
-
} // namespace __asan
#endif // CAN_SANITIZE_LEAKS
@@ -120,10 +118,7 @@ SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_print_memory_profile(uptr top_percent,
uptr max_number_of_contexts) {
#if CAN_SANITIZE_LEAKS
- uptr Arg[2];
- Arg[0] = top_percent;
- Arg[1] = max_number_of_contexts;
- __sanitizer::StopTheWorld(__asan::MemoryProfileCB, Arg);
+ __asan::MemoryProfileCB(top_percent, max_number_of_contexts);
#endif // CAN_SANITIZE_LEAKS
}
} // extern "C"
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_new_delete.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_new_delete.cpp
index da446072de18..b5b1ced8ac5e 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_new_delete.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_new_delete.cpp
@@ -48,15 +48,6 @@ COMMENT_EXPORT("??_V@YAXPAX@Z") // operator delete[]
using namespace __asan;
-// FreeBSD prior v9.2 have wrong definition of 'size_t'.
-// http://svnweb.freebsd.org/base?view=revision&revision=232261
-#if SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 32
-#include <sys/param.h>
-#if __FreeBSD_version <= 902001 // v9.2
-#define size_t unsigned
-#endif // __FreeBSD_version
-#endif // SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 32
-
// This code has issues on OSX.
// See https://github.com/google/sanitizers/issues/131.
@@ -89,7 +80,7 @@ enum class align_val_t: size_t {};
// delete.
// To make sure that C++ allocation/deallocation operators are overridden on
// OS X we need to intercept them using their mangled names.
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
CXX_OPERATOR_ATTRIBUTE
void *operator new(size_t size)
{ OPERATOR_NEW_BODY(FROM_NEW, false /*nothrow*/); }
@@ -115,7 +106,7 @@ CXX_OPERATOR_ATTRIBUTE
void *operator new[](size_t size, std::align_val_t align, std::nothrow_t const&)
{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW_BR, true /*nothrow*/); }
-#else // SANITIZER_MAC
+#else // SANITIZER_APPLE
INTERCEPTOR(void *, _Znwm, size_t size) {
OPERATOR_NEW_BODY(FROM_NEW, false /*nothrow*/);
}
@@ -128,7 +119,7 @@ INTERCEPTOR(void *, _ZnwmRKSt9nothrow_t, size_t size, std::nothrow_t const&) {
INTERCEPTOR(void *, _ZnamRKSt9nothrow_t, size_t size, std::nothrow_t const&) {
OPERATOR_NEW_BODY(FROM_NEW_BR, true /*nothrow*/);
}
-#endif // !SANITIZER_MAC
+#endif // !SANITIZER_APPLE
#define OPERATOR_DELETE_BODY(type) \
GET_STACK_TRACE_FREE; \
@@ -146,7 +137,7 @@ INTERCEPTOR(void *, _ZnamRKSt9nothrow_t, size_t size, std::nothrow_t const&) {
GET_STACK_TRACE_FREE; \
asan_delete(ptr, size, static_cast<uptr>(align), &stack, type);
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
CXX_OPERATOR_ATTRIBUTE
void operator delete(void *ptr) NOEXCEPT
{ OPERATOR_DELETE_BODY(FROM_NEW); }
@@ -184,7 +175,7 @@ CXX_OPERATOR_ATTRIBUTE
void operator delete[](void *ptr, size_t size, std::align_val_t align) NOEXCEPT
{ OPERATOR_DELETE_BODY_SIZE_ALIGN(FROM_NEW_BR); }
-#else // SANITIZER_MAC
+#else // SANITIZER_APPLE
INTERCEPTOR(void, _ZdlPv, void *ptr)
{ OPERATOR_DELETE_BODY(FROM_NEW); }
INTERCEPTOR(void, _ZdaPv, void *ptr)
@@ -193,4 +184,4 @@ INTERCEPTOR(void, _ZdlPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&)
{ OPERATOR_DELETE_BODY(FROM_NEW); }
INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&)
{ OPERATOR_DELETE_BODY(FROM_NEW_BR); }
-#endif // !SANITIZER_MAC
+#endif // !SANITIZER_APPLE
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_poisoning.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_poisoning.cpp
index 5f215fe0f9bb..746ad61813c6 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_poisoning.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_poisoning.cpp
@@ -12,11 +12,13 @@
//===----------------------------------------------------------------------===//
#include "asan_poisoning.h"
+
#include "asan_report.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_atomic.h"
-#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_interface_internal.h"
+#include "sanitizer_common/sanitizer_libc.h"
namespace __asan {
@@ -35,7 +37,7 @@ void PoisonShadow(uptr addr, uptr size, u8 value) {
CHECK(AddrIsAlignedByGranularity(addr));
CHECK(AddrIsInMem(addr));
CHECK(AddrIsAlignedByGranularity(addr + size));
- CHECK(AddrIsInMem(addr + size - SHADOW_GRANULARITY));
+ CHECK(AddrIsInMem(addr + size - ASAN_SHADOW_GRANULARITY));
CHECK(REAL(memset));
FastPoisonShadow(addr, size, value);
}
@@ -52,12 +54,12 @@ void PoisonShadowPartialRightRedzone(uptr addr,
struct ShadowSegmentEndpoint {
u8 *chunk;
- s8 offset; // in [0, SHADOW_GRANULARITY)
+ s8 offset; // in [0, ASAN_SHADOW_GRANULARITY)
s8 value; // = *chunk;
explicit ShadowSegmentEndpoint(uptr address) {
chunk = (u8*)MemToShadow(address);
- offset = address & (SHADOW_GRANULARITY - 1);
+ offset = address & (ASAN_SHADOW_GRANULARITY - 1);
value = *chunk;
}
};
@@ -66,20 +68,20 @@ void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) {
uptr end = ptr + size;
if (Verbosity()) {
Printf("__asan_%spoison_intra_object_redzone [%p,%p) %zd\n",
- poison ? "" : "un", ptr, end, size);
+ poison ? "" : "un", (void *)ptr, (void *)end, size);
if (Verbosity() >= 2)
PRINT_CURRENT_STACK();
}
CHECK(size);
CHECK_LE(size, 4096);
- CHECK(IsAligned(end, SHADOW_GRANULARITY));
- if (!IsAligned(ptr, SHADOW_GRANULARITY)) {
+ CHECK(IsAligned(end, ASAN_SHADOW_GRANULARITY));
+ if (!IsAligned(ptr, ASAN_SHADOW_GRANULARITY)) {
*(u8 *)MemToShadow(ptr) =
- poison ? static_cast<u8>(ptr % SHADOW_GRANULARITY) : 0;
- ptr |= SHADOW_GRANULARITY - 1;
+ poison ? static_cast<u8>(ptr % ASAN_SHADOW_GRANULARITY) : 0;
+ ptr |= ASAN_SHADOW_GRANULARITY - 1;
ptr++;
}
- for (; ptr < end; ptr += SHADOW_GRANULARITY)
+ for (; ptr < end; ptr += ASAN_SHADOW_GRANULARITY)
*(u8*)MemToShadow(ptr) = poison ? kAsanIntraObjectRedzone : 0;
}
@@ -158,10 +160,6 @@ void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
return;
}
CHECK_LT(beg.chunk, end.chunk);
- if (beg.offset > 0) {
- *beg.chunk = 0;
- beg.chunk++;
- }
REAL(memset)(beg.chunk, 0, end.chunk - beg.chunk);
if (end.offset > 0 && end.value != 0) {
*end.chunk = Max(end.value, end.offset);
@@ -181,12 +179,12 @@ uptr __asan_region_is_poisoned(uptr beg, uptr size) {
if (!AddrIsInMem(end))
return end;
CHECK_LT(beg, end);
- uptr aligned_b = RoundUpTo(beg, SHADOW_GRANULARITY);
- uptr aligned_e = RoundDownTo(end, SHADOW_GRANULARITY);
+ uptr aligned_b = RoundUpTo(beg, ASAN_SHADOW_GRANULARITY);
+ uptr aligned_e = RoundDownTo(end, ASAN_SHADOW_GRANULARITY);
uptr shadow_beg = MemToShadow(aligned_b);
uptr shadow_end = MemToShadow(aligned_e);
// First check the first and the last application bytes,
- // then check the SHADOW_GRANULARITY-aligned region by calling
+ // then check the ASAN_SHADOW_GRANULARITY-aligned region by calling
// mem_is_zero on the corresponding shadow.
if (!__asan::AddressIsPoisoned(beg) && !__asan::AddressIsPoisoned(end - 1) &&
(shadow_end <= shadow_beg ||
@@ -285,7 +283,7 @@ uptr __asan_load_cxx_array_cookie(uptr *p) {
// assumes that left border of region to be poisoned is properly aligned.
static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
if (size == 0) return;
- uptr aligned_size = size & ~(SHADOW_GRANULARITY - 1);
+ uptr aligned_size = size & ~(ASAN_SHADOW_GRANULARITY - 1);
PoisonShadow(addr, aligned_size,
do_poison ? kAsanStackUseAfterScopeMagic : 0);
if (size == aligned_size)
@@ -310,6 +308,34 @@ void __asan_set_shadow_00(uptr addr, uptr size) {
REAL(memset)((void *)addr, 0, size);
}
+void __asan_set_shadow_01(uptr addr, uptr size) {
+ REAL(memset)((void *)addr, 0x01, size);
+}
+
+void __asan_set_shadow_02(uptr addr, uptr size) {
+ REAL(memset)((void *)addr, 0x02, size);
+}
+
+void __asan_set_shadow_03(uptr addr, uptr size) {
+ REAL(memset)((void *)addr, 0x03, size);
+}
+
+void __asan_set_shadow_04(uptr addr, uptr size) {
+ REAL(memset)((void *)addr, 0x04, size);
+}
+
+void __asan_set_shadow_05(uptr addr, uptr size) {
+ REAL(memset)((void *)addr, 0x05, size);
+}
+
+void __asan_set_shadow_06(uptr addr, uptr size) {
+ REAL(memset)((void *)addr, 0x06, size);
+}
+
+void __asan_set_shadow_07(uptr addr, uptr size) {
+ REAL(memset)((void *)addr, 0x07, size);
+}
+
void __asan_set_shadow_f1(uptr addr, uptr size) {
REAL(memset)((void *)addr, 0xf1, size);
}
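
The new __asan_set_shadow_01 .. __asan_set_shadow_07 entry points write the partially-addressable shadow values directly: a shadow byte k in 1..7 means only the first k bytes of the granule are addressable. The same semantics can be observed through the public poisoning interface; a small sketch (illustrative, must be built with -fsanitize=address):

    #include <sanitizer/asan_interface.h>
    #include <cassert>

    int main() {
      alignas(8) char buf[8];
      // Poison the last 3 bytes of the granule. The shadow byte becomes 0x05
      // ("first 5 bytes addressable"), the value __asan_set_shadow_05 writes.
      __asan_poison_memory_region(buf + 5, 3);
      assert(!__asan_address_is_poisoned(buf + 4));
      assert(__asan_address_is_poisoned(buf + 5));
      __asan_unpoison_memory_region(buf, 8);  // restore before returning
    }
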
@@ -340,30 +366,77 @@ void __asan_unpoison_stack_memory(uptr addr, uptr size) {
PoisonAlignedStackMemory(addr, size, false);
}
+static void FixUnalignedStorage(uptr storage_beg, uptr storage_end,
+ uptr &old_beg, uptr &old_end, uptr &new_beg,
+ uptr &new_end) {
+ constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;
+ if (UNLIKELY(!AddrIsAlignedByGranularity(storage_end))) {
+ uptr end_down = RoundDownTo(storage_end, granularity);
+    // Ignore the last unaligned granule if the storage is followed by an
+    // unpoisoned byte, because we can't poison the prefix anyway. Don't call
+    // AddressIsPoisoned at all if the container change does not affect the
+    // last granule.
+ if ((((old_end != new_end) && Max(old_end, new_end) > end_down) ||
+ ((old_beg != new_beg) && Max(old_beg, new_beg) > end_down)) &&
+ !AddressIsPoisoned(storage_end)) {
+ old_beg = Min(end_down, old_beg);
+ old_end = Min(end_down, old_end);
+ new_beg = Min(end_down, new_beg);
+ new_end = Min(end_down, new_end);
+ }
+ }
+
+ // Handle misaligned begin and cut it off.
+ if (UNLIKELY(!AddrIsAlignedByGranularity(storage_beg))) {
+ uptr beg_up = RoundUpTo(storage_beg, granularity);
+ // The first unaligned granule needs special handling only if we had bytes
+ // there before and will have none after.
+ if ((new_beg == new_end || new_beg >= beg_up) && old_beg != old_end &&
+ old_beg < beg_up) {
+      // Keep the granule prefix outside of the storage unpoisoned.
+ uptr beg_down = RoundDownTo(storage_beg, granularity);
+ *(u8 *)MemToShadow(beg_down) = storage_beg - beg_down;
+ old_beg = Max(beg_up, old_beg);
+ old_end = Max(beg_up, old_end);
+ new_beg = Max(beg_up, new_beg);
+ new_end = Max(beg_up, new_end);
+ }
+ }
+}
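
FixUnalignedStorage clamps the annotation bounds when the storage begins or ends mid-granule, because the shadow encoding can express "first k bytes addressable" but never a poisoned prefix with an addressable tail. The rounding helpers it relies on behave like sanitizer_common's RoundDownTo/RoundUpTo; a tiny sketch of that arithmetic, assuming an 8-byte granularity:

    #include <cstdint>

    // Power-of-two rounding, matching sanitizer_common's helpers in spirit.
    constexpr uintptr_t RoundDownTo(uintptr_t x, uintptr_t g) {
      return x & ~(g - 1);
    }
    constexpr uintptr_t RoundUpTo(uintptr_t x, uintptr_t g) {
      return RoundDownTo(x + g - 1, g);
    }

    int main() {
      constexpr uintptr_t g = 8;  // assumed ASAN_SHADOW_GRANULARITY
      // A storage end of 0x1005 sits mid-granule: if the byte at 0x1005 is
      // unpoisoned, the granule [0x1000, 0x1008) cannot hold a poisoned
      // prefix, so FixUnalignedStorage clamps all four bounds to 0x1000.
      static_assert(RoundDownTo(0x1005, g) == 0x1000);
      static_assert(RoundUpTo(0x1005, g) == 0x1008);
    }
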
+
void __sanitizer_annotate_contiguous_container(const void *beg_p,
const void *end_p,
const void *old_mid_p,
const void *new_mid_p) {
- if (!flags()->detect_container_overflow) return;
+ if (!flags()->detect_container_overflow)
+ return;
VPrintf(2, "contiguous_container: %p %p %p %p\n", beg_p, end_p, old_mid_p,
new_mid_p);
- uptr beg = reinterpret_cast<uptr>(beg_p);
- uptr end = reinterpret_cast<uptr>(end_p);
- uptr old_mid = reinterpret_cast<uptr>(old_mid_p);
- uptr new_mid = reinterpret_cast<uptr>(new_mid_p);
- uptr granularity = SHADOW_GRANULARITY;
- if (!(beg <= old_mid && beg <= new_mid && old_mid <= end && new_mid <= end &&
- IsAligned(beg, granularity))) {
+ uptr storage_beg = reinterpret_cast<uptr>(beg_p);
+ uptr storage_end = reinterpret_cast<uptr>(end_p);
+ uptr old_end = reinterpret_cast<uptr>(old_mid_p);
+ uptr new_end = reinterpret_cast<uptr>(new_mid_p);
+ uptr old_beg = storage_beg;
+ uptr new_beg = storage_beg;
+ uptr granularity = ASAN_SHADOW_GRANULARITY;
+ if (!(storage_beg <= old_end && storage_beg <= new_end &&
+ old_end <= storage_end && new_end <= storage_end)) {
GET_STACK_TRACE_FATAL_HERE;
- ReportBadParamsToAnnotateContiguousContainer(beg, end, old_mid, new_mid,
- &stack);
+ ReportBadParamsToAnnotateContiguousContainer(storage_beg, storage_end,
+ old_end, new_end, &stack);
}
- CHECK_LE(end - beg,
- FIRST_32_SECOND_64(1UL << 30, 1ULL << 40)); // Sanity check.
+ CHECK_LE(storage_end - storage_beg,
+ FIRST_32_SECOND_64(1UL << 30, 1ULL << 40)); // Sanity check.
+
+ if (old_end == new_end)
+ return; // Nothing to do here.
- uptr a = RoundDownTo(Min(old_mid, new_mid), granularity);
- uptr c = RoundUpTo(Max(old_mid, new_mid), granularity);
- uptr d1 = RoundDownTo(old_mid, granularity);
+ FixUnalignedStorage(storage_beg, storage_end, old_beg, old_end, new_beg,
+ new_end);
+
+ uptr a = RoundDownTo(Min(old_end, new_end), granularity);
+ uptr c = RoundUpTo(Max(old_end, new_end), granularity);
+ uptr d1 = RoundDownTo(old_end, granularity);
// uptr d2 = RoundUpTo(old_end, granularity);
// Currently we should be in this state:
// [a, d1) is good, [d2, c) is bad, [d1, d2) is partially good.
@@ -372,56 +445,176 @@ void __sanitizer_annotate_contiguous_container(const void *beg_p,
// FIXME: Two of these three checks are disabled until we fix
// https://github.com/google/sanitizers/issues/258.
// if (d1 != d2)
- // CHECK_EQ(*(u8*)MemToShadow(d1), old_mid - d1);
- if (a + granularity <= d1)
- CHECK_EQ(*(u8*)MemToShadow(a), 0);
+  //   DCHECK_EQ(*(u8*)MemToShadow(d1), old_end - d1);
+ //
+  // NOTE: curly brackets for the "if" below to silence an MSVC warning.
+ if (a + granularity <= d1) {
+ DCHECK_EQ(*(u8 *)MemToShadow(a), 0);
+ }
// if (d2 + granularity <= c && c <= end)
- // CHECK_EQ(*(u8 *)MemToShadow(c - granularity),
+ // DCHECK_EQ(*(u8 *)MemToShadow(c - granularity),
// kAsanContiguousContainerOOBMagic);
- uptr b1 = RoundDownTo(new_mid, granularity);
- uptr b2 = RoundUpTo(new_mid, granularity);
+ uptr b1 = RoundDownTo(new_end, granularity);
+ uptr b2 = RoundUpTo(new_end, granularity);
// New state:
// [a, b1) is good, [b2, c) is bad, [b1, b2) is partially good.
- PoisonShadow(a, b1 - a, 0);
- PoisonShadow(b2, c - b2, kAsanContiguousContainerOOBMagic);
+ if (b1 > a)
+ PoisonShadow(a, b1 - a, 0);
+ else if (c > b2)
+ PoisonShadow(b2, c - b2, kAsanContiguousContainerOOBMagic);
if (b1 != b2) {
CHECK_EQ(b2 - b1, granularity);
- *(u8*)MemToShadow(b1) = static_cast<u8>(new_mid - b1);
+ *(u8 *)MemToShadow(b1) = static_cast<u8>(new_end - b1);
}
}
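
For context, this is the annotation a vector-like container issues whenever its size changes, so that reads of allocated-but-unused capacity are reported as container-overflow. A minimal sketch against the long-standing public declaration in <sanitizer/common_interface_defs.h> (illustrative names, build with -fsanitize=address):

    #include <sanitizer/common_interface_defs.h>
    #include <cstddef>

    // Minimal fixed-capacity buffer that keeps its unused tail poisoned; a
    // real container also guards these calls behind a feature check.
    struct Buf {
      alignas(8) char storage[64];
      std::size_t size = 0;

      Buf() {
        // Whole capacity starts unused: shrink the "container" to empty.
        __sanitizer_annotate_contiguous_container(storage, storage + 64,
                                                  storage + 64, storage);
      }
      void resize(std::size_t new_size) {
        __sanitizer_annotate_contiguous_container(storage, storage + 64,
                                                  storage + size,
                                                  storage + new_size);
        size = new_size;
      }
      ~Buf() {  // un-annotate before the stack memory is reused
        __sanitizer_annotate_contiguous_container(storage, storage + 64,
                                                  storage + size, storage + 64);
      }
    };

    int main() {
      Buf b;
      b.resize(10);  // bytes [10, 64) stay poisoned; b.storage[20] would trap
    }
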
+// Annotates a double-ended contiguous memory area like std::deque's chunk.
+// It allows detecting buggy accesses to allocated but not yet used beginning
+// or end items of such a container.
+void __sanitizer_annotate_double_ended_contiguous_container(
+ const void *storage_beg_p, const void *storage_end_p,
+ const void *old_container_beg_p, const void *old_container_end_p,
+ const void *new_container_beg_p, const void *new_container_end_p) {
+ if (!flags()->detect_container_overflow)
+ return;
+
+ VPrintf(2, "contiguous_container: %p %p %p %p %p %p\n", storage_beg_p,
+ storage_end_p, old_container_beg_p, old_container_end_p,
+ new_container_beg_p, new_container_end_p);
+
+ uptr storage_beg = reinterpret_cast<uptr>(storage_beg_p);
+ uptr storage_end = reinterpret_cast<uptr>(storage_end_p);
+ uptr old_beg = reinterpret_cast<uptr>(old_container_beg_p);
+ uptr old_end = reinterpret_cast<uptr>(old_container_end_p);
+ uptr new_beg = reinterpret_cast<uptr>(new_container_beg_p);
+ uptr new_end = reinterpret_cast<uptr>(new_container_end_p);
+
+ constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;
+
+ if (!(old_beg <= old_end && new_beg <= new_end) ||
+ !(storage_beg <= new_beg && new_end <= storage_end) ||
+ !(storage_beg <= old_beg && old_end <= storage_end)) {
+ GET_STACK_TRACE_FATAL_HERE;
+ ReportBadParamsToAnnotateDoubleEndedContiguousContainer(
+ storage_beg, storage_end, old_beg, old_end, new_beg, new_end, &stack);
+ }
+ CHECK_LE(storage_end - storage_beg,
+ FIRST_32_SECOND_64(1UL << 30, 1ULL << 40)); // Sanity check.
+
+ if ((old_beg == old_end && new_beg == new_end) ||
+ (old_beg == new_beg && old_end == new_end))
+ return; // Nothing to do here.
+
+ FixUnalignedStorage(storage_beg, storage_end, old_beg, old_end, new_beg,
+ new_end);
+
+  // Handle non-intersecting new/old containers separately to keep the
+  // intersecting case simpler.
+ if (old_beg == old_end || new_beg == new_end || new_end <= old_beg ||
+ old_end <= new_beg) {
+ if (old_beg != old_end) {
+ // Poisoning the old container.
+ uptr a = RoundDownTo(old_beg, granularity);
+ uptr b = RoundUpTo(old_end, granularity);
+ PoisonShadow(a, b - a, kAsanContiguousContainerOOBMagic);
+ }
+
+ if (new_beg != new_end) {
+ // Unpoisoning the new container.
+ uptr a = RoundDownTo(new_beg, granularity);
+ uptr b = RoundDownTo(new_end, granularity);
+ PoisonShadow(a, b - a, 0);
+ if (!AddrIsAlignedByGranularity(new_end))
+ *(u8 *)MemToShadow(b) = static_cast<u8>(new_end - b);
+ }
+
+ return;
+ }
+
+ // Intersection of old and new containers is not empty.
+ CHECK_LT(new_beg, old_end);
+ CHECK_GT(new_end, old_beg);
+
+ if (new_beg < old_beg) {
+ // Round down because we can't poison prefixes.
+ uptr a = RoundDownTo(new_beg, granularity);
+    // Round down and ignore the [c, old_beg) as its state is defined by the
+    // unchanged [old_beg, old_end).
+ uptr c = RoundDownTo(old_beg, granularity);
+ PoisonShadow(a, c - a, 0);
+ } else if (new_beg > old_beg) {
+ // Round down and poison [a, old_beg) because it was unpoisoned only as a
+ // prefix.
+ uptr a = RoundDownTo(old_beg, granularity);
+    // Round down and ignore the [c, new_beg) as its state is defined by the
+    // unchanged [new_beg, old_end).
+ uptr c = RoundDownTo(new_beg, granularity);
+
+ PoisonShadow(a, c - a, kAsanContiguousContainerOOBMagic);
+ }
+
+ if (new_end > old_end) {
+ // Round down to poison the prefix.
+ uptr a = RoundDownTo(old_end, granularity);
+ // Round down and handle remainder below.
+ uptr c = RoundDownTo(new_end, granularity);
+ PoisonShadow(a, c - a, 0);
+ if (!AddrIsAlignedByGranularity(new_end))
+ *(u8 *)MemToShadow(c) = static_cast<u8>(new_end - c);
+ } else if (new_end < old_end) {
+    // Round up and handle the remainder below.
+ uptr a2 = RoundUpTo(new_end, granularity);
+    // Round up to poison the entire granule as we had nothing in [old_end, c2).
+ uptr c2 = RoundUpTo(old_end, granularity);
+ PoisonShadow(a2, c2 - a2, kAsanContiguousContainerOOBMagic);
+
+ if (!AddrIsAlignedByGranularity(new_end)) {
+ uptr a = RoundDownTo(new_end, granularity);
+ *(u8 *)MemToShadow(a) = static_cast<u8>(new_end - a);
+ }
+ }
+}
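
A usage sketch of the new double-ended entry point, assuming the matching declaration shipped in <sanitizer/common_interface_defs.h> with this compiler-rt version (illustrative, build with -fsanitize=address):

    #include <sanitizer/common_interface_defs.h>

    int main() {
      alignas(8) static char chunk[64];
      // Mark the whole chunk unused: treat [0, 64) as the old container and
      // shrink it to empty.
      __sanitizer_annotate_double_ended_contiguous_container(
          chunk, chunk + 64, chunk, chunk + 64, chunk, chunk);
      // Elements now occupy [16, 48); spare capacity on both sides stays
      // poisoned.
      __sanitizer_annotate_double_ended_contiguous_container(
          chunk, chunk + 64, chunk, chunk, chunk + 16, chunk + 48);
      // Pop one 8-byte element from the front: the container becomes [24, 48).
      __sanitizer_annotate_double_ended_contiguous_container(
          chunk, chunk + 64, chunk + 16, chunk + 48, chunk + 24, chunk + 48);
    }
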
+
+static const void *FindBadAddress(uptr begin, uptr end, bool poisoned) {
+ CHECK_LE(begin, end);
+ constexpr uptr kMaxRangeToCheck = 32;
+ if (end - begin > kMaxRangeToCheck * 2) {
+ if (auto *bad = FindBadAddress(begin, begin + kMaxRangeToCheck, poisoned))
+ return bad;
+ if (auto *bad = FindBadAddress(end - kMaxRangeToCheck, end, poisoned))
+ return bad;
+ }
+
+ for (uptr i = begin; i < end; ++i)
+ if (AddressIsPoisoned(i) != poisoned)
+ return reinterpret_cast<const void *>(i);
+ return nullptr;
+}
+
const void *__sanitizer_contiguous_container_find_bad_address(
const void *beg_p, const void *mid_p, const void *end_p) {
if (!flags()->detect_container_overflow)
return nullptr;
+ uptr granularity = ASAN_SHADOW_GRANULARITY;
uptr beg = reinterpret_cast<uptr>(beg_p);
uptr end = reinterpret_cast<uptr>(end_p);
uptr mid = reinterpret_cast<uptr>(mid_p);
CHECK_LE(beg, mid);
CHECK_LE(mid, end);
- // Check some bytes starting from beg, some bytes around mid, and some bytes
- // ending with end.
- uptr kMaxRangeToCheck = 32;
- uptr r1_beg = beg;
- uptr r1_end = Min(beg + kMaxRangeToCheck, mid);
- uptr r2_beg = Max(beg, mid - kMaxRangeToCheck);
- uptr r2_end = Min(end, mid + kMaxRangeToCheck);
- uptr r3_beg = Max(end - kMaxRangeToCheck, mid);
- uptr r3_end = end;
- for (uptr i = r1_beg; i < r1_end; i++)
- if (AddressIsPoisoned(i))
- return reinterpret_cast<const void *>(i);
- for (uptr i = r2_beg; i < mid; i++)
- if (AddressIsPoisoned(i))
- return reinterpret_cast<const void *>(i);
- for (uptr i = mid; i < r2_end; i++)
- if (!AddressIsPoisoned(i))
- return reinterpret_cast<const void *>(i);
- for (uptr i = r3_beg; i < r3_end; i++)
- if (!AddressIsPoisoned(i))
- return reinterpret_cast<const void *>(i);
- return nullptr;
+  // If the byte just past the storage is unpoisoned, nothing earlier in its
+  // granule can be poisoned either; annotations end at the aligned boundary.
+ uptr annotations_end =
+ (!AddrIsAlignedByGranularity(end) && !AddressIsPoisoned(end))
+ ? RoundDownTo(end, granularity)
+ : end;
+ beg = Min(beg, annotations_end);
+ mid = Min(mid, annotations_end);
+ if (auto *bad = FindBadAddress(beg, mid, false))
+ return bad;
+ if (auto *bad = FindBadAddress(mid, annotations_end, true))
+ return bad;
+ return FindBadAddress(annotations_end, end, false);
}
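
A usage sketch of the verification interface after this rewrite: the container claims [beg, mid) is live and [mid, end) is spare, and the checker returns the first byte whose poisoning state contradicts that claim (illustrative, build with -fsanitize=address; the default flags leave detect_container_overflow on):

    #include <sanitizer/common_interface_defs.h>
    #include <cassert>

    int main() {
      alignas(8) char buf[32];
      // Annotate [buf, buf+8) as the container, [buf+8, buf+32) as spare.
      __sanitizer_annotate_contiguous_container(buf, buf + 32, buf + 32,
                                                buf + 8);
      assert(__sanitizer_verify_contiguous_container(buf, buf + 8, buf + 32));
      // Claiming mid == buf+16 is inconsistent: buf+8 is the first byte whose
      // poisoning state disagrees with that claim.
      const void *bad = __sanitizer_contiguous_container_find_bad_address(
          buf, buf + 16, buf + 32);
      assert(bad == static_cast<const void *>(buf + 8));
      // Un-annotate before the stack frame is reused.
      __sanitizer_annotate_contiguous_container(buf, buf + 32, buf + 8,
                                                buf + 32);
    }
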
int __sanitizer_verify_contiguous_container(const void *beg_p,
@@ -431,6 +624,48 @@ int __sanitizer_verify_contiguous_container(const void *beg_p,
end_p) == nullptr;
}
+const void *__sanitizer_double_ended_contiguous_container_find_bad_address(
+ const void *storage_beg_p, const void *container_beg_p,
+ const void *container_end_p, const void *storage_end_p) {
+ if (!flags()->detect_container_overflow)
+ return nullptr;
+ uptr granularity = ASAN_SHADOW_GRANULARITY;
+ uptr storage_beg = reinterpret_cast<uptr>(storage_beg_p);
+ uptr storage_end = reinterpret_cast<uptr>(storage_end_p);
+ uptr beg = reinterpret_cast<uptr>(container_beg_p);
+ uptr end = reinterpret_cast<uptr>(container_end_p);
+
+  // The prefix of the first granule of the container is unpoisoned.
+ if (beg != end)
+ beg = Max(storage_beg, RoundDownTo(beg, granularity));
+
+  // If the byte just past the storage is unpoisoned, the prefix of the last
+  // granule is unpoisoned as well.
+ uptr annotations_end = (!AddrIsAlignedByGranularity(storage_end) &&
+ !AddressIsPoisoned(storage_end))
+ ? RoundDownTo(storage_end, granularity)
+ : storage_end;
+ storage_beg = Min(storage_beg, annotations_end);
+ beg = Min(beg, annotations_end);
+ end = Min(end, annotations_end);
+
+ if (auto *bad = FindBadAddress(storage_beg, beg, true))
+ return bad;
+ if (auto *bad = FindBadAddress(beg, end, false))
+ return bad;
+ if (auto *bad = FindBadAddress(end, annotations_end, true))
+ return bad;
+ return FindBadAddress(annotations_end, storage_end, false);
+}
+
+int __sanitizer_verify_double_ended_contiguous_container(
+ const void *storage_beg_p, const void *container_beg_p,
+ const void *container_end_p, const void *storage_end_p) {
+ return __sanitizer_double_ended_contiguous_container_find_bad_address(
+ storage_beg_p, container_beg_p, container_end_p, storage_end_p) ==
+ nullptr;
+}
+
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __asan_poison_intra_object_redzone(uptr ptr, uptr size) {
AsanPoisonOrUnpoisonIntraObjectRedzone(ptr, size, true);
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_poisoning.h b/contrib/llvm-project/compiler-rt/lib/asan/asan_poisoning.h
index 3d536f2d3097..600bd011f304 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_poisoning.h
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_poisoning.h
@@ -44,8 +44,8 @@ ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size,
common_flags()->clear_shadow_mmap_threshold);
#else
uptr shadow_beg = MEM_TO_SHADOW(aligned_beg);
- uptr shadow_end = MEM_TO_SHADOW(
- aligned_beg + aligned_size - SHADOW_GRANULARITY) + 1;
+ uptr shadow_end =
+ MEM_TO_SHADOW(aligned_beg + aligned_size - ASAN_SHADOW_GRANULARITY) + 1;
// FIXME: Page states are different on Windows, so using the same interface
// for mapping shadow and zeroing out pages doesn't "just work", so we should
// probably provide higher-level interface for these operations.
@@ -78,11 +78,12 @@ ALWAYS_INLINE void FastPoisonShadowPartialRightRedzone(
DCHECK(CanPoisonMemory());
bool poison_partial = flags()->poison_partial;
u8 *shadow = (u8*)MEM_TO_SHADOW(aligned_addr);
- for (uptr i = 0; i < redzone_size; i += SHADOW_GRANULARITY, shadow++) {
- if (i + SHADOW_GRANULARITY <= size) {
+ for (uptr i = 0; i < redzone_size; i += ASAN_SHADOW_GRANULARITY, shadow++) {
+ if (i + ASAN_SHADOW_GRANULARITY <= size) {
*shadow = 0; // fully addressable
} else if (i >= size) {
- *shadow = (SHADOW_GRANULARITY == 128) ? 0xff : value; // unaddressable
+ *shadow =
+ (ASAN_SHADOW_GRANULARITY == 128) ? 0xff : value; // unaddressable
} else {
// first size-i bytes are addressable
*shadow = poison_partial ? static_cast<u8>(size - i) : 0;
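
The loop above encodes the standard partial-granule rule: shadow 0 means all bytes of the granule are addressable, a value k in 1..7 means only the first k bytes are, and the redzone magic marks fully unaddressable granules. A standalone sketch of the same arithmetic for a 13-byte object (it fills a local array, not real shadow memory; names are illustrative):

    #include <cstdint>
    #include <cstdio>

    constexpr uintptr_t kGranularity = 8;  // assumed ASAN_SHADOW_GRANULARITY

    // Shadow values for `size` addressable bytes followed by a redzone
    // (poison_partial assumed true); mirrors the loop above.
    void FillPartialShadow(uint8_t *shadow, uintptr_t redzone_size,
                           uintptr_t size, uint8_t redzone_value) {
      for (uintptr_t i = 0; i < redzone_size; i += kGranularity, ++shadow) {
        if (i + kGranularity <= size)
          *shadow = 0;                               // fully addressable
        else if (i >= size)
          *shadow = redzone_value;                   // fully unaddressable
        else
          *shadow = static_cast<uint8_t>(size - i);  // first size-i bytes ok
      }
    }

    int main() {
      uint8_t shadow[4] = {};
      FillPartialShadow(shadow, sizeof(shadow) * kGranularity, 13, 0xfa);
      for (uint8_t s : shadow) printf("%02x ", s);  // prints: 00 05 fa fa
      printf("\n");
    }
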
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_posix.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_posix.cpp
index 63ad735f8bba..76564538bd5d 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_posix.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_posix.cpp
@@ -14,22 +14,23 @@
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_POSIX
-#include "asan_internal.h"
-#include "asan_interceptors.h"
-#include "asan_mapping.h"
-#include "asan_poisoning.h"
-#include "asan_report.h"
-#include "asan_stack.h"
-#include "sanitizer_common/sanitizer_libc.h"
-#include "sanitizer_common/sanitizer_posix.h"
-#include "sanitizer_common/sanitizer_procmaps.h"
-
-#include <pthread.h>
-#include <signal.h>
-#include <stdlib.h>
-#include <sys/time.h>
-#include <sys/resource.h>
-#include <unistd.h>
+# include <pthread.h>
+# include <signal.h>
+# include <stdlib.h>
+# include <sys/resource.h>
+# include <sys/time.h>
+# include <unistd.h>
+
+# include "asan_interceptors.h"
+# include "asan_internal.h"
+# include "asan_mapping.h"
+# include "asan_poisoning.h"
+# include "asan_report.h"
+# include "asan_stack.h"
+# include "lsan/lsan_common.h"
+# include "sanitizer_common/sanitizer_libc.h"
+# include "sanitizer_common/sanitizer_posix.h"
+# include "sanitizer_common/sanitizer_procmaps.h"
namespace __asan {
@@ -131,15 +132,64 @@ void AsanTSDSet(void *tsd) {
}
void PlatformTSDDtor(void *tsd) {
- AsanThreadContext *context = (AsanThreadContext*)tsd;
+ AsanThreadContext *context = (AsanThreadContext *)tsd;
if (context->destructor_iterations > 1) {
context->destructor_iterations--;
CHECK_EQ(0, pthread_setspecific(tsd_key, tsd));
return;
}
+# if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
+ SANITIZER_SOLARIS
+ // After this point it's unsafe to execute signal handlers which may be
+ // instrumented. It's probably not just a Linux issue.
+ BlockSignals();
+# endif
AsanThread::TSDDtor(tsd);
}
-#endif
+# endif
+
+static void BeforeFork() {
+ if (CAN_SANITIZE_LEAKS) {
+ __lsan::LockGlobal();
+ }
+  // The `__lsan` locking functions are defined regardless of
+  // `CAN_SANITIZE_LEAKS` and lock everything we need here.
+ __lsan::LockThreads();
+ __lsan::LockAllocator();
+ StackDepotLockBeforeFork();
+}
+
+static void AfterFork(bool fork_child) {
+ StackDepotUnlockAfterFork(fork_child);
+  // The `__lsan` locking functions are defined regardless of
+  // `CAN_SANITIZE_LEAKS` and unlock everything we need here.
+ __lsan::UnlockAllocator();
+ __lsan::UnlockThreads();
+ if (CAN_SANITIZE_LEAKS) {
+ __lsan::UnlockGlobal();
+ }
+}
+
+void InstallAtForkHandler() {
+# if SANITIZER_SOLARIS || SANITIZER_NETBSD || SANITIZER_APPLE
+ return; // FIXME: Implement FutexWait.
+# endif
+ pthread_atfork(
+ &BeforeFork, []() { AfterFork(/* fork_child= */ false); },
+ []() { AfterFork(/* fork_child= */ true); });
+}
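
The handlers follow the usual pthread_atfork discipline: acquire every lock the child might need in the prepare handler, release in both parent and child in reverse order, so fork() never snapshots a lock held by a thread that does not exist in the child. A generic sketch of the same pattern, independent of the sanitizer internals (stand-in lock names are illustrative):

    #include <pthread.h>
    #include <mutex>

    // Illustrative stand-ins for the runtime's global locks.
    static std::mutex g_threads_mu;
    static std::mutex g_allocator_mu;

    static void BeforeForkDemo() {  // acquire in a fixed order
      g_threads_mu.lock();
      g_allocator_mu.lock();
    }
    static void AfterForkDemo() {   // release in reverse order
      g_allocator_mu.unlock();
      g_threads_mu.unlock();
    }

    int main() {
      // Same handler for parent and child; ASan additionally threads a
      // fork_child flag through to the stack depot.
      pthread_atfork(BeforeForkDemo, AfterForkDemo, AfterForkDemo);
    }
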
+
+void InstallAtExitCheckLeaks() {
+ if (CAN_SANITIZE_LEAKS) {
+ if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
+ if (flags()->halt_on_error)
+ Atexit(__lsan::DoLeakCheck);
+ else
+ Atexit(__lsan::DoRecoverableLeakCheckVoid);
+ }
+ }
+}
+
} // namespace __asan
#endif // SANITIZER_POSIX
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_premap_shadow.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_premap_shadow.cpp
index 666bb9b34bd3..bed2f62a2251 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_premap_shadow.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_premap_shadow.cpp
@@ -26,7 +26,7 @@ namespace __asan {
// Conservative upper limit.
uptr PremapShadowSize() {
uptr granularity = GetMmapGranularity();
- return RoundUpTo(GetMaxVirtualAddress() >> SHADOW_SCALE, granularity);
+ return RoundUpTo(GetMaxVirtualAddress() >> ASAN_SHADOW_SCALE, granularity);
}
// Returns an address aligned to 8 pages, such that one page on the left and
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_report.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_report.cpp
index 03f1ed2b0186..7603e8131154 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_report.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_report.cpp
@@ -11,17 +11,19 @@
// This file contains error reporting code.
//===----------------------------------------------------------------------===//
+#include "asan_report.h"
+
+#include "asan_descriptions.h"
#include "asan_errors.h"
#include "asan_flags.h"
-#include "asan_descriptions.h"
#include "asan_internal.h"
#include "asan_mapping.h"
-#include "asan_report.h"
#include "asan_scariness_score.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_interface_internal.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
@@ -32,12 +34,12 @@ namespace __asan {
static void (*error_report_callback)(const char*);
static char *error_message_buffer = nullptr;
static uptr error_message_buffer_pos = 0;
-static BlockingMutex error_message_buf_mutex(LINKER_INITIALIZED);
+static Mutex error_message_buf_mutex;
static const unsigned kAsanBuggyPcPoolSize = 25;
static __sanitizer::atomic_uintptr_t AsanBuggyPcPool[kAsanBuggyPcPoolSize];
void AppendToErrorMessageBuffer(const char *buffer) {
- BlockingMutexLock l(&error_message_buf_mutex);
+ Lock l(&error_message_buf_mutex);
if (!error_message_buffer) {
error_message_buffer =
(char*)MmapOrDieQuietly(kErrorMessageBufferSize, __func__);
@@ -58,23 +60,23 @@ void AppendToErrorMessageBuffer(const char *buffer) {
void PrintMemoryByte(InternalScopedString *str, const char *before, u8 byte,
bool in_shadow, const char *after) {
Decorator d;
- str->append("%s%s%x%x%s%s", before,
- in_shadow ? d.ShadowByte(byte) : d.MemoryByte(), byte >> 4,
- byte & 15, d.Default(), after);
+ str->AppendF("%s%s%x%x%s%s", before,
+ in_shadow ? d.ShadowByte(byte) : d.MemoryByte(), byte >> 4,
+ byte & 15, d.Default(), after);
}
static void PrintZoneForPointer(uptr ptr, uptr zone_ptr,
const char *zone_name) {
if (zone_ptr) {
if (zone_name) {
- Printf("malloc_zone_from_ptr(%p) = %p, which is %s\n",
- ptr, zone_ptr, zone_name);
+ Printf("malloc_zone_from_ptr(%p) = %p, which is %s\n", (void *)ptr,
+ (void *)zone_ptr, zone_name);
} else {
Printf("malloc_zone_from_ptr(%p) = %p, which doesn't have a name\n",
- ptr, zone_ptr);
+ (void *)ptr, (void *)zone_ptr);
}
} else {
- Printf("malloc_zone_from_ptr(%p) = 0\n", ptr);
+ Printf("malloc_zone_from_ptr(%p) = 0\n", (void *)ptr);
}
}
@@ -155,10 +157,10 @@ class ScopedInErrorReport {
DumpProcessMap();
// Copy the message buffer so that we could start logging without holding a
- // lock that gets aquired during printing.
+ // lock that gets acquired during printing.
InternalMmapVector<char> buffer_copy(kErrorMessageBufferSize);
{
- BlockingMutexLock l(&error_message_buf_mutex);
+ Lock l(&error_message_buf_mutex);
internal_memcpy(buffer_copy.data(),
error_message_buffer, kErrorMessageBufferSize);
// Clear error_message_buffer so that if we find other errors
@@ -352,6 +354,18 @@ void ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end,
in_report.ReportError(error);
}
+void ReportBadParamsToAnnotateDoubleEndedContiguousContainer(
+ uptr storage_beg, uptr storage_end, uptr old_container_beg,
+ uptr old_container_end, uptr new_container_beg, uptr new_container_end,
+ BufferedStackTrace *stack) {
+ ScopedInErrorReport in_report;
+ ErrorBadParamsToAnnotateDoubleEndedContiguousContainer error(
+ GetCurrentTidOrInvalid(), stack, storage_beg, storage_end,
+ old_container_beg, old_container_end, new_container_beg,
+ new_container_end);
+ in_report.ReportError(error);
+}
+
void ReportODRViolation(const __asan_global *g1, u32 stack_id1,
const __asan_global *g2, u32 stack_id2) {
ScopedInErrorReport in_report;
@@ -435,9 +449,10 @@ static inline void CheckForInvalidPointerPair(void *p1, void *p2) {
void ReportMacMzReallocUnknown(uptr addr, uptr zone_ptr, const char *zone_name,
BufferedStackTrace *stack) {
ScopedInErrorReport in_report;
- Printf("mz_realloc(%p) -- attempting to realloc unallocated memory.\n"
- "This is an unrecoverable problem, exiting now.\n",
- addr);
+ Printf(
+ "mz_realloc(%p) -- attempting to realloc unallocated memory.\n"
+ "This is an unrecoverable problem, exiting now.\n",
+ (void *)addr);
PrintZoneForPointer(addr, zone_ptr, zone_name);
stack->Print();
DescribeAddressIfHeap(addr);
@@ -459,6 +474,10 @@ static bool SuppressErrorReport(uptr pc) {
void ReportGenericError(uptr pc, uptr bp, uptr sp, uptr addr, bool is_write,
uptr access_size, u32 exp, bool fatal) {
+ if (__asan_test_only_reported_buggy_pointer) {
+ *__asan_test_only_reported_buggy_pointer = addr;
+ return;
+ }
if (!fatal && SuppressErrorReport(pc)) return;
ENABLE_FRAME_POINTER;
@@ -490,7 +509,7 @@ void __asan_report_error(uptr pc, uptr bp, uptr sp, uptr addr, int is_write,
}
void NOINLINE __asan_set_error_report_callback(void (*callback)(const char*)) {
- BlockingMutexLock l(&error_message_buf_mutex);
+ Lock l(&error_message_buf_mutex);
error_report_callback = callback;
}
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_report.h b/contrib/llvm-project/compiler-rt/lib/asan/asan_report.h
index dcf60894ef34..3540b3b4b1bf 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_report.h
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_report.h
@@ -35,7 +35,8 @@ int GetGlobalsForAddress(uptr addr, __asan_global *globals, u32 *reg_sites,
const char *MaybeDemangleGlobalName(const char *name);
void PrintGlobalNameIfASCII(InternalScopedString *str, const __asan_global &g);
-void PrintGlobalLocation(InternalScopedString *str, const __asan_global &g);
+void PrintGlobalLocation(InternalScopedString *str, const __asan_global &g,
+ bool print_module_name);
void PrintMemoryByte(InternalScopedString *str, const char *before, u8 byte,
bool in_shadow, const char *after = "\n");
@@ -83,6 +84,10 @@ void ReportStringFunctionSizeOverflow(uptr offset, uptr size,
void ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end,
uptr old_mid, uptr new_mid,
BufferedStackTrace *stack);
+void ReportBadParamsToAnnotateDoubleEndedContiguousContainer(
+ uptr storage_beg, uptr storage_end, uptr old_container_beg,
+ uptr old_container_end, uptr new_container_beg, uptr new_container_end,
+ BufferedStackTrace *stack);
void ReportODRViolation(const __asan_global *g1, u32 stack_id1,
const __asan_global *g2, u32 stack_id2);
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_rtl.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_rtl.cpp
index bfaa3bc27027..a61deed7382b 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_rtl.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_rtl.cpp
@@ -27,6 +27,7 @@
#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_interface_internal.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "ubsan/ubsan_init.h"
@@ -44,14 +45,15 @@ static void AsanDie() {
static atomic_uint32_t num_calls;
if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) != 0) {
// Don't die twice - run a busy loop.
- while (1) { }
+ while (1) {
+ internal_sched_yield();
+ }
}
if (common_flags()->print_module_map >= 1)
DumpProcessMap();
- if (flags()->sleep_before_dying) {
- Report("Sleeping for %d second(s)\n", flags()->sleep_before_dying);
- SleepForSeconds(flags()->sleep_before_dying);
- }
+
+ WaitForDebugger(flags()->sleep_before_dying, "before dying");
+
if (flags()->unmap_shadow_on_exit) {
if (kMidMemBeg) {
UnmapOrDie((void*)kLowShadowBeg, kMidMemBeg - kLowShadowBeg);
@@ -69,8 +71,18 @@ static void CheckUnwind() {
}
// -------------------------- Globals --------------------- {{{1
-int asan_inited;
-bool asan_init_is_running;
+static StaticSpinMutex asan_inited_mutex;
+static atomic_uint8_t asan_inited = {0};
+
+static void SetAsanInited() {
+ atomic_store(&asan_inited, 1, memory_order_release);
+}
+
+bool AsanInited() {
+ return atomic_load(&asan_inited, memory_order_acquire) == 1;
+}
+
+bool replace_intrin_cached;
#if !ASAN_FIXED_MAPPING
uptr kHighMemEnd, kMidMemBeg, kMidMemEnd;
@@ -85,12 +97,8 @@ void ShowStatsAndAbort() {
NOINLINE
static void ReportGenericErrorWrapper(uptr addr, bool is_write, int size,
int exp_arg, bool fatal) {
- if (__asan_test_only_reported_buggy_pointer) {
- *__asan_test_only_reported_buggy_pointer = addr;
- } else {
- GET_CALLER_PC_BP_SP;
- ReportGenericError(pc, bp, sp, addr, is_write, size, exp_arg, fatal);
- }
+ GET_CALLER_PC_BP_SP;
+ ReportGenericError(pc, bp, sp, addr, is_write, size, exp_arg, fatal);
}
// --------------- LowLevelAllocateCallback ---------- {{{1
@@ -150,11 +158,11 @@ ASAN_REPORT_ERROR_N(store, true)
#define ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, exp_arg, fatal) \
uptr sp = MEM_TO_SHADOW(addr); \
- uptr s = size <= SHADOW_GRANULARITY ? *reinterpret_cast<u8 *>(sp) \
- : *reinterpret_cast<u16 *>(sp); \
+ uptr s = size <= ASAN_SHADOW_GRANULARITY ? *reinterpret_cast<u8 *>(sp) \
+ : *reinterpret_cast<u16 *>(sp); \
if (UNLIKELY(s)) { \
- if (UNLIKELY(size >= SHADOW_GRANULARITY || \
- ((s8)((addr & (SHADOW_GRANULARITY - 1)) + size - 1)) >= \
+ if (UNLIKELY(size >= ASAN_SHADOW_GRANULARITY || \
+ ((s8)((addr & (ASAN_SHADOW_GRANULARITY - 1)) + size - 1)) >= \
(s8)s)) { \
ReportGenericErrorWrapper(addr, is_write, size, exp_arg, fatal); \
} \
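
The callback body is the classic shadow fast path: load the shadow byte for the address (a u16 spanning two shadow bytes for 16-byte accesses), and report only if the access extends past the addressable prefix that the shadow value encodes. A hedged C++ restatement of the single-byte case, with illustrative names (the real macro folds the two-granule case into the same nonzero check):

    #include <cassert>
    #include <cstdint>

    constexpr uintptr_t kGranularity = 8;  // assumed ASAN_SHADOW_GRANULARITY

    // True if an access of `size` bytes at `addr` touches poisoned memory,
    // given shadow value `s` for addr's granule: 0 means fully addressable,
    // 1..7 means that many leading bytes are addressable.
    bool AccessIsPoisoned(uintptr_t addr, uintptr_t size, int8_t s) {
      if (s == 0)
        return false;
      if (size >= kGranularity)
        return true;  // spans granule boundaries; any nonzero shadow is a hit
      // Offset of the access's last byte within the granule, compared with
      // the number of addressable leading bytes.
      auto last = static_cast<int8_t>((addr & (kGranularity - 1)) + size - 1);
      return last >= s;
    }

    int main() {
      assert(!AccessIsPoisoned(0x1000, 4, 0));  // fully addressable granule
      assert(!AccessIsPoisoned(0x1000, 4, 5));  // bytes [0,4) within first 5
      assert(AccessIsPoisoned(0x1002, 4, 5));   // last byte at offset 5 >= 5
    }
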
@@ -188,7 +196,7 @@ ASAN_MEMORY_ACCESS_CALLBACK(store, true, 16)
extern "C"
NOINLINE INTERFACE_ATTRIBUTE
void __asan_loadN(uptr addr, uptr size) {
- if (__asan_region_is_poisoned(addr, size)) {
+ if ((addr = __asan_region_is_poisoned(addr, size))) {
GET_CALLER_PC_BP_SP;
ReportGenericError(pc, bp, sp, addr, false, size, 0, true);
}
@@ -197,7 +205,7 @@ void __asan_loadN(uptr addr, uptr size) {
extern "C"
NOINLINE INTERFACE_ATTRIBUTE
void __asan_exp_loadN(uptr addr, uptr size, u32 exp) {
- if (__asan_region_is_poisoned(addr, size)) {
+ if ((addr = __asan_region_is_poisoned(addr, size))) {
GET_CALLER_PC_BP_SP;
ReportGenericError(pc, bp, sp, addr, false, size, exp, true);
}
@@ -206,7 +214,7 @@ void __asan_exp_loadN(uptr addr, uptr size, u32 exp) {
extern "C"
NOINLINE INTERFACE_ATTRIBUTE
void __asan_loadN_noabort(uptr addr, uptr size) {
- if (__asan_region_is_poisoned(addr, size)) {
+ if ((addr = __asan_region_is_poisoned(addr, size))) {
GET_CALLER_PC_BP_SP;
ReportGenericError(pc, bp, sp, addr, false, size, 0, false);
}
@@ -215,7 +223,7 @@ void __asan_loadN_noabort(uptr addr, uptr size) {
extern "C"
NOINLINE INTERFACE_ATTRIBUTE
void __asan_storeN(uptr addr, uptr size) {
- if (__asan_region_is_poisoned(addr, size)) {
+ if ((addr = __asan_region_is_poisoned(addr, size))) {
GET_CALLER_PC_BP_SP;
ReportGenericError(pc, bp, sp, addr, true, size, 0, true);
}
@@ -224,7 +232,7 @@ void __asan_storeN(uptr addr, uptr size) {
extern "C"
NOINLINE INTERFACE_ATTRIBUTE
void __asan_exp_storeN(uptr addr, uptr size, u32 exp) {
- if (__asan_region_is_poisoned(addr, size)) {
+ if ((addr = __asan_region_is_poisoned(addr, size))) {
GET_CALLER_PC_BP_SP;
ReportGenericError(pc, bp, sp, addr, true, size, exp, true);
}
@@ -233,7 +241,7 @@ void __asan_exp_storeN(uptr addr, uptr size, u32 exp) {
extern "C"
NOINLINE INTERFACE_ATTRIBUTE
void __asan_storeN_noabort(uptr addr, uptr size) {
- if (__asan_region_is_poisoned(addr, size)) {
+ if ((addr = __asan_region_is_poisoned(addr, size))) {
GET_CALLER_PC_BP_SP;
ReportGenericError(pc, bp, sp, addr, true, size, 0, false);
}
@@ -289,11 +297,18 @@ static NOINLINE void force_interface_symbols() {
case 38: __asan_region_is_poisoned(0, 0); break;
case 39: __asan_describe_address(0); break;
case 40: __asan_set_shadow_00(0, 0); break;
- case 41: __asan_set_shadow_f1(0, 0); break;
- case 42: __asan_set_shadow_f2(0, 0); break;
- case 43: __asan_set_shadow_f3(0, 0); break;
- case 44: __asan_set_shadow_f5(0, 0); break;
- case 45: __asan_set_shadow_f8(0, 0); break;
+ case 41: __asan_set_shadow_01(0, 0); break;
+ case 42: __asan_set_shadow_02(0, 0); break;
+ case 43: __asan_set_shadow_03(0, 0); break;
+ case 44: __asan_set_shadow_04(0, 0); break;
+ case 45: __asan_set_shadow_05(0, 0); break;
+ case 46: __asan_set_shadow_06(0, 0); break;
+ case 47: __asan_set_shadow_07(0, 0); break;
+ case 48: __asan_set_shadow_f1(0, 0); break;
+ case 49: __asan_set_shadow_f2(0, 0); break;
+ case 50: __asan_set_shadow_f3(0, 0); break;
+ case 51: __asan_set_shadow_f5(0, 0); break;
+ case 52: __asan_set_shadow_f8(0, 0); break;
}
// clang-format on
}
@@ -313,7 +328,7 @@ static void InitializeHighMemEnd() {
kHighMemEnd = GetMaxUserVirtualAddress();
// Increase kHighMemEnd to make sure it's properly
// aligned together with kHighMemBeg:
- kHighMemEnd |= (GetMmapGranularity() << SHADOW_SCALE) - 1;
+ kHighMemEnd |= (GetMmapGranularity() << ASAN_SHADOW_SCALE) - 1;
#endif // !ASAN_FIXED_MAPPING
CHECK_EQ((kHighMemBeg % GetMmapGranularity()), 0);
}
@@ -365,34 +380,20 @@ void PrintAddressSpaceLayout() {
Printf("malloc_context_size=%zu\n",
(uptr)common_flags()->malloc_context_size);
- Printf("SHADOW_SCALE: %d\n", (int)SHADOW_SCALE);
- Printf("SHADOW_GRANULARITY: %d\n", (int)SHADOW_GRANULARITY);
- Printf("SHADOW_OFFSET: 0x%zx\n", (uptr)SHADOW_OFFSET);
- CHECK(SHADOW_SCALE >= 3 && SHADOW_SCALE <= 7);
+ Printf("SHADOW_SCALE: %d\n", (int)ASAN_SHADOW_SCALE);
+ Printf("SHADOW_GRANULARITY: %d\n", (int)ASAN_SHADOW_GRANULARITY);
+ Printf("SHADOW_OFFSET: 0x%zx\n", (uptr)ASAN_SHADOW_OFFSET);
+ CHECK(ASAN_SHADOW_SCALE >= 3 && ASAN_SHADOW_SCALE <= 7);
if (kMidMemBeg)
CHECK(kMidShadowBeg > kLowShadowEnd &&
kMidMemBeg > kMidShadowEnd &&
kHighShadowBeg > kMidMemEnd);
}
-#if defined(__thumb__) && defined(__linux__)
-#define START_BACKGROUND_THREAD_IN_ASAN_INTERNAL
-#endif
-
-#ifndef START_BACKGROUND_THREAD_IN_ASAN_INTERNAL
-static bool UNUSED __local_asan_dyninit = [] {
- MaybeStartBackgroudThread();
- SetSoftRssLimitExceededCallback(AsanSoftRssLimitExceededCallback);
-
- return false;
-}();
-#endif
-
-static void AsanInitInternal() {
- if (LIKELY(asan_inited)) return;
+static bool AsanInitInternal() {
+ if (LIKELY(AsanInited()))
+ return true;
SanitizerToolName = "AddressSanitizer";
- CHECK(!asan_init_is_running && "ASan init calls itself!");
- asan_init_is_running = true;
CacheBinaryName();
@@ -400,12 +401,13 @@ static void AsanInitInternal() {
// initialization steps look at flags().
InitializeFlags();
+ WaitForDebugger(flags()->sleep_before_init, "before init");
+
// Stop performing init at this point if we are being loaded via
// dlopen() and the platform supports it.
if (SANITIZER_SUPPORTS_INIT_FOR_DLOPEN && UNLIKELY(HandleDlopenInit())) {
- asan_init_is_running = false;
VReport(1, "AddressSanitizer init is being performed for dlopen().\n");
- return;
+ return false;
}
AsanCheckIncompatibleRT();
@@ -434,11 +436,8 @@ static void AsanInitInternal() {
__sanitizer::InitializePlatformEarly();
- // Re-exec ourselves if we need to set additional env or command line args.
- MaybeReexec();
-
// Setup internal allocator callback.
- SetLowLevelAllocateMinAlignment(SHADOW_GRANULARITY);
+ SetLowLevelAllocateMinAlignment(ASAN_SHADOW_GRANULARITY);
SetLowLevelAllocateCallback(OnLowLevelAllocate);
InitializeAsanInterceptors();
@@ -462,15 +461,13 @@ static void AsanInitInternal() {
allocator_options.SetFrom(flags(), common_flags());
InitializeAllocator(allocator_options);
-#ifdef START_BACKGROUND_THREAD_IN_ASAN_INTERNAL
- MaybeStartBackgroudThread();
- SetSoftRssLimitExceededCallback(AsanSoftRssLimitExceededCallback);
-#endif
+ if (SANITIZER_START_BACKGROUND_THREAD_IN_ASAN_INTERNAL)
+ MaybeStartBackgroudThread();
// On Linux AsanThread::ThreadStart() calls malloc(), which is why asan_inited
// should be set to 1 prior to initializing the threads.
- asan_inited = 1;
- asan_init_is_running = false;
+ replace_intrin_cached = flags()->replace_intrin;
+ SetAsanInited();
if (flags()->atexit)
Atexit(asan_atexit);
@@ -493,14 +490,11 @@ static void AsanInitInternal() {
if (CAN_SANITIZE_LEAKS) {
__lsan::InitCommonLsan();
- if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
- if (flags()->halt_on_error)
- Atexit(__lsan::DoLeakCheck);
- else
- Atexit(__lsan::DoRecoverableLeakCheckVoid);
- }
+ InstallAtExitCheckLeaks();
}
+ InstallAtForkHandler();
+
#if CAN_SANITIZE_UB
__ubsan::InitAsPlugin();
#endif
@@ -518,18 +512,30 @@ static void AsanInitInternal() {
VReport(1, "AddressSanitizer Init done\n");
- if (flags()->sleep_after_init) {
- Report("Sleeping for %d second(s)\n", flags()->sleep_after_init);
- SleepForSeconds(flags()->sleep_after_init);
- }
+ WaitForDebugger(flags()->sleep_after_init, "after init");
+
+ return true;
}
// Initialize as requested from some part of ASan runtime library (interceptors,
// allocator, etc).
void AsanInitFromRtl() {
+ if (LIKELY(AsanInited()))
+ return;
+ SpinMutexLock lock(&asan_inited_mutex);
AsanInitInternal();
}
+bool TryAsanInitFromRtl() {
+ if (LIKELY(AsanInited()))
+ return true;
+ if (!asan_inited_mutex.TryLock())
+ return false;
+ bool result = AsanInitInternal();
+ asan_inited_mutex.Unlock();
+ return result;
+}
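
TryAsanInitFromRtl adds a non-blocking variant so callers that may run while initialization is already in progress can bail out instead of deadlocking on the init mutex. The shape is ordinary double-checked initialization; a sketch with standard-library primitives (the runtime uses its own SpinMutex and atomics):

    #include <atomic>
    #include <mutex>

    static std::atomic<bool> g_inited{false};
    static std::mutex g_init_mu;

    static void InitOnceSlow() { /* expensive one-time setup */ }

    // Blocking variant: mirrors AsanInitFromRtl.
    void InitFromRtlDemo() {
      if (g_inited.load(std::memory_order_acquire))
        return;
      std::lock_guard<std::mutex> lock(g_init_mu);
      if (!g_inited.load(std::memory_order_relaxed)) {
        InitOnceSlow();
        g_inited.store(true, std::memory_order_release);
      }
    }

    // Non-blocking variant: mirrors TryAsanInitFromRtl; gives up rather than
    // block when another thread holds the init lock.
    bool TryInitFromRtlDemo() {
      if (g_inited.load(std::memory_order_acquire))
        return true;
      if (!g_init_mu.try_lock())
        return false;
      if (!g_inited.load(std::memory_order_relaxed)) {
        InitOnceSlow();
        g_inited.store(true, std::memory_order_release);
      }
      g_init_mu.unlock();
      return true;
    }

    int main() { InitFromRtlDemo(); }
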
+
#if ASAN_DYNAMIC
// Initialize runtime in case it's LD_PRELOAD-ed into unsanitized executable
// (and thus normal initializers from .preinit_array or modules haven't run).
@@ -557,10 +563,11 @@ void UnpoisonStack(uptr bottom, uptr top, const char *type) {
"False positive error reports may follow\n"
"For details see "
"https://github.com/google/sanitizers/issues/189\n",
- type, top, bottom, top - bottom, top - bottom);
+ type, (void *)top, (void *)bottom, (void *)(top - bottom),
+ top - bottom);
return;
}
- PoisonShadow(bottom, RoundUpTo(top - bottom, SHADOW_GRANULARITY), 0);
+ PoisonShadow(bottom, RoundUpTo(top - bottom, ASAN_SHADOW_GRANULARITY), 0);
}
static void UnpoisonDefaultStack() {
@@ -599,7 +606,7 @@ static void UnpoisonFakeStack() {
using namespace __asan;
void NOINLINE __asan_handle_no_return() {
- if (asan_init_is_running)
+ if (UNLIKELY(!AsanInited()))
return;
if (!PlatformUnpoisonStacks())
@@ -629,7 +636,7 @@ void NOINLINE __asan_set_death_callback(void (*callback)(void)) {
// We use this call as a trigger to wake up ASan from deactivated state.
void __asan_init() {
AsanActivate();
- AsanInitInternal();
+ AsanInitFromRtl();
}
void __asan_version_mismatch_check() {
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_rtl_static.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_rtl_static.cpp
new file mode 100644
index 000000000000..a6f812bb8915
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_rtl_static.cpp
@@ -0,0 +1,36 @@
+//===-- asan_rtl_static.cpp -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Main file of the ASan run-time library.
+//===----------------------------------------------------------------------===//
+
+// This file is mostly empty. The main reason to have it is to work around the
+// Windows build, which complains when no files are part of the asan_static lib.
+
+#include "sanitizer_common/sanitizer_common.h"
+
+#define REPORT_FUNCTION(Name) \
+ extern "C" SANITIZER_WEAK_ATTRIBUTE void Name(__asan::uptr addr); \
+ extern "C" void Name##_asm(uptr addr) { Name(addr); }
+
+namespace __asan {
+
+REPORT_FUNCTION(__asan_report_load1)
+REPORT_FUNCTION(__asan_report_load2)
+REPORT_FUNCTION(__asan_report_load4)
+REPORT_FUNCTION(__asan_report_load8)
+REPORT_FUNCTION(__asan_report_load16)
+REPORT_FUNCTION(__asan_report_store1)
+REPORT_FUNCTION(__asan_report_store2)
+REPORT_FUNCTION(__asan_report_store4)
+REPORT_FUNCTION(__asan_report_store8)
+REPORT_FUNCTION(__asan_report_store16)
+
+} // namespace __asan
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_rtl_x86_64.S b/contrib/llvm-project/compiler-rt/lib/asan/asan_rtl_x86_64.S
new file mode 100644
index 000000000000..9c5289856d8a
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_rtl_x86_64.S
@@ -0,0 +1,150 @@
+#include "asan_mapping.h"
+#include "sanitizer_common/sanitizer_asm.h"
+
+#if defined(__x86_64__)
+#include "sanitizer_common/sanitizer_platform.h"
+
+.file "asan_rtl_x86_64.S"
+
+#define NAME(n, reg, op, s, i) n##_##op##_##i##_##s##_##reg
+
+#define FNAME(reg, op, s, i) NAME(__asan_check, reg, op, s, i)
+#define RLABEL(reg, op, s, i) NAME(.return, reg, op, s, i)
+#define CLABEL(reg, op, s, i) NAME(.check, reg, op, s, i)
+#define FLABEL(reg, op, s, i) NAME(.fail, reg, op, s, i)
+
+#define BEGINF(reg, op, s, i) \
+.section .text.FNAME(reg, op, s, i),"ax",@progbits ;\
+.globl FNAME(reg, op, s, i) ;\
+.hidden FNAME(reg, op, s, i) ;\
+ASM_TYPE_FUNCTION(FNAME(reg, op, s, i)) ;\
+.cfi_startproc ;\
+FNAME(reg, op, s, i): ;\
+
+#define ENDF .cfi_endproc ;\
+
+// Access check functions for 1-, 2- and 4-byte types, which require extra checks.
+#define ASAN_MEMORY_ACCESS_INITIAL_CHECK_ADD(reg, op, s) \
+ mov %##reg,%r10 ;\
+ shr $0x3,%r10 ;\
+ .if ASAN_SHADOW_OFFSET_CONST < 0x80000000 ;\
+ movsbl ASAN_SHADOW_OFFSET_CONST(%r10),%r10d ;\
+ .else ;\
+ movabsq $ASAN_SHADOW_OFFSET_CONST,%r11 ;\
+ movsbl (%r10,%r11),%r10d ;\
+ .endif ;\
+ test %r10d,%r10d ;\
+ jne CLABEL(reg, op, s, add) ;\
+RLABEL(reg, op, s, add): ;\
+ retq ;\
+
+#define ASAN_MEMORY_ACCESS_EXTRA_CHECK_1(reg, op, i) \
+CLABEL(reg, op, 1, i): ;\
+ mov %##reg,%r11 ;\
+ and $0x7,%r11d ;\
+ cmp %r10d,%r11d ;\
+ jl RLABEL(reg, op, 1, i);\
+ mov %##reg,%rdi ;\
+ jmp __asan_report_##op##1_asm ;\
+
+#define ASAN_MEMORY_ACCESS_EXTRA_CHECK_2(reg, op, i) \
+CLABEL(reg, op, 2, i): ;\
+ mov %##reg,%r11 ;\
+ and $0x7,%r11d ;\
+ add $0x1,%r11d ;\
+ cmp %r10d,%r11d ;\
+ jl RLABEL(reg, op, 2, i);\
+ mov %##reg,%rdi ;\
+ jmp __asan_report_##op##2_asm ;\
+
+#define ASAN_MEMORY_ACCESS_EXTRA_CHECK_4(reg, op, i) \
+CLABEL(reg, op, 4, i): ;\
+ mov %##reg,%r11 ;\
+ and $0x7,%r11d ;\
+ add $0x3,%r11d ;\
+ cmp %r10d,%r11d ;\
+ jl RLABEL(reg, op, 4, i);\
+ mov %##reg,%rdi ;\
+ jmp __asan_report_##op##4_asm ;\
+
+#define ASAN_MEMORY_ACCESS_CALLBACK_ADD_1(reg, op) \
+BEGINF(reg, op, 1, add) ;\
+ ASAN_MEMORY_ACCESS_INITIAL_CHECK_ADD(reg, op, 1) ;\
+ ASAN_MEMORY_ACCESS_EXTRA_CHECK_1(reg, op, add) ;\
+ENDF
+
+#define ASAN_MEMORY_ACCESS_CALLBACK_ADD_2(reg, op) \
+BEGINF(reg, op, 2, add) ;\
+ ASAN_MEMORY_ACCESS_INITIAL_CHECK_ADD(reg, op, 2) ;\
+ ASAN_MEMORY_ACCESS_EXTRA_CHECK_2(reg, op, add) ;\
+ENDF
+
+#define ASAN_MEMORY_ACCESS_CALLBACK_ADD_4(reg, op) \
+BEGINF(reg, op, 4, add) ;\
+ ASAN_MEMORY_ACCESS_INITIAL_CHECK_ADD(reg, op, 4) ;\
+ ASAN_MEMORY_ACCESS_EXTRA_CHECK_4(reg, op, add) ;\
+ENDF
+
+// Access check functions for 8 and 16 byte types: no extra checks required.
+#define ASAN_MEMORY_ACCESS_CHECK_ADD(reg, op, s, c) \
+ mov %##reg,%r10 ;\
+ shr $0x3,%r10 ;\
+ .if ASAN_SHADOW_OFFSET_CONST < 0x80000000 ;\
+ ##c $0x0,ASAN_SHADOW_OFFSET_CONST(%r10) ;\
+ .else ;\
+ movabsq $ASAN_SHADOW_OFFSET_CONST,%r11 ;\
+ ##c $0x0,(%r10,%r11) ;\
+ .endif ;\
+ jne FLABEL(reg, op, s, add) ;\
+ retq ;\
+
+#define ASAN_MEMORY_ACCESS_FAIL(reg, op, s, i) \
+FLABEL(reg, op, s, i): ;\
+ mov %##reg,%rdi ;\
+ jmp __asan_report_##op##s##_asm;\
+
+#define ASAN_MEMORY_ACCESS_CALLBACK_ADD_8(reg, op) \
+BEGINF(reg, op, 8, add) ;\
+ ASAN_MEMORY_ACCESS_CHECK_ADD(reg, op, 8, cmpb) ;\
+ ASAN_MEMORY_ACCESS_FAIL(reg, op, 8, add) ;\
+ENDF
+
+#define ASAN_MEMORY_ACCESS_CALLBACK_ADD_16(reg, op) \
+BEGINF(reg, op, 16, add) ;\
+ ASAN_MEMORY_ACCESS_CHECK_ADD(reg, op, 16, cmpw) ;\
+ ASAN_MEMORY_ACCESS_FAIL(reg, op, 16, add) ;\
+ENDF
+
+#define ASAN_MEMORY_ACCESS_CALLBACKS_ADD(reg) \
+ASAN_MEMORY_ACCESS_CALLBACK_ADD_1(reg, load) \
+ASAN_MEMORY_ACCESS_CALLBACK_ADD_1(reg, store) \
+ASAN_MEMORY_ACCESS_CALLBACK_ADD_2(reg, load) \
+ASAN_MEMORY_ACCESS_CALLBACK_ADD_2(reg, store) \
+ASAN_MEMORY_ACCESS_CALLBACK_ADD_4(reg, load) \
+ASAN_MEMORY_ACCESS_CALLBACK_ADD_4(reg, store) \
+ASAN_MEMORY_ACCESS_CALLBACK_ADD_8(reg, load) \
+ASAN_MEMORY_ACCESS_CALLBACK_ADD_8(reg, store) \
+ASAN_MEMORY_ACCESS_CALLBACK_ADD_16(reg, load) \
+ASAN_MEMORY_ACCESS_CALLBACK_ADD_16(reg, store) \
+
+
+// Instantiate all but the R10 and R11 callbacks. We are using the PLTSafe
+// class with the intrinsic, which guarantees that code generation will never
+// emit an R10 or R11 callback.
+ASAN_MEMORY_ACCESS_CALLBACKS_ADD(RAX)
+ASAN_MEMORY_ACCESS_CALLBACKS_ADD(RBX)
+ASAN_MEMORY_ACCESS_CALLBACKS_ADD(RCX)
+ASAN_MEMORY_ACCESS_CALLBACKS_ADD(RDX)
+ASAN_MEMORY_ACCESS_CALLBACKS_ADD(RSI)
+ASAN_MEMORY_ACCESS_CALLBACKS_ADD(RDI)
+ASAN_MEMORY_ACCESS_CALLBACKS_ADD(RBP)
+ASAN_MEMORY_ACCESS_CALLBACKS_ADD(R8)
+ASAN_MEMORY_ACCESS_CALLBACKS_ADD(R9)
+ASAN_MEMORY_ACCESS_CALLBACKS_ADD(R12)
+ASAN_MEMORY_ACCESS_CALLBACKS_ADD(R13)
+ASAN_MEMORY_ACCESS_CALLBACKS_ADD(R14)
+ASAN_MEMORY_ACCESS_CALLBACKS_ADD(R15)
+
+#endif
+
+NO_EXEC_STACK_DIRECTIVE
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_shadow_setup.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_shadow_setup.cpp
index 6e6260d3413f..fc6de39622b5 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_shadow_setup.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_shadow_setup.cpp
@@ -33,7 +33,7 @@ static void ProtectGap(uptr addr, uptr size) {
"protect_shadow_gap=0:"
" not protecting shadow gap, allocating gap's shadow\n"
"|| `[%p, %p]` || ShadowGap's shadow ||\n",
- GapShadowBeg, GapShadowEnd);
+ (void*)GapShadowBeg, (void*)GapShadowEnd);
ReserveShadowMemoryRange(GapShadowBeg, GapShadowEnd,
"unprotected gap shadow");
return;
@@ -113,7 +113,7 @@ void InitializeShadowMemory() {
"Shadow memory range interleaves with an existing memory mapping. "
"ASan cannot proceed correctly. ABORTING.\n");
Report("ASan shadow was supposed to be located in the [%p-%p] range.\n",
- shadow_start, kHighShadowEnd);
+ (void*)shadow_start, (void*)kHighShadowEnd);
MaybeReportLinuxPIEBug();
DumpProcessMap();
Die();
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_stack.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_stack.cpp
index 048295d6928a..764c6ac193fb 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_stack.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_stack.cpp
@@ -57,7 +57,7 @@ void __sanitizer::BufferedStackTrace::UnwindImpl(
uptr pc, uptr bp, void *context, bool request_fast, u32 max_depth) {
using namespace __asan;
size = 0;
- if (UNLIKELY(!asan_inited))
+ if (UNLIKELY(!AsanInited()))
return;
request_fast = StackTrace::WillUseFastUnwind(request_fast);
AsanThread *t = GetCurrentThread();
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_stack.h b/contrib/llvm-project/compiler-rt/lib/asan/asan_stack.h
index b9575d2f427e..02a76af847ae 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_stack.h
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_stack.h
@@ -32,24 +32,24 @@ u32 GetMallocContextSize();
// as early as possible (in functions exposed to the user), as we generally
// don't want stack trace to contain functions from ASan internals.
-#define GET_STACK_TRACE(max_size, fast) \
- BufferedStackTrace stack; \
- if (max_size <= 2) { \
- stack.size = max_size; \
- if (max_size > 0) { \
- stack.top_frame_bp = GET_CURRENT_FRAME(); \
- stack.trace_buffer[0] = StackTrace::GetCurrentPc(); \
- if (max_size > 1) stack.trace_buffer[1] = GET_CALLER_PC(); \
- } \
- } else { \
- stack.Unwind(StackTrace::GetCurrentPc(), \
- GET_CURRENT_FRAME(), nullptr, fast, max_size); \
+#define GET_STACK_TRACE(max_size, fast) \
+ UNINITIALIZED BufferedStackTrace stack; \
+ if (max_size <= 2) { \
+ stack.size = max_size; \
+ if (max_size > 0) { \
+ stack.top_frame_bp = GET_CURRENT_FRAME(); \
+ stack.trace_buffer[0] = StackTrace::GetCurrentPc(); \
+ if (max_size > 1) \
+ stack.trace_buffer[1] = GET_CALLER_PC(); \
+ } \
+ } else { \
+ stack.Unwind(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), nullptr, \
+ fast, max_size); \
}
-#define GET_STACK_TRACE_FATAL(pc, bp) \
- BufferedStackTrace stack; \
- stack.Unwind(pc, bp, nullptr, \
- common_flags()->fast_unwind_on_fatal)
+#define GET_STACK_TRACE_FATAL(pc, bp) \
+ UNINITIALIZED BufferedStackTrace stack; \
+ stack.Unwind(pc, bp, nullptr, common_flags()->fast_unwind_on_fatal)
#define GET_STACK_TRACE_FATAL_HERE \
GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal)
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_stats.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_stats.cpp
index 00ded8f5ef50..78cb30ec763d 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_stats.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_stats.cpp
@@ -62,11 +62,11 @@ void AsanStats::MergeFrom(const AsanStats *stats) {
dst_ptr[i] += src_ptr[i];
}
-static BlockingMutex print_lock(LINKER_INITIALIZED);
+static Mutex print_lock;
static AsanStats unknown_thread_stats(LINKER_INITIALIZED);
static AsanStats dead_threads_stats(LINKER_INITIALIZED);
-static BlockingMutex dead_threads_stats_lock(LINKER_INITIALIZED);
+static Mutex dead_threads_stats_lock;
// Required for malloc_zone_statistics() on OS X. This can't be stored in
// per-thread AsanStats.
static uptr max_malloced_memory;
@@ -87,7 +87,7 @@ static void GetAccumulatedStats(AsanStats *stats) {
}
stats->MergeFrom(&unknown_thread_stats);
{
- BlockingMutexLock lock(&dead_threads_stats_lock);
+ Lock lock(&dead_threads_stats_lock);
stats->MergeFrom(&dead_threads_stats);
}
// This is not very accurate: we may miss allocation peaks that happen
@@ -99,7 +99,7 @@ static void GetAccumulatedStats(AsanStats *stats) {
}
void FlushToDeadThreadStats(AsanStats *stats) {
- BlockingMutexLock lock(&dead_threads_stats_lock);
+ Lock lock(&dead_threads_stats_lock);
dead_threads_stats.MergeFrom(stats);
stats->Clear();
}
@@ -122,11 +122,11 @@ static void PrintAccumulatedStats() {
AsanStats stats;
GetAccumulatedStats(&stats);
// Use lock to keep reports from mixing up.
- BlockingMutexLock lock(&print_lock);
+ Lock lock(&print_lock);
stats.Print();
- StackDepotStats *stack_depot_stats = StackDepotGetStats();
+ StackDepotStats stack_depot_stats = StackDepotGetStats();
Printf("Stats: StackDepot: %zd ids; %zdM allocated\n",
- stack_depot_stats->n_uniq_ids, stack_depot_stats->allocated >> 20);
+ stack_depot_stats.n_uniq_ids, stack_depot_stats.allocated >> 20);
PrintInternalAllocatorStats();
}
@@ -142,7 +142,7 @@ uptr __sanitizer_get_current_allocated_bytes() {
uptr freed = stats.freed;
// Return sane value if malloced < freed due to racy
// way we update accumulated stats.
- return (malloced > freed) ? malloced - freed : 1;
+ return (malloced > freed) ? malloced - freed : 0;
}
uptr __sanitizer_get_heap_size() {
@@ -161,7 +161,7 @@ uptr __sanitizer_get_free_bytes() {
+ stats.malloced_redzones;
// Return sane value if total_free < total_used due to racy
// way we update accumulated stats.
- return (total_free > total_used) ? total_free - total_used : 1;
+ return (total_free > total_used) ? total_free - total_used : 0;
}
uptr __sanitizer_get_unmapped_bytes() {
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_suppressions.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_suppressions.cpp
index 8cb2c3e3b9b6..e71d23182186 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_suppressions.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_suppressions.cpp
@@ -81,9 +81,10 @@ bool IsStackTraceSuppressed(const StackTrace *stack) {
}
if (suppression_ctx->HasSuppressionType(kInterceptorViaFunction)) {
- SymbolizedStack *frames = symbolizer->SymbolizePC(addr);
+ SymbolizedStackHolder symbolized_stack(symbolizer->SymbolizePC(addr));
+ const SymbolizedStack *frames = symbolized_stack.get();
CHECK(frames);
- for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
+ for (const SymbolizedStack *cur = frames; cur; cur = cur->next) {
const char *function_name = cur->info.function;
if (!function_name) {
continue;
@@ -91,11 +92,9 @@ bool IsStackTraceSuppressed(const StackTrace *stack) {
// Match "interceptor_via_fun" suppressions.
if (suppression_ctx->Match(function_name, kInterceptorViaFunction,
&s)) {
- frames->ClearAll();
return true;
}
}
- frames->ClearAll();
}
}
return false;
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_thread.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_thread.cpp
index 35d4467e7b53..8798968947e8 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_thread.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_thread.cpp
@@ -10,24 +10,25 @@
//
// Thread-related code.
//===----------------------------------------------------------------------===//
+#include "asan_thread.h"
+
#include "asan_allocator.h"
#include "asan_interceptors.h"
+#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_stack.h"
-#include "asan_thread.h"
-#include "asan_mapping.h"
+#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
-#include "lsan/lsan_common.h"
namespace __asan {
// AsanThreadContext implementation.
void AsanThreadContext::OnCreated(void *arg) {
- CreateThreadContextArgs *args = static_cast<CreateThreadContextArgs*>(arg);
+ CreateThreadContextArgs *args = static_cast<CreateThreadContextArgs *>(arg);
if (args->stack)
stack_id = StackDepotPut(*args->stack);
thread = args->thread;
@@ -39,34 +40,49 @@ void AsanThreadContext::OnFinished() {
thread = nullptr;
}
-// MIPS requires aligned address
-static ALIGNED(16) char thread_registry_placeholder[sizeof(ThreadRegistry)];
static ThreadRegistry *asan_thread_registry;
+static ThreadArgRetval *thread_data;
-static BlockingMutex mu_for_thread_context(LINKER_INITIALIZED);
-static LowLevelAllocator allocator_for_thread_context;
+static Mutex mu_for_thread_context;
static ThreadContextBase *GetAsanThreadContext(u32 tid) {
- BlockingMutexLock lock(&mu_for_thread_context);
- return new(allocator_for_thread_context) AsanThreadContext(tid);
+ Lock lock(&mu_for_thread_context);
+ return new (GetGlobalLowLevelAllocator()) AsanThreadContext(tid);
}
-ThreadRegistry &asanThreadRegistry() {
+static void InitThreads() {
static bool initialized;
// Don't worry about thread_safety - this should be called when there is
// a single thread.
- if (!initialized) {
- // Never reuse ASan threads: we store pointer to AsanThreadContext
- // in TSD and can't reliably tell when no more TSD destructors will
- // be called. It would be wrong to reuse AsanThreadContext for another
- // thread before all TSD destructors will be called for it.
- asan_thread_registry =
- new (thread_registry_placeholder) ThreadRegistry(GetAsanThreadContext);
- initialized = true;
- }
+ if (LIKELY(initialized))
+ return;
+  // Never reuse ASan threads: we store a pointer to AsanThreadContext
+  // in TSD and can't reliably tell when no more TSD destructors will
+  // be called. It would be wrong to reuse an AsanThreadContext for another
+  // thread before all TSD destructors have been called for it.
+
+  // MIPS requires an aligned address.
+ static ALIGNED(alignof(
+ ThreadRegistry)) char thread_registry_placeholder[sizeof(ThreadRegistry)];
+ static ALIGNED(alignof(
+ ThreadArgRetval)) char thread_data_placeholder[sizeof(ThreadArgRetval)];
+
+ asan_thread_registry =
+ new (thread_registry_placeholder) ThreadRegistry(GetAsanThreadContext);
+ thread_data = new (thread_data_placeholder) ThreadArgRetval();
+ initialized = true;
+}
+
+ThreadRegistry &asanThreadRegistry() {
+ InitThreads();
return *asan_thread_registry;
}
+ThreadArgRetval &asanThreadArgRetval() {
+ InitThreads();
+ return *thread_data;
+}
+
AsanThreadContext *GetThreadContextByTidLocked(u32 tid) {
return static_cast<AsanThreadContext *>(
asanThreadRegistry().GetThreadLocked(tid));
@@ -74,23 +90,29 @@ AsanThreadContext *GetThreadContextByTidLocked(u32 tid) {
// AsanThread implementation.
-AsanThread *AsanThread::Create(thread_callback_t start_routine, void *arg,
+AsanThread *AsanThread::Create(const void *start_data, uptr data_size,
u32 parent_tid, StackTrace *stack,
bool detached) {
uptr PageSize = GetPageSizeCached();
uptr size = RoundUpTo(sizeof(AsanThread), PageSize);
- AsanThread *thread = (AsanThread*)MmapOrDie(size, __func__);
- thread->start_routine_ = start_routine;
- thread->arg_ = arg;
+ AsanThread *thread = (AsanThread *)MmapOrDie(size, __func__);
+ if (data_size) {
+ uptr available_size = (uptr)thread + size - (uptr)(thread->start_data_);
+ CHECK_LE(data_size, available_size);
+ internal_memcpy(thread->start_data_, start_data, data_size);
+ }
AsanThreadContext::CreateThreadContextArgs args = {thread, stack};
- asanThreadRegistry().CreateThread(*reinterpret_cast<uptr *>(thread), detached,
- parent_tid, &args);
+ asanThreadRegistry().CreateThread(0, detached, parent_tid, &args);
return thread;
}
+void AsanThread::GetStartData(void *out, uptr out_size) const {
+ internal_memcpy(out, start_data_, out_size);
+}
+
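Editorial sketch: Create() no longer stores a start routine and argument; callers copy an opaque, trivially copyable parameter block into the trailing start_data_ storage and read it back with GetStartData(). A self-contained sketch of that round trip, with hypothetical names and a fixed-size tail buffer standing in for the flexible array member:

#include <cstdio>
#include <cstdlib>
#include <cstring>

struct Thread {
  unsigned tid;
  char start_data[64];  // stand-in for the trailing start_data_[] member

  template <typename T> void GetStartData(T &out) const {
    static_assert(sizeof(T) <= sizeof(start_data), "params must fit");
    std::memcpy(&out, start_data, sizeof(T));
  }
};

struct StartParams {  // must be trivially copyable
  void *(*start_routine)(void *);
  void *arg;
};

static Thread *CreateThread(const StartParams &params) {
  // Zero-initialized allocation, as with MmapOrDie above; no constructor runs.
  Thread *t = static_cast<Thread *>(std::calloc(1, sizeof(Thread)));
  std::memcpy(t->start_data, &params, sizeof(params));
  return t;
}

int main() {
  StartParams in = {nullptr, (void *)0x1234};
  Thread *t = CreateThread(in);
  StartParams out;
  t->GetStartData(out);
  std::printf("arg = %p\n", out.arg);
  std::free(t);
}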
void AsanThread::TSDDtor(void *tsd) {
- AsanThreadContext *context = (AsanThreadContext*)tsd;
+ AsanThreadContext *context = (AsanThreadContext *)tsd;
VReport(1, "T%d TSDDtor\n", context->tid);
if (context->thread)
context->thread->Destroy();
@@ -144,8 +166,7 @@ void AsanThread::StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom,
current_fake_stack->Destroy(this->tid());
}
-void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save,
- uptr *bottom_old,
+void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save, uptr *bottom_old,
uptr *size_old) {
if (!atomic_load(&stack_switching_, memory_order_relaxed)) {
Report("ERROR: finishing a fiber switch that has not started\n");
@@ -171,7 +192,8 @@ void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save,
inline AsanThread::StackBounds AsanThread::GetStackBounds() const {
if (!atomic_load(&stack_switching_, memory_order_acquire)) {
// Make sure the stack bounds are fully initialized.
- if (stack_bottom_ >= stack_top_) return {0, 0};
+ if (stack_bottom_ >= stack_top_)
+ return {0, 0};
return {stack_bottom_, stack_top_};
}
char local;
@@ -184,13 +206,9 @@ inline AsanThread::StackBounds AsanThread::GetStackBounds() const {
return {stack_bottom_, stack_top_};
}
-uptr AsanThread::stack_top() {
- return GetStackBounds().top;
-}
+uptr AsanThread::stack_top() { return GetStackBounds().top; }
-uptr AsanThread::stack_bottom() {
- return GetStackBounds().bottom;
-}
+uptr AsanThread::stack_bottom() { return GetStackBounds().bottom; }
uptr AsanThread::stack_size() {
const auto bounds = GetStackBounds();
@@ -211,8 +229,8 @@ FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
// This CAS checks if the state was 0 and if so changes it to state 1,
// if that was successful, it initializes the pointer.
if (atomic_compare_exchange_strong(
- reinterpret_cast<atomic_uintptr_t *>(&fake_stack_), &old_val, 1UL,
- memory_order_relaxed)) {
+ reinterpret_cast<atomic_uintptr_t *>(&fake_stack_), &old_val, 1UL,
+ memory_order_relaxed)) {
uptr stack_size_log = Log2(RoundUpToPowerOfTwo(stack_size));
CHECK_LE(flags()->min_uar_stack_size_log, flags()->max_uar_stack_size_log);
stack_size_log =
@@ -254,43 +272,24 @@ void AsanThread::Init(const InitOptions *options) {
int local = 0;
VReport(1, "T%d: stack [%p,%p) size 0x%zx; local=%p\n", tid(),
(void *)stack_bottom_, (void *)stack_top_, stack_top_ - stack_bottom_,
- &local);
+ (void *)&local);
}
// Fuchsia doesn't use ThreadStart.
// asan_fuchsia.cpp defines CreateMainThread and SetThreadStackAndTls.
#if !SANITIZER_FUCHSIA
-thread_return_t AsanThread::ThreadStart(tid_t os_id) {
+void AsanThread::ThreadStart(tid_t os_id) {
Init();
asanThreadRegistry().StartThread(tid(), os_id, ThreadType::Regular, nullptr);
- if (common_flags()->use_sigaltstack) SetAlternateSignalStack();
-
- if (!start_routine_) {
- // start_routine_ == 0 if we're on the main thread or on one of the
- // OS X libdispatch worker threads. But nobody is supposed to call
- // ThreadStart() for the worker threads.
- CHECK_EQ(tid(), 0);
- return 0;
- }
-
- thread_return_t res = start_routine_(arg_);
-
- // On POSIX systems we defer this to the TSD destructor. LSan will consider
- // the thread's memory as non-live from the moment we call Destroy(), even
- // though that memory might contain pointers to heap objects which will be
- // cleaned up by a user-defined TSD destructor. Thus, calling Destroy() before
- // the TSD destructors have run might cause false positives in LSan.
- if (!SANITIZER_POSIX)
- this->Destroy();
-
- return res;
+ if (common_flags()->use_sigaltstack)
+ SetAlternateSignalStack();
}
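Editorial sketch: ThreadStart() now only registers the thread; running the user routine and tearing the thread down move to the platform wrappers, and on POSIX Destroy() stays deferred to the TSD destructor for the reason the removed comment gives. A small runnable illustration of that deferral with plain pthreads (hypothetical names; compile with -lpthread):

#include <pthread.h>
#include <cstdio>

static pthread_key_t key;

// Runs at thread exit, after user TSD destructors had their chance; this is
// the point at which a Destroy()-style teardown becomes safe on POSIX.
static void destroy_thread(void *p) {
  std::printf("Destroy() for %s\n", static_cast<const char *>(p));
}

static void *routine(void *) {
  pthread_setspecific(key, (void *)"worker");
  return nullptr;  // note: no explicit cleanup here
}

int main() {
  pthread_key_create(&key, destroy_thread);
  pthread_t t;
  pthread_create(&t, nullptr, routine, nullptr);
  pthread_join(t, nullptr);
}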
AsanThread *CreateMainThread() {
AsanThread *main_thread = AsanThread::Create(
- /* start_routine */ nullptr, /* arg */ nullptr, /* parent_tid */ kMainTid,
+ /* parent_tid */ kMainTid,
/* stack */ nullptr, /* detached */ true);
SetCurrentThread(main_thread);
main_thread->ThreadStart(internal_getpid());
@@ -306,7 +305,8 @@ void AsanThread::SetThreadStackAndTls(const InitOptions *options) {
uptr stack_size = 0;
GetThreadStackAndTls(tid() == kMainTid, &stack_bottom_, &stack_size,
&tls_begin_, &tls_size);
- stack_top_ = RoundDownTo(stack_bottom_ + stack_size, SHADOW_GRANULARITY);
+ stack_top_ = RoundDownTo(stack_bottom_ + stack_size, ASAN_SHADOW_GRANULARITY);
+ stack_bottom_ = RoundDownTo(stack_bottom_, ASAN_SHADOW_GRANULARITY);
tls_end_ = tls_begin_ + tls_size;
dtls_ = DTLS_Get();
@@ -322,11 +322,9 @@ void AsanThread::ClearShadowForThreadStackAndTLS() {
if (stack_top_ != stack_bottom_)
PoisonShadow(stack_bottom_, stack_top_ - stack_bottom_, 0);
if (tls_begin_ != tls_end_) {
- uptr tls_begin_aligned = RoundDownTo(tls_begin_, SHADOW_GRANULARITY);
- uptr tls_end_aligned = RoundUpTo(tls_end_, SHADOW_GRANULARITY);
- FastPoisonShadowPartialRightRedzone(tls_begin_aligned,
- tls_end_ - tls_begin_aligned,
- tls_end_aligned - tls_end_, 0);
+ uptr tls_begin_aligned = RoundDownTo(tls_begin_, ASAN_SHADOW_GRANULARITY);
+ uptr tls_end_aligned = RoundUpTo(tls_end_, ASAN_SHADOW_GRANULARITY);
+ FastPoisonShadow(tls_begin_aligned, tls_end_aligned - tls_begin_aligned, 0);
}
}
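Editorial sketch: the TLS range is first widened to shadow-granule boundaries (8 bytes under the default mapping), after which whole shadow bytes can be cleared with FastPoisonShadow. A worked example of the rounding, assuming an 8-byte granule and made-up bounds:

#include <cstdio>

using uptr = unsigned long;
constexpr uptr kGranularity = 8;  // assumed ASAN_SHADOW_GRANULARITY

constexpr uptr RoundDownTo(uptr x, uptr b) { return x & ~(b - 1); }
constexpr uptr RoundUpTo(uptr x, uptr b) { return RoundDownTo(x + b - 1, b); }

int main() {
  uptr tls_begin = 0x1005, tls_end = 0x1033;  // hypothetical, misaligned
  // Widened range covers whole granules: prints [0x1000, 0x1038)
  std::printf("[0x%lx, 0x%lx)\n", RoundDownTo(tls_begin, kGranularity),
              RoundUpTo(tls_end, kGranularity));
}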
@@ -342,36 +340,36 @@ bool AsanThread::GetStackFrameAccessByAddr(uptr addr,
bottom = fake_stack->AddrIsInFakeStack(addr);
CHECK(bottom);
access->offset = addr - bottom;
- access->frame_pc = ((uptr*)bottom)[2];
- access->frame_descr = (const char *)((uptr*)bottom)[1];
+ access->frame_pc = ((uptr *)bottom)[2];
+ access->frame_descr = (const char *)((uptr *)bottom)[1];
return true;
}
uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8); // align addr.
- uptr mem_ptr = RoundDownTo(aligned_addr, SHADOW_GRANULARITY);
- u8 *shadow_ptr = (u8*)MemToShadow(aligned_addr);
- u8 *shadow_bottom = (u8*)MemToShadow(bottom);
+ uptr mem_ptr = RoundDownTo(aligned_addr, ASAN_SHADOW_GRANULARITY);
+ u8 *shadow_ptr = (u8 *)MemToShadow(aligned_addr);
+ u8 *shadow_bottom = (u8 *)MemToShadow(bottom);
while (shadow_ptr >= shadow_bottom &&
*shadow_ptr != kAsanStackLeftRedzoneMagic) {
shadow_ptr--;
- mem_ptr -= SHADOW_GRANULARITY;
+ mem_ptr -= ASAN_SHADOW_GRANULARITY;
}
while (shadow_ptr >= shadow_bottom &&
*shadow_ptr == kAsanStackLeftRedzoneMagic) {
shadow_ptr--;
- mem_ptr -= SHADOW_GRANULARITY;
+ mem_ptr -= ASAN_SHADOW_GRANULARITY;
}
if (shadow_ptr < shadow_bottom) {
return false;
}
- uptr* ptr = (uptr*)(mem_ptr + SHADOW_GRANULARITY);
+ uptr *ptr = (uptr *)(mem_ptr + ASAN_SHADOW_GRANULARITY);
CHECK(ptr[0] == kCurrentStackFrameMagic);
access->offset = addr - (uptr)ptr;
access->frame_pc = ptr[2];
- access->frame_descr = (const char*)ptr[1];
+ access->frame_descr = (const char *)ptr[1];
return true;
}
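Editorial sketch: the scan walks shadow bytes downward from the access, first past the frame body and then past the left redzone, so the granule just above the final position is the start of the redzone, where the frame's magic, descriptor and PC words live. A toy version of the two loops over an in-memory shadow array (values invented):

#include <cstdint>
#include <cstdio>

constexpr uint8_t kLeftRedzone = 0xf1;  // kAsanStackLeftRedzoneMagic

int main() {
  // One byte per 8-byte granule: frame body is 0, left redzone is 0xf1.
  uint8_t shadow[] = {0, 0, 0xf1, 0xf1, 0, 0, 0, 0};
  int i = 7;                                         // granule of the access
  while (i >= 0 && shadow[i] != kLeftRedzone) i--;   // skip the frame body
  while (i >= 0 && shadow[i] == kLeftRedzone) i--;   // skip the redzone
  // i + 1 is the first redzone granule; its memory holds the frame header.
  std::printf("frame header at granule %d\n", i + 1);  // prints 2
}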
@@ -389,8 +387,8 @@ uptr AsanThread::GetStackVariableShadowStart(uptr addr) {
}
uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8); // align addr.
- u8 *shadow_ptr = (u8*)MemToShadow(aligned_addr);
- u8 *shadow_bottom = (u8*)MemToShadow(bottom);
+ u8 *shadow_ptr = (u8 *)MemToShadow(aligned_addr);
+ u8 *shadow_bottom = (u8 *)MemToShadow(bottom);
while (shadow_ptr >= shadow_bottom &&
(*shadow_ptr != kAsanStackLeftRedzoneMagic &&
@@ -443,7 +441,7 @@ AsanThread *GetCurrentThread() {
void SetCurrentThread(AsanThread *t) {
CHECK(t->context());
- VReport(2, "SetCurrentThread: %p for thread %p\n", t->context(),
+ VReport(2, "SetCurrentThread: %p for thread %p\n", (void *)t->context(),
(void *)GetThreadSelf());
// Make sure we do not reset the current AsanThread.
CHECK_EQ(0, AsanTSDGet());
@@ -474,18 +472,37 @@ void EnsureMainThreadIDIsCorrect() {
__asan::AsanThread *GetAsanThreadByOsIDLocked(tid_t os_id) {
__asan::AsanThreadContext *context = static_cast<__asan::AsanThreadContext *>(
__asan::asanThreadRegistry().FindThreadContextByOsIDLocked(os_id));
- if (!context) return nullptr;
+ if (!context)
+ return nullptr;
return context->thread;
}
-} // namespace __asan
+} // namespace __asan
// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
+void LockThreads() {
+ __asan::asanThreadRegistry().Lock();
+ __asan::asanThreadArgRetval().Lock();
+}
+
+void UnlockThreads() {
+ __asan::asanThreadArgRetval().Unlock();
+ __asan::asanThreadRegistry().Unlock();
+}
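Editorial sketch: LockThreads() takes the registry lock before the arg-retval lock, and UnlockThreads() releases them in the reverse order, the usual discipline for avoiding lock-order inversions. The same shape with standard mutexes:

#include <mutex>

std::mutex registry_mu, arg_retval_mu;

// Fixed acquisition order, reversed release order: no path that follows
// this discipline can deadlock on the pair.
void LockPair() { registry_mu.lock(); arg_retval_mu.lock(); }
void UnlockPair() { arg_retval_mu.unlock(); registry_mu.unlock(); }

int main() {
  LockPair();
  UnlockPair();
}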
+
+static ThreadRegistry *GetAsanThreadRegistryLocked() {
+ __asan::asanThreadRegistry().CheckLocked();
+ return &__asan::asanThreadRegistry();
+}
+
+void EnsureMainThreadIDIsCorrect() { __asan::EnsureMainThreadIDIsCorrect(); }
+
bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
uptr *cache_end, DTLS **dtls) {
__asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
- if (!t) return false;
+ if (!t)
+ return false;
*stack_begin = t->stack_bottom();
*stack_end = t->stack_top();
*tls_begin = t->tls_begin();
@@ -499,34 +516,47 @@ bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches) {}
-void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
- void *arg) {
+void GetThreadExtraStackRangesLocked(tid_t os_id,
+ InternalMmapVector<Range> *ranges) {
__asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
if (!t)
return;
__asan::FakeStack *fake_stack = t->get_fake_stack();
if (!fake_stack)
return;
- fake_stack->ForEachFakeFrame(callback, arg);
-}
-void LockThreadRegistry() {
- __asan::asanThreadRegistry().Lock();
+ fake_stack->ForEachFakeFrame(
+ [](uptr begin, uptr end, void *arg) {
+ reinterpret_cast<InternalMmapVector<Range> *>(arg)->push_back(
+ {begin, end});
+ },
+ ranges);
}
-void UnlockThreadRegistry() {
- __asan::asanThreadRegistry().Unlock();
+void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges) {
+ GetAsanThreadRegistryLocked()->RunCallbackForEachThreadLocked(
+ [](ThreadContextBase *tctx, void *arg) {
+ GetThreadExtraStackRangesLocked(
+ tctx->os_id, reinterpret_cast<InternalMmapVector<Range> *>(arg));
+ },
+ ranges);
}
-ThreadRegistry *GetThreadRegistryLocked() {
- __asan::asanThreadRegistry().CheckLocked();
- return &__asan::asanThreadRegistry();
+void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {
+ __asan::asanThreadArgRetval().GetAllPtrsLocked(ptrs);
}
-void EnsureMainThreadIDIsCorrect() {
- __asan::EnsureMainThreadIDIsCorrect();
+void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) {
+ GetAsanThreadRegistryLocked()->RunCallbackForEachThreadLocked(
+ [](ThreadContextBase *tctx, void *threads) {
+ if (tctx->status == ThreadStatusRunning)
+ reinterpret_cast<InternalMmapVector<tid_t> *>(threads)->push_back(
+ tctx->os_id);
+ },
+ threads);
}
-} // namespace __lsan
+
+} // namespace __lsan
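Editorial sketch: several of the new LSan hooks funnel a C-style callback (function pointer plus void *arg) into a typed container by passing the container through the arg slot and casting it back inside a captureless lambda. A standalone sketch of that adapter pattern with standard types (the producer below is hypothetical, standing in for FakeStack::ForEachFakeFrame):

#include <cstdio>
#include <vector>

struct Range { unsigned long begin, end; };

using RangeCallback = void (*)(unsigned long, unsigned long, void *);

static void ForEachFrame(RangeCallback cb, void *arg) {
  cb(0x1000, 0x1040, arg);
  cb(0x2000, 0x2080, arg);
}

int main() {
  std::vector<Range> ranges;
  ForEachFrame(
      [](unsigned long begin, unsigned long end, void *arg) {
        // A captureless lambda decays to a plain function pointer.
        static_cast<std::vector<Range> *>(arg)->push_back({begin, end});
      },
      &ranges);
  std::printf("%zu ranges\n", ranges.size());
}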
// ---------------------- Interface ---------------- {{{1
using namespace __asan;
@@ -540,20 +570,18 @@ void __sanitizer_start_switch_fiber(void **fakestacksave, const void *bottom,
VReport(1, "__asan_start_switch_fiber called from unknown thread\n");
return;
}
- t->StartSwitchFiber((FakeStack**)fakestacksave, (uptr)bottom, size);
+ t->StartSwitchFiber((FakeStack **)fakestacksave, (uptr)bottom, size);
}
SANITIZER_INTERFACE_ATTRIBUTE
-void __sanitizer_finish_switch_fiber(void* fakestack,
- const void **bottom_old,
+void __sanitizer_finish_switch_fiber(void *fakestack, const void **bottom_old,
uptr *size_old) {
AsanThread *t = GetCurrentThread();
if (!t) {
VReport(1, "__asan_finish_switch_fiber called from unknown thread\n");
return;
}
- t->FinishSwitchFiber((FakeStack*)fakestack,
- (uptr*)bottom_old,
- (uptr*)size_old);
+ t->FinishSwitchFiber((FakeStack *)fakestack, (uptr *)bottom_old,
+ (uptr *)size_old);
}
}
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_thread.h b/contrib/llvm-project/compiler-rt/lib/asan/asan_thread.h
index 801a3960ec6c..62f1b5337fe4 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_thread.h
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_thread.h
@@ -15,11 +15,12 @@
#define ASAN_THREAD_H
#include "asan_allocator.h"
-#include "asan_internal.h"
#include "asan_fake_stack.h"
+#include "asan_internal.h"
#include "asan_stats.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_thread_arg_retval.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
namespace __sanitizer {
@@ -55,18 +56,32 @@ class AsanThreadContext final : public ThreadContextBase {
// AsanThreadContext objects are never freed, so we need many of them.
COMPILER_CHECK(sizeof(AsanThreadContext) <= 256);
+#if defined(_MSC_VER) && !defined(__clang__)
+// MSVC raises a warning about a nonstandard extension being used for the 0
+// sized element in this array. Disable this for warn-as-error builds.
+# pragma warning(push)
+# pragma warning(disable : 4200)
+#endif
+
// AsanThread are stored in TSD and destroyed when the thread dies.
class AsanThread {
public:
- static AsanThread *Create(thread_callback_t start_routine, void *arg,
- u32 parent_tid, StackTrace *stack, bool detached);
+ template <typename T>
+ static AsanThread *Create(const T &data, u32 parent_tid, StackTrace *stack,
+ bool detached) {
+ return Create(&data, sizeof(data), parent_tid, stack, detached);
+ }
+ static AsanThread *Create(u32 parent_tid, StackTrace *stack, bool detached) {
+ return Create(nullptr, 0, parent_tid, stack, detached);
+ }
static void TSDDtor(void *tsd);
void Destroy();
struct InitOptions;
void Init(const InitOptions *options = nullptr);
- thread_return_t ThreadStart(tid_t os_id);
+ void ThreadStart(tid_t os_id);
+ thread_return_t RunThread();
uptr stack_top();
uptr stack_bottom();
@@ -129,12 +144,18 @@ class AsanThread {
void *extra_spill_area() { return &extra_spill_area_; }
- void *get_arg() { return arg_; }
+ template <typename T>
+ void GetStartData(T &data) const {
+ GetStartData(&data, sizeof(data));
+ }
private:
// NOTE: There is no AsanThread constructor. It is allocated
// via mmap() and *must* be valid in zero-initialized state.
+ static AsanThread *Create(const void *start_data, uptr data_size,
+ u32 parent_tid, StackTrace *stack, bool detached);
+
void SetThreadStackAndTls(const InitOptions *options);
void ClearShadowForThreadStackAndTLS();
@@ -146,9 +167,9 @@ class AsanThread {
};
StackBounds GetStackBounds() const;
+ void GetStartData(void *out, uptr out_size) const;
+
AsanThreadContext *context_;
- thread_callback_t start_routine_;
- void *arg_;
uptr stack_top_;
uptr stack_bottom_;
@@ -167,10 +188,17 @@ class AsanThread {
AsanStats stats_;
bool unwinding_;
uptr extra_spill_area_;
+
+ char start_data_[];
};
+#if defined(_MSC_VER) && !defined(__clang__)
+# pragma warning(pop)
+#endif
+
// Returns a single instance of registry.
ThreadRegistry &asanThreadRegistry();
+ThreadArgRetval &asanThreadArgRetval();
// Must be called under ThreadRegistryLock.
AsanThreadContext *GetThreadContextByTidLocked(u32 tid);
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_win.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_win.cpp
index 1577c83cf994..8507e675684e 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_win.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_win.cpp
@@ -1,4 +1,5 @@
-//===-- asan_win.cpp ------------------------------------------------------===//
+//===-- asan_win.cpp
+//------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -13,21 +14,20 @@
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_WINDOWS
-#define WIN32_LEAN_AND_MEAN
-#include <windows.h>
-
-#include <stdlib.h>
-
-#include "asan_interceptors.h"
-#include "asan_internal.h"
-#include "asan_mapping.h"
-#include "asan_report.h"
-#include "asan_stack.h"
-#include "asan_thread.h"
-#include "sanitizer_common/sanitizer_libc.h"
-#include "sanitizer_common/sanitizer_mutex.h"
-#include "sanitizer_common/sanitizer_win.h"
-#include "sanitizer_common/sanitizer_win_defs.h"
+# define WIN32_LEAN_AND_MEAN
+# include <stdlib.h>
+# include <windows.h>
+
+# include "asan_interceptors.h"
+# include "asan_internal.h"
+# include "asan_mapping.h"
+# include "asan_report.h"
+# include "asan_stack.h"
+# include "asan_thread.h"
+# include "sanitizer_common/sanitizer_libc.h"
+# include "sanitizer_common/sanitizer_mutex.h"
+# include "sanitizer_common/sanitizer_win.h"
+# include "sanitizer_common/sanitizer_win_defs.h"
using namespace __asan;
@@ -49,8 +49,8 @@ uptr __asan_get_shadow_memory_dynamic_address() {
static LPTOP_LEVEL_EXCEPTION_FILTER default_seh_handler;
static LPTOP_LEVEL_EXCEPTION_FILTER user_seh_handler;
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE
-long __asan_unhandled_exception_filter(EXCEPTION_POINTERS *info) {
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE long __asan_unhandled_exception_filter(
+ EXCEPTION_POINTERS *info) {
EXCEPTION_RECORD *exception_record = info->ExceptionRecord;
CONTEXT *context = info->ContextRecord;
@@ -131,10 +131,22 @@ INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) {
}
#endif
+struct ThreadStartParams {
+ thread_callback_t start_routine;
+ void *arg;
+};
+
static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
AsanThread *t = (AsanThread *)arg;
SetCurrentThread(t);
- return t->ThreadStart(GetTid());
+ t->ThreadStart(GetTid());
+
+ ThreadStartParams params;
+ t->GetStartData(params);
+
+ auto res = (*params.start_routine)(params.arg);
+ t->Destroy(); // POSIX calls this from TSD destructor.
+ return res;
}
INTERCEPTOR_WINAPI(HANDLE, CreateThread, LPSECURITY_ATTRIBUTES security,
@@ -148,8 +160,8 @@ INTERCEPTOR_WINAPI(HANDLE, CreateThread, LPSECURITY_ATTRIBUTES security,
// one. This is a bandaid fix for PR22025.
bool detached = false; // FIXME: how can we determine it on Windows?
u32 current_tid = GetCurrentTidOrInvalid();
- AsanThread *t =
- AsanThread::Create(start_routine, arg, current_tid, &stack, detached);
+ ThreadStartParams params = {start_routine, arg};
+ AsanThread *t = AsanThread::Create(params, current_tid, &stack, detached);
return REAL(CreateThread)(security, stack_size, asan_thread_start, t,
thr_flags, tid);
}
@@ -159,6 +171,8 @@ INTERCEPTOR_WINAPI(HANDLE, CreateThread, LPSECURITY_ATTRIBUTES security,
namespace __asan {
void InitializePlatformInterceptors() {
+ __interception::SetErrorReportCallback(Report);
+
// The interceptors were not designed to be removable, so we have to keep this
// module alive for the life of the process.
HMODULE pinned;
@@ -187,14 +201,21 @@ void InitializePlatformInterceptors() {
}
}
+void InstallAtExitCheckLeaks() {}
+
+void InstallAtForkHandler() {}
+
void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
UNIMPLEMENTED();
}
void FlushUnneededASanShadowMemory(uptr p, uptr size) {
+ // Only asan on 64-bit Windows supports committing shadow memory on demand.
+#if SANITIZER_WINDOWS64
// Since asan's mapping is compacting, the shadow chunk may be
// not page-aligned, so we only flush the page-aligned portion.
ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
+#endif
}
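Editorial sketch: with ASan's compacting mapping, a region of application memory shrinks by a factor of 2^SHADOW_SCALE in shadow, so only the page-aligned interior of the shadow interval can actually be returned to the OS. Back-of-the-envelope arithmetic under an assumed 8:1 mapping and 4 KiB pages (the shadow offset is ignored for simplicity):

#include <cstdio>

using uptr = unsigned long;

int main() {
  const uptr kShadowScale = 3, kPage = 4096;
  uptr p = 0x140000000, size = 1 << 20;  // hypothetical 1 MiB region
  uptr sbeg = p >> kShadowScale, send = (p + size) >> kShadowScale;
  uptr beg = (sbeg + kPage - 1) & ~(kPage - 1);  // round up to a page
  uptr end = send & ~(kPage - 1);                // round down to a page
  std::printf("can release %lu KiB of shadow\n", (end - beg) / 1024);  // 128
}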
// ---------------------- TSD ---------------- {{{
@@ -245,15 +266,10 @@ void PlatformTSDDtor(void *tsd) { AsanThread::TSDDtor(tsd); }
// }}}
// ---------------------- Various stuff ---------------- {{{
-void *AsanDoesNotSupportStaticLinkage() {
-#if defined(_DEBUG)
-#error Please build the runtime with a non-debug CRT: /MD or /MT
-#endif
- return 0;
-}
+void *AsanDoesNotSupportStaticLinkage() { return 0; }
uptr FindDynamicShadowStart() {
- return MapDynamicShadow(MemToShadowSize(kHighMemEnd), SHADOW_SCALE,
+ return MapDynamicShadow(MemToShadowSize(kHighMemEnd), ASAN_SHADOW_SCALE,
/*min_shadow_base_alignment*/ 0, kHighMemEnd);
}
@@ -261,10 +277,6 @@ void AsanCheckDynamicRTPrereqs() {}
void AsanCheckIncompatibleRT() {}
-void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
- UNIMPLEMENTED();
-}
-
void AsanOnDeadlySignal(int, void *siginfo, void *context) { UNIMPLEMENTED(); }
bool PlatformUnpoisonStacks() { return false; }
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/asan_win_dll_thunk.cpp b/contrib/llvm-project/compiler-rt/lib/asan/asan_win_dll_thunk.cpp
index a5671cc9dffd..0fa636bec0d0 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/asan_win_dll_thunk.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/asan/asan_win_dll_thunk.cpp
@@ -56,8 +56,16 @@ INTERCEPT_WRAP_W_W(_expand_dbg)
// TODO(timurrrr): Do we need to add _Crt* stuff here? (see asan_malloc_win.cpp)
+# if defined(_MSC_VER) && !defined(__clang__)
+// Disable warnings such as: 'void memchr(void)': incorrect number of arguments
+// for intrinsic function, expected '3' arguments.
+# pragma warning(push)
+# pragma warning(disable : 4392)
+# endif
+
INTERCEPT_LIBRARY_FUNCTION(atoi);
INTERCEPT_LIBRARY_FUNCTION(atol);
+INTERCEPT_LIBRARY_FUNCTION(atoll);
INTERCEPT_LIBRARY_FUNCTION(frexp);
INTERCEPT_LIBRARY_FUNCTION(longjmp);
#if SANITIZER_INTERCEPT_MEMCHR
@@ -84,9 +92,14 @@ INTERCEPT_LIBRARY_FUNCTION(strspn);
INTERCEPT_LIBRARY_FUNCTION(strstr);
INTERCEPT_LIBRARY_FUNCTION(strtok);
INTERCEPT_LIBRARY_FUNCTION(strtol);
+INTERCEPT_LIBRARY_FUNCTION(strtoll);
INTERCEPT_LIBRARY_FUNCTION(wcslen);
INTERCEPT_LIBRARY_FUNCTION(wcsnlen);
+# if defined(_MSC_VER) && !defined(__clang__)
+# pragma warning(pop)
+# endif
+
#ifdef _WIN64
INTERCEPT_LIBRARY_FUNCTION(__C_specific_handler);
#else
diff --git a/contrib/llvm-project/compiler-rt/lib/asan/weak_symbols.txt b/contrib/llvm-project/compiler-rt/lib/asan/weak_symbols.txt
index fe680f8a9a4f..b087f4f78e23 100644
--- a/contrib/llvm-project/compiler-rt/lib/asan/weak_symbols.txt
+++ b/contrib/llvm-project/compiler-rt/lib/asan/weak_symbols.txt
@@ -2,6 +2,13 @@ ___asan_default_options
___asan_default_suppressions
___asan_on_error
___asan_set_shadow_00
+___asan_set_shadow_01
+___asan_set_shadow_02
+___asan_set_shadow_03
+___asan_set_shadow_04
+___asan_set_shadow_05
+___asan_set_shadow_06
+___asan_set_shadow_07
___asan_set_shadow_f1
___asan_set_shadow_f2
___asan_set_shadow_f3
diff --git a/contrib/llvm-project/compiler-rt/lib/asan_abi/asan_abi.cpp b/contrib/llvm-project/compiler-rt/lib/asan_abi/asan_abi.cpp
new file mode 100644
index 000000000000..cf8663024eb7
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/asan_abi/asan_abi.cpp
@@ -0,0 +1,89 @@
+//===-asan_abi.cpp - ASan Stable ABI---------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "asan_abi.h"
+
+extern "C" {
+// Functions concerning instrumented global variables:
+void __asan_abi_register_image_globals(void) {}
+void __asan_abi_unregister_image_globals(void) {}
+void __asan_abi_register_elf_globals(bool *flag, void *start, void *stop) {}
+void __asan_abi_unregister_elf_globals(bool *flag, void *start, void *stop) {}
+void __asan_abi_register_globals(void *globals, size_t n) {}
+void __asan_abi_unregister_globals(void *globals, size_t n) {}
+
+// Functions concerning dynamic library initialization
+void __asan_abi_before_dynamic_init(const char *module_name) {}
+void __asan_abi_after_dynamic_init(void) {}
+
+// Functions concerning block memory destinations
+void *__asan_abi_memcpy(void *d, const void *s, size_t n) { return NULL; }
+void *__asan_abi_memmove(void *d, const void *s, size_t n) { return NULL; }
+void *__asan_abi_memset(void *p, int c, size_t n) { return NULL; }
+
+// Functions concerning RTL startup and initialization
+void __asan_abi_init(void) {}
+void __asan_abi_handle_no_return(void) {}
+
+// Functions concerning memory load and store reporting
+void __asan_abi_report_load_n(void *p, size_t n, bool abort) {}
+void __asan_abi_report_exp_load_n(void *p, size_t n, int exp, bool abort) {}
+void __asan_abi_report_store_n(void *p, size_t n, bool abort) {}
+void __asan_abi_report_exp_store_n(void *p, size_t n, int exp, bool abort) {}
+
+// Functions concerning memory load and store
+void __asan_abi_load_n(void *p, size_t n, bool abort) {}
+void __asan_abi_exp_load_n(void *p, size_t n, int exp, bool abort) {}
+void __asan_abi_store_n(void *p, size_t n, bool abort) {}
+void __asan_abi_exp_store_n(void *p, size_t n, int exp, bool abort) {}
+
+// Functions concerning query about whether memory is poisoned
+int __asan_abi_address_is_poisoned(void const volatile *p) { return 0; }
+void *__asan_abi_region_is_poisoned(void const volatile *p, size_t size) {
+ return NULL;
+}
+
+// Functions concerning the poisoning of memory
+void __asan_abi_poison_memory_region(void const volatile *p, size_t n) {}
+void __asan_abi_unpoison_memory_region(void const volatile *p, size_t n) {}
+
+// Functions concerning the partial poisoning of memory
+void __asan_abi_set_shadow_xx_n(void *p, unsigned char xx, size_t n) {}
+
+// Functions concerning stack poisoning
+void __asan_abi_poison_stack_memory(void *p, size_t n) {}
+void __asan_abi_unpoison_stack_memory(void *p, size_t n) {}
+
+// Functions concerning redzone poisoning
+void __asan_abi_poison_intra_object_redzone(void *p, size_t size) {}
+void __asan_abi_unpoison_intra_object_redzone(void *p, size_t size) {}
+
+// Functions concerning array cookie poisoning
+void __asan_abi_poison_cxx_array_cookie(void *p) {}
+void *__asan_abi_load_cxx_array_cookie(void **p) { return NULL; }
+
+// Functions concerning fake stacks
+void *__asan_abi_get_current_fake_stack(void) { return NULL; }
+void *__asan_abi_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
+ void **end) {
+ return NULL;
+}
+
+// Functions concerning poisoning and unpoisoning fake stack alloca
+void __asan_abi_alloca_poison(void *addr, size_t size) {}
+void __asan_abi_allocas_unpoison(void *top, void *bottom) {}
+
+// Functions concerning fake stack malloc
+void *__asan_abi_stack_malloc_n(size_t scale, size_t size) { return NULL; }
+void *__asan_abi_stack_malloc_always_n(size_t scale, size_t size) {
+ return NULL;
+}
+
+// Functions concerning fake stack free
+void __asan_abi_stack_free_n(int scale, void *p, size_t n) {}
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/asan_abi/asan_abi.h b/contrib/llvm-project/compiler-rt/lib/asan_abi/asan_abi.h
new file mode 100644
index 000000000000..8702bcd13391
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/asan_abi/asan_abi.h
@@ -0,0 +1,91 @@
+//===-asan_abi.h - ASan Stable ABI Interface-------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef ASAN_ABI_H
+#define ASAN_ABI_H
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <sys/cdefs.h>
+#include <sys/types.h>
+
+__BEGIN_DECLS
+
+// Functions concerning instrumented global variables:
+void __asan_abi_register_image_globals();
+void __asan_abi_unregister_image_globals();
+void __asan_abi_register_elf_globals(bool *flag, void *start, void *stop);
+void __asan_abi_unregister_elf_globals(bool *flag, void *start, void *stop);
+void __asan_abi_register_globals(void *globals, size_t n);
+void __asan_abi_unregister_globals(void *globals, size_t n);
+
+// Functions concerning dynamic library initialization
+void __asan_abi_before_dynamic_init(const char *module_name);
+void __asan_abi_after_dynamic_init();
+
+// Functions concerning block memory destinations
+void *__asan_abi_memcpy(void *d, const void *s, size_t n);
+void *__asan_abi_memmove(void *d, const void *s, size_t n);
+void *__asan_abi_memset(void *p, int c, size_t n);
+
+// Functions concerning RTL startup and initialization
+void __asan_abi_init();
+void __asan_abi_handle_no_return();
+
+// Functions concerning memory load and store reporting
+void __asan_abi_report_load_n(void *p, size_t n, bool abort);
+void __asan_abi_report_exp_load_n(void *p, size_t n, int exp, bool abort);
+void __asan_abi_report_store_n(void *p, size_t n, bool abort);
+void __asan_abi_report_exp_store_n(void *p, size_t n, int exp, bool abort);
+
+// Functions concerning memory load and store
+void __asan_abi_load_n(void *p, size_t n, bool abort);
+void __asan_abi_exp_load_n(void *p, size_t n, int exp, bool abort);
+void __asan_abi_store_n(void *p, size_t n, bool abort);
+void __asan_abi_exp_store_n(void *p, size_t n, int exp, bool abort);
+
+// Functions concerning query about whether memory is poisoned
+int __asan_abi_address_is_poisoned(void const volatile *p);
+void *__asan_abi_region_is_poisoned(void const volatile *p, size_t size);
+
+// Functions concerning the poisoning of memory
+void __asan_abi_unpoison_memory_region(void const volatile *p, size_t n);
+void __asan_abi_poison_memory_region(void const volatile *p, size_t n);
+
+// Functions concerning the partial poisoning of memory
+void __asan_abi_set_shadow_xx_n(void *p, unsigned char xx, size_t n);
+
+// Functions concerning stack poisoning
+void __asan_abi_poison_stack_memory(void *p, size_t n);
+void __asan_abi_unpoison_stack_memory(void *p, size_t n);
+
+// Functions concerning redzone poisoning
+void __asan_abi_poison_intra_object_redzone(void *p, size_t size);
+void __asan_abi_unpoison_intra_object_redzone(void *p, size_t size);
+
+// Functions concerning array cookie poisoning
+void __asan_abi_poison_cxx_array_cookie(void *p);
+void *__asan_abi_load_cxx_array_cookie(void **p);
+
+// Functions concerning fake stacks
+void *__asan_abi_get_current_fake_stack();
+void *__asan_abi_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
+ void **end);
+// Functions concerning poisoning and unpoisoning fake stack alloca
+void __asan_abi_alloca_poison(void *addr, size_t size);
+void __asan_abi_allocas_unpoison(void *top, void *bottom);
+
+// Functions concerning fake stack malloc
+void *__asan_abi_stack_malloc_n(size_t scale, size_t size);
+void *__asan_abi_stack_malloc_always_n(size_t scale, size_t size);
+
+// Functions concerning fake stack free
+void __asan_abi_stack_free_n(int scale, void *p, size_t n);
+
+__END_DECLS
+#endif // ASAN_ABI_H
diff --git a/contrib/llvm-project/compiler-rt/lib/asan_abi/asan_abi_shim.cpp b/contrib/llvm-project/compiler-rt/lib/asan_abi/asan_abi_shim.cpp
new file mode 100644
index 000000000000..35c45dff96f6
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/asan_abi/asan_abi_shim.cpp
@@ -0,0 +1,481 @@
+//===-asan_abi_shim.cpp - ASan Stable ABI Shim-----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../asan/asan_interface_internal.h"
+#include "asan_abi.h"
+#include <assert.h>
+
+extern "C" {
+// Functions concerning instrumented global variables
+void __asan_register_image_globals(uptr *flag) {
+ __asan_abi_register_image_globals();
+}
+void __asan_unregister_image_globals(uptr *flag) {
+ __asan_abi_unregister_image_globals();
+}
+void __asan_register_elf_globals(uptr *flag, void *start, void *stop) {
+ bool bFlag = *flag;
+ __asan_abi_register_elf_globals(&bFlag, start, stop);
+ *flag = bFlag;
+}
+void __asan_unregister_elf_globals(uptr *flag, void *start, void *stop) {
+ bool bFlag = *flag;
+ __asan_abi_unregister_elf_globals(&bFlag, start, stop);
+ *flag = bFlag;
+}
+void __asan_register_globals(__asan_global *globals, uptr n) {
+ __asan_abi_register_globals(globals, n);
+}
+void __asan_unregister_globals(__asan_global *globals, uptr n) {
+ __asan_abi_unregister_globals(globals, n);
+}
+
+// Functions concerning dynamic library initialization
+void __asan_before_dynamic_init(const char *module_name) {
+ __asan_abi_before_dynamic_init(module_name);
+}
+void __asan_after_dynamic_init(void) { __asan_abi_after_dynamic_init(); }
+
+// Functions concerning block memory destinations
+void *__asan_memcpy(void *dst, const void *src, uptr size) {
+ return __asan_abi_memcpy(dst, src, size);
+}
+void *__asan_memset(void *s, int c, uptr n) {
+ return __asan_abi_memset(s, c, n);
+}
+void *__asan_memmove(void *dest, const void *src, uptr n) {
+ return __asan_abi_memmove(dest, src, n);
+}
+
+// Functions concerning RTL startup and initialization
+void __asan_init(void) {
+ static_assert(sizeof(uptr) == 8);
+ static_assert(sizeof(u64) == 8);
+ static_assert(sizeof(u32) == 4);
+
+ __asan_abi_init();
+}
+
+void __asan_handle_no_return(void) { __asan_abi_handle_no_return(); }
+
+// Variables concerning RTL state. These provisionally exist for completeness
+// but will likely move into the Stable ABI implementation rather than the shim.
+uptr __asan_shadow_memory_dynamic_address = (uptr)0L;
+int __asan_option_detect_stack_use_after_return = 1;
+
+// Functions concerning memory load and store reporting
+void __asan_report_load1(uptr addr) {
+ __asan_abi_report_load_n((void *)addr, 1, true);
+}
+void __asan_report_load2(uptr addr) {
+ __asan_abi_report_load_n((void *)addr, 2, true);
+}
+void __asan_report_load4(uptr addr) {
+ __asan_abi_report_load_n((void *)addr, 4, true);
+}
+void __asan_report_load8(uptr addr) {
+ __asan_abi_report_load_n((void *)addr, 8, true);
+}
+void __asan_report_load16(uptr addr) {
+ __asan_abi_report_load_n((void *)addr, 16, true);
+}
+void __asan_report_load_n(uptr addr, uptr size) {
+ __asan_abi_report_load_n((void *)addr, size, true);
+}
+void __asan_report_store1(uptr addr) {
+ __asan_abi_report_store_n((void *)addr, 1, true);
+}
+void __asan_report_store2(uptr addr) {
+ __asan_abi_report_store_n((void *)addr, 2, true);
+}
+void __asan_report_store4(uptr addr) {
+ __asan_abi_report_store_n((void *)addr, 4, true);
+}
+void __asan_report_store8(uptr addr) {
+ __asan_abi_report_store_n((void *)addr, 8, true);
+}
+void __asan_report_store16(uptr addr) {
+ __asan_abi_report_store_n((void *)addr, 16, true);
+}
+void __asan_report_store_n(uptr addr, uptr size) {
+ __asan_abi_report_store_n((void *)addr, size, true);
+}
+
+// Functions concerning memory load and store reporting (experimental variants)
+void __asan_report_exp_load1(uptr addr, u32 exp) {
+ __asan_abi_report_exp_load_n((void *)addr, 1, exp, true);
+}
+void __asan_report_exp_load2(uptr addr, u32 exp) {
+ __asan_abi_report_exp_load_n((void *)addr, 2, exp, true);
+}
+void __asan_report_exp_load4(uptr addr, u32 exp) {
+ __asan_abi_report_exp_load_n((void *)addr, 4, exp, true);
+}
+void __asan_report_exp_load8(uptr addr, u32 exp) {
+ __asan_abi_report_exp_load_n((void *)addr, 8, exp, true);
+}
+void __asan_report_exp_load16(uptr addr, u32 exp) {
+ __asan_abi_report_exp_load_n((void *)addr, 16, exp, true);
+}
+void __asan_report_exp_load_n(uptr addr, uptr size, u32 exp) {
+ __asan_abi_report_exp_load_n((void *)addr, size, exp, true);
+}
+void __asan_report_exp_store1(uptr addr, u32 exp) {
+ __asan_abi_report_exp_store_n((void *)addr, 1, exp, true);
+}
+void __asan_report_exp_store2(uptr addr, u32 exp) {
+ __asan_abi_report_exp_store_n((void *)addr, 2, exp, true);
+}
+void __asan_report_exp_store4(uptr addr, u32 exp) {
+ __asan_abi_report_exp_store_n((void *)addr, 4, exp, true);
+}
+void __asan_report_exp_store8(uptr addr, u32 exp) {
+ __asan_abi_report_exp_store_n((void *)addr, 8, exp, true);
+}
+void __asan_report_exp_store16(uptr addr, u32 exp) {
+ __asan_abi_report_exp_store_n((void *)addr, 16, exp, true);
+}
+void __asan_report_exp_store_n(uptr addr, uptr size, u32 exp) {
+ __asan_abi_report_exp_store_n((void *)addr, size, exp, true);
+}
+
+// Functions concerning memory load and store reporting (noabort variants)
+void __asan_report_load1_noabort(uptr addr) {
+ __asan_abi_report_load_n((void *)addr, 1, false);
+}
+void __asan_report_load2_noabort(uptr addr) {
+ __asan_abi_report_load_n((void *)addr, 2, false);
+}
+void __asan_report_load4_noabort(uptr addr) {
+ __asan_abi_report_load_n((void *)addr, 4, false);
+}
+void __asan_report_load8_noabort(uptr addr) {
+ __asan_abi_report_load_n((void *)addr, 8, false);
+}
+void __asan_report_load16_noabort(uptr addr) {
+ __asan_abi_report_load_n((void *)addr, 16, false);
+}
+void __asan_report_load_n_noabort(uptr addr, uptr size) {
+ __asan_abi_report_load_n((void *)addr, size, false);
+}
+void __asan_report_store1_noabort(uptr addr) {
+ __asan_abi_report_store_n((void *)addr, 1, false);
+}
+void __asan_report_store2_noabort(uptr addr) {
+ __asan_abi_report_store_n((void *)addr, 2, false);
+}
+void __asan_report_store4_noabort(uptr addr) {
+ __asan_abi_report_store_n((void *)addr, 4, false);
+}
+void __asan_report_store8_noabort(uptr addr) {
+ __asan_abi_report_store_n((void *)addr, 8, false);
+}
+void __asan_report_store16_noabort(uptr addr) {
+ __asan_abi_report_store_n((void *)addr, 16, false);
+}
+void __asan_report_store_n_noabort(uptr addr, uptr size) {
+ __asan_abi_report_store_n((void *)addr, size, false);
+}
+
+// Functions concerning memory load and store
+void __asan_load1(uptr addr) { __asan_abi_load_n((void *)addr, 1, true); }
+void __asan_load2(uptr addr) { __asan_abi_load_n((void *)addr, 2, true); }
+void __asan_load4(uptr addr) { __asan_abi_load_n((void *)addr, 4, true); }
+void __asan_load8(uptr addr) { __asan_abi_load_n((void *)addr, 8, true); }
+void __asan_load16(uptr addr) { __asan_abi_load_n((void *)addr, 16, true); }
+void __asan_loadN(uptr addr, uptr size) {
+ __asan_abi_load_n((void *)addr, size, true);
+}
+void __asan_store1(uptr addr) { __asan_abi_store_n((void *)addr, 1, true); }
+void __asan_store2(uptr addr) { __asan_abi_store_n((void *)addr, 2, true); }
+void __asan_store4(uptr addr) { __asan_abi_store_n((void *)addr, 4, true); }
+void __asan_store8(uptr addr) { __asan_abi_store_n((void *)addr, 8, true); }
+void __asan_store16(uptr addr) { __asan_abi_store_n((void *)addr, 16, true); }
+void __asan_storeN(uptr addr, uptr size) {
+ __asan_abi_store_n((void *)addr, size, true);
+}
+
+// Functions concerning memory load and store (experimental variants)
+void __asan_exp_load1(uptr addr, u32 exp) {
+ __asan_abi_exp_load_n((void *)addr, 1, exp, true);
+}
+void __asan_exp_load2(uptr addr, u32 exp) {
+ __asan_abi_exp_load_n((void *)addr, 2, exp, true);
+}
+void __asan_exp_load4(uptr addr, u32 exp) {
+ __asan_abi_exp_load_n((void *)addr, 4, exp, true);
+}
+void __asan_exp_load8(uptr addr, u32 exp) {
+ __asan_abi_exp_load_n((void *)addr, 8, exp, true);
+}
+void __asan_exp_load16(uptr addr, u32 exp) {
+ __asan_abi_exp_load_n((void *)addr, 16, exp, true);
+}
+void __asan_exp_loadN(uptr addr, uptr size, u32 exp) {
+ __asan_abi_exp_load_n((void *)addr, size, exp, true);
+}
+void __asan_exp_store1(uptr addr, u32 exp) {
+ __asan_abi_exp_store_n((void *)addr, 1, exp, true);
+}
+void __asan_exp_store2(uptr addr, u32 exp) {
+ __asan_abi_exp_store_n((void *)addr, 2, exp, true);
+}
+void __asan_exp_store4(uptr addr, u32 exp) {
+ __asan_abi_exp_store_n((void *)addr, 4, exp, true);
+}
+void __asan_exp_store8(uptr addr, u32 exp) {
+ __asan_abi_exp_store_n((void *)addr, 8, exp, true);
+}
+void __asan_exp_store16(uptr addr, u32 exp) {
+ __asan_abi_exp_store_n((void *)addr, 16, exp, true);
+}
+void __asan_exp_storeN(uptr addr, uptr size, u32 exp) {
+ __asan_abi_exp_store_n((void *)addr, size, exp, true);
+}
+
+// Functions concerning memory load and store (noabort variants)
+void __asan_load1_noabort(uptr addr) {
+ __asan_abi_load_n((void *)addr, 1, false);
+}
+void __asan_load2_noabort(uptr addr) {
+ __asan_abi_load_n((void *)addr, 2, false);
+}
+void __asan_load4_noabort(uptr addr) {
+ __asan_abi_load_n((void *)addr, 4, false);
+}
+void __asan_load8_noabort(uptr addr) {
+ __asan_abi_load_n((void *)addr, 8, false);
+}
+void __asan_load16_noabort(uptr addr) {
+ __asan_abi_load_n((void *)addr, 16, false);
+}
+void __asan_loadN_noabort(uptr addr, uptr size) {
+ __asan_abi_load_n((void *)addr, size, false);
+}
+void __asan_store1_noabort(uptr addr) {
+ __asan_abi_store_n((void *)addr, 1, false);
+}
+void __asan_store2_noabort(uptr addr) {
+ __asan_abi_store_n((void *)addr, 2, false);
+}
+void __asan_store4_noabort(uptr addr) {
+ __asan_abi_store_n((void *)addr, 4, false);
+}
+void __asan_store8_noabort(uptr addr) {
+ __asan_abi_store_n((void *)addr, 8, false);
+}
+void __asan_store16_noabort(uptr addr) {
+ __asan_abi_store_n((void *)addr, 16, false);
+}
+void __asan_storeN_noabort(uptr addr, uptr size) {
+ __asan_abi_store_n((void *)addr, size, false);
+}
+
+// Functions concerning query about whether memory is poisoned
+int __asan_address_is_poisoned(void const volatile *addr) {
+ return __asan_abi_address_is_poisoned(addr);
+}
+uptr __asan_region_is_poisoned(uptr beg, uptr size) {
+ return (uptr)__asan_abi_region_is_poisoned((void *)beg, size);
+}
+
+// Functions concerning the poisoning of memory
+void __asan_poison_memory_region(void const volatile *addr, uptr size) {
+ __asan_abi_poison_memory_region(addr, size);
+}
+void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
+ __asan_abi_unpoison_memory_region(addr, size);
+}
+
+// Functions concerning the partial poisoning of memory
+void __asan_set_shadow_00(uptr addr, uptr size) {
+ __asan_abi_set_shadow_xx_n((void *)addr, 0x00, size);
+}
+void __asan_set_shadow_01(uptr addr, uptr size) {
+ __asan_abi_set_shadow_xx_n((void *)addr, 0x01, size);
+}
+void __asan_set_shadow_02(uptr addr, uptr size) {
+ __asan_abi_set_shadow_xx_n((void *)addr, 0x02, size);
+}
+void __asan_set_shadow_03(uptr addr, uptr size) {
+ __asan_abi_set_shadow_xx_n((void *)addr, 0x03, size);
+}
+void __asan_set_shadow_04(uptr addr, uptr size) {
+ __asan_abi_set_shadow_xx_n((void *)addr, 0x04, size);
+}
+void __asan_set_shadow_05(uptr addr, uptr size) {
+ __asan_abi_set_shadow_xx_n((void *)addr, 0x05, size);
+}
+void __asan_set_shadow_06(uptr addr, uptr size) {
+ __asan_abi_set_shadow_xx_n((void *)addr, 0x06, size);
+}
+void __asan_set_shadow_07(uptr addr, uptr size) {
+ __asan_abi_set_shadow_xx_n((void *)addr, 0x07, size);
+}
+void __asan_set_shadow_f1(uptr addr, uptr size) {
+ __asan_abi_set_shadow_xx_n((void *)addr, 0xf1, size);
+}
+void __asan_set_shadow_f2(uptr addr, uptr size) {
+ __asan_abi_set_shadow_xx_n((void *)addr, 0xf2, size);
+}
+void __asan_set_shadow_f3(uptr addr, uptr size) {
+ __asan_abi_set_shadow_xx_n((void *)addr, 0xf3, size);
+}
+void __asan_set_shadow_f5(uptr addr, uptr size) {
+ __asan_abi_set_shadow_xx_n((void *)addr, 0xf5, size);
+}
+void __asan_set_shadow_f8(uptr addr, uptr size) {
+ __asan_abi_set_shadow_xx_n((void *)addr, 0xf8, size);
+}
+
+// Functions concerning stack poisoning
+void __asan_poison_stack_memory(uptr addr, uptr size) {
+ __asan_abi_poison_stack_memory((void *)addr, size);
+}
+void __asan_unpoison_stack_memory(uptr addr, uptr size) {
+ __asan_abi_unpoison_stack_memory((void *)addr, size);
+}
+
+// Functions concerning redzone poisoning
+void __asan_poison_intra_object_redzone(uptr p, uptr size) {
+ __asan_abi_poison_intra_object_redzone((void *)p, size);
+}
+void __asan_unpoison_intra_object_redzone(uptr p, uptr size) {
+ __asan_abi_unpoison_intra_object_redzone((void *)p, size);
+}
+
+// Functions concerning array cookie poisoning
+void __asan_poison_cxx_array_cookie(uptr p) {
+ __asan_abi_poison_cxx_array_cookie((void *)p);
+}
+uptr __asan_load_cxx_array_cookie(uptr *p) {
+ return (uptr)__asan_abi_load_cxx_array_cookie((void **)p);
+}
+
+// Functions concerning fake stacks
+void *__asan_get_current_fake_stack(void) {
+ return __asan_abi_get_current_fake_stack();
+}
+void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
+ void **end) {
+ return __asan_abi_addr_is_in_fake_stack(fake_stack, addr, beg, end);
+}
+
+// Functions concerning poisoning and unpoisoning fake stack alloca
+void __asan_alloca_poison(uptr addr, uptr size) {
+ __asan_abi_alloca_poison((void *)addr, size);
+}
+void __asan_allocas_unpoison(uptr top, uptr bottom) {
+ __asan_abi_allocas_unpoison((void *)top, (void *)bottom);
+}
+
+// Functions concerning fake stack malloc
+uptr __asan_stack_malloc_0(uptr size) {
+ return (uptr)__asan_abi_stack_malloc_n(0, size);
+}
+uptr __asan_stack_malloc_1(uptr size) {
+ return (uptr)__asan_abi_stack_malloc_n(1, size);
+}
+uptr __asan_stack_malloc_2(uptr size) {
+ return (uptr)__asan_abi_stack_malloc_n(2, size);
+}
+uptr __asan_stack_malloc_3(uptr size) {
+ return (uptr)__asan_abi_stack_malloc_n(3, size);
+}
+uptr __asan_stack_malloc_4(uptr size) {
+ return (uptr)__asan_abi_stack_malloc_n(4, size);
+}
+uptr __asan_stack_malloc_5(uptr size) {
+ return (uptr)__asan_abi_stack_malloc_n(5, size);
+}
+uptr __asan_stack_malloc_6(uptr size) {
+ return (uptr)__asan_abi_stack_malloc_n(6, size);
+}
+uptr __asan_stack_malloc_7(uptr size) {
+ return (uptr)__asan_abi_stack_malloc_n(7, size);
+}
+uptr __asan_stack_malloc_8(uptr size) {
+ return (uptr)__asan_abi_stack_malloc_n(8, size);
+}
+uptr __asan_stack_malloc_9(uptr size) {
+ return (uptr)__asan_abi_stack_malloc_n(9, size);
+}
+uptr __asan_stack_malloc_10(uptr size) {
+ return (uptr)__asan_abi_stack_malloc_n(10, size);
+}
+
+// Functions concerning fake stack malloc (always variants)
+uptr __asan_stack_malloc_always_0(uptr size) {
+ return (uptr)__asan_abi_stack_malloc_always_n(0, size);
+}
+uptr __asan_stack_malloc_always_1(uptr size) {
+ return (uptr)__asan_abi_stack_malloc_always_n(1, size);
+}
+uptr __asan_stack_malloc_always_2(uptr size) {
+ return (uptr)__asan_abi_stack_malloc_always_n(2, size);
+}
+uptr __asan_stack_malloc_always_3(uptr size) {
+ return (uptr)__asan_abi_stack_malloc_always_n(3, size);
+}
+uptr __asan_stack_malloc_always_4(uptr size) {
+ return (uptr)__asan_abi_stack_malloc_always_n(4, size);
+}
+uptr __asan_stack_malloc_always_5(uptr size) {
+ return (uptr)__asan_abi_stack_malloc_always_n(5, size);
+}
+uptr __asan_stack_malloc_always_6(uptr size) {
+ return (uptr)__asan_abi_stack_malloc_always_n(6, size);
+}
+uptr __asan_stack_malloc_always_7(uptr size) {
+ return (uptr)__asan_abi_stack_malloc_always_n(7, size);
+}
+uptr __asan_stack_malloc_always_8(uptr size) {
+ return (uptr)__asan_abi_stack_malloc_always_n(8, size);
+}
+uptr __asan_stack_malloc_always_9(uptr size) {
+ return (uptr)__asan_abi_stack_malloc_always_n(9, size);
+}
+uptr __asan_stack_malloc_always_10(uptr size) {
+ return (uptr)__asan_abi_stack_malloc_always_n(10, size);
+}
+
+// Functions concerning fake stack free
+void __asan_stack_free_0(uptr ptr, uptr size) {
+ __asan_abi_stack_free_n(0, (void *)ptr, size);
+}
+void __asan_stack_free_1(uptr ptr, uptr size) {
+ __asan_abi_stack_free_n(1, (void *)ptr, size);
+}
+void __asan_stack_free_2(uptr ptr, uptr size) {
+ __asan_abi_stack_free_n(2, (void *)ptr, size);
+}
+void __asan_stack_free_3(uptr ptr, uptr size) {
+ __asan_abi_stack_free_n(3, (void *)ptr, size);
+}
+void __asan_stack_free_4(uptr ptr, uptr size) {
+ __asan_abi_stack_free_n(4, (void *)ptr, size);
+}
+void __asan_stack_free_5(uptr ptr, uptr size) {
+ __asan_abi_stack_free_n(5, (void *)ptr, size);
+}
+void __asan_stack_free_6(uptr ptr, uptr size) {
+ __asan_abi_stack_free_n(6, (void *)ptr, size);
+}
+void __asan_stack_free_7(uptr ptr, uptr size) {
+ __asan_abi_stack_free_n(7, (void *)ptr, size);
+}
+void __asan_stack_free_8(uptr ptr, uptr size) {
+ __asan_abi_stack_free_n(8, (void *)ptr, size);
+}
+void __asan_stack_free_9(uptr ptr, uptr size) {
+ __asan_abi_stack_free_n(9, (void *)ptr, size);
+}
+void __asan_stack_free_10(uptr ptr, uptr size) {
+ __asan_abi_stack_free_n(10, (void *)ptr, size);
+}
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/asan_abi/asan_abi_tbd.txt b/contrib/llvm-project/compiler-rt/lib/asan_abi/asan_abi_tbd.txt
new file mode 100644
index 000000000000..a712093d7b21
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/asan_abi/asan_abi_tbd.txt
@@ -0,0 +1,22 @@
+__asan_default_options
+__asan_default_suppressions
+__asan_get_free_stack
+__asan_get_shadow_mapping
+__asan_handle_vfork
+__asan_locate_address
+__asan_on_error
+__asan_print_accumulated_stats
+__asan_set_death_callback
+__asan_update_allocation_context
+__asan_describe_address
+__asan_get_alloc_stack
+__asan_get_report_access_size
+__asan_get_report_access_type
+__asan_get_report_address
+__asan_get_report_bp
+__asan_get_report_description
+__asan_get_report_pc
+__asan_get_report_sp
+__asan_report_error
+__asan_report_present
+__asan_set_error_report_callback
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/README.txt b/contrib/llvm-project/compiler-rt/lib/builtins/README.txt
index d66d725e7ab5..2d213d95f333 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/README.txt
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/README.txt
@@ -35,13 +35,13 @@ typedef uint64_t du_int;
// Integral bit manipulation
-di_int __ashldi3(di_int a, si_int b); // a << b
-ti_int __ashlti3(ti_int a, si_int b); // a << b
+di_int __ashldi3(di_int a, int b); // a << b
+ti_int __ashlti3(ti_int a, int b); // a << b
-di_int __ashrdi3(di_int a, si_int b); // a >> b arithmetic (sign fill)
-ti_int __ashrti3(ti_int a, si_int b); // a >> b arithmetic (sign fill)
-di_int __lshrdi3(di_int a, si_int b); // a >> b logical (zero fill)
-ti_int __lshrti3(ti_int a, si_int b); // a >> b logical (zero fill)
+di_int __ashrdi3(di_int a, int b); // a >> b arithmetic (sign fill)
+ti_int __ashrti3(ti_int a, int b); // a >> b arithmetic (sign fill)
+di_int __lshrdi3(di_int a, int b); // a >> b logical (zero fill)
+ti_int __lshrti3(ti_int a, int b); // a >> b logical (zero fill)
int __clzsi2(si_int a); // count leading zeros
int __clzdi2(di_int a); // count leading zeros
@@ -137,49 +137,54 @@ si_int __ucmpti2(tu_int a, tu_int b);
di_int __fixsfdi( float a);
di_int __fixdfdi( double a);
di_int __fixxfdi(long double a);
+di_int __fixtfdi( tf_float a);
ti_int __fixsfti( float a);
ti_int __fixdfti( double a);
ti_int __fixxfti(long double a);
-uint64_t __fixtfdi(long double input); // ppc only, doesn't match documentation
+ti_int __fixtfti( tf_float a);
su_int __fixunssfsi( float a);
su_int __fixunsdfsi( double a);
su_int __fixunsxfsi(long double a);
+su_int __fixunstfsi( tf_float a);
du_int __fixunssfdi( float a);
du_int __fixunsdfdi( double a);
du_int __fixunsxfdi(long double a);
+du_int __fixunstfdi( tf_float a);
tu_int __fixunssfti( float a);
tu_int __fixunsdfti( double a);
tu_int __fixunsxfti(long double a);
-uint64_t __fixunstfdi(long double input); // ppc only
+tu_int __fixunstfti( tf_float a);
float __floatdisf(di_int a);
double __floatdidf(di_int a);
long double __floatdixf(di_int a);
-long double __floatditf(int64_t a); // ppc only
+tf_float __floatditf(int64_t a);
float __floattisf(ti_int a);
double __floattidf(ti_int a);
long double __floattixf(ti_int a);
+tf_float __floattitf(ti_int a);
float __floatundisf(du_int a);
double __floatundidf(du_int a);
long double __floatundixf(du_int a);
-long double __floatunditf(uint64_t a); // ppc only
+tf_float __floatunditf(du_int a);
float __floatuntisf(tu_int a);
double __floatuntidf(tu_int a);
long double __floatuntixf(tu_int a);
+tf_float __floatuntitf(tu_int a);
// Floating point raised to integer power
float __powisf2( float a, int b); // a ^ b
double __powidf2( double a, int b); // a ^ b
long double __powixf2(long double a, int b); // a ^ b
-long double __powitf2(long double a, int b); // ppc only, a ^ b
+tf_float __powitf2( tf_float a, int b); // a ^ b
// Complex arithmetic
@@ -189,8 +194,7 @@ long double __powitf2(long double a, int b); // ppc only, a ^ b
double _Complex __muldc3(double a, double b, double c, double d);
long double _Complex __mulxc3(long double a, long double b,
long double c, long double d);
-long double _Complex __multc3(long double a, long double b,
- long double c, long double d); // ppc only
+ tf_float _Complex __multc3(tf_float a, tf_float b, tf_float c, tf_float d);
// (a + ib) / (c + id)
@@ -198,8 +202,7 @@ long double _Complex __multc3(long double a, long double b,
double _Complex __divdc3(double a, double b, double c, double d);
long double _Complex __divxc3(long double a, long double b,
long double c, long double d);
-long double _Complex __divtc3(long double a, long double b,
- long double c, long double d); // ppc only
+ tf_float _Complex __divtc3(tf_float a, tf_float b, tf_float c, tf_float d);
// Runtime support
@@ -271,8 +274,8 @@ switchu8
// There is no C interface to the *_vfp_d8_d15_regs functions. They are
// called in the prolog and epilog of Thumb1 functions. When the C++ ABI uses
-// SJLJ for exceptions, each function with a catch clause or destuctors needs
-// to save and restore all registers in it prolog and epliog. But there is
+// SJLJ for exceptions, each function with a catch clause or destructors needs
+// to save and restore all registers in its prolog and epilog. But there is
// no way to access vector and high float registers from thumb1 code, so the
// compiler must add call outs to these helper functions in the prolog and
// epilog.
@@ -311,9 +314,9 @@ double __floatsidfvfp(int a); // Appears to convert from
float __floatsisfvfp(int a); // Appears to convert from
// int to float.
double __floatunssidfvfp(unsigned int a); // Appears to convert from
- // unisgned int to double.
+ // unsigned int to double.
float __floatunssisfvfp(unsigned int a); // Appears to convert from
- // unisgned int to float.
+ // unsigned int to float.
int __gedf2vfp(double a, double b); // Appears to return __gedf2
// (a >= b)
int __gesf2vfp(float a, float b); // Appears to return __gesf2
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/aarch64/fp_mode.c b/contrib/llvm-project/compiler-rt/lib/builtins/aarch64/fp_mode.c
index 94c2ff3bb26d..03d75cd8be66 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/aarch64/fp_mode.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/aarch64/fp_mode.c
@@ -27,7 +27,7 @@ CRT_FE_ROUND_MODE __attribute__((weak)) __aarch64_fe_default_rmode =
CRT_FE_TONEAREST;
#endif
-CRT_FE_ROUND_MODE __fe_getround() {
+CRT_FE_ROUND_MODE __fe_getround(void) {
#ifdef __ARM_FP
uint64_t fpcr;
__asm__ __volatile__("mrs %0, fpcr" : "=r" (fpcr));
@@ -48,7 +48,7 @@ CRT_FE_ROUND_MODE __fe_getround() {
#endif
}
-int __fe_raise_inexact() {
+int __fe_raise_inexact(void) {
#ifdef __ARM_FP
uint64_t fpsr;
__asm__ __volatile__("mrs %0, fpsr" : "=r" (fpsr));
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/aarch64/lse.S b/contrib/llvm-project/compiler-rt/lib/builtins/aarch64/lse.S
index 5dc0d5320b5a..1fe18f4a4681 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/aarch64/lse.S
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/aarch64/lse.S
@@ -7,7 +7,7 @@
// Out-of-line LSE atomics helpers. Ported from libgcc library.
// N = {1, 2, 4, 8}
// M = {1, 2, 4, 8, 16}
-// ORDER = {'relax', 'acq', 'rel', 'acq_rel'}
+// ORDER = {'relax', 'acq', 'rel', 'acq_rel', 'sync'}
// Routines implemented:
//
// iM __aarch64_casM_ORDER(iM expected, iM desired, iM *ptr)
@@ -35,8 +35,8 @@ HIDDEN(___aarch64_have_lse_atomics)
#endif
// Generate mnemonics for
-// L_cas: SIZE: 1,2,4,8,16 MODEL: 1,2,3,4
-// L_swp L_ldadd L_ldclr L_ldeor L_ldset: SIZE: 1,2,4,8 MODEL: 1,2,3,4
+// L_cas: SIZE: 1,2,4,8,16 MODEL: 1,2,3,4,5
+// L_swp L_ldadd L_ldclr L_ldeor L_ldset: SIZE: 1,2,4,8 MODEL: 1,2,3,4,5
#if SIZE == 1
#define S b
@@ -64,24 +64,44 @@ HIDDEN(___aarch64_have_lse_atomics)
#define L
#define M 0x000000
#define N 0x000000
+#define BARRIER
#elif MODEL == 2
#define SUFF _acq
#define A a
#define L
#define M 0x400000
#define N 0x800000
+#define BARRIER
#elif MODEL == 3
#define SUFF _rel
#define A
#define L l
#define M 0x008000
#define N 0x400000
+#define BARRIER
#elif MODEL == 4
#define SUFF _acq_rel
#define A a
#define L l
#define M 0x408000
#define N 0xc00000
+#define BARRIER
+#elif MODEL == 5
+#define SUFF _sync
+#ifdef L_swp
+// swp has _acq semantics.
+#define A a
+#define L
+#define M 0x400000
+#define N 0x800000
+#else
+// All other _sync functions have _seq semantics.
+#define A a
+#define L l
+#define M 0x408000
+#define N 0xc00000
+#endif
+#define BARRIER dmb ish
#else
#error
#endif // MODEL
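Editorial sketch: MODEL 5 gives the legacy __aarch64_*_sync entry points a trailing dmb ish so the LL/SC fallback path still ends with a full barrier; in C++ atomics terms the effect is roughly an acquire (or acq_rel) RMW followed by a sequentially consistent fence. A rough analogue, for illustration only, not the actual ABI lowering:

#include <atomic>

// Approximation of __aarch64_swp4_sync: swp itself has _acq semantics,
// and the trailing BARRIER (dmb ish) is modeled as a seq_cst fence.
int swp_sync(std::atomic<int> &v, int desired) {
  int old = v.exchange(desired, std::memory_order_acquire);
  std::atomic_thread_fence(std::memory_order_seq_cst);
  return old;
}

int main() {
  std::atomic<int> v{1};
  return swp_sync(v, 2);
}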
@@ -96,7 +116,12 @@ HIDDEN(___aarch64_have_lse_atomics)
#endif
#define NAME(BASE) GLUE4(__aarch64_, BASE, SIZE, SUFF)
+#if MODEL == 5
+// Drop A for _sync functions.
+#define LDXR GLUE3(ld, xr, S)
+#else
#define LDXR GLUE4(ld, A, xr, S)
+#endif
#define STXR GLUE4(st, L, xr, S)
// Define temporary registers.
@@ -136,9 +161,15 @@ DEFINE_COMPILERRT_OUTLINE_FUNCTION_UNMANGLED(NAME(cas))
STXR w(tmp1), s(1), [x2]
cbnz w(tmp1), 0b
1:
+ BARRIER
ret
#else
+#if MODEL == 5
+// Drop A for _sync functions.
+#define LDXP GLUE2(ld, xp)
+#else
#define LDXP GLUE3(ld, A, xp)
+#endif
#define STXP GLUE3(st, L, xp)
#ifdef HAS_ASM_LSE
#define CASP GLUE3(casp, A, L) x0, x1, x2, x3, [x4]
@@ -159,6 +190,7 @@ DEFINE_COMPILERRT_OUTLINE_FUNCTION_UNMANGLED(NAME(cas))
STXP w(tmp2), x2, x3, [x4]
cbnz w(tmp2), 0b
1:
+ BARRIER
ret
#endif
END_COMPILERRT_OUTLINE_FUNCTION(NAME(cas))
@@ -180,6 +212,7 @@ DEFINE_COMPILERRT_OUTLINE_FUNCTION_UNMANGLED(NAME(swp))
LDXR s(0), [x1]
STXR w(tmp1), s(tmp0), [x1]
cbnz w(tmp1), 0b
+ BARRIER
ret
END_COMPILERRT_OUTLINE_FUNCTION(NAME(swp))
#endif // L_swp
@@ -224,6 +257,7 @@ DEFINE_COMPILERRT_OUTLINE_FUNCTION_UNMANGLED(NAME(LDNM))
OP s(tmp1), s(0), s(tmp0)
STXR w(tmp2), s(tmp1), [x1]
cbnz w(tmp2), 0b
+ BARRIER
ret
END_COMPILERRT_OUTLINE_FUNCTION(NAME(LDNM))
#endif // L_ldadd L_ldclr L_ldeor L_ldset
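For context on the new _sync model: these helpers back GCC's legacy __sync_*
builtins, which need full-barrier semantics that the plain acquire/release
orderings don't provide, hence the trailing `dmb ish` emitted via BARRIER. A
minimal sketch of code that, when built with -moutline-atomics, may be lowered
to calls such as __aarch64_cas4_sync and __aarch64_swp4_sync (the function
names here are illustrative; the exact lowering is the compiler's choice):

    #include <stdint.h>

    uint32_t cas_full_barrier(uint32_t *p, uint32_t expected,
                              uint32_t desired) {
      // Full-barrier compare-and-swap; a candidate for __aarch64_cas4_sync.
      return __sync_val_compare_and_swap(p, expected, desired);
    }

    uint32_t swap_acquire(uint32_t *p, uint32_t v) {
      // Specified as acquire-only, matching the "swp has _acq semantics"
      // note above; a candidate for __aarch64_swp4_sync.
      return __sync_lock_test_and_set(p, v);
    }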
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/aarch64/sme-abi-init.c b/contrib/llvm-project/compiler-rt/lib/builtins/aarch64/sme-abi-init.c
new file mode 100644
index 000000000000..b6ee12170d56
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/aarch64/sme-abi-init.c
@@ -0,0 +1,52 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+__attribute__((visibility("hidden"), nocommon))
+_Bool __aarch64_has_sme_and_tpidr2_el0;
+
+// We have multiple ways to check whether SME is available, depending on
+// the target.
+// * For Linux we can use __getauxval().
+// * For newlib we can use __aarch64_sme_accessible().
+
+#if defined(__linux__)
+
+#ifndef AT_HWCAP2
+#define AT_HWCAP2 26
+#endif
+
+#ifndef HWCAP2_SME
+#define HWCAP2_SME (1 << 23)
+#endif
+
+extern unsigned long int __getauxval (unsigned long int);
+
+static _Bool has_sme(void) {
+ return __getauxval(AT_HWCAP2) & HWCAP2_SME;
+}
+
+#else // defined(__linux__)
+
+#if defined(COMPILER_RT_SHARED_LIB)
+__attribute__((weak))
+#endif
+extern _Bool __aarch64_sme_accessible(void);
+
+static _Bool has_sme(void) {
+#if defined(COMPILER_RT_SHARED_LIB)
+ if (!__aarch64_sme_accessible)
+ return 0;
+#endif
+ return __aarch64_sme_accessible();
+}
+
+#endif // defined(__linux__)
+
+#if __GNUC__ >= 9
+#pragma GCC diagnostic ignored "-Wprio-ctor-dtor"
+#endif
+__attribute__((constructor(90)))
+static void init_aarch64_has_sme(void) {
+ __aarch64_has_sme_and_tpidr2_el0 = has_sme();
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/aarch64/sme-abi.S b/contrib/llvm-project/compiler-rt/lib/builtins/aarch64/sme-abi.S
new file mode 100644
index 000000000000..d470ecaf7aaa
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/aarch64/sme-abi.S
@@ -0,0 +1,176 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+// This file implements the support routines for the SME ABI,
+// described here:
+// https://github.com/ARM-software/abi-aa/blob/main/aapcs64/aapcs64.rst#sme-support-routines
+
+#include "../assembly.h"
+
+
+#if !defined(__APPLE__)
+#define TPIDR2_SYMBOL SYMBOL_NAME(__aarch64_has_sme_and_tpidr2_el0)
+#define TPIDR2_SYMBOL_OFFSET :lo12:SYMBOL_NAME(__aarch64_has_sme_and_tpidr2_el0)
+#else
+// MachO requires @page/@pageoff directives because the global is defined
+// in a different file. Otherwise this file may fail to build.
+#define TPIDR2_SYMBOL SYMBOL_NAME(__aarch64_has_sme_and_tpidr2_el0)@page
+#define TPIDR2_SYMBOL_OFFSET SYMBOL_NAME(__aarch64_has_sme_and_tpidr2_el0)@pageoff
+#endif
+
+.arch armv9-a+sme
+
+// Utility function which calls a system's abort() routine. Because the function
+// is streaming-compatible it should disable streaming-SVE mode before calling
+// abort(). Note that there is no need to preserve any state before the call,
+// because the function does not return.
+DEFINE_COMPILERRT_PRIVATE_FUNCTION(do_abort)
+.cfi_startproc
+ .variant_pcs SYMBOL_NAME(do_abort)
+ stp x29, x30, [sp, #-32]!
+ cntd x0
+ // Store VG to a stack location that we describe with .cfi_offset
+ str x0, [sp, #16]
+ .cfi_def_cfa_offset 32
+ .cfi_offset w30, -24
+ .cfi_offset w29, -32
+ .cfi_offset 46, -16
+ bl __arm_sme_state
+ tbz x0, #0, 2f
+1:
+ smstop sm
+2:
+ // We can't make this into a tail-call because the unwinder would
+ // need to restore the value of VG.
+ bl SYMBOL_NAME(abort)
+.cfi_endproc
+END_COMPILERRT_FUNCTION(do_abort)
+
+// __arm_sme_state fills the result registers based on the global
+// __aarch64_has_sme_and_tpidr2_el0, which is set as part of the
+// compiler-rt startup code.
+DEFINE_COMPILERRT_OUTLINE_FUNCTION_UNMANGLED(__arm_sme_state)
+ .variant_pcs __arm_sme_state
+ mov x0, xzr
+ mov x1, xzr
+
+ adrp x16, TPIDR2_SYMBOL
+ ldrb w16, [x16, TPIDR2_SYMBOL_OFFSET]
+ cbz w16, 1f
+0:
+ orr x0, x0, #0xC000000000000000
+ mrs x16, SVCR
+ bfxil x0, x16, #0, #2
+ mrs x1, TPIDR2_EL0
+1:
+ ret
+END_COMPILERRT_OUTLINE_FUNCTION(__arm_sme_state)
+
+DEFINE_COMPILERRT_OUTLINE_FUNCTION_UNMANGLED(__arm_tpidr2_restore)
+ .variant_pcs __arm_tpidr2_restore
+ // If TPIDR2_EL0 is nonnull, the subroutine aborts in some platform-specific
+ // manner.
+ mrs x14, TPIDR2_EL0
+ cbnz x14, 2f
+
+ // If any of the reserved bytes in the first 16 bytes of BLK are nonzero,
+ // the subroutine [..] aborts in some platform-defined manner.
+ ldrh w14, [x0, #10]
+ cbnz w14, 2f
+ ldr w14, [x0, #12]
+ cbnz w14, 2f
+
+ // If BLK.za_save_buffer is NULL, the subroutine does nothing.
+ ldr x16, [x0]
+ cbz x16, 1f
+
+ // If BLK.num_za_save_slices is zero, the subroutine does nothing.
+ ldrh w14, [x0, #8]
+ cbz x14, 1f
+
+ mov x15, xzr
+0:
+ ldr za[w15,0], [x16]
+ addsvl x16, x16, #1
+ add x15, x15, #1
+ cmp x14, x15
+ b.ne 0b
+1:
+ ret
+2:
+ b SYMBOL_NAME(do_abort)
+END_COMPILERRT_OUTLINE_FUNCTION(__arm_tpidr2_restore)
+
+DEFINE_COMPILERRT_OUTLINE_FUNCTION_UNMANGLED(__arm_tpidr2_save)
+  .variant_pcs __arm_tpidr2_save
+ // If the current thread does not have access to TPIDR2_EL0, the subroutine
+ // does nothing.
+ adrp x14, TPIDR2_SYMBOL
+ ldrb w14, [x14, TPIDR2_SYMBOL_OFFSET]
+ cbz w14, 1f
+
+ // If TPIDR2_EL0 is null, the subroutine does nothing.
+ mrs x16, TPIDR2_EL0
+ cbz x16, 1f
+
+ // If any of the reserved bytes in the first 16 bytes of the TPIDR2 block are
+ // nonzero, the subroutine [..] aborts in some platform-defined manner.
+ ldrh w14, [x16, #10]
+ cbnz w14, 2f
+ ldr w14, [x16, #12]
+ cbnz w14, 2f
+
+ // If num_za_save_slices is zero, the subroutine does nothing.
+ ldrh w14, [x16, #8]
+ cbz x14, 1f
+
+ // If za_save_buffer is NULL, the subroutine does nothing.
+ ldr x16, [x16]
+ cbz x16, 1f
+
+ mov x15, xzr
+0:
+ str za[w15,0], [x16]
+ addsvl x16, x16, #1
+ add x15, x15, #1
+ cmp x14, x15
+ b.ne 0b
+1:
+ ret
+2:
+ b SYMBOL_NAME(do_abort)
+END_COMPILERRT_OUTLINE_FUNCTION(__arm_tpidr2_save)
+
+DEFINE_COMPILERRT_OUTLINE_FUNCTION_UNMANGLED(__arm_za_disable)
+  .variant_pcs __arm_za_disable
+ // If the current thread does not have access to SME, the subroutine does
+ // nothing.
+ adrp x14, TPIDR2_SYMBOL
+ ldrb w14, [x14, TPIDR2_SYMBOL_OFFSET]
+ cbz w14, 0f
+
+ // Otherwise, the subroutine behaves as if it did the following:
+ // * Call __arm_tpidr2_save.
+ stp x29, x30, [sp, #-16]!
+ .cfi_def_cfa_offset 16
+ mov x29, sp
+ .cfi_def_cfa w29, 16
+ .cfi_offset w30, -8
+ .cfi_offset w29, -16
+ bl __arm_tpidr2_save
+
+ // * Set TPIDR2_EL0 to null.
+ msr TPIDR2_EL0, xzr
+
+ // * Set PSTATE.ZA to 0.
+ smstop za
+
+ .cfi_def_cfa wsp, 16
+ ldp x29, x30, [sp], #16
+ .cfi_def_cfa_offset 0
+ .cfi_restore w30
+ .cfi_restore w29
+0:
+ ret
+END_COMPILERRT_OUTLINE_FUNCTION(__arm_za_disable)
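The loads in __arm_tpidr2_restore/__arm_tpidr2_save above imply a fixed layout
for the TPIDR2 block: the save-buffer pointer at offset 0, a 16-bit slice
count at offset 8, and reserved bytes at offsets 10-15 that must be zero. A
minimal C sketch of that layout (field names follow the za_save_buffer and
num_za_save_slices terms used in the comments; the struct itself is
illustrative, not an ABI header):

    #include <stdint.h>

    struct tpidr2_block {
      void *za_save_buffer;        // offset 0: NULL means nothing to do
      uint16_t num_za_save_slices; // offset 8: 0 means nothing to do
      uint8_t reserved[6];         // offsets 10-15: must be zero, else abort
    };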
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/aarch64/sme-libc-routines.c b/contrib/llvm-project/compiler-rt/lib/builtins/aarch64/sme-libc-routines.c
new file mode 100644
index 000000000000..cd73025a19cc
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/aarch64/sme-libc-routines.c
@@ -0,0 +1,87 @@
+#include <stdlib.h>
+
+// WARNING: When building the scalar versions of these functions you need to
+// use the compiler flag "-mllvm -disable-loop-idiom-all" to prevent clang
+// from recognising a loop idiom and planting calls to memcpy!
+
+static void *__arm_sc_memcpy_fwd(void *dest, const void *src,
+ size_t n) __arm_streaming_compatible {
+ unsigned char *destp = (unsigned char *)dest;
+ const unsigned char *srcp = (const unsigned char *)src;
+ for (size_t i = 0; i < n; ++i)
+ destp[i] = srcp[i];
+
+ return dest;
+}
+
+// If dest and src overlap then behaviour is undefined, hence we can add the
+// restrict keywords here. This also matches the definition of the libc memcpy
+// according to the man page.
+void *__arm_sc_memcpy(void *__restrict__ dest, const void *__restrict__ src,
+ size_t n) __arm_streaming_compatible {
+ return __arm_sc_memcpy_fwd(dest, src, n);
+}
+
+void *__arm_sc_memset(void *dest, int c, size_t n) __arm_streaming_compatible {
+ unsigned char *destp = (unsigned char *)dest;
+ unsigned char c8 = (unsigned char)c;
+ for (size_t i = 0; i < n; ++i)
+ destp[i] = c8;
+
+ return dest;
+}
+
+static void *__arm_sc_memcpy_rev(void *dest, const void *src,
+ size_t n) __arm_streaming_compatible {
+ unsigned char *destp = (unsigned char *)dest;
+ const unsigned char *srcp = (const unsigned char *)src;
+ // TODO: Improve performance by copying larger chunks in reverse, or by
+ // using SVE.
+ while (n > 0) {
+ --n;
+ destp[n] = srcp[n];
+ }
+ return dest;
+}
+
+// Semantically a memmove is equivalent to the following:
+// 1. Copy the entire contents of src to a temporary array that does not
+// overlap with src or dest.
+// 2. Copy the contents of the temporary array into dest.
+void *__arm_sc_memmove(void *dest, const void *src,
+ size_t n) __arm_streaming_compatible {
+ unsigned char *destp = (unsigned char *)dest;
+ const unsigned char *srcp = (const unsigned char *)src;
+
+ // If src and dest don't overlap then just invoke memcpy
+ if ((srcp > (destp + n)) || (destp > (srcp + n)))
+ return __arm_sc_memcpy_fwd(dest, src, n);
+
+  // Overlap case 1 (src at the higher address):
+  //     src:     Low |   ->   | High
+  //    dest:  Low |   ->   | High
+  // Here src is always ahead of dest at a higher address. If we first read a
+  // chunk of data from src we can safely write the same chunk to dest without
+  // corrupting future reads of src.
+ if (srcp > destp)
+ return __arm_sc_memcpy_fwd(dest, src, n);
+
+  // Overlap case 2 (dest at the higher address):
+  //     src:  Low |   ->   | High
+  //    dest:     Low |   ->   | High
+  // While we're in the overlap region, writing to dest corrupts future
+  // reads of src. A safe and efficient way to handle this is to copy the
+  // data in reverse, starting at the highest address.
+}
+
+const void *__arm_sc_memchr(const void *src, int c,
+ size_t n) __arm_streaming_compatible {
+ const unsigned char *srcp = (const unsigned char *)src;
+ unsigned char c8 = (unsigned char)c;
+ for (size_t i = 0; i < n; ++i)
+ if (srcp[i] == c8)
+ return &srcp[i];
+
+ return NULL;
+}
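To see why the copy direction matters in __arm_sc_memmove above, consider an
overlap where dest sits inside src's tail: a forward copy would read bytes it
has already overwritten. A small illustration using the standard memmove as a
stand-in (the streaming-compatible variant behaves the same way):

    #include <stdio.h>
    #include <string.h>

    int main(void) {
      char buf[8] = "abcdef";
      // dest overlaps src at a higher address (overlap case 2 above), so
      // the copy must run in reverse to avoid clobbering unread source
      // bytes.
      memmove(buf + 2, buf, 4);
      printf("%s\n", buf); // prints "ababcd"
      return 0;
    }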
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/absvdi2.c b/contrib/llvm-project/compiler-rt/lib/builtins/absvdi2.c
index b9566cd874fe..291ab5f7f91d 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/absvdi2.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/absvdi2.c
@@ -18,7 +18,7 @@
COMPILER_RT_ABI di_int __absvdi2(di_int a) {
const int N = (int)(sizeof(di_int) * CHAR_BIT);
- if (a == ((di_int)1 << (N - 1)))
+ if (a == ((di_int)((du_int)1 << (N - 1))))
compilerrt_abort();
const di_int t = a >> (N - 1);
return (a ^ t) - t;
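The cast added here avoids undefined behaviour: `(di_int)1 << (N - 1)` shifts
a one into the sign bit of a signed type, which C leaves undefined, while
doing the shift on the unsigned counterpart and converting back produces the
intended minimum-value bit pattern. A one-line sketch of the idiom:

    #include <stdint.h>

    // UB:      (int64_t)1 << 63            (signed shift into the sign bit)
    // Defined: (int64_t)((uint64_t)1 << 63), i.e. INT64_MIN on two's
    // complement targets (the conversion is implementation-defined pre-C23,
    // but well established in practice).
    static const int64_t min_di_int = (int64_t)((uint64_t)1 << 63);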
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/absvsi2.c b/contrib/llvm-project/compiler-rt/lib/builtins/absvsi2.c
index 9d5de7e8a3f2..9977c33d8f7e 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/absvsi2.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/absvsi2.c
@@ -18,7 +18,7 @@
COMPILER_RT_ABI si_int __absvsi2(si_int a) {
const int N = (int)(sizeof(si_int) * CHAR_BIT);
- if (a == ((si_int)1 << (N - 1)))
+ if (a == ((si_int)((su_int)1 << (N - 1))))
compilerrt_abort();
const si_int t = a >> (N - 1);
return (a ^ t) - t;
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/absvti2.c b/contrib/llvm-project/compiler-rt/lib/builtins/absvti2.c
index 491d99d7ce0f..bc6933bd2a1c 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/absvti2.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/absvti2.c
@@ -20,7 +20,7 @@
COMPILER_RT_ABI ti_int __absvti2(ti_int a) {
const int N = (int)(sizeof(ti_int) * CHAR_BIT);
- if (a == ((ti_int)1 << (N - 1)))
+ if (a == (ti_int)((tu_int)1 << (N - 1)))
compilerrt_abort();
const ti_int s = a >> (N - 1);
return (a ^ s) - s;
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/addtf3.c b/contrib/llvm-project/compiler-rt/lib/builtins/addtf3.c
index 86e4f4cfc3fc..2cb3a4d59191 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/addtf3.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/addtf3.c
@@ -13,7 +13,7 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#if defined(CRT_HAS_TF_MODE)
#include "fp_add_impl.inc"
COMPILER_RT_ABI fp_t __addtf3(fp_t a, fp_t b) {
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/apple_versioning.c b/contrib/llvm-project/compiler-rt/lib/builtins/apple_versioning.c
index f87b42820c15..83d419418f24 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/apple_versioning.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/apple_versioning.c
@@ -138,13 +138,13 @@ NOT_HERE_BEFORE_10_6(__udivti3)
NOT_HERE_BEFORE_10_6(__umoddi3)
NOT_HERE_BEFORE_10_6(__umodti3)
-#if __ppc__
+#if __powerpc__
NOT_HERE_BEFORE_10_6(__gcc_qadd)
NOT_HERE_BEFORE_10_6(__gcc_qdiv)
NOT_HERE_BEFORE_10_6(__gcc_qmul)
NOT_HERE_BEFORE_10_6(__gcc_qsub)
NOT_HERE_BEFORE_10_6(__trampoline_setup)
-#endif // __ppc__
+#endif // __powerpc__
NOT_HERE_IN_10_8_AND_EARLIER(__atomic_compare_exchange)
NOT_HERE_IN_10_8_AND_EARLIER(__atomic_compare_exchange_1)
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/arm/aeabi_cdcmp.S b/contrib/llvm-project/compiler-rt/lib/builtins/arm/aeabi_cdcmp.S
index bd039a0329ea..c7abdb003a68 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/arm/aeabi_cdcmp.S
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/arm/aeabi_cdcmp.S
@@ -8,10 +8,6 @@
#include "../assembly.h"
-#if __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__
-#error big endian support not implemented
-#endif
-
#define APSR_Z (1 << 30)
#define APSR_C (1 << 29)
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/arm/aeabi_cfcmp.S b/contrib/llvm-project/compiler-rt/lib/builtins/arm/aeabi_cfcmp.S
index a26cb2a3ce16..81c47661c8b5 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/arm/aeabi_cfcmp.S
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/arm/aeabi_cfcmp.S
@@ -8,10 +8,6 @@
#include "../assembly.h"
-#if __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__
-#error big endian support not implemented
-#endif
-
#define APSR_Z (1 << 30)
#define APSR_C (1 << 29)
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/arm/divsi3.S b/contrib/llvm-project/compiler-rt/lib/builtins/arm/divsi3.S
index 761bf49d3ed0..faf9af917ab6 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/arm/divsi3.S
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/arm/divsi3.S
@@ -37,7 +37,8 @@ DEFINE_COMPILERRT_FUNCTION(__divsi3)
sdiv r0, r0, r1
bx lr
LOCAL_LABEL(divzero):
- mov r0,#0
+ // Use movs for compatibility with v8-m.base.
+ movs r0,#0
bx lr
#else
ESTABLISH_FRAME
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/arm/fp_mode.c b/contrib/llvm-project/compiler-rt/lib/builtins/arm/fp_mode.c
index f356e0b1316b..064f4e94fb84 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/arm/fp_mode.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/arm/fp_mode.c
@@ -27,7 +27,7 @@ CRT_FE_ROUND_MODE __attribute__((weak)) __arm_fe_default_rmode =
CRT_FE_TONEAREST;
#endif
-CRT_FE_ROUND_MODE __fe_getround() {
+CRT_FE_ROUND_MODE __fe_getround(void) {
#ifdef __ARM_FP
uint32_t fpscr;
__asm__ __volatile__("vmrs %0, fpscr" : "=r" (fpscr));
@@ -48,7 +48,7 @@ CRT_FE_ROUND_MODE __fe_getround() {
#endif
}
-int __fe_raise_inexact() {
+int __fe_raise_inexact(void) {
#ifdef __ARM_FP
uint32_t fpscr;
__asm__ __volatile__("vmrs %0, fpscr" : "=r" (fpscr));
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/arm/sync-ops.h b/contrib/llvm-project/compiler-rt/lib/builtins/arm/sync-ops.h
index c9623249e5d2..dca201d8aef7 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/arm/sync-ops.h
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/arm/sync-ops.h
@@ -14,35 +14,41 @@
#include "../assembly.h"
+#if __ARM_ARCH >= 7
+#define DMB dmb
+#elif __ARM_ARCH >= 6
+#define DMB mcr p15, #0, r0, c7, c10, #5
+#else
+#error DMB is only supported on ARMv6+
+#endif
+
#define SYNC_OP_4(op) \
.p2align 2; \
- .thumb; \
.syntax unified; \
- DEFINE_COMPILERRT_THUMB_FUNCTION(__sync_fetch_and_##op) \
- dmb; \
+ DEFINE_COMPILERRT_FUNCTION(__sync_fetch_and_##op) \
+ DMB; \
mov r12, r0; \
LOCAL_LABEL(tryatomic_##op) : ldrex r0, [r12]; \
op(r2, r0, r1); \
strex r3, r2, [r12]; \
cmp r3, #0; \
bne LOCAL_LABEL(tryatomic_##op); \
- dmb; \
+ DMB; \
bx lr
#define SYNC_OP_8(op) \
.p2align 2; \
- .thumb; \
.syntax unified; \
- DEFINE_COMPILERRT_THUMB_FUNCTION(__sync_fetch_and_##op) \
+ DEFINE_COMPILERRT_FUNCTION(__sync_fetch_and_##op) \
push {r4, r5, r6, lr}; \
- dmb; \
+ DMB; \
mov r12, r0; \
LOCAL_LABEL(tryatomic_##op) : ldrexd r0, r1, [r12]; \
op(r4, r5, r0, r1, r2, r3); \
strexd r6, r4, r5, [r12]; \
cmp r6, #0; \
bne LOCAL_LABEL(tryatomic_##op); \
- dmb; \
+ DMB; \
pop { r4, r5, r6, pc }
#define MINMAX_4(rD, rN, rM, cmp_kind) \
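These macros assemble the legacy __sync_fetch_and_* helpers; the change
replaces the bare `dmb` (ARMv7-only) with a DMB macro that falls back to the
ARMv6 CP15 barrier encoding. A hedged sketch of code that, on ARM targets
without inline atomics, may end up calling the helpers built from SYNC_OP_4:

    // Illustrative only; whether this compiles to a __sync_fetch_and_add
    // library call depends on the target's native atomics support.
    int fetch_add_full_barrier(int *counter) {
      return __sync_fetch_and_add(counter, 1);
    }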
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/arm/truncdfsf2vfp.S b/contrib/llvm-project/compiler-rt/lib/builtins/arm/truncdfsf2vfp.S
index a3c0a73466e9..e1c171262a78 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/arm/truncdfsf2vfp.S
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/arm/truncdfsf2vfp.S
@@ -11,9 +11,9 @@
//
// extern float __truncdfsf2vfp(double a);
//
-// Converts double precision float to signle precision result.
+// Converts double precision float to single precision result.
// Uses Darwin calling convention where a double precision parameter is
-// passed in a R0/R1 pair and a signle precision result is returned in R0.
+// passed in a R0/R1 pair and a single precision result is returned in R0.
//
.syntax unified
.p2align 2
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/arm/udivsi3.S b/contrib/llvm-project/compiler-rt/lib/builtins/arm/udivsi3.S
index 9b1b035b33d6..16528e8bbd82 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/arm/udivsi3.S
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/arm/udivsi3.S
@@ -32,7 +32,8 @@ DEFINE_COMPILERRT_FUNCTION(__udivsi3)
bx lr
LOCAL_LABEL(divby0):
- mov r0, #0
+ // Use movs for compatibility with v8-m.base.
+ movs r0, #0
# ifdef __ARM_EABI__
b __aeabi_idiv0
# else
@@ -203,7 +204,7 @@ LOCAL_LABEL(divby0):
LOCAL_LABEL(block_skip_##shift) :; \
adcs r3, r3 // same as ((r3 << 1) | Carry). Carry is set if r0 >= r2.
- // TODO: if current location counter is not not word aligned, we don't
+ // TODO: if current location counter is not word aligned, we don't
// need the .p2align and nop
// Label div0block must be word-aligned. First align block 31
.p2align 2
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/ashldi3.c b/contrib/llvm-project/compiler-rt/lib/builtins/ashldi3.c
index 04f22228f11d..7b835da865d7 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/ashldi3.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/ashldi3.c
@@ -28,7 +28,8 @@ COMPILER_RT_ABI di_int __ashldi3(di_int a, int b) {
if (b == 0)
return a;
result.s.low = input.s.low << b;
- result.s.high = (input.s.high << b) | (input.s.low >> (bits_in_word - b));
+ result.s.high =
+ ((su_int)input.s.high << b) | (input.s.low >> (bits_in_word - b));
}
return result.all;
}
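The cast to su_int keeps the high-word shift in unsigned arithmetic (left
shifts that reach a signed word's sign bit are undefined in C) without
changing the result's bit pattern. A small hedged check of the same two-word
decomposition against a native 64-bit shift:

    #include <assert.h>
    #include <stdint.h>

    // Same decomposition as __ashldi3's high-word path, for 0 < b < 32.
    static uint64_t shl64(uint32_t hi, uint32_t lo, int b) {
      uint32_t rhi = (hi << b) | (lo >> (32 - b));
      uint32_t rlo = lo << b;
      return ((uint64_t)rhi << 32) | rlo;
    }

    int main(void) {
      assert(shl64(0, 0x80000001u, 4) == ((uint64_t)0x80000001u << 4));
      return 0;
    }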
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/ashlti3.c b/contrib/llvm-project/compiler-rt/lib/builtins/ashlti3.c
index 2d7bd4a89380..2bebf10401d3 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/ashlti3.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/ashlti3.c
@@ -18,7 +18,7 @@
// Precondition: 0 <= b < bits_in_tword
-COMPILER_RT_ABI ti_int __ashlti3(ti_int a, si_int b) {
+COMPILER_RT_ABI ti_int __ashlti3(ti_int a, int b) {
const int bits_in_dword = (int)(sizeof(di_int) * CHAR_BIT);
twords input;
twords result;
@@ -30,7 +30,8 @@ COMPILER_RT_ABI ti_int __ashlti3(ti_int a, si_int b) {
if (b == 0)
return a;
result.s.low = input.s.low << b;
- result.s.high = (input.s.high << b) | (input.s.low >> (bits_in_dword - b));
+ result.s.high =
+ ((du_int)input.s.high << b) | (input.s.low >> (bits_in_dword - b));
}
return result.all;
}
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/ashrdi3.c b/contrib/llvm-project/compiler-rt/lib/builtins/ashrdi3.c
index 934a5c47fd69..c0879b8b252d 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/ashrdi3.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/ashrdi3.c
@@ -29,7 +29,8 @@ COMPILER_RT_ABI di_int __ashrdi3(di_int a, int b) {
if (b == 0)
return a;
result.s.high = input.s.high >> b;
- result.s.low = (input.s.high << (bits_in_word - b)) | (input.s.low >> b);
+ result.s.low =
+ ((su_int)input.s.high << (bits_in_word - b)) | (input.s.low >> b);
}
return result.all;
}
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/ashrti3.c b/contrib/llvm-project/compiler-rt/lib/builtins/ashrti3.c
index f573b6d6ccba..d6b1ad9192bf 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/ashrti3.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/ashrti3.c
@@ -18,7 +18,7 @@
// Precondition: 0 <= b < bits_in_tword
-COMPILER_RT_ABI ti_int __ashrti3(ti_int a, si_int b) {
+COMPILER_RT_ABI ti_int __ashrti3(ti_int a, int b) {
const int bits_in_dword = (int)(sizeof(di_int) * CHAR_BIT);
twords input;
twords result;
@@ -31,7 +31,8 @@ COMPILER_RT_ABI ti_int __ashrti3(ti_int a, si_int b) {
if (b == 0)
return a;
result.s.high = input.s.high >> b;
- result.s.low = (input.s.high << (bits_in_dword - b)) | (input.s.low >> b);
+ result.s.low =
+ ((du_int)input.s.high << (bits_in_dword - b)) | (input.s.low >> b);
}
return result.all;
}
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/assembly.h b/contrib/llvm-project/compiler-rt/lib/builtins/assembly.h
index 9c015059af5a..8c42fc773483 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/assembly.h
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/assembly.h
@@ -14,6 +14,12 @@
#ifndef COMPILERRT_ASSEMBLY_H
#define COMPILERRT_ASSEMBLY_H
+#if defined(__linux__) && defined(__CET__)
+#if __has_include(<cet.h>)
+#include <cet.h>
+#endif
+#endif
+
#if defined(__APPLE__) && defined(__aarch64__)
#define SEPARATOR %%
#else
@@ -254,14 +260,15 @@
.globl name SEPARATOR \
SYMBOL_IS_FUNC(name) SEPARATOR \
DECLARE_SYMBOL_VISIBILITY_UNMANGLED(name) SEPARATOR \
- CFI_START SEPARATOR \
DECLARE_FUNC_ENCODING \
- name: SEPARATOR BTI_C
+ name: \
+ SEPARATOR CFI_START \
+ SEPARATOR BTI_C
#define DEFINE_COMPILERRT_FUNCTION_ALIAS(name, target) \
.globl SYMBOL_NAME(name) SEPARATOR \
SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
- DECLARE_SYMBOL_VISIBILITY(SYMBOL_NAME(name)) SEPARATOR \
+ DECLARE_SYMBOL_VISIBILITY(name) SEPARATOR \
.set SYMBOL_NAME(name), SYMBOL_NAME(target) SEPARATOR
#if defined(__ARM_EABI__)
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/atomic.c b/contrib/llvm-project/compiler-rt/lib/builtins/atomic.c
index 64bf72dfa345..852bb20f0867 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/atomic.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/atomic.c
@@ -92,6 +92,8 @@ __inline static void lock(Lock *l) { OSSpinLockLock(l); }
static Lock locks[SPINLOCK_COUNT]; // initialized to OS_SPINLOCK_INIT which is 0
#else
+_Static_assert(__atomic_always_lock_free(sizeof(uintptr_t), 0),
+ "Implementation assumes lock-free pointer-size cmpxchg");
typedef _Atomic(uintptr_t) Lock;
/// Unlock a lock. This is a release operation.
__inline static void unlock(Lock *l) {
@@ -336,6 +338,18 @@ OPTIMISED_CASES
return tmp; \
}
+#define ATOMIC_RMW_NAND(n, lockfree, type) \
+ type __atomic_fetch_nand_##n(type *ptr, type val, int model) { \
+ if (lockfree(ptr)) \
+ return __c11_atomic_fetch_nand((_Atomic(type) *)ptr, val, model); \
+ Lock *l = lock_for_pointer(ptr); \
+ lock(l); \
+ type tmp = *ptr; \
+ *ptr = ~(tmp & val); \
+ unlock(l); \
+ return tmp; \
+ }
+
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, add, +)
OPTIMISED_CASES
#undef OPTIMISED_CASE
@@ -351,3 +365,9 @@ OPTIMISED_CASES
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, xor, ^)
OPTIMISED_CASES
#undef OPTIMISED_CASE
+// Allow build with clang without __c11_atomic_fetch_nand builtin (pre-14)
+#if __has_builtin(__c11_atomic_fetch_nand)
+#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW_NAND(n, lockfree, type)
+OPTIMISED_CASES
+#undef OPTIMISED_CASE
+#endif
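The new __atomic_fetch_nand_N entry points return the previous value and
store ~(old & val), matching the locked fallback above; the lock-free path
needs the __c11_atomic_fetch_nand builtin, hence the __has_builtin guard for
pre-14 clang. A usage sketch:

    #include <stdint.h>

    uint32_t fetch_nand_flags(uint32_t *flags, uint32_t mask) {
      // Atomically stores ~(*flags & mask) and returns the old value.
      return __atomic_fetch_nand(flags, mask, __ATOMIC_SEQ_CST);
    }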
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/avr/divmodhi4.S b/contrib/llvm-project/compiler-rt/lib/builtins/avr/divmodhi4.S
new file mode 100644
index 000000000000..37171331f4b3
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/avr/divmodhi4.S
@@ -0,0 +1,57 @@
+//===------------- divmodhi4.S - sint16 div & mod -------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// As described at
+// https://gcc.gnu.org/wiki/avr-gcc#Exceptions_to_the_Calling_Convention, the
+// prototype is `struct {sint16, sint16} __divmodhi4(sint16, sint16)`.
+// The sint16 quotient is returned via R23:R22, and the sint16 remainder is
+// returned via R25:R24, while registers R21/R26/R27/Rtmp and bit T in SREG
+// are clobbered.
+//
+//===----------------------------------------------------------------------===//
+
+ .text
+ .align 2
+
+#ifdef __AVR_TINY__
+ .set __tmp_reg__, 16
+#else
+ .set __tmp_reg__, 0
+#endif
+
+ .globl __divmodhi4
+ .type __divmodhi4, @function
+
+__divmodhi4:
+ bst r25, 7
+ mov __tmp_reg__, r23
+ brtc __divmodhi4_a
+ com __tmp_reg__
+ rcall __divmodhi4_b
+
+__divmodhi4_a:
+ sbrc r23, 7
+ rcall __divmodhi4_c
+ rcall __udivmodhi4 ; Call __udivmodhi4 to do real calculation.
+ sbrc __tmp_reg__, 7
+ rcall __divmodhi4_c
+ brtc __divmodhi4_exit
+
+__divmodhi4_b:
+ com r25
+ neg r24
+ sbci r25, 255
+ ret ; Return quotient via R23:R22 and remainder via R25:R24.
+
+__divmodhi4_c:
+ com r23
+ neg r22
+ sbci r23, 255
+
+__divmodhi4_exit:
+  ret                  ; Return quotient via R23:R22 and remainder via R25:R24.
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/avr/divmodqi4.S b/contrib/llvm-project/compiler-rt/lib/builtins/avr/divmodqi4.S
new file mode 100644
index 000000000000..66cfc0c69bba
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/avr/divmodqi4.S
@@ -0,0 +1,44 @@
+//===------------- divmodqi4.S - sint8 div & mod --------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// As described at
+// https://gcc.gnu.org/wiki/avr-gcc#Exceptions_to_the_Calling_Convention, the
+// prototype is `struct {sint8, sint8} __divmodqi4(sint8, sint8)`.
+// The sint8 quotient is returned via R24, and the sint8 remainder is returned
+// via R25, while registers R23/Rtmp and bit T in SREG are clobbered.
+//
+//===----------------------------------------------------------------------===//
+
+ .text
+ .align 2
+
+#ifdef __AVR_TINY__
+ .set __tmp_reg__, 16
+#else
+ .set __tmp_reg__, 0
+#endif
+
+ .globl __divmodqi4
+ .type __divmodqi4, @function
+
+__divmodqi4:
+ bst r24, 7
+ mov __tmp_reg__, r24
+ eor __tmp_reg__, r22
+ sbrc r24, 7
+ neg r24
+ sbrc r22, 7
+ neg r22
+ rcall __udivmodqi4 ; Call __udivmodqi4 to do real calculation.
+ brtc __divmodqi4_1
+ neg r25
+
+__divmodqi4_1:
+ sbrc __tmp_reg__, 7
+ neg r24
+ ret ; Return quotient via R24 and remainder via R25.
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/avr/exit.S b/contrib/llvm-project/compiler-rt/lib/builtins/avr/exit.S
new file mode 100644
index 000000000000..3cd9c5dafdec
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/avr/exit.S
@@ -0,0 +1,18 @@
+//===------------ exit.S - global terminator for AVR ----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+ .text
+ .align 2
+
+ .globl _exit
+ .type _exit, @function
+
+_exit:
+ cli ; Disable all interrupts.
+__stop_program:
+ rjmp __stop_program ; Fall into an infinite loop.
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/avr/mulhi3.S b/contrib/llvm-project/compiler-rt/lib/builtins/avr/mulhi3.S
new file mode 100644
index 000000000000..d65f52ff27b5
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/avr/mulhi3.S
@@ -0,0 +1,71 @@
+//===------------ mulhi3.S - int16 multiplication -------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// The corresponding C code is something like:
+//
+// int __mulhi3(int A, int B) {
+// int S = 0;
+// while (A != 0) {
+// if (A & 1)
+// S += B;
+// A = ((unsigned int) A) >> 1;
+// B <<= 1;
+// }
+// return S;
+// }
+//
+// __mulhi3 has a special ABI: as in the libgcc implementation, R25:R24 is
+// used to return the result, while Rtmp/R21/R22/R23 are clobbered.
+//
+//===----------------------------------------------------------------------===//
+
+ .text
+ .align 2
+
+#ifdef __AVR_TINY__
+ .set __tmp_reg__, 16
+ .set __zero_reg__, 17
+#else
+ .set __tmp_reg__, 0
+ .set __zero_reg__, 1
+#endif
+
+ .globl __mulhi3
+ .type __mulhi3, @function
+
+__mulhi3:
+ ; Use Rzero:Rtmp to store the result.
+ clr __tmp_reg__
+ clr __zero_reg__ ; S = 0;
+
+__mulhi3_loop:
+ clr r21
+ cp r24, r21
+ cpc r25, r21
+ breq __mulhi3_end ; while (A != 0) {
+
+ mov r21, r24
+ andi r21, 1
+ breq __mulhi3_loop_a ; if (A & 1)
+ add __tmp_reg__, r22
+ adc __zero_reg__, r23 ; S += B;
+
+__mulhi3_loop_a:
+ lsr r25
+ ror r24 ; A = ((unsigned int) A) >> 1;
+ lsl r22
+ rol r23 ; B <<= 1;
+ rjmp __mulhi3_loop ; }
+
+__mulhi3_end:
+ ; Return the result via R25:R24.
+ mov r24, __tmp_reg__
+ mov r25, __zero_reg__
+ ; Restore __zero_reg__ to 0.
+ clr __zero_reg__
+ ret ; return S;
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/avr/mulqi3.S b/contrib/llvm-project/compiler-rt/lib/builtins/avr/mulqi3.S
new file mode 100644
index 000000000000..914735cc6458
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/avr/mulqi3.S
@@ -0,0 +1,53 @@
+//===------------ mulqi3.S - int8 multiplication --------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// The corresponding C code is something like:
+//
+// char __mulqi3(char A, char B) {
+// int S = 0;
+// while (A != 0) {
+// if (A & 1)
+// S += B;
+// B <<= 1;
+// A = ((unsigned char) A) >> 1;
+// }
+// return S;
+// }
+//
+// __mulqi3 has a special ABI: as in the libgcc implementation, the result is
+// returned via R24, while Rtmp and R22 are clobbered.
+//
+//===----------------------------------------------------------------------===//
+
+ .text
+ .align 2
+
+#ifdef __AVR_TINY__
+ .set __tmp_reg__, 16
+#else
+ .set __tmp_reg__, 0
+#endif
+
+ .globl __mulqi3
+ .type __mulqi3, @function
+
+__mulqi3:
+ clr __tmp_reg__ ; S = 0;
+
+__mulqi3_loop:
+ cpi r24, 0
+ breq __mulqi3_end ; while (A != 0) {
+ sbrc r24, 0 ; if (A & 1)
+ add __tmp_reg__, r22 ; S += B;
+ add r22, r22 ; B <<= 1;
+ lsr r24 ; A = ((unsigned char) A) >> 1;
+ rjmp __mulqi3_loop ; }
+
+__mulqi3_end:
+ mov r24, __tmp_reg__
+ ret ; return S;
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/avr/udivmodhi4.S b/contrib/llvm-project/compiler-rt/lib/builtins/avr/udivmodhi4.S
new file mode 100644
index 000000000000..0e52b86ec797
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/avr/udivmodhi4.S
@@ -0,0 +1,49 @@
+//===------------ udivmodhi4.S - uint16 div & mod -------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// As described at
+// https://gcc.gnu.org/wiki/avr-gcc#Exceptions_to_the_Calling_Convention, the
+// prototype is `struct {uint16, uint16} __udivmodhi4(uint16, uint16)`.
+// The uint16 quotient is returned via R23:R22, and the uint16 remainder is
+// returned via R25:R24, while R21/R26/R27 are clobbered.
+//
+//===----------------------------------------------------------------------===//
+
+ .text
+ .align 2
+
+ .globl __udivmodhi4
+ .type __udivmodhi4, @function
+
+__udivmodhi4:
+ sub r26, r26
+ sub r27, r27 ; Initialize the remainder to zero.
+ ldi r21, 17 ; Only loop 16 rounds for uint16.
+
+__udivmodhi4_loop:
+ adc r24, r24
+ adc r25, r25
+ dec r21
+ breq __udivmodhi4_end
+ adc r26, r26
+ adc r27, r27
+ cp r26, r22
+ cpc r27, r23 ; Compare with the divisor.
+ brcs __udivmodhi4_loop
+ sub r26, r22
+ sbc r27, r23 ; Subtract the divisor.
+ rjmp __udivmodhi4_loop
+
+__udivmodhi4_end:
+ com r24
+ com r25
+ mov r22, r24
+ mov r23, r25 ; The quotient is returned in R23:R22.
+ mov r24, r26
+  mov r25, r27 ; The remainder is returned in R25:R24.
+ ret
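The loop above is a restoring shift-subtract divider: each round shifts the
next dividend bit into the remainder, subtracts the divisor when it fits, and
feeds the resulting quotient bit back in through the carry (the final `com`
undoes the inversion that the borrow-based carry leaves in the quotient). The
same algorithm in plain C, without the carry trick:

    #include <stdint.h>

    // Restoring division, one quotient bit per round (cf. __udivmodhi4).
    static void udivmod16(uint16_t num, uint16_t den,
                          uint16_t *quo, uint16_t *rem) {
      uint16_t q = 0, r = 0;
      for (int i = 15; i >= 0; --i) {
        r = (uint16_t)((r << 1) | ((num >> i) & 1)); // bring in next bit
        if (r >= den) { // divisor fits: subtract and set the quotient bit
          r -= den;
          q |= (uint16_t)(1u << i);
        }
      }
      *quo = q;
      *rem = r;
    }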
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/avr/udivmodqi4.S b/contrib/llvm-project/compiler-rt/lib/builtins/avr/udivmodqi4.S
new file mode 100644
index 000000000000..99aec3442936
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/avr/udivmodqi4.S
@@ -0,0 +1,39 @@
+//===------------ udivmodqi4.S - uint8 div & mod --------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// As described at
+// https://gcc.gnu.org/wiki/avr-gcc#Exceptions_to_the_Calling_Convention, the
+// prototype is `struct {uint8, uint8} __udivmodqi4(uint8, uint8)`.
+// The uint8 quotient is returned via R24, and the uint8 remainder is returned
+// via R25, while R23 is clobbered.
+//
+//===----------------------------------------------------------------------===//
+
+ .text
+ .align 2
+
+ .globl __udivmodqi4
+ .type __udivmodqi4, @function
+
+__udivmodqi4:
+ sub r25, r25 ; Initialize the remainder to zero.
+ ldi r23, 9 ; Only loop 8 rounds for uint8.
+
+__udivmodqi4_loop:
+ adc r24, r24
+ dec r23
+ breq __udivmodqi4_end
+ adc r25, r25
+ cp r25, r22 ; Compare with the divisor.
+ brcs __udivmodqi4_loop
+ sub r25, r22 ; Subtract the divisor.
+ rjmp __udivmodqi4_loop
+
+__udivmodqi4_end:
+ com r24 ; The uint8 quotient is returned via R24.
+ ret ; The uint8 remainder is returned via R25.
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/clear_cache.c b/contrib/llvm-project/compiler-rt/lib/builtins/clear_cache.c
index da0715914b41..2ac99b25c243 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/clear_cache.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/clear_cache.c
@@ -91,12 +91,35 @@ void __clear_cache(void *start, void *end) {
#else
compilerrt_abort();
#endif
-#elif defined(__linux__) && defined(__mips__)
+#elif defined(__linux__) && defined(__loongarch__)
+ __asm__ volatile("ibar 0");
+#elif defined(__mips__)
const uintptr_t start_int = (uintptr_t)start;
const uintptr_t end_int = (uintptr_t)end;
- syscall(__NR_cacheflush, start, (end_int - start_int), BCACHE);
-#elif defined(__mips__) && defined(__OpenBSD__)
- cacheflush(start, (uintptr_t)end - (uintptr_t)start, BCACHE);
+ uintptr_t synci_step;
+ __asm__ volatile("rdhwr %0, $1" : "=r"(synci_step));
+ if (synci_step != 0) {
+#if __mips_isa_rev >= 6
+ for (uintptr_t p = start_int; p < end_int; p += synci_step)
+ __asm__ volatile("synci 0(%0)" : : "r"(p));
+
+    // The last "move $at, $0" is the target of jr.hb instead of the delay slot.
+ __asm__ volatile(".set noat\n"
+ "sync\n"
+ "addiupc $at, 12\n"
+ "jr.hb $at\n"
+ "move $at, $0\n"
+ ".set at");
+#elif defined(__linux__) || defined(__OpenBSD__)
+  // On pre-R6 cores synci may not be globalized, and some implementations
+  // report a strange synci_step, so use the libc call instead.
+ _flush_cache(start, end_int - start_int, BCACHE);
+#else
+ (void)start_int;
+ (void)end_int;
+ compilerrt_abort();
+#endif
+ }
#elif defined(__aarch64__) && !defined(__APPLE__)
uint64_t xstart = (uint64_t)(uintptr_t)start;
uint64_t xend = (uint64_t)(uintptr_t)end;
@@ -130,7 +153,10 @@ void __clear_cache(void *start, void *end) {
__asm __volatile("dsb ish");
}
__asm __volatile("isb sy");
-#elif defined(__powerpc64__)
+#elif defined(__powerpc__)
+  // Newer CPUs have a bigger line size made of multiple blocks, so the
+  // following value is a lowest common denominator for what used to be
+  // a single-block cache line and is therefore inefficient.
const size_t line_size = 32;
const size_t len = (uintptr_t)end - (uintptr_t)start;
@@ -173,12 +199,12 @@ void __clear_cache(void *start, void *end) {
arg.len = (uintptr_t)end - (uintptr_t)start;
sysarch(RISCV_SYNC_ICACHE, &arg);
+#elif defined(__ve__)
+ __asm__ volatile("fencec 2");
#else
#if __APPLE__
// On Darwin, sys_icache_invalidate() provides this functionality
sys_icache_invalidate(start, end - start);
-#elif defined(__ve__)
- __asm__ volatile("fencec 2");
#else
compilerrt_abort();
#endif
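__clear_cache is the library fallback for __builtin___clear_cache: after
writing freshly generated instructions, callers must flush the affected range
before executing it. A minimal usage sketch (finish_codegen is a hypothetical
caller, not part of compiler-rt):

    #include <stddef.h>

    extern void __clear_cache(void *start, void *end);

    void finish_codegen(void *buf, size_t len) {
      // Make the newly written code visible to instruction fetch.
      __clear_cache(buf, (char *)buf + len);
    }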
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/comparetf2.c b/contrib/llvm-project/compiler-rt/lib/builtins/comparetf2.c
index f1592454138c..be5e9e5e44dd 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/comparetf2.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/comparetf2.c
@@ -39,7 +39,7 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#if defined(CRT_HAS_TF_MODE)
#include "fp_compare_impl.inc"
COMPILER_RT_ABI CMP_RESULT __letf2(fp_t a, fp_t b) { return __leXf2__(a, b); }
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64.c b/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64.c
new file mode 100644
index 000000000000..17bddfca46f0
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64.c
@@ -0,0 +1,152 @@
+//===-- cpu_model/aarch64.c - Support for __cpu_model builtin ----*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is based on LLVM's lib/Support/Host.cpp.
+// It implements __aarch64_have_lse_atomics, __aarch64_cpu_features for
+// AArch64.
+//
+//===----------------------------------------------------------------------===//
+
+#include "cpu_model.h"
+
+#if !defined(__aarch64__)
+#error This file is intended only for aarch64-based targets
+#endif
+
+#if __has_include(<sys/ifunc.h>)
+#include <sys/ifunc.h>
+#else
+typedef struct __ifunc_arg_t {
+ unsigned long _size;
+ unsigned long _hwcap;
+ unsigned long _hwcap2;
+} __ifunc_arg_t;
+#endif // __has_include(<sys/ifunc.h>)
+
+// LSE support detection for out-of-line atomics
+// using HWCAP and Auxiliary vector
+_Bool __aarch64_have_lse_atomics
+ __attribute__((visibility("hidden"), nocommon)) = false;
+
+#if defined(__FreeBSD__)
+// clang-format off: should not reorder sys/auxv.h alphabetically
+#include <sys/auxv.h>
+// clang-format on
+#include "aarch64/hwcap.inc"
+#include "aarch64/lse_atomics/freebsd.inc"
+#elif defined(__Fuchsia__)
+#include "aarch64/hwcap.inc"
+#include "aarch64/lse_atomics/fuchsia.inc"
+#elif defined(__ANDROID__)
+#include "aarch64/hwcap.inc"
+#include "aarch64/lse_atomics/android.inc"
+#elif __has_include(<sys/auxv.h>)
+#include "aarch64/hwcap.inc"
+#include "aarch64/lse_atomics/sysauxv.inc"
+#else
+// When unimplemented, we leave __aarch64_have_lse_atomics initialized to false.
+#endif
+
+#if !defined(DISABLE_AARCH64_FMV)
+// CPUFeatures must correspond to the same AArch64 features in
+// AArch64TargetParser.h
+enum CPUFeatures {
+ FEAT_RNG,
+ FEAT_FLAGM,
+ FEAT_FLAGM2,
+ FEAT_FP16FML,
+ FEAT_DOTPROD,
+ FEAT_SM4,
+ FEAT_RDM,
+ FEAT_LSE,
+ FEAT_FP,
+ FEAT_SIMD,
+ FEAT_CRC,
+ FEAT_SHA1,
+ FEAT_SHA2,
+ FEAT_SHA3,
+ FEAT_AES,
+ FEAT_PMULL,
+ FEAT_FP16,
+ FEAT_DIT,
+ FEAT_DPB,
+ FEAT_DPB2,
+ FEAT_JSCVT,
+ FEAT_FCMA,
+ FEAT_RCPC,
+ FEAT_RCPC2,
+ FEAT_FRINTTS,
+ FEAT_DGH,
+ FEAT_I8MM,
+ FEAT_BF16,
+ FEAT_EBF16,
+ FEAT_RPRES,
+ FEAT_SVE,
+ FEAT_SVE_BF16,
+ FEAT_SVE_EBF16,
+ FEAT_SVE_I8MM,
+ FEAT_SVE_F32MM,
+ FEAT_SVE_F64MM,
+ FEAT_SVE2,
+ FEAT_SVE_AES,
+ FEAT_SVE_PMULL128,
+ FEAT_SVE_BITPERM,
+ FEAT_SVE_SHA3,
+ FEAT_SVE_SM4,
+ FEAT_SME,
+ FEAT_MEMTAG,
+ FEAT_MEMTAG2,
+ FEAT_MEMTAG3,
+ FEAT_SB,
+ FEAT_PREDRES,
+ FEAT_SSBS,
+ FEAT_SSBS2,
+ FEAT_BTI,
+ FEAT_LS64,
+ FEAT_LS64_V,
+ FEAT_LS64_ACCDATA,
+ FEAT_WFXT,
+ FEAT_SME_F64,
+ FEAT_SME_I64,
+ FEAT_SME2,
+ FEAT_RCPC3,
+ FEAT_MOPS,
+ FEAT_MAX,
+ FEAT_EXT = 62, // Reserved to indicate presence of additional features field
+ // in __aarch64_cpu_features
+ FEAT_INIT // Used as flag of features initialization completion
+};
+
+// Architecture features used
+// in Function Multi Versioning
+struct {
+ unsigned long long features;
+ // As features grows new fields could be added
+} __aarch64_cpu_features __attribute__((visibility("hidden"), nocommon));
+
+// The formatter wants to re-order these includes, but doing so is incorrect:
+// clang-format off
+#if defined(__APPLE__)
+#include "aarch64/fmv/apple.inc"
+#elif defined(__FreeBSD__)
+#include "aarch64/fmv/mrs.inc"
+#include "aarch64/fmv/freebsd.inc"
+#elif defined(__Fuchsia__)
+#include "aarch64/fmv/fuchsia.inc"
+#elif defined(__ANDROID__)
+#include "aarch64/fmv/mrs.inc"
+#include "aarch64/fmv/android.inc"
+#elif __has_include(<sys/auxv.h>)
+#include "aarch64/fmv/mrs.inc"
+#include "aarch64/fmv/sysauxv.inc"
+#else
+#include "aarch64/fmv/unimplemented.inc"
+#endif
+// clang-format on
+
+#endif // !defined(DISABLE_AARCH64_FMV)
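Compiler-generated FMV resolvers consult __aarch64_cpu_features after the
initialization above; each CPUFeatures enumerator is a bit index into the
features word. An illustrative hand-written check (normally the compiler
emits this, and the struct has hidden visibility, so it is only reachable
from within the same module):

    // FEAT_LSE is bit 7 in the enumeration above.
    extern struct {
      unsigned long long features;
    } __aarch64_cpu_features;

    static int have_lse(void) {
      return (int)((__aarch64_cpu_features.features >> 7) & 1);
    }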
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/fmv/android.inc b/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/fmv/android.inc
new file mode 100644
index 000000000000..f711431489cc
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/fmv/android.inc
@@ -0,0 +1,36 @@
+void __init_cpu_features_resolver(unsigned long hwcap,
+ const __ifunc_arg_t *arg) {
+ if (__aarch64_cpu_features.features)
+ return;
+
+  // ifunc resolvers don't receive hwcaps in their arguments on Android API
+  // levels below 30. In that case, keep all CPU features unsupported
+  // (zeros). To detect this case at runtime we check for the memfd_create
+  // function from the C library, which was introduced in Android API 30.
+ int memfd_create(const char *, unsigned int) __attribute__((weak));
+ if (!memfd_create)
+ return;
+
+ __init_cpu_features_constructor(hwcap, arg);
+}
+
+void CONSTRUCTOR_ATTRIBUTE __init_cpu_features(void) {
+ // CPU features already initialized.
+ if (__aarch64_cpu_features.features)
+ return;
+
+ // Don't set any CPU features,
+ // detection could be wrong on Exynos 9810.
+ if (__isExynos9810())
+ return;
+
+ unsigned long hwcap = getauxval(AT_HWCAP);
+ unsigned long hwcap2 = getauxval(AT_HWCAP2);
+
+ __ifunc_arg_t arg;
+ arg._size = sizeof(__ifunc_arg_t);
+ arg._hwcap = hwcap;
+ arg._hwcap2 = hwcap2;
+ __init_cpu_features_constructor(hwcap | _IFUNC_ARG_HWCAP, &arg);
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/fmv/apple.inc b/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/fmv/apple.inc
new file mode 100644
index 000000000000..0bb755f4b305
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/fmv/apple.inc
@@ -0,0 +1,71 @@
+#include <TargetConditionals.h>
+#if TARGET_OS_OSX || TARGET_OS_IPHONE
+#include <dispatch/dispatch.h>
+#include <sys/sysctl.h>
+
+static bool isKnownAndSupported(const char *name) {
+ int32_t val = 0;
+ size_t size = sizeof(val);
+ if (sysctlbyname(name, &val, &size, NULL, 0))
+ return false;
+ return val;
+}
+
+void __init_cpu_features_resolver(void) {
+ // On Darwin platforms, this may be called concurrently by multiple threads
+ // because the resolvers that use it are called lazily at runtime (unlike on
+ // ELF platforms, where IFuncs are resolved serially at load time). This
+ // function's effect on __aarch64_cpu_features must be idempotent.
+
+ if (!__atomic_load_n(&__aarch64_cpu_features.features, __ATOMIC_RELAXED)) {
+ uint64_t features = 0;
+
+ // https://developer.apple.com/documentation/kernel/1387446-sysctlbyname/determining_instruction_set_characteristics
+ static const struct {
+ const char *sysctl_name;
+ enum CPUFeatures feature;
+ } feature_checks[] = {
+ {"hw.optional.arm.FEAT_FlagM", FEAT_FLAGM},
+ {"hw.optional.arm.FEAT_FlagM2", FEAT_FLAGM2},
+ {"hw.optional.arm.FEAT_FHM", FEAT_FP16FML},
+ {"hw.optional.arm.FEAT_DotProd", FEAT_DOTPROD},
+ {"hw.optional.arm.FEAT_RDM", FEAT_RDM},
+ {"hw.optional.arm.FEAT_LSE", FEAT_LSE},
+ {"hw.optional.floatingpoint", FEAT_FP},
+ {"hw.optional.AdvSIMD", FEAT_SIMD},
+ {"hw.optional.armv8_crc32", FEAT_CRC},
+ {"hw.optional.arm.FEAT_SHA1", FEAT_SHA1},
+ {"hw.optional.arm.FEAT_SHA256", FEAT_SHA2},
+ {"hw.optional.arm.FEAT_SHA3", FEAT_SHA3},
+ {"hw.optional.arm.FEAT_AES", FEAT_AES},
+ {"hw.optional.arm.FEAT_PMULL", FEAT_PMULL},
+ {"hw.optional.arm.FEAT_FP16", FEAT_FP16},
+ {"hw.optional.arm.FEAT_DIT", FEAT_DIT},
+ {"hw.optional.arm.FEAT_DPB", FEAT_DPB},
+ {"hw.optional.arm.FEAT_DPB2", FEAT_DPB2},
+ {"hw.optional.arm.FEAT_JSCVT", FEAT_JSCVT},
+ {"hw.optional.arm.FEAT_FCMA", FEAT_FCMA},
+ {"hw.optional.arm.FEAT_LRCPC", FEAT_RCPC},
+ {"hw.optional.arm.FEAT_LRCPC2", FEAT_RCPC2},
+ {"hw.optional.arm.FEAT_FRINTTS", FEAT_FRINTTS},
+ {"hw.optional.arm.FEAT_I8MM", FEAT_I8MM},
+ {"hw.optional.arm.FEAT_BF16", FEAT_BF16},
+ {"hw.optional.arm.FEAT_SB", FEAT_SB},
+ {"hw.optional.arm.FEAT_SPECRES", FEAT_PREDRES},
+ {"hw.optional.arm.FEAT_SSBS", FEAT_SSBS2},
+ {"hw.optional.arm.FEAT_BTI", FEAT_BTI},
+ };
+
+ for (size_t I = 0, E = sizeof(feature_checks) / sizeof(feature_checks[0]);
+ I != E; ++I)
+ if (isKnownAndSupported(feature_checks[I].sysctl_name))
+ features |= (1ULL << feature_checks[I].feature);
+
+ features |= (1ULL << FEAT_INIT);
+
+ __atomic_store(&__aarch64_cpu_features.features, &features,
+ __ATOMIC_RELAXED);
+ }
+}
+
+#endif // TARGET_OS_OSX || TARGET_OS_IPHONE
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/fmv/freebsd.inc b/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/fmv/freebsd.inc
new file mode 100644
index 000000000000..793adef44b93
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/fmv/freebsd.inc
@@ -0,0 +1,27 @@
+void __init_cpu_features_resolver(unsigned long hwcap,
+ const __ifunc_arg_t *arg) {
+ if (__aarch64_cpu_features.features)
+ return;
+
+ __init_cpu_features_constructor(hwcap, arg);
+}
+
+void CONSTRUCTOR_ATTRIBUTE __init_cpu_features(void) {
+ unsigned long hwcap = 0;
+ unsigned long hwcap2 = 0;
+ // CPU features already initialized.
+ if (__aarch64_cpu_features.features)
+ return;
+
+ int res = 0;
+ res = elf_aux_info(AT_HWCAP, &hwcap, sizeof hwcap);
+ res |= elf_aux_info(AT_HWCAP2, &hwcap2, sizeof hwcap2);
+ if (res)
+ return;
+
+ __ifunc_arg_t arg;
+ arg._size = sizeof(__ifunc_arg_t);
+ arg._hwcap = hwcap;
+ arg._hwcap2 = hwcap2;
+ __init_cpu_features_constructor(hwcap | _IFUNC_ARG_HWCAP, &arg);
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/fmv/fuchsia.inc b/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/fmv/fuchsia.inc
new file mode 100644
index 000000000000..d8e0280f4041
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/fmv/fuchsia.inc
@@ -0,0 +1,51 @@
+#include <zircon/features.h>
+#include <zircon/syscalls.h>
+
+void __init_cpu_features_resolver() {
+ if (__aarch64_cpu_features.features)
+ return;
+
+ // This ensures the vDSO is a direct link-time dependency of anything that
+ // needs this initializer code.
+#pragma comment(lib, "zircon")
+ uint32_t features;
+ zx_status_t status = _zx_system_get_features(ZX_FEATURE_KIND_CPU, &features);
+ if (status != ZX_OK)
+ return;
+
+#define setCPUFeature(cpu_feature) \
+ __aarch64_cpu_features.features |= 1ULL << cpu_feature
+
+ if (features & ZX_ARM64_FEATURE_ISA_FP)
+ setCPUFeature(FEAT_FP);
+ if (features & ZX_ARM64_FEATURE_ISA_ASIMD)
+ setCPUFeature(FEAT_SIMD);
+ if (features & ZX_ARM64_FEATURE_ISA_AES)
+ setCPUFeature(FEAT_AES);
+ if (features & ZX_ARM64_FEATURE_ISA_PMULL)
+ setCPUFeature(FEAT_PMULL);
+ if (features & ZX_ARM64_FEATURE_ISA_SHA1)
+ setCPUFeature(FEAT_SHA1);
+ if (features & ZX_ARM64_FEATURE_ISA_SHA256)
+ setCPUFeature(FEAT_SHA2);
+ if (features & ZX_ARM64_FEATURE_ISA_CRC32)
+ setCPUFeature(FEAT_CRC);
+ if (features & ZX_ARM64_FEATURE_ISA_RDM)
+ setCPUFeature(FEAT_RDM);
+ if (features & ZX_ARM64_FEATURE_ISA_SHA3)
+ setCPUFeature(FEAT_SHA3);
+ if (features & ZX_ARM64_FEATURE_ISA_SM4)
+ setCPUFeature(FEAT_SM4);
+ if (features & ZX_ARM64_FEATURE_ISA_DP)
+ setCPUFeature(FEAT_DOTPROD);
+ if (features & ZX_ARM64_FEATURE_ISA_FHM)
+ setCPUFeature(FEAT_FP16FML);
+ if (features & ZX_ARM64_FEATURE_ISA_SHA512)
+ setCPUFeature(FEAT_SHA3);
+ if (features & ZX_ARM64_FEATURE_ISA_I8MM)
+ setCPUFeature(FEAT_I8MM);
+ if (features & ZX_ARM64_FEATURE_ISA_SVE)
+ setCPUFeature(FEAT_SVE);
+
+ setCPUFeature(FEAT_INIT);
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/fmv/mrs.inc b/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/fmv/mrs.inc
new file mode 100644
index 000000000000..32a21a2fba9a
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/fmv/mrs.inc
@@ -0,0 +1,198 @@
+#if __has_include(<sys/auxv.h>)
+#include <sys/auxv.h>
+#define HAVE_SYS_AUXV_H
+#endif
+
+
+static void __init_cpu_features_constructor(unsigned long hwcap,
+ const __ifunc_arg_t *arg) {
+#define setCPUFeature(F) __aarch64_cpu_features.features |= 1ULL << F
+#define getCPUFeature(id, ftr) __asm__("mrs %0, " #id : "=r"(ftr))
+#define extractBits(val, start, number) \
+ (val & ((1ULL << number) - 1ULL) << start) >> start
+ unsigned long hwcap2 = 0;
+ if (hwcap & _IFUNC_ARG_HWCAP)
+ hwcap2 = arg->_hwcap2;
+ if (hwcap & HWCAP_CRC32)
+ setCPUFeature(FEAT_CRC);
+ if (hwcap & HWCAP_PMULL)
+ setCPUFeature(FEAT_PMULL);
+ if (hwcap & HWCAP_FLAGM)
+ setCPUFeature(FEAT_FLAGM);
+ if (hwcap2 & HWCAP2_FLAGM2) {
+ setCPUFeature(FEAT_FLAGM);
+ setCPUFeature(FEAT_FLAGM2);
+ }
+ if (hwcap & HWCAP_SM3 && hwcap & HWCAP_SM4)
+ setCPUFeature(FEAT_SM4);
+ if (hwcap & HWCAP_ASIMDDP)
+ setCPUFeature(FEAT_DOTPROD);
+ if (hwcap & HWCAP_ASIMDFHM)
+ setCPUFeature(FEAT_FP16FML);
+ if (hwcap & HWCAP_FPHP) {
+ setCPUFeature(FEAT_FP16);
+ setCPUFeature(FEAT_FP);
+ }
+ if (hwcap & HWCAP_DIT)
+ setCPUFeature(FEAT_DIT);
+ if (hwcap & HWCAP_ASIMDRDM)
+ setCPUFeature(FEAT_RDM);
+ if (hwcap & HWCAP_ILRCPC)
+ setCPUFeature(FEAT_RCPC2);
+ if (hwcap & HWCAP_AES)
+ setCPUFeature(FEAT_AES);
+ if (hwcap & HWCAP_SHA1)
+ setCPUFeature(FEAT_SHA1);
+ if (hwcap & HWCAP_SHA2)
+ setCPUFeature(FEAT_SHA2);
+ if (hwcap & HWCAP_JSCVT)
+ setCPUFeature(FEAT_JSCVT);
+ if (hwcap & HWCAP_FCMA)
+ setCPUFeature(FEAT_FCMA);
+ if (hwcap & HWCAP_SB)
+ setCPUFeature(FEAT_SB);
+ if (hwcap & HWCAP_SSBS)
+ setCPUFeature(FEAT_SSBS2);
+ if (hwcap2 & HWCAP2_MTE) {
+ setCPUFeature(FEAT_MEMTAG);
+ setCPUFeature(FEAT_MEMTAG2);
+ }
+ if (hwcap2 & HWCAP2_MTE3) {
+ setCPUFeature(FEAT_MEMTAG);
+ setCPUFeature(FEAT_MEMTAG2);
+ setCPUFeature(FEAT_MEMTAG3);
+ }
+ if (hwcap2 & HWCAP2_SVEAES)
+ setCPUFeature(FEAT_SVE_AES);
+ if (hwcap2 & HWCAP2_SVEPMULL) {
+ setCPUFeature(FEAT_SVE_AES);
+ setCPUFeature(FEAT_SVE_PMULL128);
+ }
+ if (hwcap2 & HWCAP2_SVEBITPERM)
+ setCPUFeature(FEAT_SVE_BITPERM);
+ if (hwcap2 & HWCAP2_SVESHA3)
+ setCPUFeature(FEAT_SVE_SHA3);
+ if (hwcap2 & HWCAP2_SVESM4)
+ setCPUFeature(FEAT_SVE_SM4);
+ if (hwcap2 & HWCAP2_DCPODP)
+ setCPUFeature(FEAT_DPB2);
+ if (hwcap & HWCAP_ATOMICS)
+ setCPUFeature(FEAT_LSE);
+ if (hwcap2 & HWCAP2_RNG)
+ setCPUFeature(FEAT_RNG);
+ if (hwcap2 & HWCAP2_I8MM)
+ setCPUFeature(FEAT_I8MM);
+ if (hwcap2 & HWCAP2_EBF16)
+ setCPUFeature(FEAT_EBF16);
+ if (hwcap2 & HWCAP2_SVE_EBF16)
+ setCPUFeature(FEAT_SVE_EBF16);
+ if (hwcap2 & HWCAP2_DGH)
+ setCPUFeature(FEAT_DGH);
+ if (hwcap2 & HWCAP2_FRINT)
+ setCPUFeature(FEAT_FRINTTS);
+ if (hwcap2 & HWCAP2_SVEI8MM)
+ setCPUFeature(FEAT_SVE_I8MM);
+ if (hwcap2 & HWCAP2_SVEF32MM)
+ setCPUFeature(FEAT_SVE_F32MM);
+ if (hwcap2 & HWCAP2_SVEF64MM)
+ setCPUFeature(FEAT_SVE_F64MM);
+ if (hwcap2 & HWCAP2_BTI)
+ setCPUFeature(FEAT_BTI);
+ if (hwcap2 & HWCAP2_RPRES)
+ setCPUFeature(FEAT_RPRES);
+ if (hwcap2 & HWCAP2_WFXT)
+ setCPUFeature(FEAT_WFXT);
+ if (hwcap2 & HWCAP2_SME)
+ setCPUFeature(FEAT_SME);
+ if (hwcap2 & HWCAP2_SME_I16I64)
+ setCPUFeature(FEAT_SME_I64);
+ if (hwcap2 & HWCAP2_SME_F64F64)
+ setCPUFeature(FEAT_SME_F64);
+ if (hwcap2 & HWCAP2_MOPS)
+ setCPUFeature(FEAT_MOPS);
+ if (hwcap & HWCAP_CPUID) {
+ unsigned long ftr;
+ getCPUFeature(ID_AA64PFR1_EL1, ftr);
+ // ID_AA64PFR1_EL1.MTE >= 0b0001
+ if (extractBits(ftr, 8, 4) >= 0x1)
+ setCPUFeature(FEAT_MEMTAG);
+ // ID_AA64PFR1_EL1.SSBS == 0b0001
+ if (extractBits(ftr, 4, 4) == 0x1)
+ setCPUFeature(FEAT_SSBS);
+ // ID_AA64PFR1_EL1.SME == 0b0010
+ if (extractBits(ftr, 24, 4) == 0x2)
+ setCPUFeature(FEAT_SME2);
+ getCPUFeature(ID_AA64PFR0_EL1, ftr);
+ // ID_AA64PFR0_EL1.FP != 0b1111
+ if (extractBits(ftr, 16, 4) != 0xF) {
+ setCPUFeature(FEAT_FP);
+ // ID_AA64PFR0_EL1.AdvSIMD has the same value as ID_AA64PFR0_EL1.FP
+ setCPUFeature(FEAT_SIMD);
+ }
+ // ID_AA64PFR0_EL1.SVE != 0b0000
+ if (extractBits(ftr, 32, 4) != 0x0) {
+      // Read ID_AA64ZFR0_EL1 (S3_0_C0_C4_4); the register name is only
+      // supported when SVE is enabled.
+ getCPUFeature(S3_0_C0_C4_4, ftr);
+ // ID_AA64ZFR0_EL1.SVEver == 0b0000
+ if (extractBits(ftr, 0, 4) == 0x0)
+ setCPUFeature(FEAT_SVE);
+ // ID_AA64ZFR0_EL1.SVEver == 0b0001
+ if (extractBits(ftr, 0, 4) == 0x1)
+ setCPUFeature(FEAT_SVE2);
+ // ID_AA64ZFR0_EL1.BF16 != 0b0000
+ if (extractBits(ftr, 20, 4) != 0x0)
+ setCPUFeature(FEAT_SVE_BF16);
+ }
+ getCPUFeature(ID_AA64ISAR0_EL1, ftr);
+ // ID_AA64ISAR0_EL1.SHA3 != 0b0000
+ if (extractBits(ftr, 32, 4) != 0x0)
+ setCPUFeature(FEAT_SHA3);
+ getCPUFeature(ID_AA64ISAR1_EL1, ftr);
+ // ID_AA64ISAR1_EL1.DPB >= 0b0001
+ if (extractBits(ftr, 0, 4) >= 0x1)
+ setCPUFeature(FEAT_DPB);
+ // ID_AA64ISAR1_EL1.LRCPC != 0b0000
+ if (extractBits(ftr, 20, 4) != 0x0)
+ setCPUFeature(FEAT_RCPC);
+ // ID_AA64ISAR1_EL1.LRCPC == 0b0011
+ if (extractBits(ftr, 20, 4) == 0x3)
+ setCPUFeature(FEAT_RCPC3);
+ // ID_AA64ISAR1_EL1.SPECRES == 0b0001
+ if (extractBits(ftr, 40, 4) == 0x2)
+ setCPUFeature(FEAT_PREDRES);
+ // ID_AA64ISAR1_EL1.BF16 != 0b0000
+ if (extractBits(ftr, 44, 4) != 0x0)
+ setCPUFeature(FEAT_BF16);
+ // ID_AA64ISAR1_EL1.LS64 >= 0b0001
+ if (extractBits(ftr, 60, 4) >= 0x1)
+ setCPUFeature(FEAT_LS64);
+ // ID_AA64ISAR1_EL1.LS64 >= 0b0010
+ if (extractBits(ftr, 60, 4) >= 0x2)
+ setCPUFeature(FEAT_LS64_V);
+ // ID_AA64ISAR1_EL1.LS64 >= 0b0011
+ if (extractBits(ftr, 60, 4) >= 0x3)
+ setCPUFeature(FEAT_LS64_ACCDATA);
+ } else {
+ // Set some features in case of no CPUID support
+ if (hwcap & (HWCAP_FP | HWCAP_FPHP)) {
+ setCPUFeature(FEAT_FP);
+ // FP and AdvSIMD fields have the same value
+ setCPUFeature(FEAT_SIMD);
+ }
+ if (hwcap & HWCAP_DCPOP || hwcap2 & HWCAP2_DCPODP)
+ setCPUFeature(FEAT_DPB);
+ if (hwcap & HWCAP_LRCPC || hwcap & HWCAP_ILRCPC)
+ setCPUFeature(FEAT_RCPC);
+ if (hwcap2 & HWCAP2_BF16 || hwcap2 & HWCAP2_EBF16)
+ setCPUFeature(FEAT_BF16);
+ if (hwcap2 & HWCAP2_SVEBF16)
+ setCPUFeature(FEAT_SVE_BF16);
+ if (hwcap2 & HWCAP2_SVE2 && hwcap & HWCAP_SVE)
+ setCPUFeature(FEAT_SVE2);
+ if (hwcap & HWCAP_SHA3)
+ setCPUFeature(FEAT_SHA3);
+ }
+ setCPUFeature(FEAT_INIT);
+}
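
The block above slices 4-bit fields out of the AArch64 ID registers. As a minimal sketch of the semantics assumed for the extractBits helper (its real definition lives earlier in mrs.inc and is not part of this hunk):

#include <stdint.h>
#include <stdio.h>

// Sketch of the assumed helper: take `len` bits of `val` starting at bit
// `start` (bit 0 = LSB). Every ID register field tested above is 4 bits.
static uint64_t extractBits(uint64_t val, unsigned start, unsigned len) {
  return (val >> start) & ((1ULL << len) - 1);
}

int main(void) {
  // A made-up ID_AA64PFR0_EL1 value: FP (bits 19:16) = 0b0000 (implemented),
  // SVE (bits 35:32) = 0b0001.
  uint64_t pfr0 = 0x1ULL << 32;
  printf("FP  = %llu\n", (unsigned long long)extractBits(pfr0, 16, 4)); // 0
  printf("SVE = %llu\n", (unsigned long long)extractBits(pfr0, 32, 4)); // 1
  return 0;
}
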
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/fmv/sysauxv.inc b/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/fmv/sysauxv.inc
new file mode 100644
index 000000000000..fb5722c4306f
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/fmv/sysauxv.inc
@@ -0,0 +1,21 @@
+void __init_cpu_features_resolver(unsigned long hwcap,
+ const __ifunc_arg_t *arg) {
+ if (__aarch64_cpu_features.features)
+ return;
+ __init_cpu_features_constructor(hwcap, arg);
+}
+
+void CONSTRUCTOR_ATTRIBUTE __init_cpu_features(void) {
+ // CPU features already initialized.
+ if (__aarch64_cpu_features.features)
+ return;
+
+ unsigned long hwcap = getauxval(AT_HWCAP);
+ unsigned long hwcap2 = getauxval(AT_HWCAP2);
+
+ __ifunc_arg_t arg;
+ arg._size = sizeof(__ifunc_arg_t);
+ arg._hwcap = hwcap;
+ arg._hwcap2 = hwcap2;
+ __init_cpu_features_constructor(hwcap | _IFUNC_ARG_HWCAP, &arg);
+}
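
__init_cpu_features_resolver exists because ifunc resolvers may run before ordinary constructors, so they must be able to initialize the feature bits on demand. A hedged sketch of a hand-written resolver, assuming the runtime declarations above are in scope; add_lse/add_llsc are hypothetical, and the premise that setCPUFeature(F) sets bit F of __aarch64_cpu_features.features is an implementation detail of this runtime:

typedef int (*add_fn)(int *, int);

static int add_lse(int *p, int v) { return *p += v; }  // stand-in fast path
static int add_llsc(int *p, int v) { return *p += v; } // stand-in fallback

static add_fn resolve_add(unsigned long hwcap, const __ifunc_arg_t *arg) {
  __init_cpu_features_resolver(hwcap, arg);
  if (__aarch64_cpu_features.features & (1ULL << FEAT_LSE))
    return add_lse;
  return add_llsc;
}

int add(int *p, int v) __attribute__((ifunc("resolve_add")));
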
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/fmv/unimplemented.inc b/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/fmv/unimplemented.inc
new file mode 100644
index 000000000000..dc34624807b7
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/fmv/unimplemented.inc
@@ -0,0 +1,8 @@
+// On platforms that have not implemented this yet, we provide an implementation
+// that does not claim support for any features by leaving
+// __aarch64_cpu_features.features initialized to 0.
+
+void __init_cpu_features_resolver(unsigned long hwcap,
+ const __ifunc_arg_t *arg) {}
+
+void __init_cpu_features(void) {}
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/hwcap.inc b/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/hwcap.inc
new file mode 100644
index 000000000000..7ddc125b26da
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/hwcap.inc
@@ -0,0 +1,183 @@
+#if __has_include(<sys/hwcap.h>)
+#include <sys/hwcap.h>
+#define HAVE_SYS_HWCAP_H
+#endif
+
+#ifndef _IFUNC_ARG_HWCAP
+#define _IFUNC_ARG_HWCAP (1ULL << 62)
+#endif
+#ifndef AT_HWCAP
+#define AT_HWCAP 16
+#endif
+#ifndef HWCAP_CPUID
+#define HWCAP_CPUID (1 << 11)
+#endif
+#ifndef HWCAP_FP
+#define HWCAP_FP (1 << 0)
+#endif
+#ifndef HWCAP_ASIMD
+#define HWCAP_ASIMD (1 << 1)
+#endif
+#ifndef HWCAP_AES
+#define HWCAP_AES (1 << 3)
+#endif
+#ifndef HWCAP_PMULL
+#define HWCAP_PMULL (1 << 4)
+#endif
+#ifndef HWCAP_SHA1
+#define HWCAP_SHA1 (1 << 5)
+#endif
+#ifndef HWCAP_SHA2
+#define HWCAP_SHA2 (1 << 6)
+#endif
+#ifndef HWCAP_CRC32
+#define HWCAP_CRC32 (1 << 7)
+#endif
+#ifndef HWCAP_ATOMICS
+#define HWCAP_ATOMICS (1 << 8)
+#endif
+#ifndef HWCAP_FPHP
+#define HWCAP_FPHP (1 << 9)
+#endif
+#ifndef HWCAP_ASIMDHP
+#define HWCAP_ASIMDHP (1 << 10)
+#endif
+#ifndef HWCAP_ASIMDRDM
+#define HWCAP_ASIMDRDM (1 << 12)
+#endif
+#ifndef HWCAP_JSCVT
+#define HWCAP_JSCVT (1 << 13)
+#endif
+#ifndef HWCAP_FCMA
+#define HWCAP_FCMA (1 << 14)
+#endif
+#ifndef HWCAP_LRCPC
+#define HWCAP_LRCPC (1 << 15)
+#endif
+#ifndef HWCAP_DCPOP
+#define HWCAP_DCPOP (1 << 16)
+#endif
+#ifndef HWCAP_SHA3
+#define HWCAP_SHA3 (1 << 17)
+#endif
+#ifndef HWCAP_SM3
+#define HWCAP_SM3 (1 << 18)
+#endif
+#ifndef HWCAP_SM4
+#define HWCAP_SM4 (1 << 19)
+#endif
+#ifndef HWCAP_ASIMDDP
+#define HWCAP_ASIMDDP (1 << 20)
+#endif
+#ifndef HWCAP_SHA512
+#define HWCAP_SHA512 (1 << 21)
+#endif
+#ifndef HWCAP_SVE
+#define HWCAP_SVE (1 << 22)
+#endif
+#ifndef HWCAP_ASIMDFHM
+#define HWCAP_ASIMDFHM (1 << 23)
+#endif
+#ifndef HWCAP_DIT
+#define HWCAP_DIT (1 << 24)
+#endif
+#ifndef HWCAP_ILRCPC
+#define HWCAP_ILRCPC (1 << 26)
+#endif
+#ifndef HWCAP_FLAGM
+#define HWCAP_FLAGM (1 << 27)
+#endif
+#ifndef HWCAP_SSBS
+#define HWCAP_SSBS (1 << 28)
+#endif
+#ifndef HWCAP_SB
+#define HWCAP_SB (1 << 29)
+#endif
+
+#ifndef AT_HWCAP2
+#define AT_HWCAP2 26
+#endif
+#ifndef HWCAP2_DCPODP
+#define HWCAP2_DCPODP (1 << 0)
+#endif
+#ifndef HWCAP2_SVE2
+#define HWCAP2_SVE2 (1 << 1)
+#endif
+#ifndef HWCAP2_SVEAES
+#define HWCAP2_SVEAES (1 << 2)
+#endif
+#ifndef HWCAP2_SVEPMULL
+#define HWCAP2_SVEPMULL (1 << 3)
+#endif
+#ifndef HWCAP2_SVEBITPERM
+#define HWCAP2_SVEBITPERM (1 << 4)
+#endif
+#ifndef HWCAP2_SVESHA3
+#define HWCAP2_SVESHA3 (1 << 5)
+#endif
+#ifndef HWCAP2_SVESM4
+#define HWCAP2_SVESM4 (1 << 6)
+#endif
+#ifndef HWCAP2_FLAGM2
+#define HWCAP2_FLAGM2 (1 << 7)
+#endif
+#ifndef HWCAP2_FRINT
+#define HWCAP2_FRINT (1 << 8)
+#endif
+#ifndef HWCAP2_SVEI8MM
+#define HWCAP2_SVEI8MM (1 << 9)
+#endif
+#ifndef HWCAP2_SVEF32MM
+#define HWCAP2_SVEF32MM (1 << 10)
+#endif
+#ifndef HWCAP2_SVEF64MM
+#define HWCAP2_SVEF64MM (1 << 11)
+#endif
+#ifndef HWCAP2_SVEBF16
+#define HWCAP2_SVEBF16 (1 << 12)
+#endif
+#ifndef HWCAP2_I8MM
+#define HWCAP2_I8MM (1 << 13)
+#endif
+#ifndef HWCAP2_BF16
+#define HWCAP2_BF16 (1 << 14)
+#endif
+#ifndef HWCAP2_DGH
+#define HWCAP2_DGH (1 << 15)
+#endif
+#ifndef HWCAP2_RNG
+#define HWCAP2_RNG (1 << 16)
+#endif
+#ifndef HWCAP2_BTI
+#define HWCAP2_BTI (1 << 17)
+#endif
+#ifndef HWCAP2_MTE
+#define HWCAP2_MTE (1 << 18)
+#endif
+#ifndef HWCAP2_RPRES
+#define HWCAP2_RPRES (1 << 21)
+#endif
+#ifndef HWCAP2_MTE3
+#define HWCAP2_MTE3 (1 << 22)
+#endif
+#ifndef HWCAP2_SME
+#define HWCAP2_SME (1 << 23)
+#endif
+#ifndef HWCAP2_SME_I16I64
+#define HWCAP2_SME_I16I64 (1 << 24)
+#endif
+#ifndef HWCAP2_SME_F64F64
+#define HWCAP2_SME_F64F64 (1 << 25)
+#endif
+#ifndef HWCAP2_WFXT
+#define HWCAP2_WFXT (1UL << 31)
+#endif
+#ifndef HWCAP2_EBF16
+#define HWCAP2_EBF16 (1ULL << 32)
+#endif
+#ifndef HWCAP2_SVE_EBF16
+#define HWCAP2_SVE_EBF16 (1ULL << 33)
+#endif
+#ifndef HWCAP2_MOPS
+#define HWCAP2_MOPS (1ULL << 43)
+#endif
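
Note the suffixes on the high bits: HWCAP2_WFXT needs 1UL, and everything from bit 32 up needs 1ULL, because a plain int literal cannot be shifted that far. A small self-contained illustration:

#include <stdio.h>

int main(void) {
  // 1 << 31 overflows a 32-bit signed int (undefined behaviour since C99),
  // and 1 << 32 exceeds the width of int entirely; the unsigned long and
  // unsigned long long suffixes sidestep both problems.
  unsigned long long wfxt  = 1UL << 31;
  unsigned long long ebf16 = 1ULL << 32;
  printf("%llx %llx\n", wfxt, ebf16); // 80000000 100000000
  return 0;
}
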
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/lse_atomics/android.inc b/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/lse_atomics/android.inc
new file mode 100644
index 000000000000..94bf64a5b0b0
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/lse_atomics/android.inc
@@ -0,0 +1,28 @@
+#include <string.h>
+#include <sys/auxv.h>
+#include <sys/system_properties.h>
+
+static bool __isExynos9810(void) {
+ char arch[PROP_VALUE_MAX];
+ return __system_property_get("ro.arch", arch) > 0 &&
+ strncmp(arch, "exynos9810", sizeof("exynos9810") - 1) == 0;
+}
+
+static void CONSTRUCTOR_ATTRIBUTE init_have_lse_atomics(void) {
+ unsigned long hwcap = getauxval(AT_HWCAP);
+ _Bool result = (hwcap & HWCAP_ATOMICS) != 0;
+ if (result) {
+ // Some cores in the Exynos 9810 CPU are ARMv8.2 and others are ARMv8.0;
+ // only the former support LSE atomics. However, the kernel in the
+ // initial Android 8.0 release of Galaxy S9/S9+ devices incorrectly
+ // reported the feature as being supported.
+ //
+ // The kernel appears to have been corrected to mark it unsupported as of
+ // the Android 9.0 release on those devices, and this issue has not been
+ // observed anywhere else. Thus, this workaround may be removed if
+ // compiler-rt ever drops support for Android 8.0.
+ if (__isExynos9810())
+ result = false;
+ }
+ __aarch64_have_lse_atomics = result;
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/lse_atomics/freebsd.inc b/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/lse_atomics/freebsd.inc
new file mode 100644
index 000000000000..4a1f9c2c27c8
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/lse_atomics/freebsd.inc
@@ -0,0 +1,5 @@
+static void CONSTRUCTOR_ATTRIBUTE init_have_lse_atomics(void) {
+ unsigned long hwcap;
+ int result = elf_aux_info(AT_HWCAP, &hwcap, sizeof hwcap);
+ __aarch64_have_lse_atomics = result == 0 && (hwcap & HWCAP_ATOMICS) != 0;
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/lse_atomics/fuchsia.inc b/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/lse_atomics/fuchsia.inc
new file mode 100644
index 000000000000..91eac70ae6c5
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/lse_atomics/fuchsia.inc
@@ -0,0 +1,12 @@
+#include <zircon/features.h>
+#include <zircon/syscalls.h>
+
+static void CONSTRUCTOR_ATTRIBUTE init_have_lse_atomics(void) {
+ // This ensures the vDSO is a direct link-time dependency of anything that
+ // needs this initializer code.
+#pragma comment(lib, "zircon")
+ uint32_t features;
+ zx_status_t status = _zx_system_get_features(ZX_FEATURE_KIND_CPU, &features);
+ __aarch64_have_lse_atomics =
+ status == ZX_OK && (features & ZX_ARM64_FEATURE_ISA_ATOMICS) != 0;
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/lse_atomics/sysauxv.inc b/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/lse_atomics/sysauxv.inc
new file mode 100644
index 000000000000..6642c1f5b60b
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/aarch64/lse_atomics/sysauxv.inc
@@ -0,0 +1,6 @@
+#include <sys/auxv.h>
+
+static void CONSTRUCTOR_ATTRIBUTE init_have_lse_atomics(void) {
+ unsigned long hwcap = getauxval(AT_HWCAP);
+ __aarch64_have_lse_atomics = (hwcap & HWCAP_ATOMICS) != 0;
+}
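
The same query is easy to reproduce from application code; a minimal sketch for Linux with glibc's getauxval (AArch64 only):

#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP_ATOMICS
#define HWCAP_ATOMICS (1 << 8)
#endif

int main(void) {
  unsigned long hwcap = getauxval(AT_HWCAP);
  printf("LSE atomics: %s\n", (hwcap & HWCAP_ATOMICS) ? "yes" : "no");
  return 0;
}
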
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/cpu_model.h b/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/cpu_model.h
new file mode 100644
index 000000000000..924ca89cf60f
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/cpu_model.h
@@ -0,0 +1,41 @@
+//===-- cpu_model.h - Utilities for cpu model detection -----------*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements common utilities for runtime cpu model detection.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef COMPILER_RT_LIB_BUILTINS_CPU_MODEL_COMMON_H
+#define COMPILER_RT_LIB_BUILTINS_CPU_MODEL_COMMON_H
+
+#define bool int
+#define true 1
+#define false 0
+
+#ifndef __has_attribute
+#define __has_attribute(attr) 0
+#endif
+
+#if __has_attribute(constructor)
+#if __GNUC__ >= 9
+// Ordinarily init priorities below 101 are disallowed as they are reserved for
+// the implementation. However, we are the implementation, so silence the
+// diagnostic, since it doesn't apply to us.
+#pragma GCC diagnostic ignored "-Wprio-ctor-dtor"
+#endif
+// We're choosing init priority 90 to force our constructors to run before any
+// constructors in the end user application (starting at priority 101). This
+// value matches the libgcc choice for the same functions.
+#define CONSTRUCTOR_ATTRIBUTE __attribute__((constructor(90)))
+#else
+// FIXME: For MSVC, we should make a function pointer global in .CRT$X?? so that
+// this runs during initialization.
+#define CONSTRUCTOR_ATTRIBUTE
+#endif
+
+#endif
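
The priority ordering argued in the comment above is easy to observe directly; with GCC or Clang the following prints "runtime" before "user" (and, without the pragma above, GCC 9+ warns that priority 90 is reserved):

#include <stdio.h>

__attribute__((constructor(90))) static void runtime_init(void) {
  puts("runtime"); // lower priority numbers run earlier
}

__attribute__((constructor(101))) static void user_init(void) {
  puts("user"); // 101 is the first priority available to applications
}

int main(void) { return 0; }
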
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model.c b/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/x86.c
index ebc2c522c83f..0750e29f989a 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/cpu_model/x86.c
@@ -1,4 +1,4 @@
-//===-- cpu_model.c - Support for __cpu_model builtin ------------*- C -*-===//
+//===-- cpu_model/x86.c - Support for __cpu_model builtin --------*- C -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -8,39 +8,25 @@
//
// This file is based on LLVM's lib/Support/Host.cpp.
// It implements the operating system Host concept and builtin
-// __cpu_model for the compiler_rt library for x86 and
-// __aarch64_have_lse_atomics for AArch64.
+// __cpu_model for the compiler_rt library for x86.
//
//===----------------------------------------------------------------------===//
-#if defined(HAVE_INIT_PRIORITY)
-#define CONSTRUCTOR_ATTRIBUTE __attribute__((__constructor__ 101))
-#elif __has_attribute(__constructor__)
-#define CONSTRUCTOR_ATTRIBUTE __attribute__((__constructor__))
-#else
-// FIXME: For MSVC, we should make a function pointer global in .CRT$X?? so that
-// this runs during initialization.
-#define CONSTRUCTOR_ATTRIBUTE
+#include "cpu_model.h"
+
+#if !(defined(__i386__) || defined(_M_IX86) || defined(__x86_64__) || \
+ defined(_M_X64))
+#error This file is intended only for x86-based targets
#endif
-#if (defined(__i386__) || defined(_M_IX86) || defined(__x86_64__) || \
- defined(_M_X64)) && \
- (defined(__GNUC__) || defined(__clang__) || defined(_MSC_VER))
+#if defined(__GNUC__) || defined(__clang__) || defined(_MSC_VER)
#include <assert.h>
-#define bool int
-#define true 1
-#define false 0
-
#ifdef _MSC_VER
#include <intrin.h>
#endif
-#ifndef __has_attribute
-#define __has_attribute(attr) 0
-#endif
-
enum VendorSignatures {
SIG_INTEL = 0x756e6547, // Genu
SIG_AMD = 0x68747541, // Auth
@@ -69,6 +55,10 @@ enum ProcessorTypes {
INTEL_GOLDMONT_PLUS,
INTEL_TREMONT,
AMDFAM19H,
+ ZHAOXIN_FAM7H,
+ INTEL_SIERRAFOREST,
+ INTEL_GRANDRIDGE,
+ INTEL_CLEARWATERFOREST,
CPU_TYPE_MAX
};
@@ -100,6 +90,13 @@ enum ProcessorSubtypes {
INTEL_COREI7_ALDERLAKE,
AMDFAM19H_ZNVER3,
INTEL_COREI7_ROCKETLAKE,
+ ZHAOXIN_FAM7H_LUJIAZUI,
+ AMDFAM19H_ZNVER4,
+ INTEL_COREI7_GRANITERAPIDS,
+ INTEL_COREI7_GRANITERAPIDS_D,
+ INTEL_COREI7_ARROWLAKE,
+ INTEL_COREI7_ARROWLAKE_S,
+ INTEL_COREI7_PANTHERLAKE,
CPU_SUBTYPE_MAX
};
@@ -142,6 +139,20 @@ enum ProcessorFeatures {
FEATURE_AVX512BITALG,
FEATURE_AVX512BF16,
FEATURE_AVX512VP2INTERSECT,
+
+ FEATURE_CMPXCHG16B = 46,
+ FEATURE_F16C = 49,
+ FEATURE_LAHF_LM = 54,
+ FEATURE_LM,
+ FEATURE_WP,
+ FEATURE_LZCNT,
+ FEATURE_MOVBE,
+
+ FEATURE_AVX512FP16 = 94,
+ FEATURE_X86_64_BASELINE,
+ FEATURE_X86_64_V2,
+ FEATURE_X86_64_V3,
+ FEATURE_X86_64_V4,
CPU_FEATURE_MAX
};
@@ -149,7 +160,7 @@ enum ProcessorFeatures {
// Check motivated by bug reports for OpenSSL crashing on CPUs without CPUID
// support. Consequently, for i386, the presence of CPUID is checked first
// via the corresponding eflags bit.
-static bool isCpuIdSupported() {
+static bool isCpuIdSupported(void) {
#if defined(__GNUC__) || defined(__clang__)
#if defined(__i386__)
int __cpuid_supported;
@@ -288,12 +299,12 @@ static void detectX86FamilyModel(unsigned EAX, unsigned *Family,
}
}
-static const char *
-getIntelProcessorTypeAndSubtype(unsigned Family, unsigned Model,
- const unsigned *Features,
- unsigned *Type, unsigned *Subtype) {
-#define testFeature(F) \
- (Features[F / 32] & (1 << (F % 32))) != 0
+static const char *getIntelProcessorTypeAndSubtype(unsigned Family,
+ unsigned Model,
+ const unsigned *Features,
+ unsigned *Type,
+ unsigned *Subtype) {
+#define testFeature(F) (Features[F / 32] & (1 << (F % 32))) != 0
// We select CPU strings to match the code in Host.cpp, but we don't use them
// in compiler-rt.
@@ -326,7 +337,7 @@ getIntelProcessorTypeAndSubtype(unsigned Family, unsigned Model,
case 0x1e: // Intel(R) Core(TM) i7 CPU 870 @ 2.93GHz.
// As found in a Summer 2010 model iMac.
case 0x1f:
- case 0x2e: // Nehalem EX
+ case 0x2e: // Nehalem EX
CPU = "nehalem";
*Type = INTEL_COREI7;
*Subtype = INTEL_COREI7_NEHALEM;
@@ -347,7 +358,7 @@ getIntelProcessorTypeAndSubtype(unsigned Family, unsigned Model,
*Subtype = INTEL_COREI7_SANDYBRIDGE;
break;
case 0x3a:
- case 0x3e: // Ivy Bridge EP
+ case 0x3e: // Ivy Bridge EP
CPU = "ivybridge";
*Type = INTEL_COREI7;
*Subtype = INTEL_COREI7_IVYBRIDGE;
@@ -374,12 +385,12 @@ getIntelProcessorTypeAndSubtype(unsigned Family, unsigned Model,
break;
// Skylake:
- case 0x4e: // Skylake mobile
- case 0x5e: // Skylake desktop
- case 0x8e: // Kaby Lake mobile
- case 0x9e: // Kaby Lake desktop
- case 0xa5: // Comet Lake-H/S
- case 0xa6: // Comet Lake-U
+ case 0x4e: // Skylake mobile
+ case 0x5e: // Skylake desktop
+ case 0x8e: // Kaby Lake mobile
+ case 0x9e: // Kaby Lake desktop
+ case 0xa5: // Comet Lake-H/S
+ case 0xa6: // Comet Lake-U
CPU = "skylake";
*Type = INTEL_COREI7;
*Subtype = INTEL_COREI7_SKYLAKE;
@@ -422,6 +433,54 @@ getIntelProcessorTypeAndSubtype(unsigned Family, unsigned Model,
*Subtype = INTEL_COREI7_ICELAKE_CLIENT;
break;
+ // Tigerlake:
+ case 0x8c:
+ case 0x8d:
+ CPU = "tigerlake";
+ *Type = INTEL_COREI7;
+ *Subtype = INTEL_COREI7_TIGERLAKE;
+ break;
+
+ // Alderlake:
+ case 0x97:
+ case 0x9a:
+ // Raptorlake:
+ case 0xb7:
+ case 0xba:
+ case 0xbf:
+ // Meteorlake:
+ case 0xaa:
+ case 0xac:
+ // Gracemont:
+ case 0xbe:
+ CPU = "alderlake";
+ *Type = INTEL_COREI7;
+ *Subtype = INTEL_COREI7_ALDERLAKE;
+ break;
+
+ // Arrowlake:
+ case 0xc5:
+ CPU = "arrowlake";
+ *Type = INTEL_COREI7;
+ *Subtype = INTEL_COREI7_ARROWLAKE;
+ break;
+
+ // Arrowlake S:
+ case 0xc6:
+ // Lunarlake:
+ case 0xbd:
+ CPU = "arrowlake-s";
+ *Type = INTEL_COREI7;
+ *Subtype = INTEL_COREI7_ARROWLAKE_S;
+ break;
+
+ // Pantherlake:
+ case 0xcc:
+ CPU = "pantherlake";
+ *Type = INTEL_COREI7;
+ *Subtype = INTEL_COREI7_PANTHERLAKE;
+ break;
+
// Icelake Xeon:
case 0x6a:
case 0x6c:
@@ -430,6 +489,8 @@ getIntelProcessorTypeAndSubtype(unsigned Family, unsigned Model,
*Subtype = INTEL_COREI7_ICELAKE_SERVER;
break;
+ // Emerald Rapids:
+ case 0xcf:
// Sapphire Rapids:
case 0x8f:
CPU = "sapphirerapids";
@@ -437,6 +498,20 @@ getIntelProcessorTypeAndSubtype(unsigned Family, unsigned Model,
*Subtype = INTEL_COREI7_SAPPHIRERAPIDS;
break;
+ // Granite Rapids:
+ case 0xad:
+ CPU = "graniterapids";
+ *Type = INTEL_COREI7;
+ *Subtype = INTEL_COREI7_GRANITERAPIDS;
+ break;
+
+ // Granite Rapids D:
+ case 0xae:
+ CPU = "graniterapids-d";
+ *Type = INTEL_COREI7;
+ *Subtype = INTEL_COREI7_GRANITERAPIDS_D;
+ break;
+
case 0x1c: // Most 45 nm Intel Atom processors
case 0x26: // 45 nm Atom Lincroft
case 0x27: // 32 nm Atom Medfield
@@ -467,10 +542,32 @@ getIntelProcessorTypeAndSubtype(unsigned Family, unsigned Model,
*Type = INTEL_GOLDMONT_PLUS;
break;
case 0x86:
+ case 0x8a: // Lakefield
+ case 0x96: // Elkhart Lake
+ case 0x9c: // Jasper Lake
CPU = "tremont";
*Type = INTEL_TREMONT;
break;
+ // Sierraforest:
+ case 0xaf:
+ CPU = "sierraforest";
+ *Type = INTEL_SIERRAFOREST;
+ break;
+
+ // Grandridge:
+ case 0xb6:
+ CPU = "grandridge";
+ *Type = INTEL_GRANDRIDGE;
+ break;
+
+ // Clearwaterforest:
+ case 0xdd:
+ CPU = "clearwaterforest";
+ *Type = INTEL_COREI7;
+ *Subtype = INTEL_CLEARWATERFOREST;
+ break;
+
case 0x57:
CPU = "knl";
*Type = INTEL_KNL;
@@ -492,10 +589,11 @@ getIntelProcessorTypeAndSubtype(unsigned Family, unsigned Model,
return CPU;
}
-static const char *
-getAMDProcessorTypeAndSubtype(unsigned Family, unsigned Model,
- const unsigned *Features,
- unsigned *Type, unsigned *Subtype) {
+static const char *getAMDProcessorTypeAndSubtype(unsigned Family,
+ unsigned Model,
+ const unsigned *Features,
+ unsigned *Type,
+ unsigned *Subtype) {
// We select CPU strings to match the code in Host.cpp, but we don't use them
// in compiler-rt.
const char *CPU = 0;
@@ -550,24 +648,59 @@ getAMDProcessorTypeAndSubtype(unsigned Family, unsigned Model,
case 23:
CPU = "znver1";
*Type = AMDFAM17H;
- if ((Model >= 0x30 && Model <= 0x3f) || Model == 0x71) {
+ if ((Model >= 0x30 && Model <= 0x3f) || (Model == 0x47) ||
+ (Model >= 0x60 && Model <= 0x67) || (Model >= 0x68 && Model <= 0x6f) ||
+ (Model >= 0x70 && Model <= 0x7f) || (Model >= 0x84 && Model <= 0x87) ||
+ (Model >= 0x90 && Model <= 0x97) || (Model >= 0x98 && Model <= 0x9f) ||
+ (Model >= 0xa0 && Model <= 0xaf)) {
+ // Family 17h Models 30h-3Fh (Starship) Zen 2
+ // Family 17h Models 47h (Cardinal) Zen 2
+ // Family 17h Models 60h-67h (Renoir) Zen 2
+ // Family 17h Models 68h-6Fh (Lucienne) Zen 2
+ // Family 17h Models 70h-7Fh (Matisse) Zen 2
+ // Family 17h Models 84h-87h (ProjectX) Zen 2
+ // Family 17h Models 90h-97h (VanGogh) Zen 2
+ // Family 17h Models 98h-9Fh (Mero) Zen 2
+ // Family 17h Models A0h-AFh (Mendocino) Zen 2
CPU = "znver2";
*Subtype = AMDFAM17H_ZNVER2;
- break; // 30h-3fh, 71h: Zen2
+ break;
}
- if (Model <= 0x0f) {
+ if ((Model >= 0x10 && Model <= 0x1f) || (Model >= 0x20 && Model <= 0x2f)) {
+ // Family 17h Models 10h-1Fh (Raven1) Zen
+ // Family 17h Models 10h-1Fh (Picasso) Zen+
+ // Family 17h Models 20h-2Fh (Raven2 x86) Zen
*Subtype = AMDFAM17H_ZNVER1;
- break; // 00h-0Fh: Zen1
+ break;
}
break;
case 25:
CPU = "znver3";
*Type = AMDFAM19H;
- if (Model <= 0x0f) {
+ if ((Model <= 0x0f) || (Model >= 0x20 && Model <= 0x2f) ||
+ (Model >= 0x30 && Model <= 0x3f) || (Model >= 0x40 && Model <= 0x4f) ||
+ (Model >= 0x50 && Model <= 0x5f)) {
+ // Family 19h Models 00h-0Fh (Genesis, Chagall) Zen 3
+ // Family 19h Models 20h-2Fh (Vermeer) Zen 3
+ // Family 19h Models 30h-3Fh (Badami) Zen 3
+ // Family 19h Models 40h-4Fh (Rembrandt) Zen 3+
+ // Family 19h Models 50h-5Fh (Cezanne) Zen 3
*Subtype = AMDFAM19H_ZNVER3;
- break; // 00h-0Fh: Zen3
+ break;
}
- break;
+ if ((Model >= 0x10 && Model <= 0x1f) || (Model >= 0x60 && Model <= 0x6f) ||
+ (Model >= 0x70 && Model <= 0x77) || (Model >= 0x78 && Model <= 0x7f) ||
+ (Model >= 0xa0 && Model <= 0xaf)) {
+ // Family 19h Models 10h-1Fh (Stones; Storm Peak) Zen 4
+ // Family 19h Models 60h-6Fh (Raphael) Zen 4
+ // Family 19h Models 70h-77h (Phoenix, Hawkpoint1) Zen 4
+ // Family 19h Models 78h-7Fh (Phoenix 2, Hawkpoint2) Zen 4
+ // Family 19h Models A0h-AFh (Stones-Dense) Zen 4
+ CPU = "znver4";
+ *Subtype = AMDFAM19H_ZNVER4;
+ break; // "znver4"
+ }
+ break; // family 19h
default:
break; // Unknown AMD CPU.
}
@@ -577,10 +710,10 @@ getAMDProcessorTypeAndSubtype(unsigned Family, unsigned Model,
static void getAvailableFeatures(unsigned ECX, unsigned EDX, unsigned MaxLeaf,
unsigned *Features) {
- unsigned EAX, EBX;
+ unsigned EAX = 0, EBX = 0;
-#define setFeature(F) \
- Features[F / 32] |= 1U << (F % 32)
+#define hasFeature(F) ((Features[F / 32] >> (F % 32)) & 1)
+#define setFeature(F) Features[F / 32] |= 1U << (F % 32)
if ((EDX >> 15) & 1)
setFeature(FEATURE_CMOV);
@@ -599,14 +732,20 @@ static void getAvailableFeatures(unsigned ECX, unsigned EDX, unsigned MaxLeaf,
setFeature(FEATURE_SSSE3);
if ((ECX >> 12) & 1)
setFeature(FEATURE_FMA);
+ if ((ECX >> 13) & 1)
+ setFeature(FEATURE_CMPXCHG16B);
if ((ECX >> 19) & 1)
setFeature(FEATURE_SSE4_1);
if ((ECX >> 20) & 1)
setFeature(FEATURE_SSE4_2);
+ if ((ECX >> 22) & 1)
+ setFeature(FEATURE_MOVBE);
if ((ECX >> 23) & 1)
setFeature(FEATURE_POPCNT);
if ((ECX >> 25) & 1)
setFeature(FEATURE_AES);
+ if ((ECX >> 29) & 1)
+ setFeature(FEATURE_F16C);
// If CPUID indicates support for XSAVE, XRESTORE and AVX, and XGETBV
// indicates that the AVX registers will be saved and restored on context
@@ -674,9 +813,14 @@ static void getAvailableFeatures(unsigned ECX, unsigned EDX, unsigned MaxLeaf,
setFeature(FEATURE_AVX5124FMAPS);
if (HasLeaf7 && ((EDX >> 8) & 1) && HasAVX512Save)
setFeature(FEATURE_AVX512VP2INTERSECT);
+ if (HasLeaf7 && ((EDX >> 23) & 1) && HasAVX512Save)
+ setFeature(FEATURE_AVX512FP16);
+  // EAX from subleaf 0 is the maximum subleaf supported. Some CPUs don't
+  // return all 0s for invalid subleaves, so check the limit.
bool HasLeaf7Subleaf1 =
- MaxLeaf >= 0x7 && !getX86CpuIDAndInfoEx(0x7, 0x1, &EAX, &EBX, &ECX, &EDX);
+ HasLeaf7 && EAX >= 1 &&
+ !getX86CpuIDAndInfoEx(0x7, 0x1, &EAX, &EBX, &ECX, &EDX);
if (HasLeaf7Subleaf1 && ((EAX >> 5) & 1) && HasAVX512Save)
setFeature(FEATURE_AVX512BF16);
@@ -685,12 +829,39 @@ static void getAvailableFeatures(unsigned ECX, unsigned EDX, unsigned MaxLeaf,
bool HasExtLeaf1 = MaxExtLevel >= 0x80000001 &&
!getX86CpuIDAndInfo(0x80000001, &EAX, &EBX, &ECX, &EDX);
- if (HasExtLeaf1 && ((ECX >> 6) & 1))
- setFeature(FEATURE_SSE4_A);
- if (HasExtLeaf1 && ((ECX >> 11) & 1))
- setFeature(FEATURE_XOP);
- if (HasExtLeaf1 && ((ECX >> 16) & 1))
- setFeature(FEATURE_FMA4);
+ if (HasExtLeaf1) {
+ if (ECX & 1)
+ setFeature(FEATURE_LAHF_LM);
+ if ((ECX >> 5) & 1)
+ setFeature(FEATURE_LZCNT);
+ if (((ECX >> 6) & 1))
+ setFeature(FEATURE_SSE4_A);
+ if (((ECX >> 11) & 1))
+ setFeature(FEATURE_XOP);
+ if (((ECX >> 16) & 1))
+ setFeature(FEATURE_FMA4);
+ if (((EDX >> 29) & 1))
+ setFeature(FEATURE_LM);
+ }
+
+ if (hasFeature(FEATURE_LM) && hasFeature(FEATURE_SSE2)) {
+ setFeature(FEATURE_X86_64_BASELINE);
+ if (hasFeature(FEATURE_CMPXCHG16B) && hasFeature(FEATURE_POPCNT) &&
+ hasFeature(FEATURE_LAHF_LM) && hasFeature(FEATURE_SSE4_2)) {
+ setFeature(FEATURE_X86_64_V2);
+ if (hasFeature(FEATURE_AVX2) && hasFeature(FEATURE_BMI) &&
+ hasFeature(FEATURE_BMI2) && hasFeature(FEATURE_F16C) &&
+ hasFeature(FEATURE_FMA) && hasFeature(FEATURE_LZCNT) &&
+ hasFeature(FEATURE_MOVBE)) {
+ setFeature(FEATURE_X86_64_V3);
+ if (hasFeature(FEATURE_AVX512BW) && hasFeature(FEATURE_AVX512CD) &&
+ hasFeature(FEATURE_AVX512DQ) && hasFeature(FEATURE_AVX512VL))
+ setFeature(FEATURE_X86_64_V4);
+ }
+ }
+ }
+
+#undef hasFeature
#undef setFeature
}
@@ -712,7 +883,7 @@ struct __processor_model {
#ifndef _WIN32
__attribute__((visibility("hidden")))
#endif
-unsigned int __cpu_features2 = 0;
+unsigned __cpu_features2[(CPU_FEATURE_MAX - 1) / 32];
// A constructor function that sets __cpu_model and __cpu_features2 with
// the right values. This needs to run only once. This constructor is
@@ -726,6 +897,8 @@ int CONSTRUCTOR_ATTRIBUTE __cpu_indicator_init(void) {
unsigned Vendor;
unsigned Model, Family;
unsigned Features[(CPU_FEATURE_MAX + 31) / 32] = {0};
+ static_assert(sizeof(Features) / sizeof(Features[0]) == 4, "");
+ static_assert(sizeof(__cpu_features2) / sizeof(__cpu_features2[0]) == 3, "");
// This function needs to run just once.
if (__cpu_model.__cpu_vendor)
@@ -743,9 +916,10 @@ int CONSTRUCTOR_ATTRIBUTE __cpu_indicator_init(void) {
// Find available features.
getAvailableFeatures(ECX, EDX, MaxLeaf, &Features[0]);
- assert((sizeof(Features)/sizeof(Features[0])) == 2);
__cpu_model.__cpu_features[0] = Features[0];
- __cpu_features2 = Features[1];
+ __cpu_features2[0] = Features[1];
+ __cpu_features2[1] = Features[2];
+ __cpu_features2[2] = Features[3];
if (Vendor == SIG_INTEL) {
// Get CPU type.
@@ -768,30 +942,4 @@ int CONSTRUCTOR_ATTRIBUTE __cpu_indicator_init(void) {
return 0;
}
-#elif defined(__aarch64__)
-// LSE support detection for out-of-line atomics
-// using HWCAP and Auxiliary vector
-_Bool __aarch64_have_lse_atomics
- __attribute__((visibility("hidden"), nocommon));
-#if defined(__has_include)
-#if __has_include(<sys/auxv.h>)
-#include <sys/auxv.h>
-#ifndef AT_HWCAP
-#define AT_HWCAP 16
-#endif
-#ifndef HWCAP_ATOMICS
-#define HWCAP_ATOMICS (1 << 8)
-#endif
-static void CONSTRUCTOR_ATTRIBUTE init_have_lse_atomics(void) {
-#if defined(__FreeBSD__)
- unsigned long hwcap;
- int result = elf_aux_info(AT_HWCAP, &hwcap, sizeof hwcap);
- __aarch64_have_lse_atomics = result == 0 && (hwcap & HWCAP_ATOMICS) != 0;
-#else
- unsigned long hwcap = getauxval(AT_HWCAP);
- __aarch64_have_lse_atomics = (hwcap & HWCAP_ATOMICS) != 0;
-#endif
-}
-#endif // defined(__has_include)
-#endif // __has_include(<sys/auxv.h>)
-#endif // defined(__aarch64__)
+#endif // defined(__GNUC__) || defined(__clang__) || defined(_MSC_VER)
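
The growth of __cpu_features2 from one word to three follows directly from the F/32, F%32 packing above: __cpu_model.__cpu_features[0] holds features 0-31 and __cpu_features2[i] holds features 32*(i+1) through 32*(i+1)+31. A hedged consumer-side sketch; the value 97 for FEATURE_X86_64_V3 follows from the enum assignments in this diff but should be treated as an assumption, and real code would prefer __builtin_cpu_supports("x86-64-v3") where the compiler provides it:

extern unsigned __cpu_features2[3];
extern int __cpu_indicator_init(void);

// Hypothetical helper mirroring the word/bit packing used above for
// features numbered 32 and up.
static int has_feature2(unsigned f) {
  return (__cpu_features2[f / 32 - 1] >> (f % 32)) & 1;
}

int main(void) {
  __cpu_indicator_init();
  return has_feature2(97) ? 0 : 1; // FEATURE_X86_64_V3 under this numbering
}
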
diff --git a/contrib/llvm-project/compiler-rt/lib/crt/crtbegin.c b/contrib/llvm-project/compiler-rt/lib/builtins/crtbegin.c
index 481c158ac777..a0860ca12ea0 100644
--- a/contrib/llvm-project/compiler-rt/lib/crt/crtbegin.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/crtbegin.c
@@ -28,7 +28,7 @@ extern fp __CTOR_LIST_END__[];
extern void __cxa_finalize(void *) __attribute__((weak));
-static void __attribute__((used)) __do_init() {
+static void __attribute__((used)) __do_init(void) {
static _Bool __initialized;
if (__builtin_expect(__initialized, 0))
return;
@@ -50,25 +50,29 @@ __attribute__((section(".init_array"),
used)) static void (*__init)(void) = __do_init;
#elif defined(__i386__) || defined(__x86_64__)
__asm__(".pushsection .init,\"ax\",@progbits\n\t"
- "call " __USER_LABEL_PREFIX__ "__do_init\n\t"
- ".popsection");
+ "call __do_init\n\t"
+ ".popsection");
#elif defined(__riscv)
__asm__(".pushsection .init,\"ax\",%progbits\n\t"
- "call " __USER_LABEL_PREFIX__ "__do_init\n\t"
+ "call __do_init\n\t"
".popsection");
#elif defined(__arm__) || defined(__aarch64__)
__asm__(".pushsection .init,\"ax\",%progbits\n\t"
- "bl " __USER_LABEL_PREFIX__ "__do_init\n\t"
- ".popsection");
+ "bl __do_init\n\t"
+ ".popsection");
+#elif defined(__mips__)
+__asm__(".pushsection .init,\"ax\",@progbits\n\t"
+ "jal __do_init\n\t"
+ ".popsection");
#elif defined(__powerpc__) || defined(__powerpc64__)
__asm__(".pushsection .init,\"ax\",@progbits\n\t"
- "bl " __USER_LABEL_PREFIX__ "__do_init\n\t"
- "nop\n\t"
- ".popsection");
+ "bl __do_init\n\t"
+ "nop\n\t"
+ ".popsection");
#elif defined(__sparc__)
__asm__(".pushsection .init,\"ax\",@progbits\n\t"
- "call " __USER_LABEL_PREFIX__ "__do_init\n\t"
- ".popsection");
+ "call __do_init\n\t"
+ ".popsection");
#else
#error "crtbegin without .init_fini array unimplemented for this architecture"
#endif // CRT_HAS_INITFINI_ARRAY
@@ -79,7 +83,7 @@ static fp __DTOR_LIST__[]
extern fp __DTOR_LIST_END__[];
#endif
-static void __attribute__((used)) __do_fini() {
+static void __attribute__((used)) __do_fini(void) {
static _Bool __finalized;
if (__builtin_expect(__finalized, 0))
return;
@@ -103,25 +107,29 @@ __attribute__((section(".fini_array"),
used)) static void (*__fini)(void) = __do_fini;
#elif defined(__i386__) || defined(__x86_64__)
__asm__(".pushsection .fini,\"ax\",@progbits\n\t"
- "call " __USER_LABEL_PREFIX__ "__do_fini\n\t"
- ".popsection");
+ "call __do_fini\n\t"
+ ".popsection");
#elif defined(__arm__) || defined(__aarch64__)
__asm__(".pushsection .fini,\"ax\",%progbits\n\t"
- "bl " __USER_LABEL_PREFIX__ "__do_fini\n\t"
- ".popsection");
+ "bl __do_fini\n\t"
+ ".popsection");
+#elif defined(__mips__)
+__asm__(".pushsection .fini,\"ax\",@progbits\n\t"
+ "jal __do_fini\n\t"
+ ".popsection");
#elif defined(__powerpc__) || defined(__powerpc64__)
__asm__(".pushsection .fini,\"ax\",@progbits\n\t"
- "bl " __USER_LABEL_PREFIX__ "__do_fini\n\t"
- "nop\n\t"
- ".popsection");
+ "bl __do_fini\n\t"
+ "nop\n\t"
+ ".popsection");
#elif defined(__riscv)
__asm__(".pushsection .fini,\"ax\",@progbits\n\t"
- "call " __USER_LABEL_PREFIX__ "__do_fini\n\t"
+ "call __do_fini\n\t"
".popsection");
#elif defined(__sparc__)
__asm__(".pushsection .fini,\"ax\",@progbits\n\t"
- "call " __USER_LABEL_PREFIX__ "__do_fini\n\t"
- ".popsection");
+ "call __do_fini\n\t"
+ ".popsection");
#else
#error "crtbegin without .init_fini array unimplemented for this architecture"
#endif // CRT_HAS_INIT_FINI_ARRAY
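
The __do_init() to __do_init(void) changes in this file are more than style: in C17 and earlier, an empty parameter list declares a function with unspecified parameters rather than one taking none (C23 finally makes () mean (void)). A two-line contrast:

static void old_style() {}      // unspecified parameters in C17 and earlier
static void prototyped(void) {} // a true prototype in every C standard

int main(void) {
  old_style();
  prototyped();
  return 0;
}
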
diff --git a/contrib/llvm-project/compiler-rt/lib/crt/crtend.c b/contrib/llvm-project/compiler-rt/lib/builtins/crtend.c
index ebcc60b89a10..ebcc60b89a10 100644
--- a/contrib/llvm-project/compiler-rt/lib/crt/crtend.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/crtend.c
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/divmoddi4.c b/contrib/llvm-project/compiler-rt/lib/builtins/divmoddi4.c
index e7cbbb1aaa30..64bbb6934601 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/divmoddi4.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/divmoddi4.c
@@ -18,8 +18,8 @@ COMPILER_RT_ABI di_int __divmoddi4(di_int a, di_int b, di_int *rem) {
const int bits_in_dword_m1 = (int)(sizeof(di_int) * CHAR_BIT) - 1;
di_int s_a = a >> bits_in_dword_m1; // s_a = a < 0 ? -1 : 0
di_int s_b = b >> bits_in_dword_m1; // s_b = b < 0 ? -1 : 0
- a = (a ^ s_a) - s_a; // negate if s_a == -1
- b = (b ^ s_b) - s_b; // negate if s_b == -1
+ a = (du_int)(a ^ s_a) - s_a; // negate if s_a == -1
+ b = (du_int)(b ^ s_b) - s_b; // negate if s_b == -1
s_b ^= s_a; // sign of quotient
du_int r;
di_int q = (__udivmoddi4(a, b, &r) ^ s_b) - s_b; // negate if s_b == -1
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/divmodsi4.c b/contrib/llvm-project/compiler-rt/lib/builtins/divmodsi4.c
index a85e2993b4e9..193f81053568 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/divmodsi4.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/divmodsi4.c
@@ -19,8 +19,8 @@ COMPILER_RT_ABI si_int __divmodsi4(si_int a, si_int b, si_int *rem) {
const int bits_in_word_m1 = (int)(sizeof(si_int) * CHAR_BIT) - 1;
si_int s_a = a >> bits_in_word_m1; // s_a = a < 0 ? -1 : 0
si_int s_b = b >> bits_in_word_m1; // s_b = b < 0 ? -1 : 0
- a = (a ^ s_a) - s_a; // negate if s_a == -1
- b = (b ^ s_b) - s_b; // negate if s_b == -1
+ a = (su_int)(a ^ s_a) - s_a; // negate if s_a == -1
+ b = (su_int)(b ^ s_b) - s_b; // negate if s_b == -1
s_b ^= s_a; // sign of quotient
su_int r;
si_int q = (__udivmodsi4(a, b, &r) ^ s_b) - s_b; // negate if s_b == -1
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/divmodti4.c b/contrib/llvm-project/compiler-rt/lib/builtins/divmodti4.c
index b243ba4ef853..185d3d47f365 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/divmodti4.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/divmodti4.c
@@ -20,8 +20,8 @@ COMPILER_RT_ABI ti_int __divmodti4(ti_int a, ti_int b, ti_int *rem) {
const int bits_in_tword_m1 = (int)(sizeof(ti_int) * CHAR_BIT) - 1;
ti_int s_a = a >> bits_in_tword_m1; // s_a = a < 0 ? -1 : 0
ti_int s_b = b >> bits_in_tword_m1; // s_b = b < 0 ? -1 : 0
- a = (a ^ s_a) - s_a; // negate if s_a == -1
- b = (b ^ s_b) - s_b; // negate if s_b == -1
+ a = (tu_int)(a ^ s_a) - s_a; // negate if s_a == -1
+ b = (tu_int)(b ^ s_b) - s_b; // negate if s_b == -1
s_b ^= s_a; // sign of quotient
tu_int r;
ti_int q = (__udivmodti4(a, b, &r) ^ s_b) - s_b; // negate if s_b == -1
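
The du_int/su_int/tu_int casts added in these three files matter at exactly one value: when a is the most negative integer, the branch-free negation (a ^ s) - s overflows in the signed type, which is undefined behaviour, while the same arithmetic done in the unsigned type wraps to the intended magnitude. In 64-bit miniature (relying, like the source, on arithmetic right shift of negative values):

#include <limits.h>
#include <stdio.h>

int main(void) {
  long long a = LLONG_MIN;
  long long s = a >> 63; // -1, since a < 0
  // Performed in unsigned arithmetic, |LLONG_MIN| is representable:
  unsigned long long mag = (unsigned long long)(a ^ s) - (unsigned long long)s;
  printf("%llu\n", mag); // 9223372036854775808
  return 0;
}
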
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/divtc3.c b/contrib/llvm-project/compiler-rt/lib/builtins/divtc3.c
index 0e4799295f32..099de5802daf 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/divtc3.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/divtc3.c
@@ -12,44 +12,45 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#include "int_lib.h"
-#include "int_math.h"
+
+#if defined(CRT_HAS_F128)
// Returns: the quotient of (a + ib) / (c + id)
-COMPILER_RT_ABI Lcomplex __divtc3(long double __a, long double __b,
- long double __c, long double __d) {
+COMPILER_RT_ABI Qcomplex __divtc3(fp_t __a, fp_t __b, fp_t __c, fp_t __d) {
int __ilogbw = 0;
- long double __logbw =
- __compiler_rt_logbl(__compiler_rt_fmaxl(crt_fabsl(__c), crt_fabsl(__d)));
+ fp_t __logbw = __compiler_rt_logbtf(
+ __compiler_rt_fmaxtf(crt_fabstf(__c), crt_fabstf(__d)));
if (crt_isfinite(__logbw)) {
__ilogbw = (int)__logbw;
- __c = __compiler_rt_scalbnl(__c, -__ilogbw);
- __d = __compiler_rt_scalbnl(__d, -__ilogbw);
+ __c = __compiler_rt_scalbntf(__c, -__ilogbw);
+ __d = __compiler_rt_scalbntf(__d, -__ilogbw);
}
- long double __denom = __c * __c + __d * __d;
- Lcomplex z;
- COMPLEX_REAL(z) =
- __compiler_rt_scalbnl((__a * __c + __b * __d) / __denom, -__ilogbw);
- COMPLEX_IMAGINARY(z) =
- __compiler_rt_scalbnl((__b * __c - __a * __d) / __denom, -__ilogbw);
- if (crt_isnan(COMPLEX_REAL(z)) && crt_isnan(COMPLEX_IMAGINARY(z))) {
+ fp_t __denom = __c * __c + __d * __d;
+ Qcomplex z;
+ COMPLEXTF_REAL(z) =
+ __compiler_rt_scalbntf((__a * __c + __b * __d) / __denom, -__ilogbw);
+ COMPLEXTF_IMAGINARY(z) =
+ __compiler_rt_scalbntf((__b * __c - __a * __d) / __denom, -__ilogbw);
+ if (crt_isnan(COMPLEXTF_REAL(z)) && crt_isnan(COMPLEXTF_IMAGINARY(z))) {
if ((__denom == 0.0) && (!crt_isnan(__a) || !crt_isnan(__b))) {
- COMPLEX_REAL(z) = crt_copysignl(CRT_INFINITY, __c) * __a;
- COMPLEX_IMAGINARY(z) = crt_copysignl(CRT_INFINITY, __c) * __b;
+ COMPLEXTF_REAL(z) = crt_copysigntf(CRT_INFINITY, __c) * __a;
+ COMPLEXTF_IMAGINARY(z) = crt_copysigntf(CRT_INFINITY, __c) * __b;
} else if ((crt_isinf(__a) || crt_isinf(__b)) && crt_isfinite(__c) &&
crt_isfinite(__d)) {
- __a = crt_copysignl(crt_isinf(__a) ? 1.0 : 0.0, __a);
- __b = crt_copysignl(crt_isinf(__b) ? 1.0 : 0.0, __b);
- COMPLEX_REAL(z) = CRT_INFINITY * (__a * __c + __b * __d);
- COMPLEX_IMAGINARY(z) = CRT_INFINITY * (__b * __c - __a * __d);
+ __a = crt_copysigntf(crt_isinf(__a) ? (fp_t)1.0 : (fp_t)0.0, __a);
+ __b = crt_copysigntf(crt_isinf(__b) ? (fp_t)1.0 : (fp_t)0.0, __b);
+ COMPLEXTF_REAL(z) = CRT_INFINITY * (__a * __c + __b * __d);
+ COMPLEXTF_IMAGINARY(z) = CRT_INFINITY * (__b * __c - __a * __d);
} else if (crt_isinf(__logbw) && __logbw > 0.0 && crt_isfinite(__a) &&
crt_isfinite(__b)) {
- __c = crt_copysignl(crt_isinf(__c) ? 1.0 : 0.0, __c);
- __d = crt_copysignl(crt_isinf(__d) ? 1.0 : 0.0, __d);
- COMPLEX_REAL(z) = 0.0 * (__a * __c + __b * __d);
- COMPLEX_IMAGINARY(z) = 0.0 * (__b * __c - __a * __d);
+ __c = crt_copysigntf(crt_isinf(__c) ? (fp_t)1.0 : (fp_t)0.0, __c);
+ __d = crt_copysigntf(crt_isinf(__d) ? (fp_t)1.0 : (fp_t)0.0, __d);
+ COMPLEXTF_REAL(z) = 0.0 * (__a * __c + __b * __d);
+ COMPLEXTF_IMAGINARY(z) = 0.0 * (__b * __c - __a * __d);
}
}
return z;
}
+
+#endif
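
The logb/scalbn dance in __divtc3 exists because the textbook formula (a*c + b*d)/(c*c + d*d) overflows long before the true quotient does. The same rescue in double-precision miniature (compile with -lm):

#include <math.h>
#include <stdio.h>

int main(void) {
  double a = 1.0, b = 0.0, c = 1e308, d = 1e308;

  // Naive formula: c*c + d*d overflows to infinity, so the real part
  // collapses to 0 instead of the representable ~5e-309.
  double naive = (a * c + b * d) / (c * c + d * d);

  // Rescaling c and d by 2^-ilogbw keeps the denominator near 1; the
  // final scalbn re-applies the factor once, after the division.
  int ilogbw = (int)logb(fmax(fabs(c), fabs(d)));
  double cs = scalbn(c, -ilogbw), ds = scalbn(d, -ilogbw);
  double scaled = scalbn((a * cs + b * ds) / (cs * cs + ds * ds), -ilogbw);

  printf("naive  = %g\n", naive);  // 0 (denominator overflowed)
  printf("scaled = %g\n", scaled); // ~5e-309, the correct answer
  return 0;
}
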
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/divtf3.c b/contrib/llvm-project/compiler-rt/lib/builtins/divtf3.c
index 5bcc9a8e4aa1..bd76763b07d3 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/divtf3.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/divtf3.c
@@ -14,7 +14,7 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#if defined(CRT_HAS_TF_MODE)
#define NUMBER_OF_HALF_ITERATIONS 4
#define NUMBER_OF_FULL_ITERATIONS 1
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/divxc3.c b/contrib/llvm-project/compiler-rt/lib/builtins/divxc3.c
index 97ffd2eac211..3423334f2006 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/divxc3.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/divxc3.c
@@ -17,16 +17,16 @@
// Returns: the quotient of (a + ib) / (c + id)
-COMPILER_RT_ABI Lcomplex __divxc3(long double __a, long double __b,
- long double __c, long double __d) {
+COMPILER_RT_ABI Lcomplex __divxc3(xf_float __a, xf_float __b, xf_float __c,
+ xf_float __d) {
int __ilogbw = 0;
- long double __logbw = crt_logbl(crt_fmaxl(crt_fabsl(__c), crt_fabsl(__d)));
+ xf_float __logbw = crt_logbl(crt_fmaxl(crt_fabsl(__c), crt_fabsl(__d)));
if (crt_isfinite(__logbw)) {
__ilogbw = (int)__logbw;
__c = crt_scalbnl(__c, -__ilogbw);
__d = crt_scalbnl(__d, -__ilogbw);
}
- long double __denom = __c * __c + __d * __d;
+ xf_float __denom = __c * __c + __d * __d;
Lcomplex z;
COMPLEX_REAL(z) = crt_scalbnl((__a * __c + __b * __d) / __denom, -__ilogbw);
COMPLEX_IMAGINARY(z) =
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/emutls.c b/contrib/llvm-project/compiler-rt/lib/builtins/emutls.c
index 98cabd917d6c..390ffb25f6cf 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/emutls.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/emutls.c
@@ -30,7 +30,7 @@
// MSVC raises a warning about a nonstandard extension being used for the 0
// sized element in this array. Disable this for warn-as-error builds.
#pragma warning(push)
-#pragma warning(disable : 4206)
+#pragma warning(disable : 4200)
#endif
typedef struct emutls_address_array {
@@ -93,7 +93,7 @@ static __inline void emutls_setspecific(emutls_address_array *value) {
pthread_setspecific(emutls_pthread_key, (void *)value);
}
-static __inline emutls_address_array *emutls_getspecific() {
+static __inline emutls_address_array *emutls_getspecific(void) {
return (emutls_address_array *)pthread_getspecific(emutls_pthread_key);
}
@@ -125,9 +125,9 @@ static __inline void emutls_init_once(void) {
pthread_once(&once, emutls_init);
}
-static __inline void emutls_lock() { pthread_mutex_lock(&emutls_mutex); }
+static __inline void emutls_lock(void) { pthread_mutex_lock(&emutls_mutex); }
-static __inline void emutls_unlock() { pthread_mutex_unlock(&emutls_mutex); }
+static __inline void emutls_unlock(void) { pthread_mutex_unlock(&emutls_mutex); }
#else // _WIN32
@@ -150,7 +150,7 @@ static void win_error(DWORD last_err, const char *hint) {
NULL, last_err, 0, (LPSTR)&buffer, 1, NULL)) {
fprintf(stderr, "Windows error: %s\n", buffer);
} else {
- fprintf(stderr, "Unkown Windows error: %s\n", hint);
+ fprintf(stderr, "Unknown Windows error: %s\n", hint);
}
LocalFree(buffer);
}
@@ -209,16 +209,16 @@ static __inline void emutls_init_once(void) {
InitOnceExecuteOnce(&once, emutls_init, NULL, NULL);
}
-static __inline void emutls_lock() { EnterCriticalSection(emutls_mutex); }
+static __inline void emutls_lock(void) { EnterCriticalSection(emutls_mutex); }
-static __inline void emutls_unlock() { LeaveCriticalSection(emutls_mutex); }
+static __inline void emutls_unlock(void) { LeaveCriticalSection(emutls_mutex); }
static __inline void emutls_setspecific(emutls_address_array *value) {
if (TlsSetValue(emutls_tls_index, (LPVOID)value) == 0)
win_abort(GetLastError(), "TlsSetValue");
}
-static __inline emutls_address_array *emutls_getspecific() {
+static __inline emutls_address_array *emutls_getspecific(void) {
LPVOID value = TlsGetValue(emutls_tls_index);
if (value == NULL) {
const DWORD err = GetLastError();
@@ -374,6 +374,21 @@ emutls_get_address_array(uintptr_t index) {
return array;
}
+#ifndef _WIN32
+// Our emulated TLS implementation relies on local state (e.g. for the pthread
+// key), and if we duplicate this state across different shared libraries,
+// accesses to the same TLS variable from different shared libraries will yield
+// different results (see https://github.com/android/ndk/issues/1551 for an
+// example). __emutls_get_address is the only external entry point for emulated
+// TLS, and by making it default visibility and weak, we can rely on the dynamic
+// linker to coalesce multiple copies at runtime and ensure a single unique copy
+// of TLS state. This is a best effort; it won't work if the user is linking
+// with -Bsymbolic or -Bsymbolic-functions, and it also won't work on Windows,
+// where the dynamic linker has no notion of coalescing weak symbols at runtime.
+// A more robust solution would be to create a separate shared library for
+// emulated TLS, to ensure a single copy of its state.
+__attribute__((visibility("default"), weak))
+#endif
void *__emutls_get_address(__emutls_control *control) {
uintptr_t index = emutls_get_index(control);
emutls_address_array *array = emutls_get_address_array(index--);
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/eprintf.c b/contrib/llvm-project/compiler-rt/lib/builtins/eprintf.c
index 89fb0e315b2e..daf90b4993ec 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/eprintf.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/eprintf.c
@@ -15,6 +15,7 @@
//
// It should never be exported from a dylib, so it is marked
// visibility hidden.
+#ifndef DONT_DEFINE_EPRINTF
#ifndef _WIN32
__attribute__((visibility("hidden")))
#endif
@@ -25,3 +26,4 @@ __eprintf(const char *format, const char *assertion_expression,
fflush(stderr);
compilerrt_abort();
}
+#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/extenddftf2.c b/contrib/llvm-project/compiler-rt/lib/builtins/extenddftf2.c
index ddf470ecd629..a61ef53147ab 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/extenddftf2.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/extenddftf2.c
@@ -9,13 +9,11 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#if defined(CRT_HAS_TF_MODE)
#define SRC_DOUBLE
#define DST_QUAD
#include "fp_extend_impl.inc"
-COMPILER_RT_ABI fp_t __extenddftf2(double a) {
- return __extendXfYf2__(a);
-}
+COMPILER_RT_ABI dst_t __extenddftf2(src_t a) { return __extendXfYf2__(a); }
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/extendhftf2.c b/contrib/llvm-project/compiler-rt/lib/builtins/extendhftf2.c
index aefe9737d34f..7609db6f06e4 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/extendhftf2.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/extendhftf2.c
@@ -10,14 +10,11 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT) && \
- defined(COMPILER_RT_HAS_FLOAT16)
+#if defined(CRT_HAS_TF_MODE) && defined(COMPILER_RT_HAS_FLOAT16)
#define SRC_HALF
#define DST_QUAD
#include "fp_extend_impl.inc"
-COMPILER_RT_ABI long double __extendhftf2(_Float16 a) {
- return __extendXfYf2__(a);
-}
+COMPILER_RT_ABI dst_t __extendhftf2(src_t a) { return __extendXfYf2__(a); }
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/extendsftf2.c b/contrib/llvm-project/compiler-rt/lib/builtins/extendsftf2.c
index cf1fd2face20..4ab2982ce514 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/extendsftf2.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/extendsftf2.c
@@ -9,13 +9,11 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#if defined(CRT_HAS_TF_MODE)
#define SRC_SINGLE
#define DST_QUAD
#include "fp_extend_impl.inc"
-COMPILER_RT_ABI fp_t __extendsftf2(float a) {
- return __extendXfYf2__(a);
-}
+COMPILER_RT_ABI dst_t __extendsftf2(src_t a) { return __extendXfYf2__(a); }
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/extendxftf2.c b/contrib/llvm-project/compiler-rt/lib/builtins/extendxftf2.c
new file mode 100644
index 000000000000..c1d97b5cfa15
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/extendxftf2.c
@@ -0,0 +1,24 @@
+//===-- lib/extendxftf2.c - long double -> quad conversion --------*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// Assumption: long double is an IEEE 80-bit floating point type padded to 128
+// bits.
+
+#define QUAD_PRECISION
+#include "fp_lib.h"
+
+#if defined(CRT_HAS_TF_MODE) && __LDBL_MANT_DIG__ == 64 && defined(__x86_64__)
+#define SRC_80
+#define DST_QUAD
+#include "fp_extend_impl.inc"
+
+COMPILER_RT_ABI tf_float __extendxftf2(xf_float a) {
+ return __extendXfYf2__(a);
+}
+
+#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/fixdfdi.c b/contrib/llvm-project/compiler-rt/lib/builtins/fixdfdi.c
index 511568fc12fd..a48facb68598 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/fixdfdi.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/fixdfdi.c
@@ -42,3 +42,7 @@ AEABI_RTABI di_int __aeabi_d2lz(fp_t a) { return __fixdfdi(a); }
COMPILER_RT_ALIAS(__fixdfdi, __aeabi_d2lz)
#endif
#endif
+
+#if defined(__MINGW32__) && defined(__arm__)
+COMPILER_RT_ALIAS(__fixdfdi, __dtoi64)
+#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/fixsfdi.c b/contrib/llvm-project/compiler-rt/lib/builtins/fixsfdi.c
index 0cf71c30311a..3a66fb9e2f06 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/fixsfdi.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/fixsfdi.c
@@ -42,3 +42,7 @@ AEABI_RTABI di_int __aeabi_f2lz(fp_t a) { return __fixsfdi(a); }
COMPILER_RT_ALIAS(__fixsfdi, __aeabi_f2lz)
#endif
#endif
+
+#if defined(__MINGW32__) && defined(__arm__)
+COMPILER_RT_ALIAS(__fixsfdi, __stoi64)
+#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/fixtfdi.c b/contrib/llvm-project/compiler-rt/lib/builtins/fixtfdi.c
index fe570e6b3755..d27a99b6f364 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/fixtfdi.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/fixtfdi.c
@@ -9,7 +9,7 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#if defined(CRT_HAS_TF_MODE)
typedef di_int fixint_t;
typedef du_int fixuint_t;
#include "fp_fixint_impl.inc"
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/fixtfsi.c b/contrib/llvm-project/compiler-rt/lib/builtins/fixtfsi.c
index a32bd964caa3..01e352acc592 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/fixtfsi.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/fixtfsi.c
@@ -9,7 +9,7 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#if defined(CRT_HAS_TF_MODE)
typedef si_int fixint_t;
typedef su_int fixuint_t;
#include "fp_fixint_impl.inc"
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/fixtfti.c b/contrib/llvm-project/compiler-rt/lib/builtins/fixtfti.c
index 19f84ce38907..491fca502113 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/fixtfti.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/fixtfti.c
@@ -9,7 +9,7 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#if defined(CRT_HAS_TF_MODE)
typedef ti_int fixint_t;
typedef tu_int fixuint_t;
#include "fp_fixint_impl.inc"
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/fixunsdfdi.c b/contrib/llvm-project/compiler-rt/lib/builtins/fixunsdfdi.c
index ccb256d2c7e0..f15f86788e85 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/fixunsdfdi.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/fixunsdfdi.c
@@ -40,3 +40,7 @@ AEABI_RTABI du_int __aeabi_d2ulz(fp_t a) { return __fixunsdfdi(a); }
COMPILER_RT_ALIAS(__fixunsdfdi, __aeabi_d2ulz)
#endif
#endif
+
+#if defined(__MINGW32__) && defined(__arm__)
+COMPILER_RT_ALIAS(__fixunsdfdi, __dtou64)
+#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/fixunssfdi.c b/contrib/llvm-project/compiler-rt/lib/builtins/fixunssfdi.c
index 647185fbabf1..e8f600df9766 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/fixunssfdi.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/fixunssfdi.c
@@ -41,3 +41,7 @@ AEABI_RTABI du_int __aeabi_f2ulz(fp_t a) { return __fixunssfdi(a); }
COMPILER_RT_ALIAS(__fixunssfdi, __aeabi_f2ulz)
#endif
#endif
+
+#if defined(__MINGW32__) && defined(__arm__)
+COMPILER_RT_ALIAS(__fixunssfdi, __stou64)
+#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/fixunstfdi.c b/contrib/llvm-project/compiler-rt/lib/builtins/fixunstfdi.c
index a0805e63db82..febdb8f5682f 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/fixunstfdi.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/fixunstfdi.c
@@ -9,7 +9,7 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#if defined(CRT_HAS_TF_MODE)
typedef du_int fixuint_t;
#include "fp_fixuint_impl.inc"
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/fixunstfsi.c b/contrib/llvm-project/compiler-rt/lib/builtins/fixunstfsi.c
index 3a1320ed3e0a..4efc387df453 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/fixunstfsi.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/fixunstfsi.c
@@ -9,7 +9,7 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#if defined(CRT_HAS_TF_MODE)
typedef su_int fixuint_t;
#include "fp_fixuint_impl.inc"
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/fixunstfti.c b/contrib/llvm-project/compiler-rt/lib/builtins/fixunstfti.c
index 23cd1ab615a7..fa9e7aa07108 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/fixunstfti.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/fixunstfti.c
@@ -9,7 +9,7 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#if defined(CRT_HAS_TF_MODE)
typedef tu_int fixuint_t;
#include "fp_fixuint_impl.inc"
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/fixunsxfdi.c b/contrib/llvm-project/compiler-rt/lib/builtins/fixunsxfdi.c
index 097a4e55e931..957c263aa3c5 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/fixunsxfdi.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/fixunsxfdi.c
@@ -26,14 +26,14 @@
// mmmm mmmm mmmm
#if defined(_MSC_VER) && !defined(__clang__)
-// MSVC throws a warning about 'unitialized variable use' here,
+// MSVC throws a warning about 'uninitialized variable use' here,
// disable it for builds that warn-as-error
#pragma warning(push)
#pragma warning(disable : 4700)
#endif
-COMPILER_RT_ABI du_int __fixunsxfdi(long double a) {
- long_double_bits fb;
+COMPILER_RT_ABI du_int __fixunsxfdi(xf_float a) {
+ xf_bits fb;
fb.f = a;
int e = (fb.u.high.s.low & 0x00007FFF) - 16383;
if (e < 0 || (fb.u.high.s.low & 0x00008000))
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/fixunsxfsi.c b/contrib/llvm-project/compiler-rt/lib/builtins/fixunsxfsi.c
index 3bc1288d38a1..a0abb82b7917 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/fixunsxfsi.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/fixunsxfsi.c
@@ -26,14 +26,14 @@
// mmmm mmmm mmmm
#if defined(_MSC_VER) && !defined(__clang__)
-// MSVC throws a warning about 'unitialized variable use' here,
+// MSVC throws a warning about 'uninitialized variable use' here,
// disable it for builds that warn-as-error
#pragma warning(push)
#pragma warning(disable : 4700)
#endif
-COMPILER_RT_ABI su_int __fixunsxfsi(long double a) {
- long_double_bits fb;
+COMPILER_RT_ABI su_int __fixunsxfsi(xf_float a) {
+ xf_bits fb;
fb.f = a;
int e = (fb.u.high.s.low & 0x00007FFF) - 16383;
if (e < 0 || (fb.u.high.s.low & 0x00008000))
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/fixunsxfti.c b/contrib/llvm-project/compiler-rt/lib/builtins/fixunsxfti.c
index 508554e4f8f6..be3f75f04f7f 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/fixunsxfti.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/fixunsxfti.c
@@ -25,8 +25,8 @@
// eeee | 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm
// mmmm mmmm mmmm
-COMPILER_RT_ABI tu_int __fixunsxfti(long double a) {
- long_double_bits fb;
+COMPILER_RT_ABI tu_int __fixunsxfti(xf_float a) {
+ xf_bits fb;
fb.f = a;
int e = (fb.u.high.s.low & 0x00007FFF) - 16383;
if (e < 0 || (fb.u.high.s.low & 0x00008000))
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/fixxfdi.c b/contrib/llvm-project/compiler-rt/lib/builtins/fixxfdi.c
index a7a0464feb9d..35d7083f56b0 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/fixxfdi.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/fixxfdi.c
@@ -25,16 +25,16 @@
// mmmm mmmm mmmm
#if defined(_MSC_VER) && !defined(__clang__)
-// MSVC throws a warning about 'unitialized variable use' here,
+// MSVC throws a warning about 'uninitialized variable use' here,
// disable it for builds that warn-as-error
#pragma warning(push)
#pragma warning(disable : 4700)
#endif
-COMPILER_RT_ABI di_int __fixxfdi(long double a) {
+COMPILER_RT_ABI di_int __fixxfdi(xf_float a) {
const di_int di_max = (di_int)((~(du_int)0) / 2);
const di_int di_min = -di_max - 1;
- long_double_bits fb;
+ xf_bits fb;
fb.f = a;
int e = (fb.u.high.s.low & 0x00007FFF) - 16383;
if (e < 0)
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/fixxfti.c b/contrib/llvm-project/compiler-rt/lib/builtins/fixxfti.c
index 90e03116e7ca..95038dfafd5d 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/fixxfti.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/fixxfti.c
@@ -24,10 +24,10 @@
// eeee | 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm
// mmmm mmmm mmmm
-COMPILER_RT_ABI ti_int __fixxfti(long double a) {
+COMPILER_RT_ABI ti_int __fixxfti(xf_float a) {
const ti_int ti_max = (ti_int)((~(tu_int)0) / 2);
const ti_int ti_min = -ti_max - 1;
- long_double_bits fb;
+ xf_bits fb;
fb.f = a;
int e = (fb.u.high.s.low & 0x00007FFF) - 16383;
if (e < 0)
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/floatdidf.c b/contrib/llvm-project/compiler-rt/lib/builtins/floatdidf.c
index 7ecb30bca71e..6da81f7a05bf 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/floatdidf.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/floatdidf.c
@@ -45,53 +45,11 @@ COMPILER_RT_ABI double __floatdidf(di_int a) {
// flags to set, and we don't want to code-gen to an unknown soft-float
// implementation.
-COMPILER_RT_ABI double __floatdidf(di_int a) {
- if (a == 0)
- return 0.0;
- const unsigned N = sizeof(di_int) * CHAR_BIT;
- const di_int s = a >> (N - 1);
- a = (a ^ s) - s;
- int sd = N - __builtin_clzll(a); // number of significant digits
- int e = sd - 1; // exponent
- if (sd > DBL_MANT_DIG) {
- // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
- // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
- // 12345678901234567890123456
- // 1 = msb 1 bit
- // P = bit DBL_MANT_DIG-1 bits to the right of 1
- // Q = bit DBL_MANT_DIG bits to the right of 1
- // R = "or" of all bits to the right of Q
- switch (sd) {
- case DBL_MANT_DIG + 1:
- a <<= 1;
- break;
- case DBL_MANT_DIG + 2:
- break;
- default:
- a = ((du_int)a >> (sd - (DBL_MANT_DIG + 2))) |
- ((a & ((du_int)(-1) >> ((N + DBL_MANT_DIG + 2) - sd))) != 0);
- };
- // finish:
- a |= (a & 4) != 0; // Or P into R
- ++a; // round - this step may add a significant bit
- a >>= 2; // dump Q and R
- // a is now rounded to DBL_MANT_DIG or DBL_MANT_DIG+1 bits
- if (a & ((du_int)1 << DBL_MANT_DIG)) {
- a >>= 1;
- ++e;
- }
- // a is now rounded to DBL_MANT_DIG bits
- } else {
- a <<= (DBL_MANT_DIG - sd);
- // a is now rounded to DBL_MANT_DIG bits
- }
- double_bits fb;
- fb.u.s.high = ((su_int)s & 0x80000000) | // sign
- ((su_int)(e + 1023) << 20) | // exponent
- ((su_int)(a >> 32) & 0x000FFFFF); // mantissa-high
- fb.u.s.low = (su_int)a; // mantissa-low
- return fb.f;
-}
+#define SRC_I64
+#define DST_DOUBLE
+#include "int_to_fp_impl.inc"
+
+COMPILER_RT_ABI double __floatdidf(di_int a) { return __floatXiYf__(a); }
#endif
#if defined(__ARM_EABI__)
@@ -101,3 +59,7 @@ AEABI_RTABI double __aeabi_l2d(di_int a) { return __floatdidf(a); }
COMPILER_RT_ALIAS(__floatdidf, __aeabi_l2d)
#endif
#endif
+
+#if defined(__MINGW32__) && defined(__arm__)
+COMPILER_RT_ALIAS(__floatdidf, __i64tod)
+#endif
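
This hunk shows the pattern repeated through the rest of these files: each hand-written integer-to-float body is deleted and replaced by a pair of `SRC_*`/`DST_*` defines plus an include of the shared template int_to_fp_impl.inc, which expands to one generic converter named `__floatXiYf__`. The template itself is not part of this diff, so the following is only a minimal self-contained sketch of the mechanism; the `_demo` names are hypothetical and the body stands in for the real rounding code.

```c
#include <stdio.h>

#define SRC_I64
#define DST_DOUBLE

/* --- roughly what an int_to_fp_impl.inc-style template selects --- */
#if defined(SRC_I64)
typedef long long src_int_t;
#elif defined(SRC_U64)
typedef unsigned long long src_int_t;
#endif

#if defined(DST_DOUBLE)
typedef double dst_fp_t;
#elif defined(DST_SINGLE)
typedef float dst_fp_t;
#endif

/* Stand-in for the shared converter; the real one does the rounding
   by hand rather than deferring to the compiler. */
static dst_fp_t float_Xi_Yf_demo(src_int_t a) { return (dst_fp_t)a; }
/* --- end of template --- */

int main(void) {
  printf("%.17g\n", float_Xi_Yf_demo(1234567890123456789LL));
  return 0;
}
```

After this change the per-type source files differ only in the two macro definitions and the exported symbol name.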
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/floatdisf.c b/contrib/llvm-project/compiler-rt/lib/builtins/floatdisf.c
index faaa1bcb3c8e..0bb88c5c518e 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/floatdisf.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/floatdisf.c
@@ -19,52 +19,11 @@
#include "int_lib.h"
-COMPILER_RT_ABI float __floatdisf(di_int a) {
- if (a == 0)
- return 0.0F;
- const unsigned N = sizeof(di_int) * CHAR_BIT;
- const di_int s = a >> (N - 1);
- a = (a ^ s) - s;
- int sd = N - __builtin_clzll(a); // number of significant digits
- si_int e = sd - 1; // exponent
- if (sd > FLT_MANT_DIG) {
- // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
- // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
- // 12345678901234567890123456
- // 1 = msb 1 bit
- // P = bit FLT_MANT_DIG-1 bits to the right of 1
- // Q = bit FLT_MANT_DIG bits to the right of 1
- // R = "or" of all bits to the right of Q
- switch (sd) {
- case FLT_MANT_DIG + 1:
- a <<= 1;
- break;
- case FLT_MANT_DIG + 2:
- break;
- default:
- a = ((du_int)a >> (sd - (FLT_MANT_DIG + 2))) |
- ((a & ((du_int)(-1) >> ((N + FLT_MANT_DIG + 2) - sd))) != 0);
- };
- // finish:
- a |= (a & 4) != 0; // Or P into R
- ++a; // round - this step may add a significant bit
- a >>= 2; // dump Q and R
- // a is now rounded to FLT_MANT_DIG or FLT_MANT_DIG+1 bits
- if (a & ((du_int)1 << FLT_MANT_DIG)) {
- a >>= 1;
- ++e;
- }
- // a is now rounded to FLT_MANT_DIG bits
- } else {
- a <<= (FLT_MANT_DIG - sd);
- // a is now rounded to FLT_MANT_DIG bits
- }
- float_bits fb;
- fb.u = ((su_int)s & 0x80000000) | // sign
- ((e + 127) << 23) | // exponent
- ((su_int)a & 0x007FFFFF); // mantissa
- return fb.f;
-}
+#define SRC_I64
+#define DST_SINGLE
+#include "int_to_fp_impl.inc"
+
+COMPILER_RT_ABI float __floatdisf(di_int a) { return __floatXiYf__(a); }
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
@@ -73,3 +32,7 @@ AEABI_RTABI float __aeabi_l2f(di_int a) { return __floatdisf(a); }
COMPILER_RT_ALIAS(__floatdisf, __aeabi_l2f)
#endif
#endif
+
+#if defined(__MINGW32__) && defined(__arm__)
+COMPILER_RT_ALIAS(__floatdisf, __i64tos)
+#endif
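
The new MinGW/ARM blocks export the same routines under the `__i64tod`/`__i64tos`-style names that Microsoft's ARM runtime expects. `COMPILER_RT_ALIAS` is defined in int_lib.h, which is not shown in this diff; on GCC/Clang targets it amounts to a symbol alias, roughly as below (the `_demo` names are hypothetical, and MSVC uses a different mechanism).

```c
/* One function body, two exported symbols. */
double floatdidf_demo(long long a) { return (double)a; }

/* i64tod_demo resolves to the same code as floatdidf_demo. */
double i64tod_demo(long long a) __attribute__((alias("floatdidf_demo")));
```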
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/floatditf.c b/contrib/llvm-project/compiler-rt/lib/builtins/floatditf.c
index 9b07b65825b8..c6e326a1923a 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/floatditf.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/floatditf.c
@@ -15,7 +15,7 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#if defined(CRT_HAS_TF_MODE)
COMPILER_RT_ABI fp_t __floatditf(di_int a) {
const int aWidth = sizeof a * CHAR_BIT;
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/floatdixf.c b/contrib/llvm-project/compiler-rt/lib/builtins/floatdixf.c
index ad5deb2d4bf5..3d9e664e4814 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/floatdixf.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/floatdixf.c
@@ -23,7 +23,7 @@
// eeee | 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm
// mmmm mmmm mmmm
-COMPILER_RT_ABI long double __floatdixf(di_int a) {
+COMPILER_RT_ABI xf_float __floatdixf(di_int a) {
if (a == 0)
return 0.0;
const unsigned N = sizeof(di_int) * CHAR_BIT;
@@ -31,7 +31,7 @@ COMPILER_RT_ABI long double __floatdixf(di_int a) {
a = (a ^ s) - s;
int clz = __builtin_clzll(a);
int e = (N - 1) - clz; // exponent
- long_double_bits fb;
+ xf_bits fb;
fb.u.high.s.low = ((su_int)s & 0x00008000) | // sign
(e + 16383); // exponent
fb.u.low.all = a << clz; // mantissa
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/floatsidf.c b/contrib/llvm-project/compiler-rt/lib/builtins/floatsidf.c
index 28cf32f6388b..a23b31e7bc7e 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/floatsidf.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/floatsidf.c
@@ -27,20 +27,21 @@ COMPILER_RT_ABI fp_t __floatsidf(si_int a) {
// All other cases begin by extracting the sign and absolute value of a
rep_t sign = 0;
+ su_int aAbs = (su_int)a;
if (a < 0) {
sign = signBit;
- a = -a;
+ aAbs = -aAbs;
}
// Exponent of (fp_t)a is the width of abs(a).
- const int exponent = (aWidth - 1) - clzsi(a);
+ const int exponent = (aWidth - 1) - clzsi(aAbs);
rep_t result;
// Shift a into the significand field and clear the implicit bit. Extra
// cast to unsigned int is necessary to get the correct behavior for
// the input INT_MIN.
const int shift = significandBits - exponent;
- result = (rep_t)(su_int)a << shift ^ implicitBit;
+ result = (rep_t)aAbs << shift ^ implicitBit;
// Insert the exponent
result += (rep_t)(exponent + exponentBias) << significandBits;
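
The `aAbs` rewrite in this hunk fixes undefined behavior: for `a == INT_MIN`, `a = -a` overflows the signed type, while negating the unsigned copy is defined modulo 2^32 and yields the correct magnitude. A minimal demonstration:

```c
#include <limits.h>
#include <stdio.h>

int main(void) {
  int a = INT_MIN;
  /* a = -a;  would be undefined: -INT_MIN does not fit in int */
  unsigned aAbs = (unsigned)a;
  aAbs = -aAbs;          /* unsigned negation wraps mod 2^32: defined */
  printf("%u\n", aAbs);  /* prints 2147483648 */
  return 0;
}
```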
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/floatsisf.c b/contrib/llvm-project/compiler-rt/lib/builtins/floatsisf.c
index fe060407755b..5ede30b703e0 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/floatsisf.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/floatsisf.c
@@ -17,7 +17,7 @@
#include "int_lib.h"
-COMPILER_RT_ABI fp_t __floatsisf(int a) {
+COMPILER_RT_ABI fp_t __floatsisf(si_int a) {
const int aWidth = sizeof a * CHAR_BIT;
@@ -27,23 +27,24 @@ COMPILER_RT_ABI fp_t __floatsisf(int a) {
// All other cases begin by extracting the sign and absolute value of a
rep_t sign = 0;
+ su_int aAbs = (su_int)a;
if (a < 0) {
sign = signBit;
- a = -a;
+ aAbs = -aAbs;
}
// Exponent of (fp_t)a is the width of abs(a).
- const int exponent = (aWidth - 1) - __builtin_clz(a);
+ const int exponent = (aWidth - 1) - clzsi(aAbs);
rep_t result;
// Shift a into the significand field, rounding if it is a right-shift
if (exponent <= significandBits) {
const int shift = significandBits - exponent;
- result = (rep_t)a << shift ^ implicitBit;
+ result = (rep_t)aAbs << shift ^ implicitBit;
} else {
const int shift = exponent - significandBits;
- result = (rep_t)a >> shift ^ implicitBit;
- rep_t round = (rep_t)a << (typeWidth - shift);
+ result = (rep_t)aAbs >> shift ^ implicitBit;
+ rep_t round = (rep_t)aAbs << (typeWidth - shift);
if (round > signBit)
result++;
if (round == signBit)
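
The right-shift path above rounds to nearest with ties to even: the bits shifted out of the significand land in `round`, which is compared against the halfway point (`signBit`). The compiler's own int-to-float conversion follows the same rule, so the semantics can be checked directly:

```c
#include <stdio.h>

int main(void) {
  /* 2^24 + 1 needs 25 significant bits; float keeps 24. The lone
     shifted-out bit is exactly halfway and the kept significand is
     even, so the value rounds down: */
  printf("%.1f\n", (float)16777217);  /* 16777216.0 */
  /* 2^24 + 3 is also exactly halfway, but the kept significand is
     odd, so ties-to-even rounds up: */
  printf("%.1f\n", (float)16777219);  /* 16777220.0 */
  return 0;
}
```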
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/floatsitf.c b/contrib/llvm-project/compiler-rt/lib/builtins/floatsitf.c
index f56063f368d9..314a8a7bbdfe 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/floatsitf.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/floatsitf.c
@@ -15,8 +15,8 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
-COMPILER_RT_ABI fp_t __floatsitf(int a) {
+#if defined(CRT_HAS_TF_MODE)
+COMPILER_RT_ABI fp_t __floatsitf(si_int a) {
const int aWidth = sizeof a * CHAR_BIT;
@@ -26,14 +26,14 @@ COMPILER_RT_ABI fp_t __floatsitf(int a) {
// All other cases begin by extracting the sign and absolute value of a
rep_t sign = 0;
- unsigned aAbs = (unsigned)a;
+ su_int aAbs = (su_int)a;
if (a < 0) {
sign = signBit;
- aAbs = ~(unsigned)a + 1U;
+ aAbs = -aAbs;
}
// Exponent of (fp_t)a is the width of abs(a).
- const int exponent = (aWidth - 1) - __builtin_clz(aAbs);
+ const int exponent = (aWidth - 1) - clzsi(aAbs);
rep_t result;
// Shift a into the significand field and clear the implicit bit.
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/floattidf.c b/contrib/llvm-project/compiler-rt/lib/builtins/floattidf.c
index 0a1c04bec82e..ef8fe180e2f5 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/floattidf.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/floattidf.c
@@ -14,6 +14,10 @@
#ifdef CRT_HAS_128BIT
+#define SRC_I128
+#define DST_DOUBLE
+#include "int_to_fp_impl.inc"
+
// Returns: convert a to a double, rounding toward even.
// Assumption: double is an IEEE 64 bit floating point type
@@ -22,52 +26,6 @@
// seee eeee eeee mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm
// mmmm
-COMPILER_RT_ABI double __floattidf(ti_int a) {
- if (a == 0)
- return 0.0;
- const unsigned N = sizeof(ti_int) * CHAR_BIT;
- const ti_int s = a >> (N - 1);
- a = (a ^ s) - s;
- int sd = N - __clzti2(a); // number of significant digits
- int e = sd - 1; // exponent
- if (sd > DBL_MANT_DIG) {
- // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
- // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
- // 12345678901234567890123456
- // 1 = msb 1 bit
- // P = bit DBL_MANT_DIG-1 bits to the right of 1
- // Q = bit DBL_MANT_DIG bits to the right of 1
- // R = "or" of all bits to the right of Q
- switch (sd) {
- case DBL_MANT_DIG + 1:
- a <<= 1;
- break;
- case DBL_MANT_DIG + 2:
- break;
- default:
- a = ((tu_int)a >> (sd - (DBL_MANT_DIG + 2))) |
- ((a & ((tu_int)(-1) >> ((N + DBL_MANT_DIG + 2) - sd))) != 0);
- };
- // finish:
- a |= (a & 4) != 0; // Or P into R
- ++a; // round - this step may add a significant bit
- a >>= 2; // dump Q and R
- // a is now rounded to DBL_MANT_DIG or DBL_MANT_DIG+1 bits
- if (a & ((tu_int)1 << DBL_MANT_DIG)) {
- a >>= 1;
- ++e;
- }
- // a is now rounded to DBL_MANT_DIG bits
- } else {
- a <<= (DBL_MANT_DIG - sd);
- // a is now rounded to DBL_MANT_DIG bits
- }
- double_bits fb;
- fb.u.s.high = ((su_int)s & 0x80000000) | // sign
- ((e + 1023) << 20) | // exponent
- ((su_int)(a >> 32) & 0x000FFFFF); // mantissa-high
- fb.u.s.low = (su_int)a; // mantissa-low
- return fb.f;
-}
+COMPILER_RT_ABI double __floattidf(ti_int a) { return __floatXiYf__(a); }
#endif // CRT_HAS_128BIT
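
All of the deleted `__float*i*f` bodies above and below implement the same scheme described by their P/Q/R comments: compress everything below the kept significand into a guard bit, a round bit, and a sticky bit, then add one and drop two bits. For intuition, here is a functionally equivalent ties-to-even rounder; this is a plain sketch, not the code that moved into int_to_fp_impl.inc, which keeps the P/Q/R form to avoid materializing the wide remainder.

```c
#include <stdint.h>
#include <stdio.h>

/* Round a nonzero integer to 'mant' significant bits, to nearest with
   ties to even. (In the float conversion the final left shift is
   implicit: rounding that carries into a new top bit bumps the
   exponent instead.) */
static uint64_t round_sig(uint64_t a, int mant) {
  int sd = 64 - __builtin_clzll(a);  /* number of significant digits */
  if (sd <= mant)
    return a;                        /* already exact */
  int drop = sd - mant;
  uint64_t kept = a >> drop;
  uint64_t half = UINT64_C(1) << (drop - 1);
  uint64_t rem = a & ((UINT64_C(1) << drop) - 1);
  if (rem > half || (rem == half && (kept & 1)))
    kept++;                          /* round up, or break tie to even */
  return kept << drop;
}

int main(void) {
  printf("%llu\n", (unsigned long long)round_sig(16777217, 24)); /* 16777216 */
  printf("%llu\n", (unsigned long long)round_sig(16777219, 24)); /* 16777220 */
  return 0;
}
```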
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/floattisf.c b/contrib/llvm-project/compiler-rt/lib/builtins/floattisf.c
index a8fcdbe14c07..77589902f544 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/floattisf.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/floattisf.c
@@ -14,6 +14,10 @@
#ifdef CRT_HAS_128BIT
+#define SRC_I128
+#define DST_SINGLE
+#include "int_to_fp_impl.inc"
+
// Returns: convert a to a float, rounding toward even.
// Assumption: float is an IEEE 32 bit floating point type
@@ -21,51 +25,6 @@
// seee eeee emmm mmmm mmmm mmmm mmmm mmmm
-COMPILER_RT_ABI float __floattisf(ti_int a) {
- if (a == 0)
- return 0.0F;
- const unsigned N = sizeof(ti_int) * CHAR_BIT;
- const ti_int s = a >> (N - 1);
- a = (a ^ s) - s;
- int sd = N - __clzti2(a); // number of significant digits
- int e = sd - 1; // exponent
- if (sd > FLT_MANT_DIG) {
- // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
- // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
- // 12345678901234567890123456
- // 1 = msb 1 bit
- // P = bit FLT_MANT_DIG-1 bits to the right of 1
- // Q = bit FLT_MANT_DIG bits to the right of 1
- // R = "or" of all bits to the right of Q
- switch (sd) {
- case FLT_MANT_DIG + 1:
- a <<= 1;
- break;
- case FLT_MANT_DIG + 2:
- break;
- default:
- a = ((tu_int)a >> (sd - (FLT_MANT_DIG + 2))) |
- ((a & ((tu_int)(-1) >> ((N + FLT_MANT_DIG + 2) - sd))) != 0);
- };
- // finish:
- a |= (a & 4) != 0; // Or P into R
- ++a; // round - this step may add a significant bit
- a >>= 2; // dump Q and R
- // a is now rounded to FLT_MANT_DIG or FLT_MANT_DIG+1 bits
- if (a & ((tu_int)1 << FLT_MANT_DIG)) {
- a >>= 1;
- ++e;
- }
- // a is now rounded to FLT_MANT_DIG bits
- } else {
- a <<= (FLT_MANT_DIG - sd);
- // a is now rounded to FLT_MANT_DIG bits
- }
- float_bits fb;
- fb.u = ((su_int)s & 0x80000000) | // sign
- ((e + 127) << 23) | // exponent
- ((su_int)a & 0x007FFFFF); // mantissa
- return fb.f;
-}
+COMPILER_RT_ABI float __floattisf(ti_int a) { return __floatXiYf__(a); }
#endif // CRT_HAS_128BIT
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/floattitf.c b/contrib/llvm-project/compiler-rt/lib/builtins/floattitf.c
index 196cbdae14e0..5dffe22fdb4e 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/floattitf.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/floattitf.c
@@ -16,6 +16,11 @@
#include "fp_lib.h"
#include "int_lib.h"
+#if defined(CRT_HAS_TF_MODE)
+#define SRC_I128
+#define DST_QUAD
+#include "int_to_fp_impl.inc"
+
// Returns: convert a ti_int to a fp_t, rounding toward even.
// Assumption: fp_t is an IEEE 128 bit floating point type
@@ -25,54 +30,6 @@
// mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm
// mmmm mmmm mmmm
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
-COMPILER_RT_ABI fp_t __floattitf(ti_int a) {
- if (a == 0)
- return 0.0;
- const unsigned N = sizeof(ti_int) * CHAR_BIT;
- const ti_int s = a >> (N - 1);
- a = (a ^ s) - s;
- int sd = N - __clzti2(a); // number of significant digits
- int e = sd - 1; // exponent
- if (sd > LDBL_MANT_DIG) {
- // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
- // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
- // 12345678901234567890123456
- // 1 = msb 1 bit
- // P = bit LDBL_MANT_DIG-1 bits to the right of 1
- // Q = bit LDBL_MANT_DIG bits to the right of 1
- // R = "or" of all bits to the right of Q
- switch (sd) {
- case LDBL_MANT_DIG + 1:
- a <<= 1;
- break;
- case LDBL_MANT_DIG + 2:
- break;
- default:
- a = ((tu_int)a >> (sd - (LDBL_MANT_DIG + 2))) |
- ((a & ((tu_int)(-1) >> ((N + LDBL_MANT_DIG + 2) - sd))) != 0);
- };
- // finish:
- a |= (a & 4) != 0; // Or P into R
- ++a; // round - this step may add a significant bit
- a >>= 2; // dump Q and R
- // a is now rounded to LDBL_MANT_DIG or LDBL_MANT_DIG+1 bits
- if (a & ((tu_int)1 << LDBL_MANT_DIG)) {
- a >>= 1;
- ++e;
- }
- // a is now rounded to LDBL_MANT_DIG bits
- } else {
- a <<= (LDBL_MANT_DIG - sd);
- // a is now rounded to LDBL_MANT_DIG bits
- }
-
- long_double_bits fb;
- fb.u.high.all = (s & 0x8000000000000000LL) // sign
- | (du_int)(e + 16383) << 48 // exponent
- | ((a >> 64) & 0x0000ffffffffffffLL); // significand
- fb.u.low.all = (du_int)(a);
- return fb.f;
-}
+COMPILER_RT_ABI fp_t __floattitf(ti_int a) { return __floatXiYf__(a); }
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/floattixf.c b/contrib/llvm-project/compiler-rt/lib/builtins/floattixf.c
index 23796f1bb56f..c80bc714459c 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/floattixf.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/floattixf.c
@@ -23,7 +23,7 @@
// eeee | 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm
// mmmm mmmm mmmm
-COMPILER_RT_ABI long double __floattixf(ti_int a) {
+COMPILER_RT_ABI xf_float __floattixf(ti_int a) {
if (a == 0)
return 0.0;
const unsigned N = sizeof(ti_int) * CHAR_BIT;
@@ -63,7 +63,7 @@ COMPILER_RT_ABI long double __floattixf(ti_int a) {
a <<= (LDBL_MANT_DIG - sd);
// a is now rounded to LDBL_MANT_DIG bits
}
- long_double_bits fb;
+ xf_bits fb;
fb.u.high.s.low = ((su_int)s & 0x8000) | // sign
(e + 16383); // exponent
fb.u.low.all = (du_int)a; // mantissa
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/floatundidf.c b/contrib/llvm-project/compiler-rt/lib/builtins/floatundidf.c
index e5e533042a34..9743e96ec679 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/floatundidf.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/floatundidf.c
@@ -51,50 +51,11 @@ COMPILER_RT_ABI double __floatundidf(du_int a) {
// flags to set, and we don't want to code-gen to an unknown soft-float
// implementation.
-COMPILER_RT_ABI double __floatundidf(du_int a) {
- if (a == 0)
- return 0.0;
- const unsigned N = sizeof(du_int) * CHAR_BIT;
- int sd = N - __builtin_clzll(a); // number of significant digits
- int e = sd - 1; // exponent
- if (sd > DBL_MANT_DIG) {
- // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
- // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
- // 12345678901234567890123456
- // 1 = msb 1 bit
- // P = bit DBL_MANT_DIG-1 bits to the right of 1
- // Q = bit DBL_MANT_DIG bits to the right of 1
- // R = "or" of all bits to the right of Q
- switch (sd) {
- case DBL_MANT_DIG + 1:
- a <<= 1;
- break;
- case DBL_MANT_DIG + 2:
- break;
- default:
- a = (a >> (sd - (DBL_MANT_DIG + 2))) |
- ((a & ((du_int)(-1) >> ((N + DBL_MANT_DIG + 2) - sd))) != 0);
- };
- // finish:
- a |= (a & 4) != 0; // Or P into R
- ++a; // round - this step may add a significant bit
- a >>= 2; // dump Q and R
- // a is now rounded to DBL_MANT_DIG or DBL_MANT_DIG+1 bits
- if (a & ((du_int)1 << DBL_MANT_DIG)) {
- a >>= 1;
- ++e;
- }
- // a is now rounded to DBL_MANT_DIG bits
- } else {
- a <<= (DBL_MANT_DIG - sd);
- // a is now rounded to DBL_MANT_DIG bits
- }
- double_bits fb;
- fb.u.s.high = ((su_int)(e + 1023) << 20) | // exponent
- ((su_int)(a >> 32) & 0x000FFFFF); // mantissa-high
- fb.u.s.low = (su_int)a; // mantissa-low
- return fb.f;
-}
+#define SRC_U64
+#define DST_DOUBLE
+#include "int_to_fp_impl.inc"
+
+COMPILER_RT_ABI double __floatundidf(du_int a) { return __floatXiYf__(a); }
#endif
#if defined(__ARM_EABI__)
@@ -104,3 +65,7 @@ AEABI_RTABI double __aeabi_ul2d(du_int a) { return __floatundidf(a); }
COMPILER_RT_ALIAS(__floatundidf, __aeabi_ul2d)
#endif
#endif
+
+#if defined(__MINGW32__) && defined(__arm__)
+COMPILER_RT_ALIAS(__floatundidf, __u64tod)
+#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/floatundisf.c b/contrib/llvm-project/compiler-rt/lib/builtins/floatundisf.c
index 00d61b0c6310..d4b418efd406 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/floatundisf.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/floatundisf.c
@@ -19,49 +19,11 @@
#include "int_lib.h"
-COMPILER_RT_ABI float __floatundisf(du_int a) {
- if (a == 0)
- return 0.0F;
- const unsigned N = sizeof(du_int) * CHAR_BIT;
- int sd = N - __builtin_clzll(a); // number of significant digits
-  si_int e = sd - 1; // exponent
- if (sd > FLT_MANT_DIG) {
- // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
- // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
- // 12345678901234567890123456
- // 1 = msb 1 bit
- // P = bit FLT_MANT_DIG-1 bits to the right of 1
- // Q = bit FLT_MANT_DIG bits to the right of 1
- // R = "or" of all bits to the right of Q
- switch (sd) {
- case FLT_MANT_DIG + 1:
- a <<= 1;
- break;
- case FLT_MANT_DIG + 2:
- break;
- default:
- a = (a >> (sd - (FLT_MANT_DIG + 2))) |
- ((a & ((du_int)(-1) >> ((N + FLT_MANT_DIG + 2) - sd))) != 0);
- };
- // finish:
- a |= (a & 4) != 0; // Or P into R
- ++a; // round - this step may add a significant bit
- a >>= 2; // dump Q and R
- // a is now rounded to FLT_MANT_DIG or FLT_MANT_DIG+1 bits
- if (a & ((du_int)1 << FLT_MANT_DIG)) {
- a >>= 1;
- ++e;
- }
- // a is now rounded to FLT_MANT_DIG bits
- } else {
- a <<= (FLT_MANT_DIG - sd);
- // a is now rounded to FLT_MANT_DIG bits
- }
- float_bits fb;
- fb.u = ((e + 127) << 23) | // exponent
- ((su_int)a & 0x007FFFFF); // mantissa
- return fb.f;
-}
+#define SRC_U64
+#define DST_SINGLE
+#include "int_to_fp_impl.inc"
+
+COMPILER_RT_ABI float __floatundisf(du_int a) { return __floatXiYf__(a); }
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
@@ -70,3 +32,7 @@ AEABI_RTABI float __aeabi_ul2f(du_int a) { return __floatundisf(a); }
COMPILER_RT_ALIAS(__floatundisf, __aeabi_ul2f)
#endif
#endif
+
+#if defined(__MINGW32__) && defined(__arm__)
+COMPILER_RT_ALIAS(__floatundisf, __u64tos)
+#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/floatunditf.c b/contrib/llvm-project/compiler-rt/lib/builtins/floatunditf.c
index 8d310851e179..abe0ca9ed8c5 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/floatunditf.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/floatunditf.c
@@ -15,7 +15,7 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#if defined(CRT_HAS_TF_MODE)
COMPILER_RT_ABI fp_t __floatunditf(du_int a) {
const int aWidth = sizeof a * CHAR_BIT;
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/floatundixf.c b/contrib/llvm-project/compiler-rt/lib/builtins/floatundixf.c
index 85264adac197..3e3c6556d65b 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/floatundixf.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/floatundixf.c
@@ -22,13 +22,13 @@
// gggg gggg gggg gggg gggg gggg gggg gggg | gggg gggg gggg gggg seee eeee eeee
// eeee | 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm
// mmmm mmmm mmmm
-COMPILER_RT_ABI long double __floatundixf(du_int a) {
+COMPILER_RT_ABI xf_float __floatundixf(du_int a) {
if (a == 0)
return 0.0;
const unsigned N = sizeof(du_int) * CHAR_BIT;
int clz = __builtin_clzll(a);
int e = (N - 1) - clz; // exponent
- long_double_bits fb;
+ xf_bits fb;
fb.u.high.s.low = (e + 16383); // exponent
fb.u.low.all = a << clz; // mantissa
return fb.f;
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/floatunsisf.c b/contrib/llvm-project/compiler-rt/lib/builtins/floatunsisf.c
index 33a1b5ae2a63..ec062b5943e9 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/floatunsisf.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/floatunsisf.c
@@ -17,7 +17,7 @@
#include "int_lib.h"
-COMPILER_RT_ABI fp_t __floatunsisf(unsigned int a) {
+COMPILER_RT_ABI fp_t __floatunsisf(su_int a) {
const int aWidth = sizeof a * CHAR_BIT;
@@ -26,7 +26,7 @@ COMPILER_RT_ABI fp_t __floatunsisf(unsigned int a) {
return fromRep(0);
// Exponent of (fp_t)a is the width of abs(a).
- const int exponent = (aWidth - 1) - __builtin_clz(a);
+ const int exponent = (aWidth - 1) - clzsi(a);
rep_t result;
// Shift a into the significand field, rounding if it is a right-shift
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/floatunsitf.c b/contrib/llvm-project/compiler-rt/lib/builtins/floatunsitf.c
index a4bf0f65fe1c..3f0a5249fddd 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/floatunsitf.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/floatunsitf.c
@@ -15,8 +15,8 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
-COMPILER_RT_ABI fp_t __floatunsitf(unsigned int a) {
+#if defined(CRT_HAS_TF_MODE)
+COMPILER_RT_ABI fp_t __floatunsitf(su_int a) {
const int aWidth = sizeof a * CHAR_BIT;
@@ -25,7 +25,7 @@ COMPILER_RT_ABI fp_t __floatunsitf(unsigned int a) {
return fromRep(0);
// Exponent of (fp_t)a is the width of abs(a).
- const int exponent = (aWidth - 1) - __builtin_clz(a);
+ const int exponent = (aWidth - 1) - clzsi(a);
rep_t result;
// Shift a into the significand field and clear the implicit bit.
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/floatuntidf.c b/contrib/llvm-project/compiler-rt/lib/builtins/floatuntidf.c
index e69e65c1ace4..9abeacc30c3c 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/floatuntidf.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/floatuntidf.c
@@ -14,6 +14,10 @@
#ifdef CRT_HAS_128BIT
+#define SRC_U128
+#define DST_DOUBLE
+#include "int_to_fp_impl.inc"
+
// Returns: convert a to a double, rounding toward even.
// Assumption: double is an IEEE 64 bit floating point type
@@ -22,49 +26,6 @@
// seee eeee eeee mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm
// mmmm
-COMPILER_RT_ABI double __floatuntidf(tu_int a) {
- if (a == 0)
- return 0.0;
- const unsigned N = sizeof(tu_int) * CHAR_BIT;
- int sd = N - __clzti2(a); // number of significant digits
- int e = sd - 1; // exponent
- if (sd > DBL_MANT_DIG) {
- // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
- // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
- // 12345678901234567890123456
- // 1 = msb 1 bit
- // P = bit DBL_MANT_DIG-1 bits to the right of 1
- // Q = bit DBL_MANT_DIG bits to the right of 1
- // R = "or" of all bits to the right of Q
- switch (sd) {
- case DBL_MANT_DIG + 1:
- a <<= 1;
- break;
- case DBL_MANT_DIG + 2:
- break;
- default:
- a = (a >> (sd - (DBL_MANT_DIG + 2))) |
- ((a & ((tu_int)(-1) >> ((N + DBL_MANT_DIG + 2) - sd))) != 0);
- };
- // finish:
- a |= (a & 4) != 0; // Or P into R
- ++a; // round - this step may add a significant bit
- a >>= 2; // dump Q and R
- // a is now rounded to DBL_MANT_DIG or DBL_MANT_DIG+1 bits
- if (a & ((tu_int)1 << DBL_MANT_DIG)) {
- a >>= 1;
- ++e;
- }
- // a is now rounded to DBL_MANT_DIG bits
- } else {
- a <<= (DBL_MANT_DIG - sd);
- // a is now rounded to DBL_MANT_DIG bits
- }
- double_bits fb;
- fb.u.s.high = ((e + 1023) << 20) | // exponent
- ((su_int)(a >> 32) & 0x000FFFFF); // mantissa-high
- fb.u.s.low = (su_int)a; // mantissa-low
- return fb.f;
-}
+COMPILER_RT_ABI double __floatuntidf(tu_int a) { return __floatXiYf__(a); }
#endif // CRT_HAS_128BIT
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/floatuntisf.c b/contrib/llvm-project/compiler-rt/lib/builtins/floatuntisf.c
index 9dec0ab5c58f..997c1569acd6 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/floatuntisf.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/floatuntisf.c
@@ -14,6 +14,10 @@
#ifdef CRT_HAS_128BIT
+#define SRC_U128
+#define DST_SINGLE
+#include "int_to_fp_impl.inc"
+
// Returns: convert a to a float, rounding toward even.
// Assumption: float is an IEEE 32 bit floating point type
@@ -21,48 +25,6 @@
// seee eeee emmm mmmm mmmm mmmm mmmm mmmm
-COMPILER_RT_ABI float __floatuntisf(tu_int a) {
- if (a == 0)
- return 0.0F;
- const unsigned N = sizeof(tu_int) * CHAR_BIT;
- int sd = N - __clzti2(a); // number of significant digits
- int e = sd - 1; // exponent
- if (sd > FLT_MANT_DIG) {
- // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
- // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
- // 12345678901234567890123456
- // 1 = msb 1 bit
- // P = bit FLT_MANT_DIG-1 bits to the right of 1
- // Q = bit FLT_MANT_DIG bits to the right of 1
- // R = "or" of all bits to the right of Q
- switch (sd) {
- case FLT_MANT_DIG + 1:
- a <<= 1;
- break;
- case FLT_MANT_DIG + 2:
- break;
- default:
- a = (a >> (sd - (FLT_MANT_DIG + 2))) |
- ((a & ((tu_int)(-1) >> ((N + FLT_MANT_DIG + 2) - sd))) != 0);
- };
- // finish:
- a |= (a & 4) != 0; // Or P into R
- ++a; // round - this step may add a significant bit
- a >>= 2; // dump Q and R
- // a is now rounded to FLT_MANT_DIG or FLT_MANT_DIG+1 bits
- if (a & ((tu_int)1 << FLT_MANT_DIG)) {
- a >>= 1;
- ++e;
- }
- // a is now rounded to FLT_MANT_DIG bits
- } else {
- a <<= (FLT_MANT_DIG - sd);
- // a is now rounded to FLT_MANT_DIG bits
- }
- float_bits fb;
- fb.u = ((e + 127) << 23) | // exponent
- ((su_int)a & 0x007FFFFF); // mantissa
- return fb.f;
-}
+COMPILER_RT_ABI float __floatuntisf(tu_int a) { return __floatXiYf__(a); }
#endif // CRT_HAS_128BIT
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/floatuntitf.c b/contrib/llvm-project/compiler-rt/lib/builtins/floatuntitf.c
index d308d3118d03..1c5998a40b9f 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/floatuntitf.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/floatuntitf.c
@@ -16,6 +16,11 @@
#include "fp_lib.h"
#include "int_lib.h"
+#if defined(CRT_HAS_TF_MODE)
+#define SRC_U128
+#define DST_QUAD
+#include "int_to_fp_impl.inc"
+
// Returns: convert a tu_int to a fp_t, rounding toward even.
// Assumption: fp_t is an IEEE 128 bit floating point type
@@ -25,51 +30,6 @@
// mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm
// mmmm mmmm mmmm
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
-COMPILER_RT_ABI fp_t __floatuntitf(tu_int a) {
- if (a == 0)
- return 0.0;
- const unsigned N = sizeof(tu_int) * CHAR_BIT;
- int sd = N - __clzti2(a); // number of significant digits
- int e = sd - 1; // exponent
- if (sd > LDBL_MANT_DIG) {
- // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
- // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
- // 12345678901234567890123456
- // 1 = msb 1 bit
- // P = bit LDBL_MANT_DIG-1 bits to the right of 1
- // Q = bit LDBL_MANT_DIG bits to the right of 1
- // R = "or" of all bits to the right of Q
- switch (sd) {
- case LDBL_MANT_DIG + 1:
- a <<= 1;
- break;
- case LDBL_MANT_DIG + 2:
- break;
- default:
- a = (a >> (sd - (LDBL_MANT_DIG + 2))) |
- ((a & ((tu_int)(-1) >> ((N + LDBL_MANT_DIG + 2) - sd))) != 0);
- };
- // finish:
- a |= (a & 4) != 0; // Or P into R
- ++a; // round - this step may add a significant bit
- a >>= 2; // dump Q and R
- // a is now rounded to LDBL_MANT_DIG or LDBL_MANT_DIG+1 bits
- if (a & ((tu_int)1 << LDBL_MANT_DIG)) {
- a >>= 1;
- ++e;
- }
- // a is now rounded to LDBL_MANT_DIG bits
- } else {
- a <<= (LDBL_MANT_DIG - sd);
- // a is now rounded to LDBL_MANT_DIG bits
- }
-
- long_double_bits fb;
- fb.u.high.all = (du_int)(e + 16383) << 48 // exponent
- | ((a >> 64) & 0x0000ffffffffffffLL); // significand
- fb.u.low.all = (du_int)(a);
- return fb.f;
-}
+COMPILER_RT_ABI fp_t __floatuntitf(tu_int a) { return __floatXiYf__(a); }
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/floatuntixf.c b/contrib/llvm-project/compiler-rt/lib/builtins/floatuntixf.c
index efd8a27a0875..4c53775229ea 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/floatuntixf.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/floatuntixf.c
@@ -23,7 +23,7 @@
// eeee | 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm
// mmmm mmmm mmmm
-COMPILER_RT_ABI long double __floatuntixf(tu_int a) {
+COMPILER_RT_ABI xf_float __floatuntixf(tu_int a) {
if (a == 0)
return 0.0;
const unsigned N = sizeof(tu_int) * CHAR_BIT;
@@ -61,7 +61,7 @@ COMPILER_RT_ABI long double __floatuntixf(tu_int a) {
a <<= (LDBL_MANT_DIG - sd);
// a is now rounded to LDBL_MANT_DIG bits
}
- long_double_bits fb;
+ xf_bits fb;
fb.u.high.s.low = (e + 16383); // exponent
fb.u.low.all = (du_int)a; // mantissa
return fb.f;
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/fp_compare_impl.inc b/contrib/llvm-project/compiler-rt/lib/builtins/fp_compare_impl.inc
index 40fc7df4c679..a9a4f6fbf5df 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/fp_compare_impl.inc
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/fp_compare_impl.inc
@@ -18,6 +18,9 @@ typedef int CMP_RESULT;
#elif __SIZEOF_POINTER__ == 8 && __SIZEOF_LONG__ == 4
// LLP64 ABIs use long long instead of long.
typedef long long CMP_RESULT;
+#elif __AVR__
+// AVR uses a single byte for the return value.
+typedef char CMP_RESULT;
#else
// Otherwise the comparison functions return long.
typedef long CMP_RESULT;
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/fp_extend.h b/contrib/llvm-project/compiler-rt/lib/builtins/fp_extend.h
index aad4436730dd..95ea2a7ac4b2 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/fp_extend.h
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/fp_extend.h
@@ -20,24 +20,46 @@
typedef float src_t;
typedef uint32_t src_rep_t;
#define SRC_REP_C UINT32_C
-static const int srcSigBits = 23;
+static const int srcBits = sizeof(src_t) * CHAR_BIT;
+static const int srcSigFracBits = 23;
+// -1 accounts for the sign bit.
+// srcBits - srcSigFracBits - 1
+static const int srcExpBits = 8;
#define src_rep_t_clz clzsi
#elif defined SRC_DOUBLE
typedef double src_t;
typedef uint64_t src_rep_t;
#define SRC_REP_C UINT64_C
-static const int srcSigBits = 52;
-static __inline int src_rep_t_clz(src_rep_t a) {
+static const int srcBits = sizeof(src_t) * CHAR_BIT;
+static const int srcSigFracBits = 52;
+// -1 accounts for the sign bit.
+// srcBits - srcSigFracBits - 1
+static const int srcExpBits = 11;
+
+static inline int src_rep_t_clz_impl(src_rep_t a) {
#if defined __LP64__
return __builtin_clzl(a);
#else
if (a & REP_C(0xffffffff00000000))
- return __builtin_clz(a >> 32);
+ return clzsi(a >> 32);
else
- return 32 + __builtin_clz(a & REP_C(0xffffffff));
+ return 32 + clzsi(a & REP_C(0xffffffff));
#endif
}
+#define src_rep_t_clz src_rep_t_clz_impl
+
+#elif defined SRC_80
+typedef xf_float src_t;
+typedef __uint128_t src_rep_t;
+#define SRC_REP_C (__uint128_t)
+// Sign bit, exponent, and significand occupy the lower 80 bits.
+static const int srcBits = 80;
+static const int srcSigFracBits = 63;
+// -1 accounts for the sign bit.
+// -1 accounts for the explicitly stored integer bit.
+// srcBits - srcSigFracBits - 1 - 1
+static const int srcExpBits = 15;
#elif defined SRC_HALF
#ifdef COMPILER_RT_HAS_FLOAT16
@@ -47,8 +69,17 @@ typedef uint16_t src_t;
#endif
typedef uint16_t src_rep_t;
#define SRC_REP_C UINT16_C
-static const int srcSigBits = 10;
-#define src_rep_t_clz __builtin_clz
+static const int srcBits = sizeof(src_t) * CHAR_BIT;
+static const int srcSigFracBits = 10;
+// -1 accounts for the sign bit.
+// srcBits - srcSigFracBits - 1
+static const int srcExpBits = 5;
+
+static inline int src_rep_t_clz_impl(src_rep_t a) {
+ return __builtin_clz(a) - 16;
+}
+
+#define src_rep_t_clz src_rep_t_clz_impl
#else
#error Source should be half, single, or double precision!
@@ -58,28 +89,72 @@ static const int srcSigBits = 10;
typedef float dst_t;
typedef uint32_t dst_rep_t;
#define DST_REP_C UINT32_C
-static const int dstSigBits = 23;
+static const int dstBits = sizeof(dst_t) * CHAR_BIT;
+static const int dstSigFracBits = 23;
+// -1 accounts for the sign bit.
+// dstBits - dstSigFracBits - 1
+static const int dstExpBits = 8;
#elif defined DST_DOUBLE
typedef double dst_t;
typedef uint64_t dst_rep_t;
#define DST_REP_C UINT64_C
-static const int dstSigBits = 52;
+static const int dstBits = sizeof(dst_t) * CHAR_BIT;
+static const int dstSigFracBits = 52;
+// -1 accounts for the sign bit.
+// dstBits - dstSigFracBits - 1
+static const int dstExpBits = 11;
#elif defined DST_QUAD
-typedef long double dst_t;
+typedef tf_float dst_t;
typedef __uint128_t dst_rep_t;
#define DST_REP_C (__uint128_t)
-static const int dstSigBits = 112;
+static const int dstBits = sizeof(dst_t) * CHAR_BIT;
+static const int dstSigFracBits = 112;
+// -1 accounts for the sign bit.
+// dstBits - dstSigFracBits - 1
+static const int dstExpBits = 15;
#else
#error Destination should be single, double, or quad precision!
#endif // end destination precision
-// End of specialization parameters. Two helper routines for conversion to and
-// from the representation of floating-point data as integer values follow.
+// End of specialization parameters.
+
+// TODO: These helper routines should be placed into fp_lib.h
+// Currently they depend on macros/constants defined above.
+
+static inline src_rep_t extract_sign_from_src(src_rep_t x) {
+ const src_rep_t srcSignMask = SRC_REP_C(1) << (srcBits - 1);
+ return (x & srcSignMask) >> (srcBits - 1);
+}
+
+static inline src_rep_t extract_exp_from_src(src_rep_t x) {
+ const int srcSigBits = srcBits - 1 - srcExpBits;
+ const src_rep_t srcExpMask = ((SRC_REP_C(1) << srcExpBits) - 1) << srcSigBits;
+ return (x & srcExpMask) >> srcSigBits;
+}
+
+static inline src_rep_t extract_sig_frac_from_src(src_rep_t x) {
+ const src_rep_t srcSigFracMask = (SRC_REP_C(1) << srcSigFracBits) - 1;
+ return x & srcSigFracMask;
+}
+
+#ifdef src_rep_t_clz
+static inline int clz_in_sig_frac(src_rep_t sigFrac) {
+ const int skip = 1 + srcExpBits;
+ return src_rep_t_clz(sigFrac) - skip;
+}
+#endif
+
+static inline dst_rep_t construct_dst_rep(dst_rep_t sign, dst_rep_t exp, dst_rep_t sigFrac) {
+ return (sign << (dstBits - 1)) | (exp << (dstBits - 1 - dstExpBits)) | sigFrac;
+}
+
+// Two helper routines for conversion to and from the representation of
+// floating-point data as integer values follow.
-static __inline src_rep_t srcToRep(src_t x) {
+static inline src_rep_t srcToRep(src_t x) {
const union {
src_t f;
src_rep_t i;
@@ -87,7 +162,7 @@ static __inline src_rep_t srcToRep(src_t x) {
return rep.i;
}
-static __inline dst_t dstFromRep(dst_rep_t x) {
+static inline dst_t dstFromRep(dst_rep_t x) {
const union {
dst_t f;
dst_rep_t i;
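
The new `extract_sign_from_src`, `extract_exp_from_src`, `extract_sig_frac_from_src`, and `construct_dst_rep` helpers factor the field arithmetic out of the conversion algorithms. A standalone sketch of the same decomposition, specialized to float (srcBits = 32, srcExpBits = 8, srcSigFracBits = 23):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t extract_sign(uint32_t x) { return x >> 31; }
static uint32_t extract_exp(uint32_t x) { return (x >> 23) & 0xFFu; }
static uint32_t extract_sig_frac(uint32_t x) { return x & 0x7FFFFFu; }

int main(void) {
  float f = -1.5f;
  uint32_t rep;
  memcpy(&rep, &f, sizeof rep); /* srcToRep for float */
  /* -1.5f decomposes as sign 1, biased exponent 127, fraction 0x400000. */
  printf("sign=%u exp=%u frac=0x%06X\n", extract_sign(rep),
         extract_exp(rep), extract_sig_frac(rep));
  return 0;
}
```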
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/fp_extend_impl.inc b/contrib/llvm-project/compiler-rt/lib/builtins/fp_extend_impl.inc
index d1c9c02a00c5..f4f663066431 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/fp_extend_impl.inc
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/fp_extend_impl.inc
@@ -37,71 +37,72 @@
#include "fp_extend.h"
+// The source type may use a standard IEEE-754 interchange format or the Intel
+// 80-bit format. In particular, for the source type srcSigFracBits may not
+// equal srcSigBits. The destination type is assumed to be one of the standard
+// IEEE-754 types.
static __inline dst_t __extendXfYf2__(src_t a) {
// Various constants whose values follow from the type parameters.
// Any reasonable optimizer will fold and propagate all of these.
- const int srcBits = sizeof(src_t) * CHAR_BIT;
- const int srcExpBits = srcBits - srcSigBits - 1;
const int srcInfExp = (1 << srcExpBits) - 1;
const int srcExpBias = srcInfExp >> 1;
- const src_rep_t srcMinNormal = SRC_REP_C(1) << srcSigBits;
- const src_rep_t srcInfinity = (src_rep_t)srcInfExp << srcSigBits;
- const src_rep_t srcSignMask = SRC_REP_C(1) << (srcSigBits + srcExpBits);
- const src_rep_t srcAbsMask = srcSignMask - 1;
- const src_rep_t srcQNaN = SRC_REP_C(1) << (srcSigBits - 1);
- const src_rep_t srcNaNCode = srcQNaN - 1;
-
- const int dstBits = sizeof(dst_t) * CHAR_BIT;
- const int dstExpBits = dstBits - dstSigBits - 1;
const int dstInfExp = (1 << dstExpBits) - 1;
const int dstExpBias = dstInfExp >> 1;
- const dst_rep_t dstMinNormal = DST_REP_C(1) << dstSigBits;
-
// Break a into a sign and representation of the absolute value.
const src_rep_t aRep = srcToRep(a);
- const src_rep_t aAbs = aRep & srcAbsMask;
- const src_rep_t sign = aRep & srcSignMask;
- dst_rep_t absResult;
+ const src_rep_t srcSign = extract_sign_from_src(aRep);
+ const src_rep_t srcExp = extract_exp_from_src(aRep);
+ const src_rep_t srcSigFrac = extract_sig_frac_from_src(aRep);
+
+ dst_rep_t dstSign = srcSign;
+ dst_rep_t dstExp;
+ dst_rep_t dstSigFrac;
- // If sizeof(src_rep_t) < sizeof(int), the subtraction result is promoted
- // to (signed) int. To avoid that, explicitly cast to src_rep_t.
- if ((src_rep_t)(aAbs - srcMinNormal) < srcInfinity - srcMinNormal) {
+ if (srcExp >= 1 && srcExp < (src_rep_t)srcInfExp) {
// a is a normal number.
- // Extend to the destination type by shifting the significand and
- // exponent into the proper position and rebiasing the exponent.
- absResult = (dst_rep_t)aAbs << (dstSigBits - srcSigBits);
- absResult += (dst_rep_t)(dstExpBias - srcExpBias) << dstSigBits;
+ dstExp = (dst_rep_t)srcExp + (dst_rep_t)(dstExpBias - srcExpBias);
+ dstSigFrac = (dst_rep_t)srcSigFrac << (dstSigFracBits - srcSigFracBits);
}
- else if (aAbs >= srcInfinity) {
+ else if (srcExp == srcInfExp) {
// a is NaN or infinity.
- // Conjure the result by beginning with infinity, then setting the qNaN
- // bit (if needed) and right-aligning the rest of the trailing NaN
- // payload field.
- absResult = (dst_rep_t)dstInfExp << dstSigBits;
- absResult |= (dst_rep_t)(aAbs & srcQNaN) << (dstSigBits - srcSigBits);
- absResult |= (dst_rep_t)(aAbs & srcNaNCode) << (dstSigBits - srcSigBits);
+ dstExp = dstInfExp;
+ dstSigFrac = (dst_rep_t)srcSigFrac << (dstSigFracBits - srcSigFracBits);
}
- else if (aAbs) {
+ else if (srcSigFrac) {
// a is denormal.
- // renormalize the significand and clear the leading bit, then insert
- // the correct adjusted exponent in the destination type.
- const int scale = src_rep_t_clz(aAbs) - src_rep_t_clz(srcMinNormal);
- absResult = (dst_rep_t)aAbs << (dstSigBits - srcSigBits + scale);
- absResult ^= dstMinNormal;
- const int resultExponent = dstExpBias - srcExpBias - scale + 1;
- absResult |= (dst_rep_t)resultExponent << dstSigBits;
+ if (srcExpBits == dstExpBits) {
+ // The exponent fields are identical and this is a denormal number, so all
+ // the non-significand bits are zero. In particular, this branch is always
+ // taken when we extend a denormal F80 to F128.
+ dstExp = 0;
+ dstSigFrac = ((dst_rep_t)srcSigFrac) << (dstSigFracBits - srcSigFracBits);
+ } else {
+#ifndef src_rep_t_clz
+ // If src_rep_t_clz is not defined this branch must be unreachable.
+ __builtin_unreachable();
+#else
+ // Renormalize the significand and clear the leading bit.
+ // For F80 -> F128 this codepath is unused.
+ const int scale = clz_in_sig_frac(srcSigFrac) + 1;
+ dstExp = dstExpBias - srcExpBias - scale + 1;
+ dstSigFrac = (dst_rep_t)srcSigFrac
+ << (dstSigFracBits - srcSigFracBits + scale);
+ const dst_rep_t dstMinNormal = DST_REP_C(1) << (dstBits - 1 - dstExpBits);
+ dstSigFrac ^= dstMinNormal;
+#endif
+ }
}
else {
// a is zero.
- absResult = 0;
+ dstExp = 0;
+ dstSigFrac = 0;
}
- // Apply the signbit to the absolute value.
- const dst_rep_t result = absResult | (dst_rep_t)sign << (dstBits - srcBits);
+ const dst_rep_t result = construct_dst_rep(dstSign, dstExp, dstSigFrac);
return dstFromRep(result);
}
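
The denormal branch above renormalizes the fraction whenever the destination has a wider exponent field, so every source denormal extends exactly, typically to a normal number in the destination format. The effect is easy to observe with the compiler's own conversion:

```c
#include <stdio.h>

int main(void) {
  float tiny = 0x1p-149f;        /* smallest positive float, a denormal */
  printf("%a\n", (double)tiny);  /* prints 0x1p-149: a normal double */
  return 0;
}
```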
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/fp_fixint_impl.inc b/contrib/llvm-project/compiler-rt/lib/builtins/fp_fixint_impl.inc
index 2196d712f05f..3556bad9990b 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/fp_fixint_impl.inc
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/fp_fixint_impl.inc
@@ -36,5 +36,5 @@ static __inline fixint_t __fixint(fp_t a) {
if (exponent < significandBits)
return sign * (significand >> (significandBits - exponent));
else
- return sign * ((fixint_t)significand << (exponent - significandBits));
+ return sign * ((fixuint_t)significand << (exponent - significandBits));
}
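
The one-token change above matters: left-shifting a value into or past the sign bit of the signed `fixint_t` is undefined behavior, while the same shift in `fixuint_t` is fully defined. A sketch of the failure mode this avoids, for the value -0x1p63 converted to int64_t:

```c
#include <stdint.h>
#include <stdio.h>

int main(void) {
  /* The significand 1 must be shifted left 63 places. As int64_t the
     shift is undefined behavior; as uint64_t it is well defined. */
  int64_t sign = -1;
  uint64_t mag = UINT64_C(1) << 63;        /* defined unsigned shift */
  int64_t result = (int64_t)(sign * mag);  /* multiply wraps in unsigned
                                              arithmetic; the conversion
                                              back is implementation-
                                              defined but yields INT64_MIN
                                              on two's-complement targets */
  printf("%lld\n", (long long)result);     /* -9223372036854775808 */
  return 0;
}
```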
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/fp_lib.h b/contrib/llvm-project/compiler-rt/lib/builtins/fp_lib.h
index 3fb13a033a14..c4f0a5b9587f 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/fp_lib.h
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/fp_lib.h
@@ -22,22 +22,11 @@
#include "int_lib.h"
#include "int_math.h"
+#include "int_types.h"
#include <limits.h>
#include <stdbool.h>
#include <stdint.h>
-// x86_64 FreeBSD prior v9.3 define fixed-width types incorrectly in
-// 32-bit mode.
-#if defined(__FreeBSD__) && defined(__i386__)
-#include <sys/param.h>
-#if __FreeBSD_version < 903000 // v9.3
-#define uint64_t unsigned long long
-#define int64_t long long
-#undef UINT64_C
-#define UINT64_C(c) (c##ULL)
-#endif
-#endif
-
#if defined SINGLE_PRECISION
typedef uint16_t half_rep_t;
@@ -105,17 +94,18 @@ static __inline void wideMultiply(rep_t a, rep_t b, rep_t *hi, rep_t *lo) {
COMPILER_RT_ABI fp_t __adddf3(fp_t a, fp_t b);
#elif defined QUAD_PRECISION
-#if __LDBL_MANT_DIG__ == 113 && defined(__SIZEOF_INT128__)
-#define CRT_LDBL_128BIT
+#if defined(CRT_HAS_F128) && defined(CRT_HAS_128BIT)
typedef uint64_t half_rep_t;
typedef __uint128_t rep_t;
typedef __int128_t srep_t;
-typedef long double fp_t;
+typedef tf_float fp_t;
#define HALF_REP_C UINT64_C
#define REP_C (__uint128_t)
+#if defined(CRT_HAS_IEEE_TF)
// Note: Since there is no explicit way to tell the compiler that the constant
// is a 128-bit integer, we cast the constant to a 128-bit integer.
#define significandBits 112
+#define TF_MANT_DIG (significandBits + 1)
static __inline int rep_clz(rep_t a) {
const union {
@@ -200,27 +190,17 @@ static __inline void wideMultiply(rep_t a, rep_t b, rep_t *hi, rep_t *lo) {
#undef Word_HiMask
#undef Word_LoMask
#undef Word_FullMask
-#endif // __LDBL_MANT_DIG__ == 113 && __SIZEOF_INT128__
+#endif // defined(CRT_HAS_IEEE_TF)
+#else
+typedef long double fp_t;
+#endif // defined(CRT_HAS_F128) && defined(CRT_HAS_128BIT)
#else
#error SINGLE_PRECISION, DOUBLE_PRECISION or QUAD_PRECISION must be defined.
#endif
#if defined(SINGLE_PRECISION) || defined(DOUBLE_PRECISION) || \
- defined(CRT_LDBL_128BIT)
+ (defined(QUAD_PRECISION) && defined(CRT_HAS_TF_MODE))
#define typeWidth (sizeof(rep_t) * CHAR_BIT)
-#define exponentBits (typeWidth - significandBits - 1)
-#define maxExponent ((1 << exponentBits) - 1)
-#define exponentBias (maxExponent >> 1)
-
-#define implicitBit (REP_C(1) << significandBits)
-#define significandMask (implicitBit - 1U)
-#define signBit (REP_C(1) << (significandBits + exponentBits))
-#define absMask (signBit - 1U)
-#define exponentMask (absMask ^ significandMask)
-#define oneRep ((rep_t)exponentBias << significandBits)
-#define infRep exponentMask
-#define quietBit (implicitBit >> 1)
-#define qnanRep (exponentMask | quietBit)
static __inline rep_t toRep(fp_t x) {
const union {
@@ -238,6 +218,21 @@ static __inline fp_t fromRep(rep_t x) {
return rep.f;
}
+#if !defined(QUAD_PRECISION) || defined(CRT_HAS_IEEE_TF)
+#define exponentBits (typeWidth - significandBits - 1)
+#define maxExponent ((1 << exponentBits) - 1)
+#define exponentBias (maxExponent >> 1)
+
+#define implicitBit (REP_C(1) << significandBits)
+#define significandMask (implicitBit - 1U)
+#define signBit (REP_C(1) << (significandBits + exponentBits))
+#define absMask (signBit - 1U)
+#define exponentMask (absMask ^ significandMask)
+#define oneRep ((rep_t)exponentBias << significandBits)
+#define infRep exponentMask
+#define quietBit (implicitBit >> 1)
+#define qnanRep (exponentMask | quietBit)
+
static __inline int normalize(rep_t *significand) {
const int shift = rep_clz(*significand) - rep_clz(implicitBit);
*significand <<= shift;
@@ -340,6 +335,8 @@ static __inline fp_t __compiler_rt_scalbnX(fp_t x, int y) {
return fromRep(sign | ((rep_t)exp << significandBits) | sig);
}
+#endif // !defined(QUAD_PRECISION) || defined(CRT_HAS_IEEE_TF)
+
// Avoid using fmax from libm.
static __inline fp_t __compiler_rt_fmaxX(fp_t x, fp_t y) {
// If either argument is NaN, return the other argument. If both are NaN,
@@ -386,31 +383,42 @@ static __inline fp_t __compiler_rt_fmax(fp_t x, fp_t y) {
#endif
}
-#elif defined(QUAD_PRECISION)
-
-#if defined(CRT_LDBL_128BIT)
-static __inline fp_t __compiler_rt_logbl(fp_t x) {
+#elif defined(QUAD_PRECISION) && defined(CRT_HAS_TF_MODE)
+// The generic implementation only works for IEEE-754 floating point. For other
+// floating point types, continue to rely on the libm implementation for now.
+#if defined(CRT_HAS_IEEE_TF)
+static __inline tf_float __compiler_rt_logbtf(tf_float x) {
return __compiler_rt_logbX(x);
}
-static __inline fp_t __compiler_rt_scalbnl(fp_t x, int y) {
+static __inline tf_float __compiler_rt_scalbntf(tf_float x, int y) {
return __compiler_rt_scalbnX(x, y);
}
-static __inline fp_t __compiler_rt_fmaxl(fp_t x, fp_t y) {
+static __inline tf_float __compiler_rt_fmaxtf(tf_float x, tf_float y) {
return __compiler_rt_fmaxX(x, y);
}
-#else
-// The generic implementation only works for ieee754 floating point. For other
-// floating point types, continue to rely on the libm implementation for now.
-static __inline long double __compiler_rt_logbl(long double x) {
+#define __compiler_rt_logbl __compiler_rt_logbtf
+#define __compiler_rt_scalbnl __compiler_rt_scalbntf
+#define __compiler_rt_fmaxl __compiler_rt_fmaxtf
+#define crt_fabstf crt_fabsf128
+#define crt_copysigntf crt_copysignf128
+#elif defined(CRT_LDBL_128BIT)
+static __inline tf_float __compiler_rt_logbtf(tf_float x) {
return crt_logbl(x);
}
-static __inline long double __compiler_rt_scalbnl(long double x, int y) {
+static __inline tf_float __compiler_rt_scalbntf(tf_float x, int y) {
return crt_scalbnl(x, y);
}
-static __inline long double __compiler_rt_fmaxl(long double x, long double y) {
+static __inline tf_float __compiler_rt_fmaxtf(tf_float x, tf_float y) {
return crt_fmaxl(x, y);
}
-#endif // CRT_LDBL_128BIT
+#define __compiler_rt_logbl crt_logbl
+#define __compiler_rt_scalbnl crt_scalbnl
+#define __compiler_rt_fmaxl crt_fmaxl
+#define crt_fabstf crt_fabsl
+#define crt_copysigntf crt_copysignl
+#else
+#error Unsupported TF mode type
+#endif
#endif // *_PRECISION
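
Throughout these fp_lib.h hunks, `CRT_LDBL_128BIT` (meaning long double happens to be binary128) gives way to `CRT_HAS_TF_MODE` and the `tf_float` typedef from int_types.h, so the quad-precision builtins no longer assume `long double` is the 128-bit type. int_types.h is not part of this diff; the following is only a hypothetical sketch of the kind of selection involved, with simplified guard conditions and a `_demo` name.

```c
/* Hypothetical sketch: the real logic in int_types.h covers more
   targets and uses different guard macros. The idea is that tf_float
   is IEEE binary128 however the target spells it. */
#if defined(__SIZEOF_INT128__) && defined(__FLOAT128__)
typedef __float128 tf_float_demo;  /* binary128 as a distinct type */
#elif defined(__LDBL_MANT_DIG__) && __LDBL_MANT_DIG__ == 113
typedef long double tf_float_demo; /* long double already is binary128 */
#endif
```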
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/fp_mode.c b/contrib/llvm-project/compiler-rt/lib/builtins/fp_mode.c
index b84df8abb27d..51865473cda7 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/fp_mode.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/fp_mode.c
@@ -15,8 +15,8 @@
#include "fp_mode.h"
// IEEE-754 default rounding (to nearest, ties to even).
-CRT_FE_ROUND_MODE __fe_getround() { return CRT_FE_TONEAREST; }
+CRT_FE_ROUND_MODE __fe_getround(void) { return CRT_FE_TONEAREST; }
-int __fe_raise_inexact() {
+int __fe_raise_inexact(void) {
return 0;
}
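
The `(void)` change is a correctness fix, not style: before C23, an empty parameter list in a C definition declares a function with unspecified parameters rather than with none. Compare:

```c
/* Before C23, these declarations differ: */
int f();     /* unspecified parameters: f(1, 2, 3) still compiles */
int g(void); /* true prototype: passing any argument is an error */
```

C23 makes `()` mean `(void)`, but the runtime must still build under older standards, hence the explicit form.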
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/fp_mode.h b/contrib/llvm-project/compiler-rt/lib/builtins/fp_mode.h
index 26a3f4d10942..5b4969a441f2 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/fp_mode.h
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/fp_mode.h
@@ -13,8 +13,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef FP_MODE
-#define FP_MODE
+#ifndef FP_MODE_H
+#define FP_MODE_H
typedef enum {
CRT_FE_TONEAREST,
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/fp_trunc.h b/contrib/llvm-project/compiler-rt/lib/builtins/fp_trunc.h
index 00595edd5e01..141fe63e132d 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/fp_trunc.h
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/fp_trunc.h
@@ -19,19 +19,31 @@
typedef float src_t;
typedef uint32_t src_rep_t;
#define SRC_REP_C UINT32_C
-static const int srcSigBits = 23;
+static const int srcBits = sizeof(src_t) * CHAR_BIT;
+static const int srcSigFracBits = 23;
+// -1 accounts for the sign bit.
+// srcBits - srcSigFracBits - 1
+static const int srcExpBits = 8;
#elif defined SRC_DOUBLE
typedef double src_t;
typedef uint64_t src_rep_t;
#define SRC_REP_C UINT64_C
-static const int srcSigBits = 52;
+static const int srcBits = sizeof(src_t) * CHAR_BIT;
+static const int srcSigFracBits = 52;
+// -1 accounts for the sign bit.
+// srcBits - srcSigFracBits - 1
+static const int srcExpBits = 11;
#elif defined SRC_QUAD
-typedef long double src_t;
+typedef tf_float src_t;
typedef __uint128_t src_rep_t;
#define SRC_REP_C (__uint128_t)
-static const int srcSigBits = 112;
+static const int srcBits = sizeof(src_t) * CHAR_BIT;
+static const int srcSigFracBits = 112;
+// -1 accounts for the sign bit.
+// srcBits - srcSigFracBits - 1
+static const int srcExpBits = 15;
#else
#error Source should be double precision or quad precision!
@@ -41,13 +53,32 @@ static const int srcSigBits = 112;
typedef double dst_t;
typedef uint64_t dst_rep_t;
#define DST_REP_C UINT64_C
-static const int dstSigBits = 52;
+static const int dstBits = sizeof(dst_t) * CHAR_BIT;
+static const int dstSigFracBits = 52;
+// -1 accounts for the sign bit.
+// dstBits - dstSigFracBits - 1
+static const int dstExpBits = 11;
+
+#elif defined DST_80
+typedef xf_float dst_t;
+typedef __uint128_t dst_rep_t;
+#define DST_REP_C (__uint128_t)
+static const int dstBits = 80;
+static const int dstSigFracBits = 63;
+// -1 accounts for the sign bit.
+// -1 accounts for the explicitly stored integer bit.
+// dstBits - dstSigFracBits - 1 - 1
+static const int dstExpBits = 15;
#elif defined DST_SINGLE
typedef float dst_t;
typedef uint32_t dst_rep_t;
#define DST_REP_C UINT32_C
-static const int dstSigBits = 23;
+static const int dstBits = sizeof(dst_t) * CHAR_BIT;
+static const int dstSigFracBits = 23;
+// -1 accounts for the sign bit.
+// dstBits - dstSigFracBits - 1
+static const int dstExpBits = 8;
#elif defined DST_HALF
#ifdef COMPILER_RT_HAS_FLOAT16
@@ -57,16 +88,58 @@ typedef uint16_t dst_t;
#endif
typedef uint16_t dst_rep_t;
#define DST_REP_C UINT16_C
-static const int dstSigBits = 10;
+static const int dstBits = sizeof(dst_t) * CHAR_BIT;
+static const int dstSigFracBits = 10;
+// -1 accounts for the sign bit.
+// dstBits - dstSigFracBits - 1
+static const int dstExpBits = 5;
+
+#elif defined DST_BFLOAT
+typedef __bf16 dst_t;
+typedef uint16_t dst_rep_t;
+#define DST_REP_C UINT16_C
+static const int dstBits = sizeof(dst_t) * CHAR_BIT;
+static const int dstSigFracBits = 7;
+// -1 accounts for the sign bit.
+// dstBits - dstSigFracBits - 1
+static const int dstExpBits = 8;
#else
#error Destination should be single precision or double precision!
#endif // end destination precision
+// TODO: These helper routines should be placed into fp_lib.h
+// Currently they depend on macros/constants defined above.
+
+static inline src_rep_t extract_sign_from_src(src_rep_t x) {
+ const src_rep_t srcSignMask = SRC_REP_C(1) << (srcBits - 1);
+ return (x & srcSignMask) >> (srcBits - 1);
+}
+
+static inline src_rep_t extract_exp_from_src(src_rep_t x) {
+ const int srcSigBits = srcBits - 1 - srcExpBits;
+ const src_rep_t srcExpMask = ((SRC_REP_C(1) << srcExpBits) - 1) << srcSigBits;
+ return (x & srcExpMask) >> srcSigBits;
+}
+
+static inline src_rep_t extract_sig_frac_from_src(src_rep_t x) {
+ const src_rep_t srcSigFracMask = (SRC_REP_C(1) << srcSigFracBits) - 1;
+ return x & srcSigFracMask;
+}
+
+static inline dst_rep_t construct_dst_rep(dst_rep_t sign, dst_rep_t exp, dst_rep_t sigFrac) {
+ dst_rep_t result = (sign << (dstBits - 1)) | (exp << (dstBits - 1 - dstExpBits)) | sigFrac;
+ // Set the explicit integer bit in F80 if present.
+ if (dstBits == 80 && exp) {
+ result |= (DST_REP_C(1) << dstSigFracBits);
+ }
+ return result;
+}
+
// End of specialization parameters. Two helper routines for conversion to and
// from the representation of floating-point data as integer values follow.
-static __inline src_rep_t srcToRep(src_t x) {
+static inline src_rep_t srcToRep(src_t x) {
const union {
src_t f;
src_rep_t i;
@@ -74,7 +147,7 @@ static __inline src_rep_t srcToRep(src_t x) {
return rep.i;
}
-static __inline dst_t dstFromRep(dst_rep_t x) {
+static inline dst_t dstFromRep(dst_rep_t x) {
const union {
dst_t f;
dst_rep_t i;
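
This copy of `construct_dst_rep` gains an extra step for DST_80 because the x87 extended format, unlike the IEEE interchange formats, stores the significand's integer bit explicitly at bit 63; the helper sets it whenever the exponent field is nonzero. A sketch of the resulting layout for the value 1.0:

```c
#include <stdint.h>
#include <stdio.h>

int main(void) {
  /* 1.0 in x87 extended precision: sign 0, biased exponent 16383, and
     the integer bit stored explicitly at bit 63 of the significand. */
  uint16_t signExp = 16383;              /* sign:1 | exponent:15 */
  uint64_t sigFrac = 0;                  /* fraction bits, all zero */
  uint64_t significand = (UINT64_C(1) << 63) | sigFrac; /* explicit bit */
  printf("se=0x%04X significand=0x%016llX\n", signExp,
         (unsigned long long)significand);
  return 0;
}
```

An exponent field of zero (zero or a denormal) is the one case where the integer bit stays clear, which is exactly the `exp` test in the helper.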
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/fp_trunc_impl.inc b/contrib/llvm-project/compiler-rt/lib/builtins/fp_trunc_impl.inc
index 6662be7607e7..f68492495697 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/fp_trunc_impl.inc
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/fp_trunc_impl.inc
@@ -38,95 +38,118 @@
#include "fp_trunc.h"
+// The destination type may use a standard IEEE-754 interchange format or the
+// Intel 80-bit format. In particular, for the destination type dstSigFracBits
+// may not equal dstSigBits. The source type is assumed to be one of the
+// standard IEEE-754 types.
static __inline dst_t __truncXfYf2__(src_t a) {
// Various constants whose values follow from the type parameters.
// Any reasonable optimizer will fold and propagate all of these.
- const int srcBits = sizeof(src_t) * CHAR_BIT;
- const int srcExpBits = srcBits - srcSigBits - 1;
const int srcInfExp = (1 << srcExpBits) - 1;
const int srcExpBias = srcInfExp >> 1;
- const src_rep_t srcMinNormal = SRC_REP_C(1) << srcSigBits;
- const src_rep_t srcSignificandMask = srcMinNormal - 1;
- const src_rep_t srcInfinity = (src_rep_t)srcInfExp << srcSigBits;
- const src_rep_t srcSignMask = SRC_REP_C(1) << (srcSigBits + srcExpBits);
- const src_rep_t srcAbsMask = srcSignMask - 1;
- const src_rep_t roundMask = (SRC_REP_C(1) << (srcSigBits - dstSigBits)) - 1;
- const src_rep_t halfway = SRC_REP_C(1) << (srcSigBits - dstSigBits - 1);
- const src_rep_t srcQNaN = SRC_REP_C(1) << (srcSigBits - 1);
+ const src_rep_t srcMinNormal = SRC_REP_C(1) << srcSigFracBits;
+ const src_rep_t roundMask =
+ (SRC_REP_C(1) << (srcSigFracBits - dstSigFracBits)) - 1;
+ const src_rep_t halfway = SRC_REP_C(1)
+ << (srcSigFracBits - dstSigFracBits - 1);
+ const src_rep_t srcQNaN = SRC_REP_C(1) << (srcSigFracBits - 1);
const src_rep_t srcNaNCode = srcQNaN - 1;
- const int dstBits = sizeof(dst_t) * CHAR_BIT;
- const int dstExpBits = dstBits - dstSigBits - 1;
const int dstInfExp = (1 << dstExpBits) - 1;
const int dstExpBias = dstInfExp >> 1;
-
- const int underflowExponent = srcExpBias + 1 - dstExpBias;
const int overflowExponent = srcExpBias + dstInfExp - dstExpBias;
- const src_rep_t underflow = (src_rep_t)underflowExponent << srcSigBits;
- const src_rep_t overflow = (src_rep_t)overflowExponent << srcSigBits;
- const dst_rep_t dstQNaN = DST_REP_C(1) << (dstSigBits - 1);
+ const dst_rep_t dstQNaN = DST_REP_C(1) << (dstSigFracBits - 1);
const dst_rep_t dstNaNCode = dstQNaN - 1;
- // Break a into a sign and representation of the absolute value.
const src_rep_t aRep = srcToRep(a);
- const src_rep_t aAbs = aRep & srcAbsMask;
- const src_rep_t sign = aRep & srcSignMask;
- dst_rep_t absResult;
+ const src_rep_t srcSign = extract_sign_from_src(aRep);
+ const src_rep_t srcExp = extract_exp_from_src(aRep);
+ const src_rep_t srcSigFrac = extract_sig_frac_from_src(aRep);
+
+ dst_rep_t dstSign = srcSign;
+ dst_rep_t dstExp;
+ dst_rep_t dstSigFrac;
+
+ // Same size exponents and a's significand tail is 0.
+ // The significand can be truncated and the exponent can be copied over.
+ const int sigFracTailBits = srcSigFracBits - dstSigFracBits;
+ if (srcExpBits == dstExpBits &&
+ ((aRep >> sigFracTailBits) << sigFracTailBits) == aRep) {
+ dstExp = srcExp;
+ dstSigFrac = (dst_rep_t)(srcSigFrac >> sigFracTailBits);
+ return dstFromRep(construct_dst_rep(dstSign, dstExp, dstSigFrac));
+ }
- if (aAbs - underflow < aAbs - overflow) {
+ const int dstExpCandidate = ((int)srcExp - srcExpBias) + dstExpBias;
+ if (dstExpCandidate >= 1 && dstExpCandidate < dstInfExp) {
// The exponent of a is within the range of normal numbers in the
- // destination format. We can convert by simply right-shifting with
+ // destination format. We can convert by simply right-shifting with
// rounding and adjusting the exponent.
- absResult = aAbs >> (srcSigBits - dstSigBits);
- absResult -= (dst_rep_t)(srcExpBias - dstExpBias) << dstSigBits;
+ dstExp = dstExpCandidate;
+ dstSigFrac = (dst_rep_t)(srcSigFrac >> sigFracTailBits);
- const src_rep_t roundBits = aAbs & roundMask;
+ const src_rep_t roundBits = srcSigFrac & roundMask;
// Round to nearest.
if (roundBits > halfway)
- absResult++;
+ dstSigFrac++;
// Tie to even.
else if (roundBits == halfway)
- absResult += absResult & 1;
- } else if (aAbs > srcInfinity) {
+ dstSigFrac += dstSigFrac & 1;
+
+ // Rounding has changed the exponent.
+ if (dstSigFrac >= (DST_REP_C(1) << dstSigFracBits)) {
+ dstExp += 1;
+ dstSigFrac ^= (DST_REP_C(1) << dstSigFracBits);
+ }
+ } else if (srcExp == srcInfExp && srcSigFrac) {
// a is NaN.
// Conjure the result by beginning with infinity, setting the qNaN
// bit and inserting the (truncated) trailing NaN field.
- absResult = (dst_rep_t)dstInfExp << dstSigBits;
- absResult |= dstQNaN;
- absResult |=
- ((aAbs & srcNaNCode) >> (srcSigBits - dstSigBits)) & dstNaNCode;
- } else if (aAbs >= overflow) {
- // a overflows to infinity.
- absResult = (dst_rep_t)dstInfExp << dstSigBits;
+ dstExp = dstInfExp;
+ dstSigFrac = dstQNaN;
+ dstSigFrac |= ((srcSigFrac & srcNaNCode) >> sigFracTailBits) & dstNaNCode;
+ } else if ((int)srcExp >= overflowExponent) {
+ dstExp = dstInfExp;
+ dstSigFrac = 0;
} else {
// a underflows on conversion to the destination type or is an exact
// zero. The result may be a denormal or zero. Extract the exponent
// to get the shift amount for the denormalization.
- const int aExp = aAbs >> srcSigBits;
- const int shift = srcExpBias - dstExpBias - aExp + 1;
+ src_rep_t significand = srcSigFrac;
+ int shift = srcExpBias - dstExpBias - srcExp;
- const src_rep_t significand = (aRep & srcSignificandMask) | srcMinNormal;
+ if (srcExp) {
+ // Set the implicit integer bit if the source is a normal number.
+ significand |= srcMinNormal;
+ shift += 1;
+ }
// Right shift by the denormalization amount with sticky.
- if (shift > srcSigBits) {
- absResult = 0;
+ if (shift > srcSigFracBits) {
+ dstExp = 0;
+ dstSigFrac = 0;
} else {
- const bool sticky = (significand << (srcBits - shift)) != 0;
+ dstExp = 0;
+ const bool sticky = shift && ((significand << (srcBits - shift)) != 0);
src_rep_t denormalizedSignificand = significand >> shift | sticky;
- absResult = denormalizedSignificand >> (srcSigBits - dstSigBits);
+ dstSigFrac = denormalizedSignificand >> sigFracTailBits;
const src_rep_t roundBits = denormalizedSignificand & roundMask;
// Round to nearest
if (roundBits > halfway)
- absResult++;
+ dstSigFrac++;
// Ties to even
else if (roundBits == halfway)
- absResult += absResult & 1;
+ dstSigFrac += dstSigFrac & 1;
+
+ // Rounding has changed the exponent.
+ if (dstSigFrac >= (DST_REP_C(1) << dstSigFracBits)) {
+ dstExp += 1;
+ dstSigFrac ^= (DST_REP_C(1) << dstSigFracBits);
+ }
}
}
- // Apply the signbit to the absolute value.
- const dst_rep_t result = absResult | sign >> (srcBits - dstBits);
- return dstFromRep(result);
+ return dstFromRep(construct_dst_rep(dstSign, dstExp, dstSigFrac));
}
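
The ties-to-even rounding used throughout __truncXfYf2__ can be exercised in
isolation. A minimal standalone sketch of the same roundMask/halfway
computation (illustrative, not part of the change):

#include <stdint.h>
#include <stdio.h>

// Round the low `tail` bits of sig to nearest, ties to even, mirroring the
// roundBits/halfway logic above.
static uint64_t round_sig(uint64_t sig, int tail) {
  uint64_t roundMask = ((uint64_t)1 << tail) - 1;
  uint64_t halfway = (uint64_t)1 << (tail - 1);
  uint64_t roundBits = sig & roundMask;
  uint64_t result = sig >> tail;
  if (roundBits > halfway)
    result++;             // more than halfway: round up
  else if (roundBits == halfway)
    result += result & 1; // exactly halfway: round to the even neighbor
  return result;
}

int main(void) {
  printf("%llu\n", (unsigned long long)round_sig(0x2C, 3)); // 0b101|100 -> 6
  printf("%llu\n", (unsigned long long)round_sig(0x24, 3)); // 0b100|100 -> 4
  return 0;
}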
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/gcc_personality_v0.c b/contrib/llvm-project/compiler-rt/lib/builtins/gcc_personality_v0.c
index 792fe9022724..3437205a1cc4 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/gcc_personality_v0.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/gcc_personality_v0.c
@@ -158,7 +158,7 @@ static uintptr_t readEncodedPointer(const uint8_t **data, uint8_t encoding) {
}
#if defined(__arm__) && !defined(__USING_SJLJ_EXCEPTIONS__) && \
- !defined(__ARM_DWARF_EH__)
+ !defined(__ARM_DWARF_EH__) && !defined(__SEH__)
#define USING_ARM_EHABI 1
_Unwind_Reason_Code __gnu_unwind_frame(struct _Unwind_Exception *,
struct _Unwind_Context *);
@@ -234,7 +234,7 @@ COMPILER_RT_ABI _Unwind_Reason_Code __gcc_personality_v0(
}
// Walk call-site table looking for range that includes current PC.
uint8_t callSiteEncoding = *lsda++;
- uint32_t callSiteTableLength = readULEB128(&lsda);
+ size_t callSiteTableLength = readULEB128(&lsda);
const uint8_t *callSiteTableStart = lsda;
const uint8_t *callSiteTableEnd = callSiteTableStart + callSiteTableLength;
const uint8_t *p = callSiteTableStart;
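
readULEB128 (not shown in this hunk) decodes DWARF's variable-length
integers; widening callSiteTableLength from uint32_t to size_t avoids
truncating the decoded value. A sketch of the decoding, assuming the standard
LEB128 rules:

#include <stddef.h>
#include <stdint.h>

// Unsigned LEB128: 7 data bits per byte, least significant group first;
// the top bit of each byte marks continuation.
static size_t uleb128_decode(const uint8_t **data) {
  size_t result = 0;
  int shift = 0;
  const uint8_t *p = *data;
  uint8_t byte;
  do {
    byte = *p++;
    result |= (size_t)(byte & 0x7f) << shift;
    shift += 7;
  } while (byte & 0x80);
  *data = p;
  return result;
}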
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/i386/chkstk.S b/contrib/llvm-project/compiler-rt/lib/builtins/i386/chkstk.S
index f0bea2187457..cdd9a4c2a575 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/i386/chkstk.S
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/i386/chkstk.S
@@ -4,19 +4,19 @@
#include "../assembly.h"
-// _chkstk routine
+#ifdef __i386__
+
+// _chkstk (_alloca) routine - probe stack between %esp and (%esp-%eax) in 4k increments,
+// then decrement %esp by %eax. Preserves all registers except %esp and flags.
// This routine is windows specific
// http://msdn.microsoft.com/en-us/library/ms648426.aspx
-#ifdef __i386__
-
.text
.balign 4
-DEFINE_COMPILERRT_FUNCTION(__chkstk_ms)
+DEFINE_COMPILERRT_FUNCTION(_alloca) // _chkstk and _alloca are the same function
push %ecx
- push %eax
cmp $0x1000,%eax
- lea 12(%esp),%ecx
+ lea 8(%esp),%ecx // esp before calling this routine -> ecx
jb 1f
2:
sub $0x1000,%ecx
@@ -27,9 +27,13 @@ DEFINE_COMPILERRT_FUNCTION(__chkstk_ms)
1:
sub %eax,%ecx
test %ecx,(%ecx)
- pop %eax
- pop %ecx
+
+ lea 4(%esp),%eax // load pointer to the return address into eax
+ mov %ecx,%esp // install the new top of stack pointer into esp
+ mov -4(%eax),%ecx // restore ecx
+ push (%eax) // push return address onto the stack
+ sub %esp,%eax // restore the original value in eax
ret
-END_COMPILERRT_FUNCTION(__chkstk_ms)
+END_COMPILERRT_FUNCTION(_alloca)
#endif // __i386__
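
The probe loop exists because Windows commits stack guard pages one at a
time: every 4 KiB page between the old and new stack pointer must be touched
in order. A rough C model of the routine's behavior (illustrative only; the
real code must be assembly since it rewrites %esp itself):

// sp is the current stack top, size the requested allocation (%eax).
static void chkstk_model(volatile char *sp, unsigned long size) {
  while (size >= 0x1000) {
    sp -= 0x1000;
    (void)*sp;     // probe: fault in the next guard page
    size -= 0x1000;
  }
  sp -= size;
  (void)*sp;       // final probe at the new stack top
}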
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/i386/chkstk2.S b/contrib/llvm-project/compiler-rt/lib/builtins/i386/chkstk2.S
deleted file mode 100644
index 5d6cbdfa5c99..000000000000
--- a/contrib/llvm-project/compiler-rt/lib/builtins/i386/chkstk2.S
+++ /dev/null
@@ -1,41 +0,0 @@
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-
-#include "../assembly.h"
-
-#ifdef __i386__
-
-// _chkstk (_alloca) routine - probe stack between %esp and (%esp-%eax) in 4k increments,
-// then decrement %esp by %eax. Preserves all registers except %esp and flags.
-// This routine is windows specific
-// http://msdn.microsoft.com/en-us/library/ms648426.aspx
-
-.text
-.balign 4
-DEFINE_COMPILERRT_FUNCTION(_alloca) // _chkstk and _alloca are the same function
-DEFINE_COMPILERRT_FUNCTION(__chkstk)
- push %ecx
- cmp $0x1000,%eax
- lea 8(%esp),%ecx // esp before calling this routine -> ecx
- jb 1f
-2:
- sub $0x1000,%ecx
- test %ecx,(%ecx)
- sub $0x1000,%eax
- cmp $0x1000,%eax
- ja 2b
-1:
- sub %eax,%ecx
- test %ecx,(%ecx)
-
- lea 4(%esp),%eax // load pointer to the return address into eax
- mov %ecx,%esp // install the new top of stack pointer into esp
- mov -4(%eax),%ecx // restore ecx
- push (%eax) // push return address onto the stack
- sub %esp,%eax // restore the original value in eax
- ret
-END_COMPILERRT_FUNCTION(__chkstk)
-END_COMPILERRT_FUNCTION(_alloca)
-
-#endif // __i386__
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/i386/floatdixf.S b/contrib/llvm-project/compiler-rt/lib/builtins/i386/floatdixf.S
index 19dd0835a9c5..486e3b004fa3 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/i386/floatdixf.S
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/i386/floatdixf.S
@@ -4,7 +4,7 @@
#include "../assembly.h"
-// long double __floatdixf(di_int a);
+// xf_float __floatdixf(di_int a);
#ifdef __i386__
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/i386/floatundixf.S b/contrib/llvm-project/compiler-rt/lib/builtins/i386/floatundixf.S
index 30b4d9f4b96c..778c3dc0cc76 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/i386/floatundixf.S
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/i386/floatundixf.S
@@ -4,7 +4,7 @@
#include "../assembly.h"
-// long double __floatundixf(du_int a);16
+// xf_float __floatundixf(du_int a);
#ifdef __i386__
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/i386/fp_mode.c b/contrib/llvm-project/compiler-rt/lib/builtins/i386/fp_mode.c
index 80e272e4c9a3..887ca9c34c15 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/i386/fp_mode.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/i386/fp_mode.c
@@ -14,7 +14,7 @@
#define X87_TOWARDZERO 0x0c00
#define X87_RMODE_MASK (X87_TONEAREST | X87_UPWARD | X87_DOWNWARD | X87_TOWARDZERO)
-CRT_FE_ROUND_MODE __fe_getround() {
+CRT_FE_ROUND_MODE __fe_getround(void) {
// Assume that the rounding mode state for the fpu agrees with the SSE unit.
unsigned short cw;
__asm__ __volatile__ ("fnstcw %0" : "=m" (cw));
@@ -32,7 +32,7 @@ CRT_FE_ROUND_MODE __fe_getround() {
return CRT_FE_TONEAREST;
}
-int __fe_raise_inexact() {
+int __fe_raise_inexact(void) {
float f = 1.0f, g = 3.0f;
__asm__ __volatile__ ("fdivs %1" : "+t" (f) : "m" (g));
return 0;
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/int_endianness.h b/contrib/llvm-project/compiler-rt/lib/builtins/int_endianness.h
index def046c34a62..291c6b58c8ed 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/int_endianness.h
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/int_endianness.h
@@ -41,7 +41,7 @@
#error "unknown endianness"
#endif // !_LITTLE_ENDIAN
-#endif // Solaris and AuroraUX.
+#endif // Solaris
// ..
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/int_lib.h b/contrib/llvm-project/compiler-rt/lib/builtins/int_lib.h
index f10e643363ad..e5519518d978 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/int_lib.h
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/int_lib.h
@@ -51,7 +51,7 @@
#define SYMBOL_NAME(name) XSTR(__USER_LABEL_PREFIX__) #name
#if defined(__ELF__) || defined(__MINGW32__) || defined(__wasm__) || \
- defined(_AIX)
+ defined(_AIX) || defined(__CYGWIN__)
#define COMPILER_RT_ALIAS(name, aliasname) \
COMPILER_RT_ABI __typeof(name) aliasname __attribute__((__alias__(#name)));
#elif defined(__APPLE__)
@@ -121,14 +121,14 @@ COMPILER_RT_ABI tu_int __udivmodti4(tu_int a, tu_int b, tu_int *rem);
#if defined(_MSC_VER) && !defined(__clang__)
#include <intrin.h>
-int __inline __builtin_ctz(uint32_t value) {
+static int __inline __builtin_ctz(uint32_t value) {
unsigned long trailing_zero = 0;
if (_BitScanForward(&trailing_zero, value))
return trailing_zero;
return 32;
}
-int __inline __builtin_clz(uint32_t value) {
+static int __inline __builtin_clz(uint32_t value) {
unsigned long leading_zero = 0;
if (_BitScanReverse(&leading_zero, value))
return 31 - leading_zero;
@@ -136,14 +136,14 @@ int __inline __builtin_clz(uint32_t value) {
}
#if defined(_M_ARM) || defined(_M_X64)
-int __inline __builtin_clzll(uint64_t value) {
+static int __inline __builtin_clzll(uint64_t value) {
unsigned long leading_zero = 0;
if (_BitScanReverse64(&leading_zero, value))
return 63 - leading_zero;
return 64;
}
#else
-int __inline __builtin_clzll(uint64_t value) {
+static int __inline __builtin_clzll(uint64_t value) {
if (value == 0)
return 64;
uint32_t msh = (uint32_t)(value >> 32);
@@ -156,7 +156,7 @@ int __inline __builtin_clzll(uint64_t value) {
#define __builtin_clzl __builtin_clzll
-bool __inline __builtin_sadd_overflow(int x, int y, int *result) {
+static bool __inline __builtin_sadd_overflow(int x, int y, int *result) {
if ((x < 0) != (y < 0)) {
*result = x + y;
return false;
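
Making these shims static keeps each translation unit's copy private, so
including int_lib.h from several builtins sources no longer produces
duplicate definitions at link time. A portable reference for the ctz shim's
convention (32 for a zero input), usable as a quick self-check:

#include <assert.h>
#include <stdint.h>

static int ctz32_ref(uint32_t v) {
  if (v == 0)
    return 32; // matches the _BitScanForward-failure case above
  int n = 0;
  while ((v & 1) == 0) {
    v >>= 1;
    n++;
  }
  return n;
}

int main(void) {
  assert(ctz32_ref(0x8) == 3);
  assert(ctz32_ref(1) == 0);
  assert(ctz32_ref(0) == 32);
  return 0;
}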
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/int_math.h b/contrib/llvm-project/compiler-rt/lib/builtins/int_math.h
index 48b9580f5961..74d3e311db5e 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/int_math.h
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/int_math.h
@@ -65,6 +65,11 @@
#define crt_copysign(x, y) __builtin_copysign((x), (y))
#define crt_copysignf(x, y) __builtin_copysignf((x), (y))
#define crt_copysignl(x, y) __builtin_copysignl((x), (y))
+#if __has_builtin(__builtin_copysignf128)
+#define crt_copysignf128(x, y) __builtin_copysignf128((x), (y))
+#elif __has_builtin(__builtin_copysignq) || (defined(__GNUC__) && __GNUC__ >= 7)
+#define crt_copysignf128(x, y) __builtin_copysignq((x), (y))
+#endif
#endif
#if defined(_MSC_VER) && !defined(__clang__)
@@ -75,6 +80,11 @@
#define crt_fabs(x) __builtin_fabs((x))
#define crt_fabsf(x) __builtin_fabsf((x))
#define crt_fabsl(x) __builtin_fabsl((x))
+#if __has_builtin(__builtin_fabsf128)
+#define crt_fabsf128(x) __builtin_fabsf128((x))
+#elif __has_builtin(__builtin_fabsq) || (defined(__GNUC__) && __GNUC__ >= 7)
+#define crt_fabsf128(x) __builtin_fabsq((x))
+#endif
#endif
#if defined(_MSC_VER) && !defined(__clang__)
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/int_mulo_impl.inc b/contrib/llvm-project/compiler-rt/lib/builtins/int_mulo_impl.inc
index 567d8b9e6e60..27e7c8c43d60 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/int_mulo_impl.inc
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/int_mulo_impl.inc
@@ -18,10 +18,10 @@
static __inline fixint_t __muloXi4(fixint_t a, fixint_t b, int *overflow) {
const int N = (int)(sizeof(fixint_t) * CHAR_BIT);
- const fixint_t MIN = (fixint_t)1 << (N - 1);
+ const fixint_t MIN = (fixint_t)((fixuint_t)1 << (N - 1));
const fixint_t MAX = ~MIN;
*overflow = 0;
- fixint_t result = a * b;
+ fixint_t result = (fixuint_t)a * b;
if (a == MIN) {
if (b != 0 && b != 1)
*overflow = 1;
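
The new fixuint_t casts matter because (fixint_t)1 << (N - 1) shifts into the
sign bit and a signed a * b can overflow, both undefined behavior in C; the
detour through the unsigned type is well defined. A minimal illustration with
int32_t:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  const int N = (int)(sizeof(int32_t) * CHAR_BIT);
  // (int32_t)1 << (N - 1) would be UB; the unsigned shift wraps cleanly and
  // the conversion back yields INT32_MIN on two's-complement targets.
  const int32_t MIN = (int32_t)((uint32_t)1 << (N - 1));
  printf("%d\n", MIN == INT32_MIN); // prints 1
  return 0;
}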
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/int_mulv_impl.inc b/contrib/llvm-project/compiler-rt/lib/builtins/int_mulv_impl.inc
index 1e920716ec49..06559cf302ea 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/int_mulv_impl.inc
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/int_mulv_impl.inc
@@ -18,7 +18,7 @@
static __inline fixint_t __mulvXi3(fixint_t a, fixint_t b) {
const int N = (int)(sizeof(fixint_t) * CHAR_BIT);
- const fixint_t MIN = (fixint_t)1 << (N - 1);
+ const fixint_t MIN = (fixint_t)((fixuint_t)1 << (N - 1));
const fixint_t MAX = ~MIN;
if (a == MIN) {
if (b == 0 || b == 1)
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/int_to_fp.h b/contrib/llvm-project/compiler-rt/lib/builtins/int_to_fp.h
new file mode 100644
index 000000000000..2c1218f1e89c
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/int_to_fp.h
@@ -0,0 +1,82 @@
+//===-- int_to_fp.h - integer to floating point conversion ----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Set source and destination defines in order to use a correctly
+// parameterised floatXiYf implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef INT_TO_FP_H
+#define INT_TO_FP_H
+
+#include "int_lib.h"
+
+#if defined SRC_I64
+typedef int64_t src_t;
+typedef uint64_t usrc_t;
+static __inline int clzSrcT(usrc_t x) { return __builtin_clzll(x); }
+
+#elif defined SRC_U64
+typedef uint64_t src_t;
+typedef uint64_t usrc_t;
+static __inline int clzSrcT(usrc_t x) { return __builtin_clzll(x); }
+
+#elif defined SRC_I128
+typedef __int128_t src_t;
+typedef __uint128_t usrc_t;
+static __inline int clzSrcT(usrc_t x) { return __clzti2(x); }
+
+#elif defined SRC_U128
+typedef __uint128_t src_t;
+typedef __uint128_t usrc_t;
+static __inline int clzSrcT(usrc_t x) { return __clzti2(x); }
+
+#else
+#error Source should be a handled integer type.
+#endif
+
+#if defined DST_SINGLE
+typedef float dst_t;
+typedef uint32_t dst_rep_t;
+#define DST_REP_C UINT32_C
+
+enum {
+ dstSigBits = 23,
+};
+
+#elif defined DST_DOUBLE
+typedef double dst_t;
+typedef uint64_t dst_rep_t;
+#define DST_REP_C UINT64_C
+
+enum {
+ dstSigBits = 52,
+};
+
+#elif defined DST_QUAD
+typedef tf_float dst_t;
+typedef __uint128_t dst_rep_t;
+#define DST_REP_C (__uint128_t)
+
+enum {
+ dstSigBits = 112,
+};
+
+#else
+#error Destination should be a handled floating point type
+#endif
+
+static __inline dst_t dstFromRep(dst_rep_t x) {
+ const union {
+ dst_t f;
+ dst_rep_t i;
+ } rep = {.i = x};
+ return rep.f;
+}
+
+#endif // INT_TO_FP_H
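
A concrete builtin instantiates this header by defining the source and
destination selectors before pulling in the shared implementation. A sketch
of the pattern, assuming the SRC_U64/DST_SINGLE route (the real float*i*f
sources follow the same shape):

#define SRC_U64
#define DST_SINGLE
#include "int_to_fp_impl.inc"

// dst_t is float under DST_SINGLE; du_int is uint64_t from int_lib.h.
COMPILER_RT_ABI dst_t __floatundisf(du_int a) { return __floatXiYf__(a); }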
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/int_to_fp_impl.inc b/contrib/llvm-project/compiler-rt/lib/builtins/int_to_fp_impl.inc
new file mode 100644
index 000000000000..51f76fd76d80
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/int_to_fp_impl.inc
@@ -0,0 +1,72 @@
+//===-- int_to_fp_impl.inc - integer to floating point conversion ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a generic conversion from an integer type to an
+// IEEE-754 floating point type, allowing a common implementation to be shared
+// without copy and paste.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_to_fp.h"
+
+static __inline dst_t __floatXiYf__(src_t a) {
+ if (a == 0)
+ return 0.0;
+
+ enum {
+ dstMantDig = dstSigBits + 1,
+ srcBits = sizeof(src_t) * CHAR_BIT,
+ srcIsSigned = ((src_t)-1) < 0,
+ };
+
+ const src_t s = srcIsSigned ? a >> (srcBits - 1) : 0;
+
+ a = (usrc_t)(a ^ s) - s;
+ int sd = srcBits - clzSrcT(a); // number of significant digits
+ int e = sd - 1; // exponent
+ if (sd > dstMantDig) {
+ // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
+ // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
+ // 12345678901234567890123456
+ // 1 = msb 1 bit
+ // P = bit dstMantDig-1 bits to the right of 1
+ // Q = bit dstMantDig bits to the right of 1
+ // R = "or" of all bits to the right of Q
+ if (sd == dstMantDig + 1) {
+ a <<= 1;
+ } else if (sd == dstMantDig + 2) {
+ // Do nothing.
+ } else {
+ a = ((usrc_t)a >> (sd - (dstMantDig + 2))) |
+ ((a & ((usrc_t)(-1) >> ((srcBits + dstMantDig + 2) - sd))) != 0);
+ }
+ // finish:
+ a |= (a & 4) != 0; // Or P into R
+ ++a; // round - this step may add a significant bit
+ a >>= 2; // dump Q and R
+ // a is now rounded to dstMantDig or dstMantDig+1 bits
+ if (a & ((usrc_t)1 << dstMantDig)) {
+ a >>= 1;
+ ++e;
+ }
+ // a is now rounded to dstMantDig bits
+ } else {
+ a <<= (dstMantDig - sd);
+ // a is now rounded to dstMantDig bits
+ }
+ const int dstBits = sizeof(dst_t) * CHAR_BIT;
+ const dst_rep_t dstSignMask = DST_REP_C(1) << (dstBits - 1);
+ const int dstExpBits = dstBits - dstSigBits - 1;
+ const int dstExpBias = (1 << (dstExpBits - 1)) - 1;
+ const dst_rep_t dstSignificandMask = (DST_REP_C(1) << dstSigBits) - 1;
+ // Combine sign, exponent, and mantissa.
+ const dst_rep_t result = ((dst_rep_t)s & dstSignMask) |
+ ((dst_rep_t)(e + dstExpBias) << dstSigBits) |
+ ((dst_rep_t)(a) & dstSignificandMask);
+ return dstFromRep(result);
+}
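
A worked instance of the P/Q/R rounding above, assuming the
SRC_I64/DST_SINGLE parameterization (so dstMantDig == 24); the
33-significant-bit value forces the sd > dstMantDig + 2 branch:

#include <stdint.h>
#include <stdio.h>

int main(void) {
  uint64_t a = 0x10000017F; // bit 32 set, so sd == 33; 9 low bits must go
  int sd = 33, dstMantDig = 24;
  // Compress everything below bit Q into the sticky bit R, keeping P and Q.
  uint64_t x = (a >> (sd - (dstMantDig + 2))) |
               ((a & (UINT64_MAX >> (64 + dstMantDig + 2 - sd))) != 0);
  x |= (x & 4) != 0; // or P into R
  ++x;               // round
  x >>= 2;           // drop Q and R
  printf("0x%llx\n", (unsigned long long)x);                 // 0x800001
  printf("%a vs %a\n", (double)(float)a, (double)(x << 9));  // hardware agrees
  return 0;
}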
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/int_types.h b/contrib/llvm-project/compiler-rt/lib/builtins/int_types.h
index 7a72de480676..ca97391fc284 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/int_types.h
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/int_types.h
@@ -64,7 +64,7 @@ typedef union {
} udwords;
#if defined(__LP64__) || defined(__wasm__) || defined(__mips64) || \
- defined(__riscv) || defined(_WIN64)
+ defined(__SIZEOF_INT128__) || defined(_WIN64)
#define CRT_HAS_128BIT
#endif
@@ -139,7 +139,6 @@ typedef union {
udwords u;
double f;
} double_bits;
-#endif
typedef struct {
#if _YUGA_LITTLE_ENDIAN
@@ -165,16 +164,83 @@ typedef struct {
#define HAS_80_BIT_LONG_DOUBLE 0
#endif
-#if CRT_HAS_FLOATING_POINT
+#if HAS_80_BIT_LONG_DOUBLE
+typedef long double xf_float;
+typedef union {
+ uqwords u;
+ xf_float f;
+} xf_bits;
+#endif
+
+#ifdef __powerpc64__
+// From https://gcc.gnu.org/wiki/Ieee128PowerPC:
+// PowerPC64 uses the following suffixes:
+// IFmode: IBM extended double
+// KFmode: IEEE 128-bit floating point
+// TFmode: Matches the default for long double. With -mabi=ieeelongdouble,
+// it is IEEE 128-bit, with -mabi=ibmlongdouble IBM extended double
+// Since compiler-rt only implements the tf set of libcalls, we use long double
+// for the tf_float typedef.
+typedef long double tf_float;
+#define CRT_LDBL_128BIT
+#define CRT_HAS_F128
+#if __LDBL_MANT_DIG__ == 113 && !defined(__LONG_DOUBLE_IBM128__)
+#define CRT_HAS_IEEE_TF
+#define CRT_LDBL_IEEE_F128
+#endif
+#define TF_C(x) x##L
+#elif __LDBL_MANT_DIG__ == 113 || \
+ (__FLT_RADIX__ == 16 && __LDBL_MANT_DIG__ == 28)
+// Use long double instead of __float128 if it matches the IEEE 128-bit format
+// or the IBM hexadecimal format.
+#define CRT_LDBL_128BIT
+#define CRT_HAS_F128
+#if __LDBL_MANT_DIG__ == 113
+#define CRT_HAS_IEEE_TF
+#define CRT_LDBL_IEEE_F128
+#endif
+typedef long double tf_float;
+#define TF_C(x) x##L
+#elif defined(__FLOAT128__) || defined(__SIZEOF_FLOAT128__)
+#define CRT_HAS___FLOAT128_KEYWORD
+#define CRT_HAS_F128
+// NB: we assume the __float128 type uses IEEE representation.
+#define CRT_HAS_IEEE_TF
+typedef __float128 tf_float;
+#define TF_C(x) x##Q
+#endif
+
+#ifdef CRT_HAS_F128
typedef union {
uqwords u;
- long double f;
-} long_double_bits;
+ tf_float f;
+} tf_bits;
+#endif
+
+// __(u)int128_t is currently needed to compile the *tf builtins as we would
+// otherwise need to manually expand the bit manipulation on two 64-bit values.
+#if defined(CRT_HAS_128BIT) && defined(CRT_HAS_F128)
+#define CRT_HAS_TF_MODE
+#endif
#if __STDC_VERSION__ >= 199901L
typedef float _Complex Fcomplex;
typedef double _Complex Dcomplex;
typedef long double _Complex Lcomplex;
+#if defined(CRT_LDBL_128BIT)
+typedef Lcomplex Qcomplex;
+#define CRT_HAS_NATIVE_COMPLEX_F128
+#elif defined(CRT_HAS___FLOAT128_KEYWORD)
+#if defined(__clang_major__) && __clang_major__ > 10
+// Clang prior to 11 did not support __float128 _Complex.
+typedef __float128 _Complex Qcomplex;
+#define CRT_HAS_NATIVE_COMPLEX_F128
+#elif defined(__GNUC__) && __GNUC__ >= 7
+// GCC does not allow __float128 _Complex, but accepts _Float128 _Complex.
+typedef _Float128 _Complex Qcomplex;
+#define CRT_HAS_NATIVE_COMPLEX_F128
+#endif
+#endif
#define COMPLEX_REAL(x) __real__(x)
#define COMPLEX_IMAGINARY(x) __imag__(x)
@@ -194,5 +260,17 @@ typedef struct {
#define COMPLEX_REAL(x) (x).real
#define COMPLEX_IMAGINARY(x) (x).imaginary
#endif
+
+#ifdef CRT_HAS_NATIVE_COMPLEX_F128
+#define COMPLEXTF_REAL(x) __real__(x)
+#define COMPLEXTF_IMAGINARY(x) __imag__(x)
+#elif defined(CRT_HAS_F128)
+typedef struct {
+ tf_float real, imaginary;
+} Qcomplex;
+#define COMPLEXTF_REAL(x) (x).real
+#define COMPLEXTF_IMAGINARY(x) (x).imaginary
#endif
+
+#endif // CRT_HAS_FLOATING_POINT
#endif // INT_TYPES_H
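
The tf_float selection above lends itself to a compile-time sanity check. A
hedged sketch, assuming int_types.h is reachable on the include path:

#include "int_types.h"

#ifdef CRT_HAS_F128
// Every branch that defines tf_float picks a 16-byte storage format
// (IEEE binary128, IBM double-double, or the hexadecimal format).
_Static_assert(sizeof(tf_float) == 16, "tf_float should occupy 128 bits");
#endif
#ifdef CRT_HAS_TF_MODE
// Both __uint128_t and a 128-bit float exist, so the *tf builtins can build.
#endif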
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/loongarch/fp_mode.c b/contrib/llvm-project/compiler-rt/lib/builtins/loongarch/fp_mode.c
new file mode 100644
index 000000000000..31877fb02bd5
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/loongarch/fp_mode.c
@@ -0,0 +1,59 @@
+//=== lib/builtins/loongarch/fp_mode.c - Floating-point mode utilities -*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#include "../fp_mode.h"
+
+#define LOONGARCH_TONEAREST 0x0000
+#define LOONGARCH_TOWARDZERO 0x0100
+#define LOONGARCH_UPWARD 0x0200
+#define LOONGARCH_DOWNWARD 0x0300
+
+#define LOONGARCH_RMODE_MASK (LOONGARCH_TONEAREST | LOONGARCH_TOWARDZERO | \
+ LOONGARCH_UPWARD | LOONGARCH_DOWNWARD)
+
+#define LOONGARCH_INEXACT 0x10000
+
+CRT_FE_ROUND_MODE __fe_getround(void) {
+#if __loongarch_frlen != 0
+ int fcsr;
+# ifdef __clang__
+ __asm__ __volatile__("movfcsr2gr %0, $fcsr0" : "=r" (fcsr));
+# else
+ __asm__ __volatile__("movfcsr2gr %0, $r0" : "=r" (fcsr));
+# endif
+ fcsr &= LOONGARCH_RMODE_MASK;
+ switch (fcsr) {
+ case LOONGARCH_TOWARDZERO:
+ return CRT_FE_TOWARDZERO;
+ case LOONGARCH_DOWNWARD:
+ return CRT_FE_DOWNWARD;
+ case LOONGARCH_UPWARD:
+ return CRT_FE_UPWARD;
+ case LOONGARCH_TONEAREST:
+ default:
+ return CRT_FE_TONEAREST;
+ }
+#else
+ return CRT_FE_TONEAREST;
+#endif
+}
+
+int __fe_raise_inexact(void) {
+#if __loongarch_frlen != 0
+ int fcsr;
+# ifdef __clang__
+ __asm__ __volatile__("movfcsr2gr %0, $fcsr0" : "=r" (fcsr));
+ __asm__ __volatile__(
+ "movgr2fcsr $fcsr0, %0" :: "r" (fcsr | LOONGARCH_INEXACT));
+# else
+ __asm__ __volatile__("movfcsr2gr %0, $r0" : "=r" (fcsr));
+ __asm__ __volatile__(
+ "movgr2fcsr $r0, %0" :: "r" (fcsr | LOONGARCH_INEXACT));
+# endif
+#endif
+ return 0;
+}
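
These two hooks are consumed by the generic soft-float code to honor the
dynamic rounding mode. A sketch of the consumer side (shape borrowed from the
generic add implementation; the names here are illustrative):

#include "fp_mode.h"

static unsigned long round_result(unsigned long significand, int round_bit,
                                  int sticky_bit, int sign) {
  switch (__fe_getround()) {
  case CRT_FE_TONEAREST:
    if (round_bit && (sticky_bit || (significand & 1)))
      significand++; // round to nearest, ties to even
    break;
  case CRT_FE_DOWNWARD:
    if (sign && (round_bit || sticky_bit))
      significand++; // negative results round away from zero
    break;
  case CRT_FE_UPWARD:
    if (!sign && (round_bit || sticky_bit))
      significand++; // positive results round away from zero
    break;
  case CRT_FE_TOWARDZERO:
    break;           // truncate
  }
  if (round_bit || sticky_bit)
    __fe_raise_inexact();
  return significand;
}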
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/lshrti3.c b/contrib/llvm-project/compiler-rt/lib/builtins/lshrti3.c
index d00a22095993..5dc8a0a2347f 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/lshrti3.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/lshrti3.c
@@ -18,7 +18,7 @@
// Precondition: 0 <= b < bits_in_tword
-COMPILER_RT_ABI ti_int __lshrti3(ti_int a, si_int b) {
+COMPILER_RT_ABI ti_int __lshrti3(ti_int a, int b) {
const int bits_in_dword = (int)(sizeof(di_int) * CHAR_BIT);
utwords input;
utwords result;
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/mingw_fixfloat.c b/contrib/llvm-project/compiler-rt/lib/builtins/mingw_fixfloat.c
deleted file mode 100644
index 945be9d4344a..000000000000
--- a/contrib/llvm-project/compiler-rt/lib/builtins/mingw_fixfloat.c
+++ /dev/null
@@ -1,34 +0,0 @@
-//===-- mingw_fixfloat.c - Wrap int/float conversions for arm/windows -----===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "int_lib.h"
-
-COMPILER_RT_ABI di_int __fixdfdi(double a);
-COMPILER_RT_ABI di_int __fixsfdi(float a);
-COMPILER_RT_ABI du_int __fixunsdfdi(double a);
-COMPILER_RT_ABI du_int __fixunssfdi(float a);
-COMPILER_RT_ABI double __floatdidf(di_int a);
-COMPILER_RT_ABI float __floatdisf(di_int a);
-COMPILER_RT_ABI double __floatundidf(du_int a);
-COMPILER_RT_ABI float __floatundisf(du_int a);
-
-COMPILER_RT_ABI di_int __dtoi64(double a) { return __fixdfdi(a); }
-
-COMPILER_RT_ABI di_int __stoi64(float a) { return __fixsfdi(a); }
-
-COMPILER_RT_ABI du_int __dtou64(double a) { return __fixunsdfdi(a); }
-
-COMPILER_RT_ABI du_int __stou64(float a) { return __fixunssfdi(a); }
-
-COMPILER_RT_ABI double __i64tod(di_int a) { return __floatdidf(a); }
-
-COMPILER_RT_ABI float __i64tos(di_int a) { return __floatdisf(a); }
-
-COMPILER_RT_ABI double __u64tod(du_int a) { return __floatundidf(a); }
-
-COMPILER_RT_ABI float __u64tos(du_int a) { return __floatundisf(a); }
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/mulodi4.c b/contrib/llvm-project/compiler-rt/lib/builtins/mulodi4.c
index 7209676a327e..6ecf92664fb5 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/mulodi4.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/mulodi4.c
@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//
#define fixint_t di_int
+#define fixuint_t du_int
#include "int_mulo_impl.inc"
// Returns: a * b
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/mulosi4.c b/contrib/llvm-project/compiler-rt/lib/builtins/mulosi4.c
index 4e03c24455d6..3fd18a122a46 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/mulosi4.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/mulosi4.c
@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//
#define fixint_t si_int
+#define fixuint_t su_int
#include "int_mulo_impl.inc"
// Returns: a * b
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/muloti4.c b/contrib/llvm-project/compiler-rt/lib/builtins/muloti4.c
index 9a7aa85b022b..9aab6fc3efb3 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/muloti4.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/muloti4.c
@@ -19,6 +19,7 @@
// Effects: sets *overflow to 1 if a * b overflows
#define fixint_t ti_int
+#define fixuint_t tu_int
#include "int_mulo_impl.inc"
COMPILER_RT_ABI ti_int __muloti4(ti_int a, ti_int b, int *overflow) {
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/multc3.c b/contrib/llvm-project/compiler-rt/lib/builtins/multc3.c
index bb7f6aabfe2c..61a3f45e4727 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/multc3.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/multc3.c
@@ -10,56 +10,61 @@
//
//===----------------------------------------------------------------------===//
+#define QUAD_PRECISION
+#include "fp_lib.h"
#include "int_lib.h"
#include "int_math.h"
+#if defined(CRT_HAS_F128)
+
// Returns: the product of a + ib and c + id
-COMPILER_RT_ABI long double _Complex __multc3(long double a, long double b,
- long double c, long double d) {
- long double ac = a * c;
- long double bd = b * d;
- long double ad = a * d;
- long double bc = b * c;
- long double _Complex z;
- __real__ z = ac - bd;
- __imag__ z = ad + bc;
- if (crt_isnan(__real__ z) && crt_isnan(__imag__ z)) {
+COMPILER_RT_ABI Qcomplex __multc3(fp_t a, fp_t b, fp_t c, fp_t d) {
+ fp_t ac = a * c;
+ fp_t bd = b * d;
+ fp_t ad = a * d;
+ fp_t bc = b * c;
+ Qcomplex z;
+ COMPLEXTF_REAL(z) = ac - bd;
+ COMPLEXTF_IMAGINARY(z) = ad + bc;
+ if (crt_isnan(COMPLEXTF_REAL(z)) && crt_isnan(COMPLEXTF_IMAGINARY(z))) {
int recalc = 0;
if (crt_isinf(a) || crt_isinf(b)) {
- a = crt_copysignl(crt_isinf(a) ? 1 : 0, a);
- b = crt_copysignl(crt_isinf(b) ? 1 : 0, b);
+ a = crt_copysigntf(crt_isinf(a) ? 1 : 0, a);
+ b = crt_copysigntf(crt_isinf(b) ? 1 : 0, b);
if (crt_isnan(c))
- c = crt_copysignl(0, c);
+ c = crt_copysigntf(0, c);
if (crt_isnan(d))
- d = crt_copysignl(0, d);
+ d = crt_copysigntf(0, d);
recalc = 1;
}
if (crt_isinf(c) || crt_isinf(d)) {
- c = crt_copysignl(crt_isinf(c) ? 1 : 0, c);
- d = crt_copysignl(crt_isinf(d) ? 1 : 0, d);
+ c = crt_copysigntf(crt_isinf(c) ? 1 : 0, c);
+ d = crt_copysigntf(crt_isinf(d) ? 1 : 0, d);
if (crt_isnan(a))
- a = crt_copysignl(0, a);
+ a = crt_copysigntf(0, a);
if (crt_isnan(b))
- b = crt_copysignl(0, b);
+ b = crt_copysigntf(0, b);
recalc = 1;
}
if (!recalc &&
(crt_isinf(ac) || crt_isinf(bd) || crt_isinf(ad) || crt_isinf(bc))) {
if (crt_isnan(a))
- a = crt_copysignl(0, a);
+ a = crt_copysigntf(0, a);
if (crt_isnan(b))
- b = crt_copysignl(0, b);
+ b = crt_copysigntf(0, b);
if (crt_isnan(c))
- c = crt_copysignl(0, c);
+ c = crt_copysigntf(0, c);
if (crt_isnan(d))
- d = crt_copysignl(0, d);
+ d = crt_copysigntf(0, d);
recalc = 1;
}
if (recalc) {
- __real__ z = CRT_INFINITY * (a * c - b * d);
- __imag__ z = CRT_INFINITY * (a * d + b * c);
+ COMPLEXTF_REAL(z) = CRT_INFINITY * (a * c - b * d);
+ COMPLEXTF_IMAGINARY(z) = CRT_INFINITY * (a * d + b * c);
}
}
return z;
}
+
+#endif
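
The recalculation above implements C Annex G: a product with an infinite
factor must come out infinite even when the naive arithmetic yields NaN in
both parts. The same effect, shown with double and the analogous __muldc3 for
brevity:

#include <complex.h>
#include <math.h>
#include <stdio.h>

int main(void) {
  double _Complex a = CMPLX(INFINITY, NAN); // an "infinity" in Annex G terms
  double _Complex b = CMPLX(2.0, 0.0);
  // Naive real part: inf*2 - nan*0 is NaN, as is the imaginary part...
  printf("naive:   %f\n", creal(a) * creal(b) - cimag(a) * cimag(b));
  // ...but the libcall the compiler emits for a*b recovers infinity.
  double _Complex z = a * b;
  printf("Annex G: %f\n", creal(z)); // inf
  return 0;
}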
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/multf3.c b/contrib/llvm-project/compiler-rt/lib/builtins/multf3.c
index 0626fb8c7fc9..8fd73688712c 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/multf3.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/multf3.c
@@ -14,7 +14,7 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#if defined(CRT_HAS_TF_MODE)
#include "fp_mul_impl.inc"
COMPILER_RT_ABI fp_t __multf3(fp_t a, fp_t b) { return __mulXf3__(a, b); }
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/mulvdi3.c b/contrib/llvm-project/compiler-rt/lib/builtins/mulvdi3.c
index 1d672c6dc155..d787d297d564 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/mulvdi3.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/mulvdi3.c
@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//
#define fixint_t di_int
+#define fixuint_t du_int
#include "int_mulv_impl.inc"
// Returns: a * b
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/mulvsi3.c b/contrib/llvm-project/compiler-rt/lib/builtins/mulvsi3.c
index 00b2e50eeca9..2571881195fc 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/mulvsi3.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/mulvsi3.c
@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//
#define fixint_t si_int
+#define fixuint_t su_int
#include "int_mulv_impl.inc"
// Returns: a * b
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/mulvti3.c b/contrib/llvm-project/compiler-rt/lib/builtins/mulvti3.c
index ba355149f9a7..fad9b2ae2765 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/mulvti3.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/mulvti3.c
@@ -19,6 +19,7 @@
// Effects: aborts if a * b overflows
#define fixint_t ti_int
+#define fixuint_t tu_int
#include "int_mulv_impl.inc"
COMPILER_RT_ABI ti_int __mulvti3(ti_int a, ti_int b) { return __mulvXi3(a, b); }
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/mulxc3.c b/contrib/llvm-project/compiler-rt/lib/builtins/mulxc3.c
index 2f7f14c28453..66b5b58190f7 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/mulxc3.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/mulxc3.c
@@ -17,12 +17,12 @@
// Returns: the product of a + ib and c + id
-COMPILER_RT_ABI Lcomplex __mulxc3(long double __a, long double __b,
- long double __c, long double __d) {
- long double __ac = __a * __c;
- long double __bd = __b * __d;
- long double __ad = __a * __d;
- long double __bc = __b * __c;
+COMPILER_RT_ABI Lcomplex __mulxc3(xf_float __a, xf_float __b, xf_float __c,
+ xf_float __d) {
+ xf_float __ac = __a * __c;
+ xf_float __bd = __b * __d;
+ xf_float __ad = __a * __d;
+ xf_float __bc = __b * __c;
Lcomplex z;
COMPLEX_REAL(z) = __ac - __bd;
COMPLEX_IMAGINARY(z) = __ad + __bc;
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/negdi2.c b/contrib/llvm-project/compiler-rt/lib/builtins/negdi2.c
index 5a525d4b0e55..714ac8ca66d3 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/negdi2.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/negdi2.c
@@ -17,5 +17,5 @@
COMPILER_RT_ABI di_int __negdi2(di_int a) {
// Note: this routine is here for API compatibility; any sane compiler
// should expand it inline.
- return -a;
+ return -(du_int)a;
}
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/negti2.c b/contrib/llvm-project/compiler-rt/lib/builtins/negti2.c
index d52ba4e13a46..ab6e09ded819 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/negti2.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/negti2.c
@@ -19,7 +19,7 @@
COMPILER_RT_ABI ti_int __negti2(ti_int a) {
// Note: this routine is here for API compatibility; any sane compiler
// should expand it inline.
- return -a;
+ return -(tu_int)a;
}
#endif // CRT_HAS_128BIT
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/negvdi2.c b/contrib/llvm-project/compiler-rt/lib/builtins/negvdi2.c
index 5c52b3ec2aa6..8c1cf2fa58d4 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/negvdi2.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/negvdi2.c
@@ -17,7 +17,8 @@
// Effects: aborts if -a overflows
COMPILER_RT_ABI di_int __negvdi2(di_int a) {
- const di_int MIN = (di_int)1 << ((int)(sizeof(di_int) * CHAR_BIT) - 1);
+ const di_int MIN =
+ (di_int)((du_int)1 << ((int)(sizeof(di_int) * CHAR_BIT) - 1));
if (a == MIN)
compilerrt_abort();
return -a;
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/negvsi2.c b/contrib/llvm-project/compiler-rt/lib/builtins/negvsi2.c
index cccdee6dc5e5..70f214f9761d 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/negvsi2.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/negvsi2.c
@@ -17,7 +17,8 @@
// Effects: aborts if -a overflows
COMPILER_RT_ABI si_int __negvsi2(si_int a) {
- const si_int MIN = (si_int)1 << ((int)(sizeof(si_int) * CHAR_BIT) - 1);
+ const si_int MIN =
+ (si_int)((su_int)1 << ((int)(sizeof(si_int) * CHAR_BIT) - 1));
if (a == MIN)
compilerrt_abort();
return -a;
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/negvti2.c b/contrib/llvm-project/compiler-rt/lib/builtins/negvti2.c
index 8f92e1046d0c..fc1484015a8b 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/negvti2.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/negvti2.c
@@ -19,7 +19,7 @@
// Effects: aborts if -a overflows
COMPILER_RT_ABI ti_int __negvti2(ti_int a) {
- const ti_int MIN = (ti_int)1 << ((int)(sizeof(ti_int) * CHAR_BIT) - 1);
+ const ti_int MIN = (tu_int)1 << ((int)(sizeof(ti_int) * CHAR_BIT) - 1);
if (a == MIN)
compilerrt_abort();
return -a;
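
All of the neg/negv fixes above share one motive: negating the most negative
value overflows, and signed overflow is undefined behavior, while unsigned
negation wraps. A minimal illustration:

#include <stdint.h>
#include <stdio.h>

int main(void) {
  int64_t a = INT64_MIN;
  // -a would be UB; the unsigned negation wraps and converts back to the
  // same bit pattern (two's complement assumed, as everywhere in builtins).
  int64_t n = (int64_t)(-(uint64_t)a);
  printf("%d\n", n == INT64_MIN); // prints 1
  return 0;
}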
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/os_version_check.c b/contrib/llvm-project/compiler-rt/lib/builtins/os_version_check.c
index d7194b99ae54..182eabe7a6ae 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/os_version_check.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/os_version_check.c
@@ -86,6 +86,10 @@ typedef Boolean (*CFStringGetCStringFuncTy)(CFStringRef, char *, CFIndex,
CFStringEncoding);
typedef void (*CFReleaseFuncTy)(CFTypeRef);
+extern __attribute__((weak_import))
+bool _availability_version_check(uint32_t count,
+ dyld_build_version_t versions[]);
+
static void _initializeAvailabilityCheck(bool LoadPlist) {
if (AvailabilityVersionCheck && !LoadPlist) {
// New API is supported and we're not being asked to load the plist,
@@ -94,8 +98,8 @@ static void _initializeAvailabilityCheck(bool LoadPlist) {
}
  // Use the new API if it's available.
- AvailabilityVersionCheck = (AvailabilityVersionCheckFuncTy)dlsym(
- RTLD_DEFAULT, "_availability_version_check");
+ if (_availability_version_check)
+ AvailabilityVersionCheck = &_availability_version_check;
if (AvailabilityVersionCheck && !LoadPlist) {
// New API is supported and we're not being asked to load the plist,
@@ -307,8 +311,8 @@ static void readSystemProperties(void) {
}
int32_t __isOSVersionAtLeast(int32_t Major, int32_t Minor, int32_t Subminor) {
- (int32_t) Minor;
- (int32_t) Subminor;
+ (void) Minor;
+ (void) Subminor;
static pthread_once_t once = PTHREAD_ONCE_INIT;
pthread_once(&once, readSystemProperties);
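
Replacing the dlsym lookup with a weak import lets the static linker record
the dependency while the symbol still resolves to null on OS versions that
predate it. The general Darwin pattern (maybe_present is a hypothetical
symbol, not part of the diff):

// Weakly imported: the address is NULL at run time if the running OS does
// not provide the symbol, so it must be tested before any call.
extern __attribute__((weak_import)) void maybe_present(void);

static void call_if_available(void) {
  if (maybe_present)
    maybe_present();
}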
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/powitf2.c b/contrib/llvm-project/compiler-rt/lib/builtins/powitf2.c
index 8e639a03a3c4..e02db40767ac 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/powitf2.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/powitf2.c
@@ -13,13 +13,13 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#if defined(CRT_HAS_TF_MODE)
// Returns: a ^ b
-COMPILER_RT_ABI long double __powitf2(long double a, int b) {
+COMPILER_RT_ABI fp_t __powitf2(fp_t a, int b) {
const int recip = b < 0;
- long double r = 1;
+ fp_t r = 1;
while (1) {
if (b & 1)
r *= a;
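
The loop above is exponentiation by squaring. A double-precision sketch of
the same shape, including the final reciprocal that handles negative
exponents:

#include <stdio.h>

static double powi(double a, int b) {
  const int recip = b < 0;
  double r = 1;
  while (1) {
    if (b & 1)
      r *= a;  // fold in the current bit of the exponent
    b /= 2;    // truncates toward zero, so it reaches 0 for negative b too
    if (b == 0)
      break;
    a *= a;    // square for the next bit
  }
  return recip ? 1 / r : r;
}

int main(void) {
  printf("%g %g\n", powi(2.0, 10), powi(2.0, -3)); // 1024 0.125
  return 0;
}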
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/powixf2.c b/contrib/llvm-project/compiler-rt/lib/builtins/powixf2.c
index 3edfe9fd7af5..ab8c694ada2a 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/powixf2.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/powixf2.c
@@ -16,9 +16,9 @@
// Returns: a ^ b
-COMPILER_RT_ABI long double __powixf2(long double a, int b) {
+COMPILER_RT_ABI xf_float __powixf2(xf_float a, int b) {
const int recip = b < 0;
- long double r = 1;
+ xf_float r = 1;
while (1) {
if (b & 1)
r *= a;
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/riscv/fp_mode.c b/contrib/llvm-project/compiler-rt/lib/builtins/riscv/fp_mode.c
new file mode 100644
index 000000000000..1a5a3de95de9
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/riscv/fp_mode.c
@@ -0,0 +1,42 @@
+//=== lib/builtins/riscv/fp_mode.c - Floating-point mode utilities -*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#include "../fp_mode.h"
+
+#define RISCV_TONEAREST 0x0
+#define RISCV_TOWARDZERO 0x1
+#define RISCV_DOWNWARD 0x2
+#define RISCV_UPWARD 0x3
+
+#define RISCV_INEXACT 0x1
+
+CRT_FE_ROUND_MODE __fe_getround(void) {
+#if defined(__riscv_f) || defined(__riscv_zfinx)
+ int frm;
+ __asm__ __volatile__("frrm %0" : "=r" (frm));
+ switch (frm) {
+ case RISCV_TOWARDZERO:
+ return CRT_FE_TOWARDZERO;
+ case RISCV_DOWNWARD:
+ return CRT_FE_DOWNWARD;
+ case RISCV_UPWARD:
+ return CRT_FE_UPWARD;
+ case RISCV_TONEAREST:
+ default:
+ return CRT_FE_TONEAREST;
+ }
+#else
+ return CRT_FE_TONEAREST;
+#endif
+}
+
+int __fe_raise_inexact(void) {
+#if defined(__riscv_f) || defined(__riscv_zfinx)
+ __asm__ __volatile__("csrsi fflags, %0" :: "i" (RISCV_INEXACT));
+#endif
+ return 0;
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/riscv/restore.S b/contrib/llvm-project/compiler-rt/lib/builtins/riscv/restore.S
index 12f0d3365655..6f43842c8ca6 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/riscv/restore.S
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/riscv/restore.S
@@ -22,6 +22,8 @@
#if __riscv_xlen == 32
+#ifndef __riscv_32e
+
.globl __riscv_restore_12
.type __riscv_restore_12,@function
__riscv_restore_12:
@@ -86,14 +88,35 @@ __riscv_restore_0:
addi sp, sp, 16
ret
+#else
+
+ .globl __riscv_restore_2
+ .type __riscv_restore_2,@function
+ .globl __riscv_restore_1
+ .type __riscv_restore_1,@function
+ .globl __riscv_restore_0
+ .type __riscv_restore_0,@function
+__riscv_restore_2:
+__riscv_restore_1:
+__riscv_restore_0:
+ lw s1, 0(sp)
+ lw s0, 4(sp)
+ lw ra, 8(sp)
+ addi sp, sp, 12
+ ret
+
+#endif
+
#elif __riscv_xlen == 64
+#ifndef __riscv_64e
+
.globl __riscv_restore_12
.type __riscv_restore_12,@function
__riscv_restore_12:
ld s11, 8(sp)
addi sp, sp, 16
- // fallthrough into __riscv_restore_11/10/9/8
+ // fallthrough into __riscv_restore_11/10
.globl __riscv_restore_11
.type __riscv_restore_11,@function
@@ -143,10 +166,6 @@ __riscv_restore_4:
.type __riscv_restore_3,@function
.globl __riscv_restore_2
.type __riscv_restore_2,@function
- .globl __riscv_restore_1
- .type __riscv_restore_1,@function
- .globl __riscv_restore_0
- .type __riscv_restore_0,@function
__riscv_restore_3:
__riscv_restore_2:
ld s2, 0(sp)
@@ -154,6 +173,10 @@ __riscv_restore_2:
addi sp, sp, 16
// fallthrough into __riscv_restore_1/0
+ .globl __riscv_restore_1
+ .type __riscv_restore_1,@function
+ .globl __riscv_restore_0
+ .type __riscv_restore_0,@function
__riscv_restore_1:
__riscv_restore_0:
ld s0, 0(sp)
@@ -162,5 +185,24 @@ __riscv_restore_0:
ret
#else
+
+ .globl __riscv_restore_2
+ .type __riscv_restore_2,@function
+ .globl __riscv_restore_1
+ .type __riscv_restore_1,@function
+ .globl __riscv_restore_0
+ .type __riscv_restore_0,@function
+__riscv_restore_2:
+__riscv_restore_1:
+__riscv_restore_0:
+ ld s1, 0(sp)
+ ld s0, 8(sp)
+ ld ra, 16(sp)
+ addi sp, sp, 24
+ ret
+
+#endif
+
+#else
# error "xlen must be 32 or 64 for save-restore implementation
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/riscv/save.S b/contrib/llvm-project/compiler-rt/lib/builtins/riscv/save.S
index d811bf584fc3..3e044179ff7f 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/riscv/save.S
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/riscv/save.S
@@ -18,6 +18,8 @@
#if __riscv_xlen == 32
+#ifndef __riscv_32e
+
.globl __riscv_save_12
.type __riscv_save_12,@function
__riscv_save_12:
@@ -92,8 +94,29 @@ __riscv_save_0:
sw ra, 12(sp)
jr t0
+#else
+
+ .globl __riscv_save_2
+ .type __riscv_save_2,@function
+ .globl __riscv_save_1
+ .type __riscv_save_1,@function
+ .globl __riscv_save_0
+ .type __riscv_save_0,@function
+__riscv_save_2:
+__riscv_save_1:
+__riscv_save_0:
+ addi sp, sp, -12
+ sw s1, 0(sp)
+ sw s0, 4(sp)
+ sw ra, 8(sp)
+ jr t0
+
+#endif
+
#elif __riscv_xlen == 64
+#ifndef __riscv_64e
+
.globl __riscv_save_12
.type __riscv_save_12,@function
__riscv_save_12:
@@ -174,11 +197,32 @@ __riscv_save_2:
.type __riscv_save_1,@function
.globl __riscv_save_0
.type __riscv_save_0,@function
+__riscv_save_1:
+__riscv_save_0:
addi sp, sp, -16
sd s0, 0(sp)
sd ra, 8(sp)
jr t0
#else
+
+ .globl __riscv_save_2
+ .type __riscv_save_2,@function
+ .globl __riscv_save_1
+ .type __riscv_save_1,@function
+ .globl __riscv_save_0
+ .type __riscv_save_0,@function
+__riscv_save_2:
+__riscv_save_1:
+__riscv_save_0:
+ addi sp, sp, -24
+ sd s1, 0(sp)
+ sd s0, 8(sp)
+ sd ra, 16(sp)
+ jr t0
+
+#endif
+
+#else
# error "xlen must be 32 or 64 for save-restore implementation
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/subtf3.c b/contrib/llvm-project/compiler-rt/lib/builtins/subtf3.c
index 3364c28f8179..e1b1022034bf 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/subtf3.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/subtf3.c
@@ -13,7 +13,7 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#if defined(CRT_HAS_TF_MODE)
COMPILER_RT_ABI fp_t __addtf3(fp_t a, fp_t b);
// Subtraction; flip the sign bit of b and add.
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/trampoline_setup.c b/contrib/llvm-project/compiler-rt/lib/builtins/trampoline_setup.c
index a62431723d78..844eb2794414 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/trampoline_setup.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/trampoline_setup.c
@@ -16,7 +16,7 @@ extern void __clear_cache(void *start, void *end);
// which loads r11 with a pointer to the outer function's locals
// and then jumps to the target nested function.
-#if __ppc__ && !defined(__powerpc64__)
+#if __powerpc__ && !defined(__powerpc64__)
COMPILER_RT_ABI void __trampoline_setup(uint32_t *trampOnStack,
int trampSizeAllocated,
const void *realFunc, void *localsPtr) {
@@ -40,4 +40,4 @@ COMPILER_RT_ABI void __trampoline_setup(uint32_t *trampOnStack,
// clear instruction cache
__clear_cache(trampOnStack, &trampOnStack[10]);
}
-#endif // __ppc__ && !defined(__powerpc64__)
+#endif // __powerpc__ && !defined(__powerpc64__)
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/truncdfbf2.c b/contrib/llvm-project/compiler-rt/lib/builtins/truncdfbf2.c
new file mode 100644
index 000000000000..dbd54dcd50ca
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/truncdfbf2.c
@@ -0,0 +1,13 @@
+//===-- lib/truncdfbf2.c - double -> bfloat conversion ------------*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#define SRC_DOUBLE
+#define DST_BFLOAT
+#include "fp_trunc_impl.inc"
+
+COMPILER_RT_ABI dst_t __truncdfbf2(double a) { return __truncXfYf2__(a); }
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/truncsfbf2.c b/contrib/llvm-project/compiler-rt/lib/builtins/truncsfbf2.c
new file mode 100644
index 000000000000..6bed116af986
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/truncsfbf2.c
@@ -0,0 +1,13 @@
+//===-- lib/truncsfbf2.c - single -> bfloat conversion ------------*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#define SRC_SINGLE
+#define DST_BFLOAT
+#include "fp_trunc_impl.inc"
+
+COMPILER_RT_ABI dst_t __truncsfbf2(float a) { return __truncXfYf2__(a); }
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/trunctfdf2.c b/contrib/llvm-project/compiler-rt/lib/builtins/trunctfdf2.c
index 6857ea54d8a5..a5bdded53751 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/trunctfdf2.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/trunctfdf2.c
@@ -9,11 +9,11 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#if defined(CRT_HAS_TF_MODE)
#define SRC_QUAD
#define DST_DOUBLE
#include "fp_trunc_impl.inc"
-COMPILER_RT_ABI double __trunctfdf2(long double a) { return __truncXfYf2__(a); }
+COMPILER_RT_ABI dst_t __trunctfdf2(src_t a) { return __truncXfYf2__(a); }
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/trunctfhf2.c b/contrib/llvm-project/compiler-rt/lib/builtins/trunctfhf2.c
index e3a2309d954b..3f031e0f8445 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/trunctfhf2.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/trunctfhf2.c
@@ -10,14 +10,11 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT) && \
- defined(COMPILER_RT_HAS_FLOAT16)
+#if defined(CRT_HAS_TF_MODE) && defined(COMPILER_RT_HAS_FLOAT16)
#define SRC_QUAD
#define DST_HALF
#include "fp_trunc_impl.inc"
-COMPILER_RT_ABI _Float16 __trunctfhf2(long double a) {
- return __truncXfYf2__(a);
-}
+COMPILER_RT_ABI dst_t __trunctfhf2(src_t a) { return __truncXfYf2__(a); }
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/trunctfsf2.c b/contrib/llvm-project/compiler-rt/lib/builtins/trunctfsf2.c
index 0261b1e90f5d..b65b5af2fc00 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/trunctfsf2.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/trunctfsf2.c
@@ -9,11 +9,11 @@
#define QUAD_PRECISION
#include "fp_lib.h"
-#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#if defined(CRT_HAS_TF_MODE)
#define SRC_QUAD
#define DST_SINGLE
#include "fp_trunc_impl.inc"
-COMPILER_RT_ABI float __trunctfsf2(long double a) { return __truncXfYf2__(a); }
+COMPILER_RT_ABI dst_t __trunctfsf2(src_t a) { return __truncXfYf2__(a); }
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/trunctfxf2.c b/contrib/llvm-project/compiler-rt/lib/builtins/trunctfxf2.c
new file mode 100644
index 000000000000..49bd32d42aac
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/trunctfxf2.c
@@ -0,0 +1,23 @@
+//===-- lib/trunctfxf2.c - quad -> 80-bit extended conversion -----*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// Assumption: long double is an IEEE 80-bit floating-point type padded to
+// 128 bits.
+
+#define QUAD_PRECISION
+#include "fp_lib.h"
+
+#if defined(CRT_HAS_TF_MODE) && __LDBL_MANT_DIG__ == 64 && defined(__x86_64__)
+
+#define SRC_QUAD
+#define DST_80
+#include "fp_trunc_impl.inc"
+
+COMPILER_RT_ABI xf_float __trunctfxf2(tf_float a) { return __truncXfYf2__(a); }
+
+#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/udivmoddi4.c b/contrib/llvm-project/compiler-rt/lib/builtins/udivmoddi4.c
index 10b41df28f84..123e5fb05f8c 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/udivmoddi4.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/udivmoddi4.c
@@ -21,7 +21,7 @@
// MSVC throws a warning about mod 0 here, disable it for builds that
// warn-as-error
#pragma warning(push)
-#pragma warning(disable : 4724)
+#pragma warning(disable : 4723 4724)
#endif
COMPILER_RT_ABI du_int __udivmoddi4(du_int a, du_int b, du_int *rem) {
@@ -82,7 +82,7 @@ COMPILER_RT_ABI du_int __udivmoddi4(du_int a, du_int b, du_int *rem) {
r.s.high = n.s.high & (d.s.high - 1);
*rem = r.all;
}
- return n.s.high >> __builtin_ctz(d.s.high);
+ return n.s.high >> ctzsi(d.s.high);
}
// K K
// ---
@@ -112,7 +112,7 @@ COMPILER_RT_ABI du_int __udivmoddi4(du_int a, du_int b, du_int *rem) {
*rem = n.s.low & (d.s.low - 1);
if (d.s.low == 1)
return n.all;
- sr = __builtin_ctz(d.s.low);
+ sr = ctzsi(d.s.low);
q.s.high = n.s.high >> sr;
q.s.low = (n.s.high << (n_uword_bits - sr)) | (n.s.low >> sr);
return q.all;
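
ctzsi is compiler-rt's portable count-trailing-zeros wrapper; both call sites
above sit on the power-of-two-divisor fast path, where division is a shift
and the remainder a mask. A quick check of the identity (using the GCC/Clang
builtin directly):

#include <assert.h>
#include <stdint.h>

int main(void) {
  uint32_t n = 1000, d = 64; // d is a power of two
  assert((n / d) == (n >> __builtin_ctz(d)));
  assert((n % d) == (n & (d - 1)));
  return 0;
}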
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/x86_64/chkstk2.S b/contrib/llvm-project/compiler-rt/lib/builtins/x86_64/chkstk2.S
deleted file mode 100644
index 33d10d5b63be..000000000000
--- a/contrib/llvm-project/compiler-rt/lib/builtins/x86_64/chkstk2.S
+++ /dev/null
@@ -1,43 +0,0 @@
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-
-#include "../assembly.h"
-
-#ifdef __x86_64__
-
-// _chkstk (_alloca) routine - probe stack between %rsp and (%rsp-%rax) in 4k increments,
-// then decrement %rsp by %rax. Preserves all registers except %rsp and flags.
-// This routine is windows specific
-// http://msdn.microsoft.com/en-us/library/ms648426.aspx
-
-.text
-.balign 4
-DEFINE_COMPILERRT_FUNCTION(__alloca)
- mov %rcx,%rax // x64 _alloca is a normal function with parameter in rcx
- // fallthrough
-DEFINE_COMPILERRT_FUNCTION(___chkstk)
- push %rcx
- cmp $0x1000,%rax
- lea 16(%rsp),%rcx // rsp before calling this routine -> rcx
- jb 1f
-2:
- sub $0x1000,%rcx
- test %rcx,(%rcx)
- sub $0x1000,%rax
- cmp $0x1000,%rax
- ja 2b
-1:
- sub %rax,%rcx
- test %rcx,(%rcx)
-
- lea 8(%rsp),%rax // load pointer to the return address into rax
- mov %rcx,%rsp // install the new top of stack pointer into rsp
- mov -8(%rax),%rcx // restore rcx
- push (%rax) // push return address onto the stack
- sub %rsp,%rax // restore the original value in rax
- ret
-END_COMPILERRT_FUNCTION(___chkstk)
-END_COMPILERRT_FUNCTION(__alloca)
-
-#endif // __x86_64__
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/x86_64/floatdixf.c b/contrib/llvm-project/compiler-rt/lib/builtins/x86_64/floatdixf.c
index cf8450ce6f42..54636e283a0e 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/x86_64/floatdixf.c
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/x86_64/floatdixf.c
@@ -2,12 +2,12 @@
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-// long double __floatdixf(di_int a);
+// xf_float __floatdixf(di_int a);
#ifdef __x86_64__
#include "../int_lib.h"
-long double __floatdixf(int64_t a) { return (long double)a; }
+xf_float __floatdixf(int64_t a) { return (xf_float)a; }
#endif // __x86_64__
diff --git a/contrib/llvm-project/compiler-rt/lib/builtins/x86_64/floatundixf.S b/contrib/llvm-project/compiler-rt/lib/builtins/x86_64/floatundixf.S
index 9e3bcedcb7e4..cf7286f0d6c0 100644
--- a/contrib/llvm-project/compiler-rt/lib/builtins/x86_64/floatundixf.S
+++ b/contrib/llvm-project/compiler-rt/lib/builtins/x86_64/floatundixf.S
@@ -4,7 +4,7 @@
#include "../assembly.h"
-// long double __floatundixf(du_int a);
+// xf_float __floatundixf(du_int a);
#ifdef __x86_64__
diff --git a/contrib/llvm-project/compiler-rt/lib/cfi/cfi.cpp b/contrib/llvm-project/compiler-rt/lib/cfi/cfi.cpp
index f691cfb94cfc..ad1c91623514 100644
--- a/contrib/llvm-project/compiler-rt/lib/cfi/cfi.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/cfi/cfi.cpp
@@ -51,7 +51,11 @@ using namespace __sanitizer;
namespace __cfi {
+#if SANITIZER_LOONGARCH64
+#define kCfiShadowLimitsStorageSize 16384 // 1 page (16 KiB) on loongarch64
+#else
#define kCfiShadowLimitsStorageSize 4096 // 1 page
+#endif
// Let's hope that the data segment is mapped with 4K pages.
// The pointer to the cfi shadow region is stored at the start of this page.
// The rest of the page is unused and re-mapped read-only.
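A minimal sketch of the layout this comment describes, with POSIX mprotect standing in for the runtime's own protection call; the array name, alignment, and helper are illustrative assumptions, not this file's code:

#include <sys/mman.h>
#include <stdint.h>

// One page of storage: word 0 holds the shadow base, the rest is padding.
alignas(4096) static char shadow_limits_page[4096];

void PublishShadowBase(uintptr_t shadow_base) {
  *reinterpret_cast<uintptr_t *>(shadow_limits_page) = shadow_base;
  // Re-map the page read-only so any later stray write faults immediately.
  mprotect(shadow_limits_page, sizeof(shadow_limits_page), PROT_READ);
}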
@@ -230,7 +234,7 @@ uptr find_cfi_check_in_dso(dl_phdr_info *info) {
}
if (symtab > strtab) {
- VReport(1, "Can not handle: symtab > strtab (%p > %zx)\n", symtab, strtab);
+ VReport(1, "Can not handle: symtab > strtab (%zx > %zx)\n", symtab, strtab);
return 0;
}
@@ -250,7 +254,7 @@ uptr find_cfi_check_in_dso(dl_phdr_info *info) {
if (phdr_idx == info->dlpi_phnum) {
// Nope, either different segments or just bogus pointers.
// Can not handle this.
- VReport(1, "Can not handle: symtab %p, strtab %zx\n", symtab, strtab);
+ VReport(1, "Can not handle: symtab %zx, strtab %zx\n", symtab, strtab);
return 0;
}
@@ -320,16 +324,16 @@ void InitShadow() {
}
THREADLOCAL int in_loader;
-BlockingMutex shadow_update_lock(LINKER_INITIALIZED);
+Mutex shadow_update_lock;
-void EnterLoader() NO_THREAD_SAFETY_ANALYSIS {
+void EnterLoader() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
if (in_loader == 0) {
shadow_update_lock.Lock();
}
++in_loader;
}
-void ExitLoader() NO_THREAD_SAFETY_ANALYSIS {
+void ExitLoader() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
CHECK(in_loader > 0);
--in_loader;
UpdateShadow();
@@ -359,7 +363,7 @@ ALWAYS_INLINE void CfiSlowPathCommon(u64 CallSiteTypeId, void *Ptr,
return;
}
CFICheckFn cfi_check = sv.get_cfi_check();
- VReport(2, "__cfi_check at %p\n", cfi_check);
+ VReport(2, "__cfi_check at %p\n", (void *)cfi_check);
cfi_check(CallSiteTypeId, Ptr, DiagData);
}
@@ -436,11 +440,11 @@ INTERCEPTOR(int, dlclose, void *handle) {
return res;
}
-static BlockingMutex interceptor_init_lock(LINKER_INITIALIZED);
+static Mutex interceptor_init_lock;
static bool interceptors_inited = false;
static void EnsureInterceptorsInitialized() {
- BlockingMutexLock lock(&interceptor_init_lock);
+ Lock lock(&interceptor_init_lock);
if (interceptors_inited)
return;
diff --git a/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan.cpp b/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan.cpp
index 6f9ae141d7ab..5e85c8fda3e2 100644
--- a/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan.cpp
@@ -128,6 +128,17 @@ void __dfsan_unimplemented(char *fname) {
fname);
}
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __dfsan_wrapper_extern_weak_null(
+ const void *addr, char *fname) {
+ if (!addr)
+ Report(
+ "ERROR: DataFlowSanitizer: dfsan generated wrapper calling null "
+ "extern_weak function %s\nIf this only happens with dfsan, the "
+ "dfsan instrumentation pass may be accidentally optimizing out a "
+ "null check\n",
+ fname);
+}
+
// Use '-mllvm -dfsan-debug-nonzero-labels' and break on this function
// to try to figure out where labels are being introduced in a nominally
// label-free program.
@@ -186,8 +197,7 @@ static dfsan_origin GetOriginIfTainted(uptr addr, uptr size) {
#define PRINT_CALLER_STACK_TRACE \
{ \
- GET_CALLER_PC_BP_SP; \
- (void)sp; \
+ GET_CALLER_PC_BP; \
GET_STORE_STACK_TRACE_PC_BP(pc, bp) \
stack.Print(); \
}
@@ -332,9 +342,9 @@ static void MoveOrigin(const void *dst, const void *src, uptr size,
// origins by copying origins in a reverse order; otherwise, copy origins in
// a normal order. The orders of origin transfer are consistent with the
// orders of how memcpy and memmove transfer user data.
- uptr src_aligned_beg = reinterpret_cast<uptr>(src) & ~3UL;
- uptr src_aligned_end = (reinterpret_cast<uptr>(src) + size) & ~3UL;
- uptr dst_aligned_beg = reinterpret_cast<uptr>(dst) & ~3UL;
+ uptr src_aligned_beg = OriginAlignDown((uptr)src);
+ uptr src_aligned_end = OriginAlignDown((uptr)src + size);
+ uptr dst_aligned_beg = OriginAlignDown((uptr)dst);
if (dst_aligned_beg < src_aligned_end && dst_aligned_beg >= src_aligned_beg)
return ReverseCopyOrigin(dst, src, size, stack);
return CopyOrigin(dst, src, size, stack);
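A worked illustration of the overlap test with assumed addresses; OriginAlignDown masks the low two bits, exactly as the replaced `& ~3UL` expressions did:

// Assumed values, 4-byte origin granules:
//   src = 0x1003, size = 8 -> src_aligned_beg = 0x1000, src_aligned_end = 0x1008
//   dst = 0x1005           -> dst_aligned_beg = 0x1004
// dst_aligned_beg falls inside [src_aligned_beg, src_aligned_end), so the two
// ranges share origin granules and ReverseCopyOrigin runs back-to-front,
// mirroring how memmove handles overlapping user data.
static inline uptr OriginAlignDownSketch(uptr addr) { return addr & ~3UL; }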
@@ -369,40 +379,8 @@ static void SetOrigin(const void *dst, uptr size, u32 origin) {
*(u32 *)(end - kOriginAlign) = origin;
}
-static void WriteShadowInRange(dfsan_label label, uptr beg_shadow_addr,
- uptr end_shadow_addr) {
- // TODO: After changing dfsan_label to 8bit, use internal_memset when label
- // is not 0.
- dfsan_label *labelp = (dfsan_label *)beg_shadow_addr;
- if (label) {
- for (; (uptr)labelp < end_shadow_addr; ++labelp) *labelp = label;
- return;
- }
-
- for (; (uptr)labelp < end_shadow_addr; ++labelp) {
- // Don't write the label if it is already the value we need it to be.
- // In a program where most addresses are not labeled, it is common that
- // a page of shadow memory is entirely zeroed. The Linux copy-on-write
- // implementation will share all of the zeroed pages, making a copy of a
- // page when any value is written. The un-sharing will happen even if
- // the value written does not change the value in memory. Avoiding the
- // write when both |label| and |*labelp| are zero dramatically reduces
- // the amount of real memory used by large programs.
- if (!*labelp)
- continue;
-
- *labelp = 0;
- }
-}
-
-static void WriteShadowWithSize(dfsan_label label, uptr shadow_addr,
- uptr size) {
- WriteShadowInRange(label, shadow_addr, shadow_addr + size * sizeof(label));
-}
-
#define RET_CHAIN_ORIGIN(id) \
- GET_CALLER_PC_BP_SP; \
- (void)sp; \
+ GET_CALLER_PC_BP; \
GET_STORE_STACK_TRACE_PC_BP(pc, bp); \
return ChainOrigin(id, &stack);
@@ -432,12 +410,66 @@ extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __dfsan_mem_origin_transfer(
MoveOrigin(dst, src, len, &stack);
}
-SANITIZER_INTERFACE_ATTRIBUTE void dfsan_mem_origin_transfer(const void *dst,
- const void *src,
- uptr len) {
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void dfsan_mem_origin_transfer(
+ const void *dst, const void *src, uptr len) {
__dfsan_mem_origin_transfer(dst, src, len);
}
+static void CopyShadow(void *dst, const void *src, uptr len) {
+ internal_memcpy((void *)__dfsan::shadow_for(dst),
+ (const void *)__dfsan::shadow_for(src),
+ len * sizeof(dfsan_label));
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void dfsan_mem_shadow_transfer(
+ void *dst, const void *src, uptr len) {
+ CopyShadow(dst, src, len);
+}
+
+// Copy shadow and origins of the size bytes from src to dst.
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
+__dfsan_mem_shadow_origin_transfer(void *dst, const void *src, uptr size) {
+ if (src == dst)
+ return;
+ CopyShadow(dst, src, size);
+ if (dfsan_get_track_origins()) {
+ // Duplicating code instead of calling __dfsan_mem_origin_transfer
+    // so that getting the caller stack frame works correctly.
+ GET_CALLER_PC_BP;
+ GET_STORE_STACK_TRACE_PC_BP(pc, bp);
+ MoveOrigin(dst, src, size, &stack);
+ }
+}
+
+// Copy shadow and origins as per __atomic_compare_exchange.
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
+__dfsan_mem_shadow_origin_conditional_exchange(u8 condition, void *target,
+ void *expected,
+ const void *desired, uptr size) {
+ void *dst;
+ const void *src;
+  // condition is the result of the native call to __atomic_compare_exchange
+ if (condition) {
+ // Copy desired into target
+ dst = target;
+ src = desired;
+ } else {
+ // Copy target into expected
+ dst = expected;
+ src = target;
+ }
+ if (src == dst)
+ return;
+ CopyShadow(dst, src, size);
+ if (dfsan_get_track_origins()) {
+ // Duplicating code instead of calling __dfsan_mem_origin_transfer
+    // so that getting the caller stack frame works correctly.
+ GET_CALLER_PC_BP;
+ GET_STORE_STACK_TRACE_PC_BP(pc, bp);
+ MoveOrigin(dst, src, size, &stack);
+ }
+}
+
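A hypothetical illustration of how the condition maps to the copy direction; this is what the instrumentation conceptually arranges around a compare-exchange, not literal emitted code:

// On success, the shadow of `desired` flows into `target`; on failure, the
// shadow of `target` flows into `expected` (origins likewise).
static long target_val, expected_val, desired_val;
void cas_with_shadow() {
  bool ok = __atomic_compare_exchange(&target_val, &expected_val, &desired_val,
                                      /*weak=*/false, __ATOMIC_SEQ_CST,
                                      __ATOMIC_SEQ_CST);
  __dfsan_mem_shadow_origin_conditional_exchange(
      ok, &target_val, &expected_val, &desired_val, sizeof(target_val));
}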
namespace __dfsan {
bool dfsan_inited = false;
@@ -445,27 +477,11 @@ bool dfsan_init_is_running = false;
void dfsan_copy_memory(void *dst, const void *src, uptr size) {
internal_memcpy(dst, src, size);
- internal_memcpy((void *)shadow_for(dst), (const void *)shadow_for(src),
- size * sizeof(dfsan_label));
+ dfsan_mem_shadow_transfer(dst, src, size);
if (dfsan_get_track_origins())
dfsan_mem_origin_transfer(dst, src, size);
}
-} // namespace __dfsan
-
-// If the label s is tainted, set the size bytes from the address p to be a new
-// origin chain with the previous ID o and the current stack trace. This is
-// used by instrumentation to reduce code size when too much code is inserted.
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __dfsan_maybe_store_origin(
- dfsan_label s, void *p, uptr size, dfsan_origin o) {
- if (UNLIKELY(s)) {
- GET_CALLER_PC_BP_SP;
- (void)sp;
- GET_STORE_STACK_TRACE_PC_BP(pc, bp);
- SetOrigin(p, size, ChainOrigin(o, &stack));
- }
-}
-
// Releases the pages within the origin address range.
static void ReleaseOrigins(void *addr, uptr size) {
const uptr beg_origin_addr = (uptr)__dfsan::origin_for(addr);
@@ -484,6 +500,19 @@ static void ReleaseOrigins(void *addr, uptr size) {
Die();
}
+static void WriteZeroShadowInRange(uptr beg, uptr end) {
+ // Don't write the label if it is already the value we need it to be.
+ // In a program where most addresses are not labeled, it is common that
+ // a page of shadow memory is entirely zeroed. The Linux copy-on-write
+ // implementation will share all of the zeroed pages, making a copy of a
+ // page when any value is written. The un-sharing will happen even if
+ // the value written does not change the value in memory. Avoiding the
+ // write when both |label| and |*labelp| are zero dramatically reduces
+ // the amount of real memory used by large programs.
+ if (!mem_is_zero((const char *)beg, end - beg))
+ internal_memset((void *)beg, 0, end - beg);
+}
+
// Releases the pages within the shadow address range, and sets
// the shadow addresses not on the pages to be 0.
static void ReleaseOrClearShadows(void *addr, uptr size) {
@@ -492,20 +521,22 @@ static void ReleaseOrClearShadows(void *addr, uptr size) {
const uptr end_shadow_addr = (uptr)__dfsan::shadow_for(end_addr);
if (end_shadow_addr - beg_shadow_addr <
- common_flags()->clear_shadow_mmap_threshold)
- return WriteShadowWithSize(0, beg_shadow_addr, size);
+ common_flags()->clear_shadow_mmap_threshold) {
+ WriteZeroShadowInRange(beg_shadow_addr, end_shadow_addr);
+ return;
+ }
const uptr page_size = GetPageSizeCached();
const uptr beg_aligned = RoundUpTo(beg_shadow_addr, page_size);
const uptr end_aligned = RoundDownTo(end_shadow_addr, page_size);
if (beg_aligned >= end_aligned) {
- WriteShadowWithSize(0, beg_shadow_addr, size);
+ WriteZeroShadowInRange(beg_shadow_addr, end_shadow_addr);
} else {
if (beg_aligned != beg_shadow_addr)
- WriteShadowInRange(0, beg_shadow_addr, beg_aligned);
+ WriteZeroShadowInRange(beg_shadow_addr, beg_aligned);
if (end_aligned != end_shadow_addr)
- WriteShadowInRange(0, end_aligned, end_shadow_addr);
+ WriteZeroShadowInRange(end_aligned, end_shadow_addr);
if (!MmapFixedSuperNoReserve(beg_aligned, end_aligned - beg_aligned))
Die();
}
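A worked example of the alignment arithmetic with assumed addresses and 4KiB pages; RoundUpTo/RoundDownTo are the usual power-of-two helpers:

// Assumed shadow range, for illustration only.
constexpr unsigned long kPage = 0x1000;
constexpr unsigned long beg_shadow = 0x7000f200, end_shadow = 0x70013a00;
constexpr unsigned long beg_aligned = (beg_shadow + kPage - 1) & ~(kPage - 1);
constexpr unsigned long end_aligned = end_shadow & ~(kPage - 1);
static_assert(beg_aligned == 0x70010000 && end_aligned == 0x70013000, "");
// The partial edges [beg_shadow, beg_aligned) and [end_aligned, end_shadow)
// are zeroed in place; the whole pages in between are released to the OS.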
@@ -514,7 +545,7 @@ static void ReleaseOrClearShadows(void *addr, uptr size) {
void SetShadow(dfsan_label label, void *addr, uptr size, dfsan_origin origin) {
if (0 != label) {
const uptr beg_shadow_addr = (uptr)__dfsan::shadow_for(addr);
- WriteShadowWithSize(label, beg_shadow_addr, size);
+ internal_memset((void *)beg_shadow_addr, label, size);
if (dfsan_get_track_origins())
SetOrigin(addr, size, origin);
return;
@@ -526,9 +557,23 @@ void SetShadow(dfsan_label label, void *addr, uptr size, dfsan_origin origin) {
ReleaseOrClearShadows(addr, size);
}
+} // namespace __dfsan
+
+// If the label s is tainted, set the size bytes from the address p to be a new
+// origin chain with the previous ID o and the current stack trace. This is
+// used by instrumentation to reduce code size when too much code is inserted.
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __dfsan_maybe_store_origin(
+ dfsan_label s, void *p, uptr size, dfsan_origin o) {
+ if (UNLIKELY(s)) {
+ GET_CALLER_PC_BP;
+ GET_STORE_STACK_TRACE_PC_BP(pc, bp);
+ SetOrigin(p, size, ChainOrigin(o, &stack));
+ }
+}
+
extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __dfsan_set_label(
dfsan_label label, dfsan_origin origin, void *addr, uptr size) {
- SetShadow(label, addr, size, origin);
+ __dfsan::SetShadow(label, addr, size, origin);
}
SANITIZER_INTERFACE_ATTRIBUTE
@@ -539,7 +584,7 @@ void dfsan_set_label(dfsan_label label, void *addr, uptr size) {
GET_STORE_STACK_TRACE_PC_BP(pc, bp);
init_origin = ChainOrigin(0, &stack, true);
}
- SetShadow(label, addr, size, init_origin);
+ __dfsan::SetShadow(label, addr, size, init_origin);
}
SANITIZER_INTERFACE_ATTRIBUTE
@@ -616,6 +661,121 @@ dfsan_has_label(dfsan_label label, dfsan_label elem) {
return (label & elem) == elem;
}
+namespace __dfsan {
+
+typedef void (*dfsan_conditional_callback_t)(dfsan_label label,
+ dfsan_origin origin);
+static dfsan_conditional_callback_t conditional_callback = nullptr;
+static dfsan_label labels_in_signal_conditional = 0;
+
+static void ConditionalCallback(dfsan_label label, dfsan_origin origin) {
+ // Programs have many branches. For efficiency the conditional sink callback
+ // handler needs to ignore as many as possible as early as possible.
+ if (label == 0) {
+ return;
+ }
+ if (conditional_callback == nullptr) {
+ return;
+ }
+
+  // This initial ConditionalCallback handler needs to live in the dfsan
+  // runtime (rather than being an entirely user-implemented hook) so that it
+ // has access to dfsan thread information.
+ DFsanThread *t = GetCurrentThread();
+  // A callback that does useful work (like recording the flow) is likely
+  // too slow to run safely inside a signal handler.
+ if (t && t->InSignalHandler()) {
+ // Record set of labels used in signal handler for completeness.
+ labels_in_signal_conditional |= label;
+ return;
+ }
+
+ conditional_callback(label, origin);
+}
+
+} // namespace __dfsan
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
+__dfsan_conditional_callback_origin(dfsan_label label, dfsan_origin origin) {
+ __dfsan::ConditionalCallback(label, origin);
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __dfsan_conditional_callback(
+ dfsan_label label) {
+ __dfsan::ConditionalCallback(label, 0);
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void dfsan_set_conditional_callback(
+ __dfsan::dfsan_conditional_callback_t callback) {
+ __dfsan::conditional_callback = callback;
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE dfsan_label
+dfsan_get_labels_in_signal_conditional() {
+ return __dfsan::labels_in_signal_conditional;
+}
+
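Hypothetical user-side registration, assuming a -fsanitize=dataflow build with the matching conditional-callback instrumentation enabled:

extern "C" void my_conditional_callback(dfsan_label label,
                                        dfsan_origin origin) {
  // Invoked for each conditional whose predicate carries a nonzero label;
  // calls from signal handlers are deferred into
  // labels_in_signal_conditional instead, as shown above.
}

void install_conditional_callback() {
  dfsan_set_conditional_callback(my_conditional_callback);
}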
+namespace __dfsan {
+
+typedef void (*dfsan_reaches_function_callback_t)(dfsan_label label,
+ dfsan_origin origin,
+ const char *file,
+ unsigned int line,
+ const char *function);
+static dfsan_reaches_function_callback_t reaches_function_callback = nullptr;
+static dfsan_label labels_in_signal_reaches_function = 0;
+
+static void ReachesFunctionCallback(dfsan_label label, dfsan_origin origin,
+ const char *file, unsigned int line,
+ const char *function) {
+ if (label == 0) {
+ return;
+ }
+ if (reaches_function_callback == nullptr) {
+ return;
+ }
+
+  // This initial ReachesFunctionCallback handler needs to live in the dfsan
+  // runtime (rather than being an entirely user-implemented hook) so that it
+ // has access to dfsan thread information.
+ DFsanThread *t = GetCurrentThread();
+  // A callback that does useful work (like recording the flow) is likely
+  // too slow to run safely inside a signal handler.
+ if (t && t->InSignalHandler()) {
+ // Record set of labels used in signal handler for completeness.
+ labels_in_signal_reaches_function |= label;
+ return;
+ }
+
+ reaches_function_callback(label, origin, file, line, function);
+}
+
+} // namespace __dfsan
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
+__dfsan_reaches_function_callback_origin(dfsan_label label, dfsan_origin origin,
+ const char *file, unsigned int line,
+ const char *function) {
+ __dfsan::ReachesFunctionCallback(label, origin, file, line, function);
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
+__dfsan_reaches_function_callback(dfsan_label label, const char *file,
+ unsigned int line, const char *function) {
+ __dfsan::ReachesFunctionCallback(label, 0, file, line, function);
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
+dfsan_set_reaches_function_callback(
+ __dfsan::dfsan_reaches_function_callback_t callback) {
+ __dfsan::reaches_function_callback = callback;
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE dfsan_label
+dfsan_get_labels_in_signal_reaches_function() {
+ return __dfsan::labels_in_signal_reaches_function;
+}
+
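A hypothetical reaches-function callback, assuming the corresponding instrumentation is enabled; fprintf stands in for whatever sink the user prefers:

#include <cstdio>
extern "C" void my_reaches_callback(dfsan_label label, dfsan_origin origin,
                                    const char *file, unsigned int line,
                                    const char *function) {
  std::fprintf(stderr, "label %u reaches %s (%s:%u)\n", label, function,
               file ? file : "?", line);
}

void install_reaches_callback() {
  dfsan_set_reaches_function_callback(my_reaches_callback);
}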
class Decorator : public __sanitizer::SanitizerCommonDecorator {
public:
Decorator() : SanitizerCommonDecorator() {}
@@ -646,22 +806,16 @@ void PrintInvalidOriginWarning(dfsan_label label, const void *address) {
d.Warning(), label, address, d.Default());
}
-bool PrintOriginTraceToStr(const void *addr, const char *description,
- InternalScopedString *out) {
- CHECK(out);
- CHECK(dfsan_get_track_origins());
+void PrintInvalidOriginIdWarning(dfsan_origin origin) {
Decorator d;
+ Printf(
+ " %sOrigin Id %d has invalid origin tracking. This can "
+ "be a DFSan bug.%s\n",
+ d.Warning(), origin, d.Default());
+}
- const dfsan_label label = *__dfsan::shadow_for(addr);
- CHECK(label);
-
- const dfsan_origin origin = *__dfsan::origin_for(addr);
-
- out->append(" %sTaint value 0x%x (at %p) origin tracking (%s)%s\n",
- d.Origin(), label, addr, description ? description : "",
- d.Default());
-
- Origin o = Origin::FromRawId(origin);
+bool PrintOriginTraceFramesToStr(Origin o, InternalScopedString *out) {
+ Decorator d;
bool found = false;
while (o.isChainedOrigin()) {
@@ -669,12 +823,12 @@ bool PrintOriginTraceToStr(const void *addr, const char *description,
dfsan_origin origin_id = o.raw_id();
o = o.getNextChainedOrigin(&stack);
if (o.isChainedOrigin())
- out->append(
+ out->AppendF(
" %sOrigin value: 0x%x, Taint value was stored to memory at%s\n",
d.Origin(), origin_id, d.Default());
else
- out->append(" %sOrigin value: 0x%x, Taint value was created at%s\n",
- d.Origin(), origin_id, d.Default());
+ out->AppendF(" %sOrigin value: 0x%x, Taint value was created at%s\n",
+ d.Origin(), origin_id, d.Default());
// Includes a trailing newline, so no need to add it again.
stack.PrintTo(out);
@@ -684,6 +838,25 @@ bool PrintOriginTraceToStr(const void *addr, const char *description,
return found;
}
+bool PrintOriginTraceToStr(const void *addr, const char *description,
+ InternalScopedString *out) {
+ CHECK(out);
+ CHECK(dfsan_get_track_origins());
+ Decorator d;
+
+ const dfsan_label label = *__dfsan::shadow_for(addr);
+ CHECK(label);
+
+ const dfsan_origin origin = *__dfsan::origin_for(addr);
+
+ out->AppendF(" %sTaint value 0x%x (at %p) origin tracking (%s)%s\n",
+ d.Origin(), label, addr, description ? description : "",
+ d.Default());
+
+ Origin o = Origin::FromRawId(origin);
+ return PrintOriginTraceFramesToStr(o, out);
+}
+
} // namespace
extern "C" SANITIZER_INTERFACE_ATTRIBUTE void dfsan_print_origin_trace(
@@ -709,9 +882,9 @@ extern "C" SANITIZER_INTERFACE_ATTRIBUTE void dfsan_print_origin_trace(
PrintInvalidOriginWarning(label, addr);
}
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE size_t
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr
dfsan_sprint_origin_trace(const void *addr, const char *description,
- char *out_buf, size_t out_buf_size) {
+ char *out_buf, uptr out_buf_size) {
CHECK(out_buf);
if (!dfsan_get_track_origins()) {
@@ -741,6 +914,50 @@ dfsan_sprint_origin_trace(const void *addr, const char *description,
return trace.length();
}
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void dfsan_print_origin_id_trace(
+ dfsan_origin origin) {
+ if (!dfsan_get_track_origins()) {
+ PrintNoOriginTrackingWarning();
+ return;
+ }
+ Origin o = Origin::FromRawId(origin);
+
+ InternalScopedString trace;
+ bool success = PrintOriginTraceFramesToStr(o, &trace);
+
+ if (trace.length())
+ Printf("%s", trace.data());
+
+ if (!success)
+ PrintInvalidOriginIdWarning(origin);
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr dfsan_sprint_origin_id_trace(
+ dfsan_origin origin, char *out_buf, uptr out_buf_size) {
+ CHECK(out_buf);
+
+ if (!dfsan_get_track_origins()) {
+ PrintNoOriginTrackingWarning();
+ return 0;
+ }
+ Origin o = Origin::FromRawId(origin);
+
+ InternalScopedString trace;
+ bool success = PrintOriginTraceFramesToStr(o, &trace);
+
+ if (!success) {
+ PrintInvalidOriginIdWarning(origin);
+ return 0;
+ }
+
+ if (out_buf_size) {
+ internal_strncpy(out_buf, trace.data(), out_buf_size - 1);
+ out_buf[out_buf_size - 1] = '\0';
+ }
+
+ return trace.length();
+}
+
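A hedged usage sketch: feeding dfsan_print_origin_id_trace an id captured earlier via dfsan_read_origin_of_first_taint (declared in dfsan.h below):

void dump_first_taint_origin(const void *buf, uptr n) {
  dfsan_origin id = dfsan_read_origin_of_first_taint(buf, n);
  if (id)  // 0 means no tainted byte was found in the range
    dfsan_print_origin_id_trace(id);
}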
extern "C" SANITIZER_INTERFACE_ATTRIBUTE dfsan_origin
dfsan_get_init_origin(const void *addr) {
if (!dfsan_get_track_origins())
@@ -780,8 +997,8 @@ extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_print_stack_trace() {
stack.Print();
}
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE size_t
-dfsan_sprint_stack_trace(char *out_buf, size_t out_buf_size) {
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr
+dfsan_sprint_stack_trace(char *out_buf, uptr out_buf_size) {
CHECK(out_buf);
GET_CALLER_PC_BP;
GET_STORE_STACK_TRACE_PC_BP(pc, bp);
@@ -837,6 +1054,20 @@ void dfsan_clear_thread_local_state() {
}
}
+SANITIZER_INTERFACE_ATTRIBUTE
+void dfsan_set_arg_tls(uptr offset, dfsan_label label) {
+ // 2x to match ShadowTLSAlignment.
+  // TODO: Consider reducing ShadowTLSAlignment to 1.
+ // Aligning to 2 bytes is probably a remnant of fast16 mode.
+ ((dfsan_label *)__dfsan_arg_tls)[offset * 2] = label;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void dfsan_set_arg_origin_tls(uptr offset, dfsan_origin o) {
+ __dfsan_arg_origin_tls[offset] = o;
+}
+
extern "C" void dfsan_flush() {
const uptr maxVirtualAddress = GetMaxUserVirtualAddress();
for (unsigned i = 0; i < kMemoryLayoutSize; ++i) {
@@ -857,6 +1088,8 @@ extern "C" void dfsan_flush() {
Die();
}
}
+ __dfsan::labels_in_signal_conditional = 0;
+ __dfsan::labels_in_signal_reaches_function = 0;
}
// TODO: CheckMemoryLayoutSanity is based on msan.
@@ -932,7 +1165,7 @@ static bool ProtectMemoryRange(uptr beg, uptr size, const char *name) {
// Consider refactoring these into a shared implementation.
bool InitShadow(bool init_origins) {
// Let user know mapping parameters first.
- VPrintf(1, "dfsan_init %p\n", &__dfsan::dfsan_init);
+ VPrintf(1, "dfsan_init %p\n", (void *)&__dfsan::dfsan_init);
for (unsigned i = 0; i < kMemoryLayoutSize; ++i)
VPrintf(1, "%s: %zx - %zx\n", kMemoryLayout[i].name, kMemoryLayout[i].start,
kMemoryLayout[i].end - 1);
@@ -1005,9 +1238,9 @@ static void DFsanInit(int argc, char **argv, char **envp) {
dfsan_allocator_init();
- DFsanThread *main_thread = DFsanThread::Create(nullptr, nullptr, nullptr);
+ DFsanThread *main_thread = DFsanThread::Create(nullptr, nullptr);
SetCurrentThread(main_thread);
- main_thread->ThreadStart();
+ main_thread->Init();
dfsan_init_is_running = false;
dfsan_inited = true;
diff --git a/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan.h b/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan.h
index b212298157eb..29938a08de54 100644
--- a/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan.h
+++ b/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan.h
@@ -36,6 +36,12 @@ void dfsan_clear_arg_tls(uptr offset, uptr size);
// Zero out the TLS storage.
void dfsan_clear_thread_local_state();
+// Set the DFSan label and origin TLS for a call argument.
+// Note that the offset may not correspond to the argument number;
+// some arguments (aggregate/array) use several offsets.
+void dfsan_set_arg_tls(uptr offset, dfsan_label label);
+void dfsan_set_arg_origin_tls(uptr offset, dfsan_origin o);
+
// Return the origin associated with the first taint byte in the size bytes
// from the address addr.
dfsan_origin dfsan_read_origin_of_first_taint(const void *addr, uptr size);
@@ -46,10 +52,14 @@ void dfsan_set_label_origin(dfsan_label label, dfsan_origin origin, void *addr,
// Copy or move the origins of the len bytes from src to dst.
void dfsan_mem_origin_transfer(const void *dst, const void *src, uptr len);
+
+// Copy shadow bytes from src to dst.
+// Note this preserves distinct taint labels at specific offsets.
+void dfsan_mem_shadow_transfer(void *dst, const void *src, uptr len);
} // extern "C"
template <typename T>
-void dfsan_set_label(dfsan_label label, T &data) { // NOLINT
+void dfsan_set_label(dfsan_label label, T &data) {
dfsan_set_label(label, (void *)&data, sizeof(T));
}
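A usage sketch for the template overload above; the label value is an arbitrary illustration:

void tag_counter() {
  int counter = 0;
  dfsan_label l = 1;  // assumed label value, purely illustrative
  dfsan_set_label(l, counter);  // labels all sizeof(int) bytes of counter
}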
diff --git a/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_allocator.cpp b/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_allocator.cpp
index b2e94564446e..df8be2cf5ae0 100644
--- a/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_allocator.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_allocator.cpp
@@ -30,11 +30,19 @@ struct Metadata {
struct DFsanMapUnmapCallback {
void OnMap(uptr p, uptr size) const { dfsan_set_label(0, (void *)p, size); }
+ void OnMapSecondary(uptr p, uptr size, uptr user_begin,
+ uptr user_size) const {
+ OnMap(p, size);
+ }
void OnUnmap(uptr p, uptr size) const { dfsan_set_label(0, (void *)p, size); }
};
-static const uptr kAllocatorSpace = 0x700000000000ULL;
-static const uptr kMaxAllowedMallocSize = 8UL << 30;
+#if defined(__aarch64__)
+const uptr kAllocatorSpace = 0xE00000000000ULL;
+#else
+const uptr kAllocatorSpace = 0x700000000000ULL;
+#endif
+const uptr kMaxAllowedMallocSize = 8UL << 30;
struct AP64 { // Allocator64 parameters. Deliberately using a short name.
static const uptr kSpaceBeg = kAllocatorSpace;
@@ -87,6 +95,12 @@ static void *DFsanAllocate(uptr size, uptr alignment, bool zeroise) {
BufferedStackTrace stack;
ReportAllocationSizeTooBig(size, max_malloc_size, &stack);
}
+ if (UNLIKELY(IsRssLimitExceeded())) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ BufferedStackTrace stack;
+ ReportRssLimitExceeded(&stack);
+ }
DFsanThread *t = GetCurrentThread();
void *allocated;
if (t) {
@@ -164,6 +178,20 @@ void *DFsanCalloc(uptr nmemb, uptr size) {
return DFsanAllocate(nmemb * size, sizeof(u64), true /*zeroise*/);
}
+static const void *AllocationBegin(const void *p) {
+ if (!p)
+ return nullptr;
+ void *beg = allocator.GetBlockBegin(p);
+ if (!beg)
+ return nullptr;
+ Metadata *b = (Metadata *)allocator.GetMetaData(beg);
+ if (!b)
+ return nullptr;
+ if (b->requested_size == 0)
+ return nullptr;
+ return (const void *)beg;
+}
+
static uptr AllocationSize(const void *p) {
if (!p)
return 0;
@@ -174,6 +202,10 @@ static uptr AllocationSize(const void *p) {
return b->requested_size;
}
+static uptr AllocationSizeFast(const void *p) {
+ return reinterpret_cast<Metadata *>(allocator.GetMetaData(p))->requested_size;
+}
+
void *dfsan_malloc(uptr size) {
return SetErrnoOnNull(DFsanAllocate(size, sizeof(u64), false /*zeroise*/));
}
@@ -284,4 +316,15 @@ uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }
+const void *__sanitizer_get_allocated_begin(const void *p) {
+ return AllocationBegin(p);
+}
+
uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }
+
+uptr __sanitizer_get_allocated_size_fast(const void *p) {
+ DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
+ uptr ret = AllocationSizeFast(p);
+ DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
+ return ret;
+}
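A hedged usage note: per the DCHECKs above, the fast variant is only valid on the exact start pointer the allocator returned, e.g.:

#include <cstdlib>
void size_query_example() {
  void *p = malloc(40);  // intercepted by dfsan's allocator in this build
  // p is an allocation start, so the begin lookup is the identity here:
  // __sanitizer_get_allocated_begin(p) == p
  uptr n = __sanitizer_get_allocated_size_fast(p);
  (void)n;
  free(p);
}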
diff --git a/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_chained_origin_depot.cpp b/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_chained_origin_depot.cpp
index 9ec598bf2ce9..f95194d19f03 100644
--- a/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_chained_origin_depot.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_chained_origin_depot.cpp
@@ -19,4 +19,10 @@ static ChainedOriginDepot chainedOriginDepot;
ChainedOriginDepot* GetChainedOriginDepot() { return &chainedOriginDepot; }
+void ChainedOriginDepotLockBeforeFork() { chainedOriginDepot.LockBeforeFork(); }
+
+void ChainedOriginDepotUnlockAfterFork(bool fork_child) {
+ chainedOriginDepot.UnlockAfterFork(fork_child);
+}
+
} // namespace __dfsan
diff --git a/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_chained_origin_depot.h b/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_chained_origin_depot.h
index d715ef707f41..83b9e29e1b71 100644
--- a/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_chained_origin_depot.h
+++ b/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_chained_origin_depot.h
@@ -21,6 +21,9 @@ namespace __dfsan {
ChainedOriginDepot* GetChainedOriginDepot();
+void ChainedOriginDepotLockBeforeFork();
+void ChainedOriginDepotUnlockAfterFork(bool fork_child);
+
} // namespace __dfsan
#endif // DFSAN_CHAINED_ORIGIN_DEPOT_H
diff --git a/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_custom.cpp b/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_custom.cpp
index 3185184f29c8..3af26e9f64c9 100644
--- a/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_custom.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_custom.cpp
@@ -55,6 +55,10 @@ using namespace __dfsan;
#define DECLARE_WEAK_INTERCEPTOR_HOOK(f, ...) \
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void f(__VA_ARGS__);
+#define WRAPPER_ALIAS(fun, real) \
+ SANITIZER_INTERFACE_ATTRIBUTE void __dfsw_##fun() ALIAS(__dfsw_##real); \
+ SANITIZER_INTERFACE_ATTRIBUTE void __dfso_##fun() ALIAS(__dfso_##real);
+
// Async-safe, non-reentrant spin lock.
class SignalSpinLocker {
public:
@@ -204,6 +208,57 @@ SANITIZER_INTERFACE_ATTRIBUTE char *__dfso_strpbrk(
return const_cast<char *>(ret);
}
+SANITIZER_INTERFACE_ATTRIBUTE char *__dfsw_strsep(char **s, const char *delim,
+ dfsan_label s_label,
+ dfsan_label delim_label,
+ dfsan_label *ret_label) {
+ dfsan_label base_label = dfsan_read_label(s, sizeof(*s));
+ char *base = *s;
+ char *res = strsep(s, delim);
+ if (res != *s) {
+ char *token_start = res;
+ int token_length = strlen(res);
+    // the delimiter byte has been overwritten with a null terminator
+ dfsan_set_label(0, token_start + token_length, 1);
+ }
+
+ if (flags().strict_data_dependencies) {
+ *ret_label = res ? base_label : 0;
+ } else {
+ size_t s_bytes_read = (res ? strlen(res) : strlen(base)) + 1;
+ *ret_label = dfsan_union(
+        dfsan_union(base_label, dfsan_read_label(base, s_bytes_read)),
+ dfsan_union(dfsan_read_label(delim, strlen(delim) + 1),
+ dfsan_union(s_label, delim_label)));
+ }
+
+ return res;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE char *__dfso_strsep(
+ char **s, const char *delim, dfsan_label s_label, dfsan_label delim_label,
+ dfsan_label *ret_label, dfsan_origin s_origin, dfsan_origin delim_origin,
+ dfsan_origin *ret_origin) {
+ dfsan_origin base_origin = dfsan_read_origin_of_first_taint(s, sizeof(*s));
+ char *res = __dfsw_strsep(s, delim, s_label, delim_label, ret_label);
+ if (flags().strict_data_dependencies) {
+ if (res)
+ *ret_origin = base_origin;
+ } else {
+ if (*ret_label) {
+ if (base_origin) {
+ *ret_origin = base_origin;
+ } else {
+ dfsan_origin o =
+ dfsan_read_origin_of_first_taint(delim, strlen(delim) + 1);
+ *ret_origin = o ? o : (s_label ? s_origin : delim_origin);
+ }
+ }
+ }
+
+ return res;
+}
+
static int dfsan_memcmp_bcmp(const void *s1, const void *s2, size_t n,
size_t *bytes_read) {
const char *cs1 = (const char *) s1, *cs2 = (const char *) s2;
@@ -484,6 +539,36 @@ SANITIZER_INTERFACE_ATTRIBUTE size_t __dfso_strlen(const char *s,
return ret;
}
+SANITIZER_INTERFACE_ATTRIBUTE size_t __dfsw_strnlen(const char *s,
+ size_t maxlen,
+ dfsan_label s_label,
+ dfsan_label maxlen_label,
+ dfsan_label *ret_label) {
+ size_t ret = strnlen(s, maxlen);
+ if (flags().strict_data_dependencies) {
+ *ret_label = 0;
+ } else {
+ size_t full_len = strlen(s);
+ size_t covered_len = maxlen > (full_len + 1) ? (full_len + 1) : maxlen;
+ *ret_label = dfsan_union(maxlen_label, dfsan_read_label(s, covered_len));
+ }
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE size_t __dfso_strnlen(
+ const char *s, size_t maxlen, dfsan_label s_label, dfsan_label maxlen_label,
+ dfsan_label *ret_label, dfsan_origin s_origin, dfsan_origin maxlen_origin,
+ dfsan_origin *ret_origin) {
+ size_t ret = __dfsw_strnlen(s, maxlen, s_label, maxlen_label, ret_label);
+ if (!flags().strict_data_dependencies) {
+ size_t full_len = strlen(s);
+ size_t covered_len = maxlen > (full_len + 1) ? (full_len + 1) : maxlen;
+ dfsan_origin o = dfsan_read_origin_of_first_taint(s, covered_len);
+ *ret_origin = o ? o : maxlen_origin;
+ }
+ return ret;
+}
+
static void *dfsan_memmove(void *dest, const void *src, size_t n) {
dfsan_label *sdest = shadow_for(dest);
const dfsan_label *ssrc = shadow_for(src);
@@ -497,9 +582,7 @@ static void *dfsan_memmove_with_origin(void *dest, const void *src, size_t n) {
}
static void *dfsan_memcpy(void *dest, const void *src, size_t n) {
- dfsan_label *sdest = shadow_for(dest);
- const dfsan_label *ssrc = shadow_for(src);
- internal_memcpy((void *)sdest, (const void *)ssrc, n * sizeof(dfsan_label));
+ dfsan_mem_shadow_transfer(dest, src, n);
return internal_memcpy(dest, src, n);
}
@@ -583,11 +666,8 @@ SANITIZER_INTERFACE_ATTRIBUTE char *__dfsw_strcat(char *dest, const char *src,
dfsan_label src_label,
dfsan_label *ret_label) {
size_t dest_len = strlen(dest);
- char *ret = strcat(dest, src); // NOLINT
- dfsan_label *sdest = shadow_for(dest + dest_len);
- const dfsan_label *ssrc = shadow_for(src);
- internal_memcpy((void *)sdest, (const void *)ssrc,
- strlen(src) * sizeof(dfsan_label));
+ char *ret = strcat(dest, src);
+ dfsan_mem_shadow_transfer(dest + dest_len, src, strlen(src));
*ret_label = dest_label;
return ret;
}
@@ -597,13 +677,41 @@ SANITIZER_INTERFACE_ATTRIBUTE char *__dfso_strcat(
dfsan_label *ret_label, dfsan_origin dest_origin, dfsan_origin src_origin,
dfsan_origin *ret_origin) {
size_t dest_len = strlen(dest);
- char *ret = strcat(dest, src); // NOLINT
- dfsan_label *sdest = shadow_for(dest + dest_len);
- const dfsan_label *ssrc = shadow_for(src);
+ char *ret = strcat(dest, src);
size_t src_len = strlen(src);
dfsan_mem_origin_transfer(dest + dest_len, src, src_len);
- internal_memcpy((void *)sdest, (const void *)ssrc,
- src_len * sizeof(dfsan_label));
+ dfsan_mem_shadow_transfer(dest + dest_len, src, src_len);
+ *ret_label = dest_label;
+ *ret_origin = dest_origin;
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE char *__dfsw_strncat(
+ char *dest, const char *src, size_t num, dfsan_label dest_label,
+ dfsan_label src_label, dfsan_label num_label, dfsan_label *ret_label) {
+ size_t src_len = strlen(src);
+ src_len = src_len < num ? src_len : num;
+ size_t dest_len = strlen(dest);
+
+ char *ret = strncat(dest, src, num);
+ dfsan_mem_shadow_transfer(dest + dest_len, src, src_len);
+ *ret_label = dest_label;
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE char *__dfso_strncat(
+ char *dest, const char *src, size_t num, dfsan_label dest_label,
+ dfsan_label src_label, dfsan_label num_label, dfsan_label *ret_label,
+ dfsan_origin dest_origin, dfsan_origin src_origin, dfsan_origin num_origin,
+ dfsan_origin *ret_origin) {
+ size_t src_len = strlen(src);
+ src_len = src_len < num ? src_len : num;
+ size_t dest_len = strlen(dest);
+
+ char *ret = strncat(dest, src, num);
+
+ dfsan_mem_origin_transfer(dest + dest_len, src, src_len);
+ dfsan_mem_shadow_transfer(dest + dest_len, src, src_len);
*ret_label = dest_label;
*ret_origin = dest_origin;
return ret;
@@ -738,7 +846,7 @@ __dfsw_dlopen(const char *filename, int flag, dfsan_label filename_label,
dfsan_label flag_label, dfsan_label *ret_label) {
void *handle = dlopen(filename, flag);
link_map *map = GET_LINK_MAP_BY_DLOPEN_HANDLE(handle);
- if (map)
+ if (filename && map)
ForEachMappedRegion(map, dfsan_set_zero_label);
*ret_label = 0;
return handle;
@@ -755,11 +863,12 @@ SANITIZER_INTERFACE_ATTRIBUTE void *__dfso_dlopen(
static void *DFsanThreadStartFunc(void *arg) {
DFsanThread *t = (DFsanThread *)arg;
SetCurrentThread(t);
+ t->Init();
+ SetSigProcMask(&t->starting_sigset_, nullptr);
return t->ThreadStart();
}
static int dfsan_pthread_create(pthread_t *thread, const pthread_attr_t *attr,
- void *start_routine_trampoline,
void *start_routine, void *arg,
dfsan_label *ret_label,
bool track_origins = false) {
@@ -773,8 +882,8 @@ static int dfsan_pthread_create(pthread_t *thread, const pthread_attr_t *attr,
AdjustStackSize((void *)(const_cast<pthread_attr_t *>(attr)));
DFsanThread *t =
- DFsanThread::Create(start_routine_trampoline,
- (thread_callback_t)start_routine, arg, track_origins);
+ DFsanThread::Create((thread_callback_t)start_routine, arg, track_origins);
+ ScopedBlockSignals block(&t->starting_sigset_);
int res = pthread_create(thread, attr, DFsanThreadStartFunc, t);
if (attr == &myattr)
@@ -784,28 +893,22 @@ static int dfsan_pthread_create(pthread_t *thread, const pthread_attr_t *attr,
}
SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_pthread_create(
- pthread_t *thread, const pthread_attr_t *attr,
- void *(*start_routine_trampoline)(void *, void *, dfsan_label,
- dfsan_label *),
- void *start_routine, void *arg, dfsan_label thread_label,
- dfsan_label attr_label, dfsan_label start_routine_label,
- dfsan_label arg_label, dfsan_label *ret_label) {
- return dfsan_pthread_create(thread, attr, (void *)start_routine_trampoline,
- start_routine, arg, ret_label);
+ pthread_t *thread, const pthread_attr_t *attr, void *start_routine,
+ void *arg, dfsan_label thread_label, dfsan_label attr_label,
+ dfsan_label start_routine_label, dfsan_label arg_label,
+ dfsan_label *ret_label) {
+ return dfsan_pthread_create(thread, attr, start_routine, arg, ret_label);
}
SANITIZER_INTERFACE_ATTRIBUTE int __dfso_pthread_create(
- pthread_t *thread, const pthread_attr_t *attr,
- void *(*start_routine_trampoline)(void *, void *, dfsan_label,
- dfsan_label *, dfsan_origin,
- dfsan_origin *),
- void *start_routine, void *arg, dfsan_label thread_label,
- dfsan_label attr_label, dfsan_label start_routine_label,
- dfsan_label arg_label, dfsan_label *ret_label, dfsan_origin thread_origin,
+ pthread_t *thread, const pthread_attr_t *attr, void *start_routine,
+ void *arg, dfsan_label thread_label, dfsan_label attr_label,
+ dfsan_label start_routine_label, dfsan_label arg_label,
+ dfsan_label *ret_label, dfsan_origin thread_origin,
dfsan_origin attr_origin, dfsan_origin start_routine_origin,
dfsan_origin arg_origin, dfsan_origin *ret_origin) {
- return dfsan_pthread_create(thread, attr, (void *)start_routine_trampoline,
- start_routine, arg, ret_label, true);
+ return dfsan_pthread_create(thread, attr, start_routine, arg, ret_label,
+ true);
}
SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_pthread_join(pthread_t thread,
@@ -830,22 +933,7 @@ SANITIZER_INTERFACE_ATTRIBUTE int __dfso_pthread_join(
}
struct dl_iterate_phdr_info {
- int (*callback_trampoline)(void *callback, struct dl_phdr_info *info,
- size_t size, void *data, dfsan_label info_label,
- dfsan_label size_label, dfsan_label data_label,
- dfsan_label *ret_label);
- void *callback;
- void *data;
-};
-
-struct dl_iterate_phdr_origin_info {
- int (*callback_trampoline)(void *callback, struct dl_phdr_info *info,
- size_t size, void *data, dfsan_label info_label,
- dfsan_label size_label, dfsan_label data_label,
- dfsan_label *ret_label, dfsan_origin info_origin,
- dfsan_origin size_origin, dfsan_origin data_origin,
- dfsan_origin *ret_origin);
- void *callback;
+ int (*callback)(struct dl_phdr_info *info, size_t size, void *data);
void *data;
};
@@ -857,53 +945,28 @@ int dl_iterate_phdr_cb(struct dl_phdr_info *info, size_t size, void *data) {
dfsan_set_label(
0, const_cast<char *>(reinterpret_cast<const char *>(info->dlpi_phdr)),
sizeof(*info->dlpi_phdr) * info->dlpi_phnum);
- dfsan_label ret_label;
- return dipi->callback_trampoline(dipi->callback, info, size, dipi->data, 0, 0,
- 0, &ret_label);
-}
-int dl_iterate_phdr_origin_cb(struct dl_phdr_info *info, size_t size,
- void *data) {
- dl_iterate_phdr_origin_info *dipi = (dl_iterate_phdr_origin_info *)data;
- dfsan_set_label(0, *info);
- dfsan_set_label(0, const_cast<char *>(info->dlpi_name),
- strlen(info->dlpi_name) + 1);
- dfsan_set_label(
- 0, const_cast<char *>(reinterpret_cast<const char *>(info->dlpi_phdr)),
- sizeof(*info->dlpi_phdr) * info->dlpi_phnum);
- dfsan_label ret_label;
- dfsan_origin ret_origin;
- return dipi->callback_trampoline(dipi->callback, info, size, dipi->data, 0, 0,
- 0, &ret_label, 0, 0, 0, &ret_origin);
+ dfsan_clear_thread_local_state();
+ return dipi->callback(info, size, dipi->data);
}
SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_dl_iterate_phdr(
- int (*callback_trampoline)(void *callback, struct dl_phdr_info *info,
- size_t size, void *data, dfsan_label info_label,
- dfsan_label size_label, dfsan_label data_label,
- dfsan_label *ret_label),
- void *callback, void *data, dfsan_label callback_label,
- dfsan_label data_label, dfsan_label *ret_label) {
- dl_iterate_phdr_info dipi = { callback_trampoline, callback, data };
+ int (*callback)(struct dl_phdr_info *info, size_t size, void *data),
+ void *data, dfsan_label callback_label, dfsan_label data_label,
+ dfsan_label *ret_label) {
+ dl_iterate_phdr_info dipi = {callback, data};
*ret_label = 0;
return dl_iterate_phdr(dl_iterate_phdr_cb, &dipi);
}
SANITIZER_INTERFACE_ATTRIBUTE int __dfso_dl_iterate_phdr(
- int (*callback_trampoline)(void *callback, struct dl_phdr_info *info,
- size_t size, void *data, dfsan_label info_label,
- dfsan_label size_label, dfsan_label data_label,
- dfsan_label *ret_label, dfsan_origin info_origin,
- dfsan_origin size_origin,
- dfsan_origin data_origin,
- dfsan_origin *ret_origin),
- void *callback, void *data, dfsan_label callback_label,
- dfsan_label data_label, dfsan_label *ret_label,
- dfsan_origin callback_origin, dfsan_origin data_origin,
- dfsan_origin *ret_origin) {
- dl_iterate_phdr_origin_info dipi = {callback_trampoline, callback, data};
+ int (*callback)(struct dl_phdr_info *info, size_t size, void *data),
+ void *data, dfsan_label callback_label, dfsan_label data_label,
+ dfsan_label *ret_label, dfsan_origin callback_origin,
+ dfsan_origin data_origin, dfsan_origin *ret_origin) {
+ dl_iterate_phdr_info dipi = {callback, data};
*ret_label = 0;
- return dl_iterate_phdr(dl_iterate_phdr_origin_cb, &dipi);
+ return dl_iterate_phdr(dl_iterate_phdr_cb, &dipi);
}
// This function is only available for glibc 2.27 or newer. Mark it weak so
@@ -1026,6 +1089,33 @@ char *__dfso_get_current_dir_name(dfsan_label *ret_label,
return __dfsw_get_current_dir_name(ret_label);
}
+// This function is only available for glibc 2.25 or newer. Mark it weak so
+// linking succeeds with older glibcs.
+SANITIZER_WEAK_ATTRIBUTE int getentropy(void *buffer, size_t length);
+
+SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_getentropy(void *buffer, size_t length,
+ dfsan_label buffer_label,
+ dfsan_label length_label,
+ dfsan_label *ret_label) {
+ int ret = getentropy(buffer, length);
+ if (ret == 0) {
+ dfsan_set_label(0, buffer, length);
+ }
+ *ret_label = 0;
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE int __dfso_getentropy(void *buffer, size_t length,
+ dfsan_label buffer_label,
+ dfsan_label length_label,
+ dfsan_label *ret_label,
+ dfsan_origin buffer_origin,
+ dfsan_origin length_origin,
+ dfsan_origin *ret_origin) {
+ return __dfsw_getentropy(buffer, length, buffer_label, length_label,
+ ret_label);
+}
+
SANITIZER_INTERFACE_ATTRIBUTE
int __dfsw_gethostname(char *name, size_t len, dfsan_label name_label,
dfsan_label len_label, dfsan_label *ret_label) {
@@ -1088,10 +1178,9 @@ int __dfso_getrusage(int who, struct rusage *usage, dfsan_label who_label,
SANITIZER_INTERFACE_ATTRIBUTE
char *__dfsw_strcpy(char *dest, const char *src, dfsan_label dst_label,
dfsan_label src_label, dfsan_label *ret_label) {
- char *ret = strcpy(dest, src); // NOLINT
+ char *ret = strcpy(dest, src);
if (ret) {
- internal_memcpy(shadow_for(dest), shadow_for(src),
- sizeof(dfsan_label) * (strlen(src) + 1));
+ dfsan_mem_shadow_transfer(dest, src, strlen(src) + 1);
}
*ret_label = dst_label;
return ret;
@@ -1102,27 +1191,30 @@ char *__dfso_strcpy(char *dest, const char *src, dfsan_label dst_label,
dfsan_label src_label, dfsan_label *ret_label,
dfsan_origin dst_origin, dfsan_origin src_origin,
dfsan_origin *ret_origin) {
- char *ret = strcpy(dest, src); // NOLINT
+ char *ret = strcpy(dest, src);
if (ret) {
size_t str_len = strlen(src) + 1;
dfsan_mem_origin_transfer(dest, src, str_len);
- internal_memcpy(shadow_for(dest), shadow_for(src),
- sizeof(dfsan_label) * str_len);
+ dfsan_mem_shadow_transfer(dest, src, str_len);
}
*ret_label = dst_label;
*ret_origin = dst_origin;
return ret;
}
+}
-static long int dfsan_strtol(const char *nptr, char **endptr, int base,
- char **tmp_endptr) {
+template <typename Fn>
+static ALWAYS_INLINE auto dfsan_strtol_impl(
+ Fn real, const char *nptr, char **endptr, int base,
+ char **tmp_endptr) -> decltype(real(nullptr, nullptr, 0)) {
assert(tmp_endptr);
- long int ret = strtol(nptr, tmp_endptr, base);
+ auto ret = real(nptr, tmp_endptr, base);
if (endptr)
*endptr = *tmp_endptr;
return ret;
}
+extern "C" {
static void dfsan_strtolong_label(const char *nptr, const char *tmp_endptr,
dfsan_label base_label,
dfsan_label *ret_label) {
@@ -1152,30 +1244,6 @@ static void dfsan_strtolong_origin(const char *nptr, const char *tmp_endptr,
}
}
-SANITIZER_INTERFACE_ATTRIBUTE
-long int __dfsw_strtol(const char *nptr, char **endptr, int base,
- dfsan_label nptr_label, dfsan_label endptr_label,
- dfsan_label base_label, dfsan_label *ret_label) {
- char *tmp_endptr;
- long int ret = dfsan_strtol(nptr, endptr, base, &tmp_endptr);
- dfsan_strtolong_label(nptr, tmp_endptr, base_label, ret_label);
- return ret;
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
-long int __dfso_strtol(const char *nptr, char **endptr, int base,
- dfsan_label nptr_label, dfsan_label endptr_label,
- dfsan_label base_label, dfsan_label *ret_label,
- dfsan_origin nptr_origin, dfsan_origin endptr_origin,
- dfsan_origin base_origin, dfsan_origin *ret_origin) {
- char *tmp_endptr;
- long int ret = dfsan_strtol(nptr, endptr, base, &tmp_endptr);
- dfsan_strtolong_label(nptr, tmp_endptr, base_label, ret_label);
- dfsan_strtolong_origin(nptr, tmp_endptr, base_label, ret_label, base_origin,
- ret_origin);
- return ret;
-}
-
static double dfsan_strtod(const char *nptr, char **endptr, char **tmp_endptr) {
assert(tmp_endptr);
double ret = strtod(nptr, tmp_endptr);
@@ -1223,108 +1291,40 @@ double __dfso_strtod(const char *nptr, char **endptr, dfsan_label nptr_label,
return ret;
}
-static long long int dfsan_strtoll(const char *nptr, char **endptr, int base,
- char **tmp_endptr) {
- assert(tmp_endptr);
- long long int ret = strtoll(nptr, tmp_endptr, base);
- if (endptr)
- *endptr = *tmp_endptr;
- return ret;
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
-long long int __dfsw_strtoll(const char *nptr, char **endptr, int base,
- dfsan_label nptr_label, dfsan_label endptr_label,
- dfsan_label base_label, dfsan_label *ret_label) {
- char *tmp_endptr;
- long long int ret = dfsan_strtoll(nptr, endptr, base, &tmp_endptr);
- dfsan_strtolong_label(nptr, tmp_endptr, base_label, ret_label);
- return ret;
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
-long long int __dfso_strtoll(const char *nptr, char **endptr, int base,
- dfsan_label nptr_label, dfsan_label endptr_label,
- dfsan_label base_label, dfsan_label *ret_label,
- dfsan_origin nptr_origin,
- dfsan_origin endptr_origin,
- dfsan_origin base_origin,
- dfsan_origin *ret_origin) {
- char *tmp_endptr;
- long long int ret = dfsan_strtoll(nptr, endptr, base, &tmp_endptr);
- dfsan_strtolong_label(nptr, tmp_endptr, base_label, ret_label);
- dfsan_strtolong_origin(nptr, tmp_endptr, base_label, ret_label, base_origin,
- ret_origin);
- return ret;
-}
-
-static unsigned long int dfsan_strtoul(const char *nptr, char **endptr,
- int base, char **tmp_endptr) {
- assert(tmp_endptr);
- unsigned long int ret = strtoul(nptr, tmp_endptr, base);
- if (endptr)
- *endptr = *tmp_endptr;
- return ret;
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
-unsigned long int __dfsw_strtoul(const char *nptr, char **endptr, int base,
- dfsan_label nptr_label, dfsan_label endptr_label,
- dfsan_label base_label, dfsan_label *ret_label) {
- char *tmp_endptr;
- unsigned long int ret = dfsan_strtoul(nptr, endptr, base, &tmp_endptr);
- dfsan_strtolong_label(nptr, tmp_endptr, base_label, ret_label);
- return ret;
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
-unsigned long int __dfso_strtoul(
- const char *nptr, char **endptr, int base, dfsan_label nptr_label,
- dfsan_label endptr_label, dfsan_label base_label, dfsan_label *ret_label,
- dfsan_origin nptr_origin, dfsan_origin endptr_origin,
- dfsan_origin base_origin, dfsan_origin *ret_origin) {
- char *tmp_endptr;
- unsigned long int ret = dfsan_strtoul(nptr, endptr, base, &tmp_endptr);
- dfsan_strtolong_label(nptr, tmp_endptr, base_label, ret_label);
- dfsan_strtolong_origin(nptr, tmp_endptr, base_label, ret_label, base_origin,
- ret_origin);
- return ret;
-}
-
-static long long unsigned int dfsan_strtoull(const char *nptr, char **endptr,
- int base, char **tmp_endptr) {
- assert(tmp_endptr);
- long long unsigned int ret = strtoull(nptr, tmp_endptr, base);
- if (endptr)
- *endptr = *tmp_endptr;
- return ret;
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
-long long unsigned int __dfsw_strtoull(const char *nptr, char **endptr,
- int base, dfsan_label nptr_label,
- dfsan_label endptr_label,
- dfsan_label base_label,
- dfsan_label *ret_label) {
- char *tmp_endptr;
- long long unsigned int ret = dfsan_strtoull(nptr, endptr, base, &tmp_endptr);
- dfsan_strtolong_label(nptr, tmp_endptr, base_label, ret_label);
- return ret;
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
-long long unsigned int __dfso_strtoull(
- const char *nptr, char **endptr, int base, dfsan_label nptr_label,
- dfsan_label endptr_label, dfsan_label base_label, dfsan_label *ret_label,
- dfsan_origin nptr_origin, dfsan_origin endptr_origin,
- dfsan_origin base_origin, dfsan_origin *ret_origin) {
- char *tmp_endptr;
- long long unsigned int ret = dfsan_strtoull(nptr, endptr, base, &tmp_endptr);
- dfsan_strtolong_label(nptr, tmp_endptr, base_label, ret_label);
- dfsan_strtolong_origin(nptr, tmp_endptr, base_label, ret_label, base_origin,
- ret_origin);
- return ret;
-}
+WRAPPER_ALIAS(__isoc23_strtod, strtod)
+
+#define WRAPPER_STRTO(ret_type, fun) \
+ SANITIZER_INTERFACE_ATTRIBUTE ret_type __dfsw_##fun( \
+ const char *nptr, char **endptr, int base, dfsan_label nptr_label, \
+ dfsan_label endptr_label, dfsan_label base_label, \
+ dfsan_label *ret_label) { \
+ char *tmp_endptr; \
+ auto ret = dfsan_strtol_impl(fun, nptr, endptr, base, &tmp_endptr); \
+ dfsan_strtolong_label(nptr, tmp_endptr, base_label, ret_label); \
+ return ret; \
+ } \
+ SANITIZER_INTERFACE_ATTRIBUTE ret_type __dfso_##fun( \
+ const char *nptr, char **endptr, int base, dfsan_label nptr_label, \
+ dfsan_label endptr_label, dfsan_label base_label, \
+ dfsan_label *ret_label, dfsan_origin nptr_origin, \
+ dfsan_origin endptr_origin, dfsan_origin base_origin, \
+ dfsan_origin *ret_origin) { \
+ char *tmp_endptr; \
+ auto ret = dfsan_strtol_impl(fun, nptr, endptr, base, &tmp_endptr); \
+ dfsan_strtolong_label(nptr, tmp_endptr, base_label, ret_label); \
+ dfsan_strtolong_origin(nptr, tmp_endptr, base_label, ret_label, \
+ base_origin, ret_origin); \
+ return ret; \
+ }
+
+WRAPPER_STRTO(long, strtol)
+WRAPPER_STRTO(long long, strtoll)
+WRAPPER_STRTO(unsigned long, strtoul)
+WRAPPER_STRTO(unsigned long long, strtoull)
+WRAPPER_ALIAS(__isoc23_strtol, strtol)
+WRAPPER_ALIAS(__isoc23_strtoll, strtoll)
+WRAPPER_ALIAS(__isoc23_strtoul, strtoul)
+WRAPPER_ALIAS(__isoc23_strtoull, strtoull)
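For reference, WRAPPER_STRTO(long, strtol) expands to the following (label variant shown; the __dfso_ origin variant is analogous):

SANITIZER_INTERFACE_ATTRIBUTE long __dfsw_strtol(
    const char *nptr, char **endptr, int base, dfsan_label nptr_label,
    dfsan_label endptr_label, dfsan_label base_label,
    dfsan_label *ret_label) {
  char *tmp_endptr;
  auto ret = dfsan_strtol_impl(strtol, nptr, endptr, base, &tmp_endptr);
  dfsan_strtolong_label(nptr, tmp_endptr, base_label, ret_label);
  return ret;
}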
SANITIZER_INTERFACE_ATTRIBUTE
time_t __dfsw_time(time_t *t, dfsan_label t_label, dfsan_label *ret_label) {
@@ -1609,10 +1609,7 @@ static void SignalHandler(int signo) {
SignalHandlerScope signal_handler_scope;
ScopedClearThreadLocalState scoped_clear_tls;
- // Clear shadows for all inputs provided by system. This is why DFSan
- // instrumentation generates a trampoline function to each function pointer,
- // and uses the trampoline to clear shadows. However sigaction does not use
- // a function pointer directly, so we have to do this manually.
+  // Clear shadows for all inputs provided by the system.
dfsan_clear_arg_tls(0, sizeof(dfsan_label));
typedef void (*signal_cb)(int x);
@@ -1713,22 +1710,18 @@ static sighandler_t dfsan_signal(int signum, sighandler_t handler,
}
SANITIZER_INTERFACE_ATTRIBUTE
-sighandler_t __dfsw_signal(int signum,
- void *(*handler_trampoline)(void *, int, dfsan_label,
- dfsan_label *),
- sighandler_t handler, dfsan_label signum_label,
- dfsan_label handler_label, dfsan_label *ret_label) {
+sighandler_t __dfsw_signal(int signum, sighandler_t handler,
+ dfsan_label signum_label, dfsan_label handler_label,
+ dfsan_label *ret_label) {
return dfsan_signal(signum, handler, ret_label);
}
SANITIZER_INTERFACE_ATTRIBUTE
-sighandler_t __dfso_signal(
- int signum,
- void *(*handler_trampoline)(void *, int, dfsan_label, dfsan_label *,
- dfsan_origin, dfsan_origin *),
- sighandler_t handler, dfsan_label signum_label, dfsan_label handler_label,
- dfsan_label *ret_label, dfsan_origin signum_origin,
- dfsan_origin handler_origin, dfsan_origin *ret_origin) {
+sighandler_t __dfso_signal(int signum, sighandler_t handler,
+ dfsan_label signum_label, dfsan_label handler_label,
+ dfsan_label *ret_label, dfsan_origin signum_origin,
+ dfsan_origin handler_origin,
+ dfsan_origin *ret_origin) {
return dfsan_signal(signum, handler, ret_label);
}
@@ -2068,47 +2061,62 @@ SANITIZER_INTERFACE_ATTRIBUTE int __dfso_getpeername(
addrlen_label, ret_label);
}
-// Type of the trampoline function passed to the custom version of
-// dfsan_set_write_callback.
-typedef void (*write_trampoline_t)(
- void *callback,
- int fd, const void *buf, ssize_t count,
- dfsan_label fd_label, dfsan_label buf_label, dfsan_label count_label);
-
-typedef void (*write_origin_trampoline_t)(
- void *callback, int fd, const void *buf, ssize_t count,
- dfsan_label fd_label, dfsan_label buf_label, dfsan_label count_label,
- dfsan_origin fd_origin, dfsan_origin buf_origin, dfsan_origin count_origin);
+// Type of the function passed to dfsan_set_write_callback.
+typedef void (*write_dfsan_callback_t)(int fd, const void *buf, ssize_t count);
// Calls to dfsan_set_write_callback() set the values in this struct.
// Calls to the custom version of write() read (and invoke) them.
static struct {
- write_trampoline_t write_callback_trampoline = nullptr;
- void *write_callback = nullptr;
+ write_dfsan_callback_t write_callback = nullptr;
} write_callback_info;
-static struct {
- write_origin_trampoline_t write_callback_trampoline = nullptr;
- void *write_callback = nullptr;
-} write_origin_callback_info;
-
-SANITIZER_INTERFACE_ATTRIBUTE void
-__dfsw_dfsan_set_write_callback(
- write_trampoline_t write_callback_trampoline,
- void *write_callback,
- dfsan_label write_callback_label,
+SANITIZER_INTERFACE_ATTRIBUTE void __dfsw_dfsan_set_write_callback(
+ write_dfsan_callback_t write_callback, dfsan_label write_callback_label,
dfsan_label *ret_label) {
- write_callback_info.write_callback_trampoline = write_callback_trampoline;
write_callback_info.write_callback = write_callback;
}
SANITIZER_INTERFACE_ATTRIBUTE void __dfso_dfsan_set_write_callback(
- write_origin_trampoline_t write_callback_trampoline, void *write_callback,
- dfsan_label write_callback_label, dfsan_label *ret_label,
- dfsan_origin write_callback_origin, dfsan_origin *ret_origin) {
- write_origin_callback_info.write_callback_trampoline =
- write_callback_trampoline;
- write_origin_callback_info.write_callback = write_callback;
+ write_dfsan_callback_t write_callback, dfsan_label write_callback_label,
+ dfsan_label *ret_label, dfsan_origin write_callback_origin,
+ dfsan_origin *ret_origin) {
+ write_callback_info.write_callback = write_callback;
+}
+
+static inline void setup_tls_args_for_write_callback(
+ dfsan_label fd_label, dfsan_label buf_label, dfsan_label count_label,
+ bool origins, dfsan_origin fd_origin, dfsan_origin buf_origin,
+ dfsan_origin count_origin) {
+ // The callback code will expect argument shadow labels in the args TLS,
+ // and origin labels in the origin args TLS.
+ // Previously this was done by a trampoline, but we want to remove this:
+ // https://github.com/llvm/llvm-project/issues/54172
+ //
+ // Instead, this code is manually setting up the args TLS data.
+ //
+ // The offsets used need to correspond with the instrumentation code,
+ // see llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
+ // DFSanFunction::getShadowForTLSArgument.
+ // https://github.com/llvm/llvm-project/blob/0acc9e4b5edd8b39ff3d4c6d0e17f02007671c4e/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp#L1684
+ // https://github.com/llvm/llvm-project/blob/0acc9e4b5edd8b39ff3d4c6d0e17f02007671c4e/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp#L125
+ //
+ // Here the arguments are all primitives, but it can be more complex
+ // to compute offsets for array/aggregate type arguments.
+ //
+  // TODO(browneee): Consider a builtin to improve maintainability.
+ // With a builtin, we would provide the argument labels via builtin,
+ // and the builtin would reuse parts of the instrumentation code to ensure
+ // that this code and the instrumentation can never be out of sync.
+ // Note: Currently DFSan instrumentation does not run on this code, so
+ // the builtin may need to be handled outside DFSan instrumentation.
+ dfsan_set_arg_tls(0, fd_label);
+ dfsan_set_arg_tls(1, buf_label);
+ dfsan_set_arg_tls(2, count_label);
+ if (origins) {
+ dfsan_set_arg_origin_tls(0, fd_origin);
+ dfsan_set_arg_origin_tls(1, buf_origin);
+ dfsan_set_arg_origin_tls(2, count_origin);
+ }
}
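
As a point of reference, a minimal sketch of the user-facing side (the callback name my_write_cb is hypothetical; dfsan_set_write_callback is the public entry point behind the __dfsw_/__dfso_ wrappers above):

#include <sanitizer/dfsan_interface.h>
#include <sys/types.h>

// Sketch: registering a write callback in an instrumented program.
static void my_write_cb(int fd, const void *buf, ssize_t count) {
  // Under instrumentation, the labels of fd/buf/count are read out of the
  // args TLS that setup_tls_args_for_write_callback populates.
}

// Somewhere during startup:
//   dfsan_set_write_callback(my_write_cb);
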
SANITIZER_INTERFACE_ATTRIBUTE int
@@ -2116,10 +2124,9 @@ __dfsw_write(int fd, const void *buf, size_t count,
dfsan_label fd_label, dfsan_label buf_label,
dfsan_label count_label, dfsan_label *ret_label) {
if (write_callback_info.write_callback) {
- write_callback_info.write_callback_trampoline(
- write_callback_info.write_callback,
- fd, buf, count,
- fd_label, buf_label, count_label);
+ setup_tls_args_for_write_callback(fd_label, buf_label, count_label, false,
+ 0, 0, 0);
+ write_callback_info.write_callback(fd, buf, count);
}
*ret_label = 0;
@@ -2131,16 +2138,16 @@ SANITIZER_INTERFACE_ATTRIBUTE int __dfso_write(
dfsan_label buf_label, dfsan_label count_label, dfsan_label *ret_label,
dfsan_origin fd_origin, dfsan_origin buf_origin, dfsan_origin count_origin,
dfsan_origin *ret_origin) {
- if (write_origin_callback_info.write_callback) {
- write_origin_callback_info.write_callback_trampoline(
- write_origin_callback_info.write_callback, fd, buf, count, fd_label,
- buf_label, count_label, fd_origin, buf_origin, count_origin);
+ if (write_callback_info.write_callback) {
+ setup_tls_args_for_write_callback(fd_label, buf_label, count_label, true,
+ fd_origin, buf_origin, count_origin);
+ write_callback_info.write_callback(fd, buf, count);
}
*ret_label = 0;
return write(fd, buf, count);
}
-} // namespace __dfsan
+} // namespace __dfsan
// Type used to extract a dfsan_label with va_arg()
typedef int dfsan_label_va;
@@ -2149,8 +2156,14 @@ typedef int dfsan_label_va;
// '%.3f').
struct Formatter {
Formatter(char *str_, const char *fmt_, size_t size_)
- : str(str_), str_off(0), size(size_), fmt_start(fmt_), fmt_cur(fmt_),
- width(-1) {}
+ : str(str_),
+ str_off(0),
+ size(size_),
+ fmt_start(fmt_),
+ fmt_cur(fmt_),
+ width(-1),
+ num_scanned(-1),
+ skip(false) {}
int format() {
char *tmp_fmt = build_format_string();
@@ -2175,12 +2188,50 @@ struct Formatter {
return retval;
}
- char *build_format_string() {
+ int scan() {
+ char *tmp_fmt = build_format_string(true);
+ int read_count = 0;
+ int retval = sscanf(str + str_off, tmp_fmt, &read_count);
+ if (retval > 0) {
+ if (-1 == num_scanned)
+ num_scanned = 0;
+ num_scanned += retval;
+ }
+ free(tmp_fmt);
+ return read_count;
+ }
+
+ template <typename T>
+ int scan(T arg) {
+ char *tmp_fmt = build_format_string(true);
+ int read_count = 0;
+ int retval = sscanf(str + str_off, tmp_fmt, arg, &read_count);
+ if (retval > 0) {
+ if (-1 == num_scanned)
+ num_scanned = 0;
+ num_scanned += retval;
+ }
+ free(tmp_fmt);
+ return read_count;
+ }
+
+  // with_n toggles appending a trailing %n directive; off by default.
+ char *build_format_string(bool with_n = false) {
size_t fmt_size = fmt_cur - fmt_start + 1;
- char *new_fmt = (char *)malloc(fmt_size + 1);
+ size_t add_size = 0;
+ if (with_n)
+ add_size = 2;
+ char *new_fmt = (char *)malloc(fmt_size + 1 + add_size);
assert(new_fmt);
internal_memcpy(new_fmt, fmt_start, fmt_size);
- new_fmt[fmt_size] = '\0';
+ if (!with_n) {
+ new_fmt[fmt_size] = '\0';
+ } else {
+ new_fmt[fmt_size] = '%';
+ new_fmt[fmt_size + 1] = 'n';
+ new_fmt[fmt_size + 2] = '\0';
+ }
+
return new_fmt;
}
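
To see why the trailing %n works (a worked sketch, not part of the patch): for a directive like %d the rebuilt format becomes "%d%n", and %n stores the number of input bytes consumed so far without counting toward sscanf's return value:

#include <cstdio>

int main() {
  int value = 0, consumed = 0;
  // Rebuilt format for the "%d" directive: "%d%n".
  int matched = sscanf("42;rest", "%d%n", &value, &consumed);
  // matched == 1, value == 42, consumed == 2: exactly the read_count
  // that Formatter::scan() returns for this directive.
  return matched == 1 ? 0 : 1;
}
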
@@ -2212,6 +2263,8 @@ struct Formatter {
const char *fmt_start;
const char *fmt_cur;
int width;
+ int num_scanned;
+ bool skip;
};
// Formats the input and propagates the input labels to the output. The output
@@ -2339,9 +2392,8 @@ static int format_buffer(char *str, size_t size, const char *fmt,
formatter.num_written_bytes(retval));
}
va_labels++;
- internal_memcpy(shadow_for(formatter.str_cur()), shadow_for(arg),
- sizeof(dfsan_label) *
- formatter.num_written_bytes(retval));
+ dfsan_mem_shadow_transfer(formatter.str_cur(), arg,
+ formatter.num_written_bytes(retval));
end_fmt = true;
break;
}
@@ -2405,6 +2457,249 @@ static int format_buffer(char *str, size_t size, const char *fmt,
return formatter.str_off;
}
+// This function is an inverse of format_buffer: we take the input buffer,
+// scan it in search for format strings and store the results in the varargs.
+// The labels are propagated from the input buffer to the varargs.
+static int scan_buffer(char *str, size_t size, const char *fmt,
+ dfsan_label *va_labels, dfsan_label *ret_label,
+ dfsan_origin *str_origin, dfsan_origin *ret_origin,
+ va_list ap) {
+ Formatter formatter(str, fmt, size);
+ while (*formatter.fmt_cur) {
+ formatter.fmt_start = formatter.fmt_cur;
+ formatter.width = -1;
+ formatter.skip = false;
+ int read_count = 0;
+ void *dst_ptr = 0;
+ size_t write_size = 0;
+ if (*formatter.fmt_cur != '%') {
+ // Ordinary character. Consume all the characters until a '%' or the end
+ // of the string.
+ for (; *(formatter.fmt_cur + 1) && *(formatter.fmt_cur + 1) != '%';
+ ++formatter.fmt_cur) {
+ }
+ read_count = formatter.scan();
+ dfsan_set_label(0, formatter.str_cur(),
+ formatter.num_written_bytes(read_count));
+ } else {
+ // Conversion directive. Consume all the characters until a conversion
+ // specifier or the end of the string.
+ bool end_fmt = false;
+ for (; *formatter.fmt_cur && !end_fmt;) {
+ switch (*++formatter.fmt_cur) {
+ case 'd':
+ case 'i':
+ case 'o':
+ case 'u':
+ case 'x':
+ case 'X':
+ if (formatter.skip) {
+ read_count = formatter.scan();
+ } else {
+ switch (*(formatter.fmt_cur - 1)) {
+ case 'h':
+ // Also covers the 'hh' case (since the size of the arg is still
+ // an int).
+ dst_ptr = va_arg(ap, int *);
+ read_count = formatter.scan((int *)dst_ptr);
+ write_size = sizeof(int);
+ break;
+ case 'l':
+ if (formatter.fmt_cur - formatter.fmt_start >= 2 &&
+ *(formatter.fmt_cur - 2) == 'l') {
+ dst_ptr = va_arg(ap, long long int *);
+ read_count = formatter.scan((long long int *)dst_ptr);
+ write_size = sizeof(long long int);
+ } else {
+ dst_ptr = va_arg(ap, long int *);
+ read_count = formatter.scan((long int *)dst_ptr);
+ write_size = sizeof(long int);
+ }
+ break;
+ case 'q':
+ dst_ptr = va_arg(ap, long long int *);
+ read_count = formatter.scan((long long int *)dst_ptr);
+ write_size = sizeof(long long int);
+ break;
+ case 'j':
+ dst_ptr = va_arg(ap, intmax_t *);
+ read_count = formatter.scan((intmax_t *)dst_ptr);
+ write_size = sizeof(intmax_t);
+ break;
+ case 'z':
+ case 't':
+ dst_ptr = va_arg(ap, size_t *);
+ read_count = formatter.scan((size_t *)dst_ptr);
+ write_size = sizeof(size_t);
+ break;
+ default:
+ dst_ptr = va_arg(ap, int *);
+ read_count = formatter.scan((int *)dst_ptr);
+ write_size = sizeof(int);
+ }
+ // get the label associated with the string at the corresponding
+ // place
+ dfsan_label l = dfsan_read_label(
+ formatter.str_cur(), formatter.num_written_bytes(read_count));
+ dfsan_set_label(l, dst_ptr, write_size);
+ if (str_origin != nullptr) {
+ dfsan_set_label(l, dst_ptr, write_size);
+ size_t scan_count = formatter.num_written_bytes(read_count);
+ size_t size = scan_count > write_size ? write_size : scan_count;
+ dfsan_mem_origin_transfer(dst_ptr, formatter.str_cur(), size);
+ }
+ }
+ end_fmt = true;
+
+ break;
+
+ case 'a':
+ case 'A':
+ case 'e':
+ case 'E':
+ case 'f':
+ case 'F':
+ case 'g':
+ case 'G':
+ if (formatter.skip) {
+ read_count = formatter.scan();
+ } else {
+ if (*(formatter.fmt_cur - 1) == 'L') {
+ dst_ptr = va_arg(ap, long double *);
+ read_count = formatter.scan((long double *)dst_ptr);
+ write_size = sizeof(long double);
+ } else if (*(formatter.fmt_cur - 1) == 'l') {
+ dst_ptr = va_arg(ap, double *);
+ read_count = formatter.scan((double *)dst_ptr);
+ write_size = sizeof(double);
+ } else {
+ dst_ptr = va_arg(ap, float *);
+ read_count = formatter.scan((float *)dst_ptr);
+ write_size = sizeof(float);
+ }
+ dfsan_label l = dfsan_read_label(
+ formatter.str_cur(), formatter.num_written_bytes(read_count));
+ dfsan_set_label(l, dst_ptr, write_size);
+ if (str_origin != nullptr) {
+ dfsan_set_label(l, dst_ptr, write_size);
+ size_t scan_count = formatter.num_written_bytes(read_count);
+ size_t size = scan_count > write_size ? write_size : scan_count;
+ dfsan_mem_origin_transfer(dst_ptr, formatter.str_cur(), size);
+ }
+ }
+ end_fmt = true;
+ break;
+
+ case 'c':
+ if (formatter.skip) {
+ read_count = formatter.scan();
+ } else {
+ dst_ptr = va_arg(ap, char *);
+ read_count = formatter.scan((char *)dst_ptr);
+ write_size = sizeof(char);
+ dfsan_label l = dfsan_read_label(
+ formatter.str_cur(), formatter.num_written_bytes(read_count));
+ dfsan_set_label(l, dst_ptr, write_size);
+ if (str_origin != nullptr) {
+ size_t scan_count = formatter.num_written_bytes(read_count);
+ size_t size = scan_count > write_size ? write_size : scan_count;
+ dfsan_mem_origin_transfer(dst_ptr, formatter.str_cur(), size);
+ }
+ }
+ end_fmt = true;
+ break;
+
+ case 's': {
+ if (formatter.skip) {
+ read_count = formatter.scan();
+ } else {
+ dst_ptr = va_arg(ap, char *);
+ read_count = formatter.scan((char *)dst_ptr);
+ if (1 == read_count) {
+ // special case: we have parsed a single string and we need to
+ // update read_count with the string size
+ read_count = strlen((char *)dst_ptr);
+ }
+ if (str_origin)
+ dfsan_mem_origin_transfer(dst_ptr, formatter.str_cur(),
+ formatter.num_written_bytes(read_count));
+ va_labels++;
+ dfsan_mem_shadow_transfer(dst_ptr, formatter.str_cur(),
+ formatter.num_written_bytes(read_count));
+ }
+ end_fmt = true;
+ break;
+ }
+
+ case 'p':
+ if (formatter.skip) {
+ read_count = formatter.scan();
+ } else {
+ dst_ptr = va_arg(ap, void *);
+          read_count = formatter.scan(
+              (int *)dst_ptr);  // note: scan as int* rather than void* so
+                                // there is a concrete type for sizeof below
+ write_size = sizeof(int);
+
+ dfsan_label l = dfsan_read_label(
+ formatter.str_cur(), formatter.num_written_bytes(read_count));
+ dfsan_set_label(l, dst_ptr, write_size);
+ if (str_origin != nullptr) {
+ dfsan_set_label(l, dst_ptr, write_size);
+ size_t scan_count = formatter.num_written_bytes(read_count);
+ size_t size = scan_count > write_size ? write_size : scan_count;
+ dfsan_mem_origin_transfer(dst_ptr, formatter.str_cur(), size);
+ }
+ }
+ end_fmt = true;
+ break;
+
+ case 'n': {
+ if (!formatter.skip) {
+ int *ptr = va_arg(ap, int *);
+ *ptr = (int)formatter.str_off;
+ *va_labels++ = 0;
+ dfsan_set_label(0, ptr, sizeof(*ptr));
+ if (str_origin != nullptr)
+ *str_origin++ = 0;
+ }
+ end_fmt = true;
+ break;
+ }
+
+ case '%':
+ read_count = formatter.scan();
+ end_fmt = true;
+ break;
+
+ case '*':
+ formatter.skip = true;
+ break;
+
+ default:
+ break;
+ }
+ }
+ }
+
+ if (read_count < 0) {
+ // There was an error.
+ return read_count;
+ }
+
+ formatter.fmt_cur++;
+ formatter.str_off += read_count;
+ }
+
+ (void)va_labels; // Silence unused-but-set-parameter warning
+ *ret_label = 0;
+ if (ret_origin)
+ *ret_origin = 0;
+
+ // Number of items scanned in total.
+ return formatter.num_scanned;
+}
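
End to end, the effect on an instrumented program looks roughly like this (a sketch assuming a -fsanitize=dataflow build; the label value 1 is an arbitrary bitmask in the current 8-bit label model):

#include <sanitizer/dfsan_interface.h>
#include <cstdio>

int main() {
  char buf[] = "123 abc";
  dfsan_set_label(/*label=*/1, buf, sizeof(buf) - 1);
  int n = 0;
  char word[8];
  sscanf(buf, "%d %s", &n, word);  // dispatched to __dfsw_sscanf via abilist
  // dfsan_read_label(&n, sizeof(n)) and dfsan_read_label(word, 3) should now
  // both report label 1: scan_buffer propagated taint from buf to the outputs.
  return 0;
}
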
+
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
int __dfsw_sprintf(char *str, const char *format, dfsan_label str_label,
@@ -2412,7 +2707,8 @@ int __dfsw_sprintf(char *str, const char *format, dfsan_label str_label,
dfsan_label *ret_label, ...) {
va_list ap;
va_start(ap, ret_label);
- int ret = format_buffer(str, ~0ul, format, va_labels, ret_label, nullptr,
+
+ int ret = format_buffer(str, INT32_MAX, format, va_labels, ret_label, nullptr,
nullptr, ap);
va_end(ap);
return ret;
@@ -2426,8 +2722,8 @@ int __dfso_sprintf(char *str, const char *format, dfsan_label str_label,
dfsan_origin *ret_origin, ...) {
va_list ap;
va_start(ap, ret_origin);
- int ret = format_buffer(str, ~0ul, format, va_labels, ret_label, va_origins,
- ret_origin, ap);
+ int ret = format_buffer(str, INT32_MAX, format, va_labels, ret_label,
+ va_origins, ret_origin, ap);
va_end(ap);
return ret;
}
@@ -2460,14 +2756,43 @@ int __dfso_snprintf(char *str, size_t size, const char *format,
return ret;
}
+SANITIZER_INTERFACE_ATTRIBUTE
+int __dfsw_sscanf(char *str, const char *format, dfsan_label str_label,
+ dfsan_label format_label, dfsan_label *va_labels,
+ dfsan_label *ret_label, ...) {
+ va_list ap;
+ va_start(ap, ret_label);
+ int ret = scan_buffer(str, ~0ul, format, va_labels, ret_label, nullptr,
+ nullptr, ap);
+ va_end(ap);
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __dfso_sscanf(char *str, const char *format, dfsan_label str_label,
+ dfsan_label format_label, dfsan_label *va_labels,
+ dfsan_label *ret_label, dfsan_origin str_origin,
+ dfsan_origin format_origin, dfsan_origin *va_origins,
+ dfsan_origin *ret_origin, ...) {
+ va_list ap;
+ va_start(ap, ret_origin);
+ int ret = scan_buffer(str, ~0ul, format, va_labels, ret_label, &str_origin,
+ ret_origin, ap);
+ va_end(ap);
+ return ret;
+}
+
+WRAPPER_ALIAS(__isoc99_sscanf, sscanf)
+WRAPPER_ALIAS(__isoc23_sscanf, sscanf)
+
static void BeforeFork() {
- StackDepotLockAll();
- GetChainedOriginDepot()->LockAll();
+ StackDepotLockBeforeFork();
+ ChainedOriginDepotLockBeforeFork();
}
-static void AfterFork() {
- GetChainedOriginDepot()->UnlockAll();
- StackDepotUnlockAll();
+static void AfterFork(bool fork_child) {
+ ChainedOriginDepotUnlockAfterFork(fork_child);
+ StackDepotUnlockAfterFork(fork_child);
}
SANITIZER_INTERFACE_ATTRIBUTE
@@ -2481,7 +2806,7 @@ SANITIZER_INTERFACE_ATTRIBUTE
pid_t __dfso_fork(dfsan_label *ret_label, dfsan_origin *ret_origin) {
BeforeFork();
pid_t pid = __dfsw_fork(ret_label);
- AfterFork();
+ AfterFork(/* fork_child= */ pid == 0);
return pid;
}
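
The fork_child argument follows fork()'s return convention, a sketch:

pid_t pid = fork();
if (pid == 0) {
  // Child: only the forking thread survives, so locks taken in BeforeFork()
  // may be held by threads that no longer exist; the *AfterFork(true) paths
  // reinitialize rather than conventionally unlock.
} else {
  // Parent (pid > 0) or failure (pid < 0): normal unlocking is safe.
}
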
@@ -2489,7 +2814,8 @@ pid_t __dfso_fork(dfsan_label *ret_label, dfsan_origin *ret_origin) {
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard, u32 *) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard_init, u32 *,
u32 *) {}
-SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_pcs_init, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_pcs_init, const uptr *beg,
+ const uptr *end) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_indir, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_cmp, void) {}
diff --git a/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_interceptors.cpp b/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_interceptors.cpp
index 92be4fc87d49..d8fb9ea86618 100644
--- a/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_interceptors.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_interceptors.cpp
@@ -17,6 +17,7 @@
#include "dfsan/dfsan.h"
#include "dfsan/dfsan_thread.h"
#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_allocator_dlsym.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_errno.h"
@@ -26,11 +27,11 @@
using namespace __sanitizer;
-namespace {
+static bool interceptors_initialized;
-bool interceptors_initialized;
-
-} // namespace
+struct DlsymAlloc : public DlSymAllocator<DlsymAlloc> {
+ static bool UseImpl() { return !__dfsan::dfsan_inited; }
+};
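
DlSymAllocator is the shared CRTP helper that replaces the hand-rolled static pool deleted below; roughly (a simplified sketch of the pattern, not the actual interface):

// Sketch: the shape of DlSymAllocator<Impl> (simplified).
template <class Impl>
struct DlSymAllocatorSketch {
  // Route allocation through an internal static pool whenever the
  // derived class says the real allocator is not ready yet.
  static bool Use() { return Impl::UseImpl(); }
  static bool PointerIsMine(const void *p);  // p lies inside the pool
  static void *Allocate(size_t size);        // carve from the pool
  static void Free(void *p);                 // pool bookkeeping only
};
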
INTERCEPTOR(void *, reallocarray, void *ptr, SIZE_T nmemb, SIZE_T size) {
return __dfsan::dfsan_reallocarray(ptr, nmemb, size);
@@ -47,63 +48,37 @@ INTERCEPTOR(void *, aligned_alloc, SIZE_T alignment, SIZE_T size) {
return __dfsan::dfsan_aligned_alloc(alignment, size);
}
-static uptr allocated_for_dlsym;
-static const uptr kDlsymAllocPoolSize = 1024;
-static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize];
-
-static bool IsInDlsymAllocPool(const void *ptr) {
- uptr off = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
- return off < sizeof(alloc_memory_for_dlsym);
-}
-
-static void *AllocateFromLocalPool(uptr size_in_bytes) {
- uptr size_in_words = RoundUpTo(size_in_bytes, kWordSize) / kWordSize;
- void *mem = (void *)&alloc_memory_for_dlsym[allocated_for_dlsym];
- allocated_for_dlsym += size_in_words;
- CHECK_LT(allocated_for_dlsym, kDlsymAllocPoolSize);
- return mem;
-}
-
INTERCEPTOR(void *, calloc, SIZE_T nmemb, SIZE_T size) {
- if (UNLIKELY(!__dfsan::dfsan_inited))
- // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
- return AllocateFromLocalPool(nmemb * size);
+ if (DlsymAlloc::Use())
+ return DlsymAlloc::Callocate(nmemb, size);
return __dfsan::dfsan_calloc(nmemb, size);
}
INTERCEPTOR(void *, realloc, void *ptr, SIZE_T size) {
- if (UNLIKELY(IsInDlsymAllocPool(ptr))) {
- uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
- uptr copy_size = Min(size, kDlsymAllocPoolSize - offset);
- void *new_ptr;
- if (UNLIKELY(!__dfsan::dfsan_inited)) {
- new_ptr = AllocateFromLocalPool(copy_size);
- } else {
- copy_size = size;
- new_ptr = __dfsan::dfsan_malloc(copy_size);
- }
- internal_memcpy(new_ptr, ptr, copy_size);
- return new_ptr;
- }
+ if (DlsymAlloc::Use() || DlsymAlloc::PointerIsMine(ptr))
+ return DlsymAlloc::Realloc(ptr, size);
return __dfsan::dfsan_realloc(ptr, size);
}
INTERCEPTOR(void *, malloc, SIZE_T size) {
- if (UNLIKELY(!__dfsan::dfsan_inited))
- // Hack: dlsym calls malloc before REAL(malloc) is retrieved from dlsym.
- return AllocateFromLocalPool(size);
+ if (DlsymAlloc::Use())
+ return DlsymAlloc::Allocate(size);
return __dfsan::dfsan_malloc(size);
}
INTERCEPTOR(void, free, void *ptr) {
- if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr)))
+ if (!ptr)
return;
+ if (DlsymAlloc::PointerIsMine(ptr))
+ return DlsymAlloc::Free(ptr);
return __dfsan::dfsan_deallocate(ptr);
}
INTERCEPTOR(void, cfree, void *ptr) {
- if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr)))
+ if (!ptr)
return;
+ if (DlsymAlloc::PointerIsMine(ptr))
+ return DlsymAlloc::Free(ptr);
return __dfsan::dfsan_deallocate(ptr);
}
@@ -152,12 +127,12 @@ INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
if (__dfsan::dfsan_init_is_running) \
return REAL(func)(__VA_ARGS__); \
ENSURE_DFSAN_INITED(); \
- dfsan_set_label(0, __errno_location(), sizeof(int)); /* NOLINT */
+ dfsan_set_label(0, __errno_location(), sizeof(int));
INTERCEPTOR(void *, mmap, void *addr, SIZE_T length, int prot, int flags,
int fd, OFF_T offset) {
if (common_flags()->detect_write_exec)
- ReportMmapWriteExec(prot);
+ ReportMmapWriteExec(prot, flags);
if (!__dfsan::dfsan_inited)
return (void *)internal_mmap(addr, length, prot, flags, fd, offset);
COMMON_INTERCEPTOR_ENTER(mmap, addr, length, prot, flags, fd, offset);
@@ -171,7 +146,7 @@ INTERCEPTOR(void *, mmap, void *addr, SIZE_T length, int prot, int flags,
INTERCEPTOR(void *, mmap64, void *addr, SIZE_T length, int prot, int flags,
int fd, OFF64_T offset) {
if (common_flags()->detect_write_exec)
- ReportMmapWriteExec(prot);
+ ReportMmapWriteExec(prot, flags);
if (!__dfsan::dfsan_inited)
return (void *)internal_mmap(addr, length, prot, flags, fd, offset);
COMMON_INTERCEPTOR_ENTER(mmap64, addr, length, prot, flags, fd, offset);
diff --git a/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_platform.h b/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_platform.h
index 9b4333ee99d0..b849b4b528ad 100644
--- a/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_platform.h
+++ b/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_platform.h
@@ -33,6 +33,32 @@ struct MappingDesc {
#if SANITIZER_LINUX && SANITIZER_WORDSIZE == 64
+# if defined(__aarch64__)
+// The mapping assumes 48-bit VMA. AArch64 maps:
+// - 0x0000000000000-0x0100000000000: 39/42/48-bit programs' own segments
+// - 0x0a00000000000-0x0b00000000000: 48-bit PIE program segments
+// Ideally, this would extend to 0x0c00000000000 (2^45 bytes - the
+// maximum ASLR region for 48-bit VMA) but it is too hard to fit in
+// the larger app/shadow/origin regions.
+// - 0x0e00000000000-0x1000000000000: 48-bit library segments
+const MappingDesc kMemoryLayout[] = {
+ {0X0000000000000, 0X0100000000000, MappingDesc::APP, "app-10-13"},
+ {0X0100000000000, 0X0200000000000, MappingDesc::SHADOW, "shadow-14"},
+ {0X0200000000000, 0X0300000000000, MappingDesc::INVALID, "invalid"},
+ {0X0300000000000, 0X0400000000000, MappingDesc::ORIGIN, "origin-14"},
+ {0X0400000000000, 0X0600000000000, MappingDesc::SHADOW, "shadow-15"},
+ {0X0600000000000, 0X0800000000000, MappingDesc::ORIGIN, "origin-15"},
+ {0X0800000000000, 0X0A00000000000, MappingDesc::INVALID, "invalid"},
+ {0X0A00000000000, 0X0B00000000000, MappingDesc::APP, "app-14"},
+ {0X0B00000000000, 0X0C00000000000, MappingDesc::SHADOW, "shadow-10-13"},
+ {0X0C00000000000, 0X0D00000000000, MappingDesc::INVALID, "invalid"},
+ {0X0D00000000000, 0X0E00000000000, MappingDesc::ORIGIN, "origin-10-13"},
+ {0X0E00000000000, 0X1000000000000, MappingDesc::APP, "app-15"},
+};
+# define MEM_TO_SHADOW(mem) ((uptr)mem ^ 0xB00000000000ULL)
+# define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x200000000000ULL)
+
+# else
// All of the following configurations are supported.
// ASLR disabled: main executable and DSOs at 0x555550000000
// PIE and ASLR: main executable and DSOs at 0x7f0000000000
@@ -51,8 +77,9 @@ const MappingDesc kMemoryLayout[] = {
{0x600000000000ULL, 0x610000000000ULL, MappingDesc::ORIGIN, "origin-1"},
{0x610000000000ULL, 0x700000000000ULL, MappingDesc::INVALID, "invalid"},
{0x700000000000ULL, 0x800000000000ULL, MappingDesc::APP, "app-3"}};
-# define MEM_TO_SHADOW(mem) (((uptr)(mem)) ^ 0x500000000000ULL)
-# define SHADOW_TO_ORIGIN(mem) (((uptr)(mem)) + 0x100000000000ULL)
+# define MEM_TO_SHADOW(mem) (((uptr)(mem)) ^ 0x500000000000ULL)
+# define SHADOW_TO_ORIGIN(mem) (((uptr)(mem)) + 0x100000000000ULL)
+# endif
#else
# error "Unsupported platform"
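
A quick sanity check of the new AArch64 constants (a worked example, not part of the patch):

// app-14 address -> shadow-14 -> origin-14, per the table above.
uptr app    = 0x0A12345678000ULL;           // inside app-14
uptr shadow = app ^ 0xB00000000000ULL;      // 0x0112345678000, in shadow-14
uptr origin = shadow + 0x200000000000ULL;   // 0x0312345678000, in origin-14
// Likewise app-10-13 (below 0x0100000000000) XORs into shadow-10-13 at
// 0x0B00000000000, and adding 0x200000000000 lands in origin-10-13.
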
diff --git a/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_thread.cpp b/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_thread.cpp
index 6869cf231587..e64f0f818fb8 100644
--- a/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_thread.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_thread.cpp
@@ -7,13 +7,11 @@
namespace __dfsan {
-DFsanThread *DFsanThread::Create(void *start_routine_trampoline,
- thread_callback_t start_routine, void *arg,
+DFsanThread *DFsanThread::Create(thread_callback_t start_routine, void *arg,
bool track_origins) {
uptr PageSize = GetPageSizeCached();
uptr size = RoundUpTo(sizeof(DFsanThread), PageSize);
DFsanThread *thread = (DFsanThread *)MmapOrDie(size, __func__);
- thread->start_routine_trampoline_ = start_routine_trampoline;
thread->start_routine_ = start_routine;
thread->arg_ = arg;
thread->track_origins_ = track_origins;
@@ -67,8 +65,6 @@ void DFsanThread::Destroy() {
}
thread_return_t DFsanThread::ThreadStart() {
- Init();
-
if (!start_routine_) {
// start_routine_ == 0 if we're on the main thread or on one of the
// OS X libdispatch worker threads. But nobody is supposed to call
@@ -76,23 +72,15 @@ thread_return_t DFsanThread::ThreadStart() {
return 0;
}
- CHECK(start_routine_trampoline_);
-
- typedef void *(*thread_callback_trampoline_t)(void *, void *, dfsan_label,
- dfsan_label *);
- typedef void *(*thread_callback_origin_trampoline_t)(
- void *, void *, dfsan_label, dfsan_label *, dfsan_origin, dfsan_origin *);
-
- dfsan_label ret_label;
- if (!track_origins_)
- return ((thread_callback_trampoline_t)
- start_routine_trampoline_)((void *)start_routine_, arg_, 0,
- &ret_label);
+ // The only argument is void* arg.
+ //
+  // We have never supported propagating the pointer arg as tainted;
+ // __dfsw_pthread_create/__dfso_pthread_create ignore the taint label.
+ // Note that the bytes pointed-to (probably the much more common case)
+ // can still have taint labels attached to them.
+ dfsan_clear_thread_local_state();
- dfsan_origin ret_origin;
- return ((thread_callback_origin_trampoline_t)
- start_routine_trampoline_)((void *)start_routine_, arg_, 0,
- &ret_label, 0, &ret_origin);
+ return start_routine_(arg_);
}
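
From the application's point of view, only the bytes behind the thread argument stay tainted; a sketch assuming an instrumented build:

#include <sanitizer/dfsan_interface.h>
#include <pthread.h>

struct Args { char data[16]; };

static void *worker(void *p) {
  Args *a = static_cast<Args *>(p);
  // Labels set by the parent on a->data are still visible here...
  dfsan_label l = dfsan_read_label(a->data, sizeof(a->data));
  // ...but the pointer value itself arrives unlabeled, as noted above.
  (void)l;
  return nullptr;
}

// Parent side: dfsan_set_label(1, args.data, sizeof(args.data));
//              pthread_create(&tid, nullptr, worker, &args);
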
DFsanThread::StackBounds DFsanThread::GetStackBounds() const {
diff --git a/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_thread.h b/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_thread.h
index 8dde626f5569..ebc25499e269 100644
--- a/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_thread.h
+++ b/contrib/llvm-project/compiler-rt/lib/dfsan/dfsan_thread.h
@@ -1,5 +1,4 @@
-//===-- dfsan_thread.h -------------------------------------------*- C++
-//-*-===//
+//===-- dfsan_thread.h ------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -16,6 +15,7 @@
#include "dfsan_allocator.h"
#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_posix.h"
namespace __dfsan {
@@ -24,8 +24,7 @@ class DFsanThread {
// NOTE: There is no DFsanThread constructor. It is allocated
// via mmap() and *must* be valid in zero-initialized state.
- static DFsanThread *Create(void *start_routine_trampoline,
- thread_callback_t start_routine, void *arg,
+ static DFsanThread *Create(thread_callback_t start_routine, void *arg,
bool track_origins = false);
static void TSDDtor(void *tsd);
void Destroy();
@@ -46,6 +45,7 @@ class DFsanThread {
DFsanThreadLocalMallocStorage &malloc_storage() { return malloc_storage_; }
int destructor_iterations_;
+ __sanitizer_sigset_t starting_sigset_;
private:
void SetThreadStackAndTls();
@@ -58,7 +58,6 @@ class DFsanThread {
bool AddrIsInStack(uptr addr);
- void *start_routine_trampoline_;
thread_callback_t start_routine_;
void *arg_;
bool track_origins_;
diff --git a/contrib/llvm-project/compiler-rt/lib/dfsan/done_abilist.txt b/contrib/llvm-project/compiler-rt/lib/dfsan/done_abilist.txt
index 3c2670e04c29..86a42ee1b4dc 100644
--- a/contrib/llvm-project/compiler-rt/lib/dfsan/done_abilist.txt
+++ b/contrib/llvm-project/compiler-rt/lib/dfsan/done_abilist.txt
@@ -30,16 +30,32 @@ fun:dfsan_flush=uninstrumented
fun:dfsan_flush=discard
fun:dfsan_print_origin_trace=uninstrumented
fun:dfsan_print_origin_trace=discard
+fun:dfsan_print_origin_id_trace=uninstrumented
+fun:dfsan_print_origin_id_trace=discard
fun:dfsan_sprint_origin_trace=uninstrumented
fun:dfsan_sprint_origin_trace=discard
+fun:dfsan_sprint_origin_id_trace=uninstrumented
+fun:dfsan_sprint_origin_id_trace=discard
fun:dfsan_sprint_stack_trace=uninstrumented
fun:dfsan_sprint_stack_trace=discard
fun:dfsan_get_origin=uninstrumented
fun:dfsan_get_origin=custom
+fun:dfsan_read_origin_of_first_taint=uninstrumented
+fun:dfsan_read_origin_of_first_taint=discard
fun:dfsan_get_init_origin=uninstrumented
fun:dfsan_get_init_origin=discard
fun:dfsan_get_track_origins=uninstrumented
fun:dfsan_get_track_origins=discard
+fun:dfsan_set_conditional_callback=uninstrumented
+fun:dfsan_set_conditional_callback=discard
+fun:dfsan_get_labels_in_signal_conditional=uninstrumented
+fun:dfsan_get_labels_in_signal_conditional=discard
+fun:dfsan_set_reaches_function_callback=uninstrumented
+fun:dfsan_set_reaches_function_callback=discard
+fun:dfsan_get_labels_in_signal_reaches_function=uninstrumented
+fun:dfsan_get_labels_in_signal_reaches_function=discard
+fun:dfsan_reaches_function_callback=uninstrumented
+fun:dfsan_reaches_function_callback=discard
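
For reference, custom entries like those above reroute calls through __dfsw_-prefixed wrappers (__dfso_ under origin tracking) that take one label per argument plus a return-label slot; a sketch using a hypothetical function:

// Sketch for "fun:my_func=custom" (my_func is hypothetical).
extern "C" int __dfsw_my_func(int x, dfsan_label x_label,
                              dfsan_label *ret_label) {
  *ret_label = x_label;  // propagate taint from the argument explicitly
  return my_func(x);
}
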
###############################################################################
# glibc
@@ -72,6 +88,7 @@ fun:isalnum=functional
fun:ispunct=functional
fun:isspace=functional
fun:tolower=functional
+fun:_tolower=functional
fun:toupper=functional
# Functions that return a value that is data-dependent on the input.
@@ -218,6 +235,7 @@ fun:fgets=custom
fun:fstat=custom
fun:getcwd=custom
fun:get_current_dir_name=custom
+fun:getentropy=custom
fun:gethostname=custom
fun:getpeername=custom
fun:getrlimit=custom
@@ -251,6 +269,12 @@ fun:strtoll=custom
fun:strtoul=custom
fun:strtoull=custom
fun:strcat=custom
+fun:strncat=custom
+fun:__isoc23_strtod=custom
+fun:__isoc23_strtol=custom
+fun:__isoc23_strtoll=custom
+fun:__isoc23_strtoul=custom
+fun:__isoc23_strtoull=custom
# Functions that produce an output that is computed from the input, but is not
# necessarily data dependent.
@@ -261,14 +285,16 @@ fun:strcasecmp=custom
fun:strchr=custom
fun:strcmp=custom
fun:strlen=custom
+fun:strnlen=custom
fun:strncasecmp=custom
fun:strncmp=custom
fun:strpbrk=custom
fun:strrchr=custom
fun:strstr=custom
+fun:strsep=custom
# Functions which take action based on global state, such as running a callback
-# set by a sepperate function.
+# set by a separate function.
fun:write=custom
# Functions that take a callback (wrap the callback manually).
@@ -287,6 +313,11 @@ fun:gettimeofday=custom
fun:sprintf=custom
fun:snprintf=custom
+# scanf-like
+fun:sscanf=custom
+fun:__isoc99_sscanf=custom
+fun:__isoc23_sscanf=custom
+
# TODO: custom
fun:asprintf=discard
fun:qsort=discard
@@ -430,8 +461,12 @@ fun:__sanitizer_get_estimated_allocated_size=uninstrumented
fun:__sanitizer_get_estimated_allocated_size=discard
fun:__sanitizer_get_ownership=uninstrumented
fun:__sanitizer_get_ownership=discard
+fun:__sanitizer_get_allocated_begin=uninstrumented
+fun:__sanitizer_get_allocated_begin=discard
fun:__sanitizer_get_allocated_size=uninstrumented
fun:__sanitizer_get_allocated_size=discard
+fun:__sanitizer_get_allocated_size_fast=uninstrumented
+fun:__sanitizer_get_allocated_size_fast=discard
fun:__sanitizer_print_stack_trace=uninstrumented
fun:__sanitizer_print_stack_trace=discard
diff --git a/contrib/llvm-project/compiler-rt/lib/dfsan/libc_ubuntu1404_abilist.txt b/contrib/llvm-project/compiler-rt/lib/dfsan/libc_ubuntu1404_abilist.txt
index a1ea0a06b537..9ffa56a23818 100644
--- a/contrib/llvm-project/compiler-rt/lib/dfsan/libc_ubuntu1404_abilist.txt
+++ b/contrib/llvm-project/compiler-rt/lib/dfsan/libc_ubuntu1404_abilist.txt
@@ -1,3 +1,8 @@
+fun:__isoc23_sscanf=uninstrumented
+fun:__isoc23_strtol=uninstrumented
+fun:__isoc23_strtoll=uninstrumented
+fun:__isoc23_strtoul=uninstrumented
+fun:__isoc23_strtoull=uninstrumented
fun:_Exit=uninstrumented
fun:_IO_adjust_column=uninstrumented
fun:_IO_adjust_wcolumn=uninstrumented
@@ -1852,6 +1857,7 @@ fun:getdirentries64=uninstrumented
fun:getdomainname=uninstrumented
fun:getdtablesize=uninstrumented
fun:getegid=uninstrumented
+fun:getentropy=uninstrumented
fun:getenv=uninstrumented
fun:geteuid=uninstrumented
fun:getfsent=uninstrumented
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerBuiltinsMsvc.h b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerBuiltinsMsvc.h
index ab191b60ef6e..421dee7f6603 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerBuiltinsMsvc.h
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerBuiltinsMsvc.h
@@ -41,7 +41,8 @@ inline uint32_t Clzll(uint64_t X) {
#if !defined(_M_ARM) && !defined(_M_X64)
// Scan the high 32 bits.
if (_BitScanReverse(&LeadZeroIdx, static_cast<unsigned long>(X >> 32)))
- return static_cast<int>(63 - (LeadZeroIdx + 32)); // Create a bit offset from the MSB.
+ return static_cast<int>(
+ 63 - (LeadZeroIdx + 32)); // Create a bit offset from the MSB.
// Scan the low 32 bits.
if (_BitScanReverse(&LeadZeroIdx, static_cast<unsigned long>(X)))
return static_cast<int>(63 - LeadZeroIdx);
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerCommand.h b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerCommand.h
index 87308864af53..718d7e951fb1 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerCommand.h
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerCommand.h
@@ -19,6 +19,7 @@
#include <sstream>
#include <string>
#include <vector>
+#include <thread>
namespace fuzzer {
@@ -33,7 +34,7 @@ public:
Command() : CombinedOutAndErr(false) {}
- explicit Command(const Vector<std::string> &ArgsToAdd)
+ explicit Command(const std::vector<std::string> &ArgsToAdd)
: Args(ArgsToAdd), CombinedOutAndErr(false) {}
explicit Command(const Command &Other)
@@ -58,7 +59,7 @@ public:
// Gets all of the current command line arguments, **including** those after
// "-ignore-remaining-args=1".
- const Vector<std::string> &getArguments() const { return Args; }
+ const std::vector<std::string> &getArguments() const { return Args; }
// Adds the given argument before "-ignore_remaining_args=1", or at the end
// if that flag isn't present.
@@ -68,7 +69,7 @@ public:
// Adds all given arguments before "-ignore_remaining_args=1", or at the end
// if that flag isn't present.
- void addArguments(const Vector<std::string> &ArgsToAdd) {
+ void addArguments(const std::vector<std::string> &ArgsToAdd) {
Args.insert(endMutableArgs(), ArgsToAdd.begin(), ArgsToAdd.end());
}
@@ -139,7 +140,7 @@ public:
// be the equivalent command line.
std::string toString() const {
std::stringstream SS;
- for (auto arg : getArguments())
+ for (const auto &arg : getArguments())
SS << arg << " ";
if (hasOutputFile())
SS << ">" << getOutputFile() << " ";
@@ -155,16 +156,16 @@ private:
Command(Command &&Other) = delete;
Command &operator=(Command &&Other) = delete;
- Vector<std::string>::iterator endMutableArgs() {
+ std::vector<std::string>::iterator endMutableArgs() {
return std::find(Args.begin(), Args.end(), ignoreRemainingArgs());
}
- Vector<std::string>::const_iterator endMutableArgs() const {
+ std::vector<std::string>::const_iterator endMutableArgs() const {
return std::find(Args.begin(), Args.end(), ignoreRemainingArgs());
}
// The command arguments. Args[0] is the command name.
- Vector<std::string> Args;
+ std::vector<std::string> Args;
// True indicates stderr is redirected to stdout.
bool CombinedOutAndErr;
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerCorpus.h b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerCorpus.h
index f8c126072c96..48b5a2cff02e 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerCorpus.h
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerCorpus.h
@@ -18,6 +18,7 @@
#include "FuzzerSHA1.h"
#include "FuzzerTracePC.h"
#include <algorithm>
+#include <bitset>
#include <chrono>
#include <numeric>
#include <random>
@@ -39,13 +40,13 @@ struct InputInfo {
bool MayDeleteFile = false;
bool Reduced = false;
bool HasFocusFunction = false;
- Vector<uint32_t> UniqFeatureSet;
- Vector<uint8_t> DataFlowTraceForFocusFunction;
+ std::vector<uint32_t> UniqFeatureSet;
+ std::vector<uint8_t> DataFlowTraceForFocusFunction;
// Power schedule.
bool NeedsEnergyUpdate = false;
double Energy = 0.0;
double SumIncidence = 0.0;
- Vector<std::pair<uint32_t, uint16_t>> FeatureFreqs;
+ std::vector<std::pair<uint32_t, uint16_t>> FeatureFreqs;
// Delete feature Idx and its frequency from FeatureFreqs.
bool DeleteFeatureFreq(uint32_t Idx) {
@@ -77,7 +78,7 @@ struct InputInfo {
SumIncidence = 0.0;
// Apply add-one smoothing to locally discovered features.
- for (auto F : FeatureFreqs) {
+ for (const auto &F : FeatureFreqs) {
double LocalIncidence = F.second + 1;
Energy -= LocalIncidence * log(LocalIncidence);
SumIncidence += LocalIncidence;
@@ -209,7 +210,7 @@ public:
InputInfo *AddToCorpus(const Unit &U, size_t NumFeatures, bool MayDeleteFile,
bool HasFocusFunction, bool NeverReduce,
std::chrono::microseconds TimeOfUnit,
- const Vector<uint32_t> &FeatureSet,
+ const std::vector<uint32_t> &FeatureSet,
const DataFlowTrace &DFT, const InputInfo *BaseII) {
assert(!U.empty());
if (FeatureDebug)
@@ -258,7 +259,7 @@ public:
}
// Debug-only
- void PrintFeatureSet(const Vector<uint32_t> &FeatureSet) {
+ void PrintFeatureSet(const std::vector<uint32_t> &FeatureSet) {
if (!FeatureDebug) return;
Printf("{");
for (uint32_t Feature: FeatureSet)
@@ -284,7 +285,8 @@ public:
}
}
- void Replace(InputInfo *II, const Unit &U) {
+ void Replace(InputInfo *II, const Unit &U,
+ std::chrono::microseconds TimeOfUnit) {
assert(II->U.size() > U.size());
Hashes.erase(Sha1ToString(II->Sha1));
DeleteFile(*II);
@@ -292,6 +294,7 @@ public:
Hashes.insert(Sha1ToString(II->Sha1));
II->U = U;
II->Reduced = true;
+ II->TimeOfUnit = TimeOfUnit;
DistributionNeedsUpdate = true;
}
@@ -325,7 +328,8 @@ public:
const auto &II = *Inputs[i];
Printf(" [% 3zd %s] sz: % 5zd runs: % 5zd succ: % 5zd focus: %d\n", i,
Sha1ToString(II.Sha1).c_str(), II.U.size(),
- II.NumExecutedMutations, II.NumSuccessfullMutations, II.HasFocusFunction);
+ II.NumExecutedMutations, II.NumSuccessfullMutations,
+ II.HasFocusFunction);
}
}
@@ -379,6 +383,7 @@ public:
}
// Remove most abundant rare feature.
+ IsRareFeature[Delete] = false;
RareFeatures[Delete] = RareFeatures.back();
RareFeatures.pop_back();
@@ -394,6 +399,7 @@ public:
// Add rare feature, handle collisions, and update energy.
RareFeatures.push_back(Idx);
+ IsRareFeature[Idx] = true;
GlobalFeatureFreqs[Idx] = 0;
for (auto II : Inputs) {
II->DeleteFeatureFreq(Idx);
@@ -447,9 +453,7 @@ public:
uint16_t Freq = GlobalFeatureFreqs[Idx32]++;
// Skip if abundant.
- if (Freq > FreqOfMostAbundantRareFeature ||
- std::find(RareFeatures.begin(), RareFeatures.end(), Idx32) ==
- RareFeatures.end())
+ if (Freq > FreqOfMostAbundantRareFeature || !IsRareFeature[Idx32])
return;
// Update global frequencies.
@@ -563,11 +567,11 @@ private:
}
std::piecewise_constant_distribution<double> CorpusDistribution;
- Vector<double> Intervals;
- Vector<double> Weights;
+ std::vector<double> Intervals;
+ std::vector<double> Weights;
std::unordered_set<std::string> Hashes;
- Vector<InputInfo*> Inputs;
+ std::vector<InputInfo *> Inputs;
size_t NumAddedFeatures = 0;
size_t NumUpdatedFeatures = 0;
@@ -577,7 +581,8 @@ private:
bool DistributionNeedsUpdate = true;
uint16_t FreqOfMostAbundantRareFeature = 0;
uint16_t GlobalFeatureFreqs[kFeatureSetSize] = {};
- Vector<uint32_t> RareFeatures;
+ std::vector<uint32_t> RareFeatures;
+ std::bitset<kFeatureSetSize> IsRareFeature;
std::string OutputCorpus;
};
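
The new bitset turns the per-feature rarity check from a linear std::find over RareFeatures into a constant-time lookup, at a fixed cost of kFeatureSetSize bits:

std::bitset<kFeatureSetSize> IsRareFeature;  // 1 bit per feature
IsRareFeature[Idx] = true;                   // set when a rare feature is added
bool rare = IsRareFeature[Idx32];            // O(1) vs. the old linear search
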
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDataFlowTrace.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDataFlowTrace.cpp
index 23d422590d19..93bf817a857b 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDataFlowTrace.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDataFlowTrace.cpp
@@ -37,7 +37,7 @@ bool BlockCoverage::AppendCoverage(const std::string &S) {
// Coverage lines have this form:
// CN X Y Z T
// where N is the number of the function, T is the total number of instrumented
-// BBs, and X,Y,Z, if present, are the indecies of covered BB.
+// BBs, and X,Y,Z, if present, are the indices of covered BB.
// BB #0, which is the entry block, is not explicitly listed.
bool BlockCoverage::AppendCoverage(std::istream &IN) {
std::string L;
@@ -52,7 +52,7 @@ bool BlockCoverage::AppendCoverage(std::istream &IN) {
continue;
}
if (L[0] != 'C') continue;
- Vector<uint32_t> CoveredBlocks;
+ std::vector<uint32_t> CoveredBlocks;
while (true) {
uint32_t BB = 0;
SS >> BB;
@@ -68,7 +68,7 @@ bool BlockCoverage::AppendCoverage(std::istream &IN) {
auto It = Functions.find(FunctionId);
auto &Counters =
It == Functions.end()
- ? Functions.insert({FunctionId, Vector<uint32_t>(NumBlocks)})
+ ? Functions.insert({FunctionId, std::vector<uint32_t>(NumBlocks)})
.first->second
: It->second;
@@ -86,9 +86,9 @@ bool BlockCoverage::AppendCoverage(std::istream &IN) {
// * any uncovered function gets weight 0.
// * a function with lots of uncovered blocks gets bigger weight.
// * a function with a less frequently executed code gets bigger weight.
-Vector<double> BlockCoverage::FunctionWeights(size_t NumFunctions) const {
- Vector<double> Res(NumFunctions);
- for (auto It : Functions) {
+std::vector<double> BlockCoverage::FunctionWeights(size_t NumFunctions) const {
+ std::vector<double> Res(NumFunctions);
+ for (const auto &It : Functions) {
auto FunctionID = It.first;
auto Counters = It.second;
assert(FunctionID < NumFunctions);
@@ -104,7 +104,7 @@ Vector<double> BlockCoverage::FunctionWeights(size_t NumFunctions) const {
}
void DataFlowTrace::ReadCoverage(const std::string &DirPath) {
- Vector<SizedFile> Files;
+ std::vector<SizedFile> Files;
GetSizedFilesFromDir(DirPath, &Files);
for (auto &SF : Files) {
auto Name = Basename(SF.File);
@@ -115,16 +115,16 @@ void DataFlowTrace::ReadCoverage(const std::string &DirPath) {
}
}
-static void DFTStringAppendToVector(Vector<uint8_t> *DFT,
+static void DFTStringAppendToVector(std::vector<uint8_t> *DFT,
const std::string &DFTString) {
assert(DFT->size() == DFTString.size());
for (size_t I = 0, Len = DFT->size(); I < Len; I++)
(*DFT)[I] = DFTString[I] == '1';
}
-// converts a string of '0' and '1' into a Vector<uint8_t>
-static Vector<uint8_t> DFTStringToVector(const std::string &DFTString) {
- Vector<uint8_t> DFT(DFTString.size());
+// converts a string of '0' and '1' into a std::vector<uint8_t>
+static std::vector<uint8_t> DFTStringToVector(const std::string &DFTString) {
+ std::vector<uint8_t> DFT(DFTString.size());
DFTStringAppendToVector(&DFT, DFTString);
return DFT;
}
@@ -159,14 +159,14 @@ static bool ParseDFTLine(const std::string &Line, size_t *FunctionNum,
}
bool DataFlowTrace::Init(const std::string &DirPath, std::string *FocusFunction,
- Vector<SizedFile> &CorporaFiles, Random &Rand) {
+ std::vector<SizedFile> &CorporaFiles, Random &Rand) {
if (DirPath.empty()) return false;
Printf("INFO: DataFlowTrace: reading from '%s'\n", DirPath.c_str());
- Vector<SizedFile> Files;
+ std::vector<SizedFile> Files;
GetSizedFilesFromDir(DirPath, &Files);
std::string L;
size_t FocusFuncIdx = SIZE_MAX;
- Vector<std::string> FunctionNames;
+ std::vector<std::string> FunctionNames;
// Collect the hashes of the corpus files.
for (auto &SF : CorporaFiles)
@@ -191,7 +191,7 @@ bool DataFlowTrace::Init(const std::string &DirPath, std::string *FocusFunction,
// * chooses a random function according to the weights.
ReadCoverage(DirPath);
auto Weights = Coverage.FunctionWeights(NumFunctions);
- Vector<double> Intervals(NumFunctions + 1);
+ std::vector<double> Intervals(NumFunctions + 1);
std::iota(Intervals.begin(), Intervals.end(), 0);
auto Distribution = std::piecewise_constant_distribution<double>(
Intervals.begin(), Intervals.end(), Weights.begin());
@@ -247,7 +247,7 @@ bool DataFlowTrace::Init(const std::string &DirPath, std::string *FocusFunction,
}
int CollectDataFlow(const std::string &DFTBinary, const std::string &DirPath,
- const Vector<SizedFile> &CorporaFiles) {
+ const std::vector<SizedFile> &CorporaFiles) {
Printf("INFO: collecting data flow: bin: %s dir: %s files: %zd\n",
DFTBinary.c_str(), DirPath.c_str(), CorporaFiles.size());
if (CorporaFiles.empty()) {
@@ -265,7 +265,7 @@ int CollectDataFlow(const std::string &DFTBinary, const std::string &DirPath,
// we then request tags in [0,Size/2) and [Size/2, Size), and so on.
// Function number => DFT.
auto OutPath = DirPlusFile(DirPath, Hash(FileToVector(F.File)));
- std::unordered_map<size_t, Vector<uint8_t>> DFTMap;
+ std::unordered_map<size_t, std::vector<uint8_t>> DFTMap;
std::unordered_set<std::string> Cov;
Command Cmd;
Cmd.addArgument(DFTBinary);
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDataFlowTrace.h b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDataFlowTrace.h
index 07c03bb25651..054dce1bdcb6 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDataFlowTrace.h
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDataFlowTrace.h
@@ -39,7 +39,7 @@
namespace fuzzer {
int CollectDataFlow(const std::string &DFTBinary, const std::string &DirPath,
- const Vector<SizedFile> &CorporaFiles);
+ const std::vector<SizedFile> &CorporaFiles);
class BlockCoverage {
public:
@@ -77,11 +77,11 @@ public:
return Result;
}
- Vector<double> FunctionWeights(size_t NumFunctions) const;
+ std::vector<double> FunctionWeights(size_t NumFunctions) const;
void clear() { Functions.clear(); }
private:
- typedef Vector<uint32_t> CoverageVector;
+ typedef std::vector<uint32_t> CoverageVector;
uint32_t NumberOfCoveredBlocks(const CoverageVector &Counters) const {
uint32_t Res = 0;
@@ -117,9 +117,9 @@ class DataFlowTrace {
public:
void ReadCoverage(const std::string &DirPath);
bool Init(const std::string &DirPath, std::string *FocusFunction,
- Vector<SizedFile> &CorporaFiles, Random &Rand);
+ std::vector<SizedFile> &CorporaFiles, Random &Rand);
void Clear() { Traces.clear(); }
- const Vector<uint8_t> *Get(const std::string &InputSha1) const {
+ const std::vector<uint8_t> *Get(const std::string &InputSha1) const {
auto It = Traces.find(InputSha1);
if (It != Traces.end())
return &It->second;
@@ -128,9 +128,9 @@ class DataFlowTrace {
private:
// Input's sha1 => DFT for the FocusFunction.
- std::unordered_map<std::string, Vector<uint8_t> > Traces;
- BlockCoverage Coverage;
- std::unordered_set<std::string> CorporaHashes;
+ std::unordered_map<std::string, std::vector<uint8_t>> Traces;
+ BlockCoverage Coverage;
+ std::unordered_set<std::string> CorporaHashes;
};
} // namespace fuzzer
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDefs.h b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDefs.h
index 1a2752af2f4d..db1f74a545e3 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDefs.h
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDefs.h
@@ -38,28 +38,8 @@ struct ExternalFunctions;
// Global interface to functions that may or may not be available.
extern ExternalFunctions *EF;
-// We are using a custom allocator to give a different symbol name to STL
-// containers in order to avoid ODR violations.
-template<typename T>
- class fuzzer_allocator: public std::allocator<T> {
- public:
- fuzzer_allocator() = default;
-
- template<class U>
- fuzzer_allocator(const fuzzer_allocator<U>&) {}
-
- template<class Other>
- struct rebind { typedef fuzzer_allocator<Other> other; };
- };
-
-template<typename T>
-using Vector = std::vector<T, fuzzer_allocator<T>>;
-
-template<typename T>
-using Set = std::set<T, std::less<T>, fuzzer_allocator<T>>;
-
-typedef Vector<uint8_t> Unit;
-typedef Vector<Unit> UnitVector;
+typedef std::vector<uint8_t> Unit;
+typedef std::vector<Unit> UnitVector;
typedef int (*UserCallback)(const uint8_t *Data, size_t Size);
int FuzzerDriver(int *argc, char ***argv, UserCallback Callback);
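
The deleted fuzzer_allocator existed only to give libFuzzer's container instantiations mangled names distinct from the target's, so that statically linking both could not merge them under the ODR; the idea it implemented, as a sketch:

#include <memory>
#include <type_traits>
#include <vector>

template <typename T> class fuzzer_allocator : public std::allocator<T> {};

// Distinct allocator => distinct template instantiation => distinct symbols.
static_assert(!std::is_same<std::vector<int, fuzzer_allocator<int>>,
                            std::vector<int>>::value,
              "libFuzzer's vector never collides with the target's");
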
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDictionary.h b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDictionary.h
index db55907d9363..48f063c7ee4e 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDictionary.h
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDictionary.h
@@ -52,10 +52,13 @@ class DictionaryEntry {
public:
DictionaryEntry() {}
DictionaryEntry(Word W) : W(W) {}
- DictionaryEntry(Word W, size_t PositionHint) : W(W), PositionHint(PositionHint) {}
+ DictionaryEntry(Word W, size_t PositionHint)
+ : W(W), PositionHint(PositionHint) {}
const Word &GetW() const { return W; }
- bool HasPositionHint() const { return PositionHint != std::numeric_limits<size_t>::max(); }
+ bool HasPositionHint() const {
+ return PositionHint != std::numeric_limits<size_t>::max();
+ }
size_t GetPositionHint() const {
assert(HasPositionHint());
return PositionHint;
@@ -108,12 +111,12 @@ private:
};
// Parses one dictionary entry.
-// If successful, write the enty to Unit and returns true,
+// If successful, writes the entry to Unit and returns true,
// otherwise returns false.
bool ParseOneDictionaryEntry(const std::string &Str, Unit *U);
// Parses the dictionary file, fills Units, returns true iff all lines
// were parsed successfully.
-bool ParseDictionaryFile(const std::string &Text, Vector<Unit> *Units);
+bool ParseDictionaryFile(const std::string &Text, std::vector<Unit> *Units);
} // namespace fuzzer
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDriver.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDriver.cpp
index ceaa9070512f..8674d788932f 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDriver.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerDriver.cpp
@@ -86,7 +86,7 @@ static const FlagDescription FlagDescriptions [] {
static const size_t kNumFlags =
sizeof(FlagDescriptions) / sizeof(FlagDescriptions[0]);
-static Vector<std::string> *Inputs;
+static std::vector<std::string> *Inputs;
static std::string *ProgName;
static void PrintHelp() {
@@ -187,7 +187,7 @@ static bool ParseOneFlag(const char *Param) {
}
// We don't use any library to minimize dependencies.
-static void ParseFlags(const Vector<std::string> &Args,
+static void ParseFlags(const std::vector<std::string> &Args,
const ExternalFunctions *EF) {
for (size_t F = 0; F < kNumFlags; F++) {
if (FlagDescriptions[F].IntFlag)
@@ -206,7 +206,7 @@ static void ParseFlags(const Vector<std::string> &Args,
"Disabling -len_control by default.\n", EF->LLVMFuzzerCustomMutator);
}
- Inputs = new Vector<std::string>;
+ Inputs = new std::vector<std::string>;
for (size_t A = 1; A < Args.size(); A++) {
if (ParseOneFlag(Args[A].c_str())) {
if (Flags.ignore_remaining_args)
@@ -272,7 +272,7 @@ static void ValidateDirectoryExists(const std::string &Path,
exit(1);
}
-std::string CloneArgsWithoutX(const Vector<std::string> &Args,
+std::string CloneArgsWithoutX(const std::vector<std::string> &Args,
const char *X1, const char *X2) {
std::string Cmd;
for (auto &S : Args) {
@@ -283,18 +283,22 @@ std::string CloneArgsWithoutX(const Vector<std::string> &Args,
return Cmd;
}
-static int RunInMultipleProcesses(const Vector<std::string> &Args,
+static int RunInMultipleProcesses(const std::vector<std::string> &Args,
unsigned NumWorkers, unsigned NumJobs) {
std::atomic<unsigned> Counter(0);
std::atomic<bool> HasErrors(false);
Command Cmd(Args);
Cmd.removeFlag("jobs");
Cmd.removeFlag("workers");
- Vector<std::thread> V;
+ std::vector<std::thread> V;
std::thread Pulse(PulseThread);
Pulse.detach();
- for (unsigned i = 0; i < NumWorkers; i++)
- V.push_back(std::thread(WorkerThread, std::ref(Cmd), &Counter, NumJobs, &HasErrors));
+ V.resize(NumWorkers);
+ for (unsigned i = 0; i < NumWorkers; i++) {
+ V[i] = std::thread(WorkerThread, std::ref(Cmd), &Counter, NumJobs,
+ &HasErrors);
+ SetThreadName(V[i], "FuzzerWorker");
+ }
for (auto &T : V)
T.join();
return HasErrors ? 1 : 0;
@@ -348,8 +352,8 @@ static std::string GetDedupTokenFromCmdOutput(const std::string &S) {
return S.substr(Beg, End - Beg);
}
-int CleanseCrashInput(const Vector<std::string> &Args,
- const FuzzingOptions &Options) {
+int CleanseCrashInput(const std::vector<std::string> &Args,
+ const FuzzingOptions &Options) {
if (Inputs->size() != 1 || !Flags.exact_artifact_path) {
Printf("ERROR: -cleanse_crash should be given one input file and"
" -exact_artifact_path\n");
@@ -372,7 +376,7 @@ int CleanseCrashInput(const Vector<std::string> &Args,
auto U = FileToVector(CurrentFilePath);
size_t Size = U.size();
- const Vector<uint8_t> ReplacementBytes = {' ', 0xff};
+ const std::vector<uint8_t> ReplacementBytes = {' ', 0xff};
for (int NumAttempts = 0; NumAttempts < 5; NumAttempts++) {
bool Changed = false;
for (size_t Idx = 0; Idx < Size; Idx++) {
@@ -403,7 +407,7 @@ int CleanseCrashInput(const Vector<std::string> &Args,
return 0;
}
-int MinimizeCrashInput(const Vector<std::string> &Args,
+int MinimizeCrashInput(const std::vector<std::string> &Args,
const FuzzingOptions &Options) {
if (Inputs->size() != 1) {
Printf("ERROR: -minimize_crash should be given one input file\n");
@@ -462,7 +466,7 @@ int MinimizeCrashInput(const Vector<std::string> &Args,
CurrentFilePath = Flags.exact_artifact_path;
WriteToFile(U, CurrentFilePath);
}
- Printf("CRASH_MIN: failed to minimize beyond %s (%d bytes), exiting\n",
+ Printf("CRASH_MIN: failed to minimize beyond %s (%zu bytes), exiting\n",
CurrentFilePath.c_str(), U.size());
break;
}
@@ -500,17 +504,17 @@ int MinimizeCrashInputInternalStep(Fuzzer *F, InputCorpus *Corpus) {
F->MinimizeCrashLoop(U);
Printf("INFO: Done MinimizeCrashInputInternalStep, no crashes found\n");
exit(0);
- return 0;
}
-void Merge(Fuzzer *F, FuzzingOptions &Options, const Vector<std::string> &Args,
- const Vector<std::string> &Corpora, const char *CFPathOrNull) {
+void Merge(Fuzzer *F, FuzzingOptions &Options,
+ const std::vector<std::string> &Args,
+ const std::vector<std::string> &Corpora, const char *CFPathOrNull) {
if (Corpora.size() < 2) {
Printf("INFO: Merge requires two or more corpus dirs\n");
exit(0);
}
- Vector<SizedFile> OldCorpus, NewCorpus;
+ std::vector<SizedFile> OldCorpus, NewCorpus;
GetSizedFilesFromDir(Corpora[0], &OldCorpus);
for (size_t i = 1; i < Corpora.size(); i++)
GetSizedFilesFromDir(Corpora[i], &NewCorpus);
@@ -518,10 +522,10 @@ void Merge(Fuzzer *F, FuzzingOptions &Options, const Vector<std::string> &Args,
std::sort(NewCorpus.begin(), NewCorpus.end());
std::string CFPath = CFPathOrNull ? CFPathOrNull : TempPath("Merge", ".txt");
- Vector<std::string> NewFiles;
- Set<uint32_t> NewFeatures, NewCov;
+ std::vector<std::string> NewFiles;
+ std::set<uint32_t> NewFeatures, NewCov;
CrashResistantMerge(Args, OldCorpus, NewCorpus, &NewFiles, {}, &NewFeatures,
- {}, &NewCov, CFPath, true);
+ {}, &NewCov, CFPath, true, Flags.set_cover_merge);
for (auto &Path : NewFiles)
F->WriteToOutputCorpus(FileToVector(Path, Options.MaxLen));
// We are done, delete the control file if it was a temporary one.
@@ -531,17 +535,17 @@ void Merge(Fuzzer *F, FuzzingOptions &Options, const Vector<std::string> &Args,
exit(0);
}
-int AnalyzeDictionary(Fuzzer *F, const Vector<Unit>& Dict,
- UnitVector& Corpus) {
- Printf("Started dictionary minimization (up to %d tests)\n",
+int AnalyzeDictionary(Fuzzer *F, const std::vector<Unit> &Dict,
+ UnitVector &Corpus) {
+ Printf("Started dictionary minimization (up to %zu tests)\n",
Dict.size() * Corpus.size() * 2);
// Scores and usage count for each dictionary unit.
- Vector<int> Scores(Dict.size());
- Vector<int> Usages(Dict.size());
+ std::vector<int> Scores(Dict.size());
+ std::vector<int> Usages(Dict.size());
- Vector<size_t> InitialFeatures;
- Vector<size_t> ModifiedFeatures;
+ std::vector<size_t> InitialFeatures;
+ std::vector<size_t> ModifiedFeatures;
for (auto &C : Corpus) {
// Get coverage for the testcase without modifications.
F->ExecuteCallback(C.data(), C.size());
@@ -551,7 +555,7 @@ int AnalyzeDictionary(Fuzzer *F, const Vector<Unit>& Dict,
});
for (size_t i = 0; i < Dict.size(); ++i) {
- Vector<uint8_t> Data = C;
+ std::vector<uint8_t> Data = C;
auto StartPos = std::search(Data.begin(), Data.end(),
Dict[i].begin(), Dict[i].end());
// Skip dictionary unit, if the testcase does not contain it.
@@ -597,9 +601,9 @@ int AnalyzeDictionary(Fuzzer *F, const Vector<Unit>& Dict,
return 0;
}
-Vector<std::string> ParseSeedInuts(const char *seed_inputs) {
+std::vector<std::string> ParseSeedInuts(const char *seed_inputs) {
// Parse -seed_inputs=file1,file2,... or -seed_inputs=@seed_inputs_file
- Vector<std::string> Files;
+ std::vector<std::string> Files;
if (!seed_inputs) return Files;
std::string SeedInputs;
if (Flags.seed_inputs[0] == '@')
@@ -620,9 +624,10 @@ Vector<std::string> ParseSeedInuts(const char *seed_inputs) {
return Files;
}
-static Vector<SizedFile> ReadCorpora(const Vector<std::string> &CorpusDirs,
- const Vector<std::string> &ExtraSeedFiles) {
- Vector<SizedFile> SizedFiles;
+static std::vector<SizedFile>
+ReadCorpora(const std::vector<std::string> &CorpusDirs,
+ const std::vector<std::string> &ExtraSeedFiles) {
+ std::vector<SizedFile> SizedFiles;
size_t LastNumFiles = 0;
for (auto &Dir : CorpusDirs) {
GetSizedFilesFromDir(Dir, &SizedFiles);
@@ -645,7 +650,7 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {
EF->LLVMFuzzerInitialize(argc, argv);
if (EF->__msan_scoped_disable_interceptor_checks)
EF->__msan_scoped_disable_interceptor_checks();
- const Vector<std::string> Args(*argv, *argv + *argc);
+ const std::vector<std::string> Args(*argv, *argv + *argc);
assert(!Args.empty());
ProgName = new std::string(Args[0]);
if (Argv0 != *ProgName) {
@@ -734,7 +739,7 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {
ValidateDirectoryExists(DirName(Options.ExactArtifactPath),
Flags.create_missing_dirs);
}
- Vector<Unit> Dictionary;
+ std::vector<Unit> Dictionary;
if (Flags.dict)
if (!ParseDictionaryFile(FileToString(Flags.dict), &Dictionary))
return 1;
@@ -776,7 +781,7 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {
if (!Options.FocusFunction.empty())
Options.Entropic = false; // FocusFunction overrides entropic scheduling.
if (Options.Entropic)
- Printf("INFO: Running with entropic power schedule (0x%X, %d).\n",
+ Printf("INFO: Running with entropic power schedule (0x%zX, %zu).\n",
Options.EntropicFeatureFrequencyThreshold,
Options.EntropicNumberOfRarestFeatures);
struct EntropicOptions Entropic;
@@ -794,7 +799,8 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {
if (Flags.verbosity)
Printf("INFO: Seed: %u\n", Seed);
- if (Flags.collect_data_flow && !Flags.fork && !Flags.merge) {
+ if (Flags.collect_data_flow && Flags.data_flow_trace && !Flags.fork &&
+ !(Flags.merge || Flags.set_cover_merge)) {
if (RunIndividualFiles)
return CollectDataFlow(Flags.collect_data_flow, Flags.data_flow_trace,
ReadCorpora({}, *Inputs));
@@ -856,7 +862,7 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {
RunOneTest(F, Path.c_str(), Options.MaxLen);
auto StopTime = system_clock::now();
auto MS = duration_cast<milliseconds>(StopTime - StartTime).count();
- Printf("Executed %s in %zd ms\n", Path.c_str(), (long)MS);
+ Printf("Executed %s in %ld ms\n", Path.c_str(), (long)MS);
}
Printf("***\n"
"*** NOTE: fuzzing was not performed, you have only\n"
@@ -866,10 +872,11 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {
exit(0);
}
+ Options.ForkCorpusGroups = Flags.fork_corpus_groups;
if (Flags.fork)
FuzzWithFork(F->GetMD().GetRand(), Options, Args, *Inputs, Flags.fork);
- if (Flags.merge)
+ if (Flags.merge || Flags.set_cover_merge)
Merge(F, Options, Args, *Inputs, Flags.merge_control_file);
if (Flags.merge_inner) {
@@ -877,7 +884,8 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {
if (Options.MaxLen == 0)
F->SetMaxInputLen(kDefaultMaxMergeLen);
assert(Flags.merge_control_file);
- F->CrashResistantMergeInternalStep(Flags.merge_control_file);
+ F->CrashResistantMergeInternalStep(Flags.merge_control_file,
+ !strncmp(Flags.merge_inner, "2", 1));
exit(0);
}
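The inner merge process decodes the internal -merge_inner flag back into the set-cover choice here; isolated as a sketch, the wiring is just:

  // merge_inner carries "1" for the classic merge and "2" for set-cover
  // merge; CrashResistantMerge (FuzzerMerge.cpp below) adds the flag to the
  // sub-process command line.
  bool IsSetCoverMerge = !strncmp(Flags.merge_inner, "2", 1);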
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerExtraCounters.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerExtraCounters.cpp
index 04f569a1a879..54ecbf7c62f1 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerExtraCounters.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerExtraCounters.cpp
@@ -31,12 +31,4 @@ void ClearExtraCounters() { // hand-written memset, don't asan-ify.
} // namespace fuzzer
-#else
-// TODO: implement for other platforms.
-namespace fuzzer {
-uint8_t *ExtraCountersBegin() { return nullptr; }
-uint8_t *ExtraCountersEnd() { return nullptr; }
-void ClearExtraCounters() {}
-} // namespace fuzzer
-
#endif
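For context, extra counters let the target publish its own 8-bit coverage counters to libFuzzer. On ELF platforms the user side looks roughly like the sketch below; the array name and size are the user's choice, only the section name matters:

  #include <cstdint>

  // The linker synthesizes __start___libfuzzer_extra_counters and
  // __stop___libfuzzer_extra_counters around this section, which
  // ExtraCountersBegin()/ExtraCountersEnd() then return.
  __attribute__((section("__libfuzzer_extra_counters")))
  static uint8_t ExtraCounters[64 * 1024];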
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerExtraCountersDarwin.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerExtraCountersDarwin.cpp
new file mode 100644
index 000000000000..2321ba8a3d40
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerExtraCountersDarwin.cpp
@@ -0,0 +1,22 @@
+//===- FuzzerExtraCountersDarwin.cpp - Extra coverage counters for Darwin -===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Extra coverage counters defined by user code for Darwin.
+//===----------------------------------------------------------------------===//
+
+#include "FuzzerPlatform.h"
+#include <cstdint>
+
+#if LIBFUZZER_APPLE
+
+namespace fuzzer {
+uint8_t *ExtraCountersBegin() { return nullptr; }
+uint8_t *ExtraCountersEnd() { return nullptr; }
+void ClearExtraCounters() {}
+} // namespace fuzzer
+
+#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerExtraCountersWindows.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerExtraCountersWindows.cpp
new file mode 100644
index 000000000000..102f5febdaec
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerExtraCountersWindows.cpp
@@ -0,0 +1,80 @@
+//===- FuzzerExtraCountersWindows.cpp - Extra coverage counters for Win32 -===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Extra coverage counters defined by user code for Windows.
+//===----------------------------------------------------------------------===//
+
+#include "FuzzerPlatform.h"
+#include <cstdint>
+
+#if LIBFUZZER_WINDOWS
+#include <windows.h>
+
+namespace fuzzer {
+
+//
+// The __start___libfuzzer_extra_counters variable is aligned to 16 bytes and
+// is 16 bytes in size, which ensures the padding between it and the next
+// variable in this section (either __libfuzzer_extra_counters or
+// __stop___libfuzzer_extra_counters) will be located at
+// (__start___libfuzzer_extra_counters +
+// sizeof(__start___libfuzzer_extra_counters)). Otherwise, the calculation of
+// (stop - (start + sizeof(start))) might be skewed.
+//
+// The section name __libfuzzer_extra_countaaa ends with "aaa", so it sorts
+// before __libfuzzer_extra_counters alphabetically. We want the start symbol
+// to be placed in the section just before the user-supplied counters (if
+// present).
+//
+#pragma section(".data$__libfuzzer_extra_countaaa")
+ATTRIBUTE_ALIGNED(16)
+__declspec(allocate(".data$__libfuzzer_extra_countaaa")) uint8_t
+ __start___libfuzzer_extra_counters[16] = {0};
+
+//
+// Example of what the user-supplied counters should look like. First, the
+// pragma to create the section name. It will fall alphabetically between
+// ".data$__libfuzzer_extra_countaaa" and ".data$__libfuzzer_extra_countzzz".
+// Next, the declspec to allocate the variable inside the specified section.
+// Finally, the counter storage itself: an array, a struct, or any object used
+// to track the counter data. The size of this variable is computed at runtime
+// as (__stop___libfuzzer_extra_counters - (__start___libfuzzer_extra_counters
+// + sizeof(__start___libfuzzer_extra_counters))).
+//
+
+//
+// #pragma section(".data$__libfuzzer_extra_counters")
+// __declspec(allocate(".data$__libfuzzer_extra_counters"))
+// uint8_t any_name_variable[64 * 1024];
+//
+
+//
+// Here, the section name __libfuzzer_extra_countzzz ends with "zzz", so it
+// sorts after __libfuzzer_extra_counters alphabetically. We want the stop
+// symbol to be placed in the section just after the user-supplied counters (if
+// present). Align to 1 so there isn't any padding placed between this and the
+// previous variable.
+//
+#pragma section(".data$__libfuzzer_extra_countzzz")
+ATTRIBUTE_ALIGNED(1)
+__declspec(allocate(".data$__libfuzzer_extra_countzzz")) uint8_t
+ __stop___libfuzzer_extra_counters = 0;
+
+uint8_t *ExtraCountersBegin() {
+ return __start___libfuzzer_extra_counters +
+ sizeof(__start___libfuzzer_extra_counters);
+}
+
+uint8_t *ExtraCountersEnd() { return &__stop___libfuzzer_extra_counters; }
+
+ATTRIBUTE_NO_SANITIZE_ALL
+void ClearExtraCounters() {
+ uint8_t *Beg = ExtraCountersBegin();
+ SecureZeroMemory(Beg, ExtraCountersEnd() - Beg);
+}
+
+} // namespace fuzzer
+
+#endif
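Written out, the user side mirrors the commented example above (any variable name works; the section name is what matters):

  #pragma section(".data$__libfuzzer_extra_counters")
  __declspec(allocate(".data$__libfuzzer_extra_counters"))
  uint8_t MyExtraCounters[64 * 1024];

Because the linker sorts .data$ subsections alphabetically, this lands between the countaaa start marker and the countzzz stop marker, so the range computed by ExtraCountersBegin()/ExtraCountersEnd() covers exactly the user's counters.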
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerFlags.def b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerFlags.def
index ab31da0ae5d6..fc3b3aa8c98a 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerFlags.def
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerFlags.def
@@ -58,12 +58,21 @@ FUZZER_FLAG_INT(max_total_time, 0, "If positive, indicates the maximal total "
FUZZER_FLAG_INT(help, 0, "Print help.")
FUZZER_FLAG_INT(fork, 0, "Experimental mode where fuzzing happens "
"in a subprocess")
+FUZZER_FLAG_INT(fork_corpus_groups, 0, "For fork mode, enable the corpus-group "
+ "strategy, The main corpus will be grouped according to size, "
+ "and each sub-process will randomly select seeds from different "
+ "groups as the sub-corpus.")
FUZZER_FLAG_INT(ignore_timeouts, 1, "Ignore timeouts in fork mode")
FUZZER_FLAG_INT(ignore_ooms, 1, "Ignore OOMs in fork mode")
FUZZER_FLAG_INT(ignore_crashes, 0, "Ignore crashes in fork mode")
FUZZER_FLAG_INT(merge, 0, "If 1, the 2-nd, 3-rd, etc corpora will be "
"merged into the 1-st corpus. Only interesting units will be taken. "
"This flag can be used to minimize a corpus.")
+FUZZER_FLAG_INT(set_cover_merge, 0, "If 1, the 2-nd, 3-rd, etc corpora will be "
+ "merged into the 1-st corpus. Same as the 'merge' flag, but uses the "
+ "standard greedy algorithm for the set cover problem to "
+ "compute an approximation of the minimum set of testcases that "
+ "provide the same coverage as the initial corpora")
FUZZER_FLAG_STRING(stop_file, "Stop fuzzing ASAP if this file exists")
FUZZER_FLAG_STRING(merge_inner, "internal flag")
FUZZER_FLAG_STRING(merge_control_file,
@@ -158,7 +167,7 @@ FUZZER_FLAG_INT(purge_allocator_interval, 1, "Purge allocator caches and "
"purge_allocator_interval=-1 to disable this functionality.")
FUZZER_FLAG_INT(trace_malloc, 0, "If >= 1 will print all mallocs/frees. "
"If >= 2 will also print stack traces.")
-FUZZER_FLAG_INT(rss_limit_mb, 2048, "If non-zero, the fuzzer will exit upon"
+FUZZER_FLAG_INT(rss_limit_mb, 2048, "If non-zero, the fuzzer will exit upon "
"reaching this limit of RSS memory usage.")
FUZZER_FLAG_INT(malloc_limit_mb, 0, "If non-zero, the fuzzer will exit "
"if the target tries to allocate this number of Mb with one malloc call. "
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerFork.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerFork.cpp
index 5134a5d979e6..c248a1d246a3 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerFork.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerFork.cpp
@@ -86,18 +86,21 @@ struct FuzzJob {
};
struct GlobalEnv {
- Vector<std::string> Args;
- Vector<std::string> CorpusDirs;
+ std::vector<std::string> Args;
+ std::vector<std::string> CorpusDirs;
std::string MainCorpusDir;
std::string TempDir;
std::string DFTDir;
std::string DataFlowBinary;
- Set<uint32_t> Features, Cov;
- Set<std::string> FilesWithDFT;
- Vector<std::string> Files;
+ std::set<uint32_t> Features, Cov;
+ std::set<std::string> FilesWithDFT;
+ std::vector<std::string> Files;
+ std::vector<std::size_t> FilesSizes;
Random *Rand;
std::chrono::system_clock::time_point ProcessStartTime;
int Verbosity = 0;
+ int Group = 0;
+ int NumCorpuses = 8;
size_t NumTimeouts = 0;
size_t NumOOMs = 0;
@@ -136,10 +139,24 @@ struct GlobalEnv {
if (size_t CorpusSubsetSize =
std::min(Files.size(), (size_t)sqrt(Files.size() + 2))) {
auto Time1 = std::chrono::system_clock::now();
- for (size_t i = 0; i < CorpusSubsetSize; i++) {
- auto &SF = Files[Rand->SkewTowardsLast(Files.size())];
- Seeds += (Seeds.empty() ? "" : ",") + SF;
- CollectDFT(SF);
+ if (Group) { // Corpus-group mode: draw this job's seeds from its group.
+ size_t AverageCorpusSize = Files.size() / NumCorpuses + 1;
+ size_t StartIndex = ((JobId - 1) % NumCorpuses) * AverageCorpusSize;
+ for (size_t i = 0; i < CorpusSubsetSize; i++) {
+ size_t RandNum = (*Rand)(AverageCorpusSize);
+ size_t Index = RandNum + StartIndex;
+ Index = Index < Files.size() ? Index
+ : Rand->SkewTowardsLast(Files.size());
+ auto &SF = Files[Index];
+ Seeds += (Seeds.empty() ? "" : ",") + SF;
+ CollectDFT(SF);
+ }
+ } else {
+ for (size_t i = 0; i < CorpusSubsetSize; i++) {
+ auto &SF = Files[Rand->SkewTowardsLast(Files.size())];
+ Seeds += (Seeds.empty() ? "" : ",") + SF;
+ CollectDFT(SF);
+ }
}
auto Time2 = std::chrono::system_clock::now();
auto DftTimeInSeconds = duration_cast<seconds>(Time2 - Time1).count();
@@ -183,7 +200,7 @@ struct GlobalEnv {
auto Stats = ParseFinalStatsFromLog(Job->LogPath);
NumRuns += Stats.number_of_executed_units;
- Vector<SizedFile> TempFiles, MergeCandidates;
+ std::vector<SizedFile> TempFiles, MergeCandidates;
// Read all newly created inputs and their feature sets.
// Choose only those inputs that have new features.
GetSizedFilesFromDir(Job->CorpusDir, &TempFiles);
@@ -193,7 +210,7 @@ struct GlobalEnv {
FeatureFile.replace(0, Job->CorpusDir.size(), Job->FeaturesDir);
auto FeatureBytes = FileToVector(FeatureFile, 0, false);
assert((FeatureBytes.size() % sizeof(uint32_t)) == 0);
- Vector<uint32_t> NewFeatures(FeatureBytes.size() / sizeof(uint32_t));
+ std::vector<uint32_t> NewFeatures(FeatureBytes.size() / sizeof(uint32_t));
memcpy(NewFeatures.data(), FeatureBytes.data(), FeatureBytes.size());
for (auto Ft : NewFeatures) {
if (!Features.count(Ft)) {
@@ -203,7 +220,7 @@ struct GlobalEnv {
}
}
// if (!FilesToAdd.empty() || Job->ExitCode != 0)
- Printf("#%zd: cov: %zd ft: %zd corp: %zd exec/s %zd "
+ Printf("#%zd: cov: %zd ft: %zd corp: %zd exec/s: %zd "
"oom/timeout/crash: %zd/%zd/%zd time: %zds job: %zd dft_time: %d\n",
NumRuns, Cov.size(), Features.size(), Files.size(),
Stats.average_exec_per_sec, NumOOMs, NumTimeouts, NumCrashes,
@@ -211,15 +228,27 @@ struct GlobalEnv {
if (MergeCandidates.empty()) return;
- Vector<std::string> FilesToAdd;
- Set<uint32_t> NewFeatures, NewCov;
+ std::vector<std::string> FilesToAdd;
+ std::set<uint32_t> NewFeatures, NewCov;
+ bool IsSetCoverMerge =
+ !Job->Cmd.getFlagValue("set_cover_merge").compare("1");
CrashResistantMerge(Args, {}, MergeCandidates, &FilesToAdd, Features,
- &NewFeatures, Cov, &NewCov, Job->CFPath, false);
+ &NewFeatures, Cov, &NewCov, Job->CFPath, false,
+ IsSetCoverMerge);
for (auto &Path : FilesToAdd) {
auto U = FileToVector(Path);
auto NewPath = DirPlusFile(MainCorpusDir, Hash(U));
WriteToFile(U, NewPath);
- Files.push_back(NewPath);
+ if (Group) { // Keep Files sorted by size: insert at the matching position.
+ size_t UnitSize = U.size();
+ auto Idx =
+ std::upper_bound(FilesSizes.begin(), FilesSizes.end(), UnitSize) -
+ FilesSizes.begin();
+ FilesSizes.insert(FilesSizes.begin() + Idx, UnitSize);
+ Files.insert(Files.begin() + Idx, NewPath);
+ } else {
+ Files.push_back(NewPath);
+ }
}
Features.insert(NewFeatures.begin(), NewFeatures.end());
Cov.insert(NewCov.begin(), NewCov.end());
@@ -228,10 +257,8 @@ struct GlobalEnv {
if (TPC.PcIsFuncEntry(TE))
PrintPC(" NEW_FUNC: %p %F %L\n", "",
TPC.GetNextInstructionPc(TE->PC));
-
}
-
void CollectDFT(const std::string &InputPath) {
if (DataFlowBinary.empty()) return;
if (!FilesWithDFT.insert(InputPath).second) return;
@@ -283,8 +310,8 @@ void WorkerThread(JobQueue *FuzzQ, JobQueue *MergeQ) {
// This is just a skeleton of an experimental -fork=1 feature.
void FuzzWithFork(Random &Rand, const FuzzingOptions &Options,
- const Vector<std::string> &Args,
- const Vector<std::string> &CorpusDirs, int NumJobs) {
+ const std::vector<std::string> &Args,
+ const std::vector<std::string> &CorpusDirs, int NumJobs) {
Printf("INFO: -fork=%d: fuzzing in separate process(s)\n", NumJobs);
GlobalEnv Env;
@@ -294,8 +321,9 @@ void FuzzWithFork(Random &Rand, const FuzzingOptions &Options,
Env.Verbosity = Options.Verbosity;
Env.ProcessStartTime = std::chrono::system_clock::now();
Env.DataFlowBinary = Options.CollectDataFlow;
+ Env.Group = Options.ForkCorpusGroups;
- Vector<SizedFile> SeedFiles;
+ std::vector<SizedFile> SeedFiles;
for (auto &Dir : CorpusDirs)
GetSizedFilesFromDir(Dir, &SeedFiles);
std::sort(SeedFiles.begin(), SeedFiles.end());
@@ -316,13 +344,20 @@ void FuzzWithFork(Random &Rand, const FuzzingOptions &Options,
Env.Files.push_back(File.File);
} else {
auto CFPath = DirPlusFile(Env.TempDir, "merge.txt");
- Set<uint32_t> NewFeatures, NewCov;
+ std::set<uint32_t> NewFeatures, NewCov;
CrashResistantMerge(Env.Args, {}, SeedFiles, &Env.Files, Env.Features,
- &NewFeatures, Env.Cov, &NewCov, CFPath, false);
+ &NewFeatures, Env.Cov, &NewCov, CFPath,
+ /*Verbose=*/false, /*IsSetCoverMerge=*/false);
Env.Features.insert(NewFeatures.begin(), NewFeatures.end());
Env.Cov.insert(NewFeatures.begin(), NewFeatures.end());
RemoveFile(CFPath);
}
+
+ if (Env.Group) {
+ for (auto &path : Env.Files)
+ Env.FilesSizes.push_back(FileSize(path));
+ }
+
Printf("INFO: -fork=%d: %zd seed inputs, starting to fuzz in %s\n", NumJobs,
Env.Files.size(), Env.TempDir.c_str());
@@ -337,8 +372,10 @@ void FuzzWithFork(Random &Rand, const FuzzingOptions &Options,
WriteToFile(Unit({1}), Env.StopFile());
};
+ size_t MergeCycle = 20;
+ size_t JobExecuted = 0;
size_t JobId = 1;
- Vector<std::thread> Threads;
+ std::vector<std::thread> Threads;
for (int t = 0; t < NumJobs; t++) {
Threads.push_back(std::thread(WorkerThread, &FuzzQ, &MergeQ));
FuzzQ.Push(Env.CreateNewJob(JobId++));
@@ -358,7 +395,46 @@ void FuzzWithFork(Random &Rand, const FuzzingOptions &Options,
Env.RunOneMergeJob(Job.get());
- // Continue if our crash is one of the ignorred ones.
+ // Periodically re-merge the main corpus in group mode.
+ JobExecuted++;
+ if (Env.Group && JobExecuted >= MergeCycle) {
+ std::vector<SizedFile> CurrentSeedFiles;
+ for (auto &Dir : CorpusDirs)
+ GetSizedFilesFromDir(Dir, &CurrentSeedFiles);
+ std::sort(CurrentSeedFiles.begin(), CurrentSeedFiles.end());
+
+ auto CFPath = DirPlusFile(Env.TempDir, "merge.txt");
+ std::set<uint32_t> TmpNewFeatures, TmpNewCov;
+ std::set<uint32_t> TmpFeatures, TmpCov;
+ Env.Files.clear();
+ Env.FilesSizes.clear();
+ CrashResistantMerge(Env.Args, {}, CurrentSeedFiles, &Env.Files,
+ TmpFeatures, &TmpNewFeatures, TmpCov, &TmpNewCov,
+ CFPath, /*Verbose=*/false, /*IsSetCoverMerge=*/false);
+ for (auto &path : Env.Files)
+ Env.FilesSizes.push_back(FileSize(path));
+ RemoveFile(CFPath);
+ JobExecuted = 0;
+ MergeCycle += 5;
+ }
+
+ // The number of corpus seeds grows over time, so the number of groups is
+ // adjusted dynamically to keep each group at roughly three times the
+ // number of seeds selected per job.
+ if (Env.Files.size() < 2000)
+ Env.NumCorpuses = 12;
+ else if (Env.Files.size() < 6000)
+ Env.NumCorpuses = 20;
+ else if (Env.Files.size() < 12000)
+ Env.NumCorpuses = 32;
+ else if (Env.Files.size() < 16000)
+ Env.NumCorpuses = 40;
+ else if (Env.Files.size() < 24000)
+ Env.NumCorpuses = 60;
+ else
+ Env.NumCorpuses = 80;
+
+ // Continue if our crash is one of the ignored ones.
if (Options.IgnoreTimeouts && ExitCode == Options.TimeoutExitCode)
Env.NumTimeouts++;
else if (Options.IgnoreOOMs && ExitCode == Options.OOMExitCode)
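The group-selection arithmetic from CreateNewJob above, isolated as a sketch (a hypothetical helper; the real code draws RandNum from *Rand and replaces an out-of-range index with Rand->SkewTowardsLast over the size-sorted Files):

  #include <cstddef>

  // Map job J (1-based) onto one of NumCorpuses contiguous slices of the
  // size-sorted corpus, then pick the RandNum-th seed inside that slice.
  size_t PickGroupedSeedIndex(size_t JobId, size_t NumCorpuses,
                              size_t NumFiles, size_t RandNum) {
    size_t AverageCorpusSize = NumFiles / NumCorpuses + 1;
    size_t StartIndex = ((JobId - 1) % NumCorpuses) * AverageCorpusSize;
    size_t Index = StartIndex + RandNum;
    return Index < NumFiles ? Index : NumFiles - 1;  // sketch-only clamp
  }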
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerFork.h b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerFork.h
index b29a43e13fbc..fc3e9d636cbc 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerFork.h
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerFork.h
@@ -17,8 +17,8 @@
namespace fuzzer {
void FuzzWithFork(Random &Rand, const FuzzingOptions &Options,
- const Vector<std::string> &Args,
- const Vector<std::string> &CorpusDirs, int NumJobs);
+ const std::vector<std::string> &Args,
+ const std::vector<std::string> &CorpusDirs, int NumJobs);
} // namespace fuzzer
#endif // LLVM_FUZZER_FORK_H
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIO.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIO.cpp
index 7f149ac6c485..54cc4ee54be0 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIO.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIO.cpp
@@ -23,6 +23,14 @@ namespace fuzzer {
static FILE *OutputFile = stderr;
+FILE *GetOutputFile() {
+ return OutputFile;
+}
+
+void SetOutputFile(FILE *NewOutputFile) {
+ OutputFile = NewOutputFile;
+}
+
long GetEpoch(const std::string &Path) {
struct stat St;
if (stat(Path.c_str(), &St))
@@ -57,7 +65,7 @@ std::string FileToString(const std::string &Path) {
}
void CopyFileToErr(const std::string &Path) {
- Printf("%s", FileToString(Path).c_str());
+ Puts(FileToString(Path).c_str());
}
void WriteToFile(const Unit &U, const std::string &Path) {
@@ -90,11 +98,11 @@ void AppendToFile(const uint8_t *Data, size_t Size, const std::string &Path) {
fclose(Out);
}
-void ReadDirToVectorOfUnits(const char *Path, Vector<Unit> *V, long *Epoch,
+void ReadDirToVectorOfUnits(const char *Path, std::vector<Unit> *V, long *Epoch,
size_t MaxSize, bool ExitOnError,
- Vector<std::string> *VPaths) {
+ std::vector<std::string> *VPaths) {
long E = Epoch ? *Epoch : 0;
- Vector<std::string> Files;
+ std::vector<std::string> Files;
ListFilesInDirRecursive(Path, Epoch, &Files, /*TopDir*/true);
size_t NumLoaded = 0;
for (size_t i = 0; i < Files.size(); i++) {
@@ -112,8 +120,8 @@ void ReadDirToVectorOfUnits(const char *Path, Vector<Unit> *V, long *Epoch,
}
}
-void GetSizedFilesFromDir(const std::string &Dir, Vector<SizedFile> *V) {
- Vector<std::string> Files;
+void GetSizedFilesFromDir(const std::string &Dir, std::vector<SizedFile> *V) {
+ std::vector<std::string> Files;
ListFilesInDirRecursive(Dir, 0, &Files, /*TopDir*/true);
for (auto &File : Files)
if (size_t Size = FileSize(File))
@@ -143,6 +151,11 @@ void CloseStdout() {
DiscardOutput(1);
}
+void Puts(const char *Str) {
+ fputs(Str, OutputFile);
+ fflush(OutputFile);
+}
+
void Printf(const char *Fmt, ...) {
va_list ap;
va_start(ap, Fmt);
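A usage sketch for the new output-file hooks (marked "For testing" in FuzzerIO.h below): redirect libFuzzer's output, exercise the code, restore:

  #include <cstdio>
  #include "FuzzerIO.h"

  void CaptureOutputExample() {
    FILE *Old = fuzzer::GetOutputFile();
    FILE *Tmp = std::tmpfile();       // assumption: a temp stream suffices
    fuzzer::SetOutputFile(Tmp);
    fuzzer::Puts("captured line\n");  // written to Tmp and flushed
    fuzzer::SetOutputFile(Old);       // restore before closing Tmp
    std::fclose(Tmp);
  }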
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIO.h b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIO.h
index bde18267ea36..874caad1baed 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIO.h
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIO.h
@@ -32,9 +32,9 @@ void WriteToFile(const Unit &U, const std::string &Path);
void AppendToFile(const uint8_t *Data, size_t Size, const std::string &Path);
void AppendToFile(const std::string &Data, const std::string &Path);
-void ReadDirToVectorOfUnits(const char *Path, Vector<Unit> *V, long *Epoch,
+void ReadDirToVectorOfUnits(const char *Path, std::vector<Unit> *V, long *Epoch,
size_t MaxSize, bool ExitOnError,
- Vector<std::string> *VPaths = 0);
+ std::vector<std::string> *VPaths = 0);
// Returns "Dir/FileName" or equivalent for the current OS.
std::string DirPlusFile(const std::string &DirPath,
@@ -54,6 +54,11 @@ void DupAndCloseStderr();
void CloseStdout();
+// For testing.
+FILE *GetOutputFile();
+void SetOutputFile(FILE *NewOutputFile);
+
+void Puts(const char *Str);
void Printf(const char *Fmt, ...);
void VPrintf(bool Verbose, const char *Fmt, ...);
@@ -66,7 +71,7 @@ bool IsDirectory(const std::string &Path);
size_t FileSize(const std::string &Path);
void ListFilesInDirRecursive(const std::string &Dir, long *Epoch,
- Vector<std::string> *V, bool TopDir);
+ std::vector<std::string> *V, bool TopDir);
bool MkDirRecursive(const std::string &Dir);
void RmDirRecursive(const std::string &Dir);
@@ -85,7 +90,7 @@ struct SizedFile {
bool operator<(const SizedFile &B) const { return Size < B.Size; }
};
-void GetSizedFilesFromDir(const std::string &Dir, Vector<SizedFile> *V);
+void GetSizedFilesFromDir(const std::string &Dir, std::vector<SizedFile> *V);
char GetSeparator();
bool IsSeparator(char C);
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIOPosix.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIOPosix.cpp
index 4706a40959be..3700fb098e55 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIOPosix.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIOPosix.cpp
@@ -53,7 +53,7 @@ std::string Basename(const std::string &Path) {
}
void ListFilesInDirRecursive(const std::string &Dir, long *Epoch,
- Vector<std::string> *V, bool TopDir) {
+ std::vector<std::string> *V, bool TopDir) {
auto E = GetEpoch(Dir);
if (Epoch)
if (E && *Epoch >= E) return;
@@ -78,7 +78,6 @@ void ListFilesInDirRecursive(const std::string &Dir, long *Epoch,
*Epoch = E;
}
-
void IterateDirRecursive(const std::string &Dir,
void (*DirPreCallback)(const std::string &Dir),
void (*DirPostCallback)(const std::string &Dir),
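For reference, a minimal call of the function whose signature changed above, mirroring the caller in FuzzerIO.cpp (the directory name is a placeholder):

  #include <string>
  #include <vector>
  #include "FuzzerIO.h"

  void ListCorpusExample() {
    std::vector<std::string> Files;
    // Passing 0 for Epoch disables the skip-if-unchanged shortcut, as the
    // caller in GetSizedFilesFromDir does.
    fuzzer::ListFilesInDirRecursive("corpus_dir", 0, &Files, /*TopDir=*/true);
  }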
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIOWindows.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIOWindows.cpp
index 61ad35e281f5..6771fc173c91 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIOWindows.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerIOWindows.cpp
@@ -111,7 +111,7 @@ size_t FileSize(const std::string &Path) {
}
void ListFilesInDirRecursive(const std::string &Dir, long *Epoch,
- Vector<std::string> *V, bool TopDir) {
+ std::vector<std::string> *V, bool TopDir) {
auto E = GetEpoch(Dir);
if (Epoch)
if (E && *Epoch >= E) return;
@@ -159,7 +159,6 @@ void ListFilesInDirRecursive(const std::string &Dir, long *Epoch,
*Epoch = E;
}
-
void IterateDirRecursive(const std::string &Dir,
void (*DirPreCallback)(const std::string &Dir),
void (*DirPostCallback)(const std::string &Dir),
@@ -297,9 +296,8 @@ static size_t ParseServerAndShare(const std::string &FileName,
return Pos - Offset;
}
-// Parse the given Ref string from the position Offset, to exactly match the given
-// string Patt.
-// Returns number of characters considered if successful.
+// Parse the given Ref string from the position Offset, to exactly match the
+// given string Patt. Returns number of characters considered if successful.
static size_t ParseCustomString(const std::string &Ref, size_t Offset,
const char *Patt) {
size_t Len = strlen(Patt);
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerInternal.h b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerInternal.h
index 37c8a01dc3c6..88504705137a 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerInternal.h
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerInternal.h
@@ -29,14 +29,13 @@ namespace fuzzer {
using namespace std::chrono;
-class Fuzzer {
+class Fuzzer final {
public:
-
Fuzzer(UserCallback CB, InputCorpus &Corpus, MutationDispatcher &MD,
- FuzzingOptions Options);
- ~Fuzzer();
- void Loop(Vector<SizedFile> &CorporaFiles);
- void ReadAndExecuteSeedCorpora(Vector<SizedFile> &CorporaFiles);
+ const FuzzingOptions &Options);
+ ~Fuzzer() = delete;
+ void Loop(std::vector<SizedFile> &CorporaFiles);
+ void ReadAndExecuteSeedCorpora(std::vector<SizedFile> &CorporaFiles);
void MinimizeCrashLoop(const Unit &U);
void RereadOutputCorpus(size_t MaxSize);
@@ -65,15 +64,19 @@ public:
static void StaticFileSizeExceedCallback();
static void StaticGracefulExitCallback();
- void ExecuteCallback(const uint8_t *Data, size_t Size);
+ // Executes the target callback on {Data, Size} once.
+ // Returns false if the input was rejected by the target (target returned -1),
+ // and true otherwise.
+ bool ExecuteCallback(const uint8_t *Data, size_t Size);
bool RunOne(const uint8_t *Data, size_t Size, bool MayDeleteFile = false,
InputInfo *II = nullptr, bool ForceAddToCorpus = false,
bool *FoundUniqFeatures = nullptr);
void TPCUpdateObservedPCs();
// Merge Corpora[1:] into Corpora[0].
- void Merge(const Vector<std::string> &Corpora);
- void CrashResistantMergeInternalStep(const std::string &ControlFilePath);
+ void Merge(const std::vector<std::string> &Corpora);
+ void CrashResistantMergeInternalStep(const std::string &ControlFilePath,
+ bool IsSetCoverMerge);
MutationDispatcher &GetMD() { return MD; }
void PrintFinalStats();
void SetMaxInputLen(size_t MaxInputLen);
@@ -87,6 +90,7 @@ public:
void HandleMalloc(size_t Size);
static void MaybeExitGracefully();
+ static int InterruptExitCode();
std::string WriteToOutputCorpus(const Unit &U);
private:
@@ -141,7 +145,7 @@ private:
size_t MaxMutationLen = 0;
size_t TmpMaxMutationLen = 0;
- Vector<uint32_t> UniqFeatureSetTmp;
+ std::vector<uint32_t> UniqFeatureSetTmp;
// Need to know our own thread.
static thread_local bool IsMyThread;
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerLoop.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerLoop.cpp
index 86a78ab75174..935dd2342e18 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerLoop.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerLoop.cpp
@@ -136,7 +136,7 @@ void Fuzzer::HandleMalloc(size_t Size) {
}
Fuzzer::Fuzzer(UserCallback CB, InputCorpus &Corpus, MutationDispatcher &MD,
- FuzzingOptions Options)
+ const FuzzingOptions &Options)
: CB(CB), Corpus(Corpus), MD(MD), Options(Options) {
if (EF->__sanitizer_set_death_callback)
EF->__sanitizer_set_death_callback(StaticDeathCallback);
@@ -160,8 +160,6 @@ Fuzzer::Fuzzer(UserCallback CB, InputCorpus &Corpus, MutationDispatcher &MD,
memset(BaseSha1, 0, sizeof(BaseSha1));
}
-Fuzzer::~Fuzzer() {}
-
void Fuzzer::AllocateCurrentUnitData() {
if (CurrentUnitData || MaxInputLen == 0)
return;
@@ -262,6 +260,11 @@ void Fuzzer::MaybeExitGracefully() {
_Exit(0);
}
+int Fuzzer::InterruptExitCode() {
+ assert(F);
+ return F->Options.InterruptExitCode;
+}
+
void Fuzzer::InterruptCallback() {
Printf("==%lu== libFuzzer: run interrupted; exiting\n", GetPid());
PrintFinalStats();
@@ -296,7 +299,7 @@ void Fuzzer::AlarmCallback() {
Printf(" and the timeout value is %d (use -timeout=N to change)\n",
Options.UnitTimeoutSec);
DumpCurrentUnit("timeout-");
- Printf("==%lu== ERROR: libFuzzer: timeout after %d seconds\n", GetPid(),
+ Printf("==%lu== ERROR: libFuzzer: timeout after %zu seconds\n", GetPid(),
Seconds);
PrintStackTrace();
Printf("SUMMARY: libFuzzer: timeout\n");
@@ -309,9 +312,8 @@ void Fuzzer::RssLimitCallback() {
if (EF->__sanitizer_acquire_crash_state &&
!EF->__sanitizer_acquire_crash_state())
return;
- Printf(
- "==%lu== ERROR: libFuzzer: out-of-memory (used: %zdMb; limit: %zdMb)\n",
- GetPid(), GetPeakRSSMb(), Options.RssLimitMb);
+ Printf("==%lu== ERROR: libFuzzer: out-of-memory (used: %zdMb; limit: %dMb)\n",
+ GetPid(), GetPeakRSSMb(), Options.RssLimitMb);
Printf(" To change the out-of-memory limit use -rss_limit_mb=<N>\n\n");
PrintMemoryProfile();
DumpCurrentUnit("oom-");
@@ -366,7 +368,7 @@ void Fuzzer::PrintFinalStats() {
Printf("stat::number_of_executed_units: %zd\n", TotalNumberOfRuns);
Printf("stat::average_exec_per_sec: %zd\n", ExecPerSec);
Printf("stat::new_units_added: %zd\n", NumberOfNewUnitsAdded);
- Printf("stat::slowest_unit_time_sec: %zd\n", TimeOfLongestUnitInSeconds);
+ Printf("stat::slowest_unit_time_sec: %ld\n", TimeOfLongestUnitInSeconds);
Printf("stat::peak_rss_mb: %zd\n", GetPeakRSSMb());
}
@@ -388,7 +390,7 @@ void Fuzzer::SetMaxMutationLen(size_t MaxMutationLen) {
void Fuzzer::CheckExitOnSrcPosOrItem() {
if (!Options.ExitOnSrcPos.empty()) {
- static auto *PCsSet = new Set<uintptr_t>;
+ static auto *PCsSet = new std::set<uintptr_t>;
auto HandlePC = [&](const TracePC::PCTableEntry *TE) {
if (!PCsSet->insert(TE->PC).second)
return;
@@ -413,8 +415,8 @@ void Fuzzer::CheckExitOnSrcPosOrItem() {
void Fuzzer::RereadOutputCorpus(size_t MaxSize) {
if (Options.OutputCorpus.empty() || !Options.ReloadIntervalSec)
return;
- Vector<Unit> AdditionalCorpus;
- Vector<std::string> AdditionalCorpusPaths;
+ std::vector<Unit> AdditionalCorpus;
+ std::vector<std::string> AdditionalCorpusPaths;
ReadDirToVectorOfUnits(
Options.OutputCorpus.c_str(), &AdditionalCorpus,
&EpochOfLastReadOfOutputCorpus, MaxSize,
@@ -450,14 +452,14 @@ void Fuzzer::PrintPulseAndReportSlowInput(const uint8_t *Data, size_t Size) {
static_cast<long>(static_cast<double>(TimeOfLongestUnitInSeconds) * 1.1);
if (TimeOfUnit > Threshhold && TimeOfUnit >= Options.ReportSlowUnits) {
TimeOfLongestUnitInSeconds = TimeOfUnit;
- Printf("Slowest unit: %zd s:\n", TimeOfLongestUnitInSeconds);
+ Printf("Slowest unit: %ld s:\n", TimeOfLongestUnitInSeconds);
WriteUnitToFileWithPrefix({Data, Data + Size}, "slow-unit-");
}
}
static void WriteFeatureSetToFile(const std::string &FeaturesDir,
const std::string &FileName,
- const Vector<uint32_t> &FeatureSet) {
+ const std::vector<uint32_t> &FeatureSet) {
if (FeaturesDir.empty() || FeatureSet.empty()) return;
WriteToFile(reinterpret_cast<const uint8_t *>(FeatureSet.data()),
FeatureSet.size() * sizeof(FeatureSet[0]),
@@ -511,7 +513,7 @@ bool Fuzzer::RunOne(const uint8_t *Data, size_t Size, bool MayDeleteFile,
// Largest input length should be INT_MAX.
assert(Size < std::numeric_limits<uint32_t>::max());
- ExecuteCallback(Data, Size);
+ if (!ExecuteCallback(Data, Size)) return false;
auto TimeOfUnit = duration_cast<microseconds>(UnitStopTime - UnitStartTime);
UniqFeatureSetTmp.clear();
@@ -548,7 +550,7 @@ bool Fuzzer::RunOne(const uint8_t *Data, size_t Size, bool MayDeleteFile,
FoundUniqFeaturesOfII == II->UniqFeatureSet.size() &&
II->U.size() > Size) {
auto OldFeaturesFile = Sha1ToString(II->Sha1);
- Corpus.Replace(II, {Data, Data + Size});
+ Corpus.Replace(II, {Data, Data + Size}, TimeOfUnit);
RenameFeatureSetFile(Options.FeaturesDir, OldFeaturesFile,
Sha1ToString(II->Sha1));
return true;
@@ -586,7 +588,7 @@ static bool LooseMemeq(const uint8_t *A, const uint8_t *B, size_t Size) {
// This method is not inlined because it would cause a test to fail where it
// is part of the stack unwinding. See D97975 for details.
-ATTRIBUTE_NOINLINE void Fuzzer::ExecuteCallback(const uint8_t *Data,
+ATTRIBUTE_NOINLINE bool Fuzzer::ExecuteCallback(const uint8_t *Data,
size_t Size) {
TPC.RecordInitialStack();
TotalNumberOfRuns++;
@@ -602,23 +604,24 @@ ATTRIBUTE_NOINLINE void Fuzzer::ExecuteCallback(const uint8_t *Data,
if (CurrentUnitData && CurrentUnitData != Data)
memcpy(CurrentUnitData, Data, Size);
CurrentUnitSize = Size;
+ int CBRes = 0;
{
ScopedEnableMsanInterceptorChecks S;
AllocTracer.Start(Options.TraceMalloc);
UnitStartTime = system_clock::now();
TPC.ResetMaps();
RunningUserCallback = true;
- int Res = CB(DataCopy, Size);
+ CBRes = CB(DataCopy, Size);
RunningUserCallback = false;
UnitStopTime = system_clock::now();
- (void)Res;
- assert(Res == 0);
+ assert(CBRes == 0 || CBRes == -1);
HasMoreMallocsThanFrees = AllocTracer.Stop();
}
if (!LooseMemeq(DataCopy, Data, Size))
CrashOnOverwrittenData();
CurrentUnitSize = 0;
delete[] DataCopy;
+ return CBRes == 0;
}
std::string Fuzzer::WriteToOutputCorpus(const Unit &U) {
@@ -784,7 +787,7 @@ void Fuzzer::PurgeAllocator() {
LastAllocatorPurgeAttemptTime = system_clock::now();
}
-void Fuzzer::ReadAndExecuteSeedCorpora(Vector<SizedFile> &CorporaFiles) {
+void Fuzzer::ReadAndExecuteSeedCorpora(std::vector<SizedFile> &CorporaFiles) {
const size_t kMaxSaneLen = 1 << 20;
const size_t kMinDefaultLen = 4096;
size_t MaxSize = 0;
@@ -796,7 +799,7 @@ void Fuzzer::ReadAndExecuteSeedCorpora(Vector<SizedFile> &CorporaFiles) {
TotalSize += File.Size;
}
if (Options.MaxLen == 0)
- SetMaxInputLen(std::min(std::max(kMinDefaultLen, MaxSize), kMaxSaneLen));
+ SetMaxInputLen(std::clamp(MaxSize, kMinDefaultLen, kMaxSaneLen));
assert(MaxInputLen > 0);
// Test the callback with empty input and never try it again.
@@ -843,13 +846,20 @@ void Fuzzer::ReadAndExecuteSeedCorpora(Vector<SizedFile> &CorporaFiles) {
}
if (Corpus.empty() && Options.MaxNumberOfRuns) {
- Printf("ERROR: no interesting inputs were found. "
- "Is the code instrumented for coverage? Exiting.\n");
- exit(1);
+ Printf("WARNING: no interesting inputs were found so far. "
+ "Is the code instrumented for coverage?\n"
+ "This may also happen if the target rejected all inputs we tried so "
+ "far\n");
+ // The remaining logic requires that the corpus is not empty,
+ // so we add one fake input to the in-memory corpus.
+ Corpus.AddToCorpus({'\n'}, /*NumFeatures=*/1, /*MayDeleteFile=*/true,
+ /*HasFocusFunction=*/false, /*NeverReduce=*/false,
+ /*TimeOfUnit=*/duration_cast<microseconds>(0s), {0}, DFT,
+ /*BaseII*/ nullptr);
}
}
-void Fuzzer::Loop(Vector<SizedFile> &CorporaFiles) {
+void Fuzzer::Loop(std::vector<SizedFile> &CorporaFiles) {
auto FocusFunctionOrAuto = Options.FocusFunction;
DFT.Init(Options.DataFlowTrace, &FocusFunctionOrAuto, CorporaFiles,
MD.GetRand());
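The bool return of ExecuteCallback surfaces the convention, stated in FuzzerInternal.h above, that a target may return -1 to reject an input; from the target's side this looks like:

  #include <cstddef>
  #include <cstdint>

  // Returning -1 makes RunOne() bail out early (see the change above), so a
  // rejected input is neither counted for features nor added to the corpus.
  extern "C" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {
    if (Size < 4)
      return -1;  // reject: too short to parse
    // ... exercise the code under test with Data/Size ...
    return 0;     // accepted
  }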
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerMerge.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerMerge.cpp
index 162453ceae2c..8c8806e8aafd 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerMerge.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerMerge.cpp
@@ -77,8 +77,9 @@ bool Merger::Parse(std::istream &IS, bool ParseCoverage) {
size_t ExpectedStartMarker = 0;
const size_t kInvalidStartMarker = -1;
size_t LastSeenStartMarker = kInvalidStartMarker;
- Vector<uint32_t> TmpFeatures;
- Set<uint32_t> PCs;
+ bool HaveFtMarker = true;
+ std::vector<uint32_t> TmpFeatures;
+ std::set<uint32_t> PCs;
while (std::getline(IS, Line, '\n')) {
std::istringstream ISS1(Line);
std::string Marker;
@@ -93,12 +94,13 @@ bool Merger::Parse(std::istream &IS, bool ParseCoverage) {
LastSeenStartMarker = ExpectedStartMarker;
assert(ExpectedStartMarker < Files.size());
ExpectedStartMarker++;
+ HaveFtMarker = false;
} else if (Marker == "FT") {
// FT FILE_ID COV1 COV2 COV3 ...
size_t CurrentFileIdx = N;
if (CurrentFileIdx != LastSeenStartMarker)
return false;
- LastSeenStartMarker = kInvalidStartMarker;
+ HaveFtMarker = true;
if (ParseCoverage) {
TmpFeatures.clear(); // use a vector from outer scope to avoid resizes.
while (ISS1 >> N)
@@ -108,6 +110,8 @@ bool Merger::Parse(std::istream &IS, bool ParseCoverage) {
}
} else if (Marker == "COV") {
size_t CurrentFileIdx = N;
+ if (CurrentFileIdx != LastSeenStartMarker)
+ return false;
if (ParseCoverage)
while (ISS1 >> N)
if (PCs.insert(N).second)
@@ -116,7 +120,7 @@ bool Merger::Parse(std::istream &IS, bool ParseCoverage) {
return false;
}
}
- if (LastSeenStartMarker != kInvalidStartMarker)
+ if (!HaveFtMarker && LastSeenStartMarker != kInvalidStartMarker)
LastFailure = Files[LastSeenStartMarker].Name;
FirstNotProcessedFile = ExpectedStartMarker;
@@ -132,15 +136,16 @@ size_t Merger::ApproximateMemoryConsumption() const {
// Decides which files need to be merged (add those to NewFiles).
// Returns the number of new features added.
-size_t Merger::Merge(const Set<uint32_t> &InitialFeatures,
- Set<uint32_t> *NewFeatures,
- const Set<uint32_t> &InitialCov, Set<uint32_t> *NewCov,
- Vector<std::string> *NewFiles) {
+size_t Merger::Merge(const std::set<uint32_t> &InitialFeatures,
+ std::set<uint32_t> *NewFeatures,
+ const std::set<uint32_t> &InitialCov,
+ std::set<uint32_t> *NewCov,
+ std::vector<std::string> *NewFiles) {
NewFiles->clear();
NewFeatures->clear();
NewCov->clear();
assert(NumFilesInFirstCorpus <= Files.size());
- Set<uint32_t> AllFeatures = InitialFeatures;
+ std::set<uint32_t> AllFeatures = InitialFeatures;
// What features are in the initial corpus?
for (size_t i = 0; i < NumFilesInFirstCorpus; i++) {
@@ -150,7 +155,7 @@ size_t Merger::Merge(const Set<uint32_t> &InitialFeatures,
// Remove all features that we already know from all other inputs.
for (size_t i = NumFilesInFirstCorpus; i < Files.size(); i++) {
auto &Cur = Files[i].Features;
- Vector<uint32_t> Tmp;
+ std::vector<uint32_t> Tmp;
std::set_difference(Cur.begin(), Cur.end(), AllFeatures.begin(),
AllFeatures.end(), std::inserter(Tmp, Tmp.begin()));
Cur.swap(Tmp);
@@ -188,15 +193,16 @@ size_t Merger::Merge(const Set<uint32_t> &InitialFeatures,
return NewFeatures->size();
}
-Set<uint32_t> Merger::AllFeatures() const {
- Set<uint32_t> S;
+std::set<uint32_t> Merger::AllFeatures() const {
+ std::set<uint32_t> S;
for (auto &File : Files)
S.insert(File.Features.begin(), File.Features.end());
return S;
}
// Inner process. May crash if the target crashes.
-void Fuzzer::CrashResistantMergeInternalStep(const std::string &CFPath) {
+void Fuzzer::CrashResistantMergeInternalStep(const std::string &CFPath,
+ bool IsSetCoverMerge) {
Printf("MERGE-INNER: using the control file '%s'\n", CFPath.c_str());
Merger M;
std::ifstream IF(CFPath);
@@ -212,11 +218,11 @@ void Fuzzer::CrashResistantMergeInternalStep(const std::string &CFPath) {
M.Files.size() - M.FirstNotProcessedFile);
std::ofstream OF(CFPath, std::ofstream::out | std::ofstream::app);
- Set<size_t> AllFeatures;
+ std::set<size_t> AllFeatures;
auto PrintStatsWrapper = [this, &AllFeatures](const char* Where) {
this->PrintStats(Where, "\n", 0, AllFeatures.size());
};
- Set<const TracePC::PCTableEntry *> AllPCs;
+ std::set<const TracePC::PCTableEntry *> AllPCs;
for (size_t i = M.FirstNotProcessedFile; i < M.Files.size(); i++) {
Fuzzer::MaybeExitGracefully();
auto U = FileToVector(M.Files[i].Name);
@@ -234,13 +240,14 @@ void Fuzzer::CrashResistantMergeInternalStep(const std::string &CFPath) {
// Collect coverage. We are iterating over the files in this order:
// * First, files in the initial corpus ordered by size, smallest first.
// * Then, all other files, smallest first.
- // So it makes no sense to record all features for all files, instead we
- // only record features that were not seen before.
- Set<size_t> UniqFeatures;
- TPC.CollectFeatures([&](size_t Feature) {
- if (AllFeatures.insert(Feature).second)
- UniqFeatures.insert(Feature);
- });
+ std::set<size_t> Features;
+ if (IsSetCoverMerge)
+ TPC.CollectFeatures([&](size_t Feature) { Features.insert(Feature); });
+ else
+ TPC.CollectFeatures([&](size_t Feature) {
+ if (AllFeatures.insert(Feature).second)
+ Features.insert(Feature);
+ });
TPC.UpdateObservedPCs();
// Show stats.
if (!(TotalNumberOfRuns & (TotalNumberOfRuns - 1)))
@@ -249,7 +256,7 @@ void Fuzzer::CrashResistantMergeInternalStep(const std::string &CFPath) {
PrintStatsWrapper("LOADED");
// Write the post-run marker and the coverage.
OF << "FT " << i;
- for (size_t F : UniqFeatures)
+ for (size_t F : Features)
OF << " " << F;
OF << "\n";
OF << "COV " << i;
@@ -263,15 +270,137 @@ void Fuzzer::CrashResistantMergeInternalStep(const std::string &CFPath) {
PrintStatsWrapper("DONE ");
}
-static size_t WriteNewControlFile(const std::string &CFPath,
- const Vector<SizedFile> &OldCorpus,
- const Vector<SizedFile> &NewCorpus,
- const Vector<MergeFileInfo> &KnownFiles) {
+// Merges all corpora into the first corpus. A file is added into
+// the first corpus only if it adds new features. Unlike `Merger::Merge`,
+// this implementation computes an approximation of the minimum set
+// of corpus files that covers all known features (the set cover problem).
+// Generally, this means that files with more features are preferred for
+// merging into the first corpus. When two files have the same number of
+// features, the smaller one is preferred.
+size_t Merger::SetCoverMerge(const std::set<uint32_t> &InitialFeatures,
+ std::set<uint32_t> *NewFeatures,
+ const std::set<uint32_t> &InitialCov,
+ std::set<uint32_t> *NewCov,
+ std::vector<std::string> *NewFiles) {
+ assert(NumFilesInFirstCorpus <= Files.size());
+ NewFiles->clear();
+ NewFeatures->clear();
+ NewCov->clear();
+ std::set<uint32_t> AllFeatures;
+ // 1 << 21 - 1 is the maximum feature index.
+ // See 'kFeatureSetSize' in 'FuzzerCorpus.h'.
+ const uint32_t kFeatureSetSize = 1 << 21;
+ std::vector<bool> Covered(kFeatureSetSize, false);
+ size_t NumCovered = 0;
+
+ std::set<uint32_t> ExistingFeatures = InitialFeatures;
+ for (size_t i = 0; i < NumFilesInFirstCorpus; ++i)
+ ExistingFeatures.insert(Files[i].Features.begin(), Files[i].Features.end());
+
+ // Mark the existing features as covered.
+ for (const auto &F : ExistingFeatures) {
+ if (!Covered[F % kFeatureSetSize]) {
+ ++NumCovered;
+ Covered[F % kFeatureSetSize] = true;
+ }
+ // Calculate an underestimation of the set of covered features
+ // since the `Covered` bitvector is smaller than the feature range.
+ AllFeatures.insert(F % kFeatureSetSize);
+ }
+
+ std::set<size_t> RemainingFiles;
+ for (size_t i = NumFilesInFirstCorpus; i < Files.size(); ++i) {
+ // Construct an incremental sequence that represents the
+ // indices of all files (excluding those in the initial corpus).
+ // RemainingFiles = range(NumFilesInFirstCorpus..Files.size()).
+ RemainingFiles.insert(i);
+ // Insert this file's unique features to all features.
+ for (const auto &F : Files[i].Features)
+ AllFeatures.insert(F % kFeatureSetSize);
+ }
+
+ // Integrate files into Covered until set is complete.
+ while (NumCovered != AllFeatures.size()) {
+ // Index to file with largest number of unique features.
+ size_t MaxFeaturesIndex = NumFilesInFirstCorpus;
+ // Indices to remove from RemainingFiles.
+ std::set<size_t> RemoveIndices;
+ // Running max unique feature count.
+ // Updated upon finding a file with more features.
+ size_t MaxNumFeatures = 0;
+
+ // Iterate over all files not yet integrated into Covered,
+ // to find the file which has the largest number of
+ // features that are not already in Covered.
+ for (const auto &i : RemainingFiles) {
+ const auto &File = Files[i];
+ size_t CurrentUnique = 0;
+ // Count number of features in this file
+ // which are not yet in Covered.
+ for (const auto &F : File.Features)
+ if (!Covered[F % kFeatureSetSize])
+ ++CurrentUnique;
+
+ if (CurrentUnique == 0) {
+ // All features in this file are already in Covered: skip next time.
+ RemoveIndices.insert(i);
+ } else if (CurrentUnique > MaxNumFeatures ||
+ (CurrentUnique == MaxNumFeatures &&
+ File.Size < Files[MaxFeaturesIndex].Size)) {
+ // Update the max-features file based on unique features,
+ // breaking ties by selecting the smaller file.
+ MaxNumFeatures = CurrentUnique;
+ MaxFeaturesIndex = i;
+ }
+ }
+ // Must be a valid index.
+ assert(MaxFeaturesIndex < Files.size());
+ // Remove any feature-less files found.
+ for (const auto &i : RemoveIndices)
+ RemainingFiles.erase(i);
+ if (MaxNumFeatures == 0) {
+ // Did not find a file that adds unique features.
+ // This means that we should have no remaining files.
+ assert(RemainingFiles.size() == 0);
+ assert(NumCovered == AllFeatures.size());
+ break;
+ }
+
+ // MaxFeaturesIndex must be an element of RemainingFiles.
+ assert(RemainingFiles.find(MaxFeaturesIndex) != RemainingFiles.end());
+ // Remove the file with the most features from RemainingFiles.
+ RemainingFiles.erase(MaxFeaturesIndex);
+ const auto &MaxFeatureFile = Files[MaxFeaturesIndex];
+ // Add the features of the max feature file to Covered.
+ for (const auto &F : MaxFeatureFile.Features) {
+ if (!Covered[F % kFeatureSetSize]) {
+ ++NumCovered;
+ Covered[F % kFeatureSetSize] = true;
+ NewFeatures->insert(F);
+ }
+ }
+ // Add the index to this file to the result.
+ NewFiles->push_back(MaxFeatureFile.Name);
+ // Update NewCov with the additional coverage
+ // that MaxFeatureFile provides.
+ for (const auto &C : MaxFeatureFile.Cov)
+ if (InitialCov.find(C) == InitialCov.end())
+ NewCov->insert(C);
+ }
+
+ return NewFeatures->size();
+}
+
+static size_t
+WriteNewControlFile(const std::string &CFPath,
+ const std::vector<SizedFile> &OldCorpus,
+ const std::vector<SizedFile> &NewCorpus,
+ const std::vector<MergeFileInfo> &KnownFiles) {
std::unordered_set<std::string> FilesToSkip;
for (auto &SF: KnownFiles)
FilesToSkip.insert(SF.Name);
- Vector<std::string> FilesToUse;
+ std::vector<std::string> FilesToUse;
auto MaybeUseFile = [=, &FilesToUse](std::string Name) {
if (FilesToSkip.find(Name) == FilesToSkip.end())
FilesToUse.push_back(Name);
@@ -299,19 +428,19 @@ static size_t WriteNewControlFile(const std::string &CFPath,
}
// Outer process. Does not call the target code and thus should not fail.
-void CrashResistantMerge(const Vector<std::string> &Args,
- const Vector<SizedFile> &OldCorpus,
- const Vector<SizedFile> &NewCorpus,
- Vector<std::string> *NewFiles,
- const Set<uint32_t> &InitialFeatures,
- Set<uint32_t> *NewFeatures,
- const Set<uint32_t> &InitialCov,
- Set<uint32_t> *NewCov,
- const std::string &CFPath,
- bool V /*Verbose*/) {
+void CrashResistantMerge(const std::vector<std::string> &Args,
+ const std::vector<SizedFile> &OldCorpus,
+ const std::vector<SizedFile> &NewCorpus,
+ std::vector<std::string> *NewFiles,
+ const std::set<uint32_t> &InitialFeatures,
+ std::set<uint32_t> *NewFeatures,
+ const std::set<uint32_t> &InitialCov,
+ std::set<uint32_t> *NewCov, const std::string &CFPath,
+ bool V /*Verbose*/,
+ bool IsSetCoverMerge) {
if (NewCorpus.empty() && OldCorpus.empty()) return; // Nothing to merge.
size_t NumAttempts = 0;
- Vector<MergeFileInfo> KnownFiles;
+ std::vector<MergeFileInfo> KnownFiles;
if (FileSize(CFPath)) {
VPrintf(V, "MERGE-OUTER: non-empty control file provided: '%s'\n",
CFPath.c_str());
@@ -363,6 +492,7 @@ void CrashResistantMerge(const Vector<std::string> &Args,
// Every inner process should execute at least one input.
Command BaseCmd(Args);
BaseCmd.removeFlag("merge");
+ BaseCmd.removeFlag("set_cover_merge");
BaseCmd.removeFlag("fork");
BaseCmd.removeFlag("collect_data_flow");
for (size_t Attempt = 1; Attempt <= NumAttempts; Attempt++) {
@@ -370,14 +500,16 @@ void CrashResistantMerge(const Vector<std::string> &Args,
VPrintf(V, "MERGE-OUTER: attempt %zd\n", Attempt);
Command Cmd(BaseCmd);
Cmd.addFlag("merge_control_file", CFPath);
- Cmd.addFlag("merge_inner", "1");
+ // If we are going to use the set cover implementation for
+ // minimization add the merge_inner=2 internal flag.
+ Cmd.addFlag("merge_inner", IsSetCoverMerge ? "2" : "1");
if (!V) {
Cmd.setOutputFile(getDevNull());
Cmd.combineOutAndErr();
}
auto ExitCode = ExecuteCommand(Cmd);
if (!ExitCode) {
- VPrintf(V, "MERGE-OUTER: succesfull in %zd attempt(s)\n", Attempt);
+ VPrintf(V, "MERGE-OUTER: successful in %zd attempt(s)\n", Attempt);
break;
}
}
@@ -395,7 +527,10 @@ void CrashResistantMerge(const Vector<std::string> &Args,
M.ApproximateMemoryConsumption() >> 20, GetPeakRSSMb());
M.Files.insert(M.Files.end(), KnownFiles.begin(), KnownFiles.end());
- M.Merge(InitialFeatures, NewFeatures, InitialCov, NewCov, NewFiles);
+ if (IsSetCoverMerge)
+ M.SetCoverMerge(InitialFeatures, NewFeatures, InitialCov, NewCov, NewFiles);
+ else
+ M.Merge(InitialFeatures, NewFeatures, InitialCov, NewCov, NewFiles);
VPrintf(V, "MERGE-OUTER: %zd new files with %zd new features added; "
"%zd new coverage edges\n",
NewFiles->size(), NewFeatures->size(), NewCov->size());
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerMerge.h b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerMerge.h
index e0c6bc539bdb..42f798e1da18 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerMerge.h
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerMerge.h
@@ -41,6 +41,7 @@
#define LLVM_FUZZER_MERGE_H
#include "FuzzerDefs.h"
+#include "FuzzerIO.h"
#include <istream>
#include <ostream>
@@ -52,11 +53,11 @@ namespace fuzzer {
struct MergeFileInfo {
std::string Name;
size_t Size = 0;
- Vector<uint32_t> Features, Cov;
+ std::vector<uint32_t> Features, Cov;
};
struct Merger {
- Vector<MergeFileInfo> Files;
+ std::vector<MergeFileInfo> Files;
size_t NumFilesInFirstCorpus = 0;
size_t FirstNotProcessedFile = 0;
std::string LastFailure;
@@ -64,23 +65,28 @@ struct Merger {
bool Parse(std::istream &IS, bool ParseCoverage);
bool Parse(const std::string &Str, bool ParseCoverage);
void ParseOrExit(std::istream &IS, bool ParseCoverage);
- size_t Merge(const Set<uint32_t> &InitialFeatures, Set<uint32_t> *NewFeatures,
- const Set<uint32_t> &InitialCov, Set<uint32_t> *NewCov,
- Vector<std::string> *NewFiles);
+ size_t Merge(const std::set<uint32_t> &InitialFeatures,
+ std::set<uint32_t> *NewFeatures,
+ const std::set<uint32_t> &InitialCov, std::set<uint32_t> *NewCov,
+ std::vector<std::string> *NewFiles);
+ size_t SetCoverMerge(const std::set<uint32_t> &InitialFeatures,
+ std::set<uint32_t> *NewFeatures,
+ const std::set<uint32_t> &InitialCov,
+ std::set<uint32_t> *NewCov,
+ std::vector<std::string> *NewFiles);
size_t ApproximateMemoryConsumption() const;
- Set<uint32_t> AllFeatures() const;
+ std::set<uint32_t> AllFeatures() const;
};
-void CrashResistantMerge(const Vector<std::string> &Args,
- const Vector<SizedFile> &OldCorpus,
- const Vector<SizedFile> &NewCorpus,
- Vector<std::string> *NewFiles,
- const Set<uint32_t> &InitialFeatures,
- Set<uint32_t> *NewFeatures,
- const Set<uint32_t> &InitialCov,
- Set<uint32_t> *NewCov,
- const std::string &CFPath,
- bool Verbose);
+void CrashResistantMerge(const std::vector<std::string> &Args,
+ const std::vector<SizedFile> &OldCorpus,
+ const std::vector<SizedFile> &NewCorpus,
+ std::vector<std::string> *NewFiles,
+ const std::set<uint32_t> &InitialFeatures,
+ std::set<uint32_t> *NewFeatures,
+ const std::set<uint32_t> &InitialCov,
+ std::set<uint32_t> *NewCov, const std::string &CFPath,
+ bool Verbose, bool IsSetCoverMerge);
} // namespace fuzzer
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerMutate.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerMutate.cpp
index 4650f1beceac..1abce16d70d9 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerMutate.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerMutate.cpp
@@ -485,7 +485,7 @@ void MutationDispatcher::RecordSuccessfulMutationSequence() {
}
void MutationDispatcher::PrintRecommendedDictionary() {
- Vector<DictionaryEntry> V;
+ std::vector<DictionaryEntry> V;
for (auto &DE : PersistentAutoDictionary)
if (!ManualDictionary.ContainsWord(DE.GetW()))
V.push_back(DE);
@@ -521,7 +521,7 @@ void MutationDispatcher::PrintMutationSequence(bool Verbose) {
std::string MutationDispatcher::MutationSequence() {
std::string MS;
- for (auto M : CurrentMutatorSequence) {
+ for (const auto &M : CurrentMutatorSequence) {
MS += M.Name;
MS += "-";
}
@@ -540,7 +540,7 @@ size_t MutationDispatcher::DefaultMutate(uint8_t *Data, size_t Size,
// Mutates Data in place, returns new size.
size_t MutationDispatcher::MutateImpl(uint8_t *Data, size_t Size,
size_t MaxSize,
- Vector<Mutator> &Mutators) {
+ std::vector<Mutator> &Mutators) {
assert(MaxSize > 0);
// Some mutations may fail (e.g. can't insert more bytes if Size == MaxSize),
// in which case they will return 0.
@@ -562,7 +562,7 @@ size_t MutationDispatcher::MutateImpl(uint8_t *Data, size_t Size,
// Mask represents the set of Data bytes that are worth mutating.
size_t MutationDispatcher::MutateWithMask(uint8_t *Data, size_t Size,
size_t MaxSize,
- const Vector<uint8_t> &Mask) {
+ const std::vector<uint8_t> &Mask) {
size_t MaskedSize = std::min(Size, Mask.size());
// * Copy the worthy bytes into a temporary array T
// * Mutate T
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerMutate.h b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerMutate.h
index fd37191156d3..97704e2160aa 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerMutate.h
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerMutate.h
@@ -77,7 +77,7 @@ public:
/// that have '1' in Mask.
/// Mask.size() should be >= Size.
size_t MutateWithMask(uint8_t *Data, size_t Size, size_t MaxSize,
- const Vector<uint8_t> &Mask);
+ const std::vector<uint8_t> &Mask);
/// Applies one of the default mutations. Provided as a service
/// to mutation authors.
@@ -104,7 +104,7 @@ public:
size_t AddWordFromDictionary(Dictionary &D, uint8_t *Data, size_t Size,
size_t MaxSize);
size_t MutateImpl(uint8_t *Data, size_t Size, size_t MaxSize,
- Vector<Mutator> &Mutators);
+ std::vector<Mutator> &Mutators);
size_t InsertPartOf(const uint8_t *From, size_t FromSize, uint8_t *To,
size_t ToSize, size_t MaxToSize);
@@ -133,22 +133,22 @@ public:
// entries that led to successful discoveries in the past mutations.
Dictionary PersistentAutoDictionary;
- Vector<DictionaryEntry *> CurrentDictionaryEntrySequence;
+ std::vector<DictionaryEntry *> CurrentDictionaryEntrySequence;
static const size_t kCmpDictionaryEntriesDequeSize = 16;
DictionaryEntry CmpDictionaryEntriesDeque[kCmpDictionaryEntriesDequeSize];
size_t CmpDictionaryEntriesDequeIdx = 0;
const Unit *CrossOverWith = nullptr;
- Vector<uint8_t> MutateInPlaceHere;
- Vector<uint8_t> MutateWithMaskTemp;
+ std::vector<uint8_t> MutateInPlaceHere;
+ std::vector<uint8_t> MutateWithMaskTemp;
// CustomCrossOver needs its own buffer as a custom implementation may call
// LLVMFuzzerMutate, which in turn may resize MutateInPlaceHere.
- Vector<uint8_t> CustomCrossOverInPlaceHere;
+ std::vector<uint8_t> CustomCrossOverInPlaceHere;
- Vector<Mutator> Mutators;
- Vector<Mutator> DefaultMutators;
- Vector<Mutator> CurrentMutatorSequence;
+ std::vector<Mutator> Mutators;
+ std::vector<Mutator> DefaultMutators;
+ std::vector<Mutator> CurrentMutatorSequence;
};
} // namespace fuzzer
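A hedged usage sketch for MutateWithMask, matching the contract above (Mask.size() must be >= Size; only bytes whose Mask entry is 1 may change):

  #include <cstdint>
  #include <vector>
  #include "FuzzerMutate.h"
  #include "FuzzerOptions.h"
  #include "FuzzerRandom.h"

  void MaskedMutateExample() {
    fuzzer::Random Rand(/*seed=*/0);
    fuzzer::FuzzingOptions Options;
    fuzzer::MutationDispatcher MD(Rand, Options);
    uint8_t Data[8] = {'H', 'd', 'r', '!'};
    std::vector<uint8_t> Mask = {0, 1, 1, 0};  // bytes 0 and 3 stay fixed
    MD.MutateWithMask(Data, /*Size=*/4, /*MaxSize=*/sizeof(Data), Mask);
  }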
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerOptions.h b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerOptions.h
index d0c285a6821d..72e256106194 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerOptions.h
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerOptions.h
@@ -47,6 +47,7 @@ struct FuzzingOptions {
int ReportSlowUnits = 10;
bool OnlyASCII = false;
bool Entropic = true;
+ bool ForkCorpusGroups = false;
size_t EntropicFeatureFrequencyThreshold = 0xFF;
size_t EntropicNumberOfRarestFeatures = 100;
bool EntropicScalePerExecTime = false;
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerTracePC.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerTracePC.cpp
index d808b9b00fa3..7f4e8ef91c44 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerTracePC.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerTracePC.cpp
@@ -133,13 +133,14 @@ inline ALWAYS_INLINE uintptr_t GetPreviousInstructionPc(uintptr_t PC) {
// so we return (pc-2) in that case in order to be safe.
// For A32 mode we return (pc-4) because all instructions are 32 bit long.
return (PC - 3) & (~1);
-#elif defined(__powerpc__) || defined(__powerpc64__) || defined(__aarch64__)
- // PCs are always 4 byte aligned.
- return PC - 4;
#elif defined(__sparc__) || defined(__mips__)
return PC - 8;
-#else
+#elif defined(__riscv__)
+ return PC - 2;
+#elif defined(__i386__) || defined(__x86_64__) || defined(_M_IX86) || defined(_M_X64)
return PC - 1;
+#else
+ return PC - 4;
#endif
}
@@ -148,8 +149,8 @@ inline ALWAYS_INLINE uintptr_t GetPreviousInstructionPc(uintptr_t PC) {
ALWAYS_INLINE uintptr_t TracePC::GetNextInstructionPc(uintptr_t PC) {
#if defined(__mips__)
return PC + 8;
-#elif defined(__powerpc__) || defined(__sparc__) || defined(__arm__) || \
- defined(__aarch64__)
+#elif defined(__powerpc__) || defined(__sparc__) || defined(__arm__) || \
+ defined(__aarch64__) || defined(__loongarch__)
return PC + 4;
#else
return PC + 1;
@@ -157,7 +158,7 @@ ALWAYS_INLINE uintptr_t TracePC::GetNextInstructionPc(uintptr_t PC) {
}
void TracePC::UpdateObservedPCs() {
- Vector<uintptr_t> CoveredFuncs;
+ std::vector<uintptr_t> CoveredFuncs;
auto ObservePC = [&](const PCTableEntry *TE) {
if (ObservedPCs.insert(TE).second && DoPrintNewPCs) {
PrintPC("\tNEW_PC: %p %F %L", "\tNEW_PC: %p",
@@ -300,8 +301,8 @@ void TracePC::PrintCoverage(bool PrintAllCounters) {
FunctionStr = FunctionStr.substr(3);
std::string LineStr = DescribePC("%l", VisualizePC);
size_t NumEdges = Last - First;
- Vector<uintptr_t> UncoveredPCs;
- Vector<uintptr_t> CoveredPCs;
+ std::vector<uintptr_t> UncoveredPCs;
+ std::vector<uintptr_t> CoveredPCs;
for (auto TE = First; TE < Last; TE++)
if (!ObservedPCs.count(TE))
UncoveredPCs.push_back(TE->PC);
@@ -391,6 +392,7 @@ void TracePC::HandleCmp(uintptr_t PC, T Arg1, T Arg2) {
ValueProfileMap.AddValue(PC * 128 + 64 + AbsoluteDistance);
}
+ATTRIBUTE_NO_SANITIZE_MEMORY
static size_t InternalStrnlen(const char *S, size_t MaxLen) {
size_t Len = 0;
for (; Len < MaxLen && S[Len]; Len++) {}
@@ -398,7 +400,8 @@ static size_t InternalStrnlen(const char *S, size_t MaxLen) {
}
// Finds min of (strlen(S1), strlen(S2)).
-// Needed bacause one of these strings may actually be non-zero terminated.
+// Needed because one of these strings may actually be non-zero terminated.
+ATTRIBUTE_NO_SANITIZE_MEMORY
static size_t InternalStrnlen2(const char *S1, const char *S2) {
size_t Len = 0;
for (; S1[Len] && S2[Len]; Len++) {}
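
The two helpers above intentionally scan memory that may be unterminated or uninitialized (the strings arrive raw from intercepted comparisons), so MemorySanitizer must not instrument them. A sketch of how a macro like ATTRIBUTE_NO_SANITIZE_MEMORY is conventionally defined; the real definition lives in libFuzzer's platform header, this is shown only for context:

    #if defined(__has_feature)
    #  if __has_feature(memory_sanitizer)
    #    define ATTRIBUTE_NO_SANITIZE_MEMORY __attribute__((no_sanitize("memory")))
    #  endif
    #endif
    #ifndef ATTRIBUTE_NO_SANITIZE_MEMORY
    #  define ATTRIBUTE_NO_SANITIZE_MEMORY
    #endif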
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerTracePC.h b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerTracePC.h
index a93732972f7d..af1f9d81e950 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerTracePC.h
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerTracePC.h
@@ -169,7 +169,7 @@ private:
size_t NumPCTables;
size_t NumPCsInPCTables;
- Set<const PCTableEntry*> ObservedPCs;
+ std::set<const PCTableEntry *> ObservedPCs;
std::unordered_map<uintptr_t, uintptr_t> ObservedFuncs; // PC => Counter.
uint8_t *FocusFunctionCounterPtr = nullptr;
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtil.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtil.cpp
index 05185499bdd1..aeab70f20c28 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtil.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtil.cpp
@@ -43,7 +43,7 @@ void PrintASCIIByte(uint8_t Byte) {
else if (Byte >= 32 && Byte < 127)
Printf("%c", Byte);
else
- Printf("\\x%02x", Byte);
+ Printf("\\%03o", Byte);
}
void PrintASCII(const uint8_t *Data, size_t Size, const char *PrintAfter) {
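
The switch from \xHH to \DDD output is presumably about round-tripping: a C-style octal escape stops after three digits, while a hex escape greedily consumes every following hex character. A two-line illustration:

    // "\x0AB" parses as one (overlong) hex escape, but "\012B" is byte 0x0A
    // followed by 'B', because an octal escape never spans more than 3 digits.
    const char NeedsSplicing[] = "\x0A" "B";  // two bytes only via splicing
    const char Unambiguous[]   = "\012B";     // two bytes, no splicing needed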
@@ -124,7 +124,7 @@ bool ParseOneDictionaryEntry(const std::string &Str, Unit *U) {
return true;
}
-bool ParseDictionaryFile(const std::string &Text, Vector<Unit> *Units) {
+bool ParseDictionaryFile(const std::string &Text, std::vector<Unit> *Units) {
if (Text.empty()) {
Printf("ParseDictionaryFile: file does not exist or is empty\n");
return false;
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtil.h b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtil.h
index a188a7be32a5..554567e1b8fc 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtil.h
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtil.h
@@ -59,6 +59,8 @@ size_t GetPeakRSSMb();
int ExecuteCommand(const Command &Cmd);
bool ExecuteCommand(const Command &Cmd, std::string *CmdOutput);
+void SetThreadName(std::thread &thread, const std::string &name);
+
// Fuchsia does not have popen/pclose.
FILE *OpenProcessPipe(const char *Command, const char *Mode);
int CloseProcessPipe(FILE *F);
@@ -66,10 +68,10 @@ int CloseProcessPipe(FILE *F);
const void *SearchMemory(const void *haystack, size_t haystacklen,
const void *needle, size_t needlelen);
-std::string CloneArgsWithoutX(const Vector<std::string> &Args,
+std::string CloneArgsWithoutX(const std::vector<std::string> &Args,
const char *X1, const char *X2);
-inline std::string CloneArgsWithoutX(const Vector<std::string> &Args,
+inline std::string CloneArgsWithoutX(const std::vector<std::string> &Args,
const char *X) {
return CloneArgsWithoutX(Args, X, X);
}
@@ -94,7 +96,8 @@ inline size_t Log(size_t X) {
return static_cast<size_t>((sizeof(unsigned long long) * 8) - Clzll(X) - 1);
}
-inline size_t PageSize() { return 4096; }
+size_t PageSize();
+
inline uint8_t *RoundUpByPage(uint8_t *P) {
uintptr_t X = reinterpret_cast<uintptr_t>(P);
size_t Mask = PageSize() - 1;
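
Turning PageSize() from a hardcoded inline 4096 into a platform query matters on systems with larger pages, e.g. the 16 KiB pages used by Apple Silicon and some AArch64 Linux configurations. A worked example of the rounding above under that assumption:

    // Hypothetical 16 KiB page system:
    //   X    = base + 5000
    //   Mask = PageSize() - 1 = 16383
    //   (X + Mask) & ~Mask  ->  base + 16384   (the next page boundary)
    // The old inline constant would have produced base + 8192, which is not
    // page-aligned on such a system.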
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilDarwin.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilDarwin.cpp
index a5bed658a446..6c3ece30f67b 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilDarwin.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilDarwin.cpp
@@ -165,6 +165,11 @@ void DiscardOutput(int Fd) {
fclose(Temp);
}
+void SetThreadName(std::thread &thread, const std::string &name) {
+  // TODO: Darwin appears to allow setting the name only on the current
+  // thread.
+}
+
} // namespace fuzzer
#endif // LIBFUZZER_APPLE
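
A sketch of how the Darwin gap could be closed, assuming the usual single-argument pthread_setname_np() that only names the calling thread (hence naming from inside the thread body rather than through the std::thread handle):

    #include <pthread.h>
    #include <string>
    #include <thread>

    std::thread MakeNamedThread(const std::string &Name) {
      return std::thread([Name] {
        // Darwin variant: names the *current* thread only.
        pthread_setname_np(Name.c_str());
        // ... thread body ...
      });
    }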
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilFuchsia.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilFuchsia.cpp
index 5034b4a28d3f..cfb81cd3f780 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilFuchsia.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilFuchsia.cpp
@@ -52,6 +52,12 @@ void CrashTrampolineAsm() __asm__("CrashTrampolineAsm");
namespace {
+// The signal handler thread uses Zircon exceptions to resume crashed threads
+// into libFuzzer's POSIX signal handlers. The associated event is used to
+// signal when the thread is running, and when it should stop.
+std::thread SignalHandler;
+zx_handle_t SignalHandlerEvent = ZX_HANDLE_INVALID;
+
// Helper function to handle Zircon syscall failures.
void ExitOnErr(zx_status_t Status, const char *Syscall) {
if (Status != ZX_OK) {
@@ -68,23 +74,6 @@ void AlarmHandler(int Seconds) {
}
}
-// CFAOffset is used to reference the stack pointer before entering the
-// trampoline (Stack Pointer + CFAOffset = prev Stack Pointer). Before jumping
-// to the trampoline we copy all the registers onto the stack. We need to make
-// sure that the new stack has enough space to store all the registers.
-//
-// The trampoline holds CFI information regarding the registers stored in the
-// stack, which is then used by the unwinder to restore them.
-#if defined(__x86_64__)
-// In x86_64 the crashing function might also be using the red zone (128 bytes
-// on top of their rsp).
-constexpr size_t CFAOffset = 128 + sizeof(zx_thread_state_general_regs_t);
-#elif defined(__aarch64__)
-// In aarch64 we need to always have the stack pointer aligned to 16 bytes, so we
-// make sure that we are keeping that same alignment.
-constexpr size_t CFAOffset = (sizeof(zx_thread_state_general_regs_t) + 15) & -(uintptr_t)16;
-#endif
-
// For the crash handler, we need to call Fuzzer::StaticCrashSignalCallback
// without POSIX signal handlers. To achieve this, we use an assembly function
// to add the necessary CFI unwinding information and a C function to bridge
@@ -98,6 +87,7 @@ constexpr size_t CFAOffset = (sizeof(zx_thread_state_general_regs_t) + 15) & -(u
// Alternatively, Fuchsia may in future actually implement basic signal
// handling for the machine trap signals.
#if defined(__x86_64__)
+
#define FOREACH_REGISTER(OP_REG, OP_NUM) \
OP_REG(rax) \
OP_REG(rbx) \
@@ -118,6 +108,7 @@ constexpr size_t CFAOffset = (sizeof(zx_thread_state_general_regs_t) + 15) & -(u
OP_REG(rip)
#elif defined(__aarch64__)
+
#define FOREACH_REGISTER(OP_REG, OP_NUM) \
OP_NUM(0) \
OP_NUM(1) \
@@ -151,6 +142,41 @@ constexpr size_t CFAOffset = (sizeof(zx_thread_state_general_regs_t) + 15) & -(u
OP_NUM(29) \
OP_REG(sp)
+#elif defined(__riscv)
+
+#define FOREACH_REGISTER(OP_REG, OP_NUM) \
+ OP_REG(ra) \
+ OP_REG(sp) \
+ OP_REG(gp) \
+ OP_REG(tp) \
+ OP_REG(t0) \
+ OP_REG(t1) \
+ OP_REG(t2) \
+ OP_REG(s0) \
+ OP_REG(s1) \
+ OP_REG(a0) \
+ OP_REG(a1) \
+ OP_REG(a2) \
+ OP_REG(a3) \
+ OP_REG(a4) \
+ OP_REG(a5) \
+ OP_REG(a6) \
+ OP_REG(a7) \
+ OP_REG(s2) \
+ OP_REG(s3) \
+ OP_REG(s4) \
+ OP_REG(s5) \
+ OP_REG(s6) \
+ OP_REG(s7) \
+ OP_REG(s8) \
+ OP_REG(s9) \
+ OP_REG(s10) \
+ OP_REG(s11) \
+ OP_REG(t3) \
+ OP_REG(t4) \
+ OP_REG(t5) \
+ OP_REG(t6) \
+
#else
#error "Unsupported architecture for fuzzing on Fuchsia"
#endif
@@ -163,10 +189,10 @@ constexpr size_t CFAOffset = (sizeof(zx_thread_state_general_regs_t) + 15) & -(u
// Produces an assembler immediate operand for the named or numbered register.
// This operand contains the offset of the register relative to the CFA.
-#define ASM_OPERAND_REG(reg) \
- [reg] "i"(offsetof(zx_thread_state_general_regs_t, reg) - CFAOffset),
-#define ASM_OPERAND_NUM(num) \
- [x##num] "i"(offsetof(zx_thread_state_general_regs_t, r[num]) - CFAOffset),
+#define ASM_OPERAND_REG(reg) \
+ [reg] "i"(offsetof(zx_thread_state_general_regs_t, reg)),
+#define ASM_OPERAND_NUM(num) \
+ [x##num] "i"(offsetof(zx_thread_state_general_regs_t, r[num])),
// Trampoline to bridge from the assembly below to the static C++ crash
// callback.
@@ -178,62 +204,67 @@ static void StaticCrashHandler() {
}
}
-// Creates the trampoline with the necessary CFI information to unwind through
-// to the crashing call stack:
-// * Defining the CFA so that it points to the stack pointer at the point
-// of crash.
-// * Storing all registers at the point of crash in the stack and refer to them
-// via CFI information (relative to the CFA).
-// * Setting the return column so the unwinder knows how to continue unwinding.
-// * (x86_64) making sure rsp is aligned before calling StaticCrashHandler.
-// * Calling StaticCrashHandler that will trigger the unwinder.
+// This trampoline function has the necessary CFI information to unwind
+// and get a backtrace:
+// * The stack contains a copy of all the registers at the point of crash,
+// the code has CFI directives specifying how to restore them.
+// * A call to StaticCrashHandler, which will print the stacktrace and exit
+// the fuzzer, generating a crash artifact.
//
// The __attribute__((used)) is necessary because the function
// is never called; it's just a container around the assembly to allow it to
// use operands for compile-time computed constants.
__attribute__((used))
void MakeTrampoline() {
- __asm__(".cfi_endproc\n"
- ".pushsection .text.CrashTrampolineAsm\n"
- ".type CrashTrampolineAsm,STT_FUNC\n"
-"CrashTrampolineAsm:\n"
- ".cfi_startproc simple\n"
- ".cfi_signal_frame\n"
+ __asm__(
+ ".cfi_endproc\n"
+ ".pushsection .text.CrashTrampolineAsm\n"
+ ".type CrashTrampolineAsm,STT_FUNC\n"
+ "CrashTrampolineAsm:\n"
+ ".cfi_startproc simple\n"
+ ".cfi_signal_frame\n"
#if defined(__x86_64__)
- ".cfi_return_column rip\n"
- ".cfi_def_cfa rsp, %c[CFAOffset]\n"
- FOREACH_REGISTER(CFI_OFFSET_REG, CFI_OFFSET_NUM)
- "mov %%rsp, %%rbp\n"
- ".cfi_def_cfa_register rbp\n"
- "andq $-16, %%rsp\n"
- "call %c[StaticCrashHandler]\n"
- "ud2\n"
+ ".cfi_return_column rip\n"
+ ".cfi_def_cfa rsp, 0\n"
+ FOREACH_REGISTER(CFI_OFFSET_REG, CFI_OFFSET_NUM)
+ "call %c[StaticCrashHandler]\n"
+ "ud2\n"
#elif defined(__aarch64__)
- ".cfi_return_column 33\n"
- ".cfi_def_cfa sp, %c[CFAOffset]\n"
- FOREACH_REGISTER(CFI_OFFSET_REG, CFI_OFFSET_NUM)
- ".cfi_offset 33, %c[pc]\n"
- ".cfi_offset 30, %c[lr]\n"
- "bl %c[StaticCrashHandler]\n"
- "brk 1\n"
+ ".cfi_return_column 33\n"
+ ".cfi_def_cfa sp, 0\n"
+ FOREACH_REGISTER(CFI_OFFSET_REG, CFI_OFFSET_NUM)
+ ".cfi_offset 33, %c[pc]\n"
+ ".cfi_offset 30, %c[lr]\n"
+ "bl %c[StaticCrashHandler]\n"
+ "brk 1\n"
+#elif defined(__riscv)
+ ".cfi_return_column 64\n"
+ ".cfi_def_cfa sp, 0\n"
+ ".cfi_offset 64, %[pc]\n"
+ FOREACH_REGISTER(CFI_OFFSET_REG, CFI_OFFSET_NUM)
+ "call %c[StaticCrashHandler]\n"
+ "unimp\n"
#else
#error "Unsupported architecture for fuzzing on Fuchsia"
#endif
- ".cfi_endproc\n"
- ".size CrashTrampolineAsm, . - CrashTrampolineAsm\n"
- ".popsection\n"
- ".cfi_startproc\n"
- : // No outputs
- : FOREACH_REGISTER(ASM_OPERAND_REG, ASM_OPERAND_NUM)
+ ".cfi_endproc\n"
+ ".size CrashTrampolineAsm, . - CrashTrampolineAsm\n"
+ ".popsection\n"
+ ".cfi_startproc\n"
+ : // No outputs
+ : FOREACH_REGISTER(ASM_OPERAND_REG, ASM_OPERAND_NUM)
+#if defined(__aarch64__) || defined(__riscv)
+ ASM_OPERAND_REG(pc)
+#endif
#if defined(__aarch64__)
- ASM_OPERAND_REG(pc)
- ASM_OPERAND_REG(lr)
+ ASM_OPERAND_REG(lr)
#endif
- [StaticCrashHandler] "i" (StaticCrashHandler),
- [CFAOffset] "i" (CFAOffset));
+ [StaticCrashHandler] "i"(StaticCrashHandler));
}
-void CrashHandler(zx_handle_t *Event) {
+void CrashHandler() {
+ assert(SignalHandlerEvent != ZX_HANDLE_INVALID);
+
// This structure is used to ensure we close handles to objects we create in
// this handler.
struct ScopedHandle {
@@ -251,16 +282,30 @@ void CrashHandler(zx_handle_t *Event) {
Self, ZX_EXCEPTION_CHANNEL_DEBUGGER, &Channel.Handle),
"_zx_task_create_exception_channel");
- ExitOnErr(_zx_object_signal(*Event, 0, ZX_USER_SIGNAL_0),
+ ExitOnErr(_zx_object_signal(SignalHandlerEvent, 0, ZX_USER_SIGNAL_0),
"_zx_object_signal");
// This thread lives as long as the process in order to keep handling
// crashes. In practice, the first crashed thread to reach the end of the
// StaticCrashHandler will end the process.
while (true) {
- ExitOnErr(_zx_object_wait_one(Channel.Handle, ZX_CHANNEL_READABLE,
- ZX_TIME_INFINITE, nullptr),
- "_zx_object_wait_one");
+ zx_wait_item_t WaitItems[] = {
+ {
+ .handle = SignalHandlerEvent,
+ .waitfor = ZX_SIGNAL_HANDLE_CLOSED,
+ .pending = 0,
+ },
+ {
+ .handle = Channel.Handle,
+ .waitfor = ZX_CHANNEL_READABLE | ZX_CHANNEL_PEER_CLOSED,
+ .pending = 0,
+ },
+ };
+ auto Status = _zx_object_wait_many(
+ WaitItems, sizeof(WaitItems) / sizeof(WaitItems[0]), ZX_TIME_INFINITE);
+ if (Status != ZX_OK || (WaitItems[1].pending & ZX_CHANNEL_READABLE) == 0) {
+ break;
+ }
zx_exception_info_t ExceptionInfo;
ScopedHandle Exception;
@@ -296,14 +341,19 @@ void CrashHandler(zx_handle_t *Event) {
// onto the stack and jump into a trampoline with CFI instructions on how
// to restore it.
#if defined(__x86_64__)
- uintptr_t StackPtr = GeneralRegisters.rsp - CFAOffset;
+
+ uintptr_t StackPtr =
+ (GeneralRegisters.rsp - (128 + sizeof(GeneralRegisters))) &
+ -(uintptr_t)16;
__unsanitized_memcpy(reinterpret_cast<void *>(StackPtr), &GeneralRegisters,
sizeof(GeneralRegisters));
GeneralRegisters.rsp = StackPtr;
GeneralRegisters.rip = reinterpret_cast<zx_vaddr_t>(CrashTrampolineAsm);
-#elif defined(__aarch64__)
- uintptr_t StackPtr = GeneralRegisters.sp - CFAOffset;
+#elif defined(__aarch64__) || defined(__riscv)
+
+ uintptr_t StackPtr =
+ (GeneralRegisters.sp - sizeof(GeneralRegisters)) & -(uintptr_t)16;
__unsanitized_memcpy(reinterpret_cast<void *>(StackPtr), &GeneralRegisters,
sizeof(GeneralRegisters));
GeneralRegisters.sp = StackPtr;
@@ -327,6 +377,13 @@ void CrashHandler(zx_handle_t *Event) {
}
}
+void StopSignalHandler() {
+ _zx_handle_close(SignalHandlerEvent);
+ if (SignalHandler.joinable()) {
+ SignalHandler.join();
+ }
+}
+
} // namespace
// Platform specific functions.
@@ -356,16 +413,14 @@ void SetSignalHandler(const FuzzingOptions &Options) {
return;
// Set up the crash handler and wait until it is ready before proceeding.
- zx_handle_t Event;
- ExitOnErr(_zx_event_create(0, &Event), "_zx_event_create");
+ ExitOnErr(_zx_event_create(0, &SignalHandlerEvent), "_zx_event_create");
- std::thread T(CrashHandler, &Event);
- zx_status_t Status =
- _zx_object_wait_one(Event, ZX_USER_SIGNAL_0, ZX_TIME_INFINITE, nullptr);
- _zx_handle_close(Event);
+ SignalHandler = std::thread(CrashHandler);
+ zx_status_t Status = _zx_object_wait_one(SignalHandlerEvent, ZX_USER_SIGNAL_0,
+ ZX_TIME_INFINITE, nullptr);
ExitOnErr(Status, "_zx_object_wait_one");
- T.detach();
+ std::atexit(StopSignalHandler);
}
void SleepSeconds(int Seconds) {
@@ -545,6 +600,15 @@ void DiscardOutput(int Fd) {
dup2(nullfd, Fd);
}
+size_t PageSize() {
+ static size_t PageSizeCached = _zx_system_get_page_size();
+ return PageSizeCached;
+}
+
+void SetThreadName(std::thread &thread, const std::string &name) {
+ // TODO ?
+}
+
} // namespace fuzzer
#endif // LIBFUZZER_FUCHSIA
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilLinux.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilLinux.cpp
index 981f9a8b429f..5729448b0beb 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilLinux.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilLinux.cpp
@@ -11,7 +11,9 @@
#if LIBFUZZER_LINUX || LIBFUZZER_NETBSD || LIBFUZZER_FREEBSD || \
LIBFUZZER_EMSCRIPTEN
#include "FuzzerCommand.h"
+#include "FuzzerInternal.h"
+#include <signal.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
@@ -25,6 +27,8 @@ int ExecuteCommand(const Command &Cmd) {
int exit_code = system(CmdLine.c_str());
if (WIFEXITED(exit_code))
return WEXITSTATUS(exit_code);
+ if (WIFSIGNALED(exit_code) && WTERMSIG(exit_code) == SIGINT)
+ return Fuzzer::InterruptExitCode();
return exit_code;
}
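
system() returns the child's raw wait status, so the parent can now distinguish "job failed" from "the user hit Ctrl-C in the child". A self-contained sketch of the same pattern; InterruptExitCode() is libFuzzer-internal, so a placeholder constant stands in for it here:

    #include <csignal>
    #include <cstdlib>
    #include <sys/wait.h>

    int RunJob(const char *CmdLine) {
      int rc = std::system(CmdLine);
      if (WIFEXITED(rc))
        return WEXITSTATUS(rc);
      if (WIFSIGNALED(rc) && WTERMSIG(rc) == SIGINT)
        return 72;  // Placeholder for Fuzzer::InterruptExitCode().
      return rc;
    }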
@@ -36,6 +40,14 @@ void DiscardOutput(int Fd) {
fclose(Temp);
}
+void SetThreadName(std::thread &thread, const std::string &name) {
+#if LIBFUZZER_LINUX || LIBFUZZER_FREEBSD
+ (void)pthread_setname_np(thread.native_handle(), name.c_str());
+#elif LIBFUZZER_NETBSD
+ (void)pthread_set_name_np(thread.native_handle(), "%s", name.c_str());
+#endif
+}
+
} // namespace fuzzer
#endif
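
Usage note: on Linux the name lands in the thread's comm field, and glibc's pthread_setname_np rejects names longer than 15 characters with ERANGE, which is why the return value is discarded above. A minimal call site (WorkerLoop is hypothetical):

    std::thread Worker(WorkerLoop);
    SetThreadName(Worker, "fuzzer-worker");  // visible in /proc/<pid>/task/*/comm
    Worker.join();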
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilPosix.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilPosix.cpp
index 0446d732a9ec..392c1e5be4ee 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilPosix.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilPosix.cpp
@@ -183,6 +183,11 @@ std::string SearchRegexCmd(const std::string &Regex) {
return "grep '" + Regex + "'";
}
+size_t PageSize() {
+ static size_t PageSizeCached = sysconf(_SC_PAGESIZE);
+ return PageSizeCached;
+}
+
} // namespace fuzzer
#endif // LIBFUZZER_POSIX
diff --git a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilWindows.cpp b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilWindows.cpp
index 1a54bb569eca..71770166805f 100644
--- a/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilWindows.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/fuzzer/FuzzerUtilWindows.cpp
@@ -204,7 +204,7 @@ const void *SearchMemory(const void *Data, size_t DataLen, const void *Patt,
}
std::string DisassembleCmd(const std::string &FileName) {
- Vector<std::string> command_vector;
+ std::vector<std::string> command_vector;
command_vector.push_back("dumpbin /summary > nul");
if (ExecuteCommand(Command(command_vector)) == 0)
return "dumpbin /disasm " + FileName;
@@ -224,6 +224,20 @@ void DiscardOutput(int Fd) {
fclose(Temp);
}
+size_t PageSize() {
+ static size_t PageSizeCached = []() -> size_t {
+ SYSTEM_INFO si;
+ GetSystemInfo(&si);
+ return si.dwPageSize;
+ }();
+ return PageSizeCached;
+}
+
+void SetThreadName(std::thread &thread, const std::string &name) {
+  // TODO: convert the name to UTF-16 and call SetThreadDescription?
+}
+
} // namespace fuzzer
#endif // LIBFUZZER_WINDOWS
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/common.cpp b/contrib/llvm-project/compiler-rt/lib/gwp_asan/common.cpp
index b0f6c58bf496..790a331aa66c 100644
--- a/contrib/llvm-project/compiler-rt/lib/gwp_asan/common.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/common.cpp
@@ -105,4 +105,8 @@ size_t AllocatorState::getNearestSlot(uintptr_t Ptr) const {
return addrToSlot(this, Ptr + PageSize); // Round up.
}
+uintptr_t AllocatorState::internallyDetectedErrorFaultAddress() const {
+ return GuardedPagePoolEnd - 0x10;
+}
+
} // namespace gwp_asan
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/common.h b/contrib/llvm-project/compiler-rt/lib/gwp_asan/common.h
index 7ce367e3ffe9..df451021d341 100644
--- a/contrib/llvm-project/compiler-rt/lib/gwp_asan/common.h
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/common.h
@@ -19,7 +19,28 @@
#include <stdint.h>
namespace gwp_asan {
-enum class Error {
+
+// Magic header that resides in the AllocatorState so that GWP-ASan bugreports
+// can be understood by tools at different versions. Out-of-process crash
+// handlers, like crashpad on Fuchsia, take the raw contents of the
+// AllocationMetatada array and the AllocatorState, and shove them into the
+// minidump. Online unpacking of these structs needs to know from which version
+// of GWP-ASan it's extracting the information, as the structures are not
+// stable.
+struct AllocatorVersionMagic {
+ // The values are copied into the structure at runtime, during
+ // `GuardedPoolAllocator::init()` so that GWP-ASan remains completely in the
+ // `.bss` segment.
+ static constexpr uint8_t kAllocatorVersionMagic[4] = {'A', 'S', 'A', 'N'};
+ uint8_t Magic[4] = {};
+ // Update the version number when the AllocatorState or AllocationMetadata
+ // change.
+ static constexpr uint16_t kAllocatorVersion = 2;
+ uint16_t Version = 0;
+ uint16_t Reserved = 0;
+};
+
+enum class Error : uint8_t {
UNKNOWN,
USE_AFTER_FREE,
DOUBLE_FREE,
@@ -77,6 +98,12 @@ struct AllocationMetadata {
// Whether this allocation has been deallocated yet.
bool IsDeallocated = false;
+
+  // In recoverable mode, whether this allocation has had a crash associated
+  // with it. This has side effects: the allocation will permanently occupy a
+  // slot and will never have another crash reported from it.
+ bool HasCrashed = false;
};
// This holds the state that's shared between the GWP-ASan allocator and the
@@ -84,6 +111,7 @@ struct AllocationMetadata {
// set of information required for understanding a GWP-ASan crash.
struct AllocatorState {
constexpr AllocatorState() {}
+ AllocatorVersionMagic VersionMagic{};
// Returns whether the provided pointer is a current sampled allocation that
// is owned by this pool.
@@ -105,6 +133,11 @@ struct AllocatorState {
// must be within memory owned by this pool, else the result is undefined.
bool isGuardPage(uintptr_t Ptr) const;
+ // Returns the address that's used by __gwp_asan_get_internal_crash_address()
+ // and GPA::raiseInternallyDetectedError() to communicate that the SEGV in
+ // question comes from an internally-detected error.
+ uintptr_t internallyDetectedErrorFaultAddress() const;
+
// The number of guarded slots that this pool holds.
size_t MaxSimultaneousAllocations = 0;
@@ -123,5 +156,38 @@ struct AllocatorState {
uintptr_t FailureAddress = 0;
};
+// Below are various compile-time checks that the layout of the internal
+// GWP-ASan structures are undisturbed. If they are disturbed, the version magic
+// number needs to be increased by one, and the asserts need to be updated.
+// Out-of-process crash handlers, like breakpad/crashpad, may copy the internal
+// GWP-ASan structures into a minidump for offline reconstruction of the crash.
+// In order to accomplish this, the offline reconstructor needs to know the
+// version of GWP-ASan internal structures that it's unpacking (along with the
+// architecture-specific layout info, which is left as an exercise to the crash
+// handler).
+static_assert(offsetof(AllocatorState, VersionMagic) == 0, "");
+static_assert(sizeof(AllocatorVersionMagic) == 8, "");
+#if defined(__x86_64__)
+static_assert(sizeof(AllocatorState) == 56, "");
+static_assert(offsetof(AllocatorState, FailureAddress) == 48, "");
+static_assert(sizeof(AllocationMetadata) == 568, "");
+static_assert(offsetof(AllocationMetadata, IsDeallocated) == 560, "");
+#elif defined(__aarch64__)
+static_assert(sizeof(AllocatorState) == 56, "");
+static_assert(offsetof(AllocatorState, FailureAddress) == 48, "");
+static_assert(sizeof(AllocationMetadata) == 568, "");
+static_assert(offsetof(AllocationMetadata, IsDeallocated) == 560, "");
+#elif defined(__i386__)
+static_assert(sizeof(AllocatorState) == 32, "");
+static_assert(offsetof(AllocatorState, FailureAddress) == 28, "");
+static_assert(sizeof(AllocationMetadata) == 548, "");
+static_assert(offsetof(AllocationMetadata, IsDeallocated) == 544, "");
+#elif defined(__arm__)
+static_assert(sizeof(AllocatorState) == 32, "");
+static_assert(offsetof(AllocatorState, FailureAddress) == 28, "");
+static_assert(sizeof(AllocationMetadata) == 560, "");
+static_assert(offsetof(AllocationMetadata, IsDeallocated) == 552, "");
+#endif // defined($ARCHITECTURE)
+
} // namespace gwp_asan
#endif // GWP_ASAN_COMMON_H_
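
A sketch of the consumer side that the magic header enables, gated on the same constants and layout asserts as above (the out-of-process decoding plumbing around it is omitted):

    #include <cstring>

    bool CanDecodeState(const gwp_asan::AllocatorState &State) {
      using VM = gwp_asan::AllocatorVersionMagic;
      // Reject pre-versioning dumps (zeroed magic) and foreign data outright.
      if (std::memcmp(State.VersionMagic.Magic, VM::kAllocatorVersionMagic,
                      sizeof(VM::kAllocatorVersionMagic)) != 0)
        return false;
      return State.VersionMagic.Version == VM::kAllocatorVersion;
    }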
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/crash_handler.cpp b/contrib/llvm-project/compiler-rt/lib/gwp_asan/crash_handler.cpp
index 6b4c39edb294..555365c6e6f4 100644
--- a/contrib/llvm-project/compiler-rt/lib/gwp_asan/crash_handler.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/crash_handler.cpp
@@ -31,7 +31,15 @@ bool __gwp_asan_error_is_mine(const gwp_asan::AllocatorState *State,
}
uintptr_t
-__gwp_asan_get_internal_crash_address(const gwp_asan::AllocatorState *State) {
+__gwp_asan_get_internal_crash_address(const gwp_asan::AllocatorState *State,
+ uintptr_t ErrorPtr) {
+ // There can be a race between internally- and externally-raised faults. The
+ // fault address from the signal handler is used to discriminate whether it's
+ // internally- or externally-raised, and the pool maintains a special page at
+ // the end of the GuardedPagePool specifically for the internally-raised
+ // faults.
+ if (ErrorPtr != State->internallyDetectedErrorFaultAddress())
+ return 0u;
return State->FailureAddress;
}
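
In practice the signal handler calls this with the faulting address from siginfo_t; a nonzero result both identifies the crash as internally raised and supplies the pointer to report on. A sketch of that call pattern:

    #include <csignal>
    #include <cstdint>

    uintptr_t PickErrorPtr(const gwp_asan::AllocatorState *State,
                           const siginfo_t *Info) {
      auto FaultAddr = reinterpret_cast<uintptr_t>(Info->si_addr);
      uintptr_t Internal =
          __gwp_asan_get_internal_crash_address(State, FaultAddr);
      // Nonzero: double/invalid free; report on the pointer handed to free().
      // Zero: a real bad access (or not GWP-ASan at all); report on FaultAddr.
      return Internal ? Internal : FaultAddr;
    }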
@@ -52,7 +60,14 @@ __gwp_asan_diagnose_error(const gwp_asan::AllocatorState *State,
if (State->FailureType != Error::UNKNOWN)
return State->FailureType;
- // Let's try and figure out what the source of this error is.
+ // Check for use-after-free.
+ if (addrToMetadata(State, Metadata, ErrorPtr)->IsDeallocated)
+ return Error::USE_AFTER_FREE;
+
+ // Check for buffer-overflow. Because of allocation alignment or left/right
+ // page placement, we can have buffer-overflows that don't touch a guarded
+ // page, but these are not possible to detect unless it's also a
+ // use-after-free, which is handled above.
if (State->isGuardPage(ErrorPtr)) {
size_t Slot = State->getNearestSlot(ErrorPtr);
const AllocationMetadata *SlotMeta =
@@ -67,13 +82,6 @@ __gwp_asan_diagnose_error(const gwp_asan::AllocatorState *State,
return Error::BUFFER_UNDERFLOW;
}
- // Access wasn't a guard page, check for use-after-free.
- const AllocationMetadata *SlotMeta =
- addrToMetadata(State, Metadata, ErrorPtr);
- if (SlotMeta->IsDeallocated) {
- return Error::USE_AFTER_FREE;
- }
-
// If we have reached here, the error is still unknown.
return Error::UNKNOWN;
}
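
The reordering matters when both bugs overlap. A worked scenario (hypothetical values, assuming the allocation was sampled by GWP-ASan and placed right-aligned in its slot):

    #include <cstdlib>

    void Scenario() {
      char *P = static_cast<char *>(malloc(512));
      free(P);
      char C = P[600];  // faults on the guard page right of the freed slot
      (void)C;
      // Old order: the guard-page check ran first => BUFFER_OVERFLOW.
      // New order: IsDeallocated wins => USE_AFTER_FREE, and the report header
      // (segv_handler_posix.cpp below) appends the combined
      // overflow-on-freed-allocation warning.
    }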
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/crash_handler.h b/contrib/llvm-project/compiler-rt/lib/gwp_asan/crash_handler.h
index 4a95069dac58..1ff60edea47d 100644
--- a/contrib/llvm-project/compiler-rt/lib/gwp_asan/crash_handler.h
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/crash_handler.h
@@ -46,12 +46,18 @@ __gwp_asan_diagnose_error(const gwp_asan::AllocatorState *State,
const gwp_asan::AllocationMetadata *Metadata,
uintptr_t ErrorPtr);
-// For internally-detected errors (double free, invalid free), this function
-// returns the pointer that the error occurred at. If the error is unrelated to
-// GWP-ASan, or if the error was caused by a non-internally detected failure,
-// this function returns zero.
+// This function, provided the fault address from the signal handler, returns
+// the following values:
+// 1. If the crash was caused by an internally-detected error (invalid free,
+// double free), this function returns the pointer that was used for the
+// internally-detected bad operation (i.e. the pointer given to free()).
+// 2. For externally-detected crashes (use-after-free, buffer-overflow), this
+// function returns zero.
+// 3. If GWP-ASan wasn't responsible for the crash at all, this function also
+// returns zero.
uintptr_t
-__gwp_asan_get_internal_crash_address(const gwp_asan::AllocatorState *State);
+__gwp_asan_get_internal_crash_address(const gwp_asan::AllocatorState *State,
+ uintptr_t ErrorPtr);
// Returns a pointer to the metadata for the allocation that's responsible for
// the crash. This metadata should not be dereferenced directly due to API
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp b/contrib/llvm-project/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp
index 8ce5fc9c4dfc..9017ab7cf7ac 100644
--- a/contrib/llvm-project/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp
@@ -8,6 +8,7 @@
#include "gwp_asan/guarded_pool_allocator.h"
+#include "gwp_asan/crash_handler.h"
#include "gwp_asan/options.h"
#include "gwp_asan/utilities.h"
@@ -59,6 +60,13 @@ void GuardedPoolAllocator::init(const options::Options &Opts) {
SingletonPtr = this;
Backtrace = Opts.Backtrace;
+ State.VersionMagic = {{AllocatorVersionMagic::kAllocatorVersionMagic[0],
+ AllocatorVersionMagic::kAllocatorVersionMagic[1],
+ AllocatorVersionMagic::kAllocatorVersionMagic[2],
+ AllocatorVersionMagic::kAllocatorVersionMagic[3]},
+ AllocatorVersionMagic::kAllocatorVersion,
+ 0};
+
State.MaxSimultaneousAllocations = Opts.MaxSimultaneousAllocations;
const size_t PageSize = getPlatformPageSize();
@@ -66,8 +74,15 @@ void GuardedPoolAllocator::init(const options::Options &Opts) {
assert((PageSize & (PageSize - 1)) == 0);
State.PageSize = PageSize;
+ // Number of pages required =
+ // + MaxSimultaneousAllocations * maximumAllocationSize (N pages per slot)
+ // + MaxSimultaneousAllocations (one guard on the left side of each slot)
+ // + 1 (an extra guard page at the end of the pool, on the right side)
+ // + 1 (an extra page that's used for reporting internally-detected crashes,
+ // like double free and invalid free, to the signal handler; see
+ // raiseInternallyDetectedError() for more info)
size_t PoolBytesRequired =
- PageSize * (1 + State.MaxSimultaneousAllocations) +
+ PageSize * (2 + State.MaxSimultaneousAllocations) +
State.MaxSimultaneousAllocations * State.maximumAllocationSize();
assert(PoolBytesRequired % PageSize == 0);
void *GuardedPoolMemory = reserveGuardedPool(PoolBytesRequired);
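
Worked numbers for the new "+2" (assuming 4 KiB pages, page-sized slots, and MaxSimultaneousAllocations == 16):

    //   slots:              16 * 4096 = 65536
    //   left guard pages:   16 * 4096 = 65536   (one per slot)
    //   right guard page:    1 * 4096 =  4096   (end of the pool)
    //   internal-SEGV page:  1 * 4096 =  4096   (the page added by this change)
    //   total = 4096 * (2 + 16) + 16 * 4096 = 139264 bytes = 34 pages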
@@ -251,22 +266,60 @@ void *GuardedPoolAllocator::allocate(size_t Size, size_t Alignment) {
return reinterpret_cast<void *>(UserPtr);
}
-void GuardedPoolAllocator::trapOnAddress(uintptr_t Address, Error E) {
+void GuardedPoolAllocator::raiseInternallyDetectedError(uintptr_t Address,
+ Error E) {
+ // Disable the allocator before setting the internal failure state. In
+ // non-recoverable mode, the allocator will be permanently disabled, and so
+ // things will be accessed without locks.
+ disable();
+
+ // Races between internally- and externally-raised faults can happen. Right
+ // now, in this thread we've locked the allocator in order to raise an
+ // internally-detected fault, and another thread could SIGSEGV to raise an
+ // externally-detected fault. What will happen is that the other thread will
+ // wait in the signal handler, as we hold the allocator's locks from the
+ // disable() above. We'll trigger the signal handler by touching the
+ // internal-signal-raising address below, and the signal handler from our
+ // thread will get to run first as we will continue to hold the allocator
+ // locks until the enable() at the end of this function. Be careful though, if
+ // this thread receives another SIGSEGV after the disable() above, but before
+ // touching the internal-signal-raising address below, then this thread will
+ // get an "externally-raised" SIGSEGV while *also* holding the allocator
+ // locks, which means this thread's signal handler will deadlock. This could
+ // be resolved with a re-entrant lock, but asking platforms to implement this
+ // seems unnecessary given the only way to get a SIGSEGV in this critical
+ // section is either a memory safety bug in the couple lines of code below (be
+ // careful!), or someone outside uses `kill(this_thread, SIGSEGV)`, which
+ // really shouldn't happen.
+
State.FailureType = E;
State.FailureAddress = Address;
- // Raise a SEGV by touching first guard page.
- volatile char *p = reinterpret_cast<char *>(State.GuardedPagePool);
+ // Raise a SEGV by touching a specific address that identifies to the crash
+ // handler that this is an internally-raised fault. Changing this address?
+ // Don't forget to update __gwp_asan_get_internal_crash_address.
+ volatile char *p =
+ reinterpret_cast<char *>(State.internallyDetectedErrorFaultAddress());
*p = 0;
- // Normally, would be __builtin_unreachable(), but because of
- // https://bugs.llvm.org/show_bug.cgi?id=47480, unreachable will DCE the
- // volatile store above, even though it has side effects.
- __builtin_trap();
-}
-void GuardedPoolAllocator::stop() {
- getThreadLocals()->RecursiveGuard = true;
- PoolMutex.tryLock();
+ // This should never be reached in non-recoverable mode. Ensure that the
+ // signal handler called handleRecoverablePostCrashReport(), which was
+ // responsible for re-setting these fields.
+ assert(State.FailureType == Error::UNKNOWN);
+ assert(State.FailureAddress == 0u);
+
+ // In recoverable mode, the signal handler (after dumping the crash) marked
+ // the page containing the InternalFaultSegvAddress as read/writeable, to
+ // allow the second touch to succeed after returning from the signal handler.
+ // Now, we need to mark the page as non-read/write-able again, so future
+ // internal faults can be raised.
+ deallocateInGuardedPool(
+ reinterpret_cast<void *>(getPageAddr(
+ State.internallyDetectedErrorFaultAddress(), State.PageSize)),
+ State.PageSize);
+
+ // And now we're done with patching ourselves back up, enable the allocator.
+ enable();
}
void GuardedPoolAllocator::deallocate(void *Ptr) {
@@ -275,19 +328,25 @@ void GuardedPoolAllocator::deallocate(void *Ptr) {
size_t Slot = State.getNearestSlot(UPtr);
uintptr_t SlotStart = State.slotToAddr(Slot);
AllocationMetadata *Meta = addrToMetadata(UPtr);
+
+  // If this allocation is responsible for a crash, never recycle it. Turn the
+ // deallocate() call into a no-op.
+ if (Meta->HasCrashed)
+ return;
+
if (Meta->Addr != UPtr) {
- // If multiple errors occur at the same time, use the first one.
- ScopedLock L(PoolMutex);
- trapOnAddress(UPtr, Error::INVALID_FREE);
+ raiseInternallyDetectedError(UPtr, Error::INVALID_FREE);
+ return;
+ }
+ if (Meta->IsDeallocated) {
+ raiseInternallyDetectedError(UPtr, Error::DOUBLE_FREE);
+ return;
}
// Intentionally scope the mutex here, so that other threads can access the
// pool during the expensive markInaccessible() call.
{
ScopedLock L(PoolMutex);
- if (Meta->IsDeallocated) {
- trapOnAddress(UPtr, Error::DOUBLE_FREE);
- }
// Ensure that the deallocation is recorded before marking the page as
// inaccessible. Otherwise, a racy use-after-free will have inconsistent
@@ -311,6 +370,62 @@ void GuardedPoolAllocator::deallocate(void *Ptr) {
freeSlot(Slot);
}
+// Thread-compatible, protected by PoolMutex.
+static bool PreviousRecursiveGuard;
+
+void GuardedPoolAllocator::preCrashReport(void *Ptr) {
+ assert(pointerIsMine(Ptr) && "Pointer is not mine!");
+ uintptr_t InternalCrashAddr = __gwp_asan_get_internal_crash_address(
+ &State, reinterpret_cast<uintptr_t>(Ptr));
+ if (!InternalCrashAddr)
+ disable();
+
+ // If something in the signal handler calls malloc() while dumping the
+ // GWP-ASan report (e.g. backtrace_symbols()), make sure that GWP-ASan doesn't
+ // service that allocation. `PreviousRecursiveGuard` is protected by the
+ // allocator locks taken in disable(), either explicitly above for
+ // externally-raised errors, or implicitly in raiseInternallyDetectedError()
+ // for internally-detected errors.
+ PreviousRecursiveGuard = getThreadLocals()->RecursiveGuard;
+ getThreadLocals()->RecursiveGuard = true;
+}
+
+void GuardedPoolAllocator::postCrashReportRecoverableOnly(void *SignalPtr) {
+ uintptr_t SignalUPtr = reinterpret_cast<uintptr_t>(SignalPtr);
+ uintptr_t InternalCrashAddr =
+ __gwp_asan_get_internal_crash_address(&State, SignalUPtr);
+ uintptr_t ErrorUptr = InternalCrashAddr ?: SignalUPtr;
+
+ AllocationMetadata *Metadata = addrToMetadata(ErrorUptr);
+ Metadata->HasCrashed = true;
+
+ allocateInGuardedPool(
+ reinterpret_cast<void *>(getPageAddr(SignalUPtr, State.PageSize)),
+ State.PageSize);
+
+ // Clear the internal state in order to not confuse the crash handler if a
+ // use-after-free or buffer-overflow comes from a different allocation in the
+ // future.
+ if (InternalCrashAddr) {
+ State.FailureType = Error::UNKNOWN;
+ State.FailureAddress = 0;
+ }
+
+ size_t Slot = State.getNearestSlot(ErrorUptr);
+ // If the slot is available, remove it permanently.
+ for (size_t i = 0; i < FreeSlotsLength; ++i) {
+ if (FreeSlots[i] == Slot) {
+ FreeSlots[i] = FreeSlots[FreeSlotsLength - 1];
+ FreeSlotsLength -= 1;
+ break;
+ }
+ }
+
+ getThreadLocals()->RecursiveGuard = PreviousRecursiveGuard;
+ if (!InternalCrashAddr)
+ enable();
+}
+
size_t GuardedPoolAllocator::getSize(const void *Ptr) {
assert(pointerIsMine(Ptr));
ScopedLock L(PoolMutex);
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h b/contrib/llvm-project/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h
index 6d2ce2576c13..a02a35321c2b 100644
--- a/contrib/llvm-project/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h
@@ -20,6 +20,15 @@
#include <stddef.h>
#include <stdint.h>
// IWYU pragma: no_include <__stddef_max_align_t.h>
+// IWYU pragma: no_include <__stddef_null.h>
+// IWYU pragma: no_include <__stddef_nullptr_t.h>
+// IWYU pragma: no_include <__stddef_offsetof.h>
+// IWYU pragma: no_include <__stddef_ptrdiff_t.h>
+// IWYU pragma: no_include <__stddef_rsize_t.h>
+// IWYU pragma: no_include <__stddef_size_t.h>
+// IWYU pragma: no_include <__stddef_unreachable.h>
+// IWYU pragma: no_include <__stddef_wchar_t.h>
+// IWYU pragma: no_include <__stddef_wint_t.h>
namespace gwp_asan {
// This class is the primary implementation of the allocator portion of GWP-
@@ -67,11 +76,6 @@ public:
// allocate.
void iterate(void *Base, size_t Size, iterate_callback Cb, void *Arg);
- // This function is used to signal the allocator to indefinitely stop
- // functioning, as a crash has occurred. This stops the allocator from
- // servicing any further allocations permanently.
- void stop();
-
// Return whether the allocation should be randomly chosen for sampling.
GWP_ASAN_ALWAYS_INLINE bool shouldSample() {
// NextSampleCounter == 0 means we "should regenerate the counter".
@@ -115,6 +119,12 @@ public:
// Returns a pointer to the AllocatorState region.
const AllocatorState *getAllocatorState() const { return &State; }
+ // Functions that the signal handler is responsible for calling, while
+ // providing the SEGV pointer, prior to dumping the crash, and after dumping
+ // the crash (in recoverable mode only).
+ void preCrashReport(void *Ptr);
+ void postCrashReportRecoverableOnly(void *Ptr);
+
// Exposed as protected for testing.
protected:
// Returns the actual allocation size required to service an allocation with
@@ -185,7 +195,7 @@ private:
// Raise a SEGV and set the corresponding fields in the Allocator's State in
// order to tell the crash handler what happened. Used when errors are
// detected internally (Double Free, Invalid Free).
- void trapOnAddress(uintptr_t Address, Error E);
+ void raiseInternallyDetectedError(uintptr_t Address, Error E);
static GuardedPoolAllocator *getSingleton();
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/backtrace_sanitizer_common.cpp b/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/backtrace_sanitizer_common.cpp
index e6cce86e3b7b..f8b9cbdb7935 100644
--- a/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/backtrace_sanitizer_common.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/backtrace_sanitizer_common.cpp
@@ -72,7 +72,9 @@ static void PrintBacktrace(uintptr_t *Trace, size_t TraceLength,
return;
}
- StackTrace.Print();
+ __sanitizer::InternalScopedString buffer;
+ StackTrace.PrintTo(&buffer);
+ Printf("%s\n", buffer.data());
}
} // anonymous namespace
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/segv_handler.h b/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/segv_handler.h
index 87d9fe1dff17..72105ded7d55 100644
--- a/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/segv_handler.h
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/segv_handler.h
@@ -23,7 +23,8 @@ namespace segv_handler {
// before this function.
void installSignalHandlers(gwp_asan::GuardedPoolAllocator *GPA, Printf_t Printf,
gwp_asan::backtrace::PrintBacktrace_t PrintBacktrace,
- gwp_asan::backtrace::SegvBacktrace_t SegvBacktrace);
+ gwp_asan::backtrace::SegvBacktrace_t SegvBacktrace,
+ bool Recoverable = false);
// Uninstall the signal handlers, test-only.
void uninstallSignalHandlers();
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/segv_handler_fuchsia.cpp b/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/segv_handler_fuchsia.cpp
index 966d7d0bd996..f5ff35e27ac2 100644
--- a/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/segv_handler_fuchsia.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/segv_handler_fuchsia.cpp
@@ -15,7 +15,8 @@ namespace segv_handler {
void installSignalHandlers(gwp_asan::GuardedPoolAllocator * /* GPA */,
Printf_t /* Printf */,
backtrace::PrintBacktrace_t /* PrintBacktrace */,
- backtrace::SegvBacktrace_t /* SegvBacktrace */) {}
+ backtrace::SegvBacktrace_t /* SegvBacktrace */,
+ bool /* Recoverable */) {}
void uninstallSignalHandlers() {}
} // namespace segv_handler
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/segv_handler_posix.cpp b/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/segv_handler_posix.cpp
index 5c9bb9f3a2e7..198db5cb074c 100644
--- a/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/segv_handler_posix.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/optional/segv_handler_posix.cpp
@@ -47,15 +47,12 @@ void printHeader(Error E, uintptr_t AccessPtr,
// appended to a log file automatically per Printf() call.
constexpr size_t kDescriptionBufferLen = 128;
char DescriptionBuffer[kDescriptionBufferLen] = "";
+
+ bool AccessWasInBounds = false;
if (E != Error::UNKNOWN && Metadata != nullptr) {
uintptr_t Address = __gwp_asan_get_allocation_address(Metadata);
size_t Size = __gwp_asan_get_allocation_size(Metadata);
- if (E == Error::USE_AFTER_FREE) {
- snprintf(DescriptionBuffer, kDescriptionBufferLen,
- "(%zu byte%s into a %zu-byte allocation at 0x%zx) ",
- AccessPtr - Address, (AccessPtr - Address == 1) ? "" : "s", Size,
- Address);
- } else if (AccessPtr < Address) {
+ if (AccessPtr < Address) {
snprintf(DescriptionBuffer, kDescriptionBufferLen,
"(%zu byte%s to the left of a %zu-byte allocation at 0x%zx) ",
Address - AccessPtr, (Address - AccessPtr == 1) ? "" : "s", Size,
@@ -65,9 +62,15 @@ void printHeader(Error E, uintptr_t AccessPtr,
"(%zu byte%s to the right of a %zu-byte allocation at 0x%zx) ",
AccessPtr - Address, (AccessPtr - Address == 1) ? "" : "s", Size,
Address);
- } else {
+ } else if (E == Error::DOUBLE_FREE) {
snprintf(DescriptionBuffer, kDescriptionBufferLen,
"(a %zu-byte allocation) ", Size);
+ } else {
+ AccessWasInBounds = true;
+ snprintf(DescriptionBuffer, kDescriptionBufferLen,
+ "(%zu byte%s into a %zu-byte allocation at 0x%zx) ",
+ AccessPtr - Address, (AccessPtr - Address == 1) ? "" : "s", Size,
+ Address);
}
}
@@ -81,10 +84,27 @@ void printHeader(Error E, uintptr_t AccessPtr,
else
snprintf(ThreadBuffer, kThreadBufferLen, "%" PRIu64, ThreadID);
- Printf("%s at 0x%zx %sby thread %s here:\n", gwp_asan::ErrorToString(E),
- AccessPtr, DescriptionBuffer, ThreadBuffer);
+ const char *OutOfBoundsAndUseAfterFreeWarning = "";
+ if (E == Error::USE_AFTER_FREE && !AccessWasInBounds) {
+ OutOfBoundsAndUseAfterFreeWarning =
+ " (warning: buffer overflow/underflow detected on a free()'d "
+ "allocation. This either means you have a buffer-overflow and a "
+ "use-after-free at the same time, or you have a long-lived "
+ "use-after-free bug where the allocation/deallocation metadata below "
+ "has already been overwritten and is likely bogus)";
+ }
+
+ Printf("%s%s at 0x%zx %sby thread %s here:\n", gwp_asan::ErrorToString(E),
+ OutOfBoundsAndUseAfterFreeWarning, AccessPtr, DescriptionBuffer,
+ ThreadBuffer);
}
+static bool HasReportedBadPoolAccess = false;
+static const char *kUnknownCrashText =
+ "GWP-ASan cannot provide any more information about this error. This may "
+ "occur due to a wild memory access into the GWP-ASan pool, or an "
+ "overflow/underflow that is > 512B in length.\n";
+
void dumpReport(uintptr_t ErrorPtr, const gwp_asan::AllocatorState *State,
const gwp_asan::AllocationMetadata *Metadata,
SegvBacktrace_t SegvBacktrace, Printf_t Printf,
@@ -92,29 +112,45 @@ void dumpReport(uintptr_t ErrorPtr, const gwp_asan::AllocatorState *State,
assert(State && "dumpReport missing Allocator State.");
assert(Metadata && "dumpReport missing Metadata.");
assert(Printf && "dumpReport missing Printf.");
+ assert(__gwp_asan_error_is_mine(State, ErrorPtr) &&
+ "dumpReport() called on a non-GWP-ASan error.");
+
+ uintptr_t InternalErrorPtr =
+ __gwp_asan_get_internal_crash_address(State, ErrorPtr);
+ if (InternalErrorPtr)
+ ErrorPtr = InternalErrorPtr;
- if (!__gwp_asan_error_is_mine(State, ErrorPtr))
+ const gwp_asan::AllocationMetadata *AllocMeta =
+ __gwp_asan_get_metadata(State, Metadata, ErrorPtr);
+
+ if (AllocMeta == nullptr) {
+ if (HasReportedBadPoolAccess) return;
+ HasReportedBadPoolAccess = true;
+ Printf("*** GWP-ASan detected a memory error ***\n");
+ ScopedEndOfReportDecorator Decorator(Printf);
+ Printf(kUnknownCrashText);
+ return;
+ }
+
+ // It's unusual for a signal handler to be invoked multiple times for the same
+ // allocation, but it's possible in various scenarios, like:
+ // 1. A double-free or invalid-free was invoked in one thread at the same
+ // time as a buffer-overflow or use-after-free in another thread, or
+ // 2. Two threads do a use-after-free or buffer-overflow at the same time.
+ // In these instances, we've already dumped a report for this allocation, so
+ // skip dumping this issue as well.
+ if (AllocMeta->HasCrashed)
return;
Printf("*** GWP-ASan detected a memory error ***\n");
ScopedEndOfReportDecorator Decorator(Printf);
- uintptr_t InternalErrorPtr = __gwp_asan_get_internal_crash_address(State);
- if (InternalErrorPtr != 0u)
- ErrorPtr = InternalErrorPtr;
-
Error E = __gwp_asan_diagnose_error(State, Metadata, ErrorPtr);
-
if (E == Error::UNKNOWN) {
- Printf("GWP-ASan cannot provide any more information about this error. "
- "This may occur due to a wild memory access into the GWP-ASan pool, "
- "or an overflow/underflow that is > 512B in length.\n");
+ Printf(kUnknownCrashText);
return;
}
- const gwp_asan::AllocationMetadata *AllocMeta =
- __gwp_asan_get_metadata(State, Metadata, ErrorPtr);
-
// Print the error header.
printHeader(E, ErrorPtr, AllocMeta, Printf);
@@ -126,9 +162,6 @@ void dumpReport(uintptr_t ErrorPtr, const gwp_asan::AllocatorState *State,
PrintBacktrace(Trace, TraceLength, Printf);
- if (AllocMeta == nullptr)
- return;
-
// Maybe print the deallocation trace.
if (__gwp_asan_is_deallocated(AllocMeta)) {
uint64_t ThreadID = __gwp_asan_get_deallocation_thread_id(AllocMeta);
@@ -154,23 +187,33 @@ void dumpReport(uintptr_t ErrorPtr, const gwp_asan::AllocatorState *State,
struct sigaction PreviousHandler;
bool SignalHandlerInstalled;
+bool RecoverableSignal;
gwp_asan::GuardedPoolAllocator *GPAForSignalHandler;
Printf_t PrintfForSignalHandler;
PrintBacktrace_t PrintBacktraceForSignalHandler;
SegvBacktrace_t BacktraceForSignalHandler;
static void sigSegvHandler(int sig, siginfo_t *info, void *ucontext) {
- if (GPAForSignalHandler) {
- GPAForSignalHandler->stop();
+ const gwp_asan::AllocatorState *State =
+ GPAForSignalHandler->getAllocatorState();
+ void *FaultAddr = info->si_addr;
+ uintptr_t FaultAddrUPtr = reinterpret_cast<uintptr_t>(FaultAddr);
+
+ if (__gwp_asan_error_is_mine(State, FaultAddrUPtr)) {
+ GPAForSignalHandler->preCrashReport(FaultAddr);
- dumpReport(reinterpret_cast<uintptr_t>(info->si_addr),
- GPAForSignalHandler->getAllocatorState(),
- GPAForSignalHandler->getMetadataRegion(),
+ dumpReport(FaultAddrUPtr, State, GPAForSignalHandler->getMetadataRegion(),
BacktraceForSignalHandler, PrintfForSignalHandler,
PrintBacktraceForSignalHandler, ucontext);
+
+ if (RecoverableSignal) {
+ GPAForSignalHandler->postCrashReportRecoverableOnly(FaultAddr);
+ return;
+ }
}
- // Process any previous handlers.
+ // Process any previous handlers as long as the crash wasn't a GWP-ASan crash
+ // in recoverable mode.
if (PreviousHandler.sa_flags & SA_SIGINFO) {
PreviousHandler.sa_sigaction(sig, info, ucontext);
} else if (PreviousHandler.sa_handler == SIG_DFL) {
@@ -196,7 +239,7 @@ namespace segv_handler {
void installSignalHandlers(gwp_asan::GuardedPoolAllocator *GPA, Printf_t Printf,
PrintBacktrace_t PrintBacktrace,
- SegvBacktrace_t SegvBacktrace) {
+ SegvBacktrace_t SegvBacktrace, bool Recoverable) {
assert(GPA && "GPA wasn't provided to installSignalHandlers.");
assert(Printf && "Printf wasn't provided to installSignalHandlers.");
assert(PrintBacktrace &&
@@ -207,6 +250,7 @@ void installSignalHandlers(gwp_asan::GuardedPoolAllocator *GPA, Printf_t Printf,
PrintfForSignalHandler = Printf;
PrintBacktraceForSignalHandler = PrintBacktrace;
BacktraceForSignalHandler = SegvBacktrace;
+ RecoverableSignal = Recoverable;
struct sigaction Action = {};
Action.sa_sigaction = sigSegvHandler;
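
A sketch of the call an embedder makes to opt into report-and-continue behavior; GPA, Printf, PrintBacktrace and SegvBacktrace are whatever the embedder already passes, only the trailing argument is new:

    gwp_asan::segv_handler::installSignalHandlers(
        &GPA, Printf, PrintBacktrace, SegvBacktrace, /*Recoverable=*/true);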
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/options.inc b/contrib/llvm-project/compiler-rt/lib/gwp_asan/options.inc
index 9900a2ac40df..3a593216e8df 100644
--- a/contrib/llvm-project/compiler-rt/lib/gwp_asan/options.inc
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/options.inc
@@ -49,6 +49,16 @@ GWP_ASAN_OPTION(
"the same. Note, if the previously installed SIGSEGV handler is SIG_IGN, "
"we terminate the process after dumping the error report.")
+GWP_ASAN_OPTION(
+ bool, Recoverable, false,
+ "Install GWP-ASan's signal handler in recoverable mode. This means that "
+ "upon GWP-ASan detecting an error, it'll print the error report, but *not* "
+ "crash. Only one crash per sampled allocation will ever be recorded, and "
+ "if a sampled allocation does actually cause a crash, it'll permanently "
+ "occupy a slot in the pool. The recoverable mode also means that "
+ "previously-installed signal handlers will only be triggered for "
+ "non-GWP-ASan errors, as all GWP-ASan errors won't be forwarded.")
+
GWP_ASAN_OPTION(bool, InstallForkHandlers, true,
"Install GWP-ASan atfork handlers to acquire internal locks "
"before fork and release them after.")
diff --git a/contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/guarded_pool_allocator_posix.cpp b/contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/guarded_pool_allocator_posix.cpp
index adb7330a431e..c036ebe3efcc 100644
--- a/contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/guarded_pool_allocator_posix.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/gwp_asan/platform_specific/guarded_pool_allocator_posix.cpp
@@ -98,6 +98,10 @@ size_t GuardedPoolAllocator::getPlatformPageSize() {
}
void GuardedPoolAllocator::installAtFork() {
+ static bool AtForkInstalled = false;
+ if (AtForkInstalled)
+ return;
+ AtForkInstalled = true;
auto Disable = []() {
if (auto *S = getSingleton())
S->disable();
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan.cpp
index cbe0dee66dcd..52780becbdb2 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan.cpp
@@ -16,6 +16,7 @@
#include "hwasan_checks.h"
#include "hwasan_dynamic_shadow.h"
#include "hwasan_globals.h"
+#include "hwasan_mapping.h"
#include "hwasan_poisoning.h"
#include "hwasan_report.h"
#include "hwasan_thread.h"
@@ -24,6 +25,7 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_interface_internal.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
@@ -84,6 +86,11 @@ static void InitializeFlags() {
cf.clear_shadow_mmap_threshold = 4096 * (SANITIZER_ANDROID ? 2 : 8);
// Sigtrap is used in error reporting.
cf.handle_sigtrap = kHandleSignalExclusive;
+  // For now this is only tested on Linux and Fuchsia. Other platforms can be
+  // turned on as they become ready.
+ constexpr bool can_detect_leaks =
+ (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_FUCHSIA;
+ cf.detect_leaks = cf.detect_leaks && can_detect_leaks;
#if SANITIZER_ANDROID
// Let the platform handle other signals. It is better at reporting them than we
@@ -104,6 +111,15 @@ static void InitializeFlags() {
RegisterHwasanFlags(&parser, f);
RegisterCommonFlags(&parser);
+#if CAN_SANITIZE_LEAKS
+ __lsan::Flags *lf = __lsan::flags();
+ lf->SetDefaults();
+
+ FlagParser lsan_parser;
+ __lsan::RegisterLsanFlags(&lsan_parser, lf);
+ RegisterCommonFlags(&lsan_parser);
+#endif
+
#if HWASAN_CONTAINS_UBSAN
__ubsan::Flags *uf = __ubsan::flags();
uf->SetDefaults();
@@ -116,12 +132,18 @@ static void InitializeFlags() {
// Override from user-specified string.
if (__hwasan_default_options)
parser.ParseString(__hwasan_default_options());
+#if CAN_SANITIZE_LEAKS
+ lsan_parser.ParseString(__lsan_default_options());
+#endif
#if HWASAN_CONTAINS_UBSAN
const char *ubsan_default_options = __ubsan_default_options();
ubsan_parser.ParseString(ubsan_default_options);
#endif
parser.ParseStringFromEnv("HWASAN_OPTIONS");
+#if CAN_SANITIZE_LEAKS
+ lsan_parser.ParseStringFromEnv("LSAN_OPTIONS");
+#endif
#if HWASAN_CONTAINS_UBSAN
ubsan_parser.ParseStringFromEnv("UBSAN_OPTIONS");
#endif
@@ -131,6 +153,12 @@ static void InitializeFlags() {
if (Verbosity()) ReportUnrecognizedFlags();
if (common_flags()->help) parser.PrintFlagDescriptions();
+ // Flag validation:
+ if (!CAN_SANITIZE_LEAKS && common_flags()->detect_leaks) {
+ Report("%s: detect_leaks is not supported on this platform.\n",
+ SanitizerToolName);
+ Die();
+ }
}
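
With the LSan plumbing above, leak checking under HWASan is driven by the usual flag surfaces, e.g. `HWASAN_OPTIONS=detect_leaks=1 LSAN_OPTIONS=suppressions=lsan.supp ./app` (file name hypothetical); on unsupported platforms the new validation block makes an explicit detect_leaks request die loudly instead of being silently ignored.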
static void CheckUnwind() {
@@ -141,17 +169,17 @@ static void CheckUnwind() {
static void HwasanFormatMemoryUsage(InternalScopedString &s) {
HwasanThreadList &thread_list = hwasanThreadList();
auto thread_stats = thread_list.GetThreadStats();
- auto *sds = StackDepotGetStats();
+ auto sds = StackDepotGetStats();
AllocatorStatCounters asc;
GetAllocatorStats(asc);
- s.append(
+ s.AppendF(
"HWASAN pid: %d rss: %zd threads: %zd stacks: %zd"
" thr_aux: %zd stack_depot: %zd uniq_stacks: %zd"
" heap: %zd",
internal_getpid(), GetRSS(), thread_stats.n_live_threads,
thread_stats.total_stack_size,
thread_stats.n_live_threads * thread_list.MemoryUsedPerThread(),
- sds->allocated, sds->n_uniq_ids, asc[AllocatorStatMapped]);
+ sds.allocated, sds.n_uniq_ids, asc[AllocatorStatMapped]);
}
#if SANITIZER_ANDROID
@@ -216,8 +244,8 @@ void HandleTagMismatch(AccessInfo ai, uptr pc, uptr frame, void *uc,
registers_frame);
}
-void HwasanTagMismatch(uptr addr, uptr access_info, uptr *registers_frame,
- size_t outsize) {
+void HwasanTagMismatch(uptr addr, uptr pc, uptr frame, uptr access_info,
+ uptr *registers_frame, size_t outsize) {
__hwasan::AccessInfo ai;
ai.is_store = access_info & 0x10;
ai.is_load = !ai.is_store;
@@ -228,9 +256,7 @@ void HwasanTagMismatch(uptr addr, uptr access_info, uptr *registers_frame,
else
ai.size = 1 << (access_info & 0xf);
- HandleTagMismatch(ai, (uptr)__builtin_return_address(0),
- (uptr)__builtin_frame_address(0), nullptr, registers_frame);
- __builtin_unreachable();
+ HandleTagMismatch(ai, pc, frame, nullptr, registers_frame);
}
Thread *GetCurrentThread() {
@@ -266,14 +292,20 @@ static bool InitializeSingleGlobal(const hwasan_global &global) {
}
static void InitLoadedGlobals() {
- dl_iterate_phdr(
- [](dl_phdr_info *info, size_t /* size */, void * /* data */) -> int {
- for (const hwasan_global &global : HwasanGlobalsFor(
- info->dlpi_addr, info->dlpi_phdr, info->dlpi_phnum))
- InitializeSingleGlobal(global);
- return 0;
- },
- nullptr);
+ // Fuchsia's libc provides a hook (__sanitizer_module_loaded) that runs on
+ // the startup path which calls into __hwasan_library_loaded on all
+ // initially loaded modules, so explicitly registering the globals here
+ // isn't needed.
+ if constexpr (!SANITIZER_FUCHSIA) {
+ dl_iterate_phdr(
+ [](dl_phdr_info *info, size_t /* size */, void * /* data */) -> int {
+ for (const hwasan_global &global : HwasanGlobalsFor(
+ info->dlpi_addr, info->dlpi_phdr, info->dlpi_phnum))
+ InitializeSingleGlobal(global);
+ return 0;
+ },
+ nullptr);
+ }
}
// Prepare to run instrumented code on the main thread.
@@ -319,7 +351,7 @@ void __hwasan_init_static() {
InitializeSingleGlobal(global);
}
-void __hwasan_init() {
+__attribute__((constructor(0))) void __hwasan_init() {
CHECK(!hwasan_init_is_running);
if (hwasan_inited) return;
hwasan_init_is_running = 1;
@@ -344,7 +376,7 @@ void __hwasan_init() {
// Needs to be called here because flags()->random_tags might not have been
// initialized when InitInstrumentation() was called.
- GetCurrentThread()->InitRandomState();
+ GetCurrentThread()->EnsureRandomStateInited();
SetPrintfAndReportCallback(AppendToErrorMessageBuffer);
// This may call libc -> needs initialized shadow.
@@ -360,11 +392,22 @@ void __hwasan_init() {
HwasanTSDThreadInit();
HwasanAllocatorInit();
+ HwasanInstallAtForkHandler();
+
+ if (CAN_SANITIZE_LEAKS) {
+ __lsan::InitCommonLsan();
+ InstallAtExitCheckLeaks();
+ }
#if HWASAN_CONTAINS_UBSAN
__ubsan::InitAsPlugin();
#endif
+ if (CAN_SANITIZE_LEAKS && common_flags()->detect_leaks) {
+ __lsan::ScopedInterceptorDisabler disabler;
+ Symbolizer::LateInitialize();
+ }
+
VPrintf(1, "HWAddressSanitizer init done\n");
hwasan_init_is_running = 0;
@@ -390,23 +433,46 @@ void __hwasan_print_shadow(const void *p, uptr sz) {
uptr shadow_last = MemToShadow(ptr_raw + sz - 1);
Printf("HWASan shadow map for %zx .. %zx (pointer tag %x)\n", ptr_raw,
ptr_raw + sz, GetTagFromPointer((uptr)p));
- for (uptr s = shadow_first; s <= shadow_last; ++s)
- Printf(" %zx: %x\n", ShadowToMem(s), *(tag_t *)s);
+ for (uptr s = shadow_first; s <= shadow_last; ++s) {
+ tag_t mem_tag = *reinterpret_cast<tag_t *>(s);
+ uptr granule_addr = ShadowToMem(s);
+ if (mem_tag && mem_tag < kShadowAlignment)
+ Printf(" %zx: %02x(%02x)\n", granule_addr, mem_tag,
+ *reinterpret_cast<tag_t *>(granule_addr + kShadowAlignment - 1));
+ else
+ Printf(" %zx: %02x\n", granule_addr, mem_tag);
+ }
}
sptr __hwasan_test_shadow(const void *p, uptr sz) {
if (sz == 0)
return -1;
- tag_t ptr_tag = GetTagFromPointer((uptr)p);
- uptr ptr_raw = UntagAddr(reinterpret_cast<uptr>(p));
+ uptr ptr = reinterpret_cast<uptr>(p);
+ tag_t ptr_tag = GetTagFromPointer(ptr);
+ uptr ptr_raw = UntagAddr(ptr);
uptr shadow_first = MemToShadow(ptr_raw);
- uptr shadow_last = MemToShadow(ptr_raw + sz - 1);
- for (uptr s = shadow_first; s <= shadow_last; ++s)
- if (*(tag_t *)s != ptr_tag) {
- sptr offset = ShadowToMem(s) - ptr_raw;
+ uptr shadow_last = MemToShadow(ptr_raw + sz);
+ for (uptr s = shadow_first; s < shadow_last; ++s) {
+ if (UNLIKELY(*(tag_t *)s != ptr_tag)) {
+ uptr short_size =
+ ShortTagSize(*(tag_t *)s, AddTagToPointer(ShadowToMem(s), ptr_tag));
+ sptr offset = ShadowToMem(s) - ptr_raw + short_size;
return offset < 0 ? 0 : offset;
}
- return -1;
+ }
+
+ uptr end = ptr + sz;
+ uptr tail_sz = end & (kShadowAlignment - 1);
+ if (!tail_sz)
+ return -1;
+
+ uptr short_size =
+ ShortTagSize(*(tag_t *)shadow_last, end & ~(kShadowAlignment - 1));
+ if (LIKELY(tail_sz <= short_size))
+ return -1;
+
+ sptr offset = sz - tail_sz + short_size;
+ return offset < 0 ? 0 : offset;
}
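
// Editor's sketch (illustrative helper, not part of this patch): a shadow
// byte in (0, kShadowAlignment) denotes a "short granule" -- only that many
// leading bytes of the granule are addressable, and the pointer tag is kept
// in the granule's last byte. The tail handling above relies on exactly this
// layout; the logic mirrors ShortTagSize from hwasan_checks.h.
static inline uptr AccessibleBytesSketch(tag_t shadow, tag_t ptr_tag,
                                         const u8 *granule) {
  if (shadow == ptr_tag)
    return kShadowAlignment;  // full granule, tags match
  if (!shadow || shadow >= kShadowAlignment)
    return 0;  // plain tag mismatch, not a short granule
  // Short granule: the shadow byte is the valid prefix length; the real tag
  // sits in the granule's last byte.
  return granule[kShadowAlignment - 1] == ptr_tag ? shadow : 0;
}
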
u16 __sanitizer_unaligned_load16(const uu16 *p) {
@@ -466,6 +532,56 @@ void __hwasan_load16_noabort(uptr p) {
CheckAddress<ErrorAction::Recover, AccessType::Load, 4>(p);
}
+void __hwasan_loadN_match_all(uptr p, uptr sz, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddressSized<ErrorAction::Abort, AccessType::Load>(p, sz);
+}
+void __hwasan_load1_match_all(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Abort, AccessType::Load, 0>(p);
+}
+void __hwasan_load2_match_all(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Abort, AccessType::Load, 1>(p);
+}
+void __hwasan_load4_match_all(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Abort, AccessType::Load, 2>(p);
+}
+void __hwasan_load8_match_all(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Abort, AccessType::Load, 3>(p);
+}
+void __hwasan_load16_match_all(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Abort, AccessType::Load, 4>(p);
+}
+
+void __hwasan_loadN_match_all_noabort(uptr p, uptr sz, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddressSized<ErrorAction::Recover, AccessType::Load>(p, sz);
+}
+void __hwasan_load1_match_all_noabort(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Recover, AccessType::Load, 0>(p);
+}
+void __hwasan_load2_match_all_noabort(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Recover, AccessType::Load, 1>(p);
+}
+void __hwasan_load4_match_all_noabort(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Recover, AccessType::Load, 2>(p);
+}
+void __hwasan_load8_match_all_noabort(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Recover, AccessType::Load, 3>(p);
+}
+void __hwasan_load16_match_all_noabort(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Recover, AccessType::Load, 4>(p);
+}
+
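
// Editor's sketch: the *_match_all entry points let instrumented code skip
// the tag check when the pointer carries a designated wildcard tag (the 0xFF
// value below is an assumption for illustration; the real value is chosen by
// the compiler/target, e.g. for pointers it could not prove to be tagged).
static inline void CheckedLoad8Sketch(uptr p) {
  const u8 kAssumedMatchAllTag = 0xFF;  // hypothetical wildcard tag
  // Becomes a no-op when GetTagFromPointer(p) == kAssumedMatchAllTag.
  __hwasan_load8_match_all(p, kAssumedMatchAllTag);
}
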
void __hwasan_storeN(uptr p, uptr sz) {
CheckAddressSized<ErrorAction::Abort, AccessType::Store>(p, sz);
}
@@ -504,18 +620,70 @@ void __hwasan_store16_noabort(uptr p) {
CheckAddress<ErrorAction::Recover, AccessType::Store, 4>(p);
}
+void __hwasan_storeN_match_all(uptr p, uptr sz, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddressSized<ErrorAction::Abort, AccessType::Store>(p, sz);
+}
+void __hwasan_store1_match_all(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Abort, AccessType::Store, 0>(p);
+}
+void __hwasan_store2_match_all(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Abort, AccessType::Store, 1>(p);
+}
+void __hwasan_store4_match_all(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Abort, AccessType::Store, 2>(p);
+}
+void __hwasan_store8_match_all(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Abort, AccessType::Store, 3>(p);
+}
+void __hwasan_store16_match_all(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Abort, AccessType::Store, 4>(p);
+}
+
+void __hwasan_storeN_match_all_noabort(uptr p, uptr sz, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddressSized<ErrorAction::Recover, AccessType::Store>(p, sz);
+}
+void __hwasan_store1_match_all_noabort(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Recover, AccessType::Store, 0>(p);
+}
+void __hwasan_store2_match_all_noabort(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Recover, AccessType::Store, 1>(p);
+}
+void __hwasan_store4_match_all_noabort(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Recover, AccessType::Store, 2>(p);
+}
+void __hwasan_store8_match_all_noabort(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Recover, AccessType::Store, 3>(p);
+}
+void __hwasan_store16_match_all_noabort(uptr p, u8 match_all_tag) {
+ if (GetTagFromPointer(p) != match_all_tag)
+ CheckAddress<ErrorAction::Recover, AccessType::Store, 4>(p);
+}
+
void __hwasan_tag_memory(uptr p, u8 tag, uptr sz) {
- TagMemoryAligned(p, sz, tag);
+ TagMemoryAligned(UntagAddr(p), sz, tag);
}
uptr __hwasan_tag_pointer(uptr p, u8 tag) {
return AddTagToPointer(p, tag);
}
+u8 __hwasan_get_tag_from_pointer(uptr p) { return GetTagFromPointer(p); }
+
void __hwasan_handle_longjmp(const void *sp_dst) {
uptr dst = (uptr)sp_dst;
// HWASan does not support tagged SP.
- CHECK(GetTagFromPointer(dst) == 0);
+ CHECK_EQ(GetTagFromPointer(dst), 0);
uptr sp = (uptr)__builtin_frame_address(0);
static const uptr kMaxExpectedCleanupSize = 64 << 20; // 64M
@@ -566,6 +734,12 @@ u8 __hwasan_generate_tag() {
return t->GenerateRandomTag();
}
+void __hwasan_add_frame_record(u64 frame_record_info) {
+ Thread *t = GetCurrentThread();
+ if (t)
+ t->stack_allocations()->push(frame_record_info);
+}
+
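
// Editor's sketch (hypothetical decode, based on the kRecordFP* constants in
// hwasan.h below): a frame record packs bits of the frame pointer into the
// top 16 bits of the word, with the PC in the remainder, so reports can
// reconstruct an approximate FP modulo kRecordFPModulus.
static inline u64 DecodeRecordFPSketch(u64 record) {
  const unsigned kFPShift = 48, kFPLShift = 4;  // mirror kRecordFPShift/LShift
  return (record >> kFPShift) << kFPLShift;     // FP modulo 1 << 20
}
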
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
@@ -584,7 +758,9 @@ void __sanitizer_print_stack_trace() {
// rest of the mismatch handling code (C++).
void __hwasan_tag_mismatch4(uptr addr, uptr access_info, uptr *registers_frame,
size_t outsize) {
- __hwasan::HwasanTagMismatch(addr, access_info, registers_frame, outsize);
+ __hwasan::HwasanTagMismatch(addr, (uptr)__builtin_return_address(0),
+ (uptr)__builtin_frame_address(0), access_info,
+ registers_frame, outsize);
}
} // extern "C"
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan.h b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan.h
index 7338b696ad34..df21375e8167 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan.h
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan.h
@@ -16,6 +16,7 @@
#include "hwasan_flags.h"
#include "hwasan_interface_internal.h"
+#include "hwasan_mapping.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
@@ -78,12 +79,23 @@ const unsigned kRecordFPShift = 48;
const unsigned kRecordFPLShift = 4;
const unsigned kRecordFPModulus = 1 << (64 - kRecordFPShift + kRecordFPLShift);
+static inline bool InTaggableRegion(uptr addr) {
+#if defined(HWASAN_ALIASING_MODE)
+ // Aliases are mapped next to shadow so that the upper bits match the shadow
+ // base.
+ return (addr >> kTaggableRegionCheckShift) ==
+ (__hwasan::GetShadowOffset() >> kTaggableRegionCheckShift);
+#endif
+ return true;
+}
+
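
// Editor's sketch with assumed constants (the shift and shadow offset below
// are placeholders for concreteness, not this build's values): in aliasing
// mode an address is taggable only if its bits above
// kTaggableRegionCheckShift equal those of the shadow base.
static inline bool InTaggableRegionSketch(uptr addr) {
  const unsigned kAssumedShift = 44;                    // assumption
  const uptr kAssumedShadowOffset = 0x100000000000ULL;  // assumption
  return (addr >> kAssumedShift) == (kAssumedShadowOffset >> kAssumedShift);
}
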
static inline tag_t GetTagFromPointer(uptr p) {
- return (p >> kAddressTagShift) & kTagMask;
+ return InTaggableRegion(p) ? ((p >> kAddressTagShift) & kTagMask) : 0;
}
static inline uptr UntagAddr(uptr tagged_addr) {
- return tagged_addr & ~kAddressTagMask;
+ return InTaggableRegion(tagged_addr) ? (tagged_addr & ~kAddressTagMask)
+ : tagged_addr;
}
static inline void *UntagPtr(const void *tagged_ptr) {
@@ -92,7 +104,9 @@ static inline void *UntagPtr(const void *tagged_ptr) {
}
static inline uptr AddTagToPointer(uptr p, tag_t tag) {
- return (p & ~kAddressTagMask) | ((uptr)tag << kAddressTagShift);
+ return InTaggableRegion(p) ? ((p & ~kAddressTagMask) |
+ ((uptr)(tag & kTagMask) << kAddressTagShift))
+ : p;
}
namespace __hwasan {
@@ -107,6 +121,8 @@ void InitThreads();
void InitializeInterceptors();
void HwasanAllocatorInit();
+void HwasanAllocatorLock();
+void HwasanAllocatorUnlock();
void *hwasan_malloc(uptr size, StackTrace *stack);
void *hwasan_calloc(uptr nmemb, uptr size, StackTrace *stack);
@@ -140,6 +156,10 @@ void HwasanAtExit();
void HwasanOnDeadlySignal(int signo, void *info, void *context);
+void HwasanInstallAtForkHandler();
+
+void InstallAtExitCheckLeaks();
+
void UpdateMemoryUsage();
void AppendToErrorMessageBuffer(const char *buffer);
@@ -163,45 +183,46 @@ void HandleTagMismatch(AccessInfo ai, uptr pc, uptr frame, void *uc,
// This dispatches to HandleTagMismatch but sets up the AccessInfo, program
// counter, and frame pointer.
-void HwasanTagMismatch(uptr addr, uptr access_info, uptr *registers_frame,
- size_t outsize);
+void HwasanTagMismatch(uptr addr, uptr pc, uptr frame, uptr access_info,
+ uptr *registers_frame, size_t outsize);
} // namespace __hwasan
-#define HWASAN_MALLOC_HOOK(ptr, size) \
- do { \
- if (&__sanitizer_malloc_hook) { \
- __sanitizer_malloc_hook(ptr, size); \
- } \
- RunMallocHooks(ptr, size); \
- } while (false)
-#define HWASAN_FREE_HOOK(ptr) \
- do { \
- if (&__sanitizer_free_hook) { \
- __sanitizer_free_hook(ptr); \
- } \
- RunFreeHooks(ptr); \
- } while (false)
-
-#if HWASAN_WITH_INTERCEPTORS && defined(__aarch64__)
+#if HWASAN_WITH_INTERCEPTORS
// For both bionic and glibc __sigset_t is an unsigned long.
typedef unsigned long __hw_sigset_t;
// Setjmp and longjmp implementations are platform specific, and hence the
-// interception code is platform specific too. As yet we've only implemented
-// the interception for AArch64.
-typedef unsigned long long __hw_register_buf[22];
+// interception code is platform specific too.
+# if defined(__aarch64__)
+constexpr size_t kHwRegisterBufSize = 22;
+# elif defined(__x86_64__)
+constexpr size_t kHwRegisterBufSize = 8;
+# elif SANITIZER_RISCV64
+// saving PC, 12 int regs, sp, 12 fp regs
+# ifndef __riscv_float_abi_soft
+constexpr size_t kHwRegisterBufSize = 1 + 12 + 1 + 12;
+# else
+constexpr size_t kHwRegisterBufSize = 1 + 12 + 1;
+# endif
+# endif
+typedef unsigned long long __hw_register_buf[kHwRegisterBufSize];
struct __hw_jmp_buf_struct {
// NOTE: The machine-dependent definition of `__sigsetjmp'
// assume that a `__hw_jmp_buf' begins with a `__hw_register_buf' and that
// `__mask_was_saved' follows it. Do not move these members or add others
// before it.
+ //
+ // We add a __magic field to our struct to catch cases where libc's setjmp
+ // populated the jmp_buf instead of our interceptor.
__hw_register_buf __jmpbuf; // Calling environment.
- int __mask_was_saved; // Saved the signal mask?
+ unsigned __mask_was_saved : 1; // Saved the signal mask?
+ unsigned __magic : 31; // Used to distinguish __hw_jmp_buf from jmp_buf.
__hw_sigset_t __saved_mask; // Saved signal mask.
};
typedef struct __hw_jmp_buf_struct __hw_jmp_buf[1];
typedef struct __hw_jmp_buf_struct __hw_sigjmp_buf[1];
-#endif // HWASAN_WITH_INTERCEPTORS && __aarch64__
+constexpr unsigned kHwJmpBufMagic = 0x248ACE77;
+#endif // HWASAN_WITH_INTERCEPTORS
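
// Editor's sketch (illustrative, not the interceptor's exact code): setjmp
// stamps __magic with kHwJmpBufMagic, and longjmp can then detect a buffer
// that libc's setjmp populated instead and fall back to libc's layout.
static inline bool SetByOurSetjmpSketch(const __hw_jmp_buf_struct &env) {
  // A buffer filled by libc's setjmp leaves __magic as arbitrary bits.
  return env.__magic == kHwJmpBufMagic;
}
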
#define ENSURE_HWASAN_INITED() \
do { \
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocation_functions.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocation_functions.cpp
index 6c2a6077866f..75d91ed09ce1 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocation_functions.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocation_functions.cpp
@@ -14,28 +14,32 @@
#include "hwasan.h"
#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_allocator_dlsym.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_mallinfo.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
using namespace __hwasan;
-static uptr allocated_for_dlsym;
-static const uptr kDlsymAllocPoolSize = 1024;
-static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize];
-
-static bool IsInDlsymAllocPool(const void *ptr) {
- uptr off = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
- return off < sizeof(alloc_memory_for_dlsym);
-}
+struct DlsymAlloc : public DlSymAllocator<DlsymAlloc> {
+ static bool UseImpl() { return !hwasan_inited; }
+ static void OnAllocate(const void *ptr, uptr size) {
+# if CAN_SANITIZE_LEAKS
+ // Suppress leaks from dlerror(). Previously dlsym hack on global array was
+ // used by leak sanitizer as a root region.
+ __lsan_register_root_region(ptr, size);
+# endif
+ }
+ static void OnFree(const void *ptr, uptr size) {
+# if CAN_SANITIZE_LEAKS
+ __lsan_unregister_root_region(ptr, size);
+# endif
+ }
+};
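
// Editor's note with a sketch: dlsym() itself allocates (e.g. for dlerror
// buffers) while interceptors are still being resolved, before hwasan_inited
// is set, so those early requests are served from DlSymAllocator's static
// pool; OnAllocate/OnFree above keep LSan treating that pool as root regions.
// The hypothetical helper below condenses the dispatch the malloc path uses:
static inline void *BootstrapAwareMallocSketch(uptr size, StackTrace *stack) {
  if (DlsymAlloc::Use())  // true while !hwasan_inited
    return DlsymAlloc::Allocate(size);
  return hwasan_malloc(size, stack);  // normal tagged allocation path
}
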
-static void *AllocateFromLocalPool(uptr size_in_bytes) {
- uptr size_in_words = RoundUpTo(size_in_bytes, kWordSize) / kWordSize;
- void *mem = (void *)&alloc_memory_for_dlsym[allocated_for_dlsym];
- allocated_for_dlsym += size_in_words;
- CHECK_LT(allocated_for_dlsym, kDlsymAllocPoolSize);
- return mem;
-}
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_posix_memalign(void **memptr, uptr alignment, uptr size) {
GET_MALLOC_STACK_TRACE;
CHECK_NE(memptr, 0);
@@ -43,16 +47,19 @@ int __sanitizer_posix_memalign(void **memptr, uptr alignment, uptr size) {
return res;
}
+SANITIZER_INTERFACE_ATTRIBUTE
void *__sanitizer_memalign(uptr alignment, uptr size) {
GET_MALLOC_STACK_TRACE;
return hwasan_memalign(alignment, size, &stack);
}
+SANITIZER_INTERFACE_ATTRIBUTE
void *__sanitizer_aligned_alloc(uptr alignment, uptr size) {
GET_MALLOC_STACK_TRACE;
return hwasan_aligned_alloc(alignment, size, &stack);
}
+SANITIZER_INTERFACE_ATTRIBUTE
void *__sanitizer___libc_memalign(uptr alignment, uptr size) {
GET_MALLOC_STACK_TRACE;
void *ptr = hwasan_memalign(alignment, size, &stack);
@@ -61,93 +68,105 @@ void *__sanitizer___libc_memalign(uptr alignment, uptr size) {
return ptr;
}
+SANITIZER_INTERFACE_ATTRIBUTE
void *__sanitizer_valloc(uptr size) {
GET_MALLOC_STACK_TRACE;
return hwasan_valloc(size, &stack);
}
+SANITIZER_INTERFACE_ATTRIBUTE
void *__sanitizer_pvalloc(uptr size) {
GET_MALLOC_STACK_TRACE;
return hwasan_pvalloc(size, &stack);
}
+SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_free(void *ptr) {
- GET_MALLOC_STACK_TRACE;
- if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr)))
+ if (!ptr)
return;
+ if (DlsymAlloc::PointerIsMine(ptr))
+ return DlsymAlloc::Free(ptr);
+ GET_MALLOC_STACK_TRACE;
hwasan_free(ptr, &stack);
}
+SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_cfree(void *ptr) {
- GET_MALLOC_STACK_TRACE;
- if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr)))
+ if (!ptr)
return;
+ if (DlsymAlloc::PointerIsMine(ptr))
+ return DlsymAlloc::Free(ptr);
+ GET_MALLOC_STACK_TRACE;
hwasan_free(ptr, &stack);
}
+SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_malloc_usable_size(const void *ptr) {
return __sanitizer_get_allocated_size(ptr);
}
+SANITIZER_INTERFACE_ATTRIBUTE
struct __sanitizer_struct_mallinfo __sanitizer_mallinfo() {
__sanitizer_struct_mallinfo sret;
internal_memset(&sret, 0, sizeof(sret));
return sret;
}
+SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_mallopt(int cmd, int value) { return 0; }
+SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_malloc_stats(void) {
// FIXME: implement, but don't call REAL(malloc_stats)!
}
+SANITIZER_INTERFACE_ATTRIBUTE
void *__sanitizer_calloc(uptr nmemb, uptr size) {
+ if (DlsymAlloc::Use())
+ return DlsymAlloc::Callocate(nmemb, size);
GET_MALLOC_STACK_TRACE;
- if (UNLIKELY(!hwasan_inited))
- // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
- return AllocateFromLocalPool(nmemb * size);
return hwasan_calloc(nmemb, size, &stack);
}
+SANITIZER_INTERFACE_ATTRIBUTE
void *__sanitizer_realloc(void *ptr, uptr size) {
+ if (DlsymAlloc::Use() || DlsymAlloc::PointerIsMine(ptr))
+ return DlsymAlloc::Realloc(ptr, size);
GET_MALLOC_STACK_TRACE;
- if (UNLIKELY(IsInDlsymAllocPool(ptr))) {
- uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
- uptr copy_size = Min(size, kDlsymAllocPoolSize - offset);
- void *new_ptr;
- if (UNLIKELY(!hwasan_inited)) {
- new_ptr = AllocateFromLocalPool(copy_size);
- } else {
- copy_size = size;
- new_ptr = hwasan_malloc(copy_size, &stack);
- }
- internal_memcpy(new_ptr, ptr, copy_size);
- return new_ptr;
- }
return hwasan_realloc(ptr, size, &stack);
}
+SANITIZER_INTERFACE_ATTRIBUTE
void *__sanitizer_reallocarray(void *ptr, uptr nmemb, uptr size) {
GET_MALLOC_STACK_TRACE;
return hwasan_reallocarray(ptr, nmemb, size, &stack);
}
+SANITIZER_INTERFACE_ATTRIBUTE
void *__sanitizer_malloc(uptr size) {
- GET_MALLOC_STACK_TRACE;
if (UNLIKELY(!hwasan_init_is_running))
ENSURE_HWASAN_INITED();
- if (UNLIKELY(!hwasan_inited))
- // Hack: dlsym calls malloc before REAL(malloc) is retrieved from dlsym.
- return AllocateFromLocalPool(size);
+ if (DlsymAlloc::Use())
+ return DlsymAlloc::Allocate(size);
+ GET_MALLOC_STACK_TRACE;
return hwasan_malloc(size, &stack);
}
-#if HWASAN_WITH_INTERCEPTORS
+} // extern "C"
+
+#if HWASAN_WITH_INTERCEPTORS || SANITIZER_FUCHSIA
+#if SANITIZER_FUCHSIA
+// Fuchsia does not use the WRAP wrappers used by the interceptor infrastructure.
+# define INTERCEPTOR_ALIAS(RET, FN, ARGS...) \
+ extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE RET FN( \
+ ARGS) ALIAS(__sanitizer_##FN)
+#else
# define INTERCEPTOR_ALIAS(RET, FN, ARGS...) \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE RET WRAP(FN)(ARGS) \
- ALIAS("__sanitizer_" #FN); \
+ ALIAS(__sanitizer_##FN); \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE RET FN( \
- ARGS) ALIAS("__sanitizer_" #FN)
+ ARGS) ALIAS(__sanitizer_##FN)
+#endif
INTERCEPTOR_ALIAS(int, posix_memalign, void **memptr, SIZE_T alignment,
SIZE_T size);
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocator.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocator.cpp
index ef6d4d6c7678..d21ba024a20e 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocator.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocator.cpp
@@ -21,6 +21,7 @@
#include "hwasan_malloc_bisect.h"
#include "hwasan_thread.h"
#include "hwasan_report.h"
+#include "lsan/lsan_common.h"
namespace __hwasan {
@@ -32,40 +33,39 @@ static atomic_uint8_t hwasan_allocator_tagging_enabled;
static constexpr tag_t kFallbackAllocTag = 0xBB & kTagMask;
static constexpr tag_t kFallbackFreeTag = 0xBC;
-enum RightAlignMode {
- kRightAlignNever,
- kRightAlignSometimes,
- kRightAlignAlways
+enum {
+  // Either just allocated by the underlying allocator, but the chunk metadata
+  // is not yet ready, or almost returned to the underlying allocator with the
+  // metadata already meaningless.
+ CHUNK_INVALID = 0,
+ // The chunk is allocated and not yet freed.
+ CHUNK_ALLOCATED = 1,
};
+
// Initialized in HwasanAllocatorInit, and never changed.
static ALIGNED(16) u8 tail_magic[kShadowAlignment - 1];
+static uptr max_malloc_size;
bool HwasanChunkView::IsAllocated() const {
- return metadata_ && metadata_->alloc_context_id &&
- metadata_->get_requested_size();
-}
-
-// Aligns the 'addr' right to the granule boundary.
-static uptr AlignRight(uptr addr, uptr requested_size) {
- uptr tail_size = requested_size % kShadowAlignment;
- if (!tail_size) return addr;
- return addr + kShadowAlignment - tail_size;
+ return metadata_ && metadata_->IsAllocated();
}
uptr HwasanChunkView::Beg() const {
- if (metadata_ && metadata_->right_aligned)
- return AlignRight(block_, metadata_->get_requested_size());
return block_;
}
uptr HwasanChunkView::End() const {
return Beg() + UsedSize();
}
uptr HwasanChunkView::UsedSize() const {
- return metadata_->get_requested_size();
+ return metadata_->GetRequestedSize();
}
u32 HwasanChunkView::GetAllocStackId() const {
- return metadata_->alloc_context_id;
+ return metadata_->GetAllocStackId();
+}
+
+u32 HwasanChunkView::GetAllocThreadId() const {
+ return metadata_->GetAllocThreadId();
}
uptr HwasanChunkView::ActualSize() const {
@@ -76,10 +76,58 @@ bool HwasanChunkView::FromSmallHeap() const {
return allocator.FromPrimary(reinterpret_cast<void *>(block_));
}
+bool HwasanChunkView::AddrIsInside(uptr addr) const {
+ return (addr >= Beg()) && (addr < Beg() + UsedSize());
+}
+
+inline void Metadata::SetAllocated(u32 stack, u64 size) {
+ Thread *t = GetCurrentThread();
+ u64 context = t ? t->unique_id() : kMainTid;
+ context <<= 32;
+ context += stack;
+ requested_size_low = size & ((1ul << 32) - 1);
+ requested_size_high = size >> 32;
+ atomic_store(&alloc_context_id, context, memory_order_relaxed);
+ atomic_store(&chunk_state, CHUNK_ALLOCATED, memory_order_release);
+}
+
+inline void Metadata::SetUnallocated() {
+ atomic_store(&chunk_state, CHUNK_INVALID, memory_order_release);
+ requested_size_low = 0;
+ requested_size_high = 0;
+ atomic_store(&alloc_context_id, 0, memory_order_relaxed);
+}
+
+inline bool Metadata::IsAllocated() const {
+ return atomic_load(&chunk_state, memory_order_relaxed) == CHUNK_ALLOCATED;
+}
+
+inline u64 Metadata::GetRequestedSize() const {
+ return (static_cast<u64>(requested_size_high) << 32) + requested_size_low;
+}
+
+inline u32 Metadata::GetAllocStackId() const {
+ return atomic_load(&alloc_context_id, memory_order_relaxed);
+}
+
+inline u32 Metadata::GetAllocThreadId() const {
+ u64 context = atomic_load(&alloc_context_id, memory_order_relaxed);
+ u32 tid = context >> 32;
+ return tid;
+}
+
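
// Worked example (editor's note): SetAllocated() packs the allocating thread
// id into the high 32 bits of alloc_context_id and the stack depot id into
// the low 32 bits; the getters above simply split that word. For instance,
// pack(tid=7, stack=0x1234) == (7ULL << 32) | 0x1234, so GetAllocStackId()
// yields 0x1234 and GetAllocThreadId() yields 7.
static inline u64 PackAllocContextSketch(u32 tid, u32 stack_id) {
  return (static_cast<u64>(tid) << 32) + stack_id;
}
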
void GetAllocatorStats(AllocatorStatCounters s) {
allocator.GetStats(s);
}
+inline void Metadata::SetLsanTag(__lsan::ChunkTag tag) {
+ lsan_tag = tag;
+}
+
+inline __lsan::ChunkTag Metadata::GetLsanTag() const {
+ return static_cast<__lsan::ChunkTag>(lsan_tag);
+}
+
uptr GetAliasRegionStart() {
#if defined(HWASAN_ALIASING_MODE)
constexpr uptr kAliasRegionOffset = 1ULL << (kTaggableRegionCheckShift - 1);
@@ -101,14 +149,28 @@ void HwasanAllocatorInit() {
atomic_store_relaxed(&hwasan_allocator_tagging_enabled,
!flags()->disable_allocator_tagging);
SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
- allocator.Init(common_flags()->allocator_release_to_os_interval_ms,
- GetAliasRegionStart());
+ allocator.InitLinkerInitialized(
+ common_flags()->allocator_release_to_os_interval_ms,
+ GetAliasRegionStart());
for (uptr i = 0; i < sizeof(tail_magic); i++)
tail_magic[i] = GetCurrentThread()->GenerateRandomTag();
+ if (common_flags()->max_allocation_size_mb) {
+ max_malloc_size = common_flags()->max_allocation_size_mb << 20;
+ max_malloc_size = Min(max_malloc_size, kMaxAllowedMallocSize);
+ } else {
+ max_malloc_size = kMaxAllowedMallocSize;
+ }
}
-void AllocatorSwallowThreadLocalCache(AllocatorCache *cache) {
+void HwasanAllocatorLock() { allocator.ForceLock(); }
+
+void HwasanAllocatorUnlock() { allocator.ForceUnlock(); }
+
+void AllocatorThreadStart(AllocatorCache *cache) { allocator.InitCache(cache); }
+
+void AllocatorThreadFinish(AllocatorCache *cache) {
allocator.SwallowCache(cache);
+ allocator.DestroyCache(cache);
}
static uptr TaggedSize(uptr size) {
@@ -120,13 +182,21 @@ static uptr TaggedSize(uptr size) {
static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
bool zeroise) {
- if (orig_size > kMaxAllowedMallocSize) {
+ // Keep this consistent with LSAN and ASAN behavior.
+ if (UNLIKELY(orig_size == 0))
+ orig_size = 1;
+ if (UNLIKELY(orig_size > max_malloc_size)) {
if (AllocatorMayReturnNull()) {
Report("WARNING: HWAddressSanitizer failed to allocate 0x%zx bytes\n",
orig_size);
return nullptr;
}
- ReportAllocationSizeTooBig(orig_size, kMaxAllowedMallocSize, stack);
+ ReportAllocationSizeTooBig(orig_size, max_malloc_size, stack);
+ }
+ if (UNLIKELY(IsRssLimitExceeded())) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportRssLimitExceeded(stack);
}
alignment = Max(alignment, kShadowAlignment);
@@ -146,48 +216,51 @@ static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
return nullptr;
ReportOutOfMemory(size, stack);
}
- Metadata *meta =
- reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
- meta->set_requested_size(orig_size);
- meta->alloc_context_id = StackDepotPut(*stack);
- meta->right_aligned = false;
if (zeroise) {
- internal_memset(allocated, 0, size);
+    // The secondary allocator mmaps memory, which is zero-initialized, so we
+    // don't need to clear it explicitly.
+ if (allocator.FromPrimary(allocated))
+ internal_memset(allocated, 0, size);
} else if (flags()->max_malloc_fill_size > 0) {
uptr fill_size = Min(size, (uptr)flags()->max_malloc_fill_size);
internal_memset(allocated, flags()->malloc_fill_byte, fill_size);
}
if (size != orig_size) {
- internal_memcpy(reinterpret_cast<u8 *>(allocated) + orig_size, tail_magic,
- size - orig_size - 1);
+ u8 *tail = reinterpret_cast<u8 *>(allocated) + orig_size;
+ uptr tail_length = size - orig_size;
+ internal_memcpy(tail, tail_magic, tail_length - 1);
+    // The granule's last byte holds the short granule tag rather than magic,
+    // so we explicitly zero it.
+ tail[tail_length - 1] = 0;
}
void *user_ptr = allocated;
- // Tagging can only be skipped when both tag_in_malloc and tag_in_free are
- // false. When tag_in_malloc = false and tag_in_free = true malloc needs to
- // retag to 0.
if (InTaggableRegion(reinterpret_cast<uptr>(user_ptr)) &&
- (flags()->tag_in_malloc || flags()->tag_in_free) &&
- atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
- if (flags()->tag_in_malloc && malloc_bisect(stack, orig_size)) {
- tag_t tag = t ? t->GenerateRandomTag() : kFallbackAllocTag;
- uptr tag_size = orig_size ? orig_size : 1;
- uptr full_granule_size = RoundDownTo(tag_size, kShadowAlignment);
- user_ptr =
- (void *)TagMemoryAligned((uptr)user_ptr, full_granule_size, tag);
- if (full_granule_size != tag_size) {
- u8 *short_granule =
- reinterpret_cast<u8 *>(allocated) + full_granule_size;
- TagMemoryAligned((uptr)short_granule, kShadowAlignment,
- tag_size % kShadowAlignment);
- short_granule[kShadowAlignment - 1] = tag;
- }
- } else {
- user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, size, 0);
+ atomic_load_relaxed(&hwasan_allocator_tagging_enabled) &&
+ flags()->tag_in_malloc && malloc_bisect(stack, orig_size)) {
+ tag_t tag = t ? t->GenerateRandomTag() : kFallbackAllocTag;
+ uptr tag_size = orig_size ? orig_size : 1;
+ uptr full_granule_size = RoundDownTo(tag_size, kShadowAlignment);
+ user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, full_granule_size, tag);
+ if (full_granule_size != tag_size) {
+ u8 *short_granule = reinterpret_cast<u8 *>(allocated) + full_granule_size;
+ TagMemoryAligned((uptr)short_granule, kShadowAlignment,
+ tag_size % kShadowAlignment);
+ short_granule[kShadowAlignment - 1] = tag;
}
+ } else {
+    // Tagging cannot be skipped entirely: if it's disabled, we still need to
+    // tag the memory with zeros.
+ user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, size, 0);
}
- HWASAN_MALLOC_HOOK(user_ptr, size);
+ Metadata *meta =
+ reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
+#if CAN_SANITIZE_LEAKS
+ meta->SetLsanTag(__lsan::DisabledInThisThread() ? __lsan::kIgnored
+ : __lsan::kDirectlyLeaked);
+#endif
+ meta->SetAllocated(StackDepotPut(*stack), orig_size);
+ RunMallocHooks(user_ptr, orig_size);
return user_ptr;
}
@@ -201,24 +274,43 @@ static bool PointerAndMemoryTagsMatch(void *tagged_ptr) {
return PossiblyShortTagMatches(mem_tag, tagged_uptr, 1);
}
+static bool CheckInvalidFree(StackTrace *stack, void *untagged_ptr,
+ void *tagged_ptr) {
+ // This function can return true if halt_on_error is false.
+ if (!MemIsApp(reinterpret_cast<uptr>(untagged_ptr)) ||
+ !PointerAndMemoryTagsMatch(tagged_ptr)) {
+ ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
+ return true;
+ }
+ return false;
+}
+
static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
CHECK(tagged_ptr);
- HWASAN_FREE_HOOK(tagged_ptr);
+ void *untagged_ptr = UntagPtr(tagged_ptr);
- if (!PointerAndMemoryTagsMatch(tagged_ptr))
- ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
+ if (CheckInvalidFree(stack, untagged_ptr, tagged_ptr))
+ return;
- void *untagged_ptr = InTaggableRegion(reinterpret_cast<uptr>(tagged_ptr))
- ? UntagPtr(tagged_ptr)
- : tagged_ptr;
void *aligned_ptr = reinterpret_cast<void *>(
RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
tag_t pointer_tag = GetTagFromPointer(reinterpret_cast<uptr>(tagged_ptr));
Metadata *meta =
reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
- uptr orig_size = meta->get_requested_size();
+ if (!meta) {
+ ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
+ return;
+ }
+
+ RunFreeHooks(tagged_ptr);
+
+ uptr orig_size = meta->GetRequestedSize();
u32 free_context_id = StackDepotPut(*stack);
- u32 alloc_context_id = meta->alloc_context_id;
+ u32 alloc_context_id = meta->GetAllocStackId();
+ u32 alloc_thread_id = meta->GetAllocThreadId();
+
+ bool in_taggable_region =
+ InTaggableRegion(reinterpret_cast<uptr>(tagged_ptr));
// Check tail magic.
uptr tagged_size = TaggedSize(orig_size);
@@ -228,13 +320,17 @@ static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
CHECK_LT(tail_size, kShadowAlignment);
void *tail_beg = reinterpret_cast<void *>(
reinterpret_cast<uptr>(aligned_ptr) + orig_size);
- if (tail_size && internal_memcmp(tail_beg, tail_magic, tail_size))
+ tag_t short_granule_memtag = *(reinterpret_cast<tag_t *>(
+ reinterpret_cast<uptr>(tail_beg) + tail_size));
+ if (tail_size &&
+ (internal_memcmp(tail_beg, tail_magic, tail_size) ||
+ (in_taggable_region && pointer_tag != short_granule_memtag)))
ReportTailOverwritten(stack, reinterpret_cast<uptr>(tagged_ptr),
orig_size, tail_magic);
}
- meta->set_requested_size(0);
- meta->alloc_context_id = 0;
+ // TODO(kstoimenov): consider meta->SetUnallocated(free_context_id).
+ meta->SetUnallocated();
// This memory will not be reused by anyone else, so we are free to keep it
// poisoned.
Thread *t = GetCurrentThread();
@@ -243,9 +339,9 @@ static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
Min(TaggedSize(orig_size), (uptr)flags()->max_free_fill_size);
internal_memset(aligned_ptr, flags()->free_fill_byte, fill_size);
}
- if (InTaggableRegion(reinterpret_cast<uptr>(tagged_ptr)) &&
- flags()->tag_in_free && malloc_bisect(stack, 0) &&
- atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
+ if (in_taggable_region && flags()->tag_in_free && malloc_bisect(stack, 0) &&
+ atomic_load_relaxed(&hwasan_allocator_tagging_enabled) &&
+      allocator.FromPrimary(untagged_ptr) /* The secondary zero-tags and unmaps. */) {
// Always store full 8-bit tags on free to maximize UAF detection.
tag_t tag;
if (t) {
@@ -267,8 +363,9 @@ static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
if (t) {
allocator.Deallocate(t->allocator_cache(), aligned_ptr);
if (auto *ha = t->heap_allocations())
- ha->push({reinterpret_cast<uptr>(tagged_ptr), alloc_context_id,
- free_context_id, static_cast<u32>(orig_size)});
+ ha->push({reinterpret_cast<uptr>(tagged_ptr), alloc_thread_id,
+ alloc_context_id, free_context_id,
+ static_cast<u32>(orig_size)});
} else {
SpinMutexLock l(&fallback_mutex);
AllocatorCache *cache = &fallback_allocator_cache;
@@ -278,18 +375,17 @@ static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
static void *HwasanReallocate(StackTrace *stack, void *tagged_ptr_old,
uptr new_size, uptr alignment) {
- if (!PointerAndMemoryTagsMatch(tagged_ptr_old))
- ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr_old));
-
+ void *untagged_ptr_old = UntagPtr(tagged_ptr_old);
+ if (CheckInvalidFree(stack, untagged_ptr_old, tagged_ptr_old))
+ return nullptr;
void *tagged_ptr_new =
HwasanAllocate(stack, new_size, alignment, false /*zeroise*/);
if (tagged_ptr_old && tagged_ptr_new) {
- void *untagged_ptr_old = UntagPtr(tagged_ptr_old);
Metadata *meta =
reinterpret_cast<Metadata *>(allocator.GetMetaData(untagged_ptr_old));
- internal_memcpy(
- UntagPtr(tagged_ptr_new), untagged_ptr_old,
- Min(new_size, static_cast<uptr>(meta->get_requested_size())));
+ void *untagged_ptr_new = UntagPtr(tagged_ptr_new);
+ internal_memcpy(untagged_ptr_new, untagged_ptr_old,
+ Min(new_size, static_cast<uptr>(meta->GetRequestedSize())));
HwasanDeallocate(stack, tagged_ptr_old);
}
return tagged_ptr_new;
@@ -305,6 +401,8 @@ static void *HwasanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
}
HwasanChunkView FindHeapChunkByAddress(uptr address) {
+ if (!allocator.PointerIsMine(reinterpret_cast<void *>(address)))
+ return HwasanChunkView();
void *block = allocator.GetBlockBegin(reinterpret_cast<void*>(address));
if (!block)
return HwasanChunkView();
@@ -313,19 +411,40 @@ HwasanChunkView FindHeapChunkByAddress(uptr address) {
return HwasanChunkView(reinterpret_cast<uptr>(block), metadata);
}
-static uptr AllocationSize(const void *tagged_ptr) {
- const void *untagged_ptr = UntagPtr(tagged_ptr);
+static const void *AllocationBegin(const void *p) {
+ const void *untagged_ptr = UntagPtr(p);
+ if (!untagged_ptr)
+ return nullptr;
+
+ const void *beg = allocator.GetBlockBegin(untagged_ptr);
+ if (!beg)
+ return nullptr;
+
+ Metadata *b = (Metadata *)allocator.GetMetaData(beg);
+ if (b->GetRequestedSize() == 0)
+ return nullptr;
+
+ tag_t tag = GetTagFromPointer((uptr)p);
+ return (const void *)AddTagToPointer((uptr)beg, tag);
+}
+
+static uptr AllocationSize(const void *p) {
+ const void *untagged_ptr = UntagPtr(p);
if (!untagged_ptr) return 0;
const void *beg = allocator.GetBlockBegin(untagged_ptr);
- Metadata *b = (Metadata *)allocator.GetMetaData(untagged_ptr);
- if (b->right_aligned) {
- if (beg != reinterpret_cast<void *>(RoundDownTo(
- reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment)))
- return 0;
- } else {
- if (beg != untagged_ptr) return 0;
- }
- return b->get_requested_size();
+ if (!beg)
+ return 0;
+ Metadata *b = (Metadata *)allocator.GetMetaData(beg);
+ return b->GetRequestedSize();
+}
+
+static uptr AllocationSizeFast(const void *p) {
+ const void *untagged_ptr = UntagPtr(p);
+ void *aligned_ptr = reinterpret_cast<void *>(
+ RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
+ Metadata *meta =
+ reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
+ return meta->GetRequestedSize();
}
void *hwasan_malloc(uptr size, StackTrace *stack) {
@@ -416,6 +535,120 @@ void hwasan_free(void *ptr, StackTrace *stack) {
} // namespace __hwasan
+// --- Implementation of LSan-specific functions --- {{{1
+namespace __lsan {
+
+void LockAllocator() {
+ __hwasan::HwasanAllocatorLock();
+}
+
+void UnlockAllocator() {
+ __hwasan::HwasanAllocatorUnlock();
+}
+
+void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
+ *begin = (uptr)&__hwasan::allocator;
+ *end = *begin + sizeof(__hwasan::allocator);
+}
+
+uptr PointsIntoChunk(void *p) {
+ p = UntagPtr(p);
+ uptr addr = reinterpret_cast<uptr>(p);
+ uptr chunk =
+ reinterpret_cast<uptr>(__hwasan::allocator.GetBlockBeginFastLocked(p));
+ if (!chunk)
+ return 0;
+ __hwasan::Metadata *metadata = reinterpret_cast<__hwasan::Metadata *>(
+ __hwasan::allocator.GetMetaData(reinterpret_cast<void *>(chunk)));
+ if (!metadata || !metadata->IsAllocated())
+ return 0;
+ if (addr < chunk + metadata->GetRequestedSize())
+ return chunk;
+ if (IsSpecialCaseOfOperatorNew0(chunk, metadata->GetRequestedSize(), addr))
+ return chunk;
+ return 0;
+}
+
+uptr GetUserBegin(uptr chunk) {
+ CHECK_EQ(UntagAddr(chunk), chunk);
+ void *block = __hwasan::allocator.GetBlockBeginFastLocked(
+ reinterpret_cast<void *>(chunk));
+ if (!block)
+ return 0;
+ __hwasan::Metadata *metadata = reinterpret_cast<__hwasan::Metadata *>(
+ __hwasan::allocator.GetMetaData(block));
+ if (!metadata || !metadata->IsAllocated())
+ return 0;
+
+ return reinterpret_cast<uptr>(block);
+}
+
+uptr GetUserAddr(uptr chunk) {
+ if (!InTaggableRegion(chunk))
+ return chunk;
+ tag_t mem_tag = *(tag_t *)__hwasan::MemToShadow(chunk);
+ return AddTagToPointer(chunk, mem_tag);
+}
+
+LsanMetadata::LsanMetadata(uptr chunk) {
+ CHECK_EQ(UntagAddr(chunk), chunk);
+ metadata_ =
+ chunk ? __hwasan::allocator.GetMetaData(reinterpret_cast<void *>(chunk))
+ : nullptr;
+}
+
+bool LsanMetadata::allocated() const {
+ if (!metadata_)
+ return false;
+ __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
+ return m->IsAllocated();
+}
+
+ChunkTag LsanMetadata::tag() const {
+ __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
+ return m->GetLsanTag();
+}
+
+void LsanMetadata::set_tag(ChunkTag value) {
+ __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
+ m->SetLsanTag(value);
+}
+
+uptr LsanMetadata::requested_size() const {
+ __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
+ return m->GetRequestedSize();
+}
+
+u32 LsanMetadata::stack_trace_id() const {
+ __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
+ return m->GetAllocStackId();
+}
+
+void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+ __hwasan::allocator.ForEachChunk(callback, arg);
+}
+
+IgnoreObjectResult IgnoreObject(const void *p) {
+ p = UntagPtr(p);
+ uptr addr = reinterpret_cast<uptr>(p);
+ uptr chunk = reinterpret_cast<uptr>(__hwasan::allocator.GetBlockBegin(p));
+ if (!chunk)
+ return kIgnoreObjectInvalid;
+ __hwasan::Metadata *metadata = reinterpret_cast<__hwasan::Metadata *>(
+ __hwasan::allocator.GetMetaData(reinterpret_cast<void *>(chunk)));
+ if (!metadata || !metadata->IsAllocated())
+ return kIgnoreObjectInvalid;
+ if (addr >= chunk + metadata->GetRequestedSize())
+ return kIgnoreObjectInvalid;
+ if (metadata->GetLsanTag() == kIgnored)
+ return kIgnoreObjectAlreadyIgnored;
+
+ metadata->SetLsanTag(kIgnored);
+ return kIgnoreObjectSuccess;
+}
+
+} // namespace __lsan
+
using namespace __hwasan;
void __hwasan_enable_allocator_tagging() {
@@ -446,4 +679,17 @@ uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }
+const void *__sanitizer_get_allocated_begin(const void *p) {
+ return AllocationBegin(p);
+}
+
uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }
+
+uptr __sanitizer_get_allocated_size_fast(const void *p) {
+ DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
+ uptr ret = AllocationSizeFast(p);
+ DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
+ return ret;
+}
+
+void __sanitizer_purge_allocator() { allocator.ForceReleaseToOS(); }
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocator.h b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocator.h
index 35c3d6b4bf43..2ada2a0b1851 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocator.h
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_allocator.h
@@ -17,6 +17,7 @@
#include "hwasan_interface_internal.h"
#include "hwasan_mapping.h"
#include "hwasan_poisoning.h"
+#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
@@ -24,28 +25,39 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_ring_buffer.h"
-#if !defined(__aarch64__) && !defined(__x86_64__)
-#error Unsupported platform
+#if !defined(__aarch64__) && !defined(__x86_64__) && !(SANITIZER_RISCV64)
+# error Unsupported platform
#endif
namespace __hwasan {
struct Metadata {
+ private:
+ atomic_uint64_t alloc_context_id;
u32 requested_size_low;
- u32 requested_size_high : 31;
- u32 right_aligned : 1;
- u32 alloc_context_id;
- u64 get_requested_size() {
- return (static_cast<u64>(requested_size_high) << 32) + requested_size_low;
- }
- void set_requested_size(u64 size) {
- requested_size_low = size & ((1ul << 32) - 1);
- requested_size_high = size >> 32;
- }
+ u16 requested_size_high;
+ atomic_uint8_t chunk_state;
+ u8 lsan_tag;
+
+ public:
+ inline void SetAllocated(u32 stack, u64 size);
+ inline void SetUnallocated();
+
+ inline bool IsAllocated() const;
+ inline u64 GetRequestedSize() const;
+ inline u32 GetAllocStackId() const;
+ inline u32 GetAllocThreadId() const;
+ inline void SetLsanTag(__lsan::ChunkTag tag);
+ inline __lsan::ChunkTag GetLsanTag() const;
};
+static_assert(sizeof(Metadata) == 16);
struct HwasanMapUnmapCallback {
void OnMap(uptr p, uptr size) const { UpdateMemoryUsage(); }
+ void OnMapSecondary(uptr p, uptr size, uptr user_begin,
+ uptr user_size) const {
+ UpdateMemoryUsage();
+ }
void OnUnmap(uptr p, uptr size) const {
// We are about to unmap a chunk of user memory.
// It can return as user-requested mmap() or another thread stack.
@@ -61,20 +73,27 @@ struct AP64 {
#if defined(HWASAN_ALIASING_MODE)
static const uptr kSpaceSize = 1ULL << kAddressTagShift;
+ typedef __sanitizer::DefaultSizeClassMap SizeClassMap;
+#elif SANITIZER_LINUX && !SANITIZER_ANDROID
+ static const uptr kSpaceSize = 0x40000000000ULL; // 4T.
+ typedef __sanitizer::DefaultSizeClassMap SizeClassMap;
#else
- static const uptr kSpaceSize = 0x2000000000ULL;
+ static const uptr kSpaceSize = 0x2000000000ULL; // 128G.
+ typedef __sanitizer::VeryDenseSizeClassMap SizeClassMap;
#endif
+
static const uptr kMetadataSize = sizeof(Metadata);
- typedef __sanitizer::VeryDenseSizeClassMap SizeClassMap;
using AddressSpaceView = LocalAddressSpaceView;
typedef HwasanMapUnmapCallback MapUnmapCallback;
static const uptr kFlags = 0;
};
+
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
typedef CombinedAllocator<PrimaryAllocator> Allocator;
typedef Allocator::AllocatorCache AllocatorCache;
-void AllocatorSwallowThreadLocalCache(AllocatorCache *cache);
+void AllocatorThreadStart(AllocatorCache *cache);
+void AllocatorThreadFinish(AllocatorCache *cache);
class HwasanChunkView {
public:
@@ -87,8 +106,12 @@ class HwasanChunkView {
uptr UsedSize() const; // Size requested by the user
uptr ActualSize() const; // Size allocated by the allocator.
u32 GetAllocStackId() const;
+ u32 GetAllocThreadId() const;
bool FromSmallHeap() const;
+ bool AddrIsInside(uptr addr) const;
+
private:
+ friend class __lsan::LsanMetadata;
uptr block_;
Metadata *const metadata_;
};
@@ -97,29 +120,18 @@ HwasanChunkView FindHeapChunkByAddress(uptr address);
// Information about one (de)allocation that happened in the past.
// These are recorded in a thread-local ring buffer.
-// TODO: this is currently 24 bytes (20 bytes + alignment).
-// Compress it to 16 bytes or extend it to be more useful.
struct HeapAllocationRecord {
uptr tagged_addr;
- u32 alloc_context_id;
- u32 free_context_id;
- u32 requested_size;
+ u32 alloc_thread_id;
+ u32 alloc_context_id;
+ u32 free_context_id;
+ u32 requested_size;
};
typedef RingBuffer<HeapAllocationRecord> HeapAllocationsRingBuffer;
void GetAllocatorStats(AllocatorStatCounters s);
-inline bool InTaggableRegion(uptr addr) {
-#if defined(HWASAN_ALIASING_MODE)
- // Aliases are mapped next to shadow so that the upper bits match the shadow
- // base.
- return (addr >> kTaggableRegionCheckShift) ==
- (GetShadowOffset() >> kTaggableRegionCheckShift);
-#endif
- return true;
-}
-
} // namespace __hwasan
#endif // HWASAN_ALLOCATOR_H
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_checks.h b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_checks.h
index ab543ea88beb..0911af30dcb8 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_checks.h
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_checks.h
@@ -15,17 +15,49 @@
#include "hwasan_allocator.h"
#include "hwasan_mapping.h"
+#include "hwasan_registers.h"
#include "sanitizer_common/sanitizer_common.h"
namespace __hwasan {
-template <unsigned X>
+
+enum class ErrorAction { Abort, Recover };
+enum class AccessType { Load, Store };
+
+// Used when the access size is known.
+constexpr unsigned SigTrapEncoding(ErrorAction EA, AccessType AT,
+ unsigned LogSize) {
+ return 0x20 * (EA == ErrorAction::Recover) +
+ 0x10 * (AT == AccessType::Store) + LogSize;
+}
+
+// Used when the access size varies at runtime.
+constexpr unsigned SigTrapEncoding(ErrorAction EA, AccessType AT) {
+ return SigTrapEncoding(EA, AT, 0xf);
+}
+
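
// Worked example (editor's note): a recoverable 8-byte store encodes as
// 0x20 + 0x10 + 3 == 0x33; the trap handler recovers the fields from the
// immediate after subtracting the per-arch base (0x40 on x86-64 and RISC-V,
// 0x900 on AArch64).
static inline void DecodeSigTrapSketch(unsigned imm, bool &recover,
                                       bool &is_store, unsigned &log_size) {
  recover = (imm & 0x20) != 0;
  is_store = (imm & 0x10) != 0;
  log_size = imm & 0xf;  // 0xf means the size was passed in a register
}
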
+template <ErrorAction EA, AccessType AT, size_t LogSize>
__attribute__((always_inline)) static void SigTrap(uptr p) {
-#if defined(__aarch64__)
+  // Other platforms, like Linux, can use signals to intercept an exception
+  // and dispatch to HandleTagMismatch. The Fuchsia implementation doesn't use
+  // signals, so we call it here directly instead.
+#if CAN_GET_REGISTERS && SANITIZER_FUCHSIA
+ auto regs = GetRegisters();
+  size_t size = 1 << LogSize;
+ AccessInfo access_info = {
+ .addr = p,
+ .size = size,
+ .is_store = AT == AccessType::Store,
+ .is_load = AT == AccessType::Load,
+ .recover = EA == ErrorAction::Recover,
+ };
+ HandleTagMismatch(access_info, (uptr)__builtin_return_address(0),
+ (uptr)__builtin_frame_address(0), /*uc=*/nullptr, regs.x);
+#elif defined(__aarch64__)
(void)p;
// 0x900 is added so as not to interfere with the kernel's use of lower
// brk immediate values.
register uptr x0 asm("x0") = p;
- asm("brk %1\n\t" ::"r"(x0), "n"(0x900 + X));
+ asm("brk %1\n\t" ::"r"(x0), "n"(0x900 + SigTrapEncoding(EA, AT, LogSize)));
#elif defined(__x86_64__)
// INT3 + NOP DWORD ptr [EAX + X] to pass X to our signal handler, 5 bytes
// total. The pointer is passed via rdi.
@@ -34,8 +66,17 @@ __attribute__((always_inline)) static void SigTrap(uptr p) {
// different nop command, the three bytes one).
asm volatile(
"int3\n"
- "nopl %c0(%%rax)\n" ::"n"(0x40 + X),
+ "nopl %c0(%%rax)\n" ::"n"(0x40 + SigTrapEncoding(EA, AT, LogSize)),
"D"(p));
+#elif SANITIZER_RISCV64
+ // Put pointer into x10
+  // addiw carries the immediate 0x40 + X, where 0x40 is a magic number and X
+  // encodes the access size
+ register uptr x10 asm("x10") = p;
+ asm volatile(
+ "ebreak\n"
+ "addiw x0, x0, %1\n" ::"r"(x10),
+ "I"(0x40 + SigTrapEncoding(EA, AT, LogSize)));
#else
// FIXME: not always sigill.
__builtin_trap();
@@ -44,26 +85,62 @@ __attribute__((always_inline)) static void SigTrap(uptr p) {
}
// Version with access size which is not power of 2
-template <unsigned X>
+template <ErrorAction EA, AccessType AT>
__attribute__((always_inline)) static void SigTrap(uptr p, uptr size) {
-#if defined(__aarch64__)
+  // Other platforms, like Linux, can use signals to intercept an exception
+  // and dispatch to HandleTagMismatch. The Fuchsia implementation doesn't use
+  // signals, so we call it here directly instead.
+#if CAN_GET_REGISTERS && SANITIZER_FUCHSIA
+ auto regs = GetRegisters();
+ AccessInfo access_info = {
+ .addr = p,
+ .size = size,
+ .is_store = AT == AccessType::Store,
+ .is_load = AT == AccessType::Load,
+ .recover = EA == ErrorAction::Recover,
+ };
+ HandleTagMismatch(access_info, (uptr)__builtin_return_address(0),
+ (uptr)__builtin_frame_address(0), /*uc=*/nullptr, regs.x);
+#elif defined(__aarch64__)
register uptr x0 asm("x0") = p;
register uptr x1 asm("x1") = size;
- asm("brk %2\n\t" ::"r"(x0), "r"(x1), "n"(0x900 + X));
+ asm("brk %2\n\t" ::"r"(x0), "r"(x1), "n"(0x900 + SigTrapEncoding(EA, AT)));
#elif defined(__x86_64__)
// Size is stored in rsi.
asm volatile(
"int3\n"
- "nopl %c0(%%rax)\n" ::"n"(0x40 + X),
+ "nopl %c0(%%rax)\n" ::"n"(0x40 + SigTrapEncoding(EA, AT)),
"D"(p), "S"(size));
+#elif SANITIZER_RISCV64
+ // Put access size into x11
+ register uptr x10 asm("x10") = p;
+ register uptr x11 asm("x11") = size;
+ asm volatile(
+ "ebreak\n"
+ "addiw x0, x0, %2\n" ::"r"(x10),
+ "r"(x11), "I"(0x40 + SigTrapEncoding(EA, AT)));
#else
__builtin_trap();
#endif
// __builtin_unreachable();
}
-__attribute__((always_inline, nodebug)) static bool PossiblyShortTagMatches(
- tag_t mem_tag, uptr ptr, uptr sz) {
+__attribute__((always_inline, nodebug)) static inline uptr ShortTagSize(
+ tag_t mem_tag, uptr ptr) {
+ DCHECK(IsAligned(ptr, kShadowAlignment));
+ tag_t ptr_tag = GetTagFromPointer(ptr);
+ if (ptr_tag == mem_tag)
+ return kShadowAlignment;
+ if (!mem_tag || mem_tag >= kShadowAlignment)
+ return 0;
+ if (*(u8 *)(ptr | (kShadowAlignment - 1)) != ptr_tag)
+ return 0;
+ return mem_tag;
+}
+
+__attribute__((always_inline, nodebug)) static inline bool
+PossiblyShortTagMatches(tag_t mem_tag, uptr ptr, uptr sz) {
+ DCHECK(IsAligned(ptr, kShadowAlignment));
tag_t ptr_tag = GetTagFromPointer(ptr);
if (ptr_tag == mem_tag)
return true;
@@ -71,15 +148,9 @@ __attribute__((always_inline, nodebug)) static bool PossiblyShortTagMatches(
return false;
if ((ptr & (kShadowAlignment - 1)) + sz > mem_tag)
return false;
-#ifndef __aarch64__
- ptr = UntagAddr(ptr);
-#endif
return *(u8 *)(ptr | (kShadowAlignment - 1)) == ptr_tag;
}
-enum class ErrorAction { Abort, Recover };
-enum class AccessType { Load, Store };
-
template <ErrorAction EA, AccessType AT, unsigned LogSize>
__attribute__((always_inline, nodebug)) static void CheckAddress(uptr p) {
if (!InTaggableRegion(p))
@@ -87,8 +158,7 @@ __attribute__((always_inline, nodebug)) static void CheckAddress(uptr p) {
uptr ptr_raw = p & ~kAddressTagMask;
tag_t mem_tag = *(tag_t *)MemToShadow(ptr_raw);
if (UNLIKELY(!PossiblyShortTagMatches(mem_tag, p, 1 << LogSize))) {
- SigTrap<0x20 * (EA == ErrorAction::Recover) +
- 0x10 * (AT == AccessType::Store) + LogSize>(p);
+ SigTrap<EA, AT, LogSize>(p);
if (EA == ErrorAction::Abort)
__builtin_unreachable();
}
@@ -105,18 +175,16 @@ __attribute__((always_inline, nodebug)) static void CheckAddressSized(uptr p,
tag_t *shadow_last = (tag_t *)MemToShadow(ptr_raw + sz);
for (tag_t *t = shadow_first; t < shadow_last; ++t)
if (UNLIKELY(ptr_tag != *t)) {
- SigTrap<0x20 * (EA == ErrorAction::Recover) +
- 0x10 * (AT == AccessType::Store) + 0xf>(p, sz);
+ SigTrap<EA, AT>(p, sz);
if (EA == ErrorAction::Abort)
__builtin_unreachable();
}
uptr end = p + sz;
- uptr tail_sz = end & 0xf;
+ uptr tail_sz = end & (kShadowAlignment - 1);
if (UNLIKELY(tail_sz != 0 &&
!PossiblyShortTagMatches(
*shadow_last, end & ~(kShadowAlignment - 1), tail_sz))) {
- SigTrap<0x20 * (EA == ErrorAction::Recover) +
- 0x10 * (AT == AccessType::Store) + 0xf>(p, sz);
+ SigTrap<EA, AT>(p, sz);
if (EA == ErrorAction::Abort)
__builtin_unreachable();
}
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_exceptions.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_exceptions.cpp
index 169e7876cb58..bf700bf56838 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_exceptions.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_exceptions.cpp
@@ -29,8 +29,8 @@ typedef _Unwind_Reason_Code PersonalityFn(int version, _Unwind_Action actions,
// is statically linked and the sanitizer runtime and the program are linked
// against different unwinders. The _Unwind_Context data structure is opaque so
// it may be incompatible between unwinders.
-typedef _Unwind_Word GetGRFn(_Unwind_Context* context, int index);
-typedef _Unwind_Word GetCFAFn(_Unwind_Context* context);
+typedef uintptr_t GetGRFn(_Unwind_Context* context, int index);
+typedef uintptr_t GetCFAFn(_Unwind_Context* context);
extern "C" SANITIZER_INTERFACE_ATTRIBUTE _Unwind_Reason_Code
__hwasan_personality_wrapper(int version, _Unwind_Action actions,
@@ -56,11 +56,14 @@ __hwasan_personality_wrapper(int version, _Unwind_Action actions,
uptr fp = get_gr(context, 6); // rbp
#elif defined(__aarch64__)
uptr fp = get_gr(context, 29); // x29
+#elif SANITIZER_RISCV64
+ uptr fp = get_gr(context, 8); // x8
#else
#error Unsupported architecture
#endif
uptr sp = get_cfa(context);
- TagMemory(sp, fp - sp, 0);
+ TagMemory(UntagAddr(sp), UntagAddr(fp) - UntagAddr(sp),
+ GetTagFromPointer(sp));
}
return rc;
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_flags.inc b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_flags.inc
index 18ea47f981be..058a0457b9e7 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_flags.inc
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_flags.inc
@@ -23,6 +23,9 @@ HWASAN_FLAG(bool, tag_in_free, true, "")
HWASAN_FLAG(bool, print_stats, false, "")
HWASAN_FLAG(bool, halt_on_error, true, "")
HWASAN_FLAG(bool, atexit, false, "")
+HWASAN_FLAG(
+ bool, print_live_threads_info, true,
+    "If set, prints the remaining live threads in the report as extra information.")
// Test only flag to disable malloc/realloc/free memory tagging on startup.
// Tagging can be reenabled with __hwasan_enable_allocator_tagging().
@@ -39,7 +42,7 @@ HWASAN_FLAG(
HWASAN_FLAG(bool, free_checks_tail_magic, 1,
"If set, free() will check the magic values "
- "to the right of the allocated object "
+ "after the allocated object "
"if the allocation size is not a divident of the granule size")
HWASAN_FLAG(
int, max_free_fill_size, 0,
@@ -81,3 +84,10 @@ HWASAN_FLAG(bool, malloc_bisect_dump, false,
// are untagged before the call.
HWASAN_FLAG(bool, fail_without_syscall_abi, true,
"Exit if fail to request relaxed syscall ABI.")
+
+HWASAN_FLAG(
+ uptr, fixed_shadow_base, -1,
+ "If not -1, HWASan will attempt to allocate the shadow at this address, "
+    "instead of choosing one dynamically. "
+ "Tip: this can be combined with the compiler option, "
+ "-hwasan-mapping-offset, to optimize the instrumentation.")
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_fuchsia.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_fuchsia.cpp
index e299a7e862eb..d1696f8aa796 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_fuchsia.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_fuchsia.cpp
@@ -15,6 +15,9 @@
#include "sanitizer_common/sanitizer_fuchsia.h"
#if SANITIZER_FUCHSIA
+#include <zircon/features.h>
+#include <zircon/syscalls.h>
+
#include "hwasan.h"
#include "hwasan_interface_internal.h"
#include "hwasan_report.h"
@@ -130,7 +133,7 @@ static void ThreadCreateHook(void *hook, bool aborted) {
static void ThreadStartHook(void *hook, thrd_t self) {
Thread *thread = static_cast<Thread *>(hook);
FinishThreadInitialization(thread);
- thread->InitRandomState();
+ thread->EnsureRandomStateInited();
}
// This is the function that sets up the stack ring buffer and enables us to use
@@ -180,12 +183,33 @@ void HwasanTSDThreadInit() {}
// function is unneeded.
void InstallAtExitHandler() {}
-// TODO(fxbug.dev/81499): Once we finalize the tagged pointer ABI in zircon, we should come back
-// here and implement the appropriate check that TBI is enabled.
-void InitializeOsSupport() {}
+void HwasanInstallAtForkHandler() {}
+
+void InstallAtExitCheckLeaks() {}
+
+void InitializeOsSupport() {
+#ifdef __aarch64__
+ uint32_t features = 0;
+ CHECK_EQ(zx_system_get_features(ZX_FEATURE_KIND_ADDRESS_TAGGING, &features),
+ ZX_OK);
+ if (!(features & ZX_ARM64_FEATURE_ADDRESS_TAGGING_TBI) &&
+ flags()->fail_without_syscall_abi) {
+ Printf(
+ "FATAL: HWAddressSanitizer requires "
+ "ZX_ARM64_FEATURE_ADDRESS_TAGGING_TBI.\n");
+ Die();
+ }
+#endif
+}
} // namespace __hwasan
+namespace __lsan {
+
+bool UseExitcodeOnLeak() { return __hwasan::flags()->halt_on_error; }
+
+} // namespace __lsan
+
extern "C" {
void *__sanitizer_before_thread_create_hook(thrd_t thread, bool detached,
@@ -208,6 +232,10 @@ void __sanitizer_thread_exit_hook(void *hook, thrd_t self) {
__hwasan::ThreadExitHook(hook, self);
}
+void __sanitizer_module_loaded(const struct dl_phdr_info *info, size_t) {
+ __hwasan_library_loaded(info->dlpi_addr, info->dlpi_phdr, info->dlpi_phnum);
+}
+
} // extern "C"
#endif // SANITIZER_FUCHSIA
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_globals.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_globals.cpp
index d71bcd792e1f..7e0f3df20dd0 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_globals.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_globals.cpp
@@ -13,6 +13,8 @@
#include "hwasan_globals.h"
+#include "sanitizer_common/sanitizer_array_ref.h"
+
namespace __hwasan {
enum { NT_LLVM_HWASAN_GLOBALS = 3 };
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_globals.h b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_globals.h
index fd7adf7a0588..94cd53e1888c 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_globals.h
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_globals.h
@@ -16,6 +16,7 @@
#include <link.h>
+#include "sanitizer_common/sanitizer_array_ref.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_interceptors.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_interceptors.cpp
index 68f8adec0776..96df4dd0c24d 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_interceptors.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_interceptors.cpp
@@ -14,60 +14,356 @@
// sanitizer_common/sanitizer_common_interceptors.h
//===----------------------------------------------------------------------===//
-#include "interception/interception.h"
+#define SANITIZER_COMMON_NO_REDEFINE_BUILTINS
+
#include "hwasan.h"
+#include "hwasan_allocator.h"
+#include "hwasan_checks.h"
+#include "hwasan_mapping.h"
+#include "hwasan_platform_interceptors.h"
#include "hwasan_thread.h"
+#include "hwasan_thread_list.h"
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_errno.h"
+#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#if !SANITIZER_FUCHSIA
using namespace __hwasan;
-#if HWASAN_WITH_INTERCEPTORS
+struct HWAsanInterceptorContext {
+ const char *interceptor_name;
+};
+
+# define ACCESS_MEMORY_RANGE(offset, size, access) \
+ do { \
+ __hwasan::CheckAddressSized<ErrorAction::Recover, access>((uptr)offset, \
+ size); \
+ } while (0)
+
+# define HWASAN_READ_RANGE(offset, size) \
+ ACCESS_MEMORY_RANGE(offset, size, AccessType::Load)
+# define HWASAN_WRITE_RANGE(offset, size) \
+ ACCESS_MEMORY_RANGE(offset, size, AccessType::Store)
+
+# if !SANITIZER_APPLE
+# define HWASAN_INTERCEPT_FUNC(name) \
+ do { \
+ if (!INTERCEPT_FUNCTION(name)) \
+ VReport(1, "HWAddressSanitizer: failed to intercept '%s'\n", #name); \
+ } while (0)
+# define HWASAN_INTERCEPT_FUNC_VER(name, ver) \
+ do { \
+ if (!INTERCEPT_FUNCTION_VER(name, ver)) \
+ VReport(1, "HWAddressSanitizer: failed to intercept '%s@@%s'\n", \
+ #name, ver); \
+ } while (0)
+# define HWASAN_INTERCEPT_FUNC_VER_UNVERSIONED_FALLBACK(name, ver) \
+ do { \
+ if (!INTERCEPT_FUNCTION_VER(name, ver) && !INTERCEPT_FUNCTION(name)) \
+ VReport( \
+ 1, "HWAddressSanitizer: failed to intercept '%s@@%s' or '%s'\n", \
+ #name, ver, #name); \
+ } while (0)
+
+# else
+// OS X interceptors don't need to be initialized with INTERCEPT_FUNCTION.
+# define HWASAN_INTERCEPT_FUNC(name)
+# endif // SANITIZER_APPLE
+
+# if HWASAN_WITH_INTERCEPTORS
+
+# define COMMON_SYSCALL_PRE_READ_RANGE(p, s) HWASAN_READ_RANGE(p, s)
+# define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) HWASAN_WRITE_RANGE(p, s)
+# define COMMON_SYSCALL_POST_READ_RANGE(p, s) \
+ do { \
+ (void)(p); \
+ (void)(s); \
+ } while (false)
+# define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \
+ do { \
+ (void)(p); \
+ (void)(s); \
+ } while (false)
+# include "sanitizer_common/sanitizer_common_syscalls.inc"
+# include "sanitizer_common/sanitizer_syscalls_netbsd.inc"
+
+# define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
+ HWASAN_WRITE_RANGE(ptr, size)
+
+# define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
+ HWASAN_READ_RANGE(ptr, size)
+
+# define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
+ HWAsanInterceptorContext _ctx = {#func}; \
+ ctx = (void *)&_ctx; \
+ do { \
+ (void)(ctx); \
+ (void)(func); \
+ } while (false)
+
+# define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
+ do { \
+ (void)(ctx); \
+ (void)(path); \
+ } while (false)
+
+# define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
+ do { \
+ (void)(ctx); \
+ (void)(fd); \
+ } while (false)
+
+# define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \
+ do { \
+ (void)(ctx); \
+ (void)(fd); \
+ } while (false)
+
+# define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \
+ do { \
+ (void)(ctx); \
+ (void)(fd); \
+ (void)(newfd); \
+ } while (false)
+
+# define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \
+ do { \
+ (void)(ctx); \
+ (void)(name); \
+ } while (false)
+
+# define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \
+ do { \
+ (void)(ctx); \
+ (void)(thread); \
+ (void)(name); \
+ } while (false)
+
+# define COMMON_INTERCEPTOR_BLOCK_REAL(name) \
+ do { \
+ (void)(name); \
+ } while (false)
+
+# define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size) \
+ { \
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) \
+ return internal_memset(dst, v, size); \
+ COMMON_INTERCEPTOR_ENTER(ctx, memset, dst, v, size); \
+ if (MemIsApp(UntagAddr(reinterpret_cast<uptr>(dst))) && \
+ common_flags()->intercept_intrin) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
+ return REAL(memset)(dst, v, size); \
+ }
+
+# define COMMON_INTERCEPTOR_STRERROR() \
+ do { \
+ } while (false)
+
+# define COMMON_INTERCEPT_FUNCTION(name) HWASAN_INTERCEPT_FUNC(name)
+
+# define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (!hwasan_inited)
+
+// The main purpose of the mmap interceptor is to prevent the user from
+// allocating on top of shadow pages.
+//
+// For compatibility, it does not tag pointers, nor does it allow
+// MAP_FIXED in combination with a tagged pointer. (Since mmap itself
+// will not return a tagged pointer, the tagged pointer must have come
+// from elsewhere, such as the secondary allocator, which makes it a
+// very odd use case.)
+template <class Mmap>
+static void *mmap_interceptor(Mmap real_mmap, void *addr, SIZE_T length,
+ int prot, int flags, int fd, OFF64_T offset) {
+ if (addr) {
+ if (flags & map_fixed) CHECK_EQ(addr, UntagPtr(addr));
+
+ addr = UntagPtr(addr);
+ }
+ SIZE_T rounded_length = RoundUpTo(length, GetPageSize());
+ void *end_addr = (char *)addr + (rounded_length - 1);
+ if (addr && length &&
+ (!MemIsApp(reinterpret_cast<uptr>(addr)) ||
+ !MemIsApp(reinterpret_cast<uptr>(end_addr)))) {
+ // User requested an address that is incompatible with HWASan's
+ // memory layout. Use a different address if allowed, else fail.
+ if (flags & map_fixed) {
+ errno = errno_EINVAL;
+ return (void *)-1;
+ } else {
+ addr = nullptr;
+ }
+ }
+ void *res = real_mmap(addr, length, prot, flags, fd, offset);
+ if (length && res != (void *)-1) {
+ uptr beg = reinterpret_cast<uptr>(res);
+ DCHECK(IsAligned(beg, GetPageSize()));
+ if (!MemIsApp(beg) || !MemIsApp(beg + rounded_length - 1)) {
+ // Application has attempted to map more memory than is supported by
+ // HWASan. Act as if we ran out of memory.
+ internal_munmap(res, length);
+ errno = errno_ENOMEM;
+ return (void *)-1;
+ }
+ __hwasan::TagMemoryAligned(beg, rounded_length, 0);
+ }
+
+ return res;
+}
+
+template <class Munmap>
+static int munmap_interceptor(Munmap real_munmap, void *addr, SIZE_T length) {
+ // We should not tag if munmap fails, but it's too late to tag after
+ // real_munmap, as the pages could be mmapped by another thread.
+ uptr beg = reinterpret_cast<uptr>(addr);
+ if (length && IsAligned(beg, GetPageSize())) {
+ SIZE_T rounded_length = RoundUpTo(length, GetPageSize());
+ // Protect from unmapping the shadow.
+ if (!MemIsApp(beg) || !MemIsApp(beg + rounded_length - 1)) {
+ errno = errno_EINVAL;
+ return -1;
+ }
+ __hwasan::TagMemoryAligned(beg, rounded_length, 0);
+ }
+ return real_munmap(addr, length);
+}
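
A minimal user-level sketch of the contract the two interceptors above enforce (a hypothetical example program, not part of this diff; the fixed address is a placeholder, and whether a given address counts as app memory depends on the platform's HWASan layout):

    #include <errno.h>
    #include <stdio.h>
    #include <sys/mman.h>

    int main() {
      // Ordinary anonymous mapping: the interceptor zero-tags the new pages.
      void *p = mmap(nullptr, 4096, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (p == MAP_FAILED)
        return 1;
      // A MAP_FIXED request at an address incompatible with HWASan's layout
      // is expected to fail with EINVAL instead of landing on shadow memory.
      void *bad = mmap((void *)0x1000, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
      if (bad == MAP_FAILED && errno == EINVAL)
        printf("incompatible MAP_FIXED rejected\n");
      munmap(p, 4096);
      return 0;
    }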
+
+# define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, length, prot, flags, \
+ fd, offset) \
+ do { \
+ (void)(ctx); \
+ return mmap_interceptor(REAL(mmap), addr, length, prot, flags, fd, offset); \
+ } while (false)
+
+# define COMMON_INTERCEPTOR_MUNMAP_IMPL(ctx, addr, length) \
+ do { \
+ (void)(ctx); \
+ return munmap_interceptor(REAL(munmap), addr, length); \
+ } while (false)
+
+# include "sanitizer_common/sanitizer_common_interceptors_memintrinsics.inc"
+# include "sanitizer_common/sanitizer_common_interceptors.inc"
struct ThreadStartArg {
- thread_callback_t callback;
- void *param;
+ __sanitizer_sigset_t starting_sigset_;
};
static void *HwasanThreadStartFunc(void *arg) {
__hwasan_thread_enter();
- ThreadStartArg A = *reinterpret_cast<ThreadStartArg*>(arg);
- UnmapOrDie(arg, GetPageSizeCached());
- return A.callback(A.param);
-}
-
-INTERCEPTOR(int, pthread_create, void *th, void *attr, void *(*callback)(void*),
- void * param) {
- ScopedTaggingDisabler disabler;
- ThreadStartArg *A = reinterpret_cast<ThreadStartArg *> (MmapOrDie(
- GetPageSizeCached(), "pthread_create"));
- *A = {callback, param};
- int res = REAL(pthread_create)(th, attr, &HwasanThreadStartFunc, A);
- return res;
+ SetSigProcMask(&reinterpret_cast<ThreadStartArg *>(arg)->starting_sigset_,
+ nullptr);
+ InternalFree(arg);
+ auto self = GetThreadSelf();
+ auto args = hwasanThreadArgRetval().GetArgs(self);
+ void *retval = (*args.routine)(args.arg_retval);
+ hwasanThreadArgRetval().Finish(self, retval);
+ return retval;
+}
+
+extern "C" {
+int pthread_attr_getdetachstate(void *attr, int *v);
+}
+
+INTERCEPTOR(int, pthread_create, void *thread, void *attr,
+ void *(*callback)(void *), void *param) {
+ EnsureMainThreadIDIsCorrect();
+ ScopedTaggingDisabler tagging_disabler;
+ bool detached = [attr]() {
+ int d = 0;
+ return attr && !pthread_attr_getdetachstate(attr, &d) && IsStateDetached(d);
+ }();
+ ThreadStartArg *A = (ThreadStartArg *)InternalAlloc(sizeof(ThreadStartArg));
+ ScopedBlockSignals block(&A->starting_sigset_);
+ // ASAN uses the same approach to disable leaks from pthread_create.
+# if CAN_SANITIZE_LEAKS
+ __lsan::ScopedInterceptorDisabler lsan_disabler;
+# endif
+
+ int result;
+ hwasanThreadArgRetval().Create(detached, {callback, param}, [&]() -> uptr {
+ result = REAL(pthread_create)(thread, attr, &HwasanThreadStartFunc, A);
+ return result ? 0 : *(uptr *)(thread);
+ });
+ if (result != 0)
+ InternalFree(A);
+ return result;
+}
+
+INTERCEPTOR(int, pthread_join, void *thread, void **retval) {
+ int result;
+ hwasanThreadArgRetval().Join((uptr)thread, [&]() {
+ result = REAL(pthread_join)(thread, retval);
+ return !result;
+ });
+ return result;
+}
+
+INTERCEPTOR(int, pthread_detach, void *thread) {
+ int result;
+ hwasanThreadArgRetval().Detach((uptr)thread, [&]() {
+ result = REAL(pthread_detach)(thread);
+ return !result;
+ });
+ return result;
+}
+
+INTERCEPTOR(void, pthread_exit, void *retval) {
+ hwasanThreadArgRetval().Finish(GetThreadSelf(), retval);
+ REAL(pthread_exit)(retval);
+}
+
+# if SANITIZER_GLIBC
+INTERCEPTOR(int, pthread_tryjoin_np, void *thread, void **ret) {
+ int result;
+ hwasanThreadArgRetval().Join((uptr)thread, [&]() {
+ result = REAL(pthread_tryjoin_np)(thread, ret);
+ return !result;
+ });
+ return result;
+}
+
+INTERCEPTOR(int, pthread_timedjoin_np, void *thread, void **ret,
+ const struct timespec *abstime) {
+ int result;
+ hwasanThreadArgRetval().Join((uptr)thread, [&]() {
+ result = REAL(pthread_timedjoin_np)(thread, ret, abstime);
+ return !result;
+ });
+ return result;
}
+# endif
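
The pthread interceptors above all funnel through the same arg/retval bookkeeping. A simplified standalone sketch of that pattern (hypothetical names; the real ThreadArgRetval additionally keeps pending return values visible to LSan until the thread is joined or detached):

    #include <map>
    #include <mutex>

    struct Args {
      void *(*routine)(void *);
      void *arg;
    };

    class ThreadArgRetvalSketch {
      std::mutex mu_;
      std::map<unsigned long, Args> args_;  // keyed by thread handle

     public:
      // Called by the pthread_create interceptor before the thread runs.
      void Create(unsigned long tid, Args a) {
        std::lock_guard<std::mutex> l(mu_);
        args_[tid] = a;
      }
      // Called by the thread start function to fetch what to run.
      Args GetArgs(unsigned long tid) {
        std::lock_guard<std::mutex> l(mu_);
        return args_.at(tid);
      }
      // Called on thread exit; the entry is retired on join/detach.
      void Finish(unsigned long tid) {
        std::lock_guard<std::mutex> l(mu_);
        args_.erase(tid);
      }
    };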
+
+DEFINE_REAL_PTHREAD_FUNCTIONS
DEFINE_REAL(int, vfork)
DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(int, vfork)
-#endif // HWASAN_WITH_INTERCEPTORS
-#if HWASAN_WITH_INTERCEPTORS && defined(__aarch64__)
// Get and/or change the set of blocked signals.
extern "C" int sigprocmask(int __how, const __hw_sigset_t *__restrict __set,
__hw_sigset_t *__restrict __oset);
-#define SIG_BLOCK 0
-#define SIG_SETMASK 2
+# define SIG_BLOCK 0
+# define SIG_SETMASK 2
extern "C" int __sigjmp_save(__hw_sigjmp_buf env, int savemask) {
+ env[0].__magic = kHwJmpBufMagic;
env[0].__mask_was_saved =
- (savemask && sigprocmask(SIG_BLOCK, (__hw_sigset_t *)0,
- &env[0].__saved_mask) == 0);
+ (savemask &&
+ sigprocmask(SIG_BLOCK, (__hw_sigset_t *)0, &env[0].__saved_mask) == 0);
return 0;
}
static void __attribute__((always_inline))
InternalLongjmp(__hw_register_buf env, int retval) {
+# if defined(__aarch64__)
+ constexpr size_t kSpIndex = 13;
+# elif defined(__x86_64__)
+ constexpr size_t kSpIndex = 6;
+# elif SANITIZER_RISCV64
+ constexpr size_t kSpIndex = 13;
+# endif
+
// Clear all memory tags on the stack between here and where we're going.
- unsigned long long stack_pointer = env[13];
+ unsigned long long stack_pointer = env[kSpIndex];
// The stack pointer should never be tagged, so we don't need to clear the
// tag for this function call.
__hwasan_handle_longjmp((void *)stack_pointer);
@@ -78,35 +374,106 @@ InternalLongjmp(__hw_register_buf env, int retval) {
// Must implement this ourselves, since we don't know the order of registers
// in different libc implementations and many implementations mangle the
// stack pointer so we can't use it without knowing the demangling scheme.
+# if defined(__aarch64__)
register long int retval_tmp asm("x1") = retval;
register void *env_address asm("x0") = &env[0];
- asm volatile("ldp x19, x20, [%0, #0<<3];"
- "ldp x21, x22, [%0, #2<<3];"
- "ldp x23, x24, [%0, #4<<3];"
- "ldp x25, x26, [%0, #6<<3];"
- "ldp x27, x28, [%0, #8<<3];"
- "ldp x29, x30, [%0, #10<<3];"
- "ldp d8, d9, [%0, #14<<3];"
- "ldp d10, d11, [%0, #16<<3];"
- "ldp d12, d13, [%0, #18<<3];"
- "ldp d14, d15, [%0, #20<<3];"
- "ldr x5, [%0, #13<<3];"
- "mov sp, x5;"
- // Return the value requested to return through arguments.
- // This should be in x1 given what we requested above.
- "cmp %1, #0;"
- "mov x0, #1;"
- "csel x0, %1, x0, ne;"
- "br x30;"
- : "+r"(env_address)
- : "r"(retval_tmp));
+ asm volatile(
+ "ldp x19, x20, [%0, #0<<3];"
+ "ldp x21, x22, [%0, #2<<3];"
+ "ldp x23, x24, [%0, #4<<3];"
+ "ldp x25, x26, [%0, #6<<3];"
+ "ldp x27, x28, [%0, #8<<3];"
+ "ldp x29, x30, [%0, #10<<3];"
+ "ldp d8, d9, [%0, #14<<3];"
+ "ldp d10, d11, [%0, #16<<3];"
+ "ldp d12, d13, [%0, #18<<3];"
+ "ldp d14, d15, [%0, #20<<3];"
+ "ldr x5, [%0, #13<<3];"
+ "mov sp, x5;"
+ // Return the value requested to return through arguments.
+ // This should be in x1 given what we requested above.
+ "cmp %1, #0;"
+ "mov x0, #1;"
+ "csel x0, %1, x0, ne;"
+ "br x30;"
+ : "+r"(env_address)
+ : "r"(retval_tmp));
+# elif defined(__x86_64__)
+ register long int retval_tmp asm("%rsi") = retval;
+ register void *env_address asm("%rdi") = &env[0];
+ asm volatile(
+ // Restore registers.
+ "mov (0*8)(%0),%%rbx;"
+ "mov (1*8)(%0),%%rbp;"
+ "mov (2*8)(%0),%%r12;"
+ "mov (3*8)(%0),%%r13;"
+ "mov (4*8)(%0),%%r14;"
+ "mov (5*8)(%0),%%r15;"
+ "mov (6*8)(%0),%%rsp;"
+ "mov (7*8)(%0),%%rdx;"
+ // Return 1 if retval is 0.
+ "mov $1,%%rax;"
+ "test %1,%1;"
+ "cmovnz %1,%%rax;"
+ "jmp *%%rdx;" ::"r"(env_address),
+ "r"(retval_tmp));
+# elif SANITIZER_RISCV64
+ register long int retval_tmp asm("x11") = retval;
+ register void *env_address asm("x10") = &env[0];
+ asm volatile(
+ "ld ra, 0<<3(%0);"
+ "ld s0, 1<<3(%0);"
+ "ld s1, 2<<3(%0);"
+ "ld s2, 3<<3(%0);"
+ "ld s3, 4<<3(%0);"
+ "ld s4, 5<<3(%0);"
+ "ld s5, 6<<3(%0);"
+ "ld s6, 7<<3(%0);"
+ "ld s7, 8<<3(%0);"
+ "ld s8, 9<<3(%0);"
+ "ld s9, 10<<3(%0);"
+ "ld s10, 11<<3(%0);"
+ "ld s11, 12<<3(%0);"
+# if __riscv_float_abi_double
+ "fld fs0, 14<<3(%0);"
+ "fld fs1, 15<<3(%0);"
+ "fld fs2, 16<<3(%0);"
+ "fld fs3, 17<<3(%0);"
+ "fld fs4, 18<<3(%0);"
+ "fld fs5, 19<<3(%0);"
+ "fld fs6, 20<<3(%0);"
+ "fld fs7, 21<<3(%0);"
+ "fld fs8, 22<<3(%0);"
+ "fld fs9, 23<<3(%0);"
+ "fld fs10, 24<<3(%0);"
+ "fld fs11, 25<<3(%0);"
+# elif __riscv_float_abi_soft
+# else
+# error "Unsupported case"
+# endif
+ "ld a4, 13<<3(%0);"
+ "mv sp, a4;"
+ // Return the value requested to return through arguments.
+ // This should be in x11 given what we requested above.
+ "seqz a0, %1;"
+ "add a0, a0, %1;"
+ "ret;"
+ : "+r"(env_address)
+ : "r"(retval_tmp));
+# endif
}
INTERCEPTOR(void, siglongjmp, __hw_sigjmp_buf env, int val) {
+ if (env[0].__magic != kHwJmpBufMagic) {
+ Printf(
+ "WARNING: Unexpected bad jmp_buf. Either setjmp was not called or "
+ "there is a bug in HWASan.\n");
+ return REAL(siglongjmp)(env, val);
+ }
+
if (env[0].__mask_was_saved)
// Restore the saved signal mask.
- (void)sigprocmask(SIG_SETMASK, &env[0].__saved_mask,
- (__hw_sigset_t *)0);
+ (void)sigprocmask(SIG_SETMASK, &env[0].__saved_mask, (__hw_sigset_t *)0);
InternalLongjmp(env[0].__jmpbuf, val);
}
@@ -114,41 +481,37 @@ INTERCEPTOR(void, siglongjmp, __hw_sigjmp_buf env, int val) {
// _setjmp on start_thread. Hence we have to intercept the longjmp on
// pthread_exit so the __hw_jmp_buf order matches.
INTERCEPTOR(void, __libc_longjmp, __hw_jmp_buf env, int val) {
+ if (env[0].__magic != kHwJmpBufMagic)
+ return REAL(__libc_longjmp)(env, val);
InternalLongjmp(env[0].__jmpbuf, val);
}
INTERCEPTOR(void, longjmp, __hw_jmp_buf env, int val) {
+ if (env[0].__magic != kHwJmpBufMagic) {
+ Printf(
+ "WARNING: Unexpected bad jmp_buf. Either setjmp was not called or "
+ "there is a bug in HWASan.\n");
+ return REAL(longjmp)(env, val);
+ }
InternalLongjmp(env[0].__jmpbuf, val);
}
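
In the normal flow the magic always matches, so the checks above only fire for buffers that never went through the intercepted setjmp path. A hedged sketch, assuming a build where the __sigjmp_save above is linked in:

    #include <csetjmp>
    #include <cstdio>

    static jmp_buf env;

    int main() {
      // setjmp ends up in the __sigjmp_save above, which records
      // kHwJmpBufMagic; the intercepted longjmp then recognizes the buffer
      // and runs InternalLongjmp, clearing stack tags along the way.
      if (setjmp(env) == 0) {
        longjmp(env, 42);
      } else {
        puts("returned through the intercepted longjmp");
      }
      return 0;
    }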
-#undef SIG_BLOCK
-#undef SIG_SETMASK
+# undef SIG_BLOCK
+# undef SIG_SETMASK
-#endif // HWASAN_WITH_INTERCEPTORS && __aarch64__
-
-static void BeforeFork() {
- StackDepotLockAll();
-}
-
-static void AfterFork() {
- StackDepotUnlockAll();
-}
-
-INTERCEPTOR(int, fork, void) {
- ENSURE_HWASAN_INITED();
- BeforeFork();
- int pid = REAL(fork)();
- AfterFork();
- return pid;
-}
+# endif // HWASAN_WITH_INTERCEPTORS
namespace __hwasan {
int OnExit() {
+ if (CAN_SANITIZE_LEAKS && common_flags()->detect_leaks &&
+ __lsan::HasReportedLeaks()) {
+ return common_flags()->exitcode;
+ }
// FIXME: ask frontend whether we need to return failure.
return 0;
}
-} // namespace __hwasan
+} // namespace __hwasan
namespace __hwasan {
@@ -156,17 +519,30 @@ void InitializeInterceptors() {
static int inited = 0;
CHECK_EQ(inited, 0);
- INTERCEPT_FUNCTION(fork);
+# if HWASAN_WITH_INTERCEPTORS
+ InitializeCommonInterceptors();
+
+ (void)(read_iovec);
+ (void)(write_iovec);
-#if HWASAN_WITH_INTERCEPTORS
-#if defined(__linux__)
+# if defined(__linux__)
+ INTERCEPT_FUNCTION(__libc_longjmp);
+ INTERCEPT_FUNCTION(longjmp);
+ INTERCEPT_FUNCTION(siglongjmp);
INTERCEPT_FUNCTION(vfork);
-#endif // __linux__
+# endif // __linux__
INTERCEPT_FUNCTION(pthread_create);
-#endif
+ INTERCEPT_FUNCTION(pthread_join);
+ INTERCEPT_FUNCTION(pthread_detach);
+ INTERCEPT_FUNCTION(pthread_exit);
+# if SANITIZER_GLIBC
+ INTERCEPT_FUNCTION(pthread_tryjoin_np);
+ INTERCEPT_FUNCTION(pthread_timedjoin_np);
+# endif
+# endif
inited = 1;
}
-} // namespace __hwasan
+} // namespace __hwasan
#endif // #if !SANITIZER_FUCHSIA
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_interface_internal.h b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_interface_internal.h
index 25c0f94fe51f..8f2f77dad917 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_interface_internal.h
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_interface_internal.h
@@ -77,6 +77,32 @@ SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_load16_noabort(uptr);
SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_loadN_match_all(uptr, uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load1_match_all(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load2_match_all(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load4_match_all(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load8_match_all(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load16_match_all(uptr, u8);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_loadN_match_all_noabort(uptr, uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load1_match_all_noabort(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load2_match_all_noabort(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load4_match_all_noabort(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load8_match_all_noabort(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load16_match_all_noabort(uptr, u8);
+
+SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_storeN(uptr, uptr);
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_store1(uptr);
@@ -103,12 +129,41 @@ SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_store16_noabort(uptr);
SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_storeN_match_all(uptr, uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store1_match_all(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store2_match_all(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store4_match_all(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store8_match_all(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store16_match_all(uptr, u8);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_storeN_match_all_noabort(uptr, uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store1_match_all_noabort(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store2_match_all_noabort(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store4_match_all_noabort(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store8_match_all_noabort(uptr, u8);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store16_match_all_noabort(uptr, u8);
+
+SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_tag_memory(uptr p, u8 tag, uptr sz);
SANITIZER_INTERFACE_ATTRIBUTE
uptr __hwasan_tag_pointer(uptr p, u8 tag);
SANITIZER_INTERFACE_ATTRIBUTE
+u8 __hwasan_get_tag_from_pointer(uptr p);
+
+SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_tag_mismatch(uptr addr, u8 ts);
SANITIZER_INTERFACE_ATTRIBUTE
@@ -168,60 +223,27 @@ void __hwasan_thread_exit();
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_print_memory_usage();
+// The compiler generates calls to this function when
+// `-hwasan-record-stack-history-with-calls` is enabled; each call adds frame
+// record information to the stack ring buffer. This is an alternative to the
+// compiler emitting instructions in the prologue that access the ring buffer
+// directly.
SANITIZER_INTERFACE_ATTRIBUTE
-int __sanitizer_posix_memalign(void **memptr, uptr alignment, uptr size);
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void * __sanitizer_memalign(uptr alignment, uptr size);
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void * __sanitizer_aligned_alloc(uptr alignment, uptr size);
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void * __sanitizer___libc_memalign(uptr alignment, uptr size);
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void * __sanitizer_valloc(uptr size);
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void * __sanitizer_pvalloc(uptr size);
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void __sanitizer_free(void *ptr);
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void __sanitizer_cfree(void *ptr);
-
-SANITIZER_INTERFACE_ATTRIBUTE
-uptr __sanitizer_malloc_usable_size(const void *ptr);
+void __hwasan_add_frame_record(u64 frame_record_info);
SANITIZER_INTERFACE_ATTRIBUTE
-__hwasan::__sanitizer_struct_mallinfo __sanitizer_mallinfo();
-
-SANITIZER_INTERFACE_ATTRIBUTE
-int __sanitizer_mallopt(int cmd, int value);
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void __sanitizer_malloc_stats(void);
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void * __sanitizer_calloc(uptr nmemb, uptr size);
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void * __sanitizer_realloc(void *ptr, uptr size);
-
+void *__hwasan_memcpy(void *dst, const void *src, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
-void * __sanitizer_reallocarray(void *ptr, uptr nmemb, uptr size);
-
+void *__hwasan_memset(void *s, int c, uptr n);
SANITIZER_INTERFACE_ATTRIBUTE
-void * __sanitizer_malloc(uptr size);
+void *__hwasan_memmove(void *dest, const void *src, uptr n);
SANITIZER_INTERFACE_ATTRIBUTE
-void *__hwasan_memcpy(void *dst, const void *src, uptr size);
+void *__hwasan_memcpy_match_all(void *dst, const void *src, uptr size, u8);
SANITIZER_INTERFACE_ATTRIBUTE
-void *__hwasan_memset(void *s, int c, uptr n);
+void *__hwasan_memset_match_all(void *s, int c, uptr n, u8);
SANITIZER_INTERFACE_ATTRIBUTE
-void *__hwasan_memmove(void *dest, const void *src, uptr n);
+void *__hwasan_memmove_match_all(void *dest, const void *src, uptr n, u8);
SANITIZER_INTERFACE_ATTRIBUTE
void __hwasan_set_error_report_callback(void (*callback)(const char *));
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_linux.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_linux.cpp
index e22723529f44..e6aa60b324fa 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_linux.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_linux.cpp
@@ -15,30 +15,30 @@
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
-#include "hwasan.h"
-#include "hwasan_dynamic_shadow.h"
-#include "hwasan_interface_internal.h"
-#include "hwasan_mapping.h"
-#include "hwasan_report.h"
-#include "hwasan_thread.h"
-#include "hwasan_thread_list.h"
-
-#include <dlfcn.h>
-#include <elf.h>
-#include <link.h>
-#include <pthread.h>
-#include <signal.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <sys/resource.h>
-#include <sys/time.h>
-#include <unistd.h>
-#include <unwind.h>
-#include <sys/prctl.h>
-#include <errno.h>
-
-#include "sanitizer_common/sanitizer_common.h"
-#include "sanitizer_common/sanitizer_procmaps.h"
+# include <dlfcn.h>
+# include <elf.h>
+# include <errno.h>
+# include <link.h>
+# include <pthread.h>
+# include <signal.h>
+# include <stdio.h>
+# include <stdlib.h>
+# include <sys/prctl.h>
+# include <sys/resource.h>
+# include <sys/time.h>
+# include <unistd.h>
+# include <unwind.h>
+
+# include "hwasan.h"
+# include "hwasan_dynamic_shadow.h"
+# include "hwasan_interface_internal.h"
+# include "hwasan_mapping.h"
+# include "hwasan_report.h"
+# include "hwasan_thread.h"
+# include "hwasan_thread_list.h"
+# include "sanitizer_common/sanitizer_common.h"
+# include "sanitizer_common/sanitizer_procmaps.h"
+# include "sanitizer_common/sanitizer_stackdepot.h"
// Configurations of HWASAN_WITH_INTERCEPTORS and SANITIZER_ANDROID.
//
@@ -50,10 +50,10 @@
// Tested with check-hwasan on x86_64-linux.
// HWASAN_WITH_INTERCEPTORS=ON, SANITIZER_ANDROID=ON
// Tested with check-hwasan on aarch64-linux-android.
-#if !SANITIZER_ANDROID
+# if !SANITIZER_ANDROID
SANITIZER_INTERFACE_ATTRIBUTE
THREADLOCAL uptr __hwasan_tls;
-#endif
+# endif
namespace __hwasan {
@@ -106,19 +106,92 @@ static uptr GetHighMemEnd() {
}
static void InitializeShadowBaseAddress(uptr shadow_size_bytes) {
- __hwasan_shadow_memory_dynamic_address =
- FindDynamicShadowStart(shadow_size_bytes);
+ if (flags()->fixed_shadow_base != (uptr)-1) {
+ __hwasan_shadow_memory_dynamic_address = flags()->fixed_shadow_base;
+ } else {
+ __hwasan_shadow_memory_dynamic_address =
+ FindDynamicShadowStart(shadow_size_bytes);
+ }
+}
+
+static void MaybeDieIfNoTaggingAbi(const char *message) {
+ if (!flags()->fail_without_syscall_abi)
+ return;
+ Printf("FATAL: %s\n", message);
+ Die();
+}
+
+# define PR_SET_TAGGED_ADDR_CTRL 55
+# define PR_GET_TAGGED_ADDR_CTRL 56
+# define PR_TAGGED_ADDR_ENABLE (1UL << 0)
+# define ARCH_GET_UNTAG_MASK 0x4001
+# define ARCH_ENABLE_TAGGED_ADDR 0x4002
+# define ARCH_GET_MAX_TAG_BITS 0x4003
+
+static bool CanUseTaggingAbi() {
+# if defined(__x86_64__)
+ unsigned long num_bits = 0;
+ // Check for x86 LAM support. This API is based on a currently unsubmitted
+ // patch to the Linux kernel (as of August 2022) and is thus subject to
+ // change. The patch is here:
+ // https://lore.kernel.org/all/20220815041803.17954-1-kirill.shutemov@linux.intel.com/
+ //
+ // arch_prctl(ARCH_GET_MAX_TAG_BITS, &bits) returns the maximum number of tag
+ // bits the user can request, or zero if LAM is not supported by the hardware.
+ if (internal_iserror(internal_arch_prctl(ARCH_GET_MAX_TAG_BITS,
+ reinterpret_cast<uptr>(&num_bits))))
+ return false;
+ // The platform must provide enough bits for HWASan tags.
+ if (num_bits < kTagBits)
+ return false;
+ return true;
+# else
+ // Check for ARM TBI support.
+ return !internal_iserror(internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));
+# endif // __x86_64__
+}
+
+static bool EnableTaggingAbi() {
+# if defined(__x86_64__)
+ // Enable x86 LAM tagging for the process.
+ //
+ // arch_prctl(ARCH_ENABLE_TAGGED_ADDR, bits) enables tagging if the number of
+ // tag bits requested by the user does not exceed that provided by the system.
+ // arch_prctl(ARCH_GET_UNTAG_MASK, &mask) returns the mask of significant
+ // address bits. It is ~0ULL if either LAM is disabled for the process or LAM
+ // is not supported by the hardware.
+ if (internal_iserror(internal_arch_prctl(ARCH_ENABLE_TAGGED_ADDR, kTagBits)))
+ return false;
+ unsigned long mask = 0;
+ // Make sure the tag bits are where we expect them to be.
+ if (internal_iserror(internal_arch_prctl(ARCH_GET_UNTAG_MASK,
+ reinterpret_cast<uptr>(&mask))))
+ return false;
+ // @mask has ones for non-tag bits, whereas @kAddressTagMask has ones for tag
+ // bits. Therefore these masks must not overlap.
+ if (mask & kAddressTagMask)
+ return false;
+ return true;
+# else
+ // Enable ARM TBI tagging for the process. If for some reason tagging is not
+ // supported, prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE) returns
+ // -EINVAL.
+ if (internal_iserror(internal_prctl(PR_SET_TAGGED_ADDR_CTRL,
+ PR_TAGGED_ADDR_ENABLE, 0, 0, 0)))
+ return false;
+ // Ensure that TBI is enabled.
+ if (internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0) !=
+ PR_TAGGED_ADDR_ENABLE)
+ return false;
+ return true;
+# endif // __x86_64__
}
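
A standalone sketch of the same handshake for the ARM TBI path (Linux assumptions; the prctl constants are defined locally in case the headers predate them, mirroring the defines above):

    #include <cstdio>
    #include <sys/prctl.h>

    #ifndef PR_SET_TAGGED_ADDR_CTRL
    #  define PR_SET_TAGGED_ADDR_CTRL 55
    #endif
    #ifndef PR_GET_TAGGED_ADDR_CTRL
    #  define PR_GET_TAGGED_ADDR_CTRL 56
    #endif
    #ifndef PR_TAGGED_ADDR_ENABLE
    #  define PR_TAGGED_ADDR_ENABLE (1UL << 0)
    #endif

    int main() {
      // Query: does the kernel know about the tagged-address ABI at all?
      if (prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0) < 0) {
        puts("tagged address ABI not available");
        return 1;
      }
      // Enable, then verify the setting actually took effect.
      if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0) < 0 ||
          prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0) != PR_TAGGED_ADDR_ENABLE) {
        puts("failed to enable tagged address ABI");
        return 1;
      }
      puts("TBI enabled for this process");
      return 0;
    }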
void InitializeOsSupport() {
-#define PR_SET_TAGGED_ADDR_CTRL 55
-#define PR_GET_TAGGED_ADDR_CTRL 56
-#define PR_TAGGED_ADDR_ENABLE (1UL << 0)
// Check we're running on a kernel that can use the tagged address ABI.
- int local_errno = 0;
- if (internal_iserror(internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0),
- &local_errno) &&
- local_errno == EINVAL) {
+ bool has_abi = CanUseTaggingAbi();
+
+ if (!has_abi) {
# if SANITIZER_ANDROID || defined(HWASAN_ALIASING_MODE)
// Some older Android kernels have the tagged pointer ABI on
// unconditionally, and hence don't have the tagged-addr prctl while still
@@ -127,46 +200,22 @@ void InitializeOsSupport() {
// case.
return;
# else
- if (flags()->fail_without_syscall_abi) {
- Printf(
- "FATAL: "
- "HWAddressSanitizer requires a kernel with tagged address ABI.\n");
- Die();
- }
+ MaybeDieIfNoTaggingAbi(
+ "HWAddressSanitizer requires a kernel with tagged address ABI.");
# endif
}
- // Turn on the tagged address ABI.
- if ((internal_iserror(internal_prctl(PR_SET_TAGGED_ADDR_CTRL,
- PR_TAGGED_ADDR_ENABLE, 0, 0, 0)) ||
- !internal_prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0))) {
-# if defined(__x86_64__) && !defined(HWASAN_ALIASING_MODE)
- // Try the new prctl API for Intel LAM. The API is based on a currently
- // unsubmitted patch to the Linux kernel (as of May 2021) and is thus
- // subject to change. Patch is here:
- // https://lore.kernel.org/linux-mm/20210205151631.43511-12-kirill.shutemov@linux.intel.com/
- int tag_bits = kTagBits;
- int tag_shift = kAddressTagShift;
- if (!internal_iserror(
- internal_prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE,
- reinterpret_cast<unsigned long>(&tag_bits),
- reinterpret_cast<unsigned long>(&tag_shift), 0))) {
- CHECK_EQ(tag_bits, kTagBits);
- CHECK_EQ(tag_shift, kAddressTagShift);
- return;
- }
-# endif // defined(__x86_64__) && !defined(HWASAN_ALIASING_MODE)
- if (flags()->fail_without_syscall_abi) {
- Printf(
- "FATAL: HWAddressSanitizer failed to enable tagged address syscall "
- "ABI.\nSuggest check `sysctl abi.tagged_addr_disabled` "
- "configuration.\n");
- Die();
- }
- }
-#undef PR_SET_TAGGED_ADDR_CTRL
-#undef PR_GET_TAGGED_ADDR_CTRL
-#undef PR_TAGGED_ADDR_ENABLE
+ if (EnableTaggingAbi())
+ return;
+
+# if SANITIZER_ANDROID
+ MaybeDieIfNoTaggingAbi(
+ "HWAddressSanitizer failed to enable tagged address syscall ABI.\n"
+ "Check the `sysctl abi.tagged_addr_disabled` configuration.");
+# else
+ MaybeDieIfNoTaggingAbi(
+ "HWAddressSanitizer failed to enable tagged address syscall ABI.\n");
+# endif
}
bool InitShadow() {
@@ -238,31 +287,18 @@ void InitThreads() {
bool MemIsApp(uptr p) {
// Memory outside the alias range has non-zero tags.
# if !defined(HWASAN_ALIASING_MODE)
- CHECK(GetTagFromPointer(p) == 0);
+ CHECK_EQ(GetTagFromPointer(p), 0);
# endif
- return p >= kHighMemStart || (p >= kLowMemStart && p <= kLowMemEnd);
+ return (p >= kHighMemStart && p <= kHighMemEnd) ||
+ (p >= kLowMemStart && p <= kLowMemEnd);
}
-void InstallAtExitHandler() {
- atexit(HwasanAtExit);
-}
+void InstallAtExitHandler() { atexit(HwasanAtExit); }
// ---------------------- TSD ---------------- {{{1
-extern "C" void __hwasan_thread_enter() {
- hwasanThreadList().CreateCurrentThread()->InitRandomState();
-}
-
-extern "C" void __hwasan_thread_exit() {
- Thread *t = GetCurrentThread();
- // Make sure that signal handler can not see a stale current thread pointer.
- atomic_signal_fence(memory_order_seq_cst);
- if (t)
- hwasanThreadList().ReleaseThread(t);
-}
-
-#if HWASAN_WITH_INTERCEPTORS
+# if HWASAN_WITH_INTERCEPTORS
static pthread_key_t tsd_key;
static bool tsd_key_inited = false;
@@ -286,22 +322,18 @@ void HwasanTSDInit() {
tsd_key_inited = true;
CHECK_EQ(0, pthread_key_create(&tsd_key, HwasanTSDDtor));
}
-#else
+# else
void HwasanTSDInit() {}
void HwasanTSDThreadInit() {}
-#endif
+# endif
-#if SANITIZER_ANDROID
-uptr *GetCurrentThreadLongPtr() {
- return (uptr *)get_android_tls_ptr();
-}
-#else
-uptr *GetCurrentThreadLongPtr() {
- return &__hwasan_tls;
-}
-#endif
+# if SANITIZER_ANDROID
+uptr *GetCurrentThreadLongPtr() { return (uptr *)get_android_tls_ptr(); }
+# else
+uptr *GetCurrentThreadLongPtr() { return &__hwasan_tls; }
+# endif
-#if SANITIZER_ANDROID
+# if SANITIZER_ANDROID
void AndroidTestTlsSlot() {
uptr kMagicValue = 0x010203040A0B0C0D;
uptr *tls_ptr = GetCurrentThreadLongPtr();
@@ -316,9 +348,9 @@ void AndroidTestTlsSlot() {
}
*tls_ptr = old_value;
}
-#else
+# else
void AndroidTestTlsSlot() {}
-#endif
+# endif
static AccessInfo GetAccessInfo(siginfo_t *info, ucontext_t *uc) {
// Access type is passed in a platform dependent way (see below) and encoded
@@ -326,32 +358,32 @@ static AccessInfo GetAccessInfo(siginfo_t *info, ucontext_t *uc) {
// recoverable. Valid values of Y are 0 to 4, which are interpreted as
// log2(access_size), and 0xF, which means that access size is passed via
// platform dependent register (see below).
-#if defined(__aarch64__)
+# if defined(__aarch64__)
// Access type is encoded in BRK immediate as 0x900 + 0xXY. For Y == 0xF,
// access size is stored in X1 register. Access address is always in X0
// register.
uptr pc = (uptr)info->si_addr;
const unsigned code = ((*(u32 *)pc) >> 5) & 0xffff;
if ((code & 0xff00) != 0x900)
- return AccessInfo{}; // Not ours.
+ return AccessInfo{}; // Not ours.
const bool is_store = code & 0x10;
const bool recover = code & 0x20;
const uptr addr = uc->uc_mcontext.regs[0];
const unsigned size_log = code & 0xf;
if (size_log > 4 && size_log != 0xf)
- return AccessInfo{}; // Not ours.
+ return AccessInfo{}; // Not ours.
const uptr size = size_log == 0xf ? uc->uc_mcontext.regs[1] : 1U << size_log;
-#elif defined(__x86_64__)
+# elif defined(__x86_64__)
// Access type is encoded in the instruction following INT3 as
// NOP DWORD ptr [EAX + 0x40 + 0xXY]. For Y == 0xF, access size is stored in
// RSI register. Access address is always in RDI register.
uptr pc = (uptr)uc->uc_mcontext.gregs[REG_RIP];
- uint8_t *nop = (uint8_t*)pc;
- if (*nop != 0x0f || *(nop + 1) != 0x1f || *(nop + 2) != 0x40 ||
+ uint8_t *nop = (uint8_t *)pc;
+ if (*nop != 0x0f || *(nop + 1) != 0x1f || *(nop + 2) != 0x40 ||
*(nop + 3) < 0x40)
- return AccessInfo{}; // Not ours.
+ return AccessInfo{}; // Not ours.
const unsigned code = *(nop + 3);
const bool is_store = code & 0x10;
@@ -359,13 +391,54 @@ static AccessInfo GetAccessInfo(siginfo_t *info, ucontext_t *uc) {
const uptr addr = uc->uc_mcontext.gregs[REG_RDI];
const unsigned size_log = code & 0xf;
if (size_log > 4 && size_log != 0xf)
- return AccessInfo{}; // Not ours.
+ return AccessInfo{}; // Not ours.
const uptr size =
size_log == 0xf ? uc->uc_mcontext.gregs[REG_RSI] : 1U << size_log;
-#else
-# error Unsupported architecture
-#endif
+# elif SANITIZER_RISCV64
+ // Access type is encoded in the instruction following EBREAK as
+ // ADDI x0, x0, [0x40 + 0xXY]. For Y == 0xF, access size is stored in
+ // X11 register. Access address is always in X10 register.
+ uptr pc = (uptr)uc->uc_mcontext.__gregs[REG_PC];
+ uint8_t byte1 = *((u8 *)(pc + 0));
+ uint8_t byte2 = *((u8 *)(pc + 1));
+ uint8_t byte3 = *((u8 *)(pc + 2));
+ uint8_t byte4 = *((u8 *)(pc + 3));
+ uint32_t ebreak = (byte1 | (byte2 << 8) | (byte3 << 16) | (byte4 << 24));
+ bool isFaultShort = false;
+ bool isEbreak = (ebreak == 0x100073);
+ bool isShortEbreak = false;
+# if defined(__riscv_compressed)
+ isFaultShort = ((ebreak & 0x3) != 0x3);
+ isShortEbreak = ((ebreak & 0xffff) == 0x9002);
+# endif
+ // The faulting instruction is not an ebreak; not our case.
+ if (!(isEbreak || isShortEbreak))
+ return AccessInfo{};
+ // Advance pc past the ebreak and reconstruct the following addi instruction.
+ pc += isFaultShort ? 2 : 4;
+ byte1 = *((u8 *)(pc + 0));
+ byte2 = *((u8 *)(pc + 1));
+ byte3 = *((u8 *)(pc + 2));
+ byte4 = *((u8 *)(pc + 3));
+ // Reconstruct the instruction word.
+ uint32_t instr = (byte1 | (byte2 << 8) | (byte3 << 16) | (byte4 << 24));
+ // The access code is carried in the addi immediate, i.e. the top 12 bits
+ // of the 32-bit instruction.
+ const unsigned code = (instr >> 20) & 0xffff;
+ const uptr addr = uc->uc_mcontext.__gregs[10];
+ const bool is_store = code & 0x10;
+ const bool recover = code & 0x20;
+ const unsigned size_log = code & 0xf;
+ if (size_log > 4 && size_log != 0xf)
+ return AccessInfo{}; // Not ours.
+ const uptr size =
+ size_log == 0xf ? uc->uc_mcontext.__gregs[11] : 1U << size_log;
+
+# else
+# error Unsupported architecture
+# endif
return AccessInfo{addr, size, is_store, !is_store, recover};
}
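
A worked decode of the common code encoding shared by all three architectures above, with an illustrative value: bit 0x10 selects store vs. load, bit 0x20 marks the check recoverable, and the low nibble is log2 of the access size (0xF meaning the size travels in a register):

    #include <cstdio>

    int main() {
      const unsigned code = 0x32;            // illustrative value, not a real trap
      const bool is_store = code & 0x10;     // set -> store access
      const bool recover = code & 0x20;      // set -> recoverable check
      const unsigned size_log = code & 0xf;  // 2 -> 4-byte access
      if (size_log <= 4)
        printf("store=%d recover=%d size=%lu\n", is_store, recover,
               1UL << size_log);
      return 0;
    }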
@@ -378,12 +451,25 @@ static bool HwasanOnSIGTRAP(int signo, siginfo_t *info, ucontext_t *uc) {
SignalContext sig{info, uc};
HandleTagMismatch(ai, StackTrace::GetNextInstructionPc(sig.pc), sig.bp, uc);
-#if defined(__aarch64__)
+# if defined(__aarch64__)
uc->uc_mcontext.pc += 4;
-#elif defined(__x86_64__)
-#else
-# error Unsupported architecture
-#endif
+# elif defined(__x86_64__)
+# elif SANITIZER_RISCV64
+ // pc points to the faulting EBREAK (4 bytes) or C.EBREAK (2 bytes).
+ uint8_t *exception_source = (uint8_t *)(uc->uc_mcontext.__gregs[REG_PC]);
+ uint8_t byte1 = (uint8_t)(*(exception_source + 0));
+ uint8_t byte2 = (uint8_t)(*(exception_source + 1));
+ uint8_t byte3 = (uint8_t)(*(exception_source + 2));
+ uint8_t byte4 = (uint8_t)(*(exception_source + 3));
+ uint32_t faulted = (byte1 | (byte2 << 8) | (byte3 << 16) | (byte4 << 24));
+ bool isFaultShort = false;
+# if defined(__riscv_compressed)
+ isFaultShort = ((faulted & 0x3) != 0x3);
+# endif
+ uc->uc_mcontext.__gregs[REG_PC] += isFaultShort ? 2 : 4;
+# else
+# error Unsupported architecture
+# endif
return true;
}
@@ -396,7 +482,7 @@ static void OnStackUnwind(const SignalContext &sig, const void *,
void HwasanOnDeadlySignal(int signo, void *info, void *context) {
// Probably a tag mismatch.
if (signo == SIGTRAP)
- if (HwasanOnSIGTRAP(signo, (siginfo_t *)info, (ucontext_t*)context))
+ if (HwasanOnSIGTRAP(signo, (siginfo_t *)info, (ucontext_t *)context))
return;
HandleDeadlySignal(info, context, GetTid(), &OnStackUnwind, nullptr);
@@ -435,6 +521,66 @@ uptr TagMemoryAligned(uptr p, uptr size, tag_t tag) {
return AddTagToPointer(p, tag);
}
-} // namespace __hwasan
+static void BeforeFork() {
+ if (CAN_SANITIZE_LEAKS) {
+ __lsan::LockGlobal();
+ }
+ // The `__lsan` functions below are defined regardless of
+ // `CAN_SANITIZE_LEAKS` and lock the state we need.
+ __lsan::LockThreads();
+ __lsan::LockAllocator();
+ StackDepotLockBeforeFork();
+}
+
+static void AfterFork(bool fork_child) {
+ StackDepotUnlockAfterFork(fork_child);
+ // The `__lsan` functions below are defined regardless of
+ // `CAN_SANITIZE_LEAKS` and unlock the state we need.
+ __lsan::UnlockAllocator();
+ __lsan::UnlockThreads();
+ if (CAN_SANITIZE_LEAKS) {
+ __lsan::UnlockGlobal();
+ }
+}
+
+void HwasanInstallAtForkHandler() {
+ pthread_atfork(
+ &BeforeFork, []() { AfterFork(/* fork_child= */ false); },
+ []() { AfterFork(/* fork_child= */ true); });
+}
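
The same pthread_atfork shape, reduced to a minimal sketch (hypothetical locks standing in for the LSan and stack depot locks above): everything the child could need is locked before fork() and released on both sides afterwards, so the child never inherits a lock held by a thread that no longer exists there:

    #include <mutex>
    #include <pthread.h>

    static std::mutex allocator_mu, threads_mu;

    static void BeforeForkSketch() {
      threads_mu.lock();    // acquisition order must be the exact
      allocator_mu.lock();  // reverse of the release order below
    }

    static void AfterForkSketch() {
      allocator_mu.unlock();
      threads_mu.unlock();
    }

    int main() {
      // Same handler for parent and child; the real code passes a
      // fork_child flag so the stack depot can tell the two apart.
      pthread_atfork(&BeforeForkSketch, &AfterForkSketch, &AfterForkSketch);
      return 0;
    }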
+
+void InstallAtExitCheckLeaks() {
+ if (CAN_SANITIZE_LEAKS) {
+ if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
+ if (flags()->halt_on_error)
+ Atexit(__lsan::DoLeakCheck);
+ else
+ Atexit(__lsan::DoRecoverableLeakCheckVoid);
+ }
+ }
+}
+
+} // namespace __hwasan
+
+using namespace __hwasan;
+
+extern "C" void __hwasan_thread_enter() {
+ hwasanThreadList().CreateCurrentThread()->EnsureRandomStateInited();
+}
+
+extern "C" void __hwasan_thread_exit() {
+ Thread *t = GetCurrentThread();
+ // Make sure that signal handler can not see a stale current thread pointer.
+ atomic_signal_fence(memory_order_seq_cst);
+ if (t) {
+ // Block async signals on the thread, as signal handlers may be
+ // instrumented. After this point instrumented code can't access essential
+ // data from TLS and would crash.
+ // Bionic already calls __hwasan_thread_exit with signals blocked.
+ if (SANITIZER_GLIBC)
+ BlockSignals();
+ hwasanThreadList().ReleaseThread(t);
+ }
+}
-#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
+#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_memintrinsics.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_memintrinsics.cpp
index fab017aae60b..16d6f9085924 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_memintrinsics.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_memintrinsics.cpp
@@ -40,5 +40,35 @@ void *__hwasan_memmove(void *to, const void *from, uptr size) {
reinterpret_cast<uptr>(to), size);
CheckAddressSized<ErrorAction::Recover, AccessType::Load>(
reinterpret_cast<uptr>(from), size);
- return memmove(UntagPtr(to), UntagPtr(from), size);
+ return memmove(to, from, size);
+}
+
+void *__hwasan_memset_match_all(void *block, int c, uptr size,
+ u8 match_all_tag) {
+ if (GetTagFromPointer(reinterpret_cast<uptr>(block)) != match_all_tag)
+ CheckAddressSized<ErrorAction::Recover, AccessType::Store>(
+ reinterpret_cast<uptr>(block), size);
+ return memset(block, c, size);
+}
+
+void *__hwasan_memcpy_match_all(void *to, const void *from, uptr size,
+ u8 match_all_tag) {
+ if (GetTagFromPointer(reinterpret_cast<uptr>(to)) != match_all_tag)
+ CheckAddressSized<ErrorAction::Recover, AccessType::Store>(
+ reinterpret_cast<uptr>(to), size);
+ if (GetTagFromPointer(reinterpret_cast<uptr>(from)) != match_all_tag)
+ CheckAddressSized<ErrorAction::Recover, AccessType::Load>(
+ reinterpret_cast<uptr>(from), size);
+ return memcpy(to, from, size);
+}
+
+void *__hwasan_memmove_match_all(void *to, const void *from, uptr size,
+ u8 match_all_tag) {
+ if (GetTagFromPointer(reinterpret_cast<uptr>(to)) != match_all_tag)
+ CheckAddressSized<ErrorAction::Recover, AccessType::Store>(
+ reinterpret_cast<uptr>(to), size);
+ if (GetTagFromPointer(reinterpret_cast<uptr>(from)) != match_all_tag)
+ CheckAddressSized<ErrorAction::Recover, AccessType::Load>(
+ reinterpret_cast<uptr>(from), size);
+ return memmove(to, from, size);
}
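
How the match-all short-circuit above behaves, as a hedged sketch (the 0xFF tag value and the top-byte extraction are illustrative aarch64/TBI assumptions, not taken from this diff):

    #include <cstdint>
    #include <cstring>

    static constexpr uint8_t kMatchAllTagSketch = 0xFF;  // illustrative value

    static inline uint8_t TopByte(const void *p) {
      // aarch64/TBI assumption: the tag lives in bits 56..63 of the pointer.
      return static_cast<uint8_t>(reinterpret_cast<uintptr_t>(p) >> 56);
    }

    void *memset_match_all_sketch(void *block, int c, size_t size) {
      if (TopByte(block) != kMatchAllTagSketch) {
        // A real implementation checks the shadow for [block, block + size)
        // here; pointers carrying the match-all tag skip the check entirely.
      }
      return memset(block, c, size);
    }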
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_new_delete.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_new_delete.cpp
index 4e057a651e1d..f0fd3726ef1b 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_new_delete.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_new_delete.cpp
@@ -22,21 +22,23 @@
#if HWASAN_REPLACE_OPERATORS_NEW_AND_DELETE
// TODO(alekseys): throw std::bad_alloc instead of dying on OOM.
-#define OPERATOR_NEW_BODY(nothrow) \
- GET_MALLOC_STACK_TRACE; \
- void *res = hwasan_malloc(size, &stack);\
- if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\
- return res
-#define OPERATOR_NEW_ALIGN_BODY(nothrow) \
- GET_MALLOC_STACK_TRACE; \
- void *res = hwasan_aligned_alloc(static_cast<uptr>(align), size, &stack); \
- if (!nothrow && UNLIKELY(!res)) \
- ReportOutOfMemory(size, &stack); \
- return res
-
-#define OPERATOR_DELETE_BODY \
- GET_MALLOC_STACK_TRACE; \
- if (ptr) hwasan_free(ptr, &stack)
+# define OPERATOR_NEW_BODY(nothrow) \
+ GET_MALLOC_STACK_TRACE; \
+ void *res = hwasan_malloc(size, &stack); \
+ if (!nothrow && UNLIKELY(!res)) \
+ ReportOutOfMemory(size, &stack); \
+ return res
+# define OPERATOR_NEW_ALIGN_BODY(nothrow) \
+ GET_MALLOC_STACK_TRACE; \
+ void *res = hwasan_memalign(static_cast<uptr>(align), size, &stack); \
+ if (!nothrow && UNLIKELY(!res)) \
+ ReportOutOfMemory(size, &stack); \
+ return res
+
+# define OPERATOR_DELETE_BODY \
+ GET_MALLOC_STACK_TRACE; \
+ if (ptr) \
+ hwasan_free(ptr, &stack)
#elif defined(__ANDROID__)
@@ -44,8 +46,8 @@
// since we previously released a runtime that intercepted these functions,
// removing the interceptors would break ABI. Therefore we simply forward to
// malloc and free.
-#define OPERATOR_NEW_BODY(nothrow) return malloc(size)
-#define OPERATOR_DELETE_BODY free(ptr)
+# define OPERATOR_NEW_BODY(nothrow) return malloc(size)
+# define OPERATOR_DELETE_BODY free(ptr)
#endif
@@ -55,26 +57,27 @@ using namespace __hwasan;
// Fake std::nothrow_t to avoid including <new>.
namespace std {
- struct nothrow_t {};
+struct nothrow_t {};
} // namespace std
-
-
-INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void *operator new(size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); }
-INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void *operator new[](size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); }
-INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void *operator new(size_t size, std::nothrow_t const&) {
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new(size_t size) {
+ OPERATOR_NEW_BODY(false /*nothrow*/);
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new[](
+ size_t size) {
+ OPERATOR_NEW_BODY(false /*nothrow*/);
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new(
+ size_t size, std::nothrow_t const &) {
OPERATOR_NEW_BODY(true /*nothrow*/);
}
-INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void *operator new[](size_t size, std::nothrow_t const&) {
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void *operator new[](
+ size_t size, std::nothrow_t const &) {
OPERATOR_NEW_BODY(true /*nothrow*/);
}
-INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete(void *ptr)
- NOEXCEPT {
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete(
+ void *ptr) NOEXCEPT {
OPERATOR_DELETE_BODY;
}
INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
@@ -89,6 +92,14 @@ INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
void *ptr, std::nothrow_t const &) {
OPERATOR_DELETE_BODY;
}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete(
+ void *ptr, size_t) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
+ void *ptr, size_t) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
#endif // OPERATOR_NEW_BODY
@@ -131,5 +142,21 @@ INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
void *ptr, std::align_val_t, std::nothrow_t const &) NOEXCEPT {
OPERATOR_DELETE_BODY;
}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete(
+ void *ptr, size_t, std::align_val_t) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
+ void *ptr, size_t, std::align_val_t) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete(
+ void *ptr, size_t, std::align_val_t, std::nothrow_t const &) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void operator delete[](
+ void *ptr, size_t, std::align_val_t, std::nothrow_t const &) NOEXCEPT {
+ OPERATOR_DELETE_BODY;
+}
#endif // OPERATOR_NEW_ALIGN_BODY
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_platform_interceptors.h b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_platform_interceptors.h
new file mode 100644
index 000000000000..d92b51052194
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_platform_interceptors.h
@@ -0,0 +1,1001 @@
+#ifndef HWASAN_PLATFORM_INTERCEPTORS_H
+#define HWASAN_PLATFORM_INTERCEPTORS_H
+
+#include "sanitizer_common/sanitizer_platform_interceptors.h"
+
+// This file cancels out most of the sanitizer_common interception, thus
+// allowing HWASan to selectively reuse some of the interceptors.
+//
+// To re-enable sanitizer_common's interception of a function, comment out
+// the corresponding '#undef SANITIZER_INTERCEPT_fn' and
+// '#define SANITIZER_INTERCEPT_fn 0':
+// - We prefer to comment out rather than delete the lines, to show that
+// it is deliberate, rather than an accidental omission.
+//   - We do not use '#define SANITIZER_INTERCEPT_fn 1', because
+// interception is usually conditional (e.g., based on SI_POSIX); we let
+// the condition in sanitizers_platform_interceptors.h take effect.
+
+// Originally generated with:
+// cat ../sanitizer_common/sanitizer_platform_interceptors.h | grep '^#define SANITIZER_INTERCEPT' | cut -d ' ' -f 2 | while read x; do echo "#undef $x"; echo "#define $x 0"; echo; done
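
For illustration, re-enabling one entry follows the same shape as the commented-out mem* blocks further down (a hypothetical edit; this file keeps STRLEN disabled):

    // To let sanitizer_common intercept strlen again, the pair would be
    // commented out rather than deleted:
    // #undef SANITIZER_INTERCEPT_STRLEN
    // #define SANITIZER_INTERCEPT_STRLEN 0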
+#undef SANITIZER_INTERCEPT_STRLEN
+#define SANITIZER_INTERCEPT_STRLEN 0
+
+#undef SANITIZER_INTERCEPT_STRNLEN
+#define SANITIZER_INTERCEPT_STRNLEN 0
+
+#undef SANITIZER_INTERCEPT_STRCMP
+#define SANITIZER_INTERCEPT_STRCMP 0
+
+#undef SANITIZER_INTERCEPT_STRSTR
+#define SANITIZER_INTERCEPT_STRSTR 0
+
+#undef SANITIZER_INTERCEPT_STRCASESTR
+#define SANITIZER_INTERCEPT_STRCASESTR 0
+
+#undef SANITIZER_INTERCEPT_STRTOK
+#define SANITIZER_INTERCEPT_STRTOK 0
+
+#undef SANITIZER_INTERCEPT_STRCHR
+#define SANITIZER_INTERCEPT_STRCHR 0
+
+#undef SANITIZER_INTERCEPT_STRCHRNUL
+#define SANITIZER_INTERCEPT_STRCHRNUL 0
+
+#undef SANITIZER_INTERCEPT_STRRCHR
+#define SANITIZER_INTERCEPT_STRRCHR 0
+
+#undef SANITIZER_INTERCEPT_STRSPN
+#define SANITIZER_INTERCEPT_STRSPN 0
+
+#undef SANITIZER_INTERCEPT_STRPBRK
+#define SANITIZER_INTERCEPT_STRPBRK 0
+
+#undef SANITIZER_INTERCEPT_TEXTDOMAIN
+#define SANITIZER_INTERCEPT_TEXTDOMAIN 0
+
+#undef SANITIZER_INTERCEPT_STRCASECMP
+#define SANITIZER_INTERCEPT_STRCASECMP 0
+
+// #undef SANITIZER_INTERCEPT_MEMSET
+// #define SANITIZER_INTERCEPT_MEMSET 0
+
+// #undef SANITIZER_INTERCEPT_MEMMOVE
+// #define SANITIZER_INTERCEPT_MEMMOVE 0
+
+// #undef SANITIZER_INTERCEPT_MEMCPY
+// #define SANITIZER_INTERCEPT_MEMCPY 0
+
+// #undef SANITIZER_INTERCEPT_MEMCMP
+// #define SANITIZER_INTERCEPT_MEMCMP 0
+
+// #undef SANITIZER_INTERCEPT_BCMP
+// #define SANITIZER_INTERCEPT_BCMP 0
+
+#undef SANITIZER_INTERCEPT_STRNDUP
+#define SANITIZER_INTERCEPT_STRNDUP 0
+
+#undef SANITIZER_INTERCEPT___STRNDUP
+#define SANITIZER_INTERCEPT___STRNDUP 0
+
+#undef SANITIZER_INTERCEPT_MEMMEM
+#define SANITIZER_INTERCEPT_MEMMEM 0
+
+#undef SANITIZER_INTERCEPT_MEMCHR
+#define SANITIZER_INTERCEPT_MEMCHR 0
+
+#undef SANITIZER_INTERCEPT_MEMRCHR
+#define SANITIZER_INTERCEPT_MEMRCHR 0
+
+#undef SANITIZER_INTERCEPT_READ
+#define SANITIZER_INTERCEPT_READ 0
+
+#undef SANITIZER_INTERCEPT_PREAD
+#define SANITIZER_INTERCEPT_PREAD 0
+
+#undef SANITIZER_INTERCEPT_WRITE
+#define SANITIZER_INTERCEPT_WRITE 0
+
+#undef SANITIZER_INTERCEPT_PWRITE
+#define SANITIZER_INTERCEPT_PWRITE 0
+
+#undef SANITIZER_INTERCEPT_FREAD
+#define SANITIZER_INTERCEPT_FREAD 0
+
+#undef SANITIZER_INTERCEPT_FWRITE
+#define SANITIZER_INTERCEPT_FWRITE 0
+
+#undef SANITIZER_INTERCEPT_FGETS
+#define SANITIZER_INTERCEPT_FGETS 0
+
+#undef SANITIZER_INTERCEPT_FPUTS
+#define SANITIZER_INTERCEPT_FPUTS 0
+
+#undef SANITIZER_INTERCEPT_PUTS
+#define SANITIZER_INTERCEPT_PUTS 0
+
+#undef SANITIZER_INTERCEPT_PREAD64
+#define SANITIZER_INTERCEPT_PREAD64 0
+
+#undef SANITIZER_INTERCEPT_PWRITE64
+#define SANITIZER_INTERCEPT_PWRITE64 0
+
+#undef SANITIZER_INTERCEPT_READV
+#define SANITIZER_INTERCEPT_READV 0
+
+#undef SANITIZER_INTERCEPT_WRITEV
+#define SANITIZER_INTERCEPT_WRITEV 0
+
+#undef SANITIZER_INTERCEPT_PREADV
+#define SANITIZER_INTERCEPT_PREADV 0
+
+#undef SANITIZER_INTERCEPT_PWRITEV
+#define SANITIZER_INTERCEPT_PWRITEV 0
+
+#undef SANITIZER_INTERCEPT_PREADV64
+#define SANITIZER_INTERCEPT_PREADV64 0
+
+#undef SANITIZER_INTERCEPT_PWRITEV64
+#define SANITIZER_INTERCEPT_PWRITEV64 0
+
+#undef SANITIZER_INTERCEPT_PRCTL
+#define SANITIZER_INTERCEPT_PRCTL 0
+
+#undef SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS
+#define SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS 0
+
+#undef SANITIZER_INTERCEPT_STRPTIME
+#define SANITIZER_INTERCEPT_STRPTIME 0
+
+#undef SANITIZER_INTERCEPT_SCANF
+#define SANITIZER_INTERCEPT_SCANF 0
+
+#undef SANITIZER_INTERCEPT_ISOC99_SCANF
+#define SANITIZER_INTERCEPT_ISOC99_SCANF 0
+
+#undef SANITIZER_INTERCEPT_PRINTF
+#define SANITIZER_INTERCEPT_PRINTF 0
+
+#undef SANITIZER_INTERCEPT_PRINTF_L
+#define SANITIZER_INTERCEPT_PRINTF_L 0
+
+#undef SANITIZER_INTERCEPT_ISOC99_PRINTF
+#define SANITIZER_INTERCEPT_ISOC99_PRINTF 0
+
+#undef SANITIZER_INTERCEPT___PRINTF_CHK
+#define SANITIZER_INTERCEPT___PRINTF_CHK 0
+
+#undef SANITIZER_INTERCEPT_FREXP
+#define SANITIZER_INTERCEPT_FREXP 0
+
+#undef SANITIZER_INTERCEPT_FREXPF_FREXPL
+#define SANITIZER_INTERCEPT_FREXPF_FREXPL 0
+
+#undef SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS
+#define SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS 0
+
+#undef SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS
+#define SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS 0
+
+#undef SANITIZER_INTERCEPT_GETPWENT
+#define SANITIZER_INTERCEPT_GETPWENT 0
+
+#undef SANITIZER_INTERCEPT_FGETGRENT_R
+#define SANITIZER_INTERCEPT_FGETGRENT_R 0
+
+#undef SANITIZER_INTERCEPT_FGETPWENT
+#define SANITIZER_INTERCEPT_FGETPWENT 0
+
+#undef SANITIZER_INTERCEPT_GETPWENT_R
+#define SANITIZER_INTERCEPT_GETPWENT_R 0
+
+#undef SANITIZER_INTERCEPT_FGETPWENT_R
+#define SANITIZER_INTERCEPT_FGETPWENT_R 0
+
+#undef SANITIZER_INTERCEPT_SETPWENT
+#define SANITIZER_INTERCEPT_SETPWENT 0
+
+#undef SANITIZER_INTERCEPT_CLOCK_GETTIME
+#define SANITIZER_INTERCEPT_CLOCK_GETTIME 0
+
+#undef SANITIZER_INTERCEPT_CLOCK_GETCPUCLOCKID
+#define SANITIZER_INTERCEPT_CLOCK_GETCPUCLOCKID 0
+
+#undef SANITIZER_INTERCEPT_GETITIMER
+#define SANITIZER_INTERCEPT_GETITIMER 0
+
+#undef SANITIZER_INTERCEPT_TIME
+#define SANITIZER_INTERCEPT_TIME 0
+
+#undef SANITIZER_INTERCEPT_GLOB
+#define SANITIZER_INTERCEPT_GLOB 0
+
+#undef SANITIZER_INTERCEPT_GLOB64
+#define SANITIZER_INTERCEPT_GLOB64 0
+
+#undef SANITIZER_INTERCEPT___B64_TO
+#define SANITIZER_INTERCEPT___B64_TO 0
+
+#undef SANITIZER_INTERCEPT_DN_COMP_EXPAND
+#define SANITIZER_INTERCEPT_DN_COMP_EXPAND 0
+
+#undef SANITIZER_INTERCEPT_POSIX_SPAWN
+#define SANITIZER_INTERCEPT_POSIX_SPAWN 0
+
+#undef SANITIZER_INTERCEPT_WAIT
+#define SANITIZER_INTERCEPT_WAIT 0
+
+#undef SANITIZER_INTERCEPT_INET
+#define SANITIZER_INTERCEPT_INET 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_GETSCHEDPARAM
+#define SANITIZER_INTERCEPT_PTHREAD_GETSCHEDPARAM 0
+
+#undef SANITIZER_INTERCEPT_GETADDRINFO
+#define SANITIZER_INTERCEPT_GETADDRINFO 0
+
+#undef SANITIZER_INTERCEPT_GETNAMEINFO
+#define SANITIZER_INTERCEPT_GETNAMEINFO 0
+
+#undef SANITIZER_INTERCEPT_GETSOCKNAME
+#define SANITIZER_INTERCEPT_GETSOCKNAME 0
+
+#undef SANITIZER_INTERCEPT_GETHOSTBYNAME
+#define SANITIZER_INTERCEPT_GETHOSTBYNAME 0
+
+#undef SANITIZER_INTERCEPT_GETHOSTBYNAME2
+#define SANITIZER_INTERCEPT_GETHOSTBYNAME2 0
+
+#undef SANITIZER_INTERCEPT_GETHOSTBYNAME_R
+#define SANITIZER_INTERCEPT_GETHOSTBYNAME_R 0
+
+#undef SANITIZER_INTERCEPT_GETHOSTBYNAME2_R
+#define SANITIZER_INTERCEPT_GETHOSTBYNAME2_R 0
+
+#undef SANITIZER_INTERCEPT_GETHOSTBYADDR_R
+#define SANITIZER_INTERCEPT_GETHOSTBYADDR_R 0
+
+#undef SANITIZER_INTERCEPT_GETHOSTENT_R
+#define SANITIZER_INTERCEPT_GETHOSTENT_R 0
+
+#undef SANITIZER_INTERCEPT_GETSOCKOPT
+#define SANITIZER_INTERCEPT_GETSOCKOPT 0
+
+#undef SANITIZER_INTERCEPT_ACCEPT
+#define SANITIZER_INTERCEPT_ACCEPT 0
+
+#undef SANITIZER_INTERCEPT_ACCEPT4
+#define SANITIZER_INTERCEPT_ACCEPT4 0
+
+#undef SANITIZER_INTERCEPT_PACCEPT
+#define SANITIZER_INTERCEPT_PACCEPT 0
+
+#undef SANITIZER_INTERCEPT_MODF
+#define SANITIZER_INTERCEPT_MODF 0
+
+#undef SANITIZER_INTERCEPT_RECVMSG
+#define SANITIZER_INTERCEPT_RECVMSG 0
+
+#undef SANITIZER_INTERCEPT_SENDMSG
+#define SANITIZER_INTERCEPT_SENDMSG 0
+
+#undef SANITIZER_INTERCEPT_RECVMMSG
+#define SANITIZER_INTERCEPT_RECVMMSG 0
+
+#undef SANITIZER_INTERCEPT_SENDMMSG
+#define SANITIZER_INTERCEPT_SENDMMSG 0
+
+#undef SANITIZER_INTERCEPT_SYSMSG
+#define SANITIZER_INTERCEPT_SYSMSG 0
+
+#undef SANITIZER_INTERCEPT_GETPEERNAME
+#define SANITIZER_INTERCEPT_GETPEERNAME 0
+
+#undef SANITIZER_INTERCEPT_IOCTL
+#define SANITIZER_INTERCEPT_IOCTL 0
+
+#undef SANITIZER_INTERCEPT_INET_ATON
+#define SANITIZER_INTERCEPT_INET_ATON 0
+
+#undef SANITIZER_INTERCEPT_SYSINFO
+#define SANITIZER_INTERCEPT_SYSINFO 0
+
+#undef SANITIZER_INTERCEPT_READDIR
+#define SANITIZER_INTERCEPT_READDIR 0
+
+#undef SANITIZER_INTERCEPT_READDIR64
+#define SANITIZER_INTERCEPT_READDIR64 0
+
+#undef SANITIZER_INTERCEPT_PTRACE
+#define SANITIZER_INTERCEPT_PTRACE 0
+
+#undef SANITIZER_INTERCEPT_SETLOCALE
+#define SANITIZER_INTERCEPT_SETLOCALE 0
+
+#undef SANITIZER_INTERCEPT_GETCWD
+#define SANITIZER_INTERCEPT_GETCWD 0
+
+#undef SANITIZER_INTERCEPT_GET_CURRENT_DIR_NAME
+#define SANITIZER_INTERCEPT_GET_CURRENT_DIR_NAME 0
+
+#undef SANITIZER_INTERCEPT_STRTOIMAX
+#define SANITIZER_INTERCEPT_STRTOIMAX 0
+
+#undef SANITIZER_INTERCEPT_MBSTOWCS
+#define SANITIZER_INTERCEPT_MBSTOWCS 0
+
+#undef SANITIZER_INTERCEPT_MBSNRTOWCS
+#define SANITIZER_INTERCEPT_MBSNRTOWCS 0
+
+#undef SANITIZER_INTERCEPT_WCSTOMBS
+#define SANITIZER_INTERCEPT_WCSTOMBS 0
+
+#undef SANITIZER_INTERCEPT_STRXFRM
+#define SANITIZER_INTERCEPT_STRXFRM 0
+
+#undef SANITIZER_INTERCEPT___STRXFRM_L
+#define SANITIZER_INTERCEPT___STRXFRM_L 0
+
+#undef SANITIZER_INTERCEPT_WCSXFRM
+#define SANITIZER_INTERCEPT_WCSXFRM 0
+
+#undef SANITIZER_INTERCEPT___WCSXFRM_L
+#define SANITIZER_INTERCEPT___WCSXFRM_L 0
+
+#undef SANITIZER_INTERCEPT_WCSNRTOMBS
+#define SANITIZER_INTERCEPT_WCSNRTOMBS 0
+
+#undef SANITIZER_INTERCEPT_WCRTOMB
+#define SANITIZER_INTERCEPT_WCRTOMB 0
+
+#undef SANITIZER_INTERCEPT_WCTOMB
+#define SANITIZER_INTERCEPT_WCTOMB 0
+
+#undef SANITIZER_INTERCEPT_TCGETATTR
+#define SANITIZER_INTERCEPT_TCGETATTR 0
+
+#undef SANITIZER_INTERCEPT_REALPATH
+#define SANITIZER_INTERCEPT_REALPATH 0
+
+#undef SANITIZER_INTERCEPT_CANONICALIZE_FILE_NAME
+#define SANITIZER_INTERCEPT_CANONICALIZE_FILE_NAME 0
+
+#undef SANITIZER_INTERCEPT_CONFSTR
+#define SANITIZER_INTERCEPT_CONFSTR 0
+
+#undef SANITIZER_INTERCEPT_SCHED_GETAFFINITY
+#define SANITIZER_INTERCEPT_SCHED_GETAFFINITY 0
+
+#undef SANITIZER_INTERCEPT_SCHED_GETPARAM
+#define SANITIZER_INTERCEPT_SCHED_GETPARAM 0
+
+#undef SANITIZER_INTERCEPT_STRERROR
+#define SANITIZER_INTERCEPT_STRERROR 0
+
+#undef SANITIZER_INTERCEPT_STRERROR_R
+#define SANITIZER_INTERCEPT_STRERROR_R 0
+
+#undef SANITIZER_INTERCEPT_XPG_STRERROR_R
+#define SANITIZER_INTERCEPT_XPG_STRERROR_R 0
+
+#undef SANITIZER_INTERCEPT_SCANDIR
+#define SANITIZER_INTERCEPT_SCANDIR 0
+
+#undef SANITIZER_INTERCEPT_SCANDIR64
+#define SANITIZER_INTERCEPT_SCANDIR64 0
+
+#undef SANITIZER_INTERCEPT_GETGROUPS
+#define SANITIZER_INTERCEPT_GETGROUPS 0
+
+#undef SANITIZER_INTERCEPT_POLL
+#define SANITIZER_INTERCEPT_POLL 0
+
+#undef SANITIZER_INTERCEPT_PPOLL
+#define SANITIZER_INTERCEPT_PPOLL 0
+
+#undef SANITIZER_INTERCEPT_WORDEXP
+#define SANITIZER_INTERCEPT_WORDEXP 0
+
+#undef SANITIZER_INTERCEPT_SIGWAIT
+#define SANITIZER_INTERCEPT_SIGWAIT 0
+
+#undef SANITIZER_INTERCEPT_SIGWAITINFO
+#define SANITIZER_INTERCEPT_SIGWAITINFO 0
+
+#undef SANITIZER_INTERCEPT_SIGTIMEDWAIT
+#define SANITIZER_INTERCEPT_SIGTIMEDWAIT 0
+
+#undef SANITIZER_INTERCEPT_SIGSETOPS
+#define SANITIZER_INTERCEPT_SIGSETOPS 0
+
+#undef SANITIZER_INTERCEPT_SIGSET_LOGICOPS
+#define SANITIZER_INTERCEPT_SIGSET_LOGICOPS 0
+
+#undef SANITIZER_INTERCEPT_SIGPENDING
+#define SANITIZER_INTERCEPT_SIGPENDING 0
+
+#undef SANITIZER_INTERCEPT_SIGPROCMASK
+#define SANITIZER_INTERCEPT_SIGPROCMASK 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_SIGMASK
+#define SANITIZER_INTERCEPT_PTHREAD_SIGMASK 0
+
+#undef SANITIZER_INTERCEPT_BACKTRACE
+#define SANITIZER_INTERCEPT_BACKTRACE 0
+
+#undef SANITIZER_INTERCEPT_GETMNTENT
+#define SANITIZER_INTERCEPT_GETMNTENT 0
+
+#undef SANITIZER_INTERCEPT_GETMNTENT_R
+#define SANITIZER_INTERCEPT_GETMNTENT_R 0
+
+#undef SANITIZER_INTERCEPT_STATFS
+#define SANITIZER_INTERCEPT_STATFS 0
+
+#undef SANITIZER_INTERCEPT_STATFS64
+#define SANITIZER_INTERCEPT_STATFS64 0
+
+#undef SANITIZER_INTERCEPT_STATVFS
+#define SANITIZER_INTERCEPT_STATVFS 0
+
+#undef SANITIZER_INTERCEPT_STATVFS64
+#define SANITIZER_INTERCEPT_STATVFS64 0
+
+#undef SANITIZER_INTERCEPT_INITGROUPS
+#define SANITIZER_INTERCEPT_INITGROUPS 0
+
+#undef SANITIZER_INTERCEPT_ETHER_NTOA_ATON
+#define SANITIZER_INTERCEPT_ETHER_NTOA_ATON 0
+
+#undef SANITIZER_INTERCEPT_ETHER_HOST
+#define SANITIZER_INTERCEPT_ETHER_HOST 0
+
+#undef SANITIZER_INTERCEPT_ETHER_R
+#define SANITIZER_INTERCEPT_ETHER_R 0
+
+#undef SANITIZER_INTERCEPT_SHMCTL
+#define SANITIZER_INTERCEPT_SHMCTL 0
+
+#undef SANITIZER_INTERCEPT_RANDOM_R
+#define SANITIZER_INTERCEPT_RANDOM_R 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_ATTR_GET
+#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED
+#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_ATTR_GETAFFINITY_NP
+#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETAFFINITY_NP 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_GETAFFINITY_NP
+#define SANITIZER_INTERCEPT_PTHREAD_GETAFFINITY_NP 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_ATTR_GET_SCHED
+#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET_SCHED 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPSHARED
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPSHARED 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETTYPE
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETTYPE 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPROTOCOL
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPROTOCOL 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPRIOCEILING
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPRIOCEILING 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST_NP
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST_NP 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETPSHARED
+#define SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETPSHARED 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETKIND_NP
+#define SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETKIND_NP 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETPSHARED
+#define SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETPSHARED 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETCLOCK
+#define SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETCLOCK 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_BARRIERATTR_GETPSHARED
+#define SANITIZER_INTERCEPT_PTHREAD_BARRIERATTR_GETPSHARED 0
+
+#undef SANITIZER_INTERCEPT_TRYJOIN
+#define SANITIZER_INTERCEPT_TRYJOIN 0
+
+#undef SANITIZER_INTERCEPT_TIMEDJOIN
+#define SANITIZER_INTERCEPT_TIMEDJOIN 0
+
+#undef SANITIZER_INTERCEPT_THR_EXIT
+#define SANITIZER_INTERCEPT_THR_EXIT 0
+
+#undef SANITIZER_INTERCEPT_TMPNAM
+#define SANITIZER_INTERCEPT_TMPNAM 0
+
+#undef SANITIZER_INTERCEPT_TMPNAM_R
+#define SANITIZER_INTERCEPT_TMPNAM_R 0
+
+#undef SANITIZER_INTERCEPT_PTSNAME
+#define SANITIZER_INTERCEPT_PTSNAME 0
+
+#undef SANITIZER_INTERCEPT_PTSNAME_R
+#define SANITIZER_INTERCEPT_PTSNAME_R 0
+
+#undef SANITIZER_INTERCEPT_TTYNAME
+#define SANITIZER_INTERCEPT_TTYNAME 0
+
+#undef SANITIZER_INTERCEPT_TTYNAME_R
+#define SANITIZER_INTERCEPT_TTYNAME_R 0
+
+#undef SANITIZER_INTERCEPT_TEMPNAM
+#define SANITIZER_INTERCEPT_TEMPNAM 0
+
+#undef SANITIZER_INTERCEPT_SINCOS
+#define SANITIZER_INTERCEPT_SINCOS 0
+
+#undef SANITIZER_INTERCEPT_REMQUO
+#define SANITIZER_INTERCEPT_REMQUO 0
+
+#undef SANITIZER_INTERCEPT_REMQUOL
+#define SANITIZER_INTERCEPT_REMQUOL 0
+
+#undef SANITIZER_INTERCEPT_LGAMMA
+#define SANITIZER_INTERCEPT_LGAMMA 0
+
+#undef SANITIZER_INTERCEPT_LGAMMAL
+#define SANITIZER_INTERCEPT_LGAMMAL 0
+
+#undef SANITIZER_INTERCEPT_LGAMMA_R
+#define SANITIZER_INTERCEPT_LGAMMA_R 0
+
+#undef SANITIZER_INTERCEPT_LGAMMAL_R
+#define SANITIZER_INTERCEPT_LGAMMAL_R 0
+
+#undef SANITIZER_INTERCEPT_DRAND48_R
+#define SANITIZER_INTERCEPT_DRAND48_R 0
+
+#undef SANITIZER_INTERCEPT_RAND_R
+#define SANITIZER_INTERCEPT_RAND_R 0
+
+#undef SANITIZER_INTERCEPT_ICONV
+#define SANITIZER_INTERCEPT_ICONV 0
+
+#undef SANITIZER_INTERCEPT_TIMES
+#define SANITIZER_INTERCEPT_TIMES 0
+
+#undef SANITIZER_INTERCEPT_GETLINE
+#define SANITIZER_INTERCEPT_GETLINE 0
+
+#undef SANITIZER_INTERCEPT__EXIT
+#define SANITIZER_INTERCEPT__EXIT 0
+
+#undef SANITIZER_INTERCEPT___LIBC_MUTEX
+#define SANITIZER_INTERCEPT___LIBC_MUTEX 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP
+#define SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_GETNAME_NP
+#define SANITIZER_INTERCEPT_PTHREAD_GETNAME_NP 0
+
+#undef SANITIZER_INTERCEPT_TLS_GET_ADDR
+#define SANITIZER_INTERCEPT_TLS_GET_ADDR 0
+
+#undef SANITIZER_INTERCEPT_LISTXATTR
+#define SANITIZER_INTERCEPT_LISTXATTR 0
+
+#undef SANITIZER_INTERCEPT_GETXATTR
+#define SANITIZER_INTERCEPT_GETXATTR 0
+
+#undef SANITIZER_INTERCEPT_GETRESID
+#define SANITIZER_INTERCEPT_GETRESID 0
+
+#undef SANITIZER_INTERCEPT_GETIFADDRS
+#define SANITIZER_INTERCEPT_GETIFADDRS 0
+
+#undef SANITIZER_INTERCEPT_IF_INDEXTONAME
+#define SANITIZER_INTERCEPT_IF_INDEXTONAME 0
+
+#undef SANITIZER_INTERCEPT_CAPGET
+#define SANITIZER_INTERCEPT_CAPGET 0
+
+#undef SANITIZER_INTERCEPT_AEABI_MEM
+#define SANITIZER_INTERCEPT_AEABI_MEM 0
+
+#undef SANITIZER_INTERCEPT___BZERO
+#define SANITIZER_INTERCEPT___BZERO 0
+
+#undef SANITIZER_INTERCEPT_BZERO
+#define SANITIZER_INTERCEPT_BZERO 0
+
+#undef SANITIZER_INTERCEPT_FTIME
+#define SANITIZER_INTERCEPT_FTIME 0
+
+#undef SANITIZER_INTERCEPT_XDR
+#define SANITIZER_INTERCEPT_XDR 0
+
+#undef SANITIZER_INTERCEPT_XDRREC
+#define SANITIZER_INTERCEPT_XDRREC 0
+
+#undef SANITIZER_INTERCEPT_TSEARCH
+#define SANITIZER_INTERCEPT_TSEARCH 0
+
+#undef SANITIZER_INTERCEPT_LIBIO_INTERNALS
+#define SANITIZER_INTERCEPT_LIBIO_INTERNALS 0
+
+#undef SANITIZER_INTERCEPT_FOPEN
+#define SANITIZER_INTERCEPT_FOPEN 0
+
+#undef SANITIZER_INTERCEPT_FOPEN64
+#define SANITIZER_INTERCEPT_FOPEN64 0
+
+#undef SANITIZER_INTERCEPT_OPEN_MEMSTREAM
+#define SANITIZER_INTERCEPT_OPEN_MEMSTREAM 0
+
+#undef SANITIZER_INTERCEPT_OBSTACK
+#define SANITIZER_INTERCEPT_OBSTACK 0
+
+#undef SANITIZER_INTERCEPT_FFLUSH
+#define SANITIZER_INTERCEPT_FFLUSH 0
+
+#undef SANITIZER_INTERCEPT_FCLOSE
+#define SANITIZER_INTERCEPT_FCLOSE 0
+
+#undef SANITIZER_INTERCEPT_DLOPEN_DLCLOSE
+#define SANITIZER_INTERCEPT_DLOPEN_DLCLOSE 0
+
+#undef SANITIZER_INTERCEPT_GETPASS
+#define SANITIZER_INTERCEPT_GETPASS 0
+
+#undef SANITIZER_INTERCEPT_TIMERFD
+#define SANITIZER_INTERCEPT_TIMERFD 0
+
+#undef SANITIZER_INTERCEPT_MLOCKX
+#define SANITIZER_INTERCEPT_MLOCKX 0
+
+#undef SANITIZER_INTERCEPT_FOPENCOOKIE
+#define SANITIZER_INTERCEPT_FOPENCOOKIE 0
+
+#undef SANITIZER_INTERCEPT_SEM
+#define SANITIZER_INTERCEPT_SEM 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_SETCANCEL
+#define SANITIZER_INTERCEPT_PTHREAD_SETCANCEL 0
+
+#undef SANITIZER_INTERCEPT_MINCORE
+#define SANITIZER_INTERCEPT_MINCORE 0
+
+#undef SANITIZER_INTERCEPT_PROCESS_VM_READV
+#define SANITIZER_INTERCEPT_PROCESS_VM_READV 0
+
+#undef SANITIZER_INTERCEPT_CTERMID
+#define SANITIZER_INTERCEPT_CTERMID 0
+
+#undef SANITIZER_INTERCEPT_CTERMID_R
+#define SANITIZER_INTERCEPT_CTERMID_R 0
+
+#undef SANITIZER_INTERCEPTOR_HOOKS
+#define SANITIZER_INTERCEPTOR_HOOKS 0
+
+#undef SANITIZER_INTERCEPT_RECV_RECVFROM
+#define SANITIZER_INTERCEPT_RECV_RECVFROM 0
+
+#undef SANITIZER_INTERCEPT_SEND_SENDTO
+#define SANITIZER_INTERCEPT_SEND_SENDTO 0
+
+#undef SANITIZER_INTERCEPT_EVENTFD_READ_WRITE
+#define SANITIZER_INTERCEPT_EVENTFD_READ_WRITE 0
+
+#undef SANITIZER_INTERCEPT_STAT
+#define SANITIZER_INTERCEPT_STAT 0
+
+#undef SANITIZER_INTERCEPT_STAT64
+#define SANITIZER_INTERCEPT_STAT64 0
+
+#undef SANITIZER_INTERCEPT_LSTAT
+#define SANITIZER_INTERCEPT_LSTAT 0
+
+#undef SANITIZER_INTERCEPT___XSTAT
+#define SANITIZER_INTERCEPT___XSTAT 0
+
+#undef SANITIZER_INTERCEPT___XSTAT64
+#define SANITIZER_INTERCEPT___XSTAT64 0
+
+#undef SANITIZER_INTERCEPT___LXSTAT
+#define SANITIZER_INTERCEPT___LXSTAT 0
+
+#undef SANITIZER_INTERCEPT___LXSTAT64
+#define SANITIZER_INTERCEPT___LXSTAT64 0
+
+#undef SANITIZER_INTERCEPT_UTMP
+#define SANITIZER_INTERCEPT_UTMP 0
+
+#undef SANITIZER_INTERCEPT_UTMPX
+#define SANITIZER_INTERCEPT_UTMPX 0
+
+#undef SANITIZER_INTERCEPT_GETLOADAVG
+#define SANITIZER_INTERCEPT_GETLOADAVG 0
+
+// #undef SANITIZER_INTERCEPT_MMAP
+// #define SANITIZER_INTERCEPT_MMAP 0
+
+#undef SANITIZER_INTERCEPT_MMAP64
+#define SANITIZER_INTERCEPT_MMAP64 0
+
+#undef SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO
+#define SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO 0
+
+#undef SANITIZER_INTERCEPT_MEMALIGN
+#define SANITIZER_INTERCEPT_MEMALIGN 0
+
+#undef SANITIZER_INTERCEPT___LIBC_MEMALIGN
+#define SANITIZER_INTERCEPT___LIBC_MEMALIGN 0
+
+#undef SANITIZER_INTERCEPT_PVALLOC
+#define SANITIZER_INTERCEPT_PVALLOC 0
+
+#undef SANITIZER_INTERCEPT_CFREE
+#define SANITIZER_INTERCEPT_CFREE 0
+
+#undef SANITIZER_INTERCEPT_REALLOCARRAY
+#define SANITIZER_INTERCEPT_REALLOCARRAY 0
+
+#undef SANITIZER_INTERCEPT_ALIGNED_ALLOC
+#define SANITIZER_INTERCEPT_ALIGNED_ALLOC 0
+
+#undef SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE
+#define SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE 0
+
+#undef SANITIZER_INTERCEPT_MCHECK_MPROBE
+#define SANITIZER_INTERCEPT_MCHECK_MPROBE 0
+
+#undef SANITIZER_INTERCEPT_WCSLEN
+#define SANITIZER_INTERCEPT_WCSLEN 0
+
+#undef SANITIZER_INTERCEPT_WCSCAT
+#define SANITIZER_INTERCEPT_WCSCAT 0
+
+#undef SANITIZER_INTERCEPT_WCSDUP
+#define SANITIZER_INTERCEPT_WCSDUP 0
+
+#undef SANITIZER_INTERCEPT_SIGNAL_AND_SIGACTION
+#define SANITIZER_INTERCEPT_SIGNAL_AND_SIGACTION 0
+
+#undef SANITIZER_INTERCEPT_BSD_SIGNAL
+#define SANITIZER_INTERCEPT_BSD_SIGNAL 0
+
+#undef SANITIZER_INTERCEPT_ACCT
+#define SANITIZER_INTERCEPT_ACCT 0
+
+#undef SANITIZER_INTERCEPT_USER_FROM_UID
+#define SANITIZER_INTERCEPT_USER_FROM_UID 0
+
+#undef SANITIZER_INTERCEPT_UID_FROM_USER
+#define SANITIZER_INTERCEPT_UID_FROM_USER 0
+
+#undef SANITIZER_INTERCEPT_GROUP_FROM_GID
+#define SANITIZER_INTERCEPT_GROUP_FROM_GID 0
+
+#undef SANITIZER_INTERCEPT_GID_FROM_GROUP
+#define SANITIZER_INTERCEPT_GID_FROM_GROUP 0
+
+#undef SANITIZER_INTERCEPT_ACCESS
+#define SANITIZER_INTERCEPT_ACCESS 0
+
+#undef SANITIZER_INTERCEPT_FACCESSAT
+#define SANITIZER_INTERCEPT_FACCESSAT 0
+
+#undef SANITIZER_INTERCEPT_GETGROUPLIST
+#define SANITIZER_INTERCEPT_GETGROUPLIST 0
+
+#undef SANITIZER_INTERCEPT_STRLCPY
+#define SANITIZER_INTERCEPT_STRLCPY 0
+
+#undef SANITIZER_INTERCEPT_NAME_TO_HANDLE_AT
+#define SANITIZER_INTERCEPT_NAME_TO_HANDLE_AT 0
+
+#undef SANITIZER_INTERCEPT_OPEN_BY_HANDLE_AT
+#define SANITIZER_INTERCEPT_OPEN_BY_HANDLE_AT 0
+
+#undef SANITIZER_INTERCEPT_READLINK
+#define SANITIZER_INTERCEPT_READLINK 0
+
+#undef SANITIZER_INTERCEPT_READLINKAT
+#define SANITIZER_INTERCEPT_READLINKAT 0
+
+#undef SANITIZER_INTERCEPT_DEVNAME
+#define SANITIZER_INTERCEPT_DEVNAME 0
+
+#undef SANITIZER_INTERCEPT_DEVNAME_R
+#define SANITIZER_INTERCEPT_DEVNAME_R 0
+
+#undef SANITIZER_INTERCEPT_FGETLN
+#define SANITIZER_INTERCEPT_FGETLN 0
+
+#undef SANITIZER_INTERCEPT_STRMODE
+#define SANITIZER_INTERCEPT_STRMODE 0
+
+#undef SANITIZER_INTERCEPT_TTYENT
+#define SANITIZER_INTERCEPT_TTYENT 0
+
+#undef SANITIZER_INTERCEPT_TTYENTPATH
+#define SANITIZER_INTERCEPT_TTYENTPATH 0
+
+#undef SANITIZER_INTERCEPT_PROTOENT
+#define SANITIZER_INTERCEPT_PROTOENT 0
+
+#undef SANITIZER_INTERCEPT_PROTOENT_R
+#define SANITIZER_INTERCEPT_PROTOENT_R 0
+
+#undef SANITIZER_INTERCEPT_NETENT
+#define SANITIZER_INTERCEPT_NETENT 0
+
+#undef SANITIZER_INTERCEPT_SETVBUF
+#define SANITIZER_INTERCEPT_SETVBUF 0
+
+#undef SANITIZER_INTERCEPT_GETMNTINFO
+#define SANITIZER_INTERCEPT_GETMNTINFO 0
+
+#undef SANITIZER_INTERCEPT_MI_VECTOR_HASH
+#define SANITIZER_INTERCEPT_MI_VECTOR_HASH 0
+
+#undef SANITIZER_INTERCEPT_GETVFSSTAT
+#define SANITIZER_INTERCEPT_GETVFSSTAT 0
+
+#undef SANITIZER_INTERCEPT_REGEX
+#define SANITIZER_INTERCEPT_REGEX 0
+
+#undef SANITIZER_INTERCEPT_REGEXSUB
+#define SANITIZER_INTERCEPT_REGEXSUB 0
+
+#undef SANITIZER_INTERCEPT_FTS
+#define SANITIZER_INTERCEPT_FTS 0
+
+#undef SANITIZER_INTERCEPT_SYSCTL
+#define SANITIZER_INTERCEPT_SYSCTL 0
+
+#undef SANITIZER_INTERCEPT_ASYSCTL
+#define SANITIZER_INTERCEPT_ASYSCTL 0
+
+#undef SANITIZER_INTERCEPT_SYSCTLGETMIBINFO
+#define SANITIZER_INTERCEPT_SYSCTLGETMIBINFO 0
+
+#undef SANITIZER_INTERCEPT_NL_LANGINFO
+#define SANITIZER_INTERCEPT_NL_LANGINFO 0
+
+#undef SANITIZER_INTERCEPT_MODCTL
+#define SANITIZER_INTERCEPT_MODCTL 0
+
+#undef SANITIZER_INTERCEPT_CAPSICUM
+#define SANITIZER_INTERCEPT_CAPSICUM 0
+
+#undef SANITIZER_INTERCEPT_STRTONUM
+#define SANITIZER_INTERCEPT_STRTONUM 0
+
+#undef SANITIZER_INTERCEPT_FPARSELN
+#define SANITIZER_INTERCEPT_FPARSELN 0
+
+#undef SANITIZER_INTERCEPT_STATVFS1
+#define SANITIZER_INTERCEPT_STATVFS1 0
+
+#undef SANITIZER_INTERCEPT_STRTOI
+#define SANITIZER_INTERCEPT_STRTOI 0
+
+#undef SANITIZER_INTERCEPT_SHA1
+#define SANITIZER_INTERCEPT_SHA1 0
+
+#undef SANITIZER_INTERCEPT_MD4
+#define SANITIZER_INTERCEPT_MD4 0
+
+#undef SANITIZER_INTERCEPT_RMD160
+#define SANITIZER_INTERCEPT_RMD160 0
+
+#undef SANITIZER_INTERCEPT_MD5
+#define SANITIZER_INTERCEPT_MD5 0
+
+#undef SANITIZER_INTERCEPT_FSEEK
+#define SANITIZER_INTERCEPT_FSEEK 0
+
+#undef SANITIZER_INTERCEPT_MD2
+#define SANITIZER_INTERCEPT_MD2 0
+
+#undef SANITIZER_INTERCEPT_SHA2
+#define SANITIZER_INTERCEPT_SHA2 0
+
+#undef SANITIZER_INTERCEPT_CDB
+#define SANITIZER_INTERCEPT_CDB 0
+
+#undef SANITIZER_INTERCEPT_VIS
+#define SANITIZER_INTERCEPT_VIS 0
+
+#undef SANITIZER_INTERCEPT_POPEN
+#define SANITIZER_INTERCEPT_POPEN 0
+
+#undef SANITIZER_INTERCEPT_POPENVE
+#define SANITIZER_INTERCEPT_POPENVE 0
+
+#undef SANITIZER_INTERCEPT_PCLOSE
+#define SANITIZER_INTERCEPT_PCLOSE 0
+
+#undef SANITIZER_INTERCEPT_FUNOPEN
+#define SANITIZER_INTERCEPT_FUNOPEN 0
+
+#undef SANITIZER_INTERCEPT_FUNOPEN2
+#define SANITIZER_INTERCEPT_FUNOPEN2 0
+
+#undef SANITIZER_INTERCEPT_GETFSENT
+#define SANITIZER_INTERCEPT_GETFSENT 0
+
+#undef SANITIZER_INTERCEPT_ARC4RANDOM
+#define SANITIZER_INTERCEPT_ARC4RANDOM 0
+
+#undef SANITIZER_INTERCEPT_FDEVNAME
+#define SANITIZER_INTERCEPT_FDEVNAME 0
+
+#undef SANITIZER_INTERCEPT_GETUSERSHELL
+#define SANITIZER_INTERCEPT_GETUSERSHELL 0
+
+#undef SANITIZER_INTERCEPT_SL_INIT
+#define SANITIZER_INTERCEPT_SL_INIT 0
+
+#undef SANITIZER_INTERCEPT_GETRANDOM
+#define SANITIZER_INTERCEPT_GETRANDOM 0
+
+#undef SANITIZER_INTERCEPT___CXA_ATEXIT
+#define SANITIZER_INTERCEPT___CXA_ATEXIT 0
+
+#undef SANITIZER_INTERCEPT_ATEXIT
+#define SANITIZER_INTERCEPT_ATEXIT 0
+
+#undef SANITIZER_INTERCEPT_PTHREAD_ATFORK
+#define SANITIZER_INTERCEPT_PTHREAD_ATFORK 0
+
+#undef SANITIZER_INTERCEPT_GETENTROPY
+#define SANITIZER_INTERCEPT_GETENTROPY 0
+
+#undef SANITIZER_INTERCEPT_QSORT
+#define SANITIZER_INTERCEPT_QSORT 0
+
+#undef SANITIZER_INTERCEPT_QSORT_R
+#define SANITIZER_INTERCEPT_QSORT_R 0
+
+#undef SANITIZER_INTERCEPT_BSEARCH
+#define SANITIZER_INTERCEPT_BSEARCH 0
+
+#undef SANITIZER_INTERCEPT_SIGALTSTACK
+#define SANITIZER_INTERCEPT_SIGALTSTACK 0
+
+#undef SANITIZER_INTERCEPT_UNAME
+#define SANITIZER_INTERCEPT_UNAME 0
+
+#undef SANITIZER_INTERCEPT___XUNAME
+#define SANITIZER_INTERCEPT___XUNAME 0
+
+#undef SANITIZER_INTERCEPT_FLOPEN
+#define SANITIZER_INTERCEPT_FLOPEN 0
+
+#undef SANITIZER_INTERCEPT_PROCCTL
+#define SANITIZER_INTERCEPT_PROCCTL 0
+
+#undef SANITIZER_INTERCEPT_HEXDUMP
+#define SANITIZER_INTERCEPT_HEXDUMP 0
+
+#undef SANITIZER_INTERCEPT_ARGP_PARSE
+#define SANITIZER_INTERCEPT_ARGP_PARSE 0
+
+#endif // HWASAN_PLATFORM_INTERCEPTORS_H
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_poisoning.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_poisoning.cpp
index 5aafdb1884b5..a4e5935754a8 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_poisoning.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_poisoning.cpp
@@ -26,3 +26,11 @@ uptr TagMemory(uptr p, uptr size, tag_t tag) {
}
} // namespace __hwasan
+
+// --- Implementation of LSan-specific functions --- {{{1
+namespace __lsan {
+bool WordIsPoisoned(uptr addr) {
+  // FIXME: implement actual tag checking.
+ return false;
+}
+} // namespace __lsan
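
The WordIsPoisoned stub above keeps LSan linking against HWASan but always reports memory as unpoisoned. A minimal sketch of the tag checking the FIXME asks for, assuming the queried address still carries its pointer tag and reusing hwasan's existing UntagAddr/MemToShadow/GetTagFromPointer helpers (this wiring is an assumption, not part of the patch):

    namespace __lsan {
    bool WordIsPoisoned(uptr addr) {
      // A word is poisoned when the tag stored in shadow for its granule
      // differs from the tag embedded in the pointer's top byte.
      uptr untagged = __hwasan::UntagAddr(addr);
      __hwasan::tag_t mem_tag =
          *reinterpret_cast<__hwasan::tag_t *>(__hwasan::MemToShadow(untagged));
      return mem_tag != __hwasan::GetTagFromPointer(addr);
    }
    }  // namespace __lsan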
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_preinit.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_preinit.cpp
new file mode 100644
index 000000000000..8c9c95f413be
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_preinit.cpp
@@ -0,0 +1,23 @@
+//===-- hwasan_preinit.cpp ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer, an address sanity checker.
+//
+// Call __hwasan_init at the very early stage of process startup.
+//===----------------------------------------------------------------------===//
+#include "hwasan_interface_internal.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+#if SANITIZER_CAN_USE_PREINIT_ARRAY
+// The symbol is called __local_hwasan_preinit because it is not intended to
+// be exported.
+// This code is linked into the main executable when -fsanitize=hwaddress is
+// in the link flags. It can only use exported interface functions.
+__attribute__((section(".preinit_array"), used)) static void (
+ *__local_hwasan_preinit)(void) = __hwasan_init;
+#endif
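
For background, .preinit_array is the ELF hook the dynamic loader walks before any .init_array constructors run, and it is only honored in the main executable, which is why the comment above stresses where this file gets linked. A standalone illustration of the same pattern (hypothetical example, independent of this patch):

    #include <cstdio>

    static void early_init() {
      // Runs before main() and before any .init_array constructors.
      std::puts("preinit");
    }

    // 'static' keeps the symbol local (the same reason the patch names its
    // pointer __local_hwasan_preinit); 'used' stops the linker from
    // discarding the apparently unreferenced pointer.
    __attribute__((section(".preinit_array"), used))
    static void (*local_preinit)(void) = early_init;

    int main() { std::puts("main"); }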
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_registers.h b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_registers.h
new file mode 100644
index 000000000000..48a140ffc923
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_registers.h
@@ -0,0 +1,56 @@
+//===-- hwasan_registers.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This describes the register state retrieved by hwasan during error
+// reporting.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef HWASAN_REGISTERS_H
+#define HWASAN_REGISTERS_H
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_platform.h"
+
+#if defined(__aarch64__)
+
+# define CAN_GET_REGISTERS 1
+
+struct Registers {
+ uptr x[32];
+};
+
+__attribute__((always_inline, unused)) static Registers GetRegisters() {
+ Registers regs;
+ __asm__ volatile(
+ "stp x0, x1, [%1, #(8 * 0)]\n"
+ "stp x2, x3, [%1, #(8 * 2)]\n"
+ "stp x4, x5, [%1, #(8 * 4)]\n"
+ "stp x6, x7, [%1, #(8 * 6)]\n"
+ "stp x8, x9, [%1, #(8 * 8)]\n"
+ "stp x10, x11, [%1, #(8 * 10)]\n"
+ "stp x12, x13, [%1, #(8 * 12)]\n"
+ "stp x14, x15, [%1, #(8 * 14)]\n"
+ "stp x16, x17, [%1, #(8 * 16)]\n"
+ "stp x18, x19, [%1, #(8 * 18)]\n"
+ "stp x20, x21, [%1, #(8 * 20)]\n"
+ "stp x22, x23, [%1, #(8 * 22)]\n"
+ "stp x24, x25, [%1, #(8 * 24)]\n"
+ "stp x26, x27, [%1, #(8 * 26)]\n"
+ "stp x28, x29, [%1, #(8 * 28)]\n"
+ : "=m"(regs)
+ : "r"(regs.x));
+ regs.x[30] = reinterpret_cast<uintptr_t>(__builtin_return_address(0));
+ regs.x[31] = reinterpret_cast<uintptr_t>(__builtin_frame_address(0));
+ return regs;
+}
+
+#else
+# define CAN_GET_REGISTERS 0
+#endif
+
+#endif // HWASAN_REGISTERS_H
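
The stp sequence above saves x0-x29 two registers at a time relative to the buffer address in %1; the link register and frame pointer land in slots 30 and 31 via builtins afterwards. A sketch of a call site (assumed for illustration; the patch only adds the header):

    #include "hwasan_registers.h"

    static void DumpRegistersIfAvailable() {
    #if CAN_GET_REGISTERS
      // GetRegisters() is always_inline, so it captures the register state
      // of this frame rather than that of a helper function.
      Registers regs = GetRegisters();
      for (int i = 0; i < 32; ++i)
        __sanitizer::Printf("x%d: 0x%zx\n", i, regs.x[i]);
    #endif
    }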
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_report.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_report.cpp
index 44047c9fdaf8..12a4fa47f215 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_report.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_report.cpp
@@ -22,8 +22,10 @@
#include "hwasan_thread.h"
#include "hwasan_thread_list.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
+#include "sanitizer_common/sanitizer_array_ref.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
@@ -36,8 +38,8 @@ namespace __hwasan {
class ScopedReport {
public:
- ScopedReport(bool fatal = false) : error_message_(1), fatal(fatal) {
- BlockingMutexLock lock(&error_message_lock_);
+ explicit ScopedReport(bool fatal) : fatal(fatal) {
+ Lock lock(&error_message_lock_);
error_message_ptr_ = fatal ? &error_message_ : nullptr;
++hwasan_report_count;
}
@@ -45,7 +47,7 @@ class ScopedReport {
~ScopedReport() {
void (*report_cb)(const char *);
{
- BlockingMutexLock lock(&error_message_lock_);
+ Lock lock(&error_message_lock_);
report_cb = error_report_callback_;
error_message_ptr_ = nullptr;
}
@@ -61,33 +63,29 @@ class ScopedReport {
}
static void MaybeAppendToErrorMessage(const char *msg) {
- BlockingMutexLock lock(&error_message_lock_);
+ Lock lock(&error_message_lock_);
if (!error_message_ptr_)
return;
- uptr len = internal_strlen(msg);
- uptr old_size = error_message_ptr_->size();
- error_message_ptr_->resize(old_size + len);
- // overwrite old trailing '\0', keep new trailing '\0' untouched.
- internal_memcpy(&(*error_message_ptr_)[old_size - 1], msg, len);
+ error_message_ptr_->Append(msg);
}
static void SetErrorReportCallback(void (*callback)(const char *)) {
- BlockingMutexLock lock(&error_message_lock_);
+ Lock lock(&error_message_lock_);
error_report_callback_ = callback;
}
private:
- ScopedErrorReportLock error_report_lock_;
- InternalMmapVector<char> error_message_;
+ InternalScopedString error_message_;
bool fatal;
- static InternalMmapVector<char> *error_message_ptr_;
- static BlockingMutex error_message_lock_;
+ static Mutex error_message_lock_;
+ static InternalScopedString *error_message_ptr_
+ SANITIZER_GUARDED_BY(error_message_lock_);
static void (*error_report_callback_)(const char *);
};
-InternalMmapVector<char> *ScopedReport::error_message_ptr_;
-BlockingMutex ScopedReport::error_message_lock_;
+Mutex ScopedReport::error_message_lock_;
+InternalScopedString *ScopedReport::error_message_ptr_;
void (*ScopedReport::error_report_callback_)(const char *);
// If there is an active ScopedReport, append to its error message.
@@ -102,29 +100,54 @@ static StackTrace GetStackTraceFromId(u32 id) {
return res;
}
+static void MaybePrintAndroidHelpUrl() {
+#if SANITIZER_ANDROID
+ Printf(
+ "Learn more about HWASan reports: "
+ "https://source.android.com/docs/security/test/memory-safety/"
+ "hwasan-reports\n");
+#endif
+}
+
+namespace {
// A RAII object that holds a copy of the current thread stack ring buffer.
// The actual stack buffer may change while we are iterating over it (for
// example, Printf may call syslog() which can itself be built with hwasan).
class SavedStackAllocations {
public:
- SavedStackAllocations(StackAllocationsRingBuffer *rb) {
+ SavedStackAllocations() = default;
+
+ explicit SavedStackAllocations(Thread *t) { CopyFrom(t); }
+
+ void CopyFrom(Thread *t) {
+ StackAllocationsRingBuffer *rb = t->stack_allocations();
uptr size = rb->size() * sizeof(uptr);
void *storage =
MmapAlignedOrDieOnFatalError(size, size * 2, "saved stack allocations");
new (&rb_) StackAllocationsRingBuffer(*rb, storage);
+ thread_id_ = t->unique_id();
}
~SavedStackAllocations() {
- StackAllocationsRingBuffer *rb = get();
- UnmapOrDie(rb->StartOfStorage(), rb->size() * sizeof(uptr));
+ if (rb_) {
+ StackAllocationsRingBuffer *rb = get();
+ UnmapOrDie(rb->StartOfStorage(), rb->size() * sizeof(uptr));
+ }
+ }
+
+ const StackAllocationsRingBuffer *get() const {
+ return (const StackAllocationsRingBuffer *)&rb_;
}
StackAllocationsRingBuffer *get() {
return (StackAllocationsRingBuffer *)&rb_;
}
+ u32 thread_id() const { return thread_id_; }
+
private:
- uptr rb_;
+ uptr rb_ = 0;
+ u32 thread_id_;
};
class Decorator: public __sanitizer::SanitizerCommonDecorator {
@@ -137,6 +160,7 @@ class Decorator: public __sanitizer::SanitizerCommonDecorator {
const char *Location() { return Green(); }
const char *Thread() { return Green(); }
};
+} // namespace
static bool FindHeapAllocation(HeapAllocationsRingBuffer *rb, uptr tagged_addr,
HeapAllocationRecord *har, uptr *ring_index,
@@ -177,10 +201,11 @@ static bool FindHeapAllocation(HeapAllocationsRingBuffer *rb, uptr tagged_addr,
return false;
}
-static void PrintStackAllocations(StackAllocationsRingBuffer *sa,
+static void PrintStackAllocations(const StackAllocationsRingBuffer *sa,
tag_t addr_tag, uptr untagged_addr) {
uptr frames = Min((uptr)flags()->stack_history_size, sa->size());
bool found_local = false;
+ InternalScopedString location;
for (uptr i = 0; i < frames; i++) {
const uptr *record_addr = &(*sa)[i];
uptr record = *record_addr;
@@ -196,24 +221,56 @@ static void PrintStackAllocations(StackAllocationsRingBuffer *sa,
for (LocalInfo &local : frame.locals) {
if (!local.has_frame_offset || !local.has_size || !local.has_tag_offset)
continue;
+ if (!(local.name && internal_strlen(local.name)) &&
+ !(local.function_name && internal_strlen(local.function_name)) &&
+ !(local.decl_file && internal_strlen(local.decl_file)))
+ continue;
tag_t obj_tag = base_tag ^ local.tag_offset;
if (obj_tag != addr_tag)
continue;
- // Calculate the offset from the object address to the faulting
- // address. Because we only store bits 4-19 of FP (bits 0-3 are
- // guaranteed to be zero), the calculation is performed mod 2^20 and may
- // harmlessly underflow if the address mod 2^20 is below the object
- // address.
- uptr obj_offset =
- (untagged_addr - fp - local.frame_offset) & (kRecordFPModulus - 1);
- if (obj_offset >= local.size)
- continue;
+ // Guess top bits of local variable from the faulting address, because
+ // we only store bits 4-19 of FP (bits 0-3 are guaranteed to be zero).
+ uptr local_beg = (fp + local.frame_offset) |
+ (untagged_addr & ~(uptr(kRecordFPModulus) - 1));
+ uptr local_end = local_beg + local.size;
+
if (!found_local) {
- Printf("Potentially referenced stack objects:\n");
+ Printf("\nPotentially referenced stack objects:\n");
found_local = true;
}
- Printf(" %s in %s %s:%d\n", local.name, local.function_name,
- local.decl_file, local.decl_line);
+
+ uptr offset;
+ const char *whence;
+ const char *cause;
+ if (local_beg <= untagged_addr && untagged_addr < local_end) {
+ offset = untagged_addr - local_beg;
+ whence = "inside";
+ cause = "use-after-scope";
+ } else if (untagged_addr >= local_end) {
+ offset = untagged_addr - local_end;
+ whence = "after";
+ cause = "stack-buffer-overflow";
+ } else {
+ offset = local_beg - untagged_addr;
+ whence = "before";
+ cause = "stack-buffer-overflow";
+ }
+ Decorator d;
+ Printf("%s", d.Error());
+ Printf("Cause: %s\n", cause);
+ Printf("%s", d.Default());
+ Printf("%s", d.Location());
+ StackTracePrinter::GetOrInit()->RenderSourceLocation(
+ &location, local.decl_file, local.decl_line, /* column= */ 0,
+ common_flags()->symbolize_vs_style,
+ common_flags()->strip_path_prefix);
+ Printf(
+ "%p is located %zd bytes %s a %zd-byte local variable %s [%p,%p) "
+ "in %s %s\n",
+ untagged_addr, offset, whence, local_end - local_beg, local.name,
+ local_beg, local_end, local.function_name, location.data());
+ location.clear();
+ Printf("%s\n", d.Default());
}
frame.Clear();
}
@@ -233,13 +290,16 @@ static void PrintStackAllocations(StackAllocationsRingBuffer *sa,
break;
uptr pc_mask = (1ULL << 48) - 1;
uptr pc = record & pc_mask;
- frame_desc.append(" record_addr:0x%zx record:0x%zx",
- reinterpret_cast<uptr>(record_addr), record);
- if (SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc)) {
- RenderFrame(&frame_desc, " %F %L", 0, frame->info.address, &frame->info,
- common_flags()->symbolize_vs_style,
- common_flags()->strip_path_prefix);
- frame->ClearAll();
+ frame_desc.AppendF(" record_addr:0x%zx record:0x%zx",
+ reinterpret_cast<uptr>(record_addr), record);
+ SymbolizedStackHolder symbolized_stack(
+ Symbolizer::GetOrInit()->SymbolizePC(pc));
+ const SymbolizedStack *frame = symbolized_stack.get();
+ if (frame) {
+ StackTracePrinter::GetOrInit()->RenderFrame(
+ &frame_desc, " %F %L", 0, frame->info.address, &frame->info,
+ common_flags()->symbolize_vs_style,
+ common_flags()->strip_path_prefix);
}
Printf("%s\n", frame_desc.data());
frame_desc.clear();
@@ -296,35 +356,356 @@ static uptr GetGlobalSizeFromDescriptor(uptr ptr) {
return 0;
}
-static void ShowHeapOrGlobalCandidate(uptr untagged_addr, tag_t *candidate,
- tag_t *left, tag_t *right) {
- Decorator d;
- uptr mem = ShadowToMem(reinterpret_cast<uptr>(candidate));
- HwasanChunkView chunk = FindHeapChunkByAddress(mem);
+void ReportStats() {}
+
+constexpr uptr kDumpWidth = 16;
+constexpr uptr kShadowLines = 17;
+constexpr uptr kShadowDumpSize = kShadowLines * kDumpWidth;
+
+constexpr uptr kShortLines = 3;
+constexpr uptr kShortDumpSize = kShortLines * kDumpWidth;
+constexpr uptr kShortDumpOffset = (kShadowLines - kShortLines) / 2 * kDumpWidth;
+
+static uptr GetPrintTagStart(uptr addr) {
+ addr = MemToShadow(addr);
+ addr = RoundDownTo(addr, kDumpWidth);
+ addr -= kDumpWidth * (kShadowLines / 2);
+ return addr;
+}
+
+template <typename PrintTag>
+static void PrintTagInfoAroundAddr(uptr addr, uptr num_rows,
+ InternalScopedString &s,
+ PrintTag print_tag) {
+ uptr center_row_beg = RoundDownTo(addr, kDumpWidth);
+ uptr beg_row = center_row_beg - kDumpWidth * (num_rows / 2);
+ uptr end_row = center_row_beg + kDumpWidth * ((num_rows + 1) / 2);
+ for (uptr row = beg_row; row < end_row; row += kDumpWidth) {
+ s.Append(row == center_row_beg ? "=>" : " ");
+ s.AppendF("%p:", (void *)ShadowToMem(row));
+ for (uptr i = 0; i < kDumpWidth; i++) {
+ s.Append(row + i == addr ? "[" : " ");
+ print_tag(s, row + i);
+ s.Append(row + i == addr ? "]" : " ");
+ }
+ s.AppendF("\n");
+ }
+}
+
+template <typename GetTag, typename GetShortTag>
+static void PrintTagsAroundAddr(uptr addr, GetTag get_tag,
+ GetShortTag get_short_tag) {
+ InternalScopedString s;
+ addr = MemToShadow(addr);
+ s.AppendF(
+ "\nMemory tags around the buggy address (one tag corresponds to %zd "
+ "bytes):\n",
+ kShadowAlignment);
+ PrintTagInfoAroundAddr(addr, kShadowLines, s,
+ [&](InternalScopedString &s, uptr tag_addr) {
+ tag_t tag = get_tag(tag_addr);
+ s.AppendF("%02x", tag);
+ });
+
+ s.AppendF(
+ "Tags for short granules around the buggy address (one tag corresponds "
+ "to %zd bytes):\n",
+ kShadowAlignment);
+ PrintTagInfoAroundAddr(addr, kShortLines, s,
+ [&](InternalScopedString &s, uptr tag_addr) {
+ tag_t tag = get_tag(tag_addr);
+ if (tag >= 1 && tag <= kShadowAlignment) {
+ tag_t short_tag = get_short_tag(tag_addr);
+ s.AppendF("%02x", short_tag);
+ } else {
+ s.AppendF("..");
+ }
+ });
+ s.AppendF(
+ "See "
+ "https://clang.llvm.org/docs/"
+ "HardwareAssistedAddressSanitizerDesign.html#short-granules for a "
+ "description of short granule tags\n");
+ Printf("%s", s.data());
+}
+
+static uptr GetTopPc(const StackTrace *stack) {
+ return stack->size ? StackTrace::GetPreviousInstructionPc(stack->trace[0])
+ : 0;
+}
+
+namespace {
+class BaseReport {
+ public:
+ BaseReport(StackTrace *stack, bool fatal, uptr tagged_addr, uptr access_size)
+ : scoped_report(fatal),
+ stack(stack),
+ tagged_addr(tagged_addr),
+ access_size(access_size),
+ untagged_addr(UntagAddr(tagged_addr)),
+ ptr_tag(GetTagFromPointer(tagged_addr)),
+ mismatch_offset(FindMismatchOffset()),
+ heap(CopyHeapChunk()),
+ allocations(CopyAllocations()),
+ candidate(FindBufferOverflowCandidate()),
+ shadow(CopyShadow()) {}
+
+ protected:
+ struct OverflowCandidate {
+ uptr untagged_addr = 0;
+ bool after = false;
+ bool is_close = false;
+
+ struct {
+ uptr begin = 0;
+ uptr end = 0;
+ u32 thread_id = 0;
+ u32 stack_id = 0;
+ bool is_allocated = false;
+ } heap;
+ };
+
+ struct HeapAllocation {
+ HeapAllocationRecord har = {};
+ uptr ring_index = 0;
+ uptr num_matching_addrs = 0;
+ uptr num_matching_addrs_4b = 0;
+ u32 free_thread_id = 0;
+ };
+
+ struct Allocations {
+ ArrayRef<SavedStackAllocations> stack;
+ ArrayRef<HeapAllocation> heap;
+ };
+
+ struct HeapChunk {
+ uptr begin = 0;
+ uptr size = 0;
+ u32 stack_id = 0;
+ bool from_small_heap = false;
+ bool is_allocated = false;
+ };
+
+ struct Shadow {
+ uptr addr = 0;
+ tag_t tags[kShadowDumpSize] = {};
+ tag_t short_tags[kShortDumpSize] = {};
+ };
+
+ sptr FindMismatchOffset() const;
+ Shadow CopyShadow() const;
+ tag_t GetTagCopy(uptr addr) const;
+ tag_t GetShortTagCopy(uptr addr) const;
+ HeapChunk CopyHeapChunk() const;
+ Allocations CopyAllocations();
+ OverflowCandidate FindBufferOverflowCandidate() const;
+ void PrintAddressDescription() const;
+ void PrintHeapOrGlobalCandidate() const;
+ void PrintTags(uptr addr) const;
+
+ SavedStackAllocations stack_allocations_storage[16];
+ HeapAllocation heap_allocations_storage[256];
+
+ const ScopedReport scoped_report;
+ const StackTrace *stack = nullptr;
+ const uptr tagged_addr = 0;
+ const uptr access_size = 0;
+ const uptr untagged_addr = 0;
+ const tag_t ptr_tag = 0;
+ const sptr mismatch_offset = 0;
+
+ const HeapChunk heap;
+ const Allocations allocations;
+ const OverflowCandidate candidate;
+
+ const Shadow shadow;
+};
+
+sptr BaseReport::FindMismatchOffset() const {
+ if (!access_size)
+ return 0;
+ sptr offset =
+ __hwasan_test_shadow(reinterpret_cast<void *>(tagged_addr), access_size);
+ CHECK_GE(offset, 0);
+ CHECK_LT(offset, static_cast<sptr>(access_size));
+ tag_t *tag_ptr =
+ reinterpret_cast<tag_t *>(MemToShadow(untagged_addr + offset));
+ tag_t mem_tag = *tag_ptr;
+
+ if (mem_tag && mem_tag < kShadowAlignment) {
+ tag_t *granule_ptr = reinterpret_cast<tag_t *>((untagged_addr + offset) &
+ ~(kShadowAlignment - 1));
+    // If offset is 0, (untagged_addr + offset) is not necessarily aligned to
+    // a granule boundary. in_granule_offset is the offset of the leftmost
+    // accessed byte within the bad granule.
+ u8 in_granule_offset = (untagged_addr + offset) & (kShadowAlignment - 1);
+ tag_t short_tag = granule_ptr[kShadowAlignment - 1];
+ // The first mismatch was a short granule that matched the ptr_tag.
+ if (short_tag == ptr_tag) {
+ // If the access starts after the end of the short granule, then the first
+ // bad byte is the first byte of the access; otherwise it is the first
+      // byte past the end of the short granule.
+ if (mem_tag > in_granule_offset) {
+ offset += mem_tag - in_granule_offset;
+ }
+ }
+ }
+ return offset;
+}
+
+BaseReport::Shadow BaseReport::CopyShadow() const {
+ Shadow result;
+ if (!MemIsApp(untagged_addr))
+ return result;
+
+ result.addr = GetPrintTagStart(untagged_addr + mismatch_offset);
+ uptr tag_addr = result.addr;
+ uptr short_end = kShortDumpOffset + ARRAY_SIZE(shadow.short_tags);
+ for (uptr i = 0; i < ARRAY_SIZE(result.tags); ++i, ++tag_addr) {
+ if (!MemIsShadow(tag_addr))
+ continue;
+ result.tags[i] = *reinterpret_cast<tag_t *>(tag_addr);
+ if (i < kShortDumpOffset || i >= short_end)
+ continue;
+ uptr granule_addr = ShadowToMem(tag_addr);
+ if (1 <= result.tags[i] && result.tags[i] <= kShadowAlignment &&
+ IsAccessibleMemoryRange(granule_addr, kShadowAlignment)) {
+ result.short_tags[i - kShortDumpOffset] =
+ *reinterpret_cast<tag_t *>(granule_addr + kShadowAlignment - 1);
+ }
+ }
+ return result;
+}
+
+tag_t BaseReport::GetTagCopy(uptr addr) const {
+ CHECK_GE(addr, shadow.addr);
+ uptr idx = addr - shadow.addr;
+ CHECK_LT(idx, ARRAY_SIZE(shadow.tags));
+ return shadow.tags[idx];
+}
+
+tag_t BaseReport::GetShortTagCopy(uptr addr) const {
+ CHECK_GE(addr, shadow.addr + kShortDumpOffset);
+ uptr idx = addr - shadow.addr - kShortDumpOffset;
+ CHECK_LT(idx, ARRAY_SIZE(shadow.short_tags));
+ return shadow.short_tags[idx];
+}
+
+BaseReport::HeapChunk BaseReport::CopyHeapChunk() const {
+ HeapChunk result = {};
+ if (MemIsShadow(untagged_addr))
+ return result;
+ HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr);
+ result.begin = chunk.Beg();
+ if (result.begin) {
+ result.size = chunk.ActualSize();
+ result.from_small_heap = chunk.FromSmallHeap();
+ result.is_allocated = chunk.IsAllocated();
+ result.stack_id = chunk.GetAllocStackId();
+ }
+ return result;
+}
+
+BaseReport::Allocations BaseReport::CopyAllocations() {
+ if (MemIsShadow(untagged_addr))
+ return {};
+ uptr stack_allocations_count = 0;
+ uptr heap_allocations_count = 0;
+ hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
+ if (stack_allocations_count < ARRAY_SIZE(stack_allocations_storage) &&
+ t->AddrIsInStack(untagged_addr)) {
+ stack_allocations_storage[stack_allocations_count++].CopyFrom(t);
+ }
+
+ if (heap_allocations_count < ARRAY_SIZE(heap_allocations_storage)) {
+ // Scan all threads' ring buffers to find if it's a heap-use-after-free.
+ HeapAllocationRecord har;
+ uptr ring_index, num_matching_addrs, num_matching_addrs_4b;
+ if (FindHeapAllocation(t->heap_allocations(), tagged_addr, &har,
+ &ring_index, &num_matching_addrs,
+ &num_matching_addrs_4b)) {
+ auto &ha = heap_allocations_storage[heap_allocations_count++];
+ ha.har = har;
+ ha.ring_index = ring_index;
+ ha.num_matching_addrs = num_matching_addrs;
+ ha.num_matching_addrs_4b = num_matching_addrs_4b;
+ ha.free_thread_id = t->unique_id();
+ }
+ }
+ });
+
+ return {{stack_allocations_storage, stack_allocations_count},
+ {heap_allocations_storage, heap_allocations_count}};
+}
+
+BaseReport::OverflowCandidate BaseReport::FindBufferOverflowCandidate() const {
+ OverflowCandidate result = {};
+ if (MemIsShadow(untagged_addr))
+ return result;
+ // Check if this looks like a heap buffer overflow by scanning
+ // the shadow left and right and looking for the first adjacent
+ // object with a different memory tag. If that tag matches ptr_tag,
+ // check the allocator if it has a live chunk there.
+ tag_t *tag_ptr = reinterpret_cast<tag_t *>(MemToShadow(untagged_addr));
+ tag_t *candidate_tag_ptr = nullptr, *left = tag_ptr, *right = tag_ptr;
+ uptr candidate_distance = 0;
+ for (; candidate_distance < 1000; candidate_distance++) {
+ if (MemIsShadow(reinterpret_cast<uptr>(left)) && TagsEqual(ptr_tag, left)) {
+ candidate_tag_ptr = left;
+ break;
+ }
+ --left;
+ if (MemIsShadow(reinterpret_cast<uptr>(right)) &&
+ TagsEqual(ptr_tag, right)) {
+ candidate_tag_ptr = right;
+ break;
+ }
+ ++right;
+ }
+
+ constexpr auto kCloseCandidateDistance = 1;
+ result.is_close = candidate_distance <= kCloseCandidateDistance;
+
+ result.after = candidate_tag_ptr == left;
+ result.untagged_addr = ShadowToMem(reinterpret_cast<uptr>(candidate_tag_ptr));
+ HwasanChunkView chunk = FindHeapChunkByAddress(result.untagged_addr);
if (chunk.IsAllocated()) {
+ result.heap.is_allocated = true;
+ result.heap.begin = chunk.Beg();
+ result.heap.end = chunk.End();
+ result.heap.thread_id = chunk.GetAllocThreadId();
+ result.heap.stack_id = chunk.GetAllocStackId();
+ }
+ return result;
+}
+
+void BaseReport::PrintHeapOrGlobalCandidate() const {
+ Decorator d;
+ if (candidate.heap.is_allocated) {
uptr offset;
const char *whence;
- if (untagged_addr < chunk.End() && untagged_addr >= chunk.Beg()) {
- offset = untagged_addr - chunk.Beg();
+ if (candidate.heap.begin <= untagged_addr &&
+ untagged_addr < candidate.heap.end) {
+ offset = untagged_addr - candidate.heap.begin;
whence = "inside";
- } else if (candidate == left) {
- offset = untagged_addr - chunk.End();
- whence = "to the right of";
+ } else if (candidate.after) {
+ offset = untagged_addr - candidate.heap.end;
+ whence = "after";
} else {
- offset = chunk.Beg() - untagged_addr;
- whence = "to the left of";
+ offset = candidate.heap.begin - untagged_addr;
+ whence = "before";
}
Printf("%s", d.Error());
Printf("\nCause: heap-buffer-overflow\n");
Printf("%s", d.Default());
Printf("%s", d.Location());
- Printf("%p is located %zd bytes %s %zd-byte region [%p,%p)\n",
- untagged_addr, offset, whence, chunk.UsedSize(), chunk.Beg(),
- chunk.End());
+ Printf("%p is located %zd bytes %s a %zd-byte region [%p,%p)\n",
+ untagged_addr, offset, whence,
+ candidate.heap.end - candidate.heap.begin, candidate.heap.begin,
+ candidate.heap.end);
Printf("%s", d.Allocation());
- Printf("allocated here:\n");
+ Printf("allocated by thread T%u here:\n", candidate.heap.thread_id);
Printf("%s", d.Default());
- GetStackTraceFromId(chunk.GetAllocStackId()).Print();
+ GetStackTraceFromId(candidate.heap.stack_id).Print();
return;
}
// Check whether the address points into a loaded library. If so, this is
@@ -332,159 +713,133 @@ static void ShowHeapOrGlobalCandidate(uptr untagged_addr, tag_t *candidate,
const char *module_name;
uptr module_address;
Symbolizer *sym = Symbolizer::GetOrInit();
- if (sym->GetModuleNameAndOffsetForPC(mem, &module_name, &module_address)) {
+ if (sym->GetModuleNameAndOffsetForPC(candidate.untagged_addr, &module_name,
+ &module_address)) {
Printf("%s", d.Error());
Printf("\nCause: global-overflow\n");
Printf("%s", d.Default());
DataInfo info;
Printf("%s", d.Location());
- if (sym->SymbolizeData(mem, &info) && info.start) {
+ if (sym->SymbolizeData(candidate.untagged_addr, &info) && info.start) {
Printf(
- "%p is located %zd bytes to the %s of %zd-byte global variable "
+ "%p is located %zd bytes %s a %zd-byte global variable "
"%s [%p,%p) in %s\n",
untagged_addr,
- candidate == left ? untagged_addr - (info.start + info.size)
- : info.start - untagged_addr,
- candidate == left ? "right" : "left", info.size, info.name,
+ candidate.after ? untagged_addr - (info.start + info.size)
+ : info.start - untagged_addr,
+ candidate.after ? "after" : "before", info.size, info.name,
info.start, info.start + info.size, module_name);
} else {
- uptr size = GetGlobalSizeFromDescriptor(mem);
+ uptr size = GetGlobalSizeFromDescriptor(candidate.untagged_addr);
if (size == 0)
// We couldn't find the size of the global from the descriptors.
- Printf("%p is located to the %s of a global variable in (%s+0x%x)\n",
- untagged_addr, candidate == left ? "right" : "left", module_name,
- module_address);
+ Printf(
+ "%p is located %s a global variable in "
+ "\n #0 0x%x (%s+0x%x)\n",
+ untagged_addr, candidate.after ? "after" : "before",
+ candidate.untagged_addr, module_name, module_address);
else
Printf(
- "%p is located to the %s of a %zd-byte global variable in "
- "(%s+0x%x)\n",
- untagged_addr, candidate == left ? "right" : "left", size,
- module_name, module_address);
+ "%p is located %s a %zd-byte global variable in "
+ "\n #0 0x%x (%s+0x%x)\n",
+ untagged_addr, candidate.after ? "after" : "before", size,
+ candidate.untagged_addr, module_name, module_address);
}
Printf("%s", d.Default());
}
}
-void PrintAddressDescription(
- uptr tagged_addr, uptr access_size,
- StackAllocationsRingBuffer *current_stack_allocations) {
+void BaseReport::PrintAddressDescription() const {
Decorator d;
int num_descriptions_printed = 0;
- uptr untagged_addr = UntagAddr(tagged_addr);
- // Print some very basic information about the address, if it's a heap.
- HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr);
- if (uptr beg = chunk.Beg()) {
- uptr size = chunk.ActualSize();
- Printf("%s[%p,%p) is a %s %s heap chunk; "
- "size: %zd offset: %zd\n%s",
- d.Location(),
- beg, beg + size,
- chunk.FromSmallHeap() ? "small" : "large",
- chunk.IsAllocated() ? "allocated" : "unallocated",
- size, untagged_addr - beg,
+ if (MemIsShadow(untagged_addr)) {
+ Printf("%s%p is HWAsan shadow memory.\n%s", d.Location(), untagged_addr,
d.Default());
+ return;
+ }
+
+ // Print some very basic information about the address, if it's a heap.
+ if (heap.begin) {
+ Printf(
+ "%s[%p,%p) is a %s %s heap chunk; "
+ "size: %zd offset: %zd\n%s",
+ d.Location(), heap.begin, heap.begin + heap.size,
+ heap.from_small_heap ? "small" : "large",
+ heap.is_allocated ? "allocated" : "unallocated", heap.size,
+ untagged_addr - heap.begin, d.Default());
}
- tag_t addr_tag = GetTagFromPointer(tagged_addr);
+ auto announce_by_id = [](u32 thread_id) {
+ hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
+ if (thread_id == t->unique_id())
+ t->Announce();
+ });
+ };
- bool on_stack = false;
// Check stack first. If the address is on the stack of a live thread, we
// know it cannot be a heap / global overflow.
- hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
- if (t->AddrIsInStack(untagged_addr)) {
- on_stack = true;
- // TODO(fmayer): figure out how to distinguish use-after-return and
- // stack-buffer-overflow.
- Printf("%s", d.Error());
- Printf("\nCause: stack tag-mismatch\n");
- Printf("%s", d.Location());
- Printf("Address %p is located in stack of thread T%zd\n", untagged_addr,
- t->unique_id());
- Printf("%s", d.Default());
- t->Announce();
-
- auto *sa = (t == GetCurrentThread() && current_stack_allocations)
- ? current_stack_allocations
- : t->stack_allocations();
- PrintStackAllocations(sa, addr_tag, untagged_addr);
- num_descriptions_printed++;
- }
- });
+ for (const auto &sa : allocations.stack) {
+ Printf("%s", d.Error());
+ Printf("\nCause: stack tag-mismatch\n");
+ Printf("%s", d.Location());
+ Printf("Address %p is located in stack of thread T%zd\n", untagged_addr,
+ sa.thread_id());
+ Printf("%s", d.Default());
+ announce_by_id(sa.thread_id());
+ PrintStackAllocations(sa.get(), ptr_tag, untagged_addr);
+ num_descriptions_printed++;
+ }
- // Check if this looks like a heap buffer overflow by scanning
- // the shadow left and right and looking for the first adjacent
- // object with a different memory tag. If that tag matches addr_tag,
- // check the allocator if it has a live chunk there.
- tag_t *tag_ptr = reinterpret_cast<tag_t*>(MemToShadow(untagged_addr));
- tag_t *candidate = nullptr, *left = tag_ptr, *right = tag_ptr;
- uptr candidate_distance = 0;
- for (; candidate_distance < 1000; candidate_distance++) {
- if (MemIsShadow(reinterpret_cast<uptr>(left)) &&
- TagsEqual(addr_tag, left)) {
- candidate = left;
- break;
- }
- --left;
- if (MemIsShadow(reinterpret_cast<uptr>(right)) &&
- TagsEqual(addr_tag, right)) {
- candidate = right;
- break;
- }
- ++right;
+ if (allocations.stack.empty() && candidate.untagged_addr &&
+ candidate.is_close) {
+ PrintHeapOrGlobalCandidate();
+ num_descriptions_printed++;
}
- constexpr auto kCloseCandidateDistance = 1;
+ for (const auto &ha : allocations.heap) {
+ const HeapAllocationRecord har = ha.har;
+
+ Printf("%s", d.Error());
+ Printf("\nCause: use-after-free\n");
+ Printf("%s", d.Location());
+ Printf("%p is located %zd bytes inside a %zd-byte region [%p,%p)\n",
+ untagged_addr, untagged_addr - UntagAddr(har.tagged_addr),
+ har.requested_size, UntagAddr(har.tagged_addr),
+ UntagAddr(har.tagged_addr) + har.requested_size);
+ Printf("%s", d.Allocation());
+ Printf("freed by thread T%u here:\n", ha.free_thread_id);
+ Printf("%s", d.Default());
+ GetStackTraceFromId(har.free_context_id).Print();
- if (!on_stack && candidate && candidate_distance <= kCloseCandidateDistance) {
- ShowHeapOrGlobalCandidate(untagged_addr, candidate, left, right);
+ Printf("%s", d.Allocation());
+ Printf("previously allocated by thread T%u here:\n", har.alloc_thread_id);
+ Printf("%s", d.Default());
+ GetStackTraceFromId(har.alloc_context_id).Print();
+
+ // Print a developer note: the index of this heap object
+ // in the thread's deallocation ring buffer.
+ Printf("hwasan_dev_note_heap_rb_distance: %zd %zd\n", ha.ring_index + 1,
+ flags()->heap_history_size);
+ Printf("hwasan_dev_note_num_matching_addrs: %zd\n", ha.num_matching_addrs);
+ Printf("hwasan_dev_note_num_matching_addrs_4b: %zd\n",
+ ha.num_matching_addrs_4b);
+
+ announce_by_id(ha.free_thread_id);
+ // TODO: announce_by_id(har.alloc_thread_id);
num_descriptions_printed++;
}
- hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
- // Scan all threads' ring buffers to find if it's a heap-use-after-free.
- HeapAllocationRecord har;
- uptr ring_index, num_matching_addrs, num_matching_addrs_4b;
- if (FindHeapAllocation(t->heap_allocations(), tagged_addr, &har,
- &ring_index, &num_matching_addrs,
- &num_matching_addrs_4b)) {
- Printf("%s", d.Error());
- Printf("\nCause: use-after-free\n");
- Printf("%s", d.Location());
- Printf("%p is located %zd bytes inside of %zd-byte region [%p,%p)\n",
- untagged_addr, untagged_addr - UntagAddr(har.tagged_addr),
- har.requested_size, UntagAddr(har.tagged_addr),
- UntagAddr(har.tagged_addr) + har.requested_size);
- Printf("%s", d.Allocation());
- Printf("freed by thread T%zd here:\n", t->unique_id());
- Printf("%s", d.Default());
- GetStackTraceFromId(har.free_context_id).Print();
-
- Printf("%s", d.Allocation());
- Printf("previously allocated here:\n", t);
- Printf("%s", d.Default());
- GetStackTraceFromId(har.alloc_context_id).Print();
-
- // Print a developer note: the index of this heap object
- // in the thread's deallocation ring buffer.
- Printf("hwasan_dev_note_heap_rb_distance: %zd %zd\n", ring_index + 1,
- flags()->heap_history_size);
- Printf("hwasan_dev_note_num_matching_addrs: %zd\n", num_matching_addrs);
- Printf("hwasan_dev_note_num_matching_addrs_4b: %zd\n",
- num_matching_addrs_4b);
-
- t->Announce();
- num_descriptions_printed++;
- }
- });
-
- if (candidate && num_descriptions_printed == 0) {
- ShowHeapOrGlobalCandidate(untagged_addr, candidate, left, right);
+ if (candidate.untagged_addr && num_descriptions_printed == 0) {
+ PrintHeapOrGlobalCandidate();
num_descriptions_printed++;
}
  // Print the remaining threads, as extra information, one line per thread.
- hwasanThreadList().VisitAllLiveThreads([&](Thread *t) { t->Announce(); });
+ if (flags()->print_live_threads_info) {
+ Printf("\n");
+ hwasanThreadList().VisitAllLiveThreads([&](Thread *t) { t->Announce(); });
+ }
if (!num_descriptions_printed)
// We exhausted our possibilities. Bail out.
@@ -497,90 +852,80 @@ void PrintAddressDescription(
}
}
-void ReportStats() {}
-
-static void PrintTagInfoAroundAddr(tag_t *tag_ptr, uptr num_rows,
- void (*print_tag)(InternalScopedString &s,
- tag_t *tag)) {
- const uptr row_len = 16; // better be power of two.
- tag_t *center_row_beg = reinterpret_cast<tag_t *>(
- RoundDownTo(reinterpret_cast<uptr>(tag_ptr), row_len));
- tag_t *beg_row = center_row_beg - row_len * (num_rows / 2);
- tag_t *end_row = center_row_beg + row_len * ((num_rows + 1) / 2);
- InternalScopedString s;
- for (tag_t *row = beg_row; row < end_row; row += row_len) {
- s.append("%s", row == center_row_beg ? "=>" : " ");
- s.append("%p:", row);
- for (uptr i = 0; i < row_len; i++) {
- s.append("%s", row + i == tag_ptr ? "[" : " ");
- print_tag(s, &row[i]);
- s.append("%s", row + i == tag_ptr ? "]" : " ");
- }
- s.append("\n");
+void BaseReport::PrintTags(uptr addr) const {
+ if (shadow.addr) {
+ PrintTagsAroundAddr(
+ addr, [&](uptr addr) { return GetTagCopy(addr); },
+ [&](uptr addr) { return GetShortTagCopy(addr); });
}
- Printf("%s", s.data());
}
-static void PrintTagsAroundAddr(tag_t *tag_ptr) {
- Printf(
- "Memory tags around the buggy address (one tag corresponds to %zd "
- "bytes):\n", kShadowAlignment);
- PrintTagInfoAroundAddr(tag_ptr, 17, [](InternalScopedString &s, tag_t *tag) {
- s.append("%02x", *tag);
- });
-
- Printf(
- "Tags for short granules around the buggy address (one tag corresponds "
- "to %zd bytes):\n",
- kShadowAlignment);
- PrintTagInfoAroundAddr(tag_ptr, 3, [](InternalScopedString &s, tag_t *tag) {
- if (*tag >= 1 && *tag <= kShadowAlignment) {
- uptr granule_addr = ShadowToMem(reinterpret_cast<uptr>(tag));
- s.append("%02x",
- *reinterpret_cast<u8 *>(granule_addr + kShadowAlignment - 1));
- } else {
- s.append("..");
- }
- });
- Printf(
- "See "
- "https://clang.llvm.org/docs/"
- "HardwareAssistedAddressSanitizerDesign.html#short-granules for a "
- "description of short granule tags\n");
-}
+class InvalidFreeReport : public BaseReport {
+ public:
+ InvalidFreeReport(StackTrace *stack, uptr tagged_addr)
+ : BaseReport(stack, flags()->halt_on_error, tagged_addr, 0) {}
+ ~InvalidFreeReport();
-void ReportInvalidFree(StackTrace *stack, uptr tagged_addr) {
- ScopedReport R(flags()->halt_on_error);
+ private:
+};
- uptr untagged_addr = UntagAddr(tagged_addr);
- tag_t ptr_tag = GetTagFromPointer(tagged_addr);
- tag_t *tag_ptr = reinterpret_cast<tag_t*>(MemToShadow(untagged_addr));
- tag_t mem_tag = *tag_ptr;
+InvalidFreeReport::~InvalidFreeReport() {
Decorator d;
Printf("%s", d.Error());
- uptr pc = stack->size ? stack->trace[0] : 0;
+ uptr pc = GetTopPc(stack);
const char *bug_type = "invalid-free";
- Report("ERROR: %s: %s on address %p at pc %p\n", SanitizerToolName, bug_type,
- untagged_addr, pc);
+ const Thread *thread = GetCurrentThread();
+ if (thread) {
+ Report("ERROR: %s: %s on address %p at pc %p on thread T%zd\n",
+ SanitizerToolName, bug_type, untagged_addr, pc, thread->unique_id());
+ } else {
+ Report("ERROR: %s: %s on address %p at pc %p on unknown thread\n",
+ SanitizerToolName, bug_type, untagged_addr, pc);
+ }
Printf("%s", d.Access());
- Printf("tags: %02x/%02x (ptr/mem)\n", ptr_tag, mem_tag);
+ if (shadow.addr) {
+ Printf("tags: %02x/%02x (ptr/mem)\n", ptr_tag,
+ GetTagCopy(MemToShadow(untagged_addr)));
+ }
Printf("%s", d.Default());
stack->Print();
- PrintAddressDescription(tagged_addr, 0, nullptr);
-
- PrintTagsAroundAddr(tag_ptr);
-
+ PrintAddressDescription();
+ PrintTags(untagged_addr);
+ MaybePrintAndroidHelpUrl();
ReportErrorSummary(bug_type, stack);
}
-void ReportTailOverwritten(StackTrace *stack, uptr tagged_addr, uptr orig_size,
- const u8 *expected) {
- uptr tail_size = kShadowAlignment - (orig_size % kShadowAlignment);
- ScopedReport R(flags()->halt_on_error);
+class TailOverwrittenReport : public BaseReport {
+ public:
+ explicit TailOverwrittenReport(StackTrace *stack, uptr tagged_addr,
+ uptr orig_size, const u8 *expected)
+ : BaseReport(stack, flags()->halt_on_error, tagged_addr, 0),
+ orig_size(orig_size),
+ tail_size(kShadowAlignment - (orig_size % kShadowAlignment)) {
+ CHECK_GT(tail_size, 0U);
+ CHECK_LT(tail_size, kShadowAlignment);
+ internal_memcpy(tail_copy,
+ reinterpret_cast<u8 *>(untagged_addr + orig_size),
+ tail_size);
+ internal_memcpy(actual_expected, expected, tail_size);
+    // The short granule tag is stashed in the last byte of the magic string.
+    // To avoid confusion, make the expected magic string contain the short
+    // granule tag.
+ if (orig_size % kShadowAlignment != 0)
+ actual_expected[tail_size - 1] = ptr_tag;
+ }
+ ~TailOverwrittenReport();
+
+ private:
+ const uptr orig_size = 0;
+ const uptr tail_size = 0;
+ u8 actual_expected[kShadowAlignment] = {};
+ u8 tail_copy[kShadowAlignment] = {};
+};
+
+TailOverwrittenReport::~TailOverwrittenReport() {
Decorator d;
- uptr untagged_addr = UntagAddr(tagged_addr);
Printf("%s", d.Error());
const char *bug_type = "allocation-tail-overwritten";
Report("ERROR: %s: %s; heap object [%p,%p) of size %zd\n", SanitizerToolName,
@@ -593,112 +938,137 @@ void ReportTailOverwritten(StackTrace *stack, uptr tagged_addr, uptr orig_size,
Printf("deallocated here:\n");
Printf("%s", d.Default());
stack->Print();
- HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr);
- if (chunk.Beg()) {
+ if (heap.begin) {
Printf("%s", d.Allocation());
Printf("allocated here:\n");
Printf("%s", d.Default());
- GetStackTraceFromId(chunk.GetAllocStackId()).Print();
+ GetStackTraceFromId(heap.stack_id).Print();
}
InternalScopedString s;
- CHECK_GT(tail_size, 0U);
- CHECK_LT(tail_size, kShadowAlignment);
- u8 *tail = reinterpret_cast<u8*>(untagged_addr + orig_size);
- s.append("Tail contains: ");
- for (uptr i = 0; i < kShadowAlignment - tail_size; i++)
- s.append(".. ");
- for (uptr i = 0; i < tail_size; i++)
- s.append("%02x ", tail[i]);
- s.append("\n");
- s.append("Expected: ");
- for (uptr i = 0; i < kShadowAlignment - tail_size; i++)
- s.append(".. ");
+ u8 *tail = tail_copy;
+ s.AppendF("Tail contains: ");
+ for (uptr i = 0; i < kShadowAlignment - tail_size; i++) s.AppendF(".. ");
+ for (uptr i = 0; i < tail_size; i++) s.AppendF("%02x ", tail[i]);
+ s.AppendF("\n");
+ s.AppendF("Expected: ");
+ for (uptr i = 0; i < kShadowAlignment - tail_size; i++) s.AppendF(".. ");
+ for (uptr i = 0; i < tail_size; i++) s.AppendF("%02x ", actual_expected[i]);
+ s.AppendF("\n");
+ s.AppendF(" ");
+ for (uptr i = 0; i < kShadowAlignment - tail_size; i++) s.AppendF(" ");
for (uptr i = 0; i < tail_size; i++)
- s.append("%02x ", expected[i]);
- s.append("\n");
- s.append(" ");
- for (uptr i = 0; i < kShadowAlignment - tail_size; i++)
- s.append(" ");
- for (uptr i = 0; i < tail_size; i++)
- s.append("%s ", expected[i] != tail[i] ? "^^" : " ");
-
- s.append("\nThis error occurs when a buffer overflow overwrites memory\n"
- "to the right of a heap object, but within the %zd-byte granule, e.g.\n"
- " char *x = new char[20];\n"
- " x[25] = 42;\n"
- "%s does not detect such bugs in uninstrumented code at the time of write,"
- "\nbut can detect them at the time of free/delete.\n"
- "To disable this feature set HWASAN_OPTIONS=free_checks_tail_magic=0\n",
- kShadowAlignment, SanitizerToolName);
+ s.AppendF("%s ", actual_expected[i] != tail[i] ? "^^" : " ");
+
+ s.AppendF(
+ "\nThis error occurs when a buffer overflow overwrites memory\n"
+ "after a heap object, but within the %zd-byte granule, e.g.\n"
+ " char *x = new char[20];\n"
+ " x[25] = 42;\n"
+ "%s does not detect such bugs in uninstrumented code at the time of "
+ "write,"
+ "\nbut can detect them at the time of free/delete.\n"
+ "To disable this feature set HWASAN_OPTIONS=free_checks_tail_magic=0\n",
+ kShadowAlignment, SanitizerToolName);
Printf("%s", s.data());
GetCurrentThread()->Announce();
-
- tag_t *tag_ptr = reinterpret_cast<tag_t*>(MemToShadow(untagged_addr));
- PrintTagsAroundAddr(tag_ptr);
-
+ PrintTags(untagged_addr);
+ MaybePrintAndroidHelpUrl();
ReportErrorSummary(bug_type, stack);
}
-void ReportTagMismatch(StackTrace *stack, uptr tagged_addr, uptr access_size,
- bool is_store, bool fatal, uptr *registers_frame) {
- ScopedReport R(fatal);
- SavedStackAllocations current_stack_allocations(
- GetCurrentThread()->stack_allocations());
+class TagMismatchReport : public BaseReport {
+ public:
+ explicit TagMismatchReport(StackTrace *stack, uptr tagged_addr,
+ uptr access_size, bool is_store, bool fatal,
+ uptr *registers_frame)
+ : BaseReport(stack, fatal, tagged_addr, access_size),
+ is_store(is_store),
+ registers_frame(registers_frame) {}
+  ~TagMismatchReport();
+
+ private:
+ const bool is_store;
+ const uptr *registers_frame;
+};
+
+TagMismatchReport::~TagMismatchReport() {
Decorator d;
- Printf("%s", d.Error());
- uptr untagged_addr = UntagAddr(tagged_addr);
// TODO: when possible, try to print heap-use-after-free, etc.
const char *bug_type = "tag-mismatch";
- uptr pc = stack->size ? stack->trace[0] : 0;
+ uptr pc = GetTopPc(stack);
+ Printf("%s", d.Error());
Report("ERROR: %s: %s on address %p at pc %p\n", SanitizerToolName, bug_type,
untagged_addr, pc);
Thread *t = GetCurrentThread();
- sptr offset =
- __hwasan_test_shadow(reinterpret_cast<void *>(tagged_addr), access_size);
- CHECK(offset >= 0 && offset < static_cast<sptr>(access_size));
- tag_t ptr_tag = GetTagFromPointer(tagged_addr);
- tag_t *tag_ptr =
- reinterpret_cast<tag_t *>(MemToShadow(untagged_addr + offset));
- tag_t mem_tag = *tag_ptr;
+ tag_t mem_tag = GetTagCopy(MemToShadow(untagged_addr + mismatch_offset));
Printf("%s", d.Access());
- Printf("%s of size %zu at %p tags: %02x/%02x (ptr/mem) in thread T%zd\n",
- is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag,
- mem_tag, t->unique_id());
- if (offset != 0)
- Printf("Invalid access starting at offset [%zu, %zu)\n", offset,
- Min(access_size, static_cast<uptr>(offset) + (1 << kShadowScale)));
+ if (mem_tag && mem_tag < kShadowAlignment) {
+ tag_t short_tag =
+ GetShortTagCopy(MemToShadow(untagged_addr + mismatch_offset));
+ Printf(
+ "%s of size %zu at %p tags: %02x/%02x(%02x) (ptr/mem) in thread T%zd\n",
+ is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag,
+ mem_tag, short_tag, t->unique_id());
+ } else {
+ Printf("%s of size %zu at %p tags: %02x/%02x (ptr/mem) in thread T%zd\n",
+ is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag,
+ mem_tag, t->unique_id());
+ }
+ if (mismatch_offset)
+ Printf("Invalid access starting at offset %zu\n", mismatch_offset);
Printf("%s", d.Default());
stack->Print();
- PrintAddressDescription(tagged_addr, access_size,
- current_stack_allocations.get());
+ PrintAddressDescription();
t->Announce();
- PrintTagsAroundAddr(tag_ptr);
+ PrintTags(untagged_addr + mismatch_offset);
if (registers_frame)
ReportRegisters(registers_frame, pc);
+ MaybePrintAndroidHelpUrl();
ReportErrorSummary(bug_type, stack);
}
+} // namespace
+
+void ReportInvalidFree(StackTrace *stack, uptr tagged_addr) {
+ InvalidFreeReport R(stack, tagged_addr);
+}
+
+void ReportTailOverwritten(StackTrace *stack, uptr tagged_addr, uptr orig_size,
+ const u8 *expected) {
+ TailOverwrittenReport R(stack, tagged_addr, orig_size, expected);
+}
+
+void ReportTagMismatch(StackTrace *stack, uptr tagged_addr, uptr access_size,
+ bool is_store, bool fatal, uptr *registers_frame) {
+ TagMismatchReport R(stack, tagged_addr, access_size, is_store, fatal,
+ registers_frame);
+}
// See the frame breakdown defined in __hwasan_tag_mismatch (from
-// hwasan_tag_mismatch_aarch64.S).
-void ReportRegisters(uptr *frame, uptr pc) {
- Printf("Registers where the failure occurred (pc %p):\n", pc);
+// hwasan_tag_mismatch_{aarch64,riscv64}.S).
+void ReportRegisters(const uptr *frame, uptr pc) {
+ Printf("\nRegisters where the failure occurred (pc %p):\n", pc);
  // We explicitly print a single line (4 registers/line) each iteration to
  // reduce the number of logcat error messages printed. Each Printf() will
  // result in a new logcat line, irrespective of whether a newline is present,
  // and so we wish to reduce the number of Printf() calls we have to make.
+#if defined(__aarch64__)
Printf(" x0 %016llx x1 %016llx x2 %016llx x3 %016llx\n",
frame[0], frame[1], frame[2], frame[3]);
+#elif SANITIZER_RISCV64
+ Printf(" sp %016llx x1 %016llx x2 %016llx x3 %016llx\n",
+ reinterpret_cast<const u8 *>(frame) + 256, frame[1], frame[2],
+ frame[3]);
+#endif
Printf(" x4 %016llx x5 %016llx x6 %016llx x7 %016llx\n",
frame[4], frame[5], frame[6], frame[7]);
Printf(" x8 %016llx x9 %016llx x10 %016llx x11 %016llx\n",
@@ -713,8 +1083,14 @@ void ReportRegisters(uptr *frame, uptr pc) {
frame[24], frame[25], frame[26], frame[27]);
// hwasan_check* reduces the stack pointer by 256, then __hwasan_tag_mismatch
// passes it to this function.
+#if defined(__aarch64__)
Printf(" x28 %016llx x29 %016llx x30 %016llx sp %016llx\n", frame[28],
- frame[29], frame[30], reinterpret_cast<u8 *>(frame) + 256);
+ frame[29], frame[30], reinterpret_cast<const u8 *>(frame) + 256);
+#elif SANITIZER_RISCV64
+ Printf(" x28 %016llx x29 %016llx x30 %016llx x31 %016llx\n", frame[28],
+ frame[29], frame[30], frame[31]);
+#else
+#endif
}
} // namespace __hwasan
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_report.h b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_report.h
index de86c38fc01f..bb9492a18cf9 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_report.h
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_report.h
@@ -26,7 +26,7 @@ void ReportTagMismatch(StackTrace *stack, uptr addr, uptr access_size,
void ReportInvalidFree(StackTrace *stack, uptr addr);
void ReportTailOverwritten(StackTrace *stack, uptr addr, uptr orig_size,
const u8 *expected);
-void ReportRegisters(uptr *registers_frame, uptr pc);
+void ReportRegisters(const uptr *registers_frame, uptr pc);
void ReportAtExitStatistics();
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_setjmp.S b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_setjmp_aarch64.S
index 381af63363cc..0c0abb6de861 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_setjmp.S
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_setjmp_aarch64.S
@@ -1,4 +1,4 @@
-//===-- hwasan_setjmp.S --------------------------------------------------------===//
+//===-- hwasan_setjmp_aarch64.S -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -29,35 +29,39 @@
// Hence we have to write this function in assembly.
.section .text
-.file "hwasan_setjmp.S"
+.file "hwasan_setjmp_aarch64.S"
-.global __interceptor_setjmp
-ASM_TYPE_FUNCTION(__interceptor_setjmp)
-__interceptor_setjmp:
+.global ASM_WRAPPER_NAME(setjmp)
+ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(setjmp))
+ASM_WRAPPER_NAME(setjmp):
CFI_STARTPROC
BTI_C
mov x1, #0
- b __interceptor_sigsetjmp
+ b ASM_WRAPPER_NAME(sigsetjmp)
CFI_ENDPROC
-ASM_SIZE(__interceptor_setjmp)
+ASM_SIZE(ASM_WRAPPER_NAME(setjmp))
+
+ASM_INTERCEPTOR_TRAMPOLINE(setjmp)
#if SANITIZER_ANDROID
// Bionic also defines a function `setjmp` that calls `sigsetjmp` saving the
// current signal.
-.global __interceptor_setjmp_bionic
-ASM_TYPE_FUNCTION(__interceptor_setjmp_bionic)
-__interceptor_setjmp_bionic:
+.global ASM_WRAPPER_NAME(setjmp_bionic)
+ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(setjmp_bionic))
+ASM_WRAPPER_NAME(setjmp_bionic):
CFI_STARTPROC
BTI_C
mov x1, #1
- b __interceptor_sigsetjmp
+ b ASM_WRAPPER_NAME(sigsetjmp)
CFI_ENDPROC
-ASM_SIZE(__interceptor_setjmp_bionic)
+ASM_SIZE(ASM_WRAPPER_NAME(setjmp_bionic))
+
+ASM_INTERCEPTOR_TRAMPOLINE(setjmp_bionic)
#endif
-.global __interceptor_sigsetjmp
-ASM_TYPE_FUNCTION(__interceptor_sigsetjmp)
-__interceptor_sigsetjmp:
+.global ASM_WRAPPER_NAME(sigsetjmp)
+ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(sigsetjmp))
+ASM_WRAPPER_NAME(sigsetjmp):
CFI_STARTPROC
BTI_C
stp x19, x20, [x0, #0<<3]
@@ -77,27 +81,19 @@ __interceptor_sigsetjmp:
// This function is defined in hwasan_interceptors.cc
b __sigjmp_save
CFI_ENDPROC
-ASM_SIZE(__interceptor_sigsetjmp)
+ASM_SIZE(ASM_WRAPPER_NAME(sigsetjmp))
+ASM_INTERCEPTOR_TRAMPOLINE(sigsetjmp)
-.macro ALIAS first second
- .globl \second
- .equ \second\(), \first
-.endm
#if SANITIZER_ANDROID
-ALIAS __interceptor_sigsetjmp, sigsetjmp
-.weak sigsetjmp
-
-ALIAS __interceptor_setjmp_bionic, setjmp
-.weak setjmp
+ASM_TRAMPOLINE_ALIAS(sigsetjmp, sigsetjmp)
+ASM_TRAMPOLINE_ALIAS(setjmp, setjmp_bionic)
#else
-ALIAS __interceptor_sigsetjmp, __sigsetjmp
-.weak __sigsetjmp
+ASM_TRAMPOLINE_ALIAS(__sigsetjmp, sigsetjmp)
#endif
-ALIAS __interceptor_setjmp, _setjmp
-.weak _setjmp
+ASM_TRAMPOLINE_ALIAS(_setjmp, setjmp)
#endif
// We do not need executable stack.
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_setjmp_riscv64.S b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_setjmp_riscv64.S
new file mode 100644
index 000000000000..c01f4e25e8a4
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_setjmp_riscv64.S
@@ -0,0 +1,92 @@
+//===-- hwasan_setjmp_riscv64.S -------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+// setjmp interceptor for RISC-V.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_asm.h"
+#include "builtins/assembly.h"
+
+#if HWASAN_WITH_INTERCEPTORS && defined(__riscv) && (__riscv_xlen == 64)
+#include "sanitizer_common/sanitizer_platform.h"
+
+// We want to save the context of the calling function.
+// That requires
+// 1) No modification of the link register by this function.
+// 2) No modification of the stack pointer by this function.
+// 3) No modification of any other saved register (not really going to occur,
+//    and hence not much of a worry).
+//
+// There's essentially no way to ensure that the compiler will not modify the
+// stack pointer when compiling a C function.
+// Hence we have to write this function in assembly.
+
+.section .text
+.file "hwasan_setjmp_riscv64.S"
+
+.global ASM_WRAPPER_NAME(setjmp)
+ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(setjmp))
+ASM_WRAPPER_NAME(setjmp):
+ CFI_STARTPROC
+ addi x11, x0, 0
+ tail ASM_WRAPPER_NAME(sigsetjmp)
+ CFI_ENDPROC
+ASM_SIZE(ASM_WRAPPER_NAME(setjmp))
+
+.global ASM_WRAPPER_NAME(sigsetjmp)
+ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(sigsetjmp))
+ASM_WRAPPER_NAME(sigsetjmp):
+ CFI_STARTPROC
+ sd ra, 0<<3(x10)
+ sd s0, 1<<3(x10)
+ sd s1, 2<<3(x10)
+ sd s2, 3<<3(x10)
+ sd s3, 4<<3(x10)
+ sd s4, 5<<3(x10)
+ sd s5, 6<<3(x10)
+ sd s6, 7<<3(x10)
+ sd s7, 8<<3(x10)
+ sd s8, 9<<3(x10)
+ sd s9, 10<<3(x10)
+ sd s10, 11<<3(x10)
+ sd s11, 12<<3(x10)
+ sd sp, 13<<3(x10)
+#if __riscv_float_abi_double
+ fsd fs0, 14<<3(x10)
+ fsd fs1, 15<<3(x10)
+ fsd fs2, 16<<3(x10)
+ fsd fs3, 17<<3(x10)
+ fsd fs4, 18<<3(x10)
+ fsd fs5, 19<<3(x10)
+ fsd fs6, 20<<3(x10)
+ fsd fs7, 21<<3(x10)
+ fsd fs8, 22<<3(x10)
+ fsd fs9, 23<<3(x10)
+ fsd fs10, 24<<3(x10)
+ fsd fs11, 25<<3(x10)
+#elif __riscv_float_abi_soft
+#else
+# error "Unsupported case"
+#endif
+  // The second argument to __sigjmp_save (savemask) is always set: the setjmp
+  // wrapper above sets it to zero (false) for us.
+  // __sigjmp_save is defined in hwasan_interceptors.cc.
+ tail __sigjmp_save
+ CFI_ENDPROC
+ASM_SIZE(ASM_WRAPPER_NAME(sigsetjmp))
+
+ASM_INTERCEPTOR_TRAMPOLINE(sigsetjmp)
+ASM_TRAMPOLINE_ALIAS(__sigsetjmp, sigsetjmp)
+ASM_INTERCEPTOR_TRAMPOLINE(setjmp)
+ASM_TRAMPOLINE_ALIAS(_setjmp, setjmp)
+#endif
+
+// We do not need executable stack.
+NO_EXEC_STACK_DIRECTIVE
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_setjmp_x86_64.S b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_setjmp_x86_64.S
new file mode 100644
index 000000000000..9804e8d7ceca
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_setjmp_x86_64.S
@@ -0,0 +1,79 @@
+//===-- hwasan_setjmp_x86_64.S --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// setjmp interceptor for x86_64.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_asm.h"
+
+#if HWASAN_WITH_INTERCEPTORS && defined(__x86_64__)
+#include "sanitizer_common/sanitizer_platform.h"
+
+// We want to save the context of the calling function.
+// That requires
+// 1) No modification of the return address by this function.
+// 2) No modification of the stack pointer by this function.
+// 3) No modification of any other saved register (not really going to occur,
+//    and hence not much of a worry).
+//
+// There's essentially no way to ensure that the compiler will not modify the
+// stack pointer when compiling a C function.
+// Hence we have to write this function in assembly.
+//
+// TODO: Handle Intel CET.
+
+.section .text
+.file "hwasan_setjmp_x86_64.S"
+
+.global ASM_WRAPPER_NAME(setjmp)
+ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(setjmp))
+ASM_WRAPPER_NAME(setjmp):
+ CFI_STARTPROC
+ _CET_ENDBR
+ xorl %esi, %esi
+ jmp .Linterceptor_sigsetjmp
+ CFI_ENDPROC
+ASM_SIZE(ASM_WRAPPER_NAME(setjmp))
+
+.global ASM_WRAPPER_NAME(sigsetjmp)
+ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(sigsetjmp))
+ASM_WRAPPER_NAME(sigsetjmp):
+.Linterceptor_sigsetjmp:
+ CFI_STARTPROC
+ _CET_ENDBR
+
+ // Save callee save registers.
+ mov %rbx, (0*8)(%rdi)
+ mov %rbp, (1*8)(%rdi)
+ mov %r12, (2*8)(%rdi)
+ mov %r13, (3*8)(%rdi)
+ mov %r14, (4*8)(%rdi)
+ mov %r15, (5*8)(%rdi)
+
+ // Save SP as it was in caller's frame.
+ lea 8(%rsp), %rdx
+ mov %rdx, (6*8)(%rdi)
+
+ // Save return address.
+ mov (%rsp), %rax
+ mov %rax, (7*8)(%rdi)
+
+ jmp __sigjmp_save
+
+ CFI_ENDPROC
+ASM_SIZE(ASM_WRAPPER_NAME(sigsetjmp))
+
+ASM_INTERCEPTOR_TRAMPOLINE(sigsetjmp)
+ASM_TRAMPOLINE_ALIAS(__sigsetjmp, sigsetjmp)
+ASM_INTERCEPTOR_TRAMPOLINE(setjmp)
+ASM_TRAMPOLINE_ALIAS(_setjmp, setjmp)
+#endif
+
+// We do not need executable stack.
+NO_EXEC_STACK_DIRECTIVE
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_tag_mismatch_aarch64.S b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_tag_mismatch_aarch64.S
index bcb0df420190..fd060c51cd8e 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_tag_mismatch_aarch64.S
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_tag_mismatch_aarch64.S
@@ -89,16 +89,16 @@ __hwasan_tag_mismatch:
ubfx x16, x0, #4, #52
ldrb w16, [x9, x16]
cmp w16, #0xf
- b.hi __hwasan_tag_mismatch_v2
+ b.hi mismatch
cmp w16, w17
- b.lo __hwasan_tag_mismatch_v2
+ b.lo mismatch
// Load the real tag from the last byte of the granule and compare against
// the pointer tag.
orr x16, x0, #0xf
ldrb w16, [x16]
cmp x16, x0, lsr #56
- b.ne __hwasan_tag_mismatch_v2
+ b.ne mismatch
// Restore x0, x1 and sp to their values from before the __hwasan_tag_mismatch
// call and resume execution.
@@ -108,6 +108,8 @@ __hwasan_tag_mismatch:
.global __hwasan_tag_mismatch_v2
.type __hwasan_tag_mismatch_v2, %function
__hwasan_tag_mismatch_v2:
+// Avoid using a global label, to prevent "relocation out of range" errors.
+mismatch:
CFI_STARTPROC
BTI_J
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_tag_mismatch_riscv64.S b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_tag_mismatch_riscv64.S
new file mode 100644
index 000000000000..487a042405b6
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_tag_mismatch_riscv64.S
@@ -0,0 +1,132 @@
+#include "sanitizer_common/sanitizer_asm.h"
+
+// The content of this file is RISCV64-only:
+#if defined(__riscv) && (__riscv_xlen == 64)
+
+// The responsibility of the HWASan entry point in compiler-rt is primarily to
+// readjust the stack from the callee and save the current register values to
+// the stack.
+// This entry point function should be called from a __hwasan_check_* symbol.
+// These are generated during a lowering pass in the backend, and are found in
+// RISCVAsmPrinter::EmitHwasanMemaccessSymbols(). Please look there for
+// further information.
+// The __hwasan_check_* caller of this function should have expanded the stack
+// and saved the previous values of x10(arg0), x11(arg1), x1(ra), and x8(fp).
+// This function will "consume" these saved values and treat them as part of
+// its own stack frame. In this sense, the __hwasan_check_* callee and this
+// function "share" a stack frame. This allows us to omit unwinding
+// information (.cfi_*) from every __hwasan_check_* function, thereby reducing
+// binary size. This is particularly important as __hwasan_check_* instances
+// are duplicated in every translation unit where HWASan is enabled.
+// This function calls HwasanTagMismatch to step back into the C++ code that
+// completes the stack unwinding and error printing. This function is not
+// permitted to return.
+
+
+// | ... |
+// | ... |
+// | Previous stack frames... |
+// +=================================+
+// | ... |
+// | |
+// | Stack frame space for x12 - x31.|
+// | |
+// | ... |
+// +---------------------------------+ <-- [SP + 96]
+// | Saved x11(arg1), as |
+// | __hwasan_check_* clobbers it. |
+// +---------------------------------+ <-- [SP + 88]
+// | Saved x10(arg0), as |
+// | __hwasan_check_* clobbers it. |
+// +---------------------------------+ <-- [SP + 80]
+// | |
+// | Stack frame space for x9. |
+// +---------------------------------+ <-- [SP + 72]
+// | |
+// | Saved x8(fp), as |
+// | __hwasan_check_* clobbers it. |
+// +---------------------------------+ <-- [SP + 64]
+// | ... |
+// | |
+// | Stack frame space for x2 - x7. |
+// | |
+// | ... |
+// +---------------------------------+ <-- [SP + 16]
+// | Return address (x1) for caller |
+// | of __hwasan_check_*. |
+// +---------------------------------+ <-- [SP + 8]
+// | Reserved place for x0, possibly |
+// | junk, since we don't save it. |
+// +---------------------------------+ <-- [x2 / SP]
+
+// This function takes two arguments:
+// * x10/a0: The data address.
+// * x11/a1: The encoded access info for the failing access.
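+//
+// A sketch of how the C++ side decodes this value, mirroring
+// __hwasan_tag_mismatch4 in hwasan.cpp (the exact bit layout is stated here
+// as an assumption, not a contract):
+//
+//   bool is_store = access_info & 0x10;            // bit 4: store vs. load
+//   uptr size = ((access_info & 0xf) == 0xf)
+//                   ? outsize                      // variable-sized access
+//                   : 1UL << (access_info & 0xf);  // log2-encoded size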
+
+.section .text
+.file "hwasan_tag_mismatch_riscv64.S"
+
+.global __hwasan_tag_mismatch_v2
+ASM_TYPE_FUNCTION(__hwasan_tag_mismatch_v2)
+__hwasan_tag_mismatch_v2:
+ CFI_STARTPROC
+
+ // Set the CFA to be the return address for caller of __hwasan_check_*. Note
+ // that we do not emit CFI predicates to describe the contents of this stack
+ // frame, as this proxy entry point should never be debugged. The contents
+ // are static and are handled by the unwinder after calling
+  // __hwasan_tag_mismatch. The frame pointer is already correctly set up
+  // by __hwasan_check_*.
+ addi fp, sp, 256
+ CFI_DEF_CFA(fp, 0)
+ CFI_OFFSET(ra, -248)
+ CFI_OFFSET(fp, -192)
+
+ // Save the rest of the registers into the preallocated space left by
+ // __hwasan_check.
+ sd x31, 248(sp)
+ sd x30, 240(sp)
+ sd x29, 232(sp)
+ sd x28, 224(sp)
+ sd x27, 216(sp)
+ sd x26, 208(sp)
+ sd x25, 200(sp)
+ sd x24, 192(sp)
+ sd x23, 184(sp)
+ sd x22, 176(sp)
+ sd x21, 168(sp)
+ sd x20, 160(sp)
+ sd x19, 152(sp)
+ sd x18, 144(sp)
+ sd x17, 136(sp)
+ sd x16, 128(sp)
+ sd x15, 120(sp)
+ sd x14, 112(sp)
+ sd x13, 104(sp)
+ sd x12, 96(sp)
+ // sd x11, 88(sp) ; already saved
+ // sd x10, 80(sp) ; already saved
+ sd x9, 72(sp)
+ // sd x8, 64(sp) ; already saved
+ sd x7, 56(sp)
+ sd x6, 48(sp)
+ sd x5, 40(sp)
+ sd x4, 32(sp)
+ sd x3, 24(sp)
+ sd x2, 16(sp)
+ // sd x1, 8(sp) ; already saved
+ // sd x0, 0(sp) ; don't store zero register
+
+ // Pass the address of the frame to __hwasan_tag_mismatch4, so that it can
+ // extract the saved registers from this frame without having to worry about
+ // finding this frame.
+ mv x12, sp
+
+ call __hwasan_tag_mismatch4
+ CFI_ENDPROC
+ASM_SIZE(__hwasan_tag_mismatch_v2)
+
+#endif // defined(__riscv) && (__riscv_xlen == 64)
+
+// We do not need executable stack.
+NO_EXEC_STACK_DIRECTIVE
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread.cpp
index ee747a3beea5..3e14a718513d 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread.cpp
@@ -1,15 +1,16 @@
+#include "hwasan_thread.h"
+
#include "hwasan.h"
+#include "hwasan_interface_internal.h"
#include "hwasan_mapping.h"
-#include "hwasan_thread.h"
#include "hwasan_poisoning.h"
-#include "hwasan_interface_internal.h"
-
+#include "hwasan_thread_list.h"
+#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
-
namespace __hwasan {
static u32 RandomSeed() {
@@ -27,6 +28,7 @@ static u32 RandomSeed() {
void Thread::InitRandomState() {
random_state_ = flags()->random_tags ? RandomSeed() : unique_id_;
+ random_state_inited_ = true;
// Push a random number of zeros onto the ring buffer so that the first stack
// tag base will be random.
@@ -40,18 +42,33 @@ void Thread::Init(uptr stack_buffer_start, uptr stack_buffer_size,
CHECK_EQ(0, stack_top_);
CHECK_EQ(0, stack_bottom_);
- static u64 unique_id;
- unique_id_ = unique_id++;
+ static atomic_uint64_t unique_id;
+ unique_id_ = atomic_fetch_add(&unique_id, 1, memory_order_relaxed);
+ if (!IsMainThread())
+ os_id_ = GetTid();
+
if (auto sz = flags()->heap_history_size)
heap_allocations_ = HeapAllocationsRingBuffer::New(sz);
- InitStackAndTls(state);
#if !SANITIZER_FUCHSIA
// Do not initialize the stack ring buffer just yet on Fuchsia. Threads will
// be initialized before we enter the thread itself, so we will instead call
// this later.
InitStackRingBuffer(stack_buffer_start, stack_buffer_size);
#endif
+ InitStackAndTls(state);
+ dtls_ = DTLS_Get();
+ AllocatorThreadStart(allocator_cache());
+
+ if (flags()->verbose_threads) {
+ if (IsMainThread()) {
+ Printf("sizeof(Thread): %zd sizeof(HeapRB): %zd sizeof(StackRB): %zd\n",
+ sizeof(Thread), heap_allocations_->SizeInBytes(),
+ stack_allocations_->size() * sizeof(uptr));
+ }
+ Print("Creating : ");
+ }
+ ClearShadowForThreadStackAndTLS();
}
void Thread::InitStackRingBuffer(uptr stack_buffer_start,
@@ -73,28 +90,23 @@ void Thread::InitStackRingBuffer(uptr stack_buffer_start,
CHECK(MemIsApp(stack_bottom_));
CHECK(MemIsApp(stack_top_ - 1));
}
-
- if (flags()->verbose_threads) {
- if (IsMainThread()) {
- Printf("sizeof(Thread): %zd sizeof(HeapRB): %zd sizeof(StackRB): %zd\n",
- sizeof(Thread), heap_allocations_->SizeInBytes(),
- stack_allocations_->size() * sizeof(uptr));
- }
- Print("Creating : ");
- }
}
void Thread::ClearShadowForThreadStackAndTLS() {
if (stack_top_ != stack_bottom_)
- TagMemory(stack_bottom_, stack_top_ - stack_bottom_, 0);
+ TagMemory(UntagAddr(stack_bottom_),
+ UntagAddr(stack_top_) - UntagAddr(stack_bottom_),
+ GetTagFromPointer(stack_top_));
if (tls_begin_ != tls_end_)
- TagMemory(tls_begin_, tls_end_ - tls_begin_, 0);
+ TagMemory(UntagAddr(tls_begin_),
+ UntagAddr(tls_end_) - UntagAddr(tls_begin_),
+ GetTagFromPointer(tls_begin_));
}
void Thread::Destroy() {
if (flags()->verbose_threads)
Print("Destroying: ");
- AllocatorSwallowThreadLocalCache(allocator_cache());
+ AllocatorThreadFinish(allocator_cache());
ClearShadowForThreadStackAndTLS();
if (heap_allocations_)
heap_allocations_->Delete();
@@ -108,10 +120,9 @@ void Thread::Destroy() {
}
void Thread::Print(const char *Prefix) {
- Printf("%sT%zd %p stack: [%p,%p) sz: %zd tls: [%p,%p)\n", Prefix,
- unique_id_, this, stack_bottom(), stack_top(),
- stack_top() - stack_bottom(),
- tls_begin(), tls_end());
+ Printf("%sT%zd %p stack: [%p,%p) sz: %zd tls: [%p,%p)\n", Prefix, unique_id_,
+ (void *)this, stack_bottom(), stack_top(),
+ stack_top() - stack_bottom(), tls_begin(), tls_end());
}
static u32 xorshift(u32 state) {
@@ -124,17 +135,21 @@ static u32 xorshift(u32 state) {
// Generate a (pseudo-)random non-zero tag.
tag_t Thread::GenerateRandomTag(uptr num_bits) {
DCHECK_GT(num_bits, 0);
- if (tagging_disabled_) return 0;
+ if (tagging_disabled_)
+ return 0;
tag_t tag;
const uptr tag_mask = (1ULL << num_bits) - 1;
do {
if (flags()->random_tags) {
- if (!random_buffer_)
+ if (!random_buffer_) {
+ EnsureRandomStateInited();
random_buffer_ = random_state_ = xorshift(random_state_);
+ }
CHECK(random_buffer_);
tag = random_buffer_ & tag_mask;
random_buffer_ >>= num_bits;
} else {
+ EnsureRandomStateInited();
random_state_ += 1;
tag = random_state_ & tag_mask;
}
@@ -142,4 +157,67 @@ tag_t Thread::GenerateRandomTag(uptr num_bits) {
return tag;
}
+void EnsureMainThreadIDIsCorrect() {
+ auto *t = __hwasan::GetCurrentThread();
+ if (t && (t->IsMainThread()))
+ t->set_os_id(GetTid());
+}
+
} // namespace __hwasan
+
+// --- Implementation of LSan-specific functions --- {{{1
+namespace __lsan {
+
+static __hwasan::HwasanThreadList *GetHwasanThreadListLocked() {
+ auto &tl = __hwasan::hwasanThreadList();
+ tl.CheckLocked();
+ return &tl;
+}
+
+static __hwasan::Thread *GetThreadByOsIDLocked(tid_t os_id) {
+ return GetHwasanThreadListLocked()->FindThreadLocked(
+ [os_id](__hwasan::Thread *t) { return t->os_id() == os_id; });
+}
+
+void LockThreads() {
+ __hwasan::hwasanThreadList().Lock();
+ __hwasan::hwasanThreadArgRetval().Lock();
+}
+
+void UnlockThreads() {
+ __hwasan::hwasanThreadArgRetval().Unlock();
+ __hwasan::hwasanThreadList().Unlock();
+}
+
+void EnsureMainThreadIDIsCorrect() { __hwasan::EnsureMainThreadIDIsCorrect(); }
+
+bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
+ uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
+ uptr *cache_end, DTLS **dtls) {
+ auto *t = GetThreadByOsIDLocked(os_id);
+ if (!t)
+ return false;
+ *stack_begin = t->stack_bottom();
+ *stack_end = t->stack_top();
+ *tls_begin = t->tls_begin();
+ *tls_end = t->tls_end();
+  // FIXME: is this correct for HWASan?
+ *cache_begin = 0;
+ *cache_end = 0;
+ *dtls = t->dtls();
+ return true;
+}
+
+void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches) {}
+
+void GetThreadExtraStackRangesLocked(tid_t os_id,
+ InternalMmapVector<Range> *ranges) {}
+void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges) {}
+
+void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {
+ __hwasan::hwasanThreadArgRetval().GetAllPtrsLocked(ptrs);
+}
+
+void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) {}
+
+} // namespace __lsan
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread.h b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread.h
index 9f20afe1dc76..9e1b438e48f7 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread.h
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread.h
@@ -28,12 +28,17 @@ class Thread {
void Init(uptr stack_buffer_start, uptr stack_buffer_size,
const InitState *state = nullptr);
- void InitRandomState();
+
void InitStackAndTls(const InitState *state = nullptr);
// Must be called from the thread itself.
void InitStackRingBuffer(uptr stack_buffer_start, uptr stack_buffer_size);
+ inline void EnsureRandomStateInited() {
+ if (UNLIKELY(!random_state_inited_))
+ InitRandomState();
+ }
+
void Destroy();
uptr stack_top() { return stack_top_; }
@@ -41,6 +46,7 @@ class Thread {
uptr stack_size() { return stack_top() - stack_bottom(); }
uptr tls_begin() { return tls_begin_; }
uptr tls_end() { return tls_end_; }
+ DTLS *dtls() { return dtls_; }
bool IsMainThread() { return unique_id_ == 0; }
bool AddrIsInStack(uptr addr) {
@@ -56,13 +62,16 @@ class Thread {
void DisableTagging() { tagging_disabled_++; }
void EnableTagging() { tagging_disabled_--; }
- u64 unique_id() const { return unique_id_; }
+ u32 unique_id() const { return unique_id_; }
void Announce() {
if (announced_) return;
announced_ = true;
Print("Thread: ");
}
+ tid_t os_id() const { return os_id_; }
+ void set_os_id(tid_t os_id) { os_id_ = os_id; }
+
uptr &vfork_spill() { return vfork_spill_; }
private:
@@ -70,11 +79,13 @@ class Thread {
// via mmap() and *must* be valid in zero-initialized state.
void ClearShadowForThreadStackAndTLS();
void Print(const char *prefix);
+ void InitRandomState();
uptr vfork_spill_;
uptr stack_top_;
uptr stack_bottom_;
uptr tls_begin_;
uptr tls_end_;
+ DTLS *dtls_;
u32 random_state_;
u32 random_buffer_;
@@ -83,18 +94,25 @@ class Thread {
HeapAllocationsRingBuffer *heap_allocations_;
StackAllocationsRingBuffer *stack_allocations_;
- u64 unique_id_; // counting from zero.
+ u32 unique_id_; // counting from zero.
+
+ tid_t os_id_;
u32 tagging_disabled_; // if non-zero, malloc uses zero tag in this thread.
bool announced_;
+ bool random_state_inited_; // Whether InitRandomState() has been called.
+
friend struct ThreadListHead;
};
Thread *GetCurrentThread();
uptr *GetCurrentThreadLongPtr();
+// Used to handle fork().
+void EnsureMainThreadIDIsCorrect();
+
struct ScopedTaggingDisabler {
ScopedTaggingDisabler() { GetCurrentThread()->DisableTagging(); }
~ScopedTaggingDisabler() { GetCurrentThread()->EnableTagging(); }
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread_list.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread_list.cpp
index fa46e658b69d..7df4dd3d7851 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread_list.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread_list.cpp
@@ -1,15 +1,28 @@
#include "hwasan_thread_list.h"
+#include "sanitizer_common/sanitizer_thread_arg_retval.h"
+
namespace __hwasan {
-static ALIGNED(16) char thread_list_placeholder[sizeof(HwasanThreadList)];
+
static HwasanThreadList *hwasan_thread_list;
+static ThreadArgRetval *thread_data;
HwasanThreadList &hwasanThreadList() { return *hwasan_thread_list; }
+ThreadArgRetval &hwasanThreadArgRetval() { return *thread_data; }
void InitThreadList(uptr storage, uptr size) {
- CHECK(hwasan_thread_list == nullptr);
+ CHECK_EQ(hwasan_thread_list, nullptr);
+
+ static ALIGNED(alignof(
+ HwasanThreadList)) char thread_list_placeholder[sizeof(HwasanThreadList)];
hwasan_thread_list =
new (thread_list_placeholder) HwasanThreadList(storage, size);
+
+ CHECK_EQ(thread_data, nullptr);
+
+ static ALIGNED(alignof(
+ ThreadArgRetval)) char thread_data_placeholder[sizeof(ThreadArgRetval)];
+ thread_data = new (thread_data_placeholder) ThreadArgRetval();
}
-} // namespace __hwasan
+} // namespace __hwasan
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread_list.h b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread_list.h
index 15916a802d6e..82f6c70a03f8 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread_list.h
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_thread_list.h
@@ -47,8 +47,8 @@
#include "hwasan_allocator.h"
#include "hwasan_flags.h"
#include "hwasan_thread.h"
-
#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_thread_arg_retval.h"
namespace __hwasan {
@@ -71,7 +71,7 @@ struct ThreadStats {
uptr total_stack_size;
};
-class HwasanThreadList {
+class SANITIZER_MUTEX HwasanThreadList {
public:
HwasanThreadList(uptr storage, uptr size)
: free_space_(storage), free_space_end_(storage + size) {
@@ -85,7 +85,8 @@ class HwasanThreadList {
RoundUpTo(ring_buffer_size_ + sizeof(Thread), ring_buffer_size_ * 2);
}
- Thread *CreateCurrentThread(const Thread::InitState *state = nullptr) {
+ Thread *CreateCurrentThread(const Thread::InitState *state = nullptr)
+ SANITIZER_EXCLUDES(free_list_mutex_, live_list_mutex_) {
Thread *t = nullptr;
{
SpinMutexLock l(&free_list_mutex_);
@@ -114,7 +115,8 @@ class HwasanThreadList {
ReleaseMemoryPagesToOS(start, start + thread_alloc_size_);
}
- void RemoveThreadFromLiveList(Thread *t) {
+ void RemoveThreadFromLiveList(Thread *t)
+ SANITIZER_EXCLUDES(live_list_mutex_) {
SpinMutexLock l(&live_list_mutex_);
for (Thread *&t2 : live_list_)
if (t2 == t) {
@@ -127,11 +129,11 @@ class HwasanThreadList {
CHECK(0 && "thread not found in live list");
}
- void ReleaseThread(Thread *t) {
+ void ReleaseThread(Thread *t) SANITIZER_EXCLUDES(free_list_mutex_) {
RemoveThreadStats(t);
+ RemoveThreadFromLiveList(t);
t->Destroy();
DontNeedThread(t);
- RemoveThreadFromLiveList(t);
SpinMutexLock l(&free_list_mutex_);
free_list_.push_back(t);
}
@@ -149,30 +151,47 @@ class HwasanThreadList {
}
template <class CB>
- void VisitAllLiveThreads(CB cb) {
+ void VisitAllLiveThreads(CB cb) SANITIZER_EXCLUDES(live_list_mutex_) {
SpinMutexLock l(&live_list_mutex_);
for (Thread *t : live_list_) cb(t);
}
- void AddThreadStats(Thread *t) {
+ template <class CB>
+ Thread *FindThreadLocked(CB cb) SANITIZER_CHECK_LOCKED(live_list_mutex_) {
+ CheckLocked();
+ for (Thread *t : live_list_)
+ if (cb(t))
+ return t;
+ return nullptr;
+ }
+
+ void AddThreadStats(Thread *t) SANITIZER_EXCLUDES(stats_mutex_) {
SpinMutexLock l(&stats_mutex_);
stats_.n_live_threads++;
stats_.total_stack_size += t->stack_size();
}
- void RemoveThreadStats(Thread *t) {
+ void RemoveThreadStats(Thread *t) SANITIZER_EXCLUDES(stats_mutex_) {
SpinMutexLock l(&stats_mutex_);
stats_.n_live_threads--;
stats_.total_stack_size -= t->stack_size();
}
- ThreadStats GetThreadStats() {
+ ThreadStats GetThreadStats() SANITIZER_EXCLUDES(stats_mutex_) {
SpinMutexLock l(&stats_mutex_);
return stats_;
}
uptr GetRingBufferSize() const { return ring_buffer_size_; }
+ void Lock() SANITIZER_ACQUIRE(live_list_mutex_) { live_list_mutex_.Lock(); }
+ void CheckLocked() const SANITIZER_CHECK_LOCKED(live_list_mutex_) {
+ live_list_mutex_.CheckLocked();
+ }
+ void Unlock() SANITIZER_RELEASE(live_list_mutex_) {
+ live_list_mutex_.Unlock();
+ }
+
private:
Thread *AllocThread() {
SpinMutexLock l(&free_space_mutex_);
@@ -180,7 +199,7 @@ class HwasanThreadList {
CHECK(IsAligned(free_space_, align));
Thread *t = (Thread *)(free_space_ + ring_buffer_size_);
free_space_ += thread_alloc_size_;
- CHECK(free_space_ <= free_space_end_ && "out of thread memory");
+ CHECK_LE(free_space_, free_space_end_);
return t;
}
@@ -191,15 +210,18 @@ class HwasanThreadList {
uptr thread_alloc_size_;
SpinMutex free_list_mutex_;
- InternalMmapVector<Thread *> free_list_;
+ InternalMmapVector<Thread *> free_list_
+ SANITIZER_GUARDED_BY(free_list_mutex_);
SpinMutex live_list_mutex_;
- InternalMmapVector<Thread *> live_list_;
+ InternalMmapVector<Thread *> live_list_
+ SANITIZER_GUARDED_BY(live_list_mutex_);
- ThreadStats stats_;
SpinMutex stats_mutex_;
+ ThreadStats stats_ SANITIZER_GUARDED_BY(stats_mutex_);
};
void InitThreadList(uptr storage, uptr size);
HwasanThreadList &hwasanThreadList();
+ThreadArgRetval &hwasanThreadArgRetval();
} // namespace __hwasan
diff --git a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_type_test.cpp b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_type_test.cpp
index 8cff495bae15..5307073fb40b 100644
--- a/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_type_test.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/hwasan/hwasan_type_test.cpp
@@ -19,7 +19,7 @@
#define CHECK_TYPE_SIZE_FITS(TYPE) \
COMPILER_CHECK(sizeof(__hw_##TYPE) <= sizeof(TYPE))
-#if HWASAN_WITH_INTERCEPTORS && defined(__aarch64__)
+#if HWASAN_WITH_INTERCEPTORS
CHECK_TYPE_SIZE_FITS(jmp_buf);
CHECK_TYPE_SIZE_FITS(sigjmp_buf);
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/interception/interception.h b/contrib/llvm-project/compiler-rt/lib/interception/interception.h
index d8dc092c45f5..58e969378a90 100644
--- a/contrib/llvm-project/compiler-rt/lib/interception/interception.h
+++ b/contrib/llvm-project/compiler-rt/lib/interception/interception.h
@@ -14,9 +14,10 @@
#ifndef INTERCEPTION_H
#define INTERCEPTION_H
+#include "sanitizer_common/sanitizer_asm.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
-#if !SANITIZER_LINUX && !SANITIZER_FREEBSD && !SANITIZER_MAC && \
+#if !SANITIZER_LINUX && !SANITIZER_FREEBSD && !SANITIZER_APPLE && \
!SANITIZER_NETBSD && !SANITIZER_WINDOWS && !SANITIZER_FUCHSIA && \
!SANITIZER_SOLARIS
# error "Interception doesn't work on this operating system."
@@ -67,28 +68,54 @@ typedef __sanitizer::OFF64_T OFF64_T;
// for more details). To intercept such functions you need to use the
// INTERCEPTOR_WITH_SUFFIX(...) macro.
-// How it works:
-// To replace system functions on Linux we just need to declare functions
-// with same names in our library and then obtain the real function pointers
+// How it works on Linux
+// ---------------------
+//
+// To replace system functions on Linux we just need to declare functions with
+// the same names in our library and then obtain the real function pointers
// using dlsym().
-// There is one complication. A user may also intercept some of the functions
-// we intercept. To resolve this we declare our interceptors with __interceptor_
-// prefix, and then make actual interceptors weak aliases to __interceptor_
-// functions.
//
-// This is not so on Mac OS, where the two-level namespace makes
-// our replacement functions invisible to other libraries. This may be overcomed
-// using the DYLD_FORCE_FLAT_NAMESPACE, but some errors loading the shared
-// libraries in Chromium were noticed when doing so.
+// There is one complication: a user may also intercept some of the functions we
+// intercept. To allow for up to 3 interceptors (including ours) of a given
+// function "func", the interceptor implementation is in ___interceptor_func,
+// which is aliased by a weak function __interceptor_func, which in turn is
+// aliased (via a trampoline) by weak wrapper function "func".
+//
+// Most user interceptors should define a foreign interceptor as follows:
+//
+// - provide a non-weak function "func" that performs interception;
+// - if __interceptor_func exists, call it to perform the real functionality;
+// - if it does not exist, figure out the real function and call it instead.
+//
+// In rare cases, a foreign interceptor (of another dynamic analysis runtime)
+// may be defined as follows (on supported architectures):
+//
+// - provide a non-weak function __interceptor_func that performs interception;
+// - if ___interceptor_func exists, call it to perform the real functionality;
+// - if it does not exist, figure out the real function and call it instead;
+// - provide a weak function "func" that is an alias to __interceptor_func.
+//
+// With this protocol, sanitizer interceptors, foreign user interceptors, and
+// foreign interceptors of other dynamic analysis runtimes, or any combination
+// thereof, may co-exist simultaneously.
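+//
+// As a hedged sketch (hypothetical tool code, not part of this header), a
+// foreign user interceptor of close() following this protocol could look
+// like:
+//
+//   extern "C" int __interceptor_close(int fd) __attribute__((weak));
+//   extern "C" int close(int fd) {
+//     // ... this tool's own interception work ...
+//     if (__interceptor_close)   // sanitizer interceptor present?
+//       return __interceptor_close(fd);
+//     using close_fn = int (*)(int);  // otherwise call the real function
+//     static close_fn real_close =
+//         reinterpret_cast<close_fn>(dlsym(RTLD_NEXT, "close"));
+//     return real_close(fd);
+//   }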
+//
+// How it works on Mac OS
+// ----------------------
+//
+// On Mac OS the Linux approach does not work: the two-level namespace makes
+// our replacement functions invisible to other libraries. This may be
+// overcome using DYLD_FORCE_FLAT_NAMESPACE, but some errors were noticed when
+// loading the shared libraries in Chromium that way.
+//
// Instead we create a dylib containing a __DATA,__interpose section that
// associates library functions with their wrappers. When this dylib is
-// preloaded before an executable using DYLD_INSERT_LIBRARIES, it routes all
-// the calls to interposed functions done through stubs to the wrapper
-// functions.
+// preloaded before an executable using DYLD_INSERT_LIBRARIES, it routes all the
+// calls to interposed functions done through stubs to the wrapper functions.
+//
// As it's decided at compile time which functions are to be intercepted on Mac,
// INTERCEPT_FUNCTION() is effectively a no-op on this system.
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
#include <sys/cdefs.h> // For __DARWIN_ALIAS_C().
// Just a pair of pointers.
@@ -100,53 +127,109 @@ struct interpose_substitution {
// For a function foo() create a global pair of pointers { wrap_foo, foo } in
// the __DATA,__interpose section.
// As a result all the calls to foo() will be routed to wrap_foo() at runtime.
-#define INTERPOSER(func_name) __attribute__((used)) \
+#define INTERPOSER(func_name) __attribute__((used)) \
const interpose_substitution substitution_##func_name[] \
__attribute__((section("__DATA, __interpose"))) = { \
- { reinterpret_cast<const uptr>(WRAP(func_name)), \
- reinterpret_cast<const uptr>(func_name) } \
+ { reinterpret_cast<const uptr>(WRAP(func_name)), \
+ reinterpret_cast<const uptr>(func_name) } \
}
// For a function foo() and a wrapper function bar() create a global pair
// of pointers { bar, foo } in the __DATA,__interpose section.
// As a result all the calls to foo() will be routed to bar() at runtime.
#define INTERPOSER_2(func_name, wrapper_name) __attribute__((used)) \
-const interpose_substitution substitution_##func_name[] \
- __attribute__((section("__DATA, __interpose"))) = { \
- { reinterpret_cast<const uptr>(wrapper_name), \
- reinterpret_cast<const uptr>(func_name) } \
+const interpose_substitution substitution_##func_name[] \
+ __attribute__((section("__DATA, __interpose"))) = { \
+ { reinterpret_cast<const uptr>(wrapper_name), \
+ reinterpret_cast<const uptr>(func_name) } \
}
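+
+// Hypothetical usage sketch (names are illustrative, not from this header):
+// to route calls to puts() through wrap_puts() at runtime, one would write:
+//
+//   extern "C" int puts(const char *);
+//   extern "C" int wrap_puts(const char *s) { /* ... */ return 0; }
+//   INTERPOSER(puts);  // emits { wrap_puts, puts } into __DATA,__interpose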
# define WRAP(x) wrap_##x
-# define WRAPPER_NAME(x) "wrap_"#x
+# define TRAMPOLINE(x) WRAP(x)
# define INTERCEPTOR_ATTRIBUTE
# define DECLARE_WRAPPER(ret_type, func, ...)
#elif SANITIZER_WINDOWS
# define WRAP(x) __asan_wrap_##x
-# define WRAPPER_NAME(x) "__asan_wrap_"#x
+# define TRAMPOLINE(x) WRAP(x)
# define INTERCEPTOR_ATTRIBUTE __declspec(dllexport)
-# define DECLARE_WRAPPER(ret_type, func, ...) \
+# define DECLARE_WRAPPER(ret_type, func, ...) \
extern "C" ret_type func(__VA_ARGS__);
-# define DECLARE_WRAPPER_WINAPI(ret_type, func, ...) \
+# define DECLARE_WRAPPER_WINAPI(ret_type, func, ...) \
extern "C" __declspec(dllimport) ret_type __stdcall func(__VA_ARGS__);
-#elif SANITIZER_FREEBSD || SANITIZER_NETBSD
-# define WRAP(x) __interceptor_ ## x
-# define WRAPPER_NAME(x) "__interceptor_" #x
+#elif !SANITIZER_FUCHSIA // LINUX, FREEBSD, NETBSD, SOLARIS
# define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default")))
+# if ASM_INTERCEPTOR_TRAMPOLINE_SUPPORT
+// Weak aliases of weak aliases do not work, therefore we need to set up a
+// trampoline function. The function "func" is a weak alias to the trampoline
+// (so that we may check if "func" was overridden), which calls the weak
+// function __interceptor_func, which in turn aliases the actual interceptor
+// implementation ___interceptor_func:
+//
+// [wrapper "func": weak] --(alias)--> [TRAMPOLINE(func)]
+// |
+// +--------(tail call)-------+
+// |
+// v
+// [__interceptor_func: weak] --(alias)--> [WRAP(func)]
+//
+// We use inline assembly to define most of this, because not all compilers
+// support functions with the "naked" attribute with every architecture.
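+//
+// In C terms (the trampoline itself is emitted as inline assembly below), the
+// alias chain for a function "func" corresponds roughly to this sketch:
+//
+//   extern "C" int ___interceptor_func(int);   // actual implementation
+//   extern "C" int __interceptor_func(int)     // weak alias of it
+//       __attribute__((weak, alias("___interceptor_func")));
+//   // TRAMPOLINE(func) tail-calls __interceptor_func, and the weak wrapper
+//   // symbol "func" is set as an alias of the trampoline.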
+# define WRAP(x) ___interceptor_ ## x
+# define TRAMPOLINE(x) __interceptor_trampoline_ ## x
+# if SANITIZER_FREEBSD || SANITIZER_NETBSD
// FreeBSD's dynamic linker (non-compliantly) gives non-weak symbols higher
// priority than weak ones, so weak aliases won't work for indirect calls
// in position-independent (-fPIC / -fPIE) mode.
-# define DECLARE_WRAPPER(ret_type, func, ...) \
- extern "C" ret_type func(__VA_ARGS__) \
- __attribute__((alias("__interceptor_" #func), visibility("default")));
-#elif !SANITIZER_FUCHSIA
-# define WRAP(x) __interceptor_ ## x
-# define WRAPPER_NAME(x) "__interceptor_" #x
-# define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default")))
-# define DECLARE_WRAPPER(ret_type, func, ...) \
- extern "C" ret_type func(__VA_ARGS__) \
- __attribute__((weak, alias("__interceptor_" #func), visibility("default")));
+# define __ASM_WEAK_WRAPPER(func) ".globl " #func "\n"
+# else
+# define __ASM_WEAK_WRAPPER(func) ".weak " #func "\n"
+# endif // SANITIZER_FREEBSD || SANITIZER_NETBSD
+# if defined(__arm__) || defined(__aarch64__)
+# define ASM_TYPE_FUNCTION_STR "%function"
+# else
+# define ASM_TYPE_FUNCTION_STR "@function"
+# endif
+// Keep trampoline implementation in sync with sanitizer_common/sanitizer_asm.h
+# define DECLARE_WRAPPER(ret_type, func, ...) \
+ extern "C" ret_type func(__VA_ARGS__); \
+ extern "C" ret_type TRAMPOLINE(func)(__VA_ARGS__); \
+ extern "C" ret_type __interceptor_##func(__VA_ARGS__) \
+ INTERCEPTOR_ATTRIBUTE __attribute__((weak)) ALIAS(WRAP(func)); \
+ asm( \
+ ".text\n" \
+ __ASM_WEAK_WRAPPER(func) \
+ ".set " #func ", " SANITIZER_STRINGIFY(TRAMPOLINE(func)) "\n" \
+ ".globl " SANITIZER_STRINGIFY(TRAMPOLINE(func)) "\n" \
+ ".type " SANITIZER_STRINGIFY(TRAMPOLINE(func)) ", " \
+ ASM_TYPE_FUNCTION_STR "\n" \
+ SANITIZER_STRINGIFY(TRAMPOLINE(func)) ":\n" \
+ SANITIZER_STRINGIFY(CFI_STARTPROC) "\n" \
+ C_ASM_TAIL_CALL(SANITIZER_STRINGIFY(TRAMPOLINE(func)), \
+ "__interceptor_" \
+ SANITIZER_STRINGIFY(ASM_PREEMPTIBLE_SYM(func))) "\n" \
+ SANITIZER_STRINGIFY(CFI_ENDPROC) "\n" \
+ ".size " SANITIZER_STRINGIFY(TRAMPOLINE(func)) ", " \
+ ".-" SANITIZER_STRINGIFY(TRAMPOLINE(func)) "\n" \
+ );
+# else // ASM_INTERCEPTOR_TRAMPOLINE_SUPPORT
+// Some architectures cannot implement efficient interceptor trampolines with
+// just a plain jump, due to the complexities of resolving a preemptible
+// symbol. In those cases, we fall back to this simpler scheme:
+//
+// [wrapper "func": weak] --(alias)--> [WRAP(func)]
+//
+# define WRAP(x) __interceptor_ ## x
+# define TRAMPOLINE(x) WRAP(x)
+# if SANITIZER_FREEBSD || SANITIZER_NETBSD
+# define __ATTRIBUTE_WEAK_WRAPPER
+# else
+# define __ATTRIBUTE_WEAK_WRAPPER __attribute__((weak))
+# endif // SANITIZER_FREEBSD || SANITIZER_NETBSD
+# define DECLARE_WRAPPER(ret_type, func, ...) \
+ extern "C" ret_type func(__VA_ARGS__) \
+ INTERCEPTOR_ATTRIBUTE __ATTRIBUTE_WEAK_WRAPPER ALIAS(WRAP(func));
+# endif // ASM_INTERCEPTOR_TRAMPOLINE_SUPPORT
#endif
#if SANITIZER_FUCHSIA
@@ -157,33 +240,35 @@ const interpose_substitution substitution_##func_name[] \
# define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default")))
# define REAL(x) __unsanitized_##x
# define DECLARE_REAL(ret_type, func, ...)
-#elif !SANITIZER_MAC
+#elif !SANITIZER_APPLE
# define PTR_TO_REAL(x) real_##x
# define REAL(x) __interception::PTR_TO_REAL(x)
# define FUNC_TYPE(x) x##_type
-# define DECLARE_REAL(ret_type, func, ...) \
+# define DECLARE_REAL(ret_type, func, ...) \
typedef ret_type (*FUNC_TYPE(func))(__VA_ARGS__); \
- namespace __interception { \
- extern FUNC_TYPE(func) PTR_TO_REAL(func); \
+ namespace __interception { \
+ extern FUNC_TYPE(func) PTR_TO_REAL(func); \
}
# define ASSIGN_REAL(dst, src) REAL(dst) = REAL(src)
-#else // SANITIZER_MAC
+#else // SANITIZER_APPLE
# define REAL(x) x
# define DECLARE_REAL(ret_type, func, ...) \
extern "C" ret_type func(__VA_ARGS__);
# define ASSIGN_REAL(x, y)
-#endif // SANITIZER_MAC
+#endif // SANITIZER_APPLE
#if !SANITIZER_FUCHSIA
-# define DECLARE_REAL_AND_INTERCEPTOR(ret_type, func, ...) \
+# define DECLARE_REAL_AND_INTERCEPTOR(ret_type, func, ...) \
DECLARE_REAL(ret_type, func, __VA_ARGS__) \
+ extern "C" ret_type TRAMPOLINE(func)(__VA_ARGS__); \
extern "C" ret_type WRAP(func)(__VA_ARGS__);
// Declare an interceptor and its wrapper defined in a different translation
// unit (ex. asm).
-# define DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(ret_type, func, ...) \
- extern "C" ret_type WRAP(func)(__VA_ARGS__); \
- extern "C" ret_type func(__VA_ARGS__);
+# define DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(ret_type, func, ...) \
+ extern "C" ret_type TRAMPOLINE(func)(__VA_ARGS__); \
+ extern "C" ret_type WRAP(func)(__VA_ARGS__); \
+ extern "C" ret_type func(__VA_ARGS__);
#else
# define DECLARE_REAL_AND_INTERCEPTOR(ret_type, func, ...)
# define DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(ret_type, func, ...)
@@ -193,7 +278,7 @@ const interpose_substitution substitution_##func_name[] \
// macros does its job. In exceptional cases you may need to call REAL(foo)
// without defining INTERCEPTOR(..., foo, ...). For example, if you override
// foo with an interceptor for other function.
-#if !SANITIZER_MAC && !SANITIZER_FUCHSIA
+#if !SANITIZER_APPLE && !SANITIZER_FUCHSIA
# define DEFINE_REAL(ret_type, func, ...) \
typedef ret_type (*FUNC_TYPE(func))(__VA_ARGS__); \
namespace __interception { \
@@ -213,25 +298,23 @@ const interpose_substitution substitution_##func_name[] \
__interceptor_##func(__VA_ARGS__); \
extern "C" INTERCEPTOR_ATTRIBUTE ret_type func(__VA_ARGS__)
-#elif !SANITIZER_MAC
+#elif !SANITIZER_APPLE
-#define INTERCEPTOR(ret_type, func, ...) \
- DEFINE_REAL(ret_type, func, __VA_ARGS__) \
- DECLARE_WRAPPER(ret_type, func, __VA_ARGS__) \
- extern "C" \
- INTERCEPTOR_ATTRIBUTE \
- ret_type WRAP(func)(__VA_ARGS__)
+#define INTERCEPTOR(ret_type, func, ...) \
+ DEFINE_REAL(ret_type, func, __VA_ARGS__) \
+ DECLARE_WRAPPER(ret_type, func, __VA_ARGS__) \
+ extern "C" INTERCEPTOR_ATTRIBUTE ret_type WRAP(func)(__VA_ARGS__)
// We don't need INTERCEPTOR_WITH_SUFFIX on non-Darwin for now.
#define INTERCEPTOR_WITH_SUFFIX(ret_type, func, ...) \
INTERCEPTOR(ret_type, func, __VA_ARGS__)
-#else // SANITIZER_MAC
+#else // SANITIZER_APPLE
-#define INTERCEPTOR_ZZZ(suffix, ret_type, func, ...) \
- extern "C" ret_type func(__VA_ARGS__) suffix; \
- extern "C" ret_type WRAP(func)(__VA_ARGS__); \
- INTERPOSER(func); \
+#define INTERCEPTOR_ZZZ(suffix, ret_type, func, ...) \
+ extern "C" ret_type func(__VA_ARGS__) suffix; \
+ extern "C" ret_type WRAP(func)(__VA_ARGS__); \
+ INTERPOSER(func); \
extern "C" INTERCEPTOR_ATTRIBUTE ret_type WRAP(func)(__VA_ARGS__)
#define INTERCEPTOR(ret_type, func, ...) \
@@ -246,14 +329,12 @@ const interpose_substitution substitution_##func_name[] \
#endif
#if SANITIZER_WINDOWS
-# define INTERCEPTOR_WINAPI(ret_type, func, ...) \
+# define INTERCEPTOR_WINAPI(ret_type, func, ...) \
typedef ret_type (__stdcall *FUNC_TYPE(func))(__VA_ARGS__); \
- namespace __interception { \
- FUNC_TYPE(func) PTR_TO_REAL(func); \
- } \
- extern "C" \
- INTERCEPTOR_ATTRIBUTE \
- ret_type __stdcall WRAP(func)(__VA_ARGS__)
+ namespace __interception { \
+ FUNC_TYPE(func) PTR_TO_REAL(func); \
+ } \
+ extern "C" INTERCEPTOR_ATTRIBUTE ret_type __stdcall WRAP(func)(__VA_ARGS__)
#endif
// ISO C++ forbids casting between pointer-to-function and pointer-to-object,
@@ -278,7 +359,7 @@ typedef unsigned long uptr;
# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func)
# define INTERCEPT_FUNCTION_VER(func, symver) \
INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver)
-#elif SANITIZER_MAC
+#elif SANITIZER_APPLE
# include "interception_mac.h"
# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_MAC(func)
# define INTERCEPT_FUNCTION_VER(func, symver) \
diff --git a/contrib/llvm-project/compiler-rt/lib/interception/interception_linux.cpp b/contrib/llvm-project/compiler-rt/lib/interception/interception_linux.cpp
index 5111a87f0a6c..ef8136eb4fc7 100644
--- a/contrib/llvm-project/compiler-rt/lib/interception/interception_linux.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/interception/interception_linux.cpp
@@ -33,7 +33,7 @@ static int StrCmp(const char *s1, const char *s2) {
}
#endif
-static void *GetFuncAddr(const char *name, uptr wrapper_addr) {
+static void *GetFuncAddr(const char *name, uptr trampoline) {
#if SANITIZER_NETBSD
// FIXME: Find a better way to handle renames
if (StrCmp(name, "sigaction"))
@@ -50,17 +50,17 @@ static void *GetFuncAddr(const char *name, uptr wrapper_addr) {
// In case `name' is not loaded, dlsym ends up finding the actual wrapper.
// We don't want to intercept the wrapper and have it point to itself.
- if ((uptr)addr == wrapper_addr)
+ if ((uptr)addr == trampoline)
addr = nullptr;
}
return addr;
}
bool InterceptFunction(const char *name, uptr *ptr_to_real, uptr func,
- uptr wrapper) {
- void *addr = GetFuncAddr(name, wrapper);
+ uptr trampoline) {
+ void *addr = GetFuncAddr(name, trampoline);
*ptr_to_real = (uptr)addr;
- return addr && (func == wrapper);
+ return addr && (func == trampoline);
}
// dlvsym is a GNU extension supported by some other platforms.
@@ -70,12 +70,12 @@ static void *GetFuncAddr(const char *name, const char *ver) {
}
bool InterceptFunction(const char *name, const char *ver, uptr *ptr_to_real,
- uptr func, uptr wrapper) {
+ uptr func, uptr trampoline) {
void *addr = GetFuncAddr(name, ver);
*ptr_to_real = (uptr)addr;
- return addr && (func == wrapper);
+ return addr && (func == trampoline);
}
-#endif // SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD
+# endif // SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD
} // namespace __interception
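
The wrapper-to-trampoline rename above is mechanical, but the check it feeds is worth spelling out: dlsym(RTLD_NEXT, name) must not resolve back to our own trampoline, and interception only succeeds when the exported symbol and the trampoline coincide. A self-contained sketch of the same logic with the platform guards stripped:

    // Compile with -D_GNU_SOURCE on glibc for RTLD_NEXT.
    #include <dlfcn.h>
    #include <cstdint>

    using uptr = uintptr_t;

    static bool InterceptOne(const char *name, uptr *ptr_to_real, uptr func,
                             uptr trampoline) {
      // Find the next definition of `name` after our own library.
      void *addr = dlsym(RTLD_NEXT, name);
      // If dlsym resolved to our own trampoline, the real function isn't
      // loaded; don't let the interceptor point at itself.
      if ((uptr)addr == trampoline)
        addr = nullptr;
      *ptr_to_real = (uptr)addr;
      // Success requires a real target and the exported symbol being ours.
      return addr && (func == trampoline);
    }
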
diff --git a/contrib/llvm-project/compiler-rt/lib/interception/interception_linux.h b/contrib/llvm-project/compiler-rt/lib/interception/interception_linux.h
index a08f8cb98c40..433a3d9bd7fa 100644
--- a/contrib/llvm-project/compiler-rt/lib/interception/interception_linux.h
+++ b/contrib/llvm-project/compiler-rt/lib/interception/interception_linux.h
@@ -15,7 +15,7 @@
SANITIZER_SOLARIS
#if !defined(INCLUDED_FROM_INTERCEPTION_LIB)
-# error "interception_linux.h should be included from interception library only"
+# error interception_linux.h should be included from interception library only
#endif
#ifndef INTERCEPTION_LINUX_H
@@ -23,26 +23,26 @@
namespace __interception {
bool InterceptFunction(const char *name, uptr *ptr_to_real, uptr func,
- uptr wrapper);
+ uptr trampoline);
bool InterceptFunction(const char *name, const char *ver, uptr *ptr_to_real,
- uptr func, uptr wrapper);
+ uptr func, uptr trampoline);
} // namespace __interception
#define INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func) \
::__interception::InterceptFunction( \
#func, \
- (::__interception::uptr *) & REAL(func), \
- (::__interception::uptr) & (func), \
- (::__interception::uptr) & WRAP(func))
+ (::__interception::uptr *)&REAL(func), \
+ (::__interception::uptr)&(func), \
+ (::__interception::uptr)&TRAMPOLINE(func))
// dlvsym is a GNU extension supported by some other platforms.
#if SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD
#define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \
::__interception::InterceptFunction( \
#func, symver, \
- (::__interception::uptr *) & REAL(func), \
- (::__interception::uptr) & (func), \
- (::__interception::uptr) & WRAP(func))
+ (::__interception::uptr *)&REAL(func), \
+ (::__interception::uptr)&(func), \
+ (::__interception::uptr)&TRAMPOLINE(func))
#else
#define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \
INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func)
diff --git a/contrib/llvm-project/compiler-rt/lib/interception/interception_mac.cpp b/contrib/llvm-project/compiler-rt/lib/interception/interception_mac.cpp
index fb6eadcff597..03eae0fdca0d 100644
--- a/contrib/llvm-project/compiler-rt/lib/interception/interception_mac.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/interception/interception_mac.cpp
@@ -13,6 +13,6 @@
#include "interception.h"
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
-#endif // SANITIZER_MAC
+#endif // SANITIZER_APPLE
diff --git a/contrib/llvm-project/compiler-rt/lib/interception/interception_mac.h b/contrib/llvm-project/compiler-rt/lib/interception/interception_mac.h
index eddedb8959c4..26079518c649 100644
--- a/contrib/llvm-project/compiler-rt/lib/interception/interception_mac.h
+++ b/contrib/llvm-project/compiler-rt/lib/interception/interception_mac.h
@@ -11,7 +11,7 @@
// Mac-specific interception methods.
//===----------------------------------------------------------------------===//
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
#if !defined(INCLUDED_FROM_INTERCEPTION_LIB)
# error "interception_mac.h should be included from interception.h only"
@@ -24,4 +24,4 @@
#define INTERCEPT_FUNCTION_VER_MAC(func, symver)
#endif // INTERCEPTION_MAC_H
-#endif // SANITIZER_MAC
+#endif // SANITIZER_APPLE
diff --git a/contrib/llvm-project/compiler-rt/lib/interception/interception_type_test.cpp b/contrib/llvm-project/compiler-rt/lib/interception/interception_type_test.cpp
index a611604a700c..7c3de82a1e86 100644
--- a/contrib/llvm-project/compiler-rt/lib/interception/interception_type_test.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/interception/interception_type_test.cpp
@@ -13,7 +13,7 @@
#include "interception.h"
-#if SANITIZER_LINUX || SANITIZER_MAC
+#if SANITIZER_LINUX || SANITIZER_APPLE
#include <sys/types.h>
#include <stddef.h>
@@ -24,9 +24,9 @@ COMPILER_CHECK(sizeof(::SSIZE_T) == sizeof(ssize_t));
COMPILER_CHECK(sizeof(::PTRDIFF_T) == sizeof(ptrdiff_t));
COMPILER_CHECK(sizeof(::INTMAX_T) == sizeof(intmax_t));
-#if !SANITIZER_MAC
+# if SANITIZER_GLIBC || SANITIZER_ANDROID
COMPILER_CHECK(sizeof(::OFF64_T) == sizeof(off64_t));
-#endif
+# endif
// The following are the cases when pread (and friends) is used instead of
// pread64. In those cases we need OFF_T to match off_t. We don't care about the
diff --git a/contrib/llvm-project/compiler-rt/lib/interception/interception_win.cpp b/contrib/llvm-project/compiler-rt/lib/interception/interception_win.cpp
index 98bc756ae53a..1829358705fe 100644
--- a/contrib/llvm-project/compiler-rt/lib/interception/interception_win.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/interception/interception_win.cpp
@@ -1,4 +1,4 @@
-//===-- interception_linux.cpp ----------------------------------*- C++ -*-===//
+//===-- interception_win.cpp ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -56,7 +56,7 @@
// tramp: jmp QWORD [addr]
// addr: .bytes <hook>
//
-// Note: <real> is equilavent to <label>.
+// Note: <real> is equivalent to <label>.
//
// 3) HotPatch
//
@@ -141,8 +141,29 @@ static const int kBranchLength =
FIRST_32_SECOND_64(kJumpInstructionLength, kIndirectJumpInstructionLength);
static const int kDirectBranchLength = kBranchLength + kAddressLength;
+# if defined(_MSC_VER)
+# define INTERCEPTION_FORMAT(f, a)
+# else
+# define INTERCEPTION_FORMAT(f, a) __attribute__((format(printf, f, a)))
+# endif
+
+static void (*ErrorReportCallback)(const char *format, ...)
+ INTERCEPTION_FORMAT(1, 2);
+
+void SetErrorReportCallback(void (*callback)(const char *format, ...)) {
+ ErrorReportCallback = callback;
+}
+
+# define ReportError(...) \
+ do { \
+ if (ErrorReportCallback) \
+ ErrorReportCallback(__VA_ARGS__); \
+ } while (0)
+
static void InterceptionFailed() {
- // Do we have a good way to abort with an error message here?
+ ReportError("interception_win: failed due to an unrecoverable error.\n");
+ // This acts like an abort when no debugger is attached. According to an old
+ // comment, calling abort() leads to an infinite recursion in CheckFailed.
__debugbreak();
}
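
With the new callback, interception_win never prints on its own; whoever links it decides. A minimal sketch of wiring it up, with a vfprintf-based printer standing in for the sanitizer's Report (the printer here is illustrative, not from the diff):

    #include <cstdarg>
    #include <cstdio>

    // Printf-style sink; a sanitizer runtime would pass its own Report here.
    static void MyReport(const char *format, ...) {
      va_list args;
      va_start(args, format);
      vfprintf(stderr, format, args);
      va_end(args);
    }

    // During runtime initialization:
    //   __interception::SetErrorReportCallback(MyReport);
    // and to silence the messages again (the default state):
    //   __interception::SetErrorReportCallback(nullptr);
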
@@ -249,8 +270,13 @@ static void WritePadding(uptr from, uptr size) {
}
static void WriteJumpInstruction(uptr from, uptr target) {
- if (!DistanceIsWithin2Gig(from + kJumpInstructionLength, target))
+ if (!DistanceIsWithin2Gig(from + kJumpInstructionLength, target)) {
+ ReportError(
+ "interception_win: cannot write jmp further than 2GB away, from %p to "
+ "%p.\n",
+ (void *)from, (void *)target);
InterceptionFailed();
+ }
ptrdiff_t offset = target - from - kJumpInstructionLength;
*(u8*)from = 0xE9;
*(u32*)(from + 1) = offset;
@@ -274,6 +300,10 @@ static void WriteIndirectJumpInstruction(uptr from, uptr indirect_target) {
int offset = indirect_target - from - kIndirectJumpInstructionLength;
if (!DistanceIsWithin2Gig(from + kIndirectJumpInstructionLength,
indirect_target)) {
+ ReportError(
+ "interception_win: cannot write indirect jmp with target further than "
+ "2GB away, from %p to %p.\n",
+ (void *)from, (void *)indirect_target);
InterceptionFailed();
}
*(u16*)from = 0x25FF;
@@ -398,8 +428,50 @@ static uptr AllocateMemoryForTrampoline(uptr image_address, size_t size) {
return allocated_space;
}
+// The following prologues cannot be patched because they contain a short
+// backward jump whose target lies inside the region being patched.
+
+// Short jump patterns below are only for x86_64.
+# if SANITIZER_WINDOWS_x64
+// ntdll!wcslen in Win11
+// 488bc1 mov rax,rcx
+// 0fb710 movzx edx,word ptr [rax]
+// 4883c002 add rax,2
+// 6685d2 test dx,dx
+// 75f4 jne -12
+static const u8 kPrologueWithShortJump1[] = {
+ 0x48, 0x8b, 0xc1, 0x0f, 0xb7, 0x10, 0x48, 0x83,
+ 0xc0, 0x02, 0x66, 0x85, 0xd2, 0x75, 0xf4,
+};
+
+// ntdll!strrchr in Win11
+// 4c8bc1 mov r8,rcx
+// 8a01 mov al,byte ptr [rcx]
+// 48ffc1 inc rcx
+// 84c0 test al,al
+// 75f7 jne -9
+static const u8 kPrologueWithShortJump2[] = {
+ 0x4c, 0x8b, 0xc1, 0x8a, 0x01, 0x48, 0xff, 0xc1,
+ 0x84, 0xc0, 0x75, 0xf7,
+};
+#endif
+
// Returns 0 on error.
static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
+#if SANITIZER_ARM64
+ // An ARM64 instruction is 4 bytes long.
+ return 4;
+#endif
+
+# if SANITIZER_WINDOWS_x64
+ if (memcmp((u8*)address, kPrologueWithShortJump1,
+ sizeof(kPrologueWithShortJump1)) == 0 ||
+ memcmp((u8*)address, kPrologueWithShortJump2,
+ sizeof(kPrologueWithShortJump2)) == 0) {
+ return 0;
+ }
+#endif
+
switch (*(u64*)address) {
case 0x90909090909006EB: // stub: jmp over 6 x nop.
return 8;
@@ -456,6 +528,7 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
case 0xFF8B: // 8B FF : mov edi, edi
case 0xEC8B: // 8B EC : mov ebp, esp
case 0xc889: // 89 C8 : mov eax, ecx
+ case 0xE589: // 89 E5 : mov ebp, esp
case 0xC18B: // 8B C1 : mov eax, ecx
case 0xC033: // 33 C0 : xor eax, eax
case 0xC933: // 33 C9 : xor ecx, ecx
@@ -472,11 +545,19 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
return 7;
}
-#if SANITIZER_WINDOWS64
+# if SANITIZER_WINDOWS_x64
switch (*(u8*)address) {
case 0xA1: // A1 XX XX XX XX XX XX XX XX :
// movabs eax, dword ptr ds:[XXXXXXXX]
return 9;
+
+ case 0x83:
+ const u8 next_byte = *(u8*)(address + 1);
+ const u8 mod = next_byte >> 6;
+ const u8 rm = next_byte & 7;
+ if (mod == 1 && rm == 4)
+ return 5; // 83 ModR/M SIB Disp8 Imm8
+ // add|or|adc|sbb|and|sub|xor|cmp [r+disp8], imm8
}
switch (*(u16*)address) {
@@ -493,8 +574,11 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
case 0x5641: // push r14
case 0x5741: // push r15
case 0x9066: // Two-byte NOP
+ case 0xc084: // test al, al
+ case 0x018a: // mov al, byte ptr [rcx]
return 2;
+ case 0x058A: // 8A 05 XX XX XX XX : mov al, byte ptr [XX XX XX XX]
case 0x058B: // 8B 05 XX XX XX XX : mov eax, dword ptr [XX XX XX XX]
if (rel_offset)
*rel_offset = 2;
@@ -509,6 +593,7 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
case 0xd12b48: // 48 2b d1 : sub rdx, rcx
case 0x07c1f6: // f6 c1 07 : test cl, 0x7
case 0xc98548: // 48 85 C9 : test rcx, rcx
+ case 0xd28548: // 48 85 d2 : test rdx, rdx
case 0xc0854d: // 4d 85 c0 : test r8, r8
case 0xc2b60f: // 0f b6 c2 : movzx eax, dl
case 0xc03345: // 45 33 c0 : xor r8d, r8d
@@ -522,6 +607,7 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
case 0xca2b48: // 48 2b ca : sub rcx, rdx
case 0x10b70f: // 0f b7 10 : movzx edx, WORD PTR [rax]
case 0xc00b4d: // 3d 0b c0 : or r8, r8
+ case 0xc08b41: // 41 8b c0 : mov eax, r8d
case 0xd18b48: // 48 8b d1 : mov rdx, rcx
case 0xdc8b4c: // 4c 8b dc : mov r11, rsp
case 0xd18b4c: // 4c 8b d1 : mov r10, rcx
@@ -540,7 +626,7 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
// mov rax, QWORD PTR [rip + XXXXXXXX]
case 0x25ff48: // 48 ff 25 XX XX XX XX :
// rex.W jmp QWORD PTR [rip + XXXXXXXX]
-
+ case 0x158D4C: // 4c 8d 15 XX XX XX XX : lea r10, [rip + XX]
// Instructions having offset relative to 'rip' need offset adjustment.
if (rel_offset)
*rel_offset = 3;
@@ -556,6 +642,7 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
case 0x246c8948: // 48 89 6C 24 XX : mov QWORD ptr [rsp + XX], rbp
case 0x245c8948: // 48 89 5c 24 XX : mov QWORD PTR [rsp + XX], rbx
case 0x24748948: // 48 89 74 24 XX : mov QWORD PTR [rsp + XX], rsi
+ case 0x247c8948: // 48 89 7c 24 XX : mov QWORD PTR [rsp + XX], rdi
case 0x244C8948: // 48 89 4C 24 XX : mov QWORD PTR [rsp + XX], rcx
case 0x24548948: // 48 89 54 24 XX : mov QWORD PTR [rsp + XX], rdx
case 0x244c894c: // 4c 89 4c 24 XX : mov QWORD PTR [rsp + XX], r9
@@ -592,6 +679,8 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
case 0x24448B: // 8B 44 24 XX : mov eax, dword ptr [esp + XX]
case 0x244C8B: // 8B 4C 24 XX : mov ecx, dword ptr [esp + XX]
case 0x24548B: // 8B 54 24 XX : mov edx, dword ptr [esp + XX]
+ case 0x245C8B: // 8B 5C 24 XX : mov ebx, dword ptr [esp + XX]
+ case 0x246C8B: // 8B 6C 24 XX : mov ebp, dword ptr [esp + XX]
case 0x24748B: // 8B 74 24 XX : mov esi, dword ptr [esp + XX]
case 0x247C8B: // 8B 7C 24 XX : mov edi, dword ptr [esp + XX]
return 4;
@@ -603,12 +692,20 @@ static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
}
#endif
- // Unknown instruction!
- // FIXME: Unknown instruction failures might happen when we add a new
- // interceptor or a new compiler version. In either case, they should result
- // in visible and readable error messages. However, merely calling abort()
- // leads to an infinite recursion in CheckFailed.
- InterceptionFailed();
+  // Unknown instruction! This might happen when we add a new interceptor, use
+  // a new compiler version, or if Windows changes how some functions are
+  // compiled. In any of these cases, we print the address and 8 bytes of
+  // instructions to notify the user about the error and to help identify the
+  // unknown instruction. Don't treat this as a fatal error, though we do break
+  // into the debugger if one is attached.
+ u8 *bytes = (u8 *)address;
+ ReportError(
+ "interception_win: unhandled instruction at %p: %02x %02x %02x %02x %02x "
+ "%02x %02x %02x\n",
+ (void *)address, bytes[0], bytes[1], bytes[2], bytes[3], bytes[4],
+ bytes[5], bytes[6], bytes[7]);
+ if (::IsDebuggerPresent())
+ __debugbreak();
return 0;
}
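
The 0x83 case added above decodes the ModR/M byte by hand: bits [7:6] are mod and bits [2:0] are rm; mod == 1 with rm == 4 means a SIB byte and an 8-bit displacement follow, so opcode + ModR/M + SIB + disp8 + imm8 totals five bytes. The same arithmetic as a standalone check:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // 83 64 24 20 FE = and dword ptr [rsp+0x20], -2: ModR/M 0x64 -> mod=1, rm=4.
    static size_t SizeOfGroup1Imm8(const uint8_t *insn) {
      const uint8_t modrm = insn[1];
      const uint8_t mod = modrm >> 6;  // addressing mode, top two bits
      const uint8_t rm = modrm & 7;    // base register field; 4 selects a SIB byte
      if (mod == 1 && rm == 4)
        return 5;  // opcode + ModR/M + SIB + disp8 + imm8
      return 0;    // pattern not handled here
    }

    int main() {
      const uint8_t insn[] = {0x83, 0x64, 0x24, 0x20, 0xFE};
      assert(SizeOfGroup1Imm8(insn) == 5);
      return 0;
    }
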
@@ -629,16 +726,24 @@ static bool CopyInstructions(uptr to, uptr from, size_t size) {
while (cursor != size) {
size_t rel_offset = 0;
size_t instruction_size = GetInstructionSize(from + cursor, &rel_offset);
- _memcpy((void*)(to + cursor), (void*)(from + cursor),
+ if (!instruction_size)
+ return false;
+ _memcpy((void *)(to + cursor), (void *)(from + cursor),
(size_t)instruction_size);
if (rel_offset) {
- uptr delta = to - from;
- uptr relocated_offset = *(u32*)(to + cursor + rel_offset) - delta;
-#if SANITIZER_WINDOWS64
- if (relocated_offset + 0x80000000U >= 0xFFFFFFFFU)
+# if SANITIZER_WINDOWS64
+      // We want to make sure the new relative offset still fits in 32 bits;
+      // this fails if relocated_offset is outside [-2**31, 2**31).
+ s64 delta = to - from;
+ s64 relocated_offset = *(s32 *)(to + cursor + rel_offset) - delta;
+ if (-0x8000'0000ll > relocated_offset || relocated_offset > 0x7FFF'FFFFll)
return false;
-#endif
- *(u32*)(to + cursor + rel_offset) = relocated_offset;
+# else
+      // On 32-bit, the relative offset always fits; addresses are 32 bits.
+ s32 delta = to - from;
+ s32 relocated_offset = *(s32 *)(to + cursor + rel_offset) - delta;
+# endif
+ *(s32 *)(to + cursor + rel_offset) = relocated_offset;
}
cursor += instruction_size;
}
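
The signed-offset rewrite above rests on one identity: a rip-relative operand encodes target - next_rip, so after moving an instruction by delta = to - from the stored offset must shrink by delta to keep the same absolute target, and on x64 the result must still fit a signed 32-bit immediate. A worked check with made-up addresses:

    #include <cassert>
    #include <cstdint>

    int main() {
      const int64_t from = 0x1000, to = 0x9000;  // made-up copy addresses
      const int32_t stored_offset = 0x500;       // offset embedded in the insn
      const int64_t delta = to - from;                  // 0x8000
      const int64_t relocated = stored_offset - delta;  // -0x7B00
      // Both encodings resolve to the same absolute target (insn length 7):
      assert(from + 7 + stored_offset == to + 7 + relocated);
      // And the result is still a valid rel32:
      assert(relocated >= -0x80000000LL && relocated <= 0x7FFFFFFFLL);
      return 0;
    }
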
@@ -689,7 +794,7 @@ bool OverrideFunctionWithRedirectJump(
return false;
if (orig_old_func) {
- uptr relative_offset = *(u32*)(old_func + 1);
+ sptr relative_offset = *(s32 *)(old_func + 1);
uptr absolute_target = old_func + relative_offset + kJumpInstructionLength;
*orig_old_func = absolute_target;
}
@@ -840,15 +945,26 @@ bool OverrideFunction(
static void **InterestingDLLsAvailable() {
static const char *InterestingDLLs[] = {
- "kernel32.dll",
- "msvcr100.dll", // VS2010
- "msvcr110.dll", // VS2012
- "msvcr120.dll", // VS2013
- "vcruntime140.dll", // VS2015
- "ucrtbase.dll", // Universal CRT
- // NTDLL should go last as it exports some functions that we should
- // override in the CRT [presumably only used internally].
- "ntdll.dll", NULL};
+ "kernel32.dll",
+ "msvcr100d.dll", // VS2010
+ "msvcr110d.dll", // VS2012
+ "msvcr120d.dll", // VS2013
+ "vcruntime140d.dll", // VS2015
+ "ucrtbased.dll", // Universal CRT
+ "msvcr100.dll", // VS2010
+ "msvcr110.dll", // VS2012
+ "msvcr120.dll", // VS2013
+ "vcruntime140.dll", // VS2015
+ "ucrtbase.dll", // Universal CRT
+# if (defined(__MINGW32__) && defined(__i386__))
+ "libc++.dll", // libc++
+ "libunwind.dll", // libunwind
+# endif
+ // NTDLL should go last as it exports some functions that we should
+ // override in the CRT [presumably only used internally].
+ "ntdll.dll",
+ NULL
+ };
static void *result[ARRAY_SIZE(InterestingDLLs)] = { 0 };
if (!result[0]) {
for (size_t i = 0, j = 0; InterestingDLLs[i]; ++i) {
@@ -1019,4 +1135,4 @@ bool OverrideImportedFunction(const char *module_to_patch,
} // namespace __interception
-#endif // SANITIZER_MAC
+#endif // SANITIZER_APPLE
diff --git a/contrib/llvm-project/compiler-rt/lib/interception/interception_win.h b/contrib/llvm-project/compiler-rt/lib/interception/interception_win.h
index 4590013019e3..f6eca82191cb 100644
--- a/contrib/llvm-project/compiler-rt/lib/interception/interception_win.h
+++ b/contrib/llvm-project/compiler-rt/lib/interception/interception_win.h
@@ -41,6 +41,11 @@ bool OverrideImportedFunction(const char *module_to_patch,
const char *function_name, uptr new_function,
uptr *orig_old_func);
+// Sets a callback to be used for reporting errors by interception_win. The
+// callback will be called with printf-like arguments. Intended to be used with
+// __sanitizer::Report. Pass nullptr to disable error reporting (default).
+void SetErrorReportCallback(void (*callback)(const char *format, ...));
+
#if !SANITIZER_WINDOWS64
// Exposed for unittests
bool OverrideFunctionWithDetour(
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan.cpp
index b6adc248157b..7a27b600f203 100644
--- a/contrib/llvm-project/compiler-rt/lib/lsan/lsan.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan.cpp
@@ -13,11 +13,12 @@
#include "lsan.h"
-#include "sanitizer_common/sanitizer_flags.h"
-#include "sanitizer_common/sanitizer_flag_parser.h"
#include "lsan_allocator.h"
#include "lsan_common.h"
#include "lsan_thread.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_interface_internal.h"
bool lsan_inited;
bool lsan_init_is_running;
@@ -35,7 +36,7 @@ void __sanitizer::BufferedStackTrace::UnwindImpl(
uptr pc, uptr bp, void *context, bool request_fast, u32 max_depth) {
using namespace __lsan;
uptr stack_top = 0, stack_bottom = 0;
- if (ThreadContext *t = CurrentThreadContext()) {
+ if (ThreadContextLsanBase *t = GetCurrentThread()) {
stack_top = t->stack_end();
stack_bottom = t->stack_begin();
}
@@ -96,12 +97,11 @@ extern "C" void __lsan_init() {
ReplaceSystemMalloc();
InitTlsSize();
InitializeInterceptors();
- InitializeThreadRegistry();
+ InitializeThreads();
InstallDeadlySignalHandlers(LsanOnDeadlySignal);
InitializeMainThread();
-
- if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit)
- Atexit(DoLeakCheck);
+ InstallAtExitCheckLeaks();
+ InstallAtForkHandler();
InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan.h b/contrib/llvm-project/compiler-rt/lib/lsan/lsan.h
index 1e82ad72f005..0074ad530878 100644
--- a/contrib/llvm-project/compiler-rt/lib/lsan/lsan.h
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan.h
@@ -13,17 +13,17 @@
#include "lsan_thread.h"
#if SANITIZER_POSIX
-#include "lsan_posix.h"
+# include "lsan_posix.h"
#elif SANITIZER_FUCHSIA
-#include "lsan_fuchsia.h"
+# include "lsan_fuchsia.h"
#endif
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
-#define GET_STACK_TRACE(max_size, fast) \
- __sanitizer::BufferedStackTrace stack; \
- stack.Unwind(StackTrace::GetCurrentPc(), \
- GET_CURRENT_FRAME(), nullptr, fast, max_size);
+#define GET_STACK_TRACE(max_size, fast) \
+ __sanitizer::BufferedStackTrace stack; \
+ stack.Unwind(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), nullptr, fast, \
+ max_size);
#define GET_STACK_TRACE_FATAL \
GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal)
@@ -39,12 +39,15 @@ namespace __lsan {
void InitializeInterceptors();
void ReplaceSystemMalloc();
void LsanOnDeadlySignal(int signo, void *siginfo, void *context);
-
-#define ENSURE_LSAN_INITED do { \
- CHECK(!lsan_init_is_running); \
- if (!lsan_inited) \
- __lsan_init(); \
-} while (0)
+void InstallAtExitCheckLeaks();
+void InstallAtForkHandler();
+
+#define ENSURE_LSAN_INITED \
+ do { \
+ CHECK(!lsan_init_is_running); \
+ if (!lsan_inited) \
+ __lsan_init(); \
+ } while (0)
} // namespace __lsan
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_allocator.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_allocator.cpp
index 91e34ebb3214..12d579a9385b 100644
--- a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_allocator.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_allocator.cpp
@@ -27,11 +27,11 @@ extern "C" void *memset(void *ptr, int value, uptr num);
namespace __lsan {
#if defined(__i386__) || defined(__arm__)
-static const uptr kMaxAllowedMallocSize = 1UL << 30;
+static const uptr kMaxAllowedMallocSize = 1ULL << 30;
#elif defined(__mips64) || defined(__aarch64__)
-static const uptr kMaxAllowedMallocSize = 4UL << 30;
+static const uptr kMaxAllowedMallocSize = 4ULL << 30;
#else
-static const uptr kMaxAllowedMallocSize = 8UL << 30;
+static const uptr kMaxAllowedMallocSize = 8ULL << 30;
#endif
static Allocator allocator;
@@ -49,8 +49,11 @@ void InitializeAllocator() {
max_malloc_size = kMaxAllowedMallocSize;
}
+void AllocatorThreadStart() { allocator.InitCache(GetAllocatorCache()); }
+
void AllocatorThreadFinish() {
allocator.SwallowCache(GetAllocatorCache());
+ allocator.DestroyCache(GetAllocatorCache());
}
static ChunkMetadata *Metadata(const void *p) {
@@ -65,12 +68,14 @@ static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
m->stack_trace_id = StackDepotPut(stack);
m->requested_size = size;
atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
+ RunMallocHooks(p, size);
}
static void RegisterDeallocation(void *p) {
if (!p) return;
ChunkMetadata *m = Metadata(p);
CHECK(m);
+ RunFreeHooks(p);
atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
}
@@ -88,6 +93,11 @@ void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
size = 1;
if (size > max_malloc_size)
return ReportAllocationSizeTooBig(size, stack);
+ if (UNLIKELY(IsRssLimitExceeded())) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportRssLimitExceeded(&stack);
+ }
void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
if (UNLIKELY(!p)) {
SetAllocatorOutOfMemory();
@@ -99,8 +109,6 @@ void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
if (cleared && allocator.FromPrimary(p))
memset(p, 0, size);
RegisterAllocation(stack, p, size);
- if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(p, size);
- RunMallocHooks(p, size);
return p;
}
@@ -115,8 +123,6 @@ static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) {
}
void Deallocate(void *p) {
- if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
- RunFreeHooks(p);
RegisterDeallocation(p);
allocator.Deallocate(GetAllocatorCache(), p);
}
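
With RunMallocHooks/RunFreeHooks now invoked from RegisterAllocation/RegisterDeallocation and the weak __sanitizer_malloc_hook symbols gone (see the deleted block further down), user code observes allocations through the installed-hooks API instead. A minimal sketch against the public header, assuming the program is built with LSan linked in:

    #include <sanitizer/allocator_interface.h>
    #include <cstdio>

    static void OnMalloc(const volatile void *ptr, size_t size) {
      fprintf(stderr, "alloc %p (%zu bytes)\n", (const void *)ptr, size);
    }

    static void OnFree(const volatile void *ptr) {
      fprintf(stderr, "free  %p\n", (const void *)ptr);
    }

    // Returns nonzero on success; both hooks then fire for every heap op.
    __attribute__((constructor)) static void InstallHooks() {
      __sanitizer_install_malloc_and_free_hooks(OnMalloc, OnFree);
    }
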
@@ -142,12 +148,34 @@ void GetAllocatorCacheRange(uptr *begin, uptr *end) {
*end = *begin + sizeof(AllocatorCache);
}
+static const void *GetMallocBegin(const void *p) {
+ if (!p)
+ return nullptr;
+ void *beg = allocator.GetBlockBegin(p);
+ if (!beg)
+ return nullptr;
+ ChunkMetadata *m = Metadata(beg);
+ if (!m)
+ return nullptr;
+ if (!m->allocated)
+ return nullptr;
+ if (m->requested_size == 0)
+ return nullptr;
+ return (const void *)beg;
+}
+
uptr GetMallocUsableSize(const void *p) {
+ if (!p)
+ return 0;
ChunkMetadata *m = Metadata(p);
if (!m) return 0;
return m->requested_size;
}
+uptr GetMallocUsableSizeFast(const void *p) {
+ return Metadata(p)->requested_size;
+}
+
int lsan_posix_memalign(void **memptr, uptr alignment, uptr size,
const StackTrace &stack) {
if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
@@ -270,6 +298,10 @@ uptr GetUserBegin(uptr chunk) {
return chunk;
}
+uptr GetUserAddr(uptr chunk) {
+ return chunk;
+}
+
LsanMetadata::LsanMetadata(uptr chunk) {
metadata_ = Metadata(reinterpret_cast<void *>(chunk));
CHECK(metadata_);
@@ -299,7 +331,7 @@ void ForEachChunk(ForEachChunkCallback callback, void *arg) {
allocator.ForEachChunk(callback, arg);
}
-IgnoreObjectResult IgnoreObjectLocked(const void *p) {
+IgnoreObjectResult IgnoreObject(const void *p) {
void *chunk = allocator.GetBlockBegin(p);
if (!chunk || p < chunk) return kIgnoreObjectInvalid;
ChunkMetadata *m = Metadata(chunk);
@@ -314,15 +346,6 @@ IgnoreObjectResult IgnoreObjectLocked(const void *p) {
}
}
-void GetAdditionalThreadContextPtrs(ThreadContextBase *tctx, void *ptrs) {
- // This function can be used to treat memory reachable from `tctx` as live.
- // This is useful for threads that have been created but not yet started.
-
- // This is currently a no-op because the LSan `pthread_create()` interceptor
- // blocks until the child thread starts which keeps the thread's `arg` pointer
- // live.
-}
-
} // namespace __lsan
using namespace __lsan;
@@ -343,7 +366,7 @@ uptr __sanitizer_get_heap_size() {
}
SANITIZER_INTERFACE_ATTRIBUTE
-uptr __sanitizer_get_free_bytes() { return 0; }
+uptr __sanitizer_get_free_bytes() { return 1; }
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_unmapped_bytes() { return 0; }
@@ -352,23 +375,29 @@ SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
SANITIZER_INTERFACE_ATTRIBUTE
-int __sanitizer_get_ownership(const void *p) { return Metadata(p) != nullptr; }
+int __sanitizer_get_ownership(const void *p) {
+ return GetMallocBegin(p) != nullptr;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+const void * __sanitizer_get_allocated_begin(const void *p) {
+ return GetMallocBegin(p);
+}
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_allocated_size(const void *p) {
return GetMallocUsableSize(p);
}
-#if !SANITIZER_SUPPORTS_WEAK_HOOKS
-// Provide default (no-op) implementation of malloc hooks.
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __sanitizer_malloc_hook(void *ptr, uptr size) {
- (void)ptr;
- (void)size;
-}
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __sanitizer_free_hook(void *ptr) {
- (void)ptr;
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_allocated_size_fast(const void *p) {
+ DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
+ uptr ret = GetMallocUsableSizeFast(p);
+ DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
+ return ret;
}
-#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_purge_allocator() { allocator.ForceReleaseToOS(); }
+
} // extern "C"
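
The new GetMallocBegin path makes __sanitizer_get_ownership and the new __sanitizer_get_allocated_begin accept interior pointers, while __sanitizer_get_allocated_size still expects the chunk start. A small usage sketch, assuming a build with -fsanitize=leak (or address):

    #include <sanitizer/allocator_interface.h>
    #include <cassert>
    #include <cstdlib>

    int main() {
      char *p = static_cast<char *>(malloc(64));
      // Interior pointers map back to the chunk start via GetBlockBegin.
      assert(__sanitizer_get_ownership(p + 10));
      assert(__sanitizer_get_allocated_begin(p + 10) == p);
      // Size queries still take the chunk start itself.
      assert(__sanitizer_get_allocated_size(p) == 64);
      free(p);
      return 0;
    }
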
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_allocator.h b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_allocator.h
index 9d763789154f..5eed0cbdb309 100644
--- a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_allocator.h
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_allocator.h
@@ -32,6 +32,7 @@ template<typename Callable>
void ForEachChunk(const Callable &callback);
void GetAllocatorCacheRange(uptr *begin, uptr *end);
+void AllocatorThreadStart();
void AllocatorThreadFinish();
void InitializeAllocator();
@@ -49,8 +50,7 @@ struct ChunkMetadata {
u32 stack_trace_id;
};
-#if defined(__mips64) || defined(__aarch64__) || defined(__i386__) || \
- defined(__arm__) || SANITIZER_RISCV64
+#if !SANITIZER_CAN_USE_ALLOCATOR64
template <typename AddressSpaceViewTy>
struct AP32 {
static const uptr kSpaceBeg = 0;
@@ -65,26 +65,45 @@ struct AP32 {
template <typename AddressSpaceView>
using PrimaryAllocatorASVT = SizeClassAllocator32<AP32<AddressSpaceView>>;
using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
-#elif defined(__x86_64__) || defined(__powerpc64__) || defined(__s390x__)
-# if SANITIZER_FUCHSIA
+#else
+# if SANITIZER_FUCHSIA || defined(__powerpc64__)
const uptr kAllocatorSpace = ~(uptr)0;
+# if SANITIZER_RISCV64
+// See the comments in compiler-rt/lib/asan/asan_allocator.h for why these
+// values were chosen.
+const uptr kAllocatorSize = UINT64_C(1) << 33; // 8GB
+using LSanSizeClassMap = SizeClassMap</*kNumBits=*/2,
+ /*kMinSizeLog=*/5,
+ /*kMidSizeLog=*/8,
+ /*kMaxSizeLog=*/18,
+ /*kNumCachedHintT=*/8,
+ /*kMaxBytesCachedLog=*/10>;
+static_assert(LSanSizeClassMap::kNumClassesRounded <= 32,
+ "32 size classes is the optimal number to ensure tests run "
+              "efficiently on Fuchsia.");
+# else
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
-# elif defined(__powerpc64__)
-const uptr kAllocatorSpace = 0xa0000000000ULL;
-const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
-#elif defined(__s390x__)
-const uptr kAllocatorSpace = 0x40000000000ULL;
-const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
-# else
+using LSanSizeClassMap = DefaultSizeClassMap;
+# endif
+# elif SANITIZER_RISCV64
+const uptr kAllocatorSpace = ~(uptr)0;
+const uptr kAllocatorSize = 0x2000000000ULL; // 128G.
+using LSanSizeClassMap = DefaultSizeClassMap;
+# elif SANITIZER_APPLE
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
-# endif
+using LSanSizeClassMap = DefaultSizeClassMap;
+# else
+const uptr kAllocatorSpace = 0x500000000000ULL;
+const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
+using LSanSizeClassMap = DefaultSizeClassMap;
+# endif
template <typename AddressSpaceViewTy>
struct AP64 { // Allocator64 parameters. Deliberately using a short name.
static const uptr kSpaceBeg = kAllocatorSpace;
static const uptr kSpaceSize = kAllocatorSize;
static const uptr kMetadataSize = sizeof(ChunkMetadata);
- typedef DefaultSizeClassMap SizeClassMap;
+ using SizeClassMap = LSanSizeClassMap;
typedef NoOpMapUnmapCallback MapUnmapCallback;
static const uptr kFlags = 0;
using AddressSpaceView = AddressSpaceViewTy;
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.cpp
index 74400d2e8426..0ecded8b28cd 100644
--- a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.cpp
@@ -26,14 +26,26 @@
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#if CAN_SANITIZE_LEAKS
+
+# if SANITIZER_APPLE
+// https://github.com/apple-oss-distributions/objc4/blob/8701d5672d3fd3cd817aeb84db1077aafe1a1604/runtime/objc-runtime-new.h#L127
+# if SANITIZER_IOS && !SANITIZER_IOSSIM
+# define OBJC_DATA_MASK 0x0000007ffffffff8UL
+# else
+# define OBJC_DATA_MASK 0x00007ffffffffff8UL
+# endif
+# endif
+
namespace __lsan {
// This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
// also to protect the global list of root regions.
-BlockingMutex global_mutex(LINKER_INITIALIZED);
+static Mutex global_mutex;
-Flags lsan_flags;
+void LockGlobal() SANITIZER_ACQUIRE(global_mutex) { global_mutex.Lock(); }
+void UnlockGlobal() SANITIZER_RELEASE(global_mutex) { global_mutex.Unlock(); }
+Flags lsan_flags;
void DisableCounterUnderflow() {
if (common_flags()->detect_leaks) {
@@ -43,43 +55,48 @@ void DisableCounterUnderflow() {
}
void Flags::SetDefaults() {
-#define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
-#include "lsan_flags.inc"
-#undef LSAN_FLAG
+# define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+# include "lsan_flags.inc"
+# undef LSAN_FLAG
}
void RegisterLsanFlags(FlagParser *parser, Flags *f) {
-#define LSAN_FLAG(Type, Name, DefaultValue, Description) \
- RegisterFlag(parser, #Name, Description, &f->Name);
-#include "lsan_flags.inc"
-#undef LSAN_FLAG
+# define LSAN_FLAG(Type, Name, DefaultValue, Description) \
+ RegisterFlag(parser, #Name, Description, &f->Name);
+# include "lsan_flags.inc"
+# undef LSAN_FLAG
}
-#define LOG_POINTERS(...) \
- do { \
- if (flags()->log_pointers) Report(__VA_ARGS__); \
- } while (0)
+# define LOG_POINTERS(...) \
+ do { \
+ if (flags()->log_pointers) \
+ Report(__VA_ARGS__); \
+ } while (0)
-#define LOG_THREADS(...) \
- do { \
- if (flags()->log_threads) Report(__VA_ARGS__); \
- } while (0)
+# define LOG_THREADS(...) \
+ do { \
+ if (flags()->log_threads) \
+ Report(__VA_ARGS__); \
+ } while (0)
class LeakSuppressionContext {
bool parsed = false;
SuppressionContext context;
bool suppressed_stacks_sorted = true;
InternalMmapVector<u32> suppressed_stacks;
+ const LoadedModule *suppress_module = nullptr;
- Suppression *GetSuppressionForAddr(uptr addr);
void LazyInit();
+ Suppression *GetSuppressionForAddr(uptr addr);
+ bool SuppressInvalid(const StackTrace &stack);
+ bool SuppressByRule(const StackTrace &stack, uptr hit_count, uptr total_size);
public:
  LeakSuppressionContext(const char *suppression_types[],
                         int suppression_types_num)
      : context(suppression_types, suppression_types_num) {}
- Suppression *GetSuppressionForStack(u32 stack_trace_id);
+ bool Suppress(u32 stack_trace_id, uptr hit_count, uptr total_size);
const InternalMmapVector<u32> &GetSortedSuppressedStacks() {
if (!suppressed_stacks_sorted) {
@@ -94,17 +111,17 @@ class LeakSuppressionContext {
ALIGNED(64) static char suppression_placeholder[sizeof(LeakSuppressionContext)];
static LeakSuppressionContext *suppression_ctx = nullptr;
static const char kSuppressionLeak[] = "leak";
-static const char *kSuppressionTypes[] = { kSuppressionLeak };
+static const char *kSuppressionTypes[] = {kSuppressionLeak};
static const char kStdSuppressions[] =
-#if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
+# if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
// For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
// definition.
"leak:*pthread_exit*\n"
-#endif // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
-#if SANITIZER_MAC
+# endif // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
+# if SANITIZER_APPLE
// For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173
"leak:*_os_trace*\n"
-#endif
+# endif
// TLS leak in some glibc versions, described in
// https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
"leak:*tls_get_addr*\n";
@@ -122,26 +139,109 @@ void LeakSuppressionContext::LazyInit() {
if (&__lsan_default_suppressions)
context.Parse(__lsan_default_suppressions());
context.Parse(kStdSuppressions);
+ if (flags()->use_tls && flags()->use_ld_allocations)
+ suppress_module = GetLinker();
}
}
-static LeakSuppressionContext *GetSuppressionContext() {
- CHECK(suppression_ctx);
- return suppression_ctx;
+Suppression *LeakSuppressionContext::GetSuppressionForAddr(uptr addr) {
+ Suppression *s = nullptr;
+
+ // Suppress by module name.
+ const char *module_name = Symbolizer::GetOrInit()->GetModuleNameForPc(addr);
+ if (!module_name)
+ module_name = "<unknown module>";
+ if (context.Match(module_name, kSuppressionLeak, &s))
+ return s;
+
+ // Suppress by file or function name.
+ SymbolizedStackHolder symbolized_stack(
+ Symbolizer::GetOrInit()->SymbolizePC(addr));
+ const SymbolizedStack *frames = symbolized_stack.get();
+ for (const SymbolizedStack *cur = frames; cur; cur = cur->next) {
+ if (context.Match(cur->info.function, kSuppressionLeak, &s) ||
+ context.Match(cur->info.file, kSuppressionLeak, &s)) {
+ break;
+ }
+ }
+ return s;
}
-static InternalMmapVector<RootRegion> *root_regions;
+static uptr GetCallerPC(const StackTrace &stack) {
+ // The top frame is our malloc/calloc/etc. The next frame is the caller.
+ if (stack.size >= 2)
+ return stack.trace[1];
+ return 0;
+}
-InternalMmapVector<RootRegion> const *GetRootRegions() { return root_regions; }
+# if SANITIZER_APPLE
+// Several pointers in the Objective-C runtime (method cache and class_rw_t,
+// for example) are tagged with additional bits we need to strip.
+static inline void *TransformPointer(void *p) {
+ uptr ptr = reinterpret_cast<uptr>(p);
+ return reinterpret_cast<void *>(ptr & OBJC_DATA_MASK);
+}
+# endif
-void InitializeRootRegions() {
- CHECK(!root_regions);
- ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)];
- root_regions = new (placeholder) InternalMmapVector<RootRegion>();
+// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
+// covers dynamically allocated TLS blocks, internal dynamic loader's loaded
+// modules accounting etc.
+// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
+// They are allocated with a __libc_memalign() call in allocate_and_init()
+// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
+// blocks, but we can make sure they come from our own allocator by intercepting
+// __libc_memalign(). On top of that, there is no easy way to reach them. Their
+// addresses are stored in a dynamically allocated array (the DTV) which is
+// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
+// being reachable from the static TLS, and the dynamic TLS being reachable from
+// the DTV. This is because the initial DTV is allocated before our interception
+// mechanism kicks in, and thus we don't recognize it as allocated memory. We
+// can't special-case it either, since we don't know its size.
+// Our solution is to include in the root set all allocations made from
+// ld-linux.so (which is where allocate_and_init() is implemented). This is
+// guaranteed to include all dynamic TLS blocks (and possibly other allocations
+// which we don't care about).
+// On all other platforms, this simply checks to ensure that the caller pc is
+// valid before reporting chunks as leaked.
+bool LeakSuppressionContext::SuppressInvalid(const StackTrace &stack) {
+ uptr caller_pc = GetCallerPC(stack);
+ // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
+ // it as reachable, as we can't properly report its allocation stack anyway.
+ return !caller_pc ||
+ (suppress_module && suppress_module->containsAddress(caller_pc));
+}
+
+bool LeakSuppressionContext::SuppressByRule(const StackTrace &stack,
+ uptr hit_count, uptr total_size) {
+ for (uptr i = 0; i < stack.size; i++) {
+ Suppression *s = GetSuppressionForAddr(
+ StackTrace::GetPreviousInstructionPc(stack.trace[i]));
+ if (s) {
+ s->weight += total_size;
+ atomic_fetch_add(&s->hit_count, hit_count, memory_order_relaxed);
+ return true;
+ }
+ }
+ return false;
+}
+
+bool LeakSuppressionContext::Suppress(u32 stack_trace_id, uptr hit_count,
+ uptr total_size) {
+ LazyInit();
+ StackTrace stack = StackDepotGet(stack_trace_id);
+ if (!SuppressInvalid(stack) && !SuppressByRule(stack, hit_count, total_size))
+ return false;
+ suppressed_stacks_sorted = false;
+ suppressed_stacks.push_back(stack_trace_id);
+ return true;
+}
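
Suppress ties the two paths together: SuppressInvalid handles unknown callers and ld.so allocations, and SuppressByRule walks every frame, matching module, function, and file names against `leak:` patterns. Those patterns can come from a suppressions file or from the hook referenced above; a sketch of the latter (library and function names are made up):

    #include <sanitizer/lsan_interface.h>

    // Each rule is matched per frame against module, function, and file names.
    extern "C" const char *__lsan_default_suppressions() {
      return "leak:libfoo.so\n"         // by module (hypothetical library)
             "leak:FooCreateContext\n"  // by function (hypothetical)
             "leak:third_party/*\n";    // by source path
    }
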
+
+static LeakSuppressionContext *GetSuppressionContext() {
+ CHECK(suppression_ctx);
+ return suppression_ctx;
}
void InitCommonLsan() {
- InitializeRootRegions();
if (common_flags()->detect_leaks) {
// Initialization which can fail or print warnings should only be done if
// LSan is actually enabled.
@@ -150,30 +250,42 @@ void InitCommonLsan() {
}
}
-class Decorator: public __sanitizer::SanitizerCommonDecorator {
+class Decorator : public __sanitizer::SanitizerCommonDecorator {
public:
- Decorator() : SanitizerCommonDecorator() { }
+ Decorator() : SanitizerCommonDecorator() {}
const char *Error() { return Red(); }
const char *Leak() { return Blue(); }
};
-static inline bool CanBeAHeapPointer(uptr p) {
+static inline bool MaybeUserPointer(uptr p) {
// Since our heap is located in mmap-ed memory, we can assume a sensible lower
// bound on heap addresses.
const uptr kMinAddress = 4 * 4096;
- if (p < kMinAddress) return false;
-#if defined(__x86_64__)
- // Accept only canonical form user-space addresses.
- return ((p >> 47) == 0);
-#elif defined(__mips64)
+ if (p < kMinAddress)
+ return false;
+# if defined(__x86_64__)
+ // TODO: support LAM48 and 5 level page tables.
+ // LAM_U57 mask format
+ // * top byte: 0x81 because the format is: [0] [6-bit tag] [0]
+ // * top-1 byte: 0xff because it should be 0
+ // * top-2 byte: 0x80 because Linux uses 128 TB VMA ending at 0x7fffffffffff
+ constexpr uptr kLAM_U57Mask = 0x81ff80;
+ constexpr uptr kPointerMask = kLAM_U57Mask << 40;
+ return ((p & kPointerMask) == 0);
+# elif defined(__mips64)
return ((p >> 40) == 0);
-#elif defined(__aarch64__)
- unsigned runtimeVMA =
- (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
- return ((p >> runtimeVMA) == 0);
-#else
+# elif defined(__aarch64__)
+ // TBI (Top Byte Ignore) feature of AArch64: bits [63:56] are ignored in
+ // address translation and can be used to store a tag.
+ constexpr uptr kPointerMask = 255ULL << 48;
+ // Accept up to 48 bit VMA.
+ return ((p & kPointerMask) == 0);
+# elif defined(__loongarch_lp64)
+  // Allow a 47-bit user-space VMA for now.
+ return ((p >> 47) == 0);
+# else
return true;
-#endif
+# endif
}
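
The three byte-masks listed in the comment land at bits 63:56, 55:48, and 47:40 once kLAM_U57Mask is shifted by 40, which is exactly what makes a canonical 47-bit Linux user address pass and anything tagged or kernel-half fail. A compile-time check of that layout:

    #include <cstdint>

    constexpr uint64_t kLAM_U57Mask = 0x81ff80;
    constexpr uint64_t kPointerMask = kLAM_U57Mask << 40;

    // 0x81 in the top byte, 0xff below it, 0x80 in the byte holding bit 47:
    static_assert(kPointerMask == 0x81ff800000000000ULL,
                  "byte layout as documented");
    // The last 128 TB user address passes the check...
    static_assert((0x7fffffffffffULL & kPointerMask) == 0,
                  "max user address accepted");
    // ...while a kernel-half address is rejected.
    static_assert((0xffff800000000000ULL & kPointerMask) != 0,
                  "kernel address rejected");
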
// Scans the memory range, looking for byte patterns that point into allocator
@@ -182,37 +294,46 @@ static inline bool CanBeAHeapPointer(uptr p) {
// (|tag| = kReachable) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
-void ScanRangeForPointers(uptr begin, uptr end,
- Frontier *frontier,
+void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier,
const char *region_type, ChunkTag tag) {
CHECK(tag == kReachable || tag == kIndirectlyLeaked);
const uptr alignment = flags()->pointer_alignment();
- LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, begin, end);
+ LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, (void *)begin,
+ (void *)end);
uptr pp = begin;
if (pp % alignment)
pp = pp + alignment - pp % alignment;
for (; pp + sizeof(void *) <= end; pp += alignment) {
void *p = *reinterpret_cast<void **>(pp);
- if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
+# if SANITIZER_APPLE
+ p = TransformPointer(p);
+# endif
+ if (!MaybeUserPointer(reinterpret_cast<uptr>(p)))
+ continue;
uptr chunk = PointsIntoChunk(p);
- if (!chunk) continue;
+ if (!chunk)
+ continue;
// Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
- if (chunk == begin) continue;
+ if (chunk == begin)
+ continue;
LsanMetadata m(chunk);
- if (m.tag() == kReachable || m.tag() == kIgnored) continue;
+ if (m.tag() == kReachable || m.tag() == kIgnored)
+ continue;
// Do this check relatively late so we can log only the interesting cases.
if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
LOG_POINTERS(
"%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
"%zu.\n",
- pp, p, chunk, chunk + m.requested_size(), m.requested_size());
+ (void *)pp, p, (void *)chunk, (void *)(chunk + m.requested_size()),
+ m.requested_size());
continue;
}
m.set_tag(tag);
- LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
- chunk, chunk + m.requested_size(), m.requested_size());
+ LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n",
+ (void *)pp, p, (void *)chunk,
+ (void *)(chunk + m.requested_size()), m.requested_size());
if (frontier)
frontier->push_back(chunk);
}
@@ -235,28 +356,31 @@ void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
}
}
-void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
- Frontier *frontier = reinterpret_cast<Frontier *>(arg);
- ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
+void ScanExtraStackRanges(const InternalMmapVector<Range> &ranges,
+ Frontier *frontier) {
+ for (uptr i = 0; i < ranges.size(); i++) {
+ ScanRangeForPointers(ranges[i].begin, ranges[i].end, frontier, "FAKE STACK",
+ kReachable);
+ }
}
-#if SANITIZER_FUCHSIA
+# if SANITIZER_FUCHSIA
// Fuchsia handles all threads together with its own callback.
-static void ProcessThreads(SuspendedThreadsList const &, Frontier *) {}
+static void ProcessThreads(SuspendedThreadsList const &, Frontier *, tid_t,
+ uptr) {}
-#else
+# else
-#if SANITIZER_ANDROID
+# if SANITIZER_ANDROID
// FIXME: Move this out into *libcdep.cpp
extern "C" SANITIZER_WEAK_ATTRIBUTE void __libc_iterate_dynamic_tls(
pid_t, void (*cb)(void *, void *, uptr, void *), void *);
-#endif
+# endif
static void ProcessThreadRegistry(Frontier *frontier) {
InternalMmapVector<uptr> ptrs;
- GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
- GetAdditionalThreadContextPtrs, &ptrs);
+ GetAdditionalThreadContextPtrsLocked(&ptrs);
for (uptr i = 0; i < ptrs.size(); ++i) {
void *ptr = reinterpret_cast<void *>(ptrs[i]);
@@ -276,32 +400,38 @@ static void ProcessThreadRegistry(Frontier *frontier) {
// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
- Frontier *frontier) {
+ Frontier *frontier, tid_t caller_tid,
+ uptr caller_sp) {
InternalMmapVector<uptr> registers;
+ InternalMmapVector<Range> extra_ranges;
for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
- LOG_THREADS("Processing thread %d.\n", os_id);
+ LOG_THREADS("Processing thread %llu.\n", os_id);
uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
DTLS *dtls;
- bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
- &tls_begin, &tls_end,
- &cache_begin, &cache_end, &dtls);
+ bool thread_found =
+ GetThreadRangesLocked(os_id, &stack_begin, &stack_end, &tls_begin,
+ &tls_end, &cache_begin, &cache_end, &dtls);
if (!thread_found) {
// If a thread can't be found in the thread registry, it's probably in the
// process of destruction. Log this event and move on.
- LOG_THREADS("Thread %d not found in registry.\n", os_id);
+ LOG_THREADS("Thread %llu not found in registry.\n", os_id);
continue;
}
uptr sp;
PtraceRegistersStatus have_registers =
suspended_threads.GetRegistersAndSP(i, &registers, &sp);
if (have_registers != REGISTERS_AVAILABLE) {
- Report("Unable to get registers from thread %d.\n", os_id);
+ Report("Unable to get registers from thread %llu.\n", os_id);
// If unable to get SP, consider the entire stack to be reachable unless
// GetRegistersAndSP failed with ESRCH.
- if (have_registers == REGISTERS_UNAVAILABLE_FATAL) continue;
+ if (have_registers == REGISTERS_UNAVAILABLE_FATAL)
+ continue;
sp = stack_begin;
}
+ if (suspended_threads.GetThreadID(i) == caller_tid) {
+ sp = caller_sp;
+ }
if (flags()->use_registers && have_registers) {
uptr registers_begin = reinterpret_cast<uptr>(registers.data());
@@ -312,7 +442,8 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
}
if (flags()->use_stacks) {
- LOG_THREADS("Stack at %p-%p (SP = %p).\n", stack_begin, stack_end, sp);
+ LOG_THREADS("Stack at %p-%p (SP = %p).\n", (void *)stack_begin,
+ (void *)stack_end, (void *)sp);
if (sp < stack_begin || sp >= stack_end) {
// SP is outside the recorded stack range (e.g. the thread is running a
// signal handler on alternate stack, or swapcontext was used).
@@ -326,19 +457,21 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
stack_begin += page_size;
}
LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n",
- skipped, stack_begin, stack_end);
+ skipped, (void *)stack_begin, (void *)stack_end);
} else {
// Shrink the stack range to ignore out-of-scope values.
stack_begin = sp;
}
ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
kReachable);
- ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier);
+ extra_ranges.clear();
+ GetThreadExtraStackRangesLocked(os_id, &extra_ranges);
+ ScanExtraStackRanges(extra_ranges, frontier);
}
if (flags()->use_tls) {
if (tls_begin) {
- LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end);
+ LOG_THREADS("TLS at %p-%p.\n", (void *)tls_begin, (void *)tls_end);
// If the tls and cache ranges don't overlap, scan full tls range,
// otherwise, only scan the non-overlapping portions
if (cache_begin == cache_end || tls_end < cache_begin ||
@@ -353,7 +486,7 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
kReachable);
}
}
-#if SANITIZER_ANDROID
+# if SANITIZER_ANDROID
auto *cb = +[](void *dtls_begin, void *dtls_end, uptr /*dso_idd*/,
void *arg) -> void {
ScanRangeForPointers(reinterpret_cast<uptr>(dtls_begin),
@@ -366,13 +499,14 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
// thread is suspended in the middle of updating its DTLS. IOWs, we
// could scan already freed memory. (probably fine for now)
__libc_iterate_dynamic_tls(os_id, cb, frontier);
-#else
+# else
if (dtls && !DTLSInDestruction(dtls)) {
ForEachDVT(dtls, [&](const DTLS::DTV &dtv, int id) {
uptr dtls_beg = dtv.beg;
uptr dtls_end = dtls_beg + dtv.size;
if (dtls_beg < dtls_end) {
- LOG_THREADS("DTLS %zu at %p-%p.\n", id, dtls_beg, dtls_end);
+ LOG_THREADS("DTLS %d at %p-%p.\n", id, (void *)dtls_beg,
+ (void *)dtls_end);
ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
kReachable);
}
@@ -380,9 +514,9 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
} else {
// We are handling a thread with DTLS under destruction. Log about
// this and continue.
- LOG_THREADS("Thread %d has DTLS under destruction.\n", os_id);
+ LOG_THREADS("Thread %llu has DTLS under destruction.\n", os_id);
}
-#endif
+# endif
}
}
@@ -390,39 +524,54 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
ProcessThreadRegistry(frontier);
}
-#endif // SANITIZER_FUCHSIA
-
-void ScanRootRegion(Frontier *frontier, const RootRegion &root_region,
- uptr region_begin, uptr region_end, bool is_readable) {
- uptr intersection_begin = Max(root_region.begin, region_begin);
- uptr intersection_end = Min(region_end, root_region.begin + root_region.size);
- if (intersection_begin >= intersection_end) return;
- LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
- root_region.begin, root_region.begin + root_region.size,
- region_begin, region_end,
- is_readable ? "readable" : "unreadable");
- if (is_readable)
- ScanRangeForPointers(intersection_begin, intersection_end, frontier, "ROOT",
- kReachable);
+# endif // SANITIZER_FUCHSIA
+
+// A map keyed by [region_begin, region_end) pairs; the mapped value counts
+// how many times the region has been registered.
+using RootRegions = DenseMap<detail::DenseMapPair<uptr, uptr>, uptr>;
+
+static RootRegions &GetRootRegionsLocked() {
+ global_mutex.CheckLocked();
+ static RootRegions *regions = nullptr;
+ alignas(RootRegions) static char placeholder[sizeof(RootRegions)];
+ if (!regions)
+ regions = new (placeholder) RootRegions();
+ return *regions;
}
-static void ProcessRootRegion(Frontier *frontier,
- const RootRegion &root_region) {
- MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
- MemoryMappedSegment segment;
- while (proc_maps.Next(&segment)) {
- ScanRootRegion(frontier, root_region, segment.start, segment.end,
- segment.IsReadable());
+bool HasRootRegions() { return !GetRootRegionsLocked().empty(); }
+
+void ScanRootRegions(Frontier *frontier,
+ const InternalMmapVectorNoCtor<Region> &mapped_regions) {
+ if (!flags()->use_root_regions)
+ return;
+
+ InternalMmapVector<Region> regions;
+ GetRootRegionsLocked().forEach([&](const auto &kv) {
+ regions.push_back({kv.first.first, kv.first.second});
+ return true;
+ });
+
+ InternalMmapVector<Region> intersection;
+ Intersect(mapped_regions, regions, intersection);
+
+ for (const Region &r : intersection) {
+ LOG_POINTERS("Root region intersects with mapped region at %p-%p\n",
+ (void *)r.begin, (void *)r.end);
+ ScanRangeForPointers(r.begin, r.end, frontier, "ROOT", kReachable);
}
}
// Scans root regions for heap pointers.
static void ProcessRootRegions(Frontier *frontier) {
- if (!flags()->use_root_regions) return;
- CHECK(root_regions);
- for (uptr i = 0; i < root_regions->size(); i++) {
- ProcessRootRegion(frontier, (*root_regions)[i]);
- }
+ if (!flags()->use_root_regions || !HasRootRegions())
+ return;
+ MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
+ MemoryMappedSegment segment;
+ InternalMmapVector<Region> mapped_regions;
+ while (proc_maps.Next(&segment))
+ if (segment.IsReadable())
+ mapped_regions.push_back({segment.start, segment.end});
+ ScanRootRegions(frontier, mapped_regions);
}
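
Root regions are registered under global_mutex via the public interface; at leak-check time only the parts of a registered region that intersect readable mappings are scanned, per the Intersect call above. A usage sketch:

    #include <sanitizer/lsan_interface.h>
    #include <sys/mman.h>

    int main() {
      // Memory LSan would not otherwise treat as a root (a custom mapping).
      const size_t kSize = 1 << 20;
      void *region = mmap(nullptr, kSize, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      __lsan_register_root_region(region, kSize);
      // ... store heap pointers in `region`; they now count as reachable ...
      __lsan_unregister_root_region(region, kSize);
      munmap(region, kSize);
      return 0;
    }
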
static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
@@ -459,8 +608,8 @@ static void IgnoredSuppressedCb(uptr chunk, void *arg) {
if (idx >= suppressed.size() || m.stack_trace_id() != suppressed[idx])
return;
- LOG_POINTERS("Suppressed: chunk %p-%p of size %zu.\n", chunk,
- chunk + m.requested_size(), m.requested_size());
+ LOG_POINTERS("Suppressed: chunk %p-%p of size %zu.\n", (void *)chunk,
+ (void *)(chunk + m.requested_size()), m.requested_size());
m.set_tag(kIgnored);
}
@@ -471,82 +620,16 @@ static void CollectIgnoredCb(uptr chunk, void *arg) {
chunk = GetUserBegin(chunk);
LsanMetadata m(chunk);
if (m.allocated() && m.tag() == kIgnored) {
- LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n",
- chunk, chunk + m.requested_size(), m.requested_size());
+ LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n", (void *)chunk,
+ (void *)(chunk + m.requested_size()), m.requested_size());
reinterpret_cast<Frontier *>(arg)->push_back(chunk);
}
}
-static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
- CHECK(stack_id);
- StackTrace stack = map->Get(stack_id);
- // The top frame is our malloc/calloc/etc. The next frame is the caller.
- if (stack.size >= 2)
- return stack.trace[1];
- return 0;
-}
-
-struct InvalidPCParam {
- Frontier *frontier;
- StackDepotReverseMap *stack_depot_reverse_map;
- bool skip_linker_allocations;
-};
-
-// ForEachChunk callback. If the caller pc is invalid or is within the linker,
-// mark as reachable. Called by ProcessPlatformSpecificAllocations.
-static void MarkInvalidPCCb(uptr chunk, void *arg) {
- CHECK(arg);
- InvalidPCParam *param = reinterpret_cast<InvalidPCParam *>(arg);
- chunk = GetUserBegin(chunk);
- LsanMetadata m(chunk);
- if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) {
- u32 stack_id = m.stack_trace_id();
- uptr caller_pc = 0;
- if (stack_id > 0)
- caller_pc = GetCallerPC(stack_id, param->stack_depot_reverse_map);
- // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
- // it as reachable, as we can't properly report its allocation stack anyway.
- if (caller_pc == 0 || (param->skip_linker_allocations &&
- GetLinker()->containsAddress(caller_pc))) {
- m.set_tag(kReachable);
- param->frontier->push_back(chunk);
- }
- }
-}
-
-// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
-// covers dynamically allocated TLS blocks, internal dynamic loader's loaded
-// modules accounting etc.
-// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
-// They are allocated with a __libc_memalign() call in allocate_and_init()
-// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
-// blocks, but we can make sure they come from our own allocator by intercepting
-// __libc_memalign(). On top of that, there is no easy way to reach them. Their
-// addresses are stored in a dynamically allocated array (the DTV) which is
-// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
-// being reachable from the static TLS, and the dynamic TLS being reachable from
-// the DTV. This is because the initial DTV is allocated before our interception
-// mechanism kicks in, and thus we don't recognize it as allocated memory. We
-// can't special-case it either, since we don't know its size.
-// Our solution is to include in the root set all allocations made from
-// ld-linux.so (which is where allocate_and_init() is implemented). This is
-// guaranteed to include all dynamic TLS blocks (and possibly other allocations
-// which we don't care about).
-// On all other platforms, this simply checks to ensure that the caller pc is
-// valid before reporting chunks as leaked.
-void ProcessPC(Frontier *frontier) {
- StackDepotReverseMap stack_depot_reverse_map;
- InvalidPCParam arg;
- arg.frontier = frontier;
- arg.stack_depot_reverse_map = &stack_depot_reverse_map;
- arg.skip_linker_allocations =
- flags()->use_tls && flags()->use_ld_allocations && GetLinker() != nullptr;
- ForEachChunk(MarkInvalidPCCb, &arg);
-}
-
// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
- Frontier *frontier) {
+ Frontier *frontier, tid_t caller_tid,
+ uptr caller_sp) {
const InternalMmapVector<u32> &suppressed_stacks =
GetSuppressionContext()->GetSortedSuppressedStacks();
if (!suppressed_stacks.empty()) {
@@ -555,13 +638,10 @@ static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
}
ForEachChunk(CollectIgnoredCb, frontier);
ProcessGlobalRegions(frontier);
- ProcessThreads(suspended_threads, frontier);
+ ProcessThreads(suspended_threads, frontier, caller_tid, caller_sp);
ProcessRootRegions(frontier);
FloodFillTag(frontier, kReachable);
- CHECK_EQ(0, frontier->size());
- ProcessPC(frontier);
-
// The check here is relatively expensive, so we do this in a separate flood
// fill. That way we can skip the check for chunks that are reachable
// otherwise.
@@ -584,32 +664,17 @@ static void ResetTagsCb(uptr chunk, void *arg) {
m.set_tag(kDirectlyLeaked);
}
-static void PrintStackTraceById(u32 stack_trace_id) {
- CHECK(stack_trace_id);
- StackDepotGet(stack_trace_id).Print();
-}
-
// ForEachChunk callback. Aggregates information about unreachable chunks into
// a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
CHECK(arg);
- LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
+ LeakedChunks *leaks = reinterpret_cast<LeakedChunks *>(arg);
chunk = GetUserBegin(chunk);
LsanMetadata m(chunk);
- if (!m.allocated()) return;
- if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
- u32 resolution = flags()->resolution;
- u32 stack_trace_id = 0;
- if (resolution > 0) {
- StackTrace stack = StackDepotGet(m.stack_trace_id());
- stack.size = Min(stack.size, resolution);
- stack_trace_id = StackDepotPut(stack);
- } else {
- stack_trace_id = m.stack_trace_id();
- }
- leak_report->AddLeakedChunk(chunk, stack_trace_id, m.requested_size(),
- m.tag());
- }
+ if (!m.allocated())
+ return;
+ if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked)
+ leaks->push_back({chunk, m.stack_trace_id(), m.requested_size(), m.tag()});
}
void LeakSuppressionContext::PrintMatchedSuppressions() {
@@ -629,24 +694,13 @@ void LeakSuppressionContext::PrintMatchedSuppressions() {
Printf("%s\n\n", line);
}
-static void ReportIfNotSuspended(ThreadContextBase *tctx, void *arg) {
- const InternalMmapVector<tid_t> &suspended_threads =
- *(const InternalMmapVector<tid_t> *)arg;
- if (tctx->status == ThreadStatusRunning) {
- uptr i = InternalLowerBound(suspended_threads, tctx->os_id);
- if (i >= suspended_threads.size() || suspended_threads[i] != tctx->os_id)
- Report("Running thread %d was not suspended. False leaks are possible.\n",
- tctx->os_id);
- }
-}
-
-#if SANITIZER_FUCHSIA
+# if SANITIZER_FUCHSIA
// Fuchsia provides a libc interface that guarantees all threads are
// covered, and SuspendedThreadList is never really used.
static void ReportUnsuspendedThreads(const SuspendedThreadsList &) {}
-#else // !SANITIZER_FUCHSIA
+# else // !SANITIZER_FUCHSIA
static void ReportUnsuspendedThreads(
const SuspendedThreadsList &suspended_threads) {
@@ -656,11 +710,19 @@ static void ReportUnsuspendedThreads(
Sort(threads.data(), threads.size());
- GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
- &ReportIfNotSuspended, &threads);
+ InternalMmapVector<tid_t> unsuspended;
+ GetRunningThreadsLocked(&unsuspended);
+
+ for (auto os_id : unsuspended) {
+ uptr i = InternalLowerBound(threads, os_id);
+ if (i >= threads.size() || threads[i] != os_id)
+ Report(
+ "Running thread %zu was not suspended. False leaks are possible.\n",
+ os_id);
+ }
}
-#endif // !SANITIZER_FUCHSIA
+# endif // !SANITIZER_FUCHSIA
static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
void *arg) {
@@ -668,8 +730,9 @@ static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
CHECK(param);
CHECK(!param->success);
ReportUnsuspendedThreads(suspended_threads);
- ClassifyAllChunks(suspended_threads, &param->frontier);
- ForEachChunk(CollectLeaksCb, &param->leak_report);
+ ClassifyAllChunks(suspended_threads, &param->frontier, param->caller_tid,
+ param->caller_sp);
+ ForEachChunk(CollectLeaksCb, &param->leaks);
// Clean up for subsequent leak checks. This assumes we did not overwrite any
// kIgnored tags.
ForEachChunk(ResetTagsCb, nullptr);
@@ -699,14 +762,23 @@ static bool PrintResults(LeakReport &report) {
}
static bool CheckForLeaks() {
- if (&__lsan_is_turned_off && __lsan_is_turned_off())
+ if (&__lsan_is_turned_off && __lsan_is_turned_off()) {
+    VReport(1, "LeakSanitizer is disabled\n");
return false;
+ }
+  VReport(1, "LeakSanitizer: checking for leaks\n");
// Inside LockStuffAndStopTheWorld we can't run symbolizer, so we can't match
// suppressions. However if a stack id was previously suppressed, it should be
// suppressed in future checks as well.
for (int i = 0;; ++i) {
EnsureMainThreadIDIsCorrect();
CheckForLeaksParam param;
+    // Capture the calling thread's stack pointer early, to avoid false
+    // negatives: an old frame full of dead pointers may be overlapped by a
+    // new frame created inside CheckForLeaks, and the new frame may leave
+    // those pointer-holding bytes untouched until the threads are suspended
+    // and their stack pointers captured.
+ param.caller_tid = GetTid();
+ param.caller_sp = reinterpret_cast<uptr>(__builtin_frame_address(0));
LockStuffAndStopTheWorld(CheckForLeaksCallback, &param);
if (!param.success) {
Report("LeakSanitizer has encountered a fatal error.\n");
@@ -718,17 +790,20 @@ static bool CheckForLeaks() {
"etc)\n");
Die();
}
+ LeakReport leak_report;
+ leak_report.AddLeakedChunks(param.leaks);
+
// No new suppressions stacks, so rerun will not help and we can report.
- if (!param.leak_report.ApplySuppressions())
- return PrintResults(param.leak_report);
+ if (!leak_report.ApplySuppressions())
+ return PrintResults(leak_report);
// No indirect leaks to report, so we are done here.
- if (!param.leak_report.IndirectUnsuppressedLeakCount())
- return PrintResults(param.leak_report);
+ if (!leak_report.IndirectUnsuppressedLeakCount())
+ return PrintResults(leak_report);
if (i >= 8) {
Report("WARNING: LeakSanitizer gave up on indirect leaks suppression.\n");
- return PrintResults(param.leak_report);
+ return PrintResults(leak_report);
}
// We found a new previously unseen suppressed call stack. Rerun to make
@@ -742,90 +817,68 @@ static bool has_reported_leaks = false;
bool HasReportedLeaks() { return has_reported_leaks; }
void DoLeakCheck() {
- BlockingMutexLock l(&global_mutex);
+ Lock l(&global_mutex);
static bool already_done;
- if (already_done) return;
+ if (already_done)
+ return;
already_done = true;
has_reported_leaks = CheckForLeaks();
- if (has_reported_leaks) HandleLeaks();
+ if (has_reported_leaks)
+ HandleLeaks();
}
static int DoRecoverableLeakCheck() {
- BlockingMutexLock l(&global_mutex);
+ Lock l(&global_mutex);
bool have_leaks = CheckForLeaks();
return have_leaks ? 1 : 0;
}
void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); }
-Suppression *LeakSuppressionContext::GetSuppressionForAddr(uptr addr) {
- Suppression *s = nullptr;
-
- // Suppress by module name.
- if (const char *module_name =
- Symbolizer::GetOrInit()->GetModuleNameForPc(addr))
- if (context.Match(module_name, kSuppressionLeak, &s))
- return s;
-
- // Suppress by file or function name.
- SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
- for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
- if (context.Match(cur->info.function, kSuppressionLeak, &s) ||
- context.Match(cur->info.file, kSuppressionLeak, &s)) {
- break;
- }
- }
- frames->ClearAll();
- return s;
-}
-
-Suppression *LeakSuppressionContext::GetSuppressionForStack(
- u32 stack_trace_id) {
- LazyInit();
- StackTrace stack = StackDepotGet(stack_trace_id);
- for (uptr i = 0; i < stack.size; i++) {
- Suppression *s = GetSuppressionForAddr(
- StackTrace::GetPreviousInstructionPc(stack.trace[i]));
- if (s) {
- suppressed_stacks_sorted = false;
- suppressed_stacks.push_back(stack_trace_id);
- return s;
- }
- }
- return nullptr;
-}
-
///// LeakReport implementation. /////
// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
// in real-world applications.
-// FIXME: Get rid of this limit by changing the implementation of LeakReport to
-// use a hash table.
+// FIXME: Get rid of this limit by moving logic into DedupLeaks.
const uptr kMaxLeaksConsidered = 5000;
-void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id,
- uptr leaked_size, ChunkTag tag) {
- CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
- bool is_directly_leaked = (tag == kDirectlyLeaked);
- uptr i;
- for (i = 0; i < leaks_.size(); i++) {
- if (leaks_[i].stack_trace_id == stack_trace_id &&
- leaks_[i].is_directly_leaked == is_directly_leaked) {
- leaks_[i].hit_count++;
- leaks_[i].total_size += leaked_size;
- break;
+void LeakReport::AddLeakedChunks(const LeakedChunks &chunks) {
+ for (const LeakedChunk &leak : chunks) {
+ uptr chunk = leak.chunk;
+ u32 stack_trace_id = leak.stack_trace_id;
+ uptr leaked_size = leak.leaked_size;
+ ChunkTag tag = leak.tag;
+ CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
+
+ if (u32 resolution = flags()->resolution) {
+ StackTrace stack = StackDepotGet(stack_trace_id);
+ stack.size = Min(stack.size, resolution);
+ stack_trace_id = StackDepotPut(stack);
+ }
+
+ bool is_directly_leaked = (tag == kDirectlyLeaked);
+ uptr i;
+ for (i = 0; i < leaks_.size(); i++) {
+ if (leaks_[i].stack_trace_id == stack_trace_id &&
+ leaks_[i].is_directly_leaked == is_directly_leaked) {
+ leaks_[i].hit_count++;
+ leaks_[i].total_size += leaked_size;
+ break;
+ }
+ }
+ if (i == leaks_.size()) {
+ if (leaks_.size() == kMaxLeaksConsidered)
+ return;
+ Leak leak = {next_id_++, /* hit_count */ 1,
+ leaked_size, stack_trace_id,
+ is_directly_leaked, /* is_suppressed */ false};
+ leaks_.push_back(leak);
+ }
+ if (flags()->report_objects) {
+ LeakedObject obj = {leaks_[i].id, GetUserAddr(chunk), leaked_size};
+ leaked_objects_.push_back(obj);
}
- }
- if (i == leaks_.size()) {
- if (leaks_.size() == kMaxLeaksConsidered) return;
- Leak leak = { next_id_++, /* hit_count */ 1, leaked_size, stack_trace_id,
- is_directly_leaked, /* is_suppressed */ false };
- leaks_.push_back(leak);
- }
- if (flags()->report_objects) {
- LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
- leaked_objects_.push_back(obj);
}
}
@@ -840,9 +893,10 @@ void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
CHECK(leaks_.size() <= kMaxLeaksConsidered);
Printf("\n");
if (leaks_.size() == kMaxLeaksConsidered)
- Printf("Too many leaks! Only the first %zu leaks encountered will be "
- "reported.\n",
- kMaxLeaksConsidered);
+ Printf(
+ "Too many leaks! Only the first %zu leaks encountered will be "
+ "reported.\n",
+ kMaxLeaksConsidered);
uptr unsuppressed_count = UnsuppressedLeakCount();
if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
@@ -850,10 +904,12 @@ void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
Sort(leaks_.data(), leaks_.size(), &LeakComparator);
uptr leaks_reported = 0;
for (uptr i = 0; i < leaks_.size(); i++) {
- if (leaks_[i].is_suppressed) continue;
+ if (leaks_[i].is_suppressed)
+ continue;
PrintReportForLeak(i);
leaks_reported++;
- if (leaks_reported == num_leaks_to_report) break;
+ if (leaks_reported == num_leaks_to_report)
+ break;
}
if (leaks_reported < unsuppressed_count) {
uptr remaining = unsuppressed_count - leaks_reported;
@@ -869,7 +925,8 @@ void LeakReport::PrintReportForLeak(uptr index) {
leaks_[index].total_size, leaks_[index].hit_count);
Printf("%s", d.Default());
- PrintStackTraceById(leaks_[index].stack_trace_id);
+ CHECK(leaks_[index].stack_trace_id);
+ StackDepotGet(leaks_[index].stack_trace_id).Print();
if (flags()->report_objects) {
Printf("Objects leaked above:\n");
@@ -882,7 +939,7 @@ void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
u32 leak_id = leaks_[index].id;
for (uptr j = 0; j < leaked_objects_.size(); j++) {
if (leaked_objects_[j].leak_id == leak_id)
- Printf("%p (%zu bytes)\n", leaked_objects_[j].addr,
+ Printf("%p (%zu bytes)\n", (void *)leaked_objects_[j].addr,
leaked_objects_[j].size);
}
}
@@ -891,26 +948,23 @@ void LeakReport::PrintSummary() {
CHECK(leaks_.size() <= kMaxLeaksConsidered);
uptr bytes = 0, allocations = 0;
for (uptr i = 0; i < leaks_.size(); i++) {
- if (leaks_[i].is_suppressed) continue;
- bytes += leaks_[i].total_size;
- allocations += leaks_[i].hit_count;
+ if (leaks_[i].is_suppressed)
+ continue;
+ bytes += leaks_[i].total_size;
+ allocations += leaks_[i].hit_count;
}
InternalScopedString summary;
- summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
- allocations);
+ summary.AppendF("%zu byte(s) leaked in %zu allocation(s).", bytes,
+ allocations);
ReportErrorSummary(summary.data());
}
uptr LeakReport::ApplySuppressions() {
LeakSuppressionContext *suppressions = GetSuppressionContext();
- uptr new_suppressions = false;
+ uptr new_suppressions = 0;
for (uptr i = 0; i < leaks_.size(); i++) {
- Suppression *s =
- suppressions->GetSuppressionForStack(leaks_[i].stack_trace_id);
- if (s) {
- s->weight += leaks_[i].total_size;
- atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) +
- leaks_[i].hit_count);
+ if (suppressions->Suppress(leaks_[i].stack_trace_id, leaks_[i].hit_count,
+ leaks_[i].total_size)) {
leaks_[i].is_suppressed = true;
++new_suppressions;
}
@@ -921,7 +975,8 @@ uptr LeakReport::ApplySuppressions() {
uptr LeakReport::UnsuppressedLeakCount() {
uptr result = 0;
for (uptr i = 0; i < leaks_.size(); i++)
- if (!leaks_[i].is_suppressed) result++;
+ if (!leaks_[i].is_suppressed)
+ result++;
return result;
}
@@ -933,16 +988,16 @@ uptr LeakReport::IndirectUnsuppressedLeakCount() {
return result;
}
-} // namespace __lsan
-#else // CAN_SANITIZE_LEAKS
+} // namespace __lsan
+#else // CAN_SANITIZE_LEAKS
namespace __lsan {
-void InitCommonLsan() { }
-void DoLeakCheck() { }
-void DoRecoverableLeakCheckVoid() { }
-void DisableInThisThread() { }
-void EnableInThisThread() { }
-}
-#endif // CAN_SANITIZE_LEAKS
+void InitCommonLsan() {}
+void DoLeakCheck() {}
+void DoRecoverableLeakCheckVoid() {}
+void DisableInThisThread() {}
+void EnableInThisThread() {}
+} // namespace __lsan
+#endif // CAN_SANITIZE_LEAKS
using namespace __lsan;
@@ -954,54 +1009,55 @@ void __lsan_ignore_object(const void *p) {
return;
// Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
// locked.
- BlockingMutexLock l(&global_mutex);
- IgnoreObjectResult res = IgnoreObjectLocked(p);
+ Lock l(&global_mutex);
+ IgnoreObjectResult res = IgnoreObject(p);
if (res == kIgnoreObjectInvalid)
- VReport(1, "__lsan_ignore_object(): no heap object found at %p", p);
+ VReport(1, "__lsan_ignore_object(): no heap object found at %p\n", p);
if (res == kIgnoreObjectAlreadyIgnored)
- VReport(1, "__lsan_ignore_object(): "
- "heap object at %p is already being ignored\n", p);
+ VReport(1,
+ "__lsan_ignore_object(): "
+ "heap object at %p is already being ignored\n",
+ p);
if (res == kIgnoreObjectSuccess)
VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
-#endif // CAN_SANITIZE_LEAKS
+#endif // CAN_SANITIZE_LEAKS
}
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_register_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
- BlockingMutexLock l(&global_mutex);
- CHECK(root_regions);
- RootRegion region = {reinterpret_cast<uptr>(begin), size};
- root_regions->push_back(region);
- VReport(1, "Registered root region at %p of size %llu\n", begin, size);
-#endif // CAN_SANITIZE_LEAKS
+ VReport(1, "Registered root region at %p of size %zu\n", begin, size);
+ uptr b = reinterpret_cast<uptr>(begin);
+ uptr e = b + size;
+ CHECK_LT(b, e);
+
+ Lock l(&global_mutex);
+ ++GetRootRegionsLocked()[{b, e}];
+#endif // CAN_SANITIZE_LEAKS
}
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_unregister_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
- BlockingMutexLock l(&global_mutex);
- CHECK(root_regions);
- bool removed = false;
- for (uptr i = 0; i < root_regions->size(); i++) {
- RootRegion region = (*root_regions)[i];
- if (region.begin == reinterpret_cast<uptr>(begin) && region.size == size) {
- removed = true;
- uptr last_index = root_regions->size() - 1;
- (*root_regions)[i] = (*root_regions)[last_index];
- root_regions->pop_back();
- VReport(1, "Unregistered root region at %p of size %llu\n", begin, size);
- break;
+ uptr b = reinterpret_cast<uptr>(begin);
+ uptr e = b + size;
+ CHECK_LT(b, e);
+ VReport(1, "Unregistered root region at %p of size %zu\n", begin, size);
+
+ {
+ Lock l(&global_mutex);
+ if (auto *f = GetRootRegionsLocked().find({b, e})) {
+ if (--(f->second) == 0)
+ GetRootRegionsLocked().erase(f);
+ return;
}
}
- if (!removed) {
- Report(
- "__lsan_unregister_root_region(): region at %p of size %llu has not "
- "been registered.\n",
- begin, size);
- Die();
- }
-#endif // CAN_SANITIZE_LEAKS
+ Report(
+ "__lsan_unregister_root_region(): region at %p of size %zu has not "
+ "been registered.\n",
+ begin, size);
+ Die();
+#endif // CAN_SANITIZE_LEAKS
}
SANITIZER_INTERFACE_ATTRIBUTE
@@ -1023,7 +1079,7 @@ void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
if (common_flags()->detect_leaks)
__lsan::DoLeakCheck();
-#endif // CAN_SANITIZE_LEAKS
+#endif // CAN_SANITIZE_LEAKS
}
SANITIZER_INTERFACE_ATTRIBUTE
@@ -1031,7 +1087,7 @@ int __lsan_do_recoverable_leak_check() {
#if CAN_SANITIZE_LEAKS
if (common_flags()->detect_leaks)
return __lsan::DoRecoverableLeakCheck();
-#endif // CAN_SANITIZE_LEAKS
+#endif // CAN_SANITIZE_LEAKS
return 0;
}
@@ -1040,14 +1096,12 @@ SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_options, void) {
}
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-int __lsan_is_turned_off() {
+SANITIZER_INTERFACE_WEAK_DEF(int, __lsan_is_turned_off, void) {
return 0;
}
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-const char *__lsan_default_suppressions() {
+SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_suppressions, void) {
return "";
}
#endif
-} // extern "C"
+} // extern "C"
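
The refcounted root-region interface above is exposed through LSan's public
header <sanitizer/lsan_interface.h>. A minimal usage sketch (not part of this
commit; assumes a Linux build with -fsanitize=leak):

    #include <sanitizer/lsan_interface.h>
    #include <stdlib.h>
    #include <sys/mman.h>

    int main() {
      // Memory obtained behind the allocator's back is invisible to LSan's
      // scan, so a pointer stored in it would not keep a chunk alive unless
      // the region is registered as a root.
      void **region = (void **)mmap(nullptr, 4096, PROT_READ | PROT_WRITE,
                                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      __lsan_register_root_region(region, 4096);
      region[0] = malloc(64);   // kept reachable only by the root region
      __lsan_do_leak_check();   // does not report the 64-byte chunk
      __lsan_unregister_root_region(region, 4096);
      return 0;
    }

With the new map-based bookkeeping, registering the same {begin, end} range
twice simply bumps its refcount, and each unregister call drops one reference.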
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.h b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.h
index 776ca60b1e97..c598b6210587 100644
--- a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.h
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.h
@@ -18,8 +18,11 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_platform.h"
+#include "sanitizer_common/sanitizer_range.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
+#include "sanitizer_common/sanitizer_thread_registry.h"
// LeakSanitizer relies on some Glibc's internals (e.g. TLS machinery) on Linux.
// Also, LSan doesn't like 32 bit architectures
@@ -32,21 +35,23 @@
// Exclude leak-detection on arm32 for Android because `__aeabi_read_tp`
// is missing. This caused a link error.
#if SANITIZER_ANDROID && (__ANDROID_API__ < 28 || defined(__arm__))
-#define CAN_SANITIZE_LEAKS 0
-#elif (SANITIZER_LINUX || SANITIZER_MAC) && (SANITIZER_WORDSIZE == 64) && \
+# define CAN_SANITIZE_LEAKS 0
+#elif (SANITIZER_LINUX || SANITIZER_APPLE) && (SANITIZER_WORDSIZE == 64) && \
(defined(__x86_64__) || defined(__mips64) || defined(__aarch64__) || \
defined(__powerpc64__) || defined(__s390x__))
-#define CAN_SANITIZE_LEAKS 1
-#elif defined(__i386__) && (SANITIZER_LINUX || SANITIZER_MAC)
-#define CAN_SANITIZE_LEAKS 1
+# define CAN_SANITIZE_LEAKS 1
+#elif defined(__i386__) && (SANITIZER_LINUX || SANITIZER_APPLE)
+# define CAN_SANITIZE_LEAKS 1
#elif defined(__arm__) && SANITIZER_LINUX
-#define CAN_SANITIZE_LEAKS 1
+# define CAN_SANITIZE_LEAKS 1
+#elif SANITIZER_LOONGARCH64 && SANITIZER_LINUX
+# define CAN_SANITIZE_LEAKS 1
#elif SANITIZER_RISCV64 && SANITIZER_LINUX
-#define CAN_SANITIZE_LEAKS 1
+# define CAN_SANITIZE_LEAKS 1
#elif SANITIZER_NETBSD || SANITIZER_FUCHSIA
-#define CAN_SANITIZE_LEAKS 1
+# define CAN_SANITIZE_LEAKS 1
#else
-#define CAN_SANITIZE_LEAKS 0
+# define CAN_SANITIZE_LEAKS 0
#endif
namespace __sanitizer {
@@ -56,6 +61,9 @@ class ThreadContextBase;
struct DTLS;
}
+// This section defines function and class prototypes which must be implemented
+// by the parent tool linking in LSan. The LSan library provides its own
+// implementations, which are linked in when LSan is used as a standalone tool.
namespace __lsan {
// Chunk tags.
@@ -66,6 +74,105 @@ enum ChunkTag {
kIgnored = 3
};
+enum IgnoreObjectResult {
+ kIgnoreObjectSuccess,
+ kIgnoreObjectAlreadyIgnored,
+ kIgnoreObjectInvalid
+};
+
+//// --------------------------------------------------------------------------
+//// Poisoning prototypes.
+//// --------------------------------------------------------------------------
+
+// Returns true if [addr, addr + sizeof(void *)) is poisoned.
+bool WordIsPoisoned(uptr addr);
+
+//// --------------------------------------------------------------------------
+//// Thread prototypes.
+//// --------------------------------------------------------------------------
+
+// Wrappers for ThreadRegistry access.
+void LockThreads() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
+void UnlockThreads() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
+// If called from the main thread, updates the main thread's TID in the thread
+// registry. We need this to handle processes that fork() without a subsequent
+// exec(), which invalidates the recorded TID. To update it, we must call
+// gettid() from the main thread. Our solution is to call this function before
+// leak checking and also before every call to pthread_create() (to handle cases
+// where leak checking is initiated from a non-main thread).
+void EnsureMainThreadIDIsCorrect();
+
+bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
+ uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
+ uptr *cache_end, DTLS **dtls);
+void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches);
+void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges);
+void GetThreadExtraStackRangesLocked(tid_t os_id,
+ InternalMmapVector<Range> *ranges);
+void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs);
+void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads);
+
+//// --------------------------------------------------------------------------
+//// Allocator prototypes.
+//// --------------------------------------------------------------------------
+
+// Wrappers for allocator's ForceLock()/ForceUnlock().
+void LockAllocator();
+void UnlockAllocator();
+
+// Lock/unlock global mutex.
+void LockGlobal();
+void UnlockGlobal();
+
+// Returns the address range occupied by the global allocator object.
+void GetAllocatorGlobalRange(uptr *begin, uptr *end);
+// If p points into a chunk that has been allocated to the user, returns its
+// user-visible address. Otherwise, returns 0.
+uptr PointsIntoChunk(void *p);
+// Returns address of user-visible chunk contained in this allocator chunk.
+uptr GetUserBegin(uptr chunk);
+// Returns user-visible address for chunk. If memory tagging is used this
+// function will return the tagged address.
+uptr GetUserAddr(uptr chunk);
+
+// Wrapper for chunk metadata operations.
+class LsanMetadata {
+ public:
+ // Constructor accepts address of user-visible chunk.
+ explicit LsanMetadata(uptr chunk);
+ bool allocated() const;
+ ChunkTag tag() const;
+ void set_tag(ChunkTag value);
+ uptr requested_size() const;
+ u32 stack_trace_id() const;
+
+ private:
+ void *metadata_;
+};
+
+// Iterate over all existing chunks. Allocator must be locked.
+void ForEachChunk(ForEachChunkCallback callback, void *arg);
+
+// Helper for __lsan_ignore_object().
+IgnoreObjectResult IgnoreObject(const void *p);
+
+// The rest of the LSan interface which is implemented by library.
+
+struct ScopedStopTheWorldLock {
+ ScopedStopTheWorldLock() {
+ LockThreads();
+ LockAllocator();
+ }
+
+ ~ScopedStopTheWorldLock() {
+ UnlockAllocator();
+ UnlockThreads();
+ }
+
+ ScopedStopTheWorldLock &operator=(const ScopedStopTheWorldLock &) = delete;
+ ScopedStopTheWorldLock(const ScopedStopTheWorldLock &) = delete;
+};
+
struct Flags {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
#include "lsan_flags.inc"
@@ -81,6 +188,15 @@ extern Flags lsan_flags;
inline Flags *flags() { return &lsan_flags; }
void RegisterLsanFlags(FlagParser *parser, Flags *f);
+struct LeakedChunk {
+ uptr chunk;
+ u32 stack_trace_id;
+ uptr leaked_size;
+ ChunkTag tag;
+};
+
+using LeakedChunks = InternalMmapVector<LeakedChunk>;
+
struct Leak {
u32 id;
uptr hit_count;
@@ -100,8 +216,7 @@ struct LeakedObject {
class LeakReport {
public:
LeakReport() {}
- void AddLeakedChunk(uptr chunk, u32 stack_trace_id, uptr leaked_size,
- ChunkTag tag);
+ void AddLeakedChunks(const LeakedChunks &chunks);
void ReportTopLeaks(uptr max_leaks);
void PrintSummary();
uptr ApplySuppressions();
@@ -124,26 +239,23 @@ void InitializePlatformSpecificModules();
void ProcessGlobalRegions(Frontier *frontier);
void ProcessPlatformSpecificAllocations(Frontier *frontier);
-struct RootRegion {
- uptr begin;
- uptr size;
-};
-
// LockStuffAndStopTheWorld can start to use Scan* calls to collect into
// this Frontier vector before the StopTheWorldCallback actually runs.
// This is used when the OS has a unified callback API for suspending
// threads and enumerating roots.
struct CheckForLeaksParam {
Frontier frontier;
- LeakReport leak_report;
+ LeakedChunks leaks;
+ tid_t caller_tid;
+ uptr caller_sp;
bool success = false;
};
-InternalMmapVector<RootRegion> const *GetRootRegions();
-void ScanRootRegion(Frontier *frontier, RootRegion const &region,
- uptr region_begin, uptr region_end, bool is_readable);
-void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg);
-void GetAdditionalThreadContextPtrs(ThreadContextBase *tctx, void *ptrs);
+using Region = Range;
+
+bool HasRootRegions();
+void ScanRootRegions(Frontier *frontier,
+ const InternalMmapVectorNoCtor<Region> &region);
// Run stoptheworld while holding any platform-specific locks, as well as the
// allocator and thread registry locks.
void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
@@ -153,12 +265,8 @@ void ScanRangeForPointers(uptr begin, uptr end,
Frontier *frontier,
const char *region_type, ChunkTag tag);
void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier);
-
-enum IgnoreObjectResult {
- kIgnoreObjectSuccess,
- kIgnoreObjectAlreadyIgnored,
- kIgnoreObjectInvalid
-};
+void ScanExtraStackRanges(const InternalMmapVector<Range> &ranges,
+ Frontier *frontier);
// Functions called from the parent tool.
const char *MaybeCallLsanDefaultOptions();
@@ -210,41 +318,6 @@ inline bool IsSpecialCaseOfOperatorNew0(uptr chunk_beg, uptr chunk_size,
#endif
}
-// The following must be implemented in the parent tool.
-
-void ForEachChunk(ForEachChunkCallback callback, void *arg);
-// Returns the address range occupied by the global allocator object.
-void GetAllocatorGlobalRange(uptr *begin, uptr *end);
-// Wrappers for allocator's ForceLock()/ForceUnlock().
-void LockAllocator();
-void UnlockAllocator();
-// Returns true if [addr, addr + sizeof(void *)) is poisoned.
-bool WordIsPoisoned(uptr addr);
-// Wrappers for ThreadRegistry access.
-void LockThreadRegistry() NO_THREAD_SAFETY_ANALYSIS;
-void UnlockThreadRegistry() NO_THREAD_SAFETY_ANALYSIS;
-ThreadRegistry *GetThreadRegistryLocked();
-bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
- uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
- uptr *cache_end, DTLS **dtls);
-void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches);
-void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
- void *arg);
-// If called from the main thread, updates the main thread's TID in the thread
-// registry. We need this to handle processes that fork() without a subsequent
-// exec(), which invalidates the recorded TID. To update it, we must call
-// gettid() from the main thread. Our solution is to call this function before
-// leak checking and also before every call to pthread_create() (to handle cases
-// where leak checking is initiated from a non-main thread).
-void EnsureMainThreadIDIsCorrect();
-// If p points into a chunk that has been allocated to the user, returns its
-// user-visible address. Otherwise, returns 0.
-uptr PointsIntoChunk(void *p);
-// Returns address of user-visible chunk contained in this allocator chunk.
-uptr GetUserBegin(uptr chunk);
-// Helper for __lsan_ignore_object().
-IgnoreObjectResult IgnoreObjectLocked(const void *p);
-
// Return the linker module, if valid for the platform.
LoadedModule *GetLinker();
@@ -254,20 +327,6 @@ bool HasReportedLeaks();
// Run platform-specific leak handlers.
void HandleLeaks();
-// Wrapper for chunk metadata operations.
-class LsanMetadata {
- public:
- // Constructor accepts address of user-visible chunk.
- explicit LsanMetadata(uptr chunk);
- bool allocated() const;
- ChunkTag tag() const;
- void set_tag(ChunkTag value);
- uptr requested_size() const;
- u32 stack_trace_id() const;
- private:
- void *metadata_;
-};
-
} // namespace __lsan
extern "C" {
@@ -279,6 +338,13 @@ int __lsan_is_turned_off();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char *__lsan_default_suppressions();
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_register_root_region(const void *p, __lsan::uptr size);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_unregister_root_region(const void *p, __lsan::uptr size);
+
} // extern "C"
#endif // LSAN_COMMON_H
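
The ScopedStopTheWorldLock helper declared above replaces the manual
LockThreads()/LockAllocator() ... Unlock pairs that used to bracket every
LockStuffAndStopTheWorld implementation (see the platform files below). The
pattern is plain RAII; a self-contained sketch of its use, with the lock
functions assumed to be provided by the parent tool as the header states:

    void LockThreads();      // assumed, per the prototypes above
    void UnlockThreads();
    void LockAllocator();
    void UnlockAllocator();

    struct ScopedStopTheWorldLockSketch {
      ScopedStopTheWorldLockSketch() {
        LockThreads();       // threads first, then allocator
        LockAllocator();
      }
      ~ScopedStopTheWorldLockSketch() {
        UnlockAllocator();   // released in reverse order
        UnlockThreads();
      }
    };

    void StopTheWorldSketch() {
      ScopedStopTheWorldLockSketch lock;
      // ... suspend threads and run the leak-check callback ...
    }  // both locks released on every exit path

The RAII form cannot leak a lock on an early return, which matters for the
dl_iterate_phdr callback path on Linux shown further down.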
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_fuchsia.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_fuchsia.cpp
index 2d35fa5b1cff..cb3fe1f859f7 100644
--- a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_fuchsia.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_fuchsia.cpp
@@ -12,6 +12,7 @@
//===---------------------------------------------------------------------===//
#include "lsan_common.h"
+#include "lsan_thread.h"
#include "sanitizer_common/sanitizer_platform.h"
#if CAN_SANITIZE_LEAKS && SANITIZER_FUCHSIA
@@ -52,14 +53,22 @@ void ProcessPlatformSpecificAllocations(Frontier *frontier) {}
// behavior and causes rare race conditions.
void HandleLeaks() {}
+// This is defined differently in asan_fuchsia.cpp and lsan_fuchsia.cpp.
+bool UseExitcodeOnLeak();
+
int ExitHook(int status) {
+ if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
+ if (UseExitcodeOnLeak())
+ DoLeakCheck();
+ else
+ DoRecoverableLeakCheckVoid();
+ }
return status == 0 && HasReportedLeaks() ? common_flags()->exitcode : status;
}
void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
CheckForLeaksParam *argument) {
- LockThreadRegistry();
- LockAllocator();
+ ScopedStopTheWorldLock lock;
struct Params {
InternalMmapVector<uptr> allocator_caches;
@@ -110,7 +119,8 @@ void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
auto i = __sanitizer::InternalLowerBound(params->allocator_caches, begin);
if (i < params->allocator_caches.size() &&
params->allocator_caches[i] >= begin &&
- end - params->allocator_caches[i] <= sizeof(AllocatorCache)) {
+ params->allocator_caches[i] <= end &&
+ end - params->allocator_caches[i] >= sizeof(AllocatorCache)) {
// Split the range in two and omit the allocator cache within.
ScanRangeForPointers(begin, params->allocator_caches[i],
&params->argument->frontier, "TLS", kReachable);
@@ -135,23 +145,16 @@ void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
// We don't use the thread registry at all for enumerating the threads
// and their stacks, registers, and TLS regions. So use it separately
- // just for the allocator cache, and to call ForEachExtraStackRange,
+ // just for the allocator cache, and to call ScanExtraStackRanges,
// which ASan needs.
if (flags()->use_stacks) {
- GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
- [](ThreadContextBase *tctx, void *arg) {
- ForEachExtraStackRange(tctx->os_id, ForEachExtraStackRangeCb,
- arg);
- },
- &params->argument->frontier);
+ InternalMmapVector<Range> ranges;
+ GetThreadExtraStackRangesLocked(&ranges);
+ ScanExtraStackRanges(ranges, &params->argument->frontier);
}
-
params->callback(SuspendedThreadsListFuchsia(), params->argument);
},
&params);
-
- UnlockAllocator();
- UnlockThreadRegistry();
}
} // namespace __lsan
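
The rewritten range test in the TLS callback above is the substantive fix in
this hunk: the old condition `end - caches[i] <= sizeof(AllocatorCache)` could
accept a cache pointer lying outside the scanned range. A sketch of the
corrected predicate, with simplified names:

    #include <cstdint>
    using uptr = uintptr_t;

    // True when the AllocatorCache object at address c lies entirely inside
    // the scanned range [begin, end), in which case the scan is split around
    // it rather than scanning the cache itself.
    bool CacheSplitsRange(uptr begin, uptr end, uptr c, uptr cache_size) {
      return c >= begin &&           // lower_bound already guarantees this
             c <= end &&             // cache does not start past the range
             end - c >= cache_size;  // the whole object fits before end
    }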
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_linux.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_linux.cpp
index 3af586e220f6..692ad35169e1 100644
--- a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_linux.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_linux.cpp
@@ -122,12 +122,9 @@ void HandleLeaks() {
static int LockStuffAndStopTheWorldCallback(struct dl_phdr_info *info,
size_t size, void *data) {
- LockThreadRegistry();
- LockAllocator();
+ ScopedStopTheWorldLock lock;
DoStopTheWorldParam *param = reinterpret_cast<DoStopTheWorldParam *>(data);
StopTheWorld(param->callback, param->argument);
- UnlockAllocator();
- UnlockThreadRegistry();
return 1;
}
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_mac.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_mac.cpp
index 8516a176eb46..4e5198979b95 100644
--- a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_mac.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common_mac.cpp
@@ -15,38 +15,55 @@
#include "sanitizer_common/sanitizer_libc.h"
#include "lsan_common.h"
-#if CAN_SANITIZE_LEAKS && SANITIZER_MAC
+#if CAN_SANITIZE_LEAKS && SANITIZER_APPLE
-#include "sanitizer_common/sanitizer_allocator_internal.h"
-#include "lsan_allocator.h"
+# include <mach/mach.h>
+# include <mach/vm_statistics.h>
+# include <pthread.h>
-#include <pthread.h>
+# include "lsan_allocator.h"
+# include "sanitizer_common/sanitizer_allocator_internal.h"
+namespace __lsan {
-#include <mach/mach.h>
+class ThreadContextLsanBase;
-// Only introduced in Mac OS X 10.9.
-#ifdef VM_MEMORY_OS_ALLOC_ONCE
-static const int kSanitizerVmMemoryOsAllocOnce = VM_MEMORY_OS_ALLOC_ONCE;
-#else
-static const int kSanitizerVmMemoryOsAllocOnce = 73;
-#endif
+enum class SeenRegion {
+ None = 0,
+ AllocOnce = 1 << 0,
+ LibDispatch = 1 << 1,
+ Foundation = 1 << 2,
+ All = AllocOnce | LibDispatch | Foundation
+};
-namespace __lsan {
+inline SeenRegion operator|(SeenRegion left, SeenRegion right) {
+ return static_cast<SeenRegion>(static_cast<int>(left) |
+ static_cast<int>(right));
+}
+
+inline SeenRegion &operator|=(SeenRegion &left, const SeenRegion &right) {
+ left = left | right;
+ return left;
+}
+
+struct RegionScanState {
+ SeenRegion seen_regions = SeenRegion::None;
+ bool in_libdispatch = false;
+};
typedef struct {
int disable_counter;
- u32 current_thread_id;
+ ThreadContextLsanBase *current_thread;
AllocatorCache cache;
} thread_local_data_t;
static pthread_key_t key;
static pthread_once_t key_once = PTHREAD_ONCE_INIT;
-// The main thread destructor requires the current thread id,
-// so we can't destroy it until it's been used and reset to invalid tid
+// The main thread destructor requires the current thread,
+// so we can't destroy it until it's been used and reset.
void restore_tid_data(void *ptr) {
thread_local_data_t *data = (thread_local_data_t *)ptr;
- if (data->current_thread_id != kInvalidTid)
+ if (data->current_thread)
pthread_setspecific(key, data);
}
@@ -61,7 +78,7 @@ static thread_local_data_t *get_tls_val(bool alloc) {
if (ptr == NULL && alloc) {
ptr = (thread_local_data_t *)InternalAlloc(sizeof(*ptr));
ptr->disable_counter = 0;
- ptr->current_thread_id = kInvalidTid;
+ ptr->current_thread = nullptr;
ptr->cache = AllocatorCache();
pthread_setspecific(key, ptr);
}
@@ -84,12 +101,14 @@ void EnableInThisThread() {
--*disable_counter;
}
-u32 GetCurrentThread() {
+ThreadContextLsanBase *GetCurrentThread() {
thread_local_data_t *data = get_tls_val(false);
- return data ? data->current_thread_id : kInvalidTid;
+ return data ? data->current_thread : nullptr;
}
-void SetCurrentThread(u32 tid) { get_tls_val(true)->current_thread_id = tid; }
+void SetCurrentThread(ThreadContextLsanBase *tctx) {
+ get_tls_val(true)->current_thread = tctx;
+}
AllocatorCache *GetAllocatorCache() { return &get_tls_val(true)->cache; }
@@ -143,31 +162,50 @@ void ProcessGlobalRegions(Frontier *frontier) {
}
void ProcessPlatformSpecificAllocations(Frontier *frontier) {
- unsigned depth = 1;
- vm_size_t size = 0;
vm_address_t address = 0;
kern_return_t err = KERN_SUCCESS;
- mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
- InternalMmapVector<RootRegion> const *root_regions = GetRootRegions();
+ InternalMmapVector<Region> mapped_regions;
+ bool use_root_regions = flags()->use_root_regions && HasRootRegions();
+ RegionScanState scan_state;
while (err == KERN_SUCCESS) {
+ vm_size_t size = 0;
+ unsigned depth = 1;
struct vm_region_submap_info_64 info;
+ mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
err = vm_region_recurse_64(mach_task_self(), &address, &size, &depth,
(vm_region_info_t)&info, &count);
uptr end_address = address + size;
-
- // libxpc stashes some pointers in the Kernel Alloc Once page,
- // make sure not to report those as leaks.
- if (info.user_tag == kSanitizerVmMemoryOsAllocOnce) {
+ if (info.user_tag == VM_MEMORY_OS_ALLOC_ONCE) {
+ // libxpc stashes some pointers in the Kernel Alloc Once page,
+ // make sure not to report those as leaks.
+ scan_state.seen_regions |= SeenRegion::AllocOnce;
ScanRangeForPointers(address, end_address, frontier, "GLOBAL",
kReachable);
+ } else if (info.user_tag == VM_MEMORY_FOUNDATION) {
+ // Objective-C block trampolines use the Foundation region.
+ scan_state.seen_regions |= SeenRegion::Foundation;
+ ScanRangeForPointers(address, end_address, frontier, "GLOBAL",
+ kReachable);
+ } else if (info.user_tag == VM_MEMORY_LIBDISPATCH) {
+ // Dispatch continuations use the libdispatch region. Empirically, there
+ // can be more than one region with this tag, so we'll optimistically
+      // assume that they're contiguous. Otherwise, we would need to scan every
+ // region to ensure we find them all.
+ scan_state.in_libdispatch = true;
+ ScanRangeForPointers(address, end_address, frontier, "GLOBAL",
+ kReachable);
+ } else if (scan_state.in_libdispatch) {
+ scan_state.seen_regions |= SeenRegion::LibDispatch;
+ scan_state.in_libdispatch = false;
+ }
- // Recursing over the full memory map is very slow, break out
- // early if we don't need the full iteration.
- if (!flags()->use_root_regions || !root_regions->size())
- break;
+ // Recursing over the full memory map is very slow, break out
+ // early if we don't need the full iteration.
+ if (scan_state.seen_regions == SeenRegion::All && !use_root_regions) {
+ break;
}
// This additional root region scan is required on Darwin in order to
@@ -177,15 +215,12 @@ void ProcessPlatformSpecificAllocations(Frontier *frontier) {
//
// TODO(fjricci) - remove this once sanitizer_procmaps_mac has the same
// behavior as sanitizer_procmaps_linux and traverses all memory regions
- if (flags()->use_root_regions) {
- for (uptr i = 0; i < root_regions->size(); i++) {
- ScanRootRegion(frontier, (*root_regions)[i], address, end_address,
- info.protection & kProtectionRead);
- }
- }
+ if (use_root_regions && (info.protection & kProtectionRead))
+ mapped_regions.push_back({address, end_address});
address = end_address;
}
+ ScanRootRegions(frontier, mapped_regions);
}
// On darwin, we can intercept _exit gracefully, and return a failing exit code
@@ -195,13 +230,10 @@ void HandleLeaks() {}
void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
CheckForLeaksParam *argument) {
- LockThreadRegistry();
- LockAllocator();
+ ScopedStopTheWorldLock lock;
StopTheWorld(callback, argument);
- UnlockAllocator();
- UnlockThreadRegistry();
}
-} // namespace __lsan
+} // namespace __lsan
-#endif // CAN_SANITIZE_LEAKS && SANITIZER_MAC
+#endif // CAN_SANITIZE_LEAKS && SANITIZER_APPLE
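
SeenRegion above is a scoped enum used as a bitmask, which is why the hunk
defines operator| and operator|= by hand (enum class does not get them for
free). A compilable sketch of the pattern and of the early-exit test it
enables in the region walk:

    enum class Seen { None = 0, A = 1 << 0, B = 1 << 1, All = A | B };

    inline Seen operator|(Seen l, Seen r) {
      return static_cast<Seen>(static_cast<int>(l) | static_cast<int>(r));
    }
    inline Seen &operator|=(Seen &l, Seen r) { return l = l | r; }

    // Mirrors the loop's break condition: stop recursing over the (slow)
    // memory map once every interesting region tag has been seen and no
    // root-region scan still needs the full walk.
    bool DoneScanning(Seen seen, bool need_full_walk) {
      return seen == Seen::All && !need_full_walk;
    }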
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_fuchsia.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_fuchsia.cpp
index 40e65c6fb729..ba59bc9b71e3 100644
--- a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_fuchsia.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_fuchsia.cpp
@@ -46,6 +46,7 @@ struct OnStartedArgs {
};
void ThreadContext::OnStarted(void *arg) {
+ ThreadContextLsanBase::OnStarted(arg);
auto args = reinterpret_cast<const OnStartedArgs *>(arg);
cache_begin_ = args->cache_begin;
cache_end_ = args->cache_end;
@@ -62,13 +63,13 @@ void InitializeMainThread() {
OnCreatedArgs args;
__sanitizer::GetThreadStackTopAndBottom(true, &args.stack_end,
&args.stack_begin);
- u32 tid = ThreadCreate(0, GetThreadSelf(), true, &args);
+ u32 tid = ThreadCreate(kMainTid, true, &args);
CHECK_EQ(tid, 0);
ThreadStart(tid);
}
void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches) {
- GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
+ GetLsanThreadRegistryLocked()->RunCallbackForEachThreadLocked(
[](ThreadContextBase *tctx, void *arg) {
auto ctx = static_cast<ThreadContext *>(tctx);
static_cast<decltype(caches)>(arg)->push_back(ctx->cache_begin());
@@ -76,6 +77,14 @@ void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches) {
caches);
}
+// On Fuchsia, leak detection is done by a special hook after atexit hooks.
+// So this doesn't install any atexit hook like on other platforms.
+void InstallAtExitCheckLeaks() {}
+void InstallAtForkHandler() {}
+
+// ASan defines this to check its `halt_on_error` flag.
+bool UseExitcodeOnLeak() { return true; }
+
} // namespace __lsan
// These are declared (in extern "C") by <zircon/sanitizer.h>.
@@ -86,14 +95,13 @@ void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches) {
void *__sanitizer_before_thread_create_hook(thrd_t thread, bool detached,
const char *name, void *stack_base,
size_t stack_size) {
- uptr user_id = reinterpret_cast<uptr>(thread);
ENSURE_LSAN_INITED;
EnsureMainThreadIDIsCorrect();
OnCreatedArgs args;
args.stack_begin = reinterpret_cast<uptr>(stack_base);
args.stack_end = args.stack_begin + stack_size;
- u32 parent_tid = GetCurrentThread();
- u32 tid = ThreadCreate(parent_tid, user_id, detached, &args);
+ u32 parent_tid = GetCurrentThreadId();
+ u32 tid = ThreadCreate(parent_tid, detached, &args);
return reinterpret_cast<void *>(static_cast<uptr>(tid));
}
@@ -104,7 +112,7 @@ void __sanitizer_thread_create_hook(void *hook, thrd_t thread, int error) {
// On success, there is nothing to do here.
if (error != thrd_success) {
// Clean up the thread registry for the thread creation that didn't happen.
- GetThreadRegistryLocked()->FinishThread(tid);
+ GetLsanThreadRegistryLocked()->FinishThread(tid);
}
}
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_interceptors.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_interceptors.cpp
index 90a90a56c54c..885f7ad5ddba 100644
--- a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_interceptors.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_interceptors.cpp
@@ -13,6 +13,7 @@
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_dlsym.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
@@ -43,6 +44,22 @@ int pthread_key_create(unsigned *key, void (*destructor)(void* v));
int pthread_setspecific(unsigned key, const void *v);
}
+struct DlsymAlloc : DlSymAllocator<DlsymAlloc> {
+ static bool UseImpl() { return lsan_init_is_running; }
+ static void OnAllocate(const void *ptr, uptr size) {
+#if CAN_SANITIZE_LEAKS
+    // Suppress leaks from dlerror(). Previously, the global array used by the
+    // dlsym hack was effectively a root region for LeakSanitizer.
+ __lsan_register_root_region(ptr, size);
+#endif
+ }
+ static void OnFree(const void *ptr, uptr size) {
+#if CAN_SANITIZE_LEAKS
+ __lsan_unregister_root_region(ptr, size);
+#endif
+ }
+};
+
///// Malloc/free interceptors. /////
namespace std {
@@ -50,43 +67,36 @@ namespace std {
enum class align_val_t: size_t;
}
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
INTERCEPTOR(void*, malloc, uptr size) {
+ if (DlsymAlloc::Use())
+ return DlsymAlloc::Allocate(size);
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
return lsan_malloc(size, stack);
}
INTERCEPTOR(void, free, void *p) {
+ if (DlsymAlloc::PointerIsMine(p))
+ return DlsymAlloc::Free(p);
ENSURE_LSAN_INITED;
lsan_free(p);
}
INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
- // This hack is not required for Fuchsia because there are no dlsym calls
- // involved in setting up interceptors.
-#if !SANITIZER_FUCHSIA
- if (lsan_init_is_running) {
- // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
- const uptr kCallocPoolSize = 1024;
- static uptr calloc_memory_for_dlsym[kCallocPoolSize];
- static uptr allocated;
- uptr size_in_words = ((nmemb * size) + kWordSize - 1) / kWordSize;
- void *mem = (void*)&calloc_memory_for_dlsym[allocated];
- allocated += size_in_words;
- CHECK(allocated < kCallocPoolSize);
- return mem;
- }
-#endif // !SANITIZER_FUCHSIA
+ if (DlsymAlloc::Use())
+ return DlsymAlloc::Callocate(nmemb, size);
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
return lsan_calloc(nmemb, size, stack);
}
-INTERCEPTOR(void*, realloc, void *q, uptr size) {
+INTERCEPTOR(void *, realloc, void *ptr, uptr size) {
+ if (DlsymAlloc::Use() || DlsymAlloc::PointerIsMine(ptr))
+ return DlsymAlloc::Realloc(ptr, size);
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
- return lsan_realloc(q, size, stack);
+ return lsan_realloc(ptr, size, stack);
}
INTERCEPTOR(void*, reallocarray, void *q, uptr nmemb, uptr size) {
@@ -106,7 +116,7 @@ INTERCEPTOR(void*, valloc, uptr size) {
GET_STACK_TRACE_MALLOC;
return lsan_valloc(size, stack);
}
-#endif // !SANITIZER_MAC
+#endif // !SANITIZER_APPLE
#if SANITIZER_INTERCEPT_MEMALIGN
INTERCEPTOR(void*, memalign, uptr alignment, uptr size) {
@@ -187,7 +197,7 @@ INTERCEPTOR(void*, pvalloc, uptr size) {
#endif // SANITIZER_INTERCEPT_PVALLOC
#if SANITIZER_INTERCEPT_CFREE
-INTERCEPTOR(void, cfree, void *p) ALIAS(WRAPPER_NAME(free));
+INTERCEPTOR(void, cfree, void *p) ALIAS(WRAP(free));
#define LSAN_MAYBE_INTERCEPT_CFREE INTERCEPT_FUNCTION(cfree)
#else
#define LSAN_MAYBE_INTERCEPT_CFREE
@@ -232,7 +242,7 @@ INTERCEPTOR(int, mprobe, void *ptr) {
// libstdc++, each of has its implementation of new and delete.
// To make sure that C++ allocation/deallocation operators are overridden on
// OS X we need to intercept them using their mangled names.
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
INTERCEPTOR_ATTRIBUTE
void *operator new(size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); }
@@ -291,7 +301,7 @@ INTERCEPTOR_ATTRIBUTE
void operator delete[](void *ptr, size_t size, std::align_val_t) NOEXCEPT
{ OPERATOR_DELETE_BODY; }
-#else // SANITIZER_MAC
+#else // SANITIZER_APPLE
INTERCEPTOR(void *, _Znwm, size_t size)
{ OPERATOR_NEW_BODY(false /*nothrow*/); }
@@ -311,7 +321,7 @@ INTERCEPTOR(void, _ZdlPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&)
INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&)
{ OPERATOR_DELETE_BODY; }
-#endif // !SANITIZER_MAC
+#endif // !SANITIZER_APPLE
///// Thread initialization and finalization. /////
@@ -405,16 +415,10 @@ INTERCEPTOR(char *, strerror, int errnum) {
#if SANITIZER_POSIX
-struct ThreadParam {
- void *(*callback)(void *arg);
- void *param;
- atomic_uintptr_t tid;
-};
-
-extern "C" void *__lsan_thread_start_func(void *arg) {
- ThreadParam *p = (ThreadParam*)arg;
- void* (*callback)(void *arg) = p->callback;
- void *param = p->param;
+template <bool Detached>
+static void *ThreadStartFunc(void *arg) {
+ u32 parent_tid = (uptr)arg;
+ uptr tid = ThreadCreate(parent_tid, Detached);
// Wait until the last iteration to maximize the chance that we are the last
// destructor to run.
#if !SANITIZER_NETBSD && !SANITIZER_FREEBSD
@@ -423,70 +427,105 @@ extern "C" void *__lsan_thread_start_func(void *arg) {
Report("LeakSanitizer: failed to set thread key.\n");
Die();
}
-#endif
- int tid = 0;
- while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0)
- internal_sched_yield();
+# endif
ThreadStart(tid, GetTid());
- atomic_store(&p->tid, 0, memory_order_release);
- return callback(param);
+ auto self = GetThreadSelf();
+ auto args = GetThreadArgRetval().GetArgs(self);
+ void *retval = (*args.routine)(args.arg_retval);
+ GetThreadArgRetval().Finish(self, retval);
+ return retval;
}
INTERCEPTOR(int, pthread_create, void *th, void *attr,
void *(*callback)(void *), void *param) {
ENSURE_LSAN_INITED;
EnsureMainThreadIDIsCorrect();
+
+ bool detached = [attr]() {
+ int d = 0;
+ return attr && !pthread_attr_getdetachstate(attr, &d) && IsStateDetached(d);
+ }();
+
__sanitizer_pthread_attr_t myattr;
if (!attr) {
pthread_attr_init(&myattr);
attr = &myattr;
}
AdjustStackSize(attr);
- int detached = 0;
- pthread_attr_getdetachstate(attr, &detached);
- ThreadParam p;
- p.callback = callback;
- p.param = param;
- atomic_store(&p.tid, 0, memory_order_relaxed);
- int res;
+ uptr this_tid = GetCurrentThreadId();
+ int result;
{
// Ignore all allocations made by pthread_create: thread stack/TLS may be
// stored by pthread for future reuse even after thread destruction, and
// the linked list it's stored in doesn't even hold valid pointers to the
    // objects; the latter are calculated by obscure pointer arithmetic.
ScopedInterceptorDisabler disabler;
- res = REAL(pthread_create)(th, attr, __lsan_thread_start_func, &p);
- }
- if (res == 0) {
- int tid = ThreadCreate(GetCurrentThread(), *(uptr *)th,
- IsStateDetached(detached));
- CHECK_NE(tid, kMainTid);
- atomic_store(&p.tid, tid, memory_order_release);
- while (atomic_load(&p.tid, memory_order_acquire) != 0)
- internal_sched_yield();
+ GetThreadArgRetval().Create(detached, {callback, param}, [&]() -> uptr {
+ result = REAL(pthread_create)(
+ th, attr, detached ? ThreadStartFunc<true> : ThreadStartFunc<false>,
+ (void *)this_tid);
+ return result ? 0 : *(uptr *)(th);
+ });
}
if (attr == &myattr)
pthread_attr_destroy(&myattr);
- return res;
-}
-
-INTERCEPTOR(int, pthread_join, void *th, void **ret) {
- ENSURE_LSAN_INITED;
- int tid = ThreadTid((uptr)th);
- int res = REAL(pthread_join)(th, ret);
- if (res == 0)
- ThreadJoin(tid);
- return res;
-}
-
-INTERCEPTOR(int, pthread_detach, void *th) {
- ENSURE_LSAN_INITED;
- int tid = ThreadTid((uptr)th);
- int res = REAL(pthread_detach)(th);
- if (res == 0)
- ThreadDetach(tid);
- return res;
-}
+ return result;
+}
+
+INTERCEPTOR(int, pthread_join, void *thread, void **retval) {
+ int result;
+ GetThreadArgRetval().Join((uptr)thread, [&]() {
+ result = REAL(pthread_join)(thread, retval);
+ return !result;
+ });
+ return result;
+}
+
+INTERCEPTOR(int, pthread_detach, void *thread) {
+ int result;
+ GetThreadArgRetval().Detach((uptr)thread, [&]() {
+ result = REAL(pthread_detach)(thread);
+ return !result;
+ });
+ return result;
+}
+
+INTERCEPTOR(void, pthread_exit, void *retval) {
+ GetThreadArgRetval().Finish(GetThreadSelf(), retval);
+ REAL(pthread_exit)(retval);
+}
+
+# if SANITIZER_INTERCEPT_TRYJOIN
+INTERCEPTOR(int, pthread_tryjoin_np, void *thread, void **ret) {
+ int result;
+ GetThreadArgRetval().Join((uptr)thread, [&]() {
+ result = REAL(pthread_tryjoin_np)(thread, ret);
+ return !result;
+ });
+ return result;
+}
+# define LSAN_MAYBE_INTERCEPT_TRYJOIN INTERCEPT_FUNCTION(pthread_tryjoin_np)
+# else
+# define LSAN_MAYBE_INTERCEPT_TRYJOIN
+# endif // SANITIZER_INTERCEPT_TRYJOIN
+
+# if SANITIZER_INTERCEPT_TIMEDJOIN
+INTERCEPTOR(int, pthread_timedjoin_np, void *thread, void **ret,
+ const struct timespec *abstime) {
+ int result;
+ GetThreadArgRetval().Join((uptr)thread, [&]() {
+ result = REAL(pthread_timedjoin_np)(thread, ret, abstime);
+ return !result;
+ });
+ return result;
+}
+# define LSAN_MAYBE_INTERCEPT_TIMEDJOIN \
+ INTERCEPT_FUNCTION(pthread_timedjoin_np)
+# else
+# define LSAN_MAYBE_INTERCEPT_TIMEDJOIN
+# endif // SANITIZER_INTERCEPT_TIMEDJOIN
+
+DEFINE_REAL_PTHREAD_FUNCTIONS
INTERCEPTOR(void, _exit, int status) {
if (status == 0 && HasReportedLeaks()) status = common_flags()->exitcode;
@@ -494,6 +533,7 @@ INTERCEPTOR(void, _exit, int status) {
}
#define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)
+#define SIGNAL_INTERCEPTOR_ENTER() ENSURE_LSAN_INITED
#include "sanitizer_common/sanitizer_signal_interceptors.inc"
#endif // SANITIZER_POSIX
@@ -520,8 +560,11 @@ void InitializeInterceptors() {
LSAN_MAYBE_INTERCEPT_MALLINFO;
LSAN_MAYBE_INTERCEPT_MALLOPT;
INTERCEPT_FUNCTION(pthread_create);
- INTERCEPT_FUNCTION(pthread_detach);
INTERCEPT_FUNCTION(pthread_join);
+ INTERCEPT_FUNCTION(pthread_detach);
+ INTERCEPT_FUNCTION(pthread_exit);
+ LSAN_MAYBE_INTERCEPT_TIMEDJOIN;
+ LSAN_MAYBE_INTERCEPT_TRYJOIN;
INTERCEPT_FUNCTION(_exit);
LSAN_MAYBE_INTERCEPT__LWP_EXIT;
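
All of the rewritten pthread interceptors above share one shape: the
{callback, param} pair and the eventual return value live in a ThreadArgRetval
table keyed by the pthread handle, and each interceptor hands the real call to
the table as a lambda whose boolean result says whether the bookkeeping should
be committed. A reduced sketch of that shape (the table type here is a
hypothetical stand-in, not the real ThreadArgRetval API):

    #include <pthread.h>
    #include <cstdint>
    #include <functional>

    struct ArgRetvalTableSketch {
      void Join(uintptr_t thread, const std::function<bool()> &fn) {
        if (fn()) { /* mark `thread` joined and release its slot */ }
      }
    };
    ArgRetvalTableSketch g_table;

    int InterceptedJoinSketch(pthread_t thread, void **retval) {
      int result;
      g_table.Join((uintptr_t)thread, [&]() {
        result = pthread_join(thread, retval);
        return result == 0;  // commit bookkeeping only on success
      });
      return result;
    }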
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_linux.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_linux.cpp
index 47c2f21b5a6b..5074cee1296a 100644
--- a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_linux.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_linux.cpp
@@ -14,13 +14,14 @@
#if SANITIZER_LINUX || SANITIZER_NETBSD || SANITIZER_FUCHSIA
-#include "lsan_allocator.h"
+# include "lsan_allocator.h"
+# include "lsan_thread.h"
namespace __lsan {
-static THREADLOCAL u32 current_thread_tid = kInvalidTid;
-u32 GetCurrentThread() { return current_thread_tid; }
-void SetCurrentThread(u32 tid) { current_thread_tid = tid; }
+static THREADLOCAL ThreadContextLsanBase *current_thread = nullptr;
+ThreadContextLsanBase *GetCurrentThread() { return current_thread; }
+void SetCurrentThread(ThreadContextLsanBase *tctx) { current_thread = tctx; }
static THREADLOCAL AllocatorCache allocator_cache;
AllocatorCache *GetAllocatorCache() { return &allocator_cache; }
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_mac.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_mac.cpp
index b96893e2801b..990954a8b687 100644
--- a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_mac.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_mac.cpp
@@ -12,7 +12,7 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
#include "interception/interception.h"
#include "lsan.h"
@@ -67,10 +67,9 @@ typedef struct {
ALWAYS_INLINE
void lsan_register_worker_thread(int parent_tid) {
- if (GetCurrentThread() == kInvalidTid) {
- u32 tid = ThreadCreate(parent_tid, 0, true);
+ if (GetCurrentThreadId() == kInvalidTid) {
+ u32 tid = ThreadCreate(parent_tid, true);
ThreadStart(tid, GetTid());
- SetCurrentThread(tid);
}
}
@@ -81,7 +80,7 @@ extern "C" void lsan_dispatch_call_block_and_release(void *block) {
VReport(2,
"lsan_dispatch_call_block_and_release(): "
"context: %p, pthread_self: %p\n",
- block, pthread_self());
+ block, (void*)pthread_self());
lsan_register_worker_thread(context->parent_tid);
// Call the original dispatcher for the block.
context->func(context->block);
@@ -101,7 +100,7 @@ extern "C" lsan_block_context_t *alloc_lsan_context(void *ctxt,
(lsan_block_context_t *)lsan_malloc(sizeof(lsan_block_context_t), stack);
lsan_ctxt->block = ctxt;
lsan_ctxt->func = func;
- lsan_ctxt->parent_tid = GetCurrentThread();
+ lsan_ctxt->parent_tid = GetCurrentThreadId();
return lsan_ctxt;
}
@@ -146,13 +145,13 @@ void dispatch_source_set_event_handler(dispatch_source_t ds,
void (^work)(void));
}
-#define GET_LSAN_BLOCK(work) \
- void (^lsan_block)(void); \
- int parent_tid = GetCurrentThread(); \
- lsan_block = ^(void) { \
- lsan_register_worker_thread(parent_tid); \
- work(); \
- }
+# define GET_LSAN_BLOCK(work) \
+ void (^lsan_block)(void); \
+ int parent_tid = GetCurrentThreadId(); \
+ lsan_block = ^(void) { \
+ lsan_register_worker_thread(parent_tid); \
+ work(); \
+ }
INTERCEPTOR(void, dispatch_async, dispatch_queue_t dq, void (^work)(void)) {
GET_LSAN_BLOCK(work);
@@ -188,4 +187,4 @@ INTERCEPTOR(void, dispatch_source_set_event_handler, dispatch_source_t ds,
}
#endif
-#endif // SANITIZER_MAC
+#endif // SANITIZER_APPLE
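
The GET_LSAN_BLOCK macro above wraps every block handed to libdispatch so the
worker thread registers itself with LSan before running user code. A C++
lambda analogue of the same wrapping idea (the real code uses Objective-C
blocks; lsan_register_worker_thread is the function shown earlier in this
file):

    #include <functional>

    void lsan_register_worker_thread(int parent_tid);  // assumed, as above

    std::function<void()> WrapForDispatch(int parent_tid,
                                          std::function<void()> work) {
      // parent_tid is captured at submission time, on the submitting thread.
      return [parent_tid, work = std::move(work)]() {
        // Runs on the dispatch worker thread; register it with LSan first.
        lsan_register_worker_thread(parent_tid);
        work();
      };
    }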
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_malloc_mac.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_malloc_mac.cpp
index d03eb2e915c0..525c30272ccc 100644
--- a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_malloc_mac.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_malloc_mac.cpp
@@ -12,7 +12,7 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
#include "lsan.h"
#include "lsan_allocator.h"
@@ -56,4 +56,4 @@ using namespace __lsan;
#include "sanitizer_common/sanitizer_malloc_mac.inc"
-#endif // SANITIZER_MAC
+#endif // SANITIZER_APPLE
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_posix.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_posix.cpp
index 5d1c3f6260dd..422c29acca69 100644
--- a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_posix.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_posix.cpp
@@ -14,10 +14,13 @@
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_POSIX
-#include "lsan.h"
-#include "lsan_allocator.h"
-#include "sanitizer_common/sanitizer_stacktrace.h"
-#include "sanitizer_common/sanitizer_tls_get_addr.h"
+# include <pthread.h>
+
+# include "lsan.h"
+# include "lsan_allocator.h"
+# include "lsan_thread.h"
+# include "sanitizer_common/sanitizer_stacktrace.h"
+# include "sanitizer_common/sanitizer_tls_get_addr.h"
namespace __lsan {
@@ -34,6 +37,7 @@ struct OnStartedArgs {
};
void ThreadContext::OnStarted(void *arg) {
+ ThreadContextLsanBase::OnStarted(arg);
auto args = reinterpret_cast<const OnStartedArgs *>(arg);
stack_begin_ = args->stack_begin;
stack_end_ = args->stack_end;
@@ -61,7 +65,7 @@ bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
uptr *cache_end, DTLS **dtls) {
ThreadContext *context = static_cast<ThreadContext *>(
- GetThreadRegistryLocked()->FindThreadContextByOsIDLocked(os_id));
+ GetLsanThreadRegistryLocked()->FindThreadContextByOsIDLocked(os_id));
if (!context)
return false;
*stack_begin = context->stack_begin();
@@ -75,7 +79,7 @@ bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
}
void InitializeMainThread() {
- u32 tid = ThreadCreate(kMainTid, 0, true);
+ u32 tid = ThreadCreate(kMainTid, true);
CHECK_EQ(tid, kMainTid);
ThreadStart(tid, GetTid());
}
@@ -87,10 +91,38 @@ static void OnStackUnwind(const SignalContext &sig, const void *,
}
void LsanOnDeadlySignal(int signo, void *siginfo, void *context) {
- HandleDeadlySignal(siginfo, context, GetCurrentThread(), &OnStackUnwind,
+ HandleDeadlySignal(siginfo, context, GetCurrentThreadId(), &OnStackUnwind,
nullptr);
}
+void InstallAtExitCheckLeaks() {
+ if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit)
+ Atexit(DoLeakCheck);
+}
+
+static void BeforeFork() {
+ LockGlobal();
+ LockThreads();
+ LockAllocator();
+ StackDepotLockBeforeFork();
+}
+
+static void AfterFork(bool fork_child) {
+ StackDepotUnlockAfterFork(fork_child);
+ UnlockAllocator();
+ UnlockThreads();
+ UnlockGlobal();
+}
+
+void InstallAtForkHandler() {
+# if SANITIZER_SOLARIS || SANITIZER_NETBSD || SANITIZER_APPLE
+ return; // FIXME: Implement FutexWait.
+# endif
+ pthread_atfork(
+ &BeforeFork, []() { AfterFork(/* fork_child= */ false); },
+ []() { AfterFork(/* fork_child= */ true); });
+}
+
} // namespace __lsan
#endif // SANITIZER_POSIX
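InstallAtForkHandler() above follows the standard sanitizer fork discipline: the prepare hook takes every runtime lock fork() could otherwise tear in half, and the parent/child hooks release them in exactly the reverse order (the real AfterFork also tells the stack depot whether it is running in the child). A self-contained sketch of the idiom with two placeholder locks:

    #include <pthread.h>

    static pthread_mutex_t g_outer = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t g_inner = PTHREAD_MUTEX_INITIALIZER;

    // prepare: acquire in a fixed order so no lock is mid-operation
    // at the moment fork() snapshots the address space.
    static void BeforeForkSketch() {
      pthread_mutex_lock(&g_outer);
      pthread_mutex_lock(&g_inner);
    }

    // parent and child: release in reverse order; both images end up
    // with consistent, unlocked state.
    static void AfterForkSketch() {
      pthread_mutex_unlock(&g_inner);
      pthread_mutex_unlock(&g_outer);
    }

    void InstallAtForkHandlerSketch() {
      pthread_atfork(&BeforeForkSketch, &AfterForkSketch, &AfterForkSketch);
    }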
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_thread.cpp b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_thread.cpp
index 1d224ebca693..8aa3111eecf7 100644
--- a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_thread.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_thread.cpp
@@ -24,89 +24,93 @@
namespace __lsan {
static ThreadRegistry *thread_registry;
+static ThreadArgRetval *thread_arg_retval;
+
+static Mutex mu_for_thread_context;
+static LowLevelAllocator allocator_for_thread_context;
static ThreadContextBase *CreateThreadContext(u32 tid) {
- void *mem = MmapOrDie(sizeof(ThreadContext), "ThreadContext");
- return new (mem) ThreadContext(tid);
+ Lock lock(&mu_for_thread_context);
+ return new (allocator_for_thread_context) ThreadContext(tid);
}
-void InitializeThreadRegistry() {
- static ALIGNED(64) char thread_registry_placeholder[sizeof(ThreadRegistry)];
+void InitializeThreads() {
+ static ALIGNED(alignof(
+ ThreadRegistry)) char thread_registry_placeholder[sizeof(ThreadRegistry)];
thread_registry =
new (thread_registry_placeholder) ThreadRegistry(CreateThreadContext);
+
+ static ALIGNED(alignof(ThreadArgRetval)) char
+ thread_arg_retval_placeholder[sizeof(ThreadArgRetval)];
+ thread_arg_retval = new (thread_arg_retval_placeholder) ThreadArgRetval();
}
+ThreadArgRetval &GetThreadArgRetval() { return *thread_arg_retval; }
+
ThreadContextLsanBase::ThreadContextLsanBase(int tid)
: ThreadContextBase(tid) {}
+void ThreadContextLsanBase::OnStarted(void *arg) {
+ SetCurrentThread(this);
+ AllocatorThreadStart();
+}
+
void ThreadContextLsanBase::OnFinished() {
AllocatorThreadFinish();
DTLS_Destroy();
+ SetCurrentThread(nullptr);
}
-u32 ThreadCreate(u32 parent_tid, uptr user_id, bool detached, void *arg) {
- return thread_registry->CreateThread(user_id, detached, parent_tid, arg);
+u32 ThreadCreate(u32 parent_tid, bool detached, void *arg) {
+ return thread_registry->CreateThread(0, detached, parent_tid, arg);
}
void ThreadContextLsanBase::ThreadStart(u32 tid, tid_t os_id,
ThreadType thread_type, void *arg) {
thread_registry->StartThread(tid, os_id, thread_type, arg);
- SetCurrentThread(tid);
}
-void ThreadFinish() {
- thread_registry->FinishThread(GetCurrentThread());
- SetCurrentThread(kInvalidTid);
-}
+void ThreadFinish() { thread_registry->FinishThread(GetCurrentThreadId()); }
-ThreadContext *CurrentThreadContext() {
- if (!thread_registry)
- return nullptr;
- if (GetCurrentThread() == kInvalidTid)
- return nullptr;
- // No lock needed when getting current thread.
- return (ThreadContext *)thread_registry->GetThreadLocked(GetCurrentThread());
+void EnsureMainThreadIDIsCorrect() {
+ if (GetCurrentThreadId() == kMainTid)
+ GetCurrentThread()->os_id = GetTid();
}
-static bool FindThreadByUid(ThreadContextBase *tctx, void *arg) {
- uptr uid = (uptr)arg;
- if (tctx->user_id == uid && tctx->status != ThreadStatusInvalid) {
- return true;
- }
- return false;
-}
+///// Interface to the common LSan module. /////
-u32 ThreadTid(uptr uid) {
- return thread_registry->FindThread(FindThreadByUid, (void *)uid);
-}
+void GetThreadExtraStackRangesLocked(tid_t os_id,
+ InternalMmapVector<Range> *ranges) {}
+void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges) {}
-void ThreadDetach(u32 tid) {
- CHECK_NE(tid, kInvalidTid);
- thread_registry->DetachThread(tid, /* arg */ nullptr);
+void LockThreads() {
+ thread_registry->Lock();
+ thread_arg_retval->Lock();
}
-void ThreadJoin(u32 tid) {
- CHECK_NE(tid, kInvalidTid);
- thread_registry->JoinThread(tid, /* arg */ nullptr);
+void UnlockThreads() {
+ thread_arg_retval->Unlock();
+ thread_registry->Unlock();
}
-void EnsureMainThreadIDIsCorrect() {
- if (GetCurrentThread() == kMainTid)
- CurrentThreadContext()->os_id = GetTid();
+ThreadRegistry *GetLsanThreadRegistryLocked() {
+ thread_registry->CheckLocked();
+ return thread_registry;
}
-///// Interface to the common LSan module. /////
-
-void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
- void *arg) {}
-
-void LockThreadRegistry() { thread_registry->Lock(); }
-
-void UnlockThreadRegistry() { thread_registry->Unlock(); }
+void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) {
+ GetLsanThreadRegistryLocked()->RunCallbackForEachThreadLocked(
+ [](ThreadContextBase *tctx, void *threads) {
+ if (tctx->status == ThreadStatusRunning) {
+ reinterpret_cast<InternalMmapVector<tid_t> *>(threads)->push_back(
+ tctx->os_id);
+ }
+ },
+ threads);
+}
-ThreadRegistry *GetThreadRegistryLocked() {
- thread_registry->CheckLocked();
- return thread_registry;
+void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<uptr> *ptrs) {
+ GetThreadArgRetval().GetAllPtrsLocked(ptrs);
}
} // namespace __lsan
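InitializeThreads() above constructs both singletons with placement new into statically allocated, suitably aligned buffers, so they need no heap and are never destroyed. The same idiom in isolation (a sketch; the runtime calls its init function explicitly rather than relying on a function-local static):

    #include <new>

    struct Registry {
      Registry() = default;
      // ...
    };

    Registry *GetRegistrySketch() {
      // Raw storage with the object's own alignment; constructed in
      // place, intentionally leaked so it outlives all user code.
      alignas(Registry) static char storage[sizeof(Registry)];
      static Registry *instance = new (storage) Registry();
      return instance;
    }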
diff --git a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_thread.h b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_thread.h
index 36643753d019..222066ee93cd 100644
--- a/contrib/llvm-project/compiler-rt/lib/lsan/lsan_thread.h
+++ b/contrib/llvm-project/compiler-rt/lib/lsan/lsan_thread.h
@@ -14,6 +14,7 @@
#ifndef LSAN_THREAD_H
#define LSAN_THREAD_H
+#include "sanitizer_common/sanitizer_thread_arg_retval.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
namespace __lsan {
@@ -21,6 +22,7 @@ namespace __lsan {
class ThreadContextLsanBase : public ThreadContextBase {
public:
explicit ThreadContextLsanBase(int tid);
+ void OnStarted(void *arg) override;
void OnFinished() override;
uptr stack_begin() { return stack_begin_; }
uptr stack_end() { return stack_end_; }
@@ -42,18 +44,21 @@ class ThreadContextLsanBase : public ThreadContextBase {
// This subclass of ThreadContextLsanBase is declared in an OS-specific header.
class ThreadContext;
-void InitializeThreadRegistry();
+void InitializeThreads();
void InitializeMainThread();
-u32 ThreadCreate(u32 tid, uptr uid, bool detached, void *arg = nullptr);
+ThreadRegistry *GetLsanThreadRegistryLocked();
+ThreadArgRetval &GetThreadArgRetval();
+
+u32 ThreadCreate(u32 tid, bool detached, void *arg = nullptr);
void ThreadFinish();
-void ThreadDetach(u32 tid);
-void ThreadJoin(u32 tid);
-u32 ThreadTid(uptr uid);
-u32 GetCurrentThread();
-void SetCurrentThread(u32 tid);
-ThreadContext *CurrentThreadContext();
+ThreadContextLsanBase *GetCurrentThread();
+inline u32 GetCurrentThreadId() {
+ ThreadContextLsanBase *ctx = GetCurrentThread();
+ return ctx ? ctx->tid : kInvalidTid;
+}
+void SetCurrentThread(ThreadContextLsanBase *tctx);
void EnsureMainThreadIDIsCorrect();
} // namespace __lsan
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_allocator.cpp b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_allocator.cpp
index 6f01d4dfcb84..af46ffdb248e 100644
--- a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_allocator.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_allocator.cpp
@@ -15,29 +15,68 @@
#include "memprof_allocator.h"
#include "memprof_mapping.h"
+#include "memprof_mibmap.h"
+#include "memprof_rawprofile.h"
#include "memprof_stack.h"
#include "memprof_thread.h"
+#include "profile/MemProfData.inc"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
+#include "sanitizer_common/sanitizer_array_ref.h"
+#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
-#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include <sched.h>
-#include <stdlib.h>
#include <time.h>
namespace __memprof {
+namespace {
+using ::llvm::memprof::MemInfoBlock;
+
+void Print(const MemInfoBlock &M, const u64 id, bool print_terse) {
+ u64 p;
+
+ if (print_terse) {
+ p = M.TotalSize * 100 / M.AllocCount;
+ Printf("MIB:%llu/%u/%llu.%02llu/%u/%u/", id, M.AllocCount, p / 100, p % 100,
+ M.MinSize, M.MaxSize);
+ p = M.TotalAccessCount * 100 / M.AllocCount;
+ Printf("%llu.%02llu/%llu/%llu/", p / 100, p % 100, M.MinAccessCount,
+ M.MaxAccessCount);
+ p = M.TotalLifetime * 100 / M.AllocCount;
+ Printf("%llu.%02llu/%u/%u/", p / 100, p % 100, M.MinLifetime,
+ M.MaxLifetime);
+ Printf("%u/%u/%u/%u\n", M.NumMigratedCpu, M.NumLifetimeOverlaps,
+ M.NumSameAllocCpu, M.NumSameDeallocCpu);
+ } else {
+ p = M.TotalSize * 100 / M.AllocCount;
+ Printf("Memory allocation stack id = %llu\n", id);
+ Printf("\talloc_count %u, size (ave/min/max) %llu.%02llu / %u / %u\n",
+ M.AllocCount, p / 100, p % 100, M.MinSize, M.MaxSize);
+ p = M.TotalAccessCount * 100 / M.AllocCount;
+ Printf("\taccess_count (ave/min/max): %llu.%02llu / %llu / %llu\n", p / 100,
+ p % 100, M.MinAccessCount, M.MaxAccessCount);
+ p = M.TotalLifetime * 100 / M.AllocCount;
+ Printf("\tlifetime (ave/min/max): %llu.%02llu / %u / %u\n", p / 100,
+ p % 100, M.MinLifetime, M.MaxLifetime);
+ Printf("\tnum migrated: %u, num lifetime overlaps: %u, num same alloc "
+ "cpu: %u, num same dealloc_cpu: %u\n",
+ M.NumMigratedCpu, M.NumLifetimeOverlaps, M.NumSameAllocCpu,
+ M.NumSameDeallocCpu);
+ }
+}
+} // namespace
static int GetCpuId(void) {
// _memprof_preinit is called via the preinit_array, which subsequently calls
// malloc. Since this is before _dl_init calls VDSO_SETUP, sched_getcpu
// will seg fault as the address of __vdso_getcpu will be null.
- if (!memprof_init_done)
+ if (!memprof_inited)
return -1;
return sched_getcpu();
}
@@ -151,6 +190,7 @@ void MemprofMapUnmapCallback::OnMap(uptr p, uptr size) const {
thread_stats.mmaps++;
thread_stats.mmaped += size;
}
+
void MemprofMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
// We are about to unmap a chunk of user memory.
// Mark the corresponding shadow memory as not needed.
@@ -166,244 +206,6 @@ AllocatorCache *GetAllocatorCache(MemprofThreadLocalMallocStorage *ms) {
return &ms->allocator_cache;
}
-struct MemInfoBlock {
- u32 alloc_count;
- u64 total_access_count, min_access_count, max_access_count;
- u64 total_size;
- u32 min_size, max_size;
- u32 alloc_timestamp, dealloc_timestamp;
- u64 total_lifetime;
- u32 min_lifetime, max_lifetime;
- u32 alloc_cpu_id, dealloc_cpu_id;
- u32 num_migrated_cpu;
-
- // Only compared to prior deallocated object currently.
- u32 num_lifetime_overlaps;
- u32 num_same_alloc_cpu;
- u32 num_same_dealloc_cpu;
-
- u64 data_type_id; // TODO: hash of type name
-
- MemInfoBlock() : alloc_count(0) {}
-
- MemInfoBlock(u32 size, u64 access_count, u32 alloc_timestamp,
- u32 dealloc_timestamp, u32 alloc_cpu, u32 dealloc_cpu)
- : alloc_count(1), total_access_count(access_count),
- min_access_count(access_count), max_access_count(access_count),
- total_size(size), min_size(size), max_size(size),
- alloc_timestamp(alloc_timestamp), dealloc_timestamp(dealloc_timestamp),
- total_lifetime(dealloc_timestamp - alloc_timestamp),
- min_lifetime(total_lifetime), max_lifetime(total_lifetime),
- alloc_cpu_id(alloc_cpu), dealloc_cpu_id(dealloc_cpu),
- num_lifetime_overlaps(0), num_same_alloc_cpu(0),
- num_same_dealloc_cpu(0) {
- num_migrated_cpu = alloc_cpu_id != dealloc_cpu_id;
- }
-
- void Print(u64 id) {
- u64 p;
- if (flags()->print_terse) {
- p = total_size * 100 / alloc_count;
- Printf("MIB:%llu/%u/%d.%02d/%u/%u/", id, alloc_count, p / 100, p % 100,
- min_size, max_size);
- p = total_access_count * 100 / alloc_count;
- Printf("%d.%02d/%u/%u/", p / 100, p % 100, min_access_count,
- max_access_count);
- p = total_lifetime * 100 / alloc_count;
- Printf("%d.%02d/%u/%u/", p / 100, p % 100, min_lifetime, max_lifetime);
- Printf("%u/%u/%u/%u\n", num_migrated_cpu, num_lifetime_overlaps,
- num_same_alloc_cpu, num_same_dealloc_cpu);
- } else {
- p = total_size * 100 / alloc_count;
- Printf("Memory allocation stack id = %llu\n", id);
- Printf("\talloc_count %u, size (ave/min/max) %d.%02d / %u / %u\n",
- alloc_count, p / 100, p % 100, min_size, max_size);
- p = total_access_count * 100 / alloc_count;
- Printf("\taccess_count (ave/min/max): %d.%02d / %u / %u\n", p / 100,
- p % 100, min_access_count, max_access_count);
- p = total_lifetime * 100 / alloc_count;
- Printf("\tlifetime (ave/min/max): %d.%02d / %u / %u\n", p / 100, p % 100,
- min_lifetime, max_lifetime);
- Printf("\tnum migrated: %u, num lifetime overlaps: %u, num same alloc "
- "cpu: %u, num same dealloc_cpu: %u\n",
- num_migrated_cpu, num_lifetime_overlaps, num_same_alloc_cpu,
- num_same_dealloc_cpu);
- }
- }
-
- static void printHeader() {
- CHECK(flags()->print_terse);
- Printf("MIB:StackID/AllocCount/AveSize/MinSize/MaxSize/AveAccessCount/"
- "MinAccessCount/MaxAccessCount/AveLifetime/MinLifetime/MaxLifetime/"
- "NumMigratedCpu/NumLifetimeOverlaps/NumSameAllocCpu/"
- "NumSameDeallocCpu\n");
- }
-
- void Merge(MemInfoBlock &newMIB) {
- alloc_count += newMIB.alloc_count;
-
- total_access_count += newMIB.total_access_count;
- min_access_count = Min(min_access_count, newMIB.min_access_count);
- max_access_count = Max(max_access_count, newMIB.max_access_count);
-
- total_size += newMIB.total_size;
- min_size = Min(min_size, newMIB.min_size);
- max_size = Max(max_size, newMIB.max_size);
-
- total_lifetime += newMIB.total_lifetime;
- min_lifetime = Min(min_lifetime, newMIB.min_lifetime);
- max_lifetime = Max(max_lifetime, newMIB.max_lifetime);
-
- // We know newMIB was deallocated later, so just need to check if it was
- // allocated before last one deallocated.
- num_lifetime_overlaps += newMIB.alloc_timestamp < dealloc_timestamp;
- alloc_timestamp = newMIB.alloc_timestamp;
- dealloc_timestamp = newMIB.dealloc_timestamp;
-
- num_same_alloc_cpu += alloc_cpu_id == newMIB.alloc_cpu_id;
- num_same_dealloc_cpu += dealloc_cpu_id == newMIB.dealloc_cpu_id;
- alloc_cpu_id = newMIB.alloc_cpu_id;
- dealloc_cpu_id = newMIB.dealloc_cpu_id;
- }
-};
-
-static u32 AccessCount = 0;
-static u32 MissCount = 0;
-
-struct SetEntry {
- SetEntry() : id(0), MIB() {}
- bool Empty() { return id == 0; }
- void Print() {
- CHECK(!Empty());
- MIB.Print(id);
- }
- // The stack id
- u64 id;
- MemInfoBlock MIB;
-};
-
-struct CacheSet {
- enum { kSetSize = 4 };
-
- void PrintAll() {
- for (int i = 0; i < kSetSize; i++) {
- if (Entries[i].Empty())
- continue;
- Entries[i].Print();
- }
- }
- void insertOrMerge(u64 new_id, MemInfoBlock &newMIB) {
- AccessCount++;
- SetAccessCount++;
-
- for (int i = 0; i < kSetSize; i++) {
- auto id = Entries[i].id;
- // Check if this is a hit or an empty entry. Since we always move any
- // filled locations to the front of the array (see below), we don't need
- // to look after finding the first empty entry.
- if (id == new_id || !id) {
- if (id == 0) {
- Entries[i].id = new_id;
- Entries[i].MIB = newMIB;
- } else {
- Entries[i].MIB.Merge(newMIB);
- }
- // Assuming some id locality, we try to swap the matching entry
- // into the first set position.
- if (i != 0) {
- auto tmp = Entries[0];
- Entries[0] = Entries[i];
- Entries[i] = tmp;
- }
- return;
- }
- }
-
- // Miss
- MissCount++;
- SetMissCount++;
-
- // We try to find the entries with the lowest alloc count to be evicted:
- int min_idx = 0;
- u64 min_count = Entries[0].MIB.alloc_count;
- for (int i = 1; i < kSetSize; i++) {
- CHECK(!Entries[i].Empty());
- if (Entries[i].MIB.alloc_count < min_count) {
- min_idx = i;
- min_count = Entries[i].MIB.alloc_count;
- }
- }
-
- // Print the evicted entry profile information
- if (!flags()->print_terse)
- Printf("Evicted:\n");
- Entries[min_idx].Print();
-
- // Similar to the hit case, put new MIB in first set position.
- if (min_idx != 0)
- Entries[min_idx] = Entries[0];
- Entries[0].id = new_id;
- Entries[0].MIB = newMIB;
- }
-
- void PrintMissRate(int i) {
- u64 p = SetAccessCount ? SetMissCount * 10000ULL / SetAccessCount : 0;
- Printf("Set %d miss rate: %d / %d = %5d.%02d%%\n", i, SetMissCount,
- SetAccessCount, p / 100, p % 100);
- }
-
- SetEntry Entries[kSetSize];
- u32 SetAccessCount = 0;
- u32 SetMissCount = 0;
-};
-
-struct MemInfoBlockCache {
- MemInfoBlockCache() {
- if (common_flags()->print_module_map)
- DumpProcessMap();
- if (flags()->print_terse)
- MemInfoBlock::printHeader();
- Sets =
- (CacheSet *)malloc(sizeof(CacheSet) * flags()->mem_info_cache_entries);
- Constructed = true;
- }
-
- ~MemInfoBlockCache() { free(Sets); }
-
- void insertOrMerge(u64 new_id, MemInfoBlock &newMIB) {
- u64 hv = new_id;
-
- // Use mod method where number of entries should be a prime close to power
- // of 2.
- hv %= flags()->mem_info_cache_entries;
-
- return Sets[hv].insertOrMerge(new_id, newMIB);
- }
-
- void PrintAll() {
- for (int i = 0; i < flags()->mem_info_cache_entries; i++) {
- Sets[i].PrintAll();
- }
- }
-
- void PrintMissRate() {
- if (!flags()->print_mem_info_cache_miss_rate)
- return;
- u64 p = AccessCount ? MissCount * 10000ULL / AccessCount : 0;
- Printf("Overall miss rate: %d / %d = %5d.%02d%%\n", MissCount, AccessCount,
- p / 100, p % 100);
- if (flags()->print_mem_info_cache_miss_rate_details)
- for (int i = 0; i < flags()->mem_info_cache_entries; i++)
- Sets[i].PrintMissRate(i);
- }
-
- CacheSet *Sets;
- // Flag when the Sets have been allocated, in case a deallocation is called
- // very early before the static init of the Allocator and therefore this table
- // have completed.
- bool Constructed = false;
-};
-
// Accumulates the access count from the shadow for the given pointer and size.
u64 GetShadowCount(uptr p, u32 size) {
u64 *shadow = (u64 *)MEM_TO_SHADOW(p);
@@ -452,26 +254,68 @@ struct Allocator {
AllocatorCache fallback_allocator_cache;
uptr max_user_defined_malloc_size;
- atomic_uint8_t rss_limit_exceeded;
- MemInfoBlockCache MemInfoBlockTable;
- bool destructing;
+ // Holds the mapping of stack ids to MemInfoBlocks.
+ MIBMapTy MIBMap;
+
+ atomic_uint8_t destructing;
+ atomic_uint8_t constructed;
+ bool print_text;
// ------------------- Initialization ------------------------
- explicit Allocator(LinkerInitialized) : destructing(false) {}
+ explicit Allocator(LinkerInitialized) : print_text(flags()->print_text) {
+ atomic_store_relaxed(&destructing, 0);
+ atomic_store_relaxed(&constructed, 1);
+ }
- ~Allocator() { FinishAndPrint(); }
+ ~Allocator() {
+ atomic_store_relaxed(&destructing, 1);
+ FinishAndWrite();
+ }
+
+ static void PrintCallback(const uptr Key, LockedMemInfoBlock *const &Value,
+ void *Arg) {
+ SpinMutexLock l(&Value->mutex);
+ Print(Value->mib, Key, bool(Arg));
+ }
+
+ void FinishAndWrite() {
+ if (print_text && common_flags()->print_module_map)
+ DumpProcessMap();
- void FinishAndPrint() {
- if (!flags()->print_terse)
- Printf("Live on exit:\n");
allocator.ForceLock();
+
+ InsertLiveBlocks();
+ if (print_text) {
+ if (!flags()->print_terse)
+ Printf("Recorded MIBs (incl. live on exit):\n");
+ MIBMap.ForEach(PrintCallback,
+ reinterpret_cast<void *>(flags()->print_terse));
+ StackDepotPrintAll();
+ } else {
+ // Serialize the contents to a raw profile. Format documented in
+ // memprof_rawprofile.h.
+ char *Buffer = nullptr;
+
+ __sanitizer::ListOfModules List;
+ List.init();
+ ArrayRef<LoadedModule> Modules(List.begin(), List.end());
+ u64 BytesSerialized = SerializeToRawProfile(MIBMap, Modules, Buffer);
+ CHECK(Buffer && BytesSerialized && "could not serialize to buffer");
+ report_file.Write(Buffer, BytesSerialized);
+ }
+
+ allocator.ForceUnlock();
+ }
+
+ // Inserts any blocks which have been allocated but not yet deallocated.
+ void InsertLiveBlocks() {
allocator.ForEachChunk(
[](uptr chunk, void *alloc) {
u64 user_requested_size;
+ Allocator *A = (Allocator *)alloc;
MemprofChunk *m =
- ((Allocator *)alloc)
- ->GetMemprofChunk((void *)chunk, user_requested_size);
+ A->GetMemprofChunk((void *)chunk, user_requested_size);
if (!m)
return;
uptr user_beg = ((uptr)m) + kChunkHeaderSize;
@@ -479,16 +323,9 @@ struct Allocator {
long curtime = GetTimestamp();
MemInfoBlock newMIB(user_requested_size, c, m->timestamp_ms, curtime,
m->cpu_id, GetCpuId());
- ((Allocator *)alloc)
- ->MemInfoBlockTable.insertOrMerge(m->alloc_context_id, newMIB);
+ InsertOrMerge(m->alloc_context_id, newMIB, A->MIBMap);
},
this);
- allocator.ForceUnlock();
-
- destructing = true;
- MemInfoBlockTable.PrintMissRate();
- MemInfoBlockTable.PrintAll();
- StackDepotPrintAll();
}
void InitLinkerInitialized() {
@@ -501,20 +338,12 @@ struct Allocator {
: kMaxAllowedMallocSize;
}
- bool RssLimitExceeded() {
- return atomic_load(&rss_limit_exceeded, memory_order_relaxed);
- }
-
- void SetRssLimitExceeded(bool limit_exceeded) {
- atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed);
- }
-
// -------------------- Allocation/Deallocation routines ---------------
void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
AllocType alloc_type) {
if (UNLIKELY(!memprof_inited))
MemprofInitFromRtl();
- if (RssLimitExceeded()) {
+ if (UNLIKELY(IsRssLimitExceeded())) {
if (AllocatorMayReturnNull())
return nullptr;
ReportRssLimitExceeded(stack);
@@ -541,8 +370,7 @@ struct Allocator {
if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize ||
size > max_user_defined_malloc_size) {
if (AllocatorMayReturnNull()) {
- Report("WARNING: MemProfiler failed to allocate 0x%zx bytes\n",
- (void *)size);
+ Report("WARNING: MemProfiler failed to allocate 0x%zx bytes\n", size);
return nullptr;
}
uptr malloc_limit =
@@ -604,7 +432,7 @@ struct Allocator {
CHECK_LE(alloc_beg + sizeof(LargeChunkHeader), chunk_beg);
reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Set(m);
}
- MEMPROF_MALLOC_HOOK(res, size);
+ RunMallocHooks(res, size);
return res;
}
@@ -614,24 +442,21 @@ struct Allocator {
if (p == 0)
return;
- MEMPROF_FREE_HOOK(ptr);
+ RunFreeHooks(ptr);
uptr chunk_beg = p - kChunkHeaderSize;
MemprofChunk *m = reinterpret_cast<MemprofChunk *>(chunk_beg);
u64 user_requested_size =
atomic_exchange(&m->user_requested_size, 0, memory_order_acquire);
- if (memprof_inited && memprof_init_done && !destructing &&
- MemInfoBlockTable.Constructed) {
+ if (memprof_inited && atomic_load_relaxed(&constructed) &&
+ !atomic_load_relaxed(&destructing)) {
u64 c = GetShadowCount(p, user_requested_size);
long curtime = GetTimestamp();
MemInfoBlock newMIB(user_requested_size, c, m->timestamp_ms, curtime,
m->cpu_id, GetCpuId());
- {
- SpinMutexLock l(&fallback_mutex);
- MemInfoBlockTable.insertOrMerge(m->alloc_context_id, newMIB);
- }
+ InsertOrMerge(m->alloc_context_id, newMIB, MIBMap);
}
MemprofStats &thread_stats = GetCurrentThreadStats();
@@ -732,16 +557,20 @@ struct Allocator {
return user_requested_size;
}
+ uptr AllocationSizeFast(uptr p) {
+ return reinterpret_cast<MemprofChunk *>(p - kChunkHeaderSize)->UsedSize();
+ }
+
void Purge(BufferedStackTrace *stack) { allocator.ForceReleaseToOS(); }
void PrintStats() { allocator.PrintStats(); }
- void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
+ void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
allocator.ForceLock();
fallback_mutex.Lock();
}
- void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
+ void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
fallback_mutex.Unlock();
allocator.ForceUnlock();
}
@@ -858,6 +687,18 @@ int memprof_posix_memalign(void **memptr, uptr alignment, uptr size,
return 0;
}
+static const void *memprof_malloc_begin(const void *p) {
+ u64 user_requested_size;
+ MemprofChunk *m =
+ instance.GetMemprofChunkByAddr((uptr)p, user_requested_size);
+ if (!m)
+ return nullptr;
+ if (user_requested_size == 0)
+ return nullptr;
+
+ return (const void *)m->Beg();
+}
+
uptr memprof_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
if (!ptr)
return 0;
@@ -865,41 +706,45 @@ uptr memprof_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
return usable_size;
}
-void MemprofSoftRssLimitExceededCallback(bool limit_exceeded) {
- instance.SetRssLimitExceeded(limit_exceeded);
-}
-
} // namespace __memprof
// ---------------------- Interface ---------------- {{{1
using namespace __memprof;
-#if !SANITIZER_SUPPORTS_WEAK_HOOKS
-// Provide default (no-op) implementation of malloc hooks.
-SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook, void *ptr,
- uptr size) {
- (void)ptr;
- (void)size;
-}
-
-SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *ptr) {
- (void)ptr;
-}
-#endif
-
uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
int __sanitizer_get_ownership(const void *p) {
return memprof_malloc_usable_size(p, 0, 0) != 0;
}
+const void *__sanitizer_get_allocated_begin(const void *p) {
+ return memprof_malloc_begin(p);
+}
+
uptr __sanitizer_get_allocated_size(const void *p) {
return memprof_malloc_usable_size(p, 0, 0);
}
+uptr __sanitizer_get_allocated_size_fast(const void *p) {
+ DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
+ uptr ret = instance.AllocationSizeFast(reinterpret_cast<uptr>(p));
+ DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
+ return ret;
+}
+
int __memprof_profile_dump() {
- instance.FinishAndPrint();
+ instance.FinishAndWrite();
// In the future we may want to return non-zero if there are any errors
// detected during the dumping process.
return 0;
}
+
+void __memprof_profile_reset() {
+ if (report_file.fd != kInvalidFd && report_file.fd != kStdoutFd &&
+ report_file.fd != kStderrFd) {
+ CloseFile(report_file.fd);
+ // Setting the file descriptor to kInvalidFd ensures that we will reopen the
+ // file when invoking Write again.
+ report_file.fd = kInvalidFd;
+ }
+}
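With this rework, __memprof_profile_dump() serializes everything recorded in the MIB map (text or raw binary, depending on the new print_text flag), and the new __memprof_profile_reset() closes the report file so the next dump reopens it. A usage sketch for emitting periodic snapshots from instrumented code (assumes the binary links against the memprof runtime):

    // Exported by the runtime; see memprof_interface_internal.h below.
    extern "C" int __memprof_profile_dump();
    extern "C" void __memprof_profile_reset();

    void EmitProfileSnapshot() {
      // Serialize all MIBs recorded so far, including live allocations.
      __memprof_profile_dump();
      // Drop the file descriptor so a later dump reopens the report
      // file instead of writing through a stale handle.
      __memprof_profile_reset();
    }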
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_allocator.h b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_allocator.h
index f1438baaa20e..14c61c7325e3 100644
--- a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_allocator.h
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_allocator.h
@@ -39,6 +39,10 @@ void InitializeAllocator();
struct MemprofMapUnmapCallback {
void OnMap(uptr p, uptr size) const;
+ void OnMapSecondary(uptr p, uptr size, uptr user_begin,
+ uptr user_size) const {
+ OnMap(p, size);
+ }
void OnUnmap(uptr p, uptr size) const;
};
@@ -98,7 +102,6 @@ int memprof_posix_memalign(void **memptr, uptr alignment, uptr size,
uptr memprof_malloc_usable_size(const void *ptr, uptr pc, uptr bp);
void PrintInternalAllocatorStats();
-void MemprofSoftRssLimitExceededCallback(bool exceeded);
} // namespace __memprof
#endif // MEMPROF_ALLOCATOR_H
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_descriptions.cpp b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_descriptions.cpp
index 669b1acd8c71..48b74b6bc87f 100644
--- a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_descriptions.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_descriptions.cpp
@@ -49,14 +49,14 @@ void DescribeThread(MemprofThreadContext *context) {
}
context->announced = true;
InternalScopedString str;
- str.append("Thread %s", MemprofThreadIdAndName(context).c_str());
+ str.AppendF("Thread %s", MemprofThreadIdAndName(context).c_str());
if (context->parent_tid == kInvalidTid) {
- str.append(" created by unknown thread\n");
+ str.AppendF(" created by unknown thread\n");
Printf("%s", str.data());
return;
}
- str.append(" created by %s here:\n",
- MemprofThreadIdAndName(context->parent_tid).c_str());
+ str.AppendF(" created by %s here:\n",
+ MemprofThreadIdAndName(context->parent_tid).c_str());
Printf("%s", str.data());
StackDepotGet(context->stack_id).Print();
// Recursively described parent thread if needed.
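This hunk is the mechanical rename of InternalScopedString::append() to AppendF(), which keeps the printf-style contract explicit in the name. Typical use inside a sanitizer runtime looks like the following sketch (it builds only inside the compiler-rt tree, since InternalScopedString is runtime-internal):

    #include "sanitizer_common/sanitizer_common.h"

    void AnnounceSketch(int tid, const char *name) {
      __sanitizer::InternalScopedString str;
      str.AppendF("Thread T%d (%s)", tid, name);  // printf-style formatting
      str.AppendF(" created here:\n");
      __sanitizer::Printf("%s", str.data());      // flush the composed line
    }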
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_flags.inc b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_flags.inc
index 035fd15b9288..ee0760ddc302 100644
--- a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_flags.inc
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_flags.inc
@@ -35,15 +35,7 @@ MEMPROF_FLAG(bool, allocator_frees_and_returns_null_on_realloc_zero, true,
"realloc(p, 0) is equivalent to free(p) by default (Same as the "
"POSIX standard). If set to false, realloc(p, 0) will return a "
"pointer to an allocated space which can not be used.")
+MEMPROF_FLAG(bool, print_text, false,
+             "If set, prints the heap profile in text format. Otherwise, the raw binary serialization format is used.")
MEMPROF_FLAG(bool, print_terse, false,
- "If set, prints memory profile in a terse format.")
-
-MEMPROF_FLAG(
- int, mem_info_cache_entries, 16381,
- "Size in entries of the mem info block cache, should be closest prime"
- " number to a power of two for best hashing.")
-MEMPROF_FLAG(bool, print_mem_info_cache_miss_rate, false,
- "If set, prints the miss rate of the mem info block cache.")
-MEMPROF_FLAG(
- bool, print_mem_info_cache_miss_rate_details, false,
- "If set, prints detailed miss rates of the mem info block cache sets.")
+ "If set, prints memory profile in a terse format. Only applicable if print_text = true.")
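The cache-tuning flags are gone along with the cache itself; output is now controlled by print_text (text vs. raw binary) and print_terse. One way to pin these at build time is a default-options hook; the name below mirrors ASan's __asan_default_options() and is an assumption, since this diff does not show memprof's hook:

    // Hypothetical hook, by analogy with __asan_default_options();
    // verify against the memprof runtime before relying on it. Flags
    // can otherwise be supplied through the environment at startup.
    extern "C" const char *__memprof_default_options() {
      return "print_text=true:print_terse=true";  // human-readable output
    }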
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors.cpp b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors.cpp
index e22768061e70..8925ec5bbaa3 100644
--- a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors.cpp
@@ -52,11 +52,6 @@ using namespace __memprof;
DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr)
DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
-#define MEMPROF_INTERCEPTOR_ENTER(ctx, func) \
- ctx = 0; \
- (void)ctx;
-
-#define COMMON_INTERCEPT_FUNCTION(name) MEMPROF_INTERCEPT_FUNC(name)
#define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \
MEMPROF_INTERCEPT_FUNC_VER(name, ver)
#define COMMON_INTERCEPT_FUNCTION_VER_UNVERSIONED_FALLBACK(name, ver) \
@@ -93,10 +88,6 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
do { \
} while (false)
#define COMMON_INTERCEPTOR_BLOCK_REAL(name) REAL(name)
-#define COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag) \
- do { \
- CheckNoDeepBind(filename, flag); \
- } while (false)
#define COMMON_INTERCEPTOR_ON_EXIT(ctx) OnExit()
#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle)
#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED()
@@ -109,24 +100,6 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
*begin = *end = 0; \
}
-#define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size) \
- do { \
- MEMPROF_INTERCEPTOR_ENTER(ctx, memmove); \
- MEMPROF_MEMMOVE_IMPL(to, from, size); \
- } while (false)
-
-#define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size) \
- do { \
- MEMPROF_INTERCEPTOR_ENTER(ctx, memcpy); \
- MEMPROF_MEMCPY_IMPL(to, from, size); \
- } while (false)
-
-#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size) \
- do { \
- MEMPROF_INTERCEPTOR_ENTER(ctx, memset); \
- MEMPROF_MEMSET_IMPL(block, c, size); \
- } while (false)
-
#include "sanitizer_common/sanitizer_common_interceptors.inc"
#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) MEMPROF_READ_RANGE(p, s)
@@ -196,7 +169,7 @@ INTERCEPTOR(int, pthread_join, void *t, void **arg) {
DEFINE_REAL_PTHREAD_FUNCTIONS
INTERCEPTOR(char *, index, const char *string, int c)
-ALIAS(WRAPPER_NAME(strchr));
+ALIAS(WRAP(strchr));
// For both strcat() and strncat() we need to check the validity of |to|
// argument irrespective of the |from| length.
@@ -204,9 +177,9 @@ INTERCEPTOR(char *, strcat, char *to, const char *from) {
void *ctx;
MEMPROF_INTERCEPTOR_ENTER(ctx, strcat);
ENSURE_MEMPROF_INITED();
- uptr from_length = REAL(strlen)(from);
+ uptr from_length = internal_strlen(from);
MEMPROF_READ_RANGE(from, from_length + 1);
- uptr to_length = REAL(strlen)(to);
+ uptr to_length = internal_strlen(to);
MEMPROF_READ_STRING(to, to_length);
MEMPROF_WRITE_RANGE(to + to_length, from_length + 1);
return REAL(strcat)(to, from);
@@ -219,7 +192,7 @@ INTERCEPTOR(char *, strncat, char *to, const char *from, uptr size) {
uptr from_length = MaybeRealStrnlen(from, size);
uptr copy_length = Min(size, from_length + 1);
MEMPROF_READ_RANGE(from, copy_length);
- uptr to_length = REAL(strlen)(to);
+ uptr to_length = internal_strlen(to);
MEMPROF_READ_STRING(to, to_length);
MEMPROF_WRITE_RANGE(to + to_length, from_length + 1);
return REAL(strncat)(to, from, size);
@@ -232,7 +205,7 @@ INTERCEPTOR(char *, strcpy, char *to, const char *from) {
return REAL(strcpy)(to, from);
}
ENSURE_MEMPROF_INITED();
- uptr from_size = REAL(strlen)(from) + 1;
+ uptr from_size = internal_strlen(from) + 1;
MEMPROF_READ_RANGE(from, from_size);
MEMPROF_WRITE_RANGE(to, from_size);
return REAL(strcpy)(to, from);
@@ -244,7 +217,7 @@ INTERCEPTOR(char *, strdup, const char *s) {
if (UNLIKELY(!memprof_inited))
return internal_strdup(s);
ENSURE_MEMPROF_INITED();
- uptr length = REAL(strlen)(s);
+ uptr length = internal_strlen(s);
MEMPROF_READ_RANGE(s, length + 1);
GET_STACK_TRACE_MALLOC;
void *new_mem = memprof_malloc(length + 1, &stack);
@@ -258,7 +231,7 @@ INTERCEPTOR(char *, __strdup, const char *s) {
if (UNLIKELY(!memprof_inited))
return internal_strdup(s);
ENSURE_MEMPROF_INITED();
- uptr length = REAL(strlen)(s);
+ uptr length = internal_strlen(s);
MEMPROF_READ_RANGE(s, length + 1);
GET_STACK_TRACE_MALLOC;
void *new_mem = memprof_malloc(length + 1, &stack);
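The repeated REAL(strlen) to internal_strlen change above removes a dependency on an intercepted libc symbol that may not be resolved yet during early startup; the runtime carries its own trivial string helpers for exactly this reason. A sketch of what such a runtime-private helper amounts to:

    #include <cstddef>

    // Safe at any point during process startup: touches no libc symbol
    // and no interceptor machinery.
    static size_t internal_strlen_sketch(const char *s) {
      size_t n = 0;
      while (s[n] != '\0') ++n;
      return n;
    }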
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors.h b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors.h
index ca5f3690430a..20edef42a515 100644
--- a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors.h
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors.h
@@ -48,13 +48,19 @@ DECLARE_REAL(char *, strstr, const char *s1, const char *s2)
#define MEMPROF_INTERCEPT_FUNC_VER(name, ver) \
do { \
if (!INTERCEPT_FUNCTION_VER(name, ver)) \
- VReport(1, "MemProfiler: failed to intercept '%s@@%s'\n", #name, #ver); \
+ VReport(1, "MemProfiler: failed to intercept '%s@@%s'\n", #name, ver); \
} while (0)
#define MEMPROF_INTERCEPT_FUNC_VER_UNVERSIONED_FALLBACK(name, ver) \
do { \
if (!INTERCEPT_FUNCTION_VER(name, ver) && !INTERCEPT_FUNCTION(name)) \
VReport(1, "MemProfiler: failed to intercept '%s@@%s' or '%s'\n", #name, \
- #ver, #name); \
+ ver, #name); \
} while (0)
+#define MEMPROF_INTERCEPTOR_ENTER(ctx, func) \
+ ctx = 0; \
+ (void)ctx;
+
+#define COMMON_INTERCEPT_FUNCTION(name) MEMPROF_INTERCEPT_FUNC(name)
+
#endif // MEMPROF_INTERCEPTORS_H
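MEMPROF_INTERCEPT_FUNC_VER (and its unversioned-fallback variant) now pass ver straight into the report rather than stringizing it, so call sites are expected to supply the symbol version as a string literal. A hypothetical call site (no versioned intercept appears in this hunk; the names are placeholders):

    // Hypothetical: 'foo' and the version string are illustrative only.
    #define INIT_FOO() MEMPROF_INTERCEPT_FUNC_VER(foo, "VER_1.0")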
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors_memintrinsics.cpp b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors_memintrinsics.cpp
index 4eb409362b57..56bd11614d6a 100644
--- a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors_memintrinsics.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors_memintrinsics.cpp
@@ -11,11 +11,74 @@
// MemProf versions of memcpy, memmove, and memset.
//===---------------------------------------------------------------------===//
+#define SANITIZER_COMMON_NO_REDEFINE_BUILTINS
+
#include "memprof_interceptors_memintrinsics.h"
+
+#include "memprof_interceptors.h"
#include "memprof_stack.h"
using namespace __memprof;
+// memcpy is called during __memprof_init() from the internals of printf(...).
+// We do not treat memcpy with to==from as a bug.
+// See http://llvm.org/bugs/show_bug.cgi?id=11763.
+#define MEMPROF_MEMCPY_IMPL(to, from, size) \
+ do { \
+ if (UNLIKELY(!memprof_inited)) \
+ return internal_memcpy(to, from, size); \
+ if (memprof_init_is_running) { \
+ return REAL(memcpy)(to, from, size); \
+ } \
+ ENSURE_MEMPROF_INITED(); \
+ MEMPROF_READ_RANGE(from, size); \
+ MEMPROF_WRITE_RANGE(to, size); \
+ return REAL(memcpy)(to, from, size); \
+ } while (0)
+
+// memset is called inside Printf.
+#define MEMPROF_MEMSET_IMPL(block, c, size) \
+ do { \
+ if (UNLIKELY(!memprof_inited)) \
+ return internal_memset(block, c, size); \
+ if (memprof_init_is_running) { \
+ return REAL(memset)(block, c, size); \
+ } \
+ ENSURE_MEMPROF_INITED(); \
+ MEMPROF_WRITE_RANGE(block, size); \
+ return REAL(memset)(block, c, size); \
+ } while (0)
+
+#define MEMPROF_MEMMOVE_IMPL(to, from, size) \
+ do { \
+ if (UNLIKELY(!memprof_inited)) \
+ return internal_memmove(to, from, size); \
+ ENSURE_MEMPROF_INITED(); \
+ MEMPROF_READ_RANGE(from, size); \
+ MEMPROF_WRITE_RANGE(to, size); \
+ return internal_memmove(to, from, size); \
+ } while (0)
+
+#define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size) \
+ do { \
+ MEMPROF_INTERCEPTOR_ENTER(ctx, memmove); \
+ MEMPROF_MEMMOVE_IMPL(to, from, size); \
+ } while (false)
+
+#define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size) \
+ do { \
+ MEMPROF_INTERCEPTOR_ENTER(ctx, memcpy); \
+ MEMPROF_MEMCPY_IMPL(to, from, size); \
+ } while (false)
+
+#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size) \
+ do { \
+ MEMPROF_INTERCEPTOR_ENTER(ctx, memset); \
+ MEMPROF_MEMSET_IMPL(block, c, size); \
+ } while (false)
+
+#include "sanitizer_common/sanitizer_common_interceptors_memintrinsics.inc"
+
void *__memprof_memcpy(void *to, const void *from, uptr size) {
MEMPROF_MEMCPY_IMPL(to, from, size);
}
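For readability, here is the MEMPROF_MEMCPY_IMPL body that __memprof_memcpy expands to, rewritten as a plain function with comments (same runtime-internal helpers; the real code stays a macro so one body can serve both the public entry point and the interceptor):

    void *memprof_memcpy_expanded(void *to, const void *from, uptr size) {
      if (UNLIKELY(!memprof_inited))   // too early: shadow not set up yet
        return internal_memcpy(to, from, size);
      if (memprof_init_is_running)     // init itself calls memcpy via Printf
        return REAL(memcpy)(to, from, size);
      ENSURE_MEMPROF_INITED();
      MEMPROF_READ_RANGE(from, size);  // record the read into shadow counts
      MEMPROF_WRITE_RANGE(to, size);   // record the write
      return REAL(memcpy)(to, from, size);
    }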
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors_memintrinsics.h b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors_memintrinsics.h
index 348461d55c41..0b87a6f3522a 100644
--- a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors_memintrinsics.h
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interceptors_memintrinsics.h
@@ -32,45 +32,6 @@ namespace __memprof {
__memprof_record_access_range(offset, size); \
} while (0)
-// memcpy is called during __memprof_init() from the internals of printf(...).
-// We do not treat memcpy with to==from as a bug.
-// See http://llvm.org/bugs/show_bug.cgi?id=11763.
-#define MEMPROF_MEMCPY_IMPL(to, from, size) \
- do { \
- if (UNLIKELY(!memprof_inited)) \
- return internal_memcpy(to, from, size); \
- if (memprof_init_is_running) { \
- return REAL(memcpy)(to, from, size); \
- } \
- ENSURE_MEMPROF_INITED(); \
- MEMPROF_READ_RANGE(from, size); \
- MEMPROF_WRITE_RANGE(to, size); \
- return REAL(memcpy)(to, from, size); \
- } while (0)
-
-// memset is called inside Printf.
-#define MEMPROF_MEMSET_IMPL(block, c, size) \
- do { \
- if (UNLIKELY(!memprof_inited)) \
- return internal_memset(block, c, size); \
- if (memprof_init_is_running) { \
- return REAL(memset)(block, c, size); \
- } \
- ENSURE_MEMPROF_INITED(); \
- MEMPROF_WRITE_RANGE(block, size); \
- return REAL(memset)(block, c, size); \
- } while (0)
-
-#define MEMPROF_MEMMOVE_IMPL(to, from, size) \
- do { \
- if (UNLIKELY(!memprof_inited)) \
- return internal_memmove(to, from, size); \
- ENSURE_MEMPROF_INITED(); \
- MEMPROF_READ_RANGE(from, size); \
- MEMPROF_WRITE_RANGE(to, size); \
- return internal_memmove(to, from, size); \
- } while (0)
-
#define MEMPROF_READ_RANGE(offset, size) ACCESS_MEMORY_RANGE(offset, size)
#define MEMPROF_WRITE_RANGE(offset, size) ACCESS_MEMORY_RANGE(offset, size)
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interface_internal.h b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interface_internal.h
index 0aca4afc9afa..318bc4104405 100644
--- a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interface_internal.h
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_interface_internal.h
@@ -49,6 +49,7 @@ extern uptr __memprof_shadow_memory_dynamic_address;
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE extern char
__memprof_profile_filename[1];
SANITIZER_INTERFACE_ATTRIBUTE int __memprof_profile_dump();
+SANITIZER_INTERFACE_ATTRIBUTE void __memprof_profile_reset();
SANITIZER_INTERFACE_ATTRIBUTE void __memprof_load(uptr p);
SANITIZER_INTERFACE_ATTRIBUTE void __memprof_store(uptr p);
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_internal.h b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_internal.h
index 8d227887fe15..990e62ce1a55 100644
--- a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_internal.h
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_internal.h
@@ -66,8 +66,6 @@ void *MemprofDoesNotSupportStaticLinkage();
// memprof_thread.cpp
MemprofThread *CreateMainThread();
-void ReadContextStack(void *context, uptr *stack, uptr *ssize);
-
// Wrapper for TLS/TSD.
void TSDInit(void (*destructor)(void *tsd));
void *TSDGet();
@@ -76,24 +74,8 @@ void PlatformTSDDtor(void *tsd);
void *MemprofDlSymNext(const char *sym);
-// Add convenient macro for interface functions that may be represented as
-// weak hooks.
-#define MEMPROF_MALLOC_HOOK(ptr, size) \
- do { \
- if (&__sanitizer_malloc_hook) \
- __sanitizer_malloc_hook(ptr, size); \
- RunMallocHooks(ptr, size); \
- } while (false)
-#define MEMPROF_FREE_HOOK(ptr) \
- do { \
- if (&__sanitizer_free_hook) \
- __sanitizer_free_hook(ptr); \
- RunFreeHooks(ptr); \
- } while (false)
-
extern int memprof_inited;
extern int memprof_timestamp_inited;
-extern int memprof_init_done;
// Used to avoid infinite recursion in __memprof_init().
extern bool memprof_init_is_running;
extern void (*death_callback)(void);
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_linux.cpp b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_linux.cpp
index 61c833bfdf64..fcb6f662a82e 100644
--- a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_linux.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_linux.cpp
@@ -20,7 +20,6 @@
#include "memprof_internal.h"
#include "memprof_thread.h"
#include "sanitizer_common/sanitizer_flags.h"
-#include "sanitizer_common/sanitizer_freebsd.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_procmaps.h"
@@ -69,12 +68,6 @@ uptr FindDynamicShadowStart() {
/*min_shadow_base_alignment*/ 0, kHighMemEnd);
}
-void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
- ucontext_t *ucp = (ucontext_t *)context;
- *stack = (uptr)ucp->uc_stack.ss_sp;
- *ssize = ucp->uc_stack.ss_size;
-}
-
void *MemprofDlSymNext(const char *sym) { return dlsym(RTLD_NEXT, sym); }
} // namespace __memprof
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_malloc_linux.cpp b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_malloc_linux.cpp
index c7330f4619a1..ef753fcaa4ad 100644
--- a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_malloc_linux.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_malloc_linux.cpp
@@ -23,125 +23,52 @@
#include "memprof_internal.h"
#include "memprof_stack.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
+#include "sanitizer_common/sanitizer_allocator_dlsym.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
// ---------------------- Replacement functions ---------------- {{{1
using namespace __memprof;
-static uptr allocated_for_dlsym;
-static uptr last_dlsym_alloc_size_in_words;
-static const uptr kDlsymAllocPoolSize = 1024;
-static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize];
-
-static inline bool IsInDlsymAllocPool(const void *ptr) {
- uptr off = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
- return off < allocated_for_dlsym * sizeof(alloc_memory_for_dlsym[0]);
-}
-
-static void *AllocateFromLocalPool(uptr size_in_bytes) {
- uptr size_in_words = RoundUpTo(size_in_bytes, kWordSize) / kWordSize;
- void *mem = (void *)&alloc_memory_for_dlsym[allocated_for_dlsym];
- last_dlsym_alloc_size_in_words = size_in_words;
- allocated_for_dlsym += size_in_words;
- CHECK_LT(allocated_for_dlsym, kDlsymAllocPoolSize);
- return mem;
-}
-
-static void DeallocateFromLocalPool(const void *ptr) {
- // Hack: since glibc 2.27 dlsym no longer uses stack-allocated memory to store
- // error messages and instead uses malloc followed by free. To avoid pool
- // exhaustion due to long object filenames, handle that special case here.
- uptr prev_offset = allocated_for_dlsym - last_dlsym_alloc_size_in_words;
- void *prev_mem = (void *)&alloc_memory_for_dlsym[prev_offset];
- if (prev_mem == ptr) {
- REAL(memset)(prev_mem, 0, last_dlsym_alloc_size_in_words * kWordSize);
- allocated_for_dlsym = prev_offset;
- last_dlsym_alloc_size_in_words = 0;
- }
-}
-
-static int PosixMemalignFromLocalPool(void **memptr, uptr alignment,
- uptr size_in_bytes) {
- if (UNLIKELY(!CheckPosixMemalignAlignment(alignment)))
- return errno_EINVAL;
-
- CHECK(alignment >= kWordSize);
-
- uptr addr = (uptr)&alloc_memory_for_dlsym[allocated_for_dlsym];
- uptr aligned_addr = RoundUpTo(addr, alignment);
- uptr aligned_size = RoundUpTo(size_in_bytes, kWordSize);
-
- uptr *end_mem = (uptr *)(aligned_addr + aligned_size);
- uptr allocated = end_mem - alloc_memory_for_dlsym;
- if (allocated >= kDlsymAllocPoolSize)
- return errno_ENOMEM;
-
- allocated_for_dlsym = allocated;
- *memptr = (void *)aligned_addr;
- return 0;
-}
-
-static inline bool MaybeInDlsym() { return memprof_init_is_running; }
-
-static inline bool UseLocalPool() { return MaybeInDlsym(); }
-
-static void *ReallocFromLocalPool(void *ptr, uptr size) {
- const uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
- const uptr copy_size = Min(size, kDlsymAllocPoolSize - offset);
- void *new_ptr;
- if (UNLIKELY(UseLocalPool())) {
- new_ptr = AllocateFromLocalPool(size);
- } else {
- ENSURE_MEMPROF_INITED();
- GET_STACK_TRACE_MALLOC;
- new_ptr = memprof_malloc(size, &stack);
- }
- internal_memcpy(new_ptr, ptr, copy_size);
- return new_ptr;
-}
+struct DlsymAlloc : public DlSymAllocator<DlsymAlloc> {
+ static bool UseImpl() { return memprof_init_is_running; }
+};
INTERCEPTOR(void, free, void *ptr) {
+ if (DlsymAlloc::PointerIsMine(ptr))
+ return DlsymAlloc::Free(ptr);
GET_STACK_TRACE_FREE;
- if (UNLIKELY(IsInDlsymAllocPool(ptr))) {
- DeallocateFromLocalPool(ptr);
- return;
- }
memprof_free(ptr, &stack, FROM_MALLOC);
}
#if SANITIZER_INTERCEPT_CFREE
INTERCEPTOR(void, cfree, void *ptr) {
+ if (DlsymAlloc::PointerIsMine(ptr))
+ return DlsymAlloc::Free(ptr);
GET_STACK_TRACE_FREE;
- if (UNLIKELY(IsInDlsymAllocPool(ptr)))
- return;
memprof_free(ptr, &stack, FROM_MALLOC);
}
#endif // SANITIZER_INTERCEPT_CFREE
INTERCEPTOR(void *, malloc, uptr size) {
- if (UNLIKELY(UseLocalPool()))
- // Hack: dlsym calls malloc before REAL(malloc) is retrieved from dlsym.
- return AllocateFromLocalPool(size);
+ if (DlsymAlloc::Use())
+ return DlsymAlloc::Allocate(size);
ENSURE_MEMPROF_INITED();
GET_STACK_TRACE_MALLOC;
return memprof_malloc(size, &stack);
}
INTERCEPTOR(void *, calloc, uptr nmemb, uptr size) {
- if (UNLIKELY(UseLocalPool()))
- // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
- return AllocateFromLocalPool(nmemb * size);
+ if (DlsymAlloc::Use())
+ return DlsymAlloc::Callocate(nmemb, size);
ENSURE_MEMPROF_INITED();
GET_STACK_TRACE_MALLOC;
return memprof_calloc(nmemb, size, &stack);
}
INTERCEPTOR(void *, realloc, void *ptr, uptr size) {
- if (UNLIKELY(IsInDlsymAllocPool(ptr)))
- return ReallocFromLocalPool(ptr, size);
- if (UNLIKELY(UseLocalPool()))
- return AllocateFromLocalPool(size);
+ if (DlsymAlloc::Use() || DlsymAlloc::PointerIsMine(ptr))
+ return DlsymAlloc::Realloc(ptr, size);
ENSURE_MEMPROF_INITED();
GET_STACK_TRACE_MALLOC;
return memprof_realloc(ptr, size, &stack);
@@ -201,8 +128,6 @@ INTERCEPTOR(int, mallopt, int cmd, int value) { return 0; }
#endif // SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO
INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
- if (UNLIKELY(UseLocalPool()))
- return PosixMemalignFromLocalPool(memptr, alignment, size);
GET_STACK_TRACE_MALLOC;
return memprof_posix_memalign(memptr, alignment, size, &stack);
}
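The hand-rolled dlsym pool above is replaced by the shared DlSymAllocator CRTP helper; the derived class only decides when the pool is active (UseImpl()), while allocation, ownership tests, and frees come from the base. A simplified standalone sketch of what such a pool does (the fixed size and 16-byte alignment are assumptions of this sketch):

    #include <cstddef>

    // Bump-allocate from static storage while dlsym() is still resolving
    // the real malloc/free; individual frees are deliberately no-ops.
    static char g_pool[1024];
    static size_t g_pool_used = 0;

    static bool PoolOwns(const void *p) {
      const char *c = static_cast<const char *>(p);
      return c >= g_pool && c < g_pool + sizeof(g_pool);
    }

    static void *PoolAllocate(size_t size) {
      size = (size + 15) & ~static_cast<size_t>(15);  // keep 16B alignment
      if (g_pool_used + size > sizeof(g_pool))
        return nullptr;  // the real helper would abort instead
      void *p = g_pool + g_pool_used;
      g_pool_used += size;
      return p;
    }

    static void PoolFree(void *) {}  // bump allocator: nothing to reclaim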
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_mibmap.cpp b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_mibmap.cpp
new file mode 100644
index 000000000000..32f0796c8f24
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_mibmap.cpp
@@ -0,0 +1,37 @@
+//===-- memprof_mibmap.cpp -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemProfiler, a memory profiler.
+//
+//===----------------------------------------------------------------------===//
+
+#include "memprof_mibmap.h"
+#include "profile/MemProfData.inc"
+#include "sanitizer_common/sanitizer_allocator_internal.h"
+#include "sanitizer_common/sanitizer_mutex.h"
+
+namespace __memprof {
+using ::llvm::memprof::MemInfoBlock;
+
+void InsertOrMerge(const uptr Id, const MemInfoBlock &Block, MIBMapTy &Map) {
+ MIBMapTy::Handle h(&Map, static_cast<uptr>(Id), /*remove=*/false,
+ /*create=*/true);
+ if (h.created()) {
+ LockedMemInfoBlock *lmib =
+ (LockedMemInfoBlock *)InternalAlloc(sizeof(LockedMemInfoBlock));
+ lmib->mutex.Init();
+ lmib->mib = Block;
+ *h = lmib;
+ } else {
+ LockedMemInfoBlock *lmib = *h;
+ SpinMutexLock lock(&lmib->mutex);
+ lmib->mib.Merge(Block);
+ }
+}
+
+} // namespace __memprof
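InsertOrMerge() is the single entry point into the MIB map: the AddrHashMap handle either creates a freshly locked slot (h.created()) or merges into the existing block under its spin mutex. The deallocation path earlier in this patch calls it roughly like this sketch (inside the __memprof namespace, with the runtime's integer typedefs in scope):

    using ::llvm::memprof::MemInfoBlock;

    // Fold one finished allocation into the profile, keyed by its
    // allocation stack id; the argument order follows the MemInfoBlock
    // constructor call in Deallocate().
    void RecordDeallocationSketch(MIBMapTy &Map, u64 stack_id, u32 size,
                                  u64 access_count, u32 alloc_ms,
                                  u32 dealloc_ms, u32 alloc_cpu,
                                  u32 dealloc_cpu) {
      MemInfoBlock mib(size, access_count, alloc_ms, dealloc_ms, alloc_cpu,
                       dealloc_cpu);
      InsertOrMerge(stack_id, mib, Map);  // creates or merges under the lock
    }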
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_mibmap.h b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_mibmap.h
new file mode 100644
index 000000000000..a7cd420464e8
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_mibmap.h
@@ -0,0 +1,27 @@
+#ifndef MEMPROF_MIBMAP_H_
+#define MEMPROF_MIBMAP_H_
+
+#include <stdint.h>
+
+#include "profile/MemProfData.inc"
+#include "sanitizer_common/sanitizer_addrhashmap.h"
+#include "sanitizer_common/sanitizer_mutex.h"
+
+namespace __memprof {
+
+struct LockedMemInfoBlock {
+ __sanitizer::StaticSpinMutex mutex;
+ ::llvm::memprof::MemInfoBlock mib;
+};
+
+// The MIB map stores a mapping from stack ids to MemInfoBlocks.
+typedef __sanitizer::AddrHashMap<LockedMemInfoBlock *, 200003> MIBMapTy;
+
+// Insert a new MemInfoBlock or merge with an existing block identified by the
+// stack id.
+void InsertOrMerge(const uptr Id, const ::llvm::memprof::MemInfoBlock &Block,
+ MIBMapTy &Map);
+
+} // namespace __memprof
+
+#endif // MEMPROF_MIBMAP_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_rawprofile.cpp b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_rawprofile.cpp
new file mode 100644
index 000000000000..fa92fa0e4b53
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_rawprofile.cpp
@@ -0,0 +1,252 @@
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "memprof_rawprofile.h"
+#include "profile/MemProfData.inc"
+#include "sanitizer_common/sanitizer_allocator_internal.h"
+#include "sanitizer_common/sanitizer_array_ref.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_linux.h"
+#include "sanitizer_common/sanitizer_procmaps.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_stackdepotbase.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "sanitizer_common/sanitizer_vector.h"
+
+namespace __memprof {
+using ::__sanitizer::Vector;
+using ::llvm::memprof::MemInfoBlock;
+using SegmentEntry = ::llvm::memprof::SegmentEntry;
+using Header = ::llvm::memprof::Header;
+
+namespace {
+template <class T> char *WriteBytes(const T &Pod, char *Buffer) {
+ *(T *)Buffer = Pod;
+ return Buffer + sizeof(T);
+}
+
+void RecordStackId(const uptr Key, UNUSED LockedMemInfoBlock *const &MIB,
+ void *Arg) {
+ // No need to touch the MIB value here since we are only recording the key.
+ auto *StackIds = reinterpret_cast<Vector<u64> *>(Arg);
+ StackIds->PushBack(Key);
+}
+} // namespace
+
+u64 SegmentSizeBytes(ArrayRef<LoadedModule> Modules) {
+ u64 NumSegmentsToRecord = 0;
+ for (const auto &Module : Modules) {
+ for (const auto &Segment : Module.ranges()) {
+ if (Segment.executable)
+ NumSegmentsToRecord++;
+ }
+ }
+
+ return sizeof(u64) // A header which stores the number of records.
+ + sizeof(SegmentEntry) * NumSegmentsToRecord;
+}
+
+// The segment section uses the following format:
+// ---------- Segment Info
+// Num Entries
+// ---------- Segment Entry
+// Start
+// End
+// Offset
+// UuidSize
+// Uuid 32B
+// ----------
+// ...
+void SerializeSegmentsToBuffer(ArrayRef<LoadedModule> Modules,
+ const u64 ExpectedNumBytes, char *&Buffer) {
+ char *Ptr = Buffer;
+ // Reserve space for the final count.
+ Ptr += sizeof(u64);
+
+ u64 NumSegmentsRecorded = 0;
+
+ for (const auto &Module : Modules) {
+ for (const auto &Segment : Module.ranges()) {
+ if (Segment.executable) {
+ SegmentEntry Entry(Segment.beg, Segment.end, Module.base_address());
+ CHECK(Module.uuid_size() <= MEMPROF_BUILDID_MAX_SIZE);
+ Entry.BuildIdSize = Module.uuid_size();
+ memcpy(Entry.BuildId, Module.uuid(), Module.uuid_size());
+ memcpy(Ptr, &Entry, sizeof(SegmentEntry));
+ Ptr += sizeof(SegmentEntry);
+ NumSegmentsRecorded++;
+ }
+ }
+ }
+ // Store the number of segments we recorded in the space we reserved.
+ *((u64 *)Buffer) = NumSegmentsRecorded;
+ CHECK(ExpectedNumBytes >= static_cast<u64>(Ptr - Buffer) &&
+ "Expected num bytes != actual bytes written");
+}
+
+u64 StackSizeBytes(const Vector<u64> &StackIds) {
+ u64 NumBytesToWrite = sizeof(u64);
+
+ const u64 NumIds = StackIds.Size();
+ for (unsigned k = 0; k < NumIds; ++k) {
+ const u64 Id = StackIds[k];
+ // One entry for the id and then one more for the number of stack pcs.
+ NumBytesToWrite += 2 * sizeof(u64);
+ const StackTrace St = StackDepotGet(Id);
+
+ CHECK(St.trace != nullptr && St.size > 0 && "Empty stack trace");
+ for (uptr i = 0; i < St.size && St.trace[i] != 0; i++) {
+ NumBytesToWrite += sizeof(u64);
+ }
+ }
+ return NumBytesToWrite;
+}
+
+// The stack info section uses the following format:
+//
+// ---------- Stack Info
+// Num Entries
+// ---------- Stack Entry
+// Num Stacks
+// PC1
+// PC2
+// ...
+// ----------
+void SerializeStackToBuffer(const Vector<u64> &StackIds,
+ const u64 ExpectedNumBytes, char *&Buffer) {
+ const u64 NumIds = StackIds.Size();
+ char *Ptr = Buffer;
+ Ptr = WriteBytes(static_cast<u64>(NumIds), Ptr);
+
+ for (unsigned k = 0; k < NumIds; ++k) {
+ const u64 Id = StackIds[k];
+ Ptr = WriteBytes(Id, Ptr);
+    Ptr += sizeof(u64); // Skip one u64 now; the PC count is filled in below.
+ u64 Count = 0;
+ const StackTrace St = StackDepotGet(Id);
+ for (uptr i = 0; i < St.size && St.trace[i] != 0; i++) {
+ // PCs in stack traces are actually the return addresses, that is,
+ // addresses of the next instructions after the call.
+ uptr pc = StackTrace::GetPreviousInstructionPc(St.trace[i]);
+ Ptr = WriteBytes(static_cast<u64>(pc), Ptr);
+ ++Count;
+ }
+ // Store the count in the space we reserved earlier.
+ *(u64 *)(Ptr - (Count + 1) * sizeof(u64)) = Count;
+ }
+
+ CHECK(ExpectedNumBytes >= static_cast<u64>(Ptr - Buffer) &&
+ "Expected num bytes != actual bytes written");
+}
+
+// The MIB section has the following format:
+// ---------- MIB Info
+// Num Entries
+// ---------- MIB Entry 0
+// Stack Id
+// Alloc Count
+// ...
+// ---------- MIB Entry 1
+// Stack Id
+// Alloc Count
+// ...
+// ----------
+void SerializeMIBInfoToBuffer(MIBMapTy &MIBMap, const Vector<u64> &StackIds,
+ const u64 ExpectedNumBytes, char *&Buffer) {
+ char *Ptr = Buffer;
+ const u64 NumEntries = StackIds.Size();
+ Ptr = WriteBytes(NumEntries, Ptr);
+
+ for (u64 i = 0; i < NumEntries; i++) {
+ const u64 Key = StackIds[i];
+ MIBMapTy::Handle h(&MIBMap, Key, /*remove=*/true, /*create=*/false);
+ CHECK(h.exists());
+ Ptr = WriteBytes(Key, Ptr);
+ Ptr = WriteBytes((*h)->mib, Ptr);
+ }
+
+ CHECK(ExpectedNumBytes >= static_cast<u64>(Ptr - Buffer) &&
+ "Expected num bytes != actual bytes written");
+}
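
The /*remove=*/true handle drains the map while serializing: each key recorded earlier is looked up exactly once, written out, and erased. A rough standalone analogue of that consume-while-writing pattern using std::map (purely illustrative, not the runtime's AddrHashMap):

    #include <cassert>
    #include <cstdint>
    #include <map>
    #include <vector>

    int main() {
      std::map<std::uint64_t, std::uint64_t> MIBs{{1, 100}, {2, 200}};
      const std::vector<std::uint64_t> Keys{1, 2};
      std::vector<std::uint64_t> Out;
      Out.push_back(Keys.size());
      for (std::uint64_t Key : Keys) {
        auto It = MIBs.find(Key);
        assert(It != MIBs.end()); // mirrors CHECK(h.exists())
        Out.push_back(Key);
        Out.push_back(It->second);
        MIBs.erase(It); // the /*remove=*/true part
      }
      assert(MIBs.empty() && Out.size() == 5);
    }
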
+
+// Format
+// ---------- Header
+// Magic
+// Version
+// Total Size
+// Segment Offset
+// MIB Info Offset
+// Stack Offset
+// ---------- Segment Info
+// Num Entries
+// ---------- Segment Entry
+// Start
+// End
+// Offset
+// BuildIdSize
+// BuildId 32B
+// ----------
+// ...
+// ----------
+// Optional Padding Bytes
+// ---------- MIB Info
+// Num Entries
+// ---------- MIB Entry
+// Stack Id
+// Alloc Count
+// ...
+// ----------
+// Optional Padding Bytes
+// ---------- Stack Info
+// Num Entries
+// ---------- Stack Entry
+// Stack Id
+// Num PCs
+// PC1
+// PC2
+// ...
+// ----------
+// Optional Padding Bytes
+// ...
+u64 SerializeToRawProfile(MIBMapTy &MIBMap, ArrayRef<LoadedModule> Modules,
+ char *&Buffer) {
+ // Each section size is rounded up to 8b since the first entry in each section
+ // is a u64 which holds the number of entries in the section by convention.
+ const u64 NumSegmentBytes = RoundUpTo(SegmentSizeBytes(Modules), 8);
+
+ Vector<u64> StackIds;
+ MIBMap.ForEach(RecordStackId, reinterpret_cast<void *>(&StackIds));
+ // The first 8b are for the total number of MIB records. Each MIB record is
+ // preceded by an 8b stack id which is associated with stack frames in the
+ // next section.
+ const u64 NumMIBInfoBytes = RoundUpTo(
+ sizeof(u64) + StackIds.Size() * (sizeof(u64) + sizeof(MemInfoBlock)), 8);
+
+ const u64 NumStackBytes = RoundUpTo(StackSizeBytes(StackIds), 8);
+
+ // Ensure that the profile is 8b aligned. We allow for some optional padding
+ // at the end so that any subsequent profile serialized to the same file does
+ // not incur unaligned accesses.
+ const u64 TotalSizeBytes = RoundUpTo(
+ sizeof(Header) + NumSegmentBytes + NumStackBytes + NumMIBInfoBytes, 8);
+
+ // Allocate the memory for the entire buffer incl. info blocks.
+ Buffer = (char *)InternalAlloc(TotalSizeBytes);
+ char *Ptr = Buffer;
+
+ Header header{MEMPROF_RAW_MAGIC_64,
+ MEMPROF_RAW_VERSION,
+ static_cast<u64>(TotalSizeBytes),
+ sizeof(Header),
+ sizeof(Header) + NumSegmentBytes,
+ sizeof(Header) + NumSegmentBytes + NumMIBInfoBytes};
+ Ptr = WriteBytes(header, Ptr);
+
+ SerializeSegmentsToBuffer(Modules, NumSegmentBytes, Ptr);
+ Ptr += NumSegmentBytes;
+
+ SerializeMIBInfoToBuffer(MIBMap, StackIds, NumMIBInfoBytes, Ptr);
+ Ptr += NumMIBInfoBytes;
+
+ SerializeStackToBuffer(StackIds, NumStackBytes, Ptr);
+
+ return TotalSizeBytes;
+}
+
+} // namespace __memprof
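
To make the header arithmetic concrete, a small hedged sketch that mirrors the offset computation with an assumed 48-byte header and one 64-byte segment entry; the 132-byte MIB size below is a placeholder for this example, not the real sizeof(MemInfoBlock):

    #include <cassert>
    #include <cstdint>

    // Stand-in for the sanitizer RoundUpTo with a power-of-two boundary.
    constexpr std::uint64_t RoundUp(std::uint64_t Size, std::uint64_t B) {
      return (Size + B - 1) & ~(B - 1);
    }

    int main() {
      const std::uint64_t HeaderBytes = 48;              // six u64 fields
      const std::uint64_t SegBytes = RoundUp(8 + 64, 8); // count + one entry
      const std::uint64_t MibBytes =
          RoundUp(8 + 2 * (8 + 132), 8); // assumed MIB size, two entries
      const std::uint64_t SegmentOffset = HeaderBytes;
      const std::uint64_t MIBOffset = HeaderBytes + SegBytes;
      const std::uint64_t StackOffset = MIBOffset + MibBytes;
      assert(SegmentOffset == 48 && MIBOffset == 120);
      (void)StackOffset;
    }
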
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_rawprofile.h b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_rawprofile.h
new file mode 100644
index 000000000000..e2494175f165
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_rawprofile.h
@@ -0,0 +1,15 @@
+#ifndef MEMPROF_RAWPROFILE_H_
+#define MEMPROF_RAWPROFILE_H_
+
+#include "memprof_mibmap.h"
+#include "sanitizer_common/sanitizer_array_ref.h"
+#include "sanitizer_common/sanitizer_common.h"
+
+namespace __memprof {
+// Serialize the in-memory representation of the memprof profile to the raw
+// binary format. The format itself is documented in memprof_rawprofile.cpp.
+u64 SerializeToRawProfile(MIBMapTy &BlockCache, ArrayRef<LoadedModule> Modules,
+ char *&Buffer);
+} // namespace __memprof
+
+#endif // MEMPROF_RAWPROFILE_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_rtl.cpp b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_rtl.cpp
index fee2912d64d4..5e2e7bc2be3f 100644
--- a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_rtl.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_rtl.cpp
@@ -21,6 +21,7 @@
#include "memprof_thread.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_interface_internal.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
@@ -38,6 +39,7 @@ static void MemprofDie() {
if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) != 0) {
// Don't die twice - run a busy loop.
while (1) {
+ internal_sched_yield();
}
}
if (common_flags()->print_module_map >= 1)
@@ -48,6 +50,14 @@ static void MemprofDie() {
}
}
+static void MemprofOnDeadlySignal(int signo, void *siginfo, void *context) {
+ // We call StartReportDeadlySignal rather than HandleDeadlySignal so that
+ // the deadly signal message goes to stderr without writing anything to the
+ // profile output file.
+ StartReportDeadlySignal();
+ __memprof_profile_dump();
+ Die();
+}
+
static void CheckUnwind() {
GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_check);
stack.Print();
@@ -55,7 +65,6 @@ static void CheckUnwind() {
// -------------------------- Globals --------------------- {{{1
int memprof_inited;
-int memprof_init_done;
bool memprof_init_is_running;
int memprof_timestamp_inited;
long memprof_init_timestamp_s;
@@ -133,13 +142,6 @@ void PrintAddressSpaceLayout() {
CHECK(SHADOW_SCALE >= 3 && SHADOW_SCALE <= 7);
}
-static bool UNUSED __local_memprof_dyninit = [] {
- MaybeStartBackgroudThread();
- SetSoftRssLimitExceededCallback(MemprofSoftRssLimitExceededCallback);
-
- return false;
-}();
-
static void MemprofInitInternal() {
if (LIKELY(memprof_inited))
return;
@@ -175,9 +177,6 @@ static void MemprofInitInternal() {
__sanitizer::InitializePlatformEarly();
- // Re-exec ourselves if we need to set additional env or command line args.
- MaybeReexec();
-
// Setup internal allocator callback.
SetLowLevelAllocateMinAlignment(SHADOW_GRANULARITY);
@@ -191,14 +190,10 @@ static void MemprofInitInternal() {
InitializeShadowMemory();
TSDInit(PlatformTSDDtor);
+ InstallDeadlySignalHandlers(MemprofOnDeadlySignal);
InitializeAllocator();
- // On Linux MemprofThread::ThreadStart() calls malloc() that's why
- // memprof_inited should be set to 1 prior to initializing the threads.
- memprof_inited = 1;
- memprof_init_is_running = false;
-
if (flags()->atexit)
Atexit(memprof_atexit);
@@ -217,7 +212,8 @@ static void MemprofInitInternal() {
VReport(1, "MemProfiler Init done\n");
- memprof_init_done = 1;
+ memprof_init_is_running = false;
+ memprof_inited = 1;
}
void MemprofInitTime() {
@@ -264,14 +260,9 @@ void __memprof_record_access(void const volatile *addr) {
__memprof::RecordAccess((uptr)addr);
}
-// We only record the access on the first location in the range,
-// since we will later accumulate the access counts across the
-// full allocation, and we don't want to inflate the hotness from
-// a memory intrinsic on a large range of memory.
-// TODO: Should we do something else so we can better track utilization?
-void __memprof_record_access_range(void const volatile *addr,
- UNUSED uptr size) {
- __memprof::RecordAccess((uptr)addr);
+void __memprof_record_access_range(void const volatile *addr, uptr size) {
+ for (uptr a = (uptr)addr; a < (uptr)addr + size; a += kWordSize)
+ __memprof::RecordAccess(a);
}
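
The replacement loop records one access per word across the whole range instead of only the first location. A hedged sketch of the same sweep; kWordSize corresponds to the runtime's pointer-word size, and the callback here is hypothetical:

    #include <cstdint>
    #include <cstdio>

    // Visit [Addr, Addr + Size) at word granularity, mirroring the loop in
    // __memprof_record_access_range.
    void VisitRange(std::uintptr_t Addr, std::uintptr_t Size,
                    void (*Record)(std::uintptr_t)) {
      constexpr std::uintptr_t kWord = sizeof(std::uintptr_t);
      for (std::uintptr_t A = Addr; A < Addr + Size; A += kWord)
        Record(A);
    }

    int main() {
      // Prints 0x1000, 0x1008, 0x1010, 0x1018 on an LP64 target.
      VisitRange(0x1000, 32, [](std::uintptr_t A) {
        std::printf("0x%llx\n", (unsigned long long)A);
      });
    }
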
extern "C" SANITIZER_INTERFACE_ATTRIBUTE u16
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_stats.cpp b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_stats.cpp
index 8a50d270dc6a..c8faebfa12de 100644
--- a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_stats.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_stats.cpp
@@ -62,11 +62,11 @@ void MemprofStats::MergeFrom(const MemprofStats *stats) {
dst_ptr[i] += src_ptr[i];
}
-static BlockingMutex print_lock(LINKER_INITIALIZED);
+static Mutex print_lock;
static MemprofStats unknown_thread_stats(LINKER_INITIALIZED);
static MemprofStats dead_threads_stats(LINKER_INITIALIZED);
-static BlockingMutex dead_threads_stats_lock(LINKER_INITIALIZED);
+static Mutex dead_threads_stats_lock;
// Required for malloc_zone_statistics() on OS X. This can't be stored in
// per-thread MemprofStats.
static uptr max_malloced_memory;
@@ -87,7 +87,7 @@ static void GetAccumulatedStats(MemprofStats *stats) {
}
stats->MergeFrom(&unknown_thread_stats);
{
- BlockingMutexLock lock(&dead_threads_stats_lock);
+ Lock lock(&dead_threads_stats_lock);
stats->MergeFrom(&dead_threads_stats);
}
// This is not very accurate: we may miss allocation peaks that happen
@@ -99,7 +99,7 @@ static void GetAccumulatedStats(MemprofStats *stats) {
}
void FlushToDeadThreadStats(MemprofStats *stats) {
- BlockingMutexLock lock(&dead_threads_stats_lock);
+ Lock lock(&dead_threads_stats_lock);
dead_threads_stats.MergeFrom(stats);
stats->Clear();
}
@@ -113,11 +113,11 @@ static void PrintAccumulatedStats() {
MemprofStats stats;
GetAccumulatedStats(&stats);
// Use lock to keep reports from mixing up.
- BlockingMutexLock lock(&print_lock);
+ Lock lock(&print_lock);
stats.Print();
- StackDepotStats *stack_depot_stats = StackDepotGetStats();
+ StackDepotStats stack_depot_stats = StackDepotGetStats();
Printf("Stats: StackDepot: %zd ids; %zdM allocated\n",
- stack_depot_stats->n_uniq_ids, stack_depot_stats->allocated >> 20);
+ stack_depot_stats.n_uniq_ids, stack_depot_stats.allocated >> 20);
PrintInternalAllocatorStats();
}
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_thread.cpp b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_thread.cpp
index 5ae7a2ee85b9..9512a87cf98e 100644
--- a/contrib/llvm-project/compiler-rt/lib/memprof/memprof_thread.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/memprof_thread.cpp
@@ -40,11 +40,11 @@ void MemprofThreadContext::OnFinished() {
static ALIGNED(16) char thread_registry_placeholder[sizeof(ThreadRegistry)];
static ThreadRegistry *memprof_thread_registry;
-static BlockingMutex mu_for_thread_context(LINKER_INITIALIZED);
+static Mutex mu_for_thread_context;
static LowLevelAllocator allocator_for_thread_context;
static ThreadContextBase *GetMemprofThreadContext(u32 tid) {
- BlockingMutexLock lock(&mu_for_thread_context);
+ Lock lock(&mu_for_thread_context);
return new (allocator_for_thread_context) MemprofThreadContext(tid);
}
@@ -80,8 +80,7 @@ MemprofThread *MemprofThread::Create(thread_callback_t start_routine, void *arg,
thread->start_routine_ = start_routine;
thread->arg_ = arg;
MemprofThreadContext::CreateThreadContextArgs args = {thread, stack};
- memprofThreadRegistry().CreateThread(*reinterpret_cast<uptr *>(thread),
- detached, parent_tid, &args);
+ memprofThreadRegistry().CreateThread(0, detached, parent_tid, &args);
return thread;
}
@@ -131,7 +130,7 @@ void MemprofThread::Init(const InitOptions *options) {
int local = 0;
VReport(1, "T%d: stack [%p,%p) size 0x%zx; local=%p\n", tid(),
(void *)stack_bottom_, (void *)stack_top_, stack_top_ - stack_bottom_,
- &local);
+ (void *)&local);
}
thread_return_t
@@ -198,7 +197,7 @@ MemprofThread *GetCurrentThread() {
void SetCurrentThread(MemprofThread *t) {
CHECK(t->context());
- VReport(2, "SetCurrentThread: %p for thread %p\n", t->context(),
+ VReport(2, "SetCurrentThread: %p for thread %p\n", (void *)t->context(),
(void *)GetThreadSelf());
// Make sure we do not reset the current MemprofThread.
CHECK_EQ(0, TSDGet());
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/tests/driver.cpp b/contrib/llvm-project/compiler-rt/lib/memprof/tests/driver.cpp
new file mode 100644
index 000000000000..b402cec1126b
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/tests/driver.cpp
@@ -0,0 +1,14 @@
+//===-- driver.cpp ----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "gtest/gtest.h"
+
+int main(int argc, char **argv) {
+ testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/memprof/tests/rawprofile.cpp b/contrib/llvm-project/compiler-rt/lib/memprof/tests/rawprofile.cpp
new file mode 100644
index 000000000000..c5dfdca890be
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/memprof/tests/rawprofile.cpp
@@ -0,0 +1,170 @@
+#include "memprof/memprof_rawprofile.h"
+
+#include <cstdint>
+#include <memory>
+
+#include "profile/MemProfData.inc"
+#include "sanitizer_common/sanitizer_array_ref.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_procmaps.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+namespace {
+
+using ::__memprof::MIBMapTy;
+using ::__memprof::SerializeToRawProfile;
+using ::__sanitizer::StackDepotPut;
+using ::__sanitizer::StackTrace;
+using ::llvm::memprof::MemInfoBlock;
+
+uint64_t PopulateFakeMap(const MemInfoBlock &FakeMIB, uintptr_t StackPCBegin,
+ MIBMapTy &FakeMap) {
+ constexpr int kSize = 5;
+ uintptr_t array[kSize];
+ for (int i = 0; i < kSize; i++) {
+ array[i] = StackPCBegin + i;
+ }
+ StackTrace St(array, kSize);
+ uint32_t Id = StackDepotPut(St);
+
+ InsertOrMerge(Id, FakeMIB, FakeMap);
+ return Id;
+}
+
+template <class T = uint64_t> T Read(char *&Buffer) {
+ static_assert(std::is_pod<T>::value, "Must be a POD type.");
+ assert(reinterpret_cast<size_t>(Buffer) % sizeof(T) == 0 &&
+ "Unaligned read!");
+ T t = *reinterpret_cast<T *>(Buffer);
+ Buffer += sizeof(T);
+ return t;
+}
+
+TEST(MemProf, Basic) {
+ __sanitizer::LoadedModule FakeModule;
+ FakeModule.addAddressRange(/*begin=*/0x10, /*end=*/0x20, /*executable=*/true,
+ /*writable=*/false, /*name=*/"");
+ const char uuid[MEMPROF_BUILDID_MAX_SIZE] = {0xC, 0x0, 0xF, 0xF, 0xE, 0xE};
+ FakeModule.setUuid(uuid, MEMPROF_BUILDID_MAX_SIZE);
+ __sanitizer::ArrayRef<__sanitizer::LoadedModule> Modules(&FakeModule,
+ (&FakeModule) + 1);
+
+ MIBMapTy FakeMap;
+ MemInfoBlock FakeMIB;
+ // Zero out the constructor-set values so the fields below are easier to
+ // test.
+ memset(&FakeMIB, 0, sizeof(MemInfoBlock));
+ FakeMIB.AllocCount = 0x1;
+ FakeMIB.TotalAccessCount = 0x2;
+
+ uint64_t FakeIds[2];
+ FakeIds[0] = PopulateFakeMap(FakeMIB, /*StackPCBegin=*/2, FakeMap);
+ FakeIds[1] = PopulateFakeMap(FakeMIB, /*StackPCBegin=*/3, FakeMap);
+
+ char *Ptr = nullptr;
+ uint64_t NumBytes = SerializeToRawProfile(FakeMap, Modules, Ptr);
+ const char *Buffer = Ptr;
+
+ ASSERT_GT(NumBytes, 0ULL);
+ ASSERT_TRUE(Ptr);
+
+ // Check the header.
+ EXPECT_THAT(Read(Ptr), MEMPROF_RAW_MAGIC_64);
+ EXPECT_THAT(Read(Ptr), MEMPROF_RAW_VERSION);
+ const uint64_t TotalSize = Read(Ptr);
+ const uint64_t SegmentOffset = Read(Ptr);
+ const uint64_t MIBOffset = Read(Ptr);
+ const uint64_t StackOffset = Read(Ptr);
+
+ // ============= Check sizes and padding.
+ EXPECT_EQ(TotalSize, NumBytes);
+ EXPECT_EQ(TotalSize % 8, 0ULL);
+
+ // Should be equal to the size of the raw profile header.
+ EXPECT_EQ(SegmentOffset, 48ULL);
+
+ // We expect only 1 segment entry: 8b for the count plus 64b for the
+ // SegmentEntry serialized in memprof_rawprofile.cpp.
+ EXPECT_EQ(MIBOffset - SegmentOffset, 72ULL);
+
+ EXPECT_EQ(MIBOffset, 120ULL);
+ // We expect 2 MIB entries: 8b for the count, then for each entry
+ // sizeof(uint64_t) for the stack id plus sizeof(MemInfoBlock).
+ EXPECT_EQ(StackOffset - MIBOffset, 8 + 2 * (8 + sizeof(MemInfoBlock)));
+
+ EXPECT_EQ(StackOffset, 408ULL);
+ // We expect 2 stack entries with 5 frames each: 8b for the total count,
+ // then 2 * (8b for the id, 8b for the frame count, and 5*8b for the fake
+ // frames). Since this is the last section, there may be additional padding
+ // at the end to make the total profile size 8b aligned.
+ EXPECT_GE(TotalSize - StackOffset, 8ULL + 2 * (8 + 8 + 5 * 8));
+
+ // ============= Check contents.
+ unsigned char ExpectedSegmentBytes[72] = {
+ 0x01, 0, 0, 0, 0, 0, 0, 0, // Number of entries
+ 0x10, 0, 0, 0, 0, 0, 0, 0, // Start
+ 0x20, 0, 0, 0, 0, 0, 0, 0, // End
+ 0x0, 0, 0, 0, 0, 0, 0, 0, // Offset
+ 0x20, 0, 0, 0, 0, 0, 0, 0, // UuidSize
+ 0xC, 0x0, 0xF, 0xF, 0xE, 0xE // Uuid
+ };
+ EXPECT_EQ(memcmp(Buffer + SegmentOffset, ExpectedSegmentBytes, 72), 0);
+
+ // Check that the number of entries is 2.
+ EXPECT_EQ(*reinterpret_cast<const uint64_t *>(Buffer + MIBOffset), 2ULL);
+ // Check that stack id is set.
+ EXPECT_EQ(*reinterpret_cast<const uint64_t *>(Buffer + MIBOffset + 8),
+ FakeIds[0]);
+
+ // Only check a few fields of the first MemInfoBlock.
+ unsigned char ExpectedMIBBytes[sizeof(MemInfoBlock)] = {
+ 0x01, 0, 0, 0, // Alloc count
+ 0x02, 0, 0, 0, // Total access count
+ };
+ // Compare contents of 1st MIB after skipping count and stack id.
+ EXPECT_EQ(
+ memcmp(Buffer + MIBOffset + 16, ExpectedMIBBytes, sizeof(MemInfoBlock)),
+ 0);
+ // Compare contents of the 2nd MIB after skipping the count, the first
+ // entry's stack id and MIB, and the second entry's stack id.
+ EXPECT_EQ(memcmp(Buffer + MIBOffset + 16 + sizeof(MemInfoBlock) + 8,
+ ExpectedMIBBytes, sizeof(MemInfoBlock)),
+ 0);
+
+ // Check that the number of entries is 2.
+ EXPECT_EQ(*reinterpret_cast<const uint64_t *>(Buffer + StackOffset), 2ULL);
+ // Check that the 1st stack id is set.
+ EXPECT_EQ(*reinterpret_cast<const uint64_t *>(Buffer + StackOffset + 8),
+ FakeIds[0]);
+ // Contents are the number of PCs followed by each PC value minus 1, since
+ // serialization maps each return address back to the previous instruction.
+ unsigned char ExpectedStackBytes[2][6 * 8] = {
+ {
+ 0x5, 0, 0, 0, 0, 0, 0, 0, // Number of PCs
+ 0x1, 0, 0, 0, 0, 0, 0, 0, // PC ...
+ 0x2, 0, 0, 0, 0, 0, 0, 0, 0x3, 0, 0, 0, 0, 0, 0, 0,
+ 0x4, 0, 0, 0, 0, 0, 0, 0, 0x5, 0, 0, 0, 0, 0, 0, 0,
+ },
+ {
+ 0x5, 0, 0, 0, 0, 0, 0, 0, // Number of PCs
+ 0x2, 0, 0, 0, 0, 0, 0, 0, // PC ...
+ 0x3, 0, 0, 0, 0, 0, 0, 0, 0x4, 0, 0, 0, 0, 0, 0, 0,
+ 0x5, 0, 0, 0, 0, 0, 0, 0, 0x6, 0, 0, 0, 0, 0, 0, 0,
+ },
+ };
+ EXPECT_EQ(memcmp(Buffer + StackOffset + 16, ExpectedStackBytes[0],
+ sizeof(ExpectedStackBytes[0])),
+ 0);
+
+ // Check that the 2nd stack id is set.
+ EXPECT_EQ(
+ *reinterpret_cast<const uint64_t *>(Buffer + StackOffset + 8 + 6 * 8 + 8),
+ FakeIds[1]);
+
+ EXPECT_EQ(memcmp(Buffer + StackOffset + 16 + 6 * 8 + 8, ExpectedStackBytes[1],
+ sizeof(ExpectedStackBytes[1])),
+ 0);
+}
+} // namespace
diff --git a/contrib/llvm-project/compiler-rt/lib/msan/msan.cpp b/contrib/llvm-project/compiler-rt/lib/msan/msan.cpp
index 4fa772fdcb6e..a2fc27de1901 100644
--- a/contrib/llvm-project/compiler-rt/lib/msan/msan.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/msan/msan.cpp
@@ -12,20 +12,22 @@
//===----------------------------------------------------------------------===//
#include "msan.h"
+
#include "msan_chained_origin_depot.h"
#include "msan_origin.h"
+#include "msan_poisoning.h"
#include "msan_report.h"
#include "msan_thread.h"
-#include "msan_poisoning.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
-#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_interface_internal.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_procmaps.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
-#include "sanitizer_common/sanitizer_stackdepot.h"
#include "ubsan/ubsan_flags.h"
#include "ubsan/ubsan_init.h"
@@ -67,8 +69,6 @@ THREADLOCAL u64 __msan_va_arg_overflow_size_tls;
SANITIZER_INTERFACE_ATTRIBUTE
THREADLOCAL u32 __msan_origin_tls;
-static THREADLOCAL int is_in_symbolizer;
-
extern "C" SANITIZER_WEAK_ATTRIBUTE const int __msan_track_origins;
int __msan_get_track_origins() {
@@ -79,15 +79,19 @@ extern "C" SANITIZER_WEAK_ATTRIBUTE const int __msan_keep_going;
namespace __msan {
-void EnterSymbolizer() { ++is_in_symbolizer; }
-void ExitSymbolizer() { --is_in_symbolizer; }
-bool IsInSymbolizer() { return is_in_symbolizer; }
+static THREADLOCAL int is_in_symbolizer_or_unwinder;
+static void EnterSymbolizerOrUnwider() { ++is_in_symbolizer_or_unwinder; }
+static void ExitSymbolizerOrUnwider() { --is_in_symbolizer_or_unwinder; }
+bool IsInSymbolizerOrUnwider() { return is_in_symbolizer_or_unwinder; }
+
+struct UnwinderScope {
+ UnwinderScope() { EnterSymbolizerOrUnwider(); }
+ ~UnwinderScope() { ExitSymbolizerOrUnwider(); }
+};
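
UnwinderScope is a plain RAII depth counter over the thread-local flag. A self-contained sketch of the idiom, with hypothetical names; nesting keeps the flag set until the outermost scope exits:

    #include <cassert>

    thread_local int g_in_unwinder; // hypothetical counter, zero-initialized

    struct ScopedUnwinderFlag {
      ScopedUnwinderFlag() { ++g_in_unwinder; }
      ~ScopedUnwinderFlag() { --g_in_unwinder; }
    };

    int main() {
      assert(g_in_unwinder == 0);
      {
        ScopedUnwinderFlag Outer;
        ScopedUnwinderFlag Inner;
        assert(g_in_unwinder == 2);
      }
      assert(g_in_unwinder == 0);
    }
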
static Flags msan_flags;
-Flags *flags() {
- return &msan_flags;
-}
+Flags *flags() { return &msan_flags; }
int msan_inited = 0;
bool msan_init_is_running;
@@ -134,8 +138,8 @@ static void RegisterMsanFlags(FlagParser *parser, Flags *f) {
#include "msan_flags.inc"
#undef MSAN_FLAG
- FlagHandlerKeepGoing *fh_keep_going =
- new (FlagParser::Alloc) FlagHandlerKeepGoing(&f->halt_on_error);
+ FlagHandlerKeepGoing *fh_keep_going = new (GetGlobalLowLevelAllocator())
+ FlagHandlerKeepGoing(&f->halt_on_error);
parser->RegisterHandler("keep_going", fh_keep_going,
"deprecated, use halt_on_error");
}
@@ -221,10 +225,6 @@ static void InitializeFlags() {
if (f->store_context_size < 1) f->store_context_size = 1;
}
-void PrintWarning(uptr pc, uptr bp) {
- PrintWarningWithOrigin(pc, bp, __msan_origin_tls);
-}
-
void PrintWarningWithOrigin(uptr pc, uptr bp, u32 origin) {
if (msan_expect_umr) {
// Printf("Expected UMR\n");
@@ -299,7 +299,29 @@ u32 ChainOrigin(u32 id, StackTrace *stack) {
return chained.raw_id();
}
-} // namespace __msan
+// The current implementation separates 'id_ptr' from 'descr' and makes
+// 'descr' constant.
+// In the previous implementation, 'descr' was created at compile time and
+// began with '----'. When we saw a descr for the first time, we replaced
+// '----' with a unique id and set the origin to (id | (31st bit)).
+static inline void SetAllocaOrigin(void *a, uptr size, u32 *id_ptr, char *descr,
+ uptr pc) {
+ static const u32 dash = '-';
+ static const u32 first_timer =
+ dash + (dash << 8) + (dash << 16) + (dash << 24);
+ u32 id = *id_ptr;
+ if (id == 0 || id == first_timer) {
+ u32 idx = atomic_fetch_add(&NumStackOriginDescrs, 1, memory_order_relaxed);
+ CHECK_LT(idx, kNumStackOriginDescrs);
+ StackOriginDescr[idx] = descr;
+ StackOriginPC[idx] = pc;
+ id = Origin::CreateStackOrigin(idx).raw_id();
+ *id_ptr = id;
+ }
+ __msan_set_origin(a, size, id);
+}
+
+} // namespace __msan
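
The first_timer constant packs four '-' bytes into one u32 so the not-yet-assigned state can be tested with a single load. A tiny standalone check of that arithmetic:

    #include <cassert>
    #include <cstdint>

    int main() {
      const std::uint32_t Dash = '-'; // 0x2D
      const std::uint32_t FirstTimer =
          Dash + (Dash << 8) + (Dash << 16) + (Dash << 24);
      // Matches a descr string that still begins with "----".
      assert(FirstTimer == 0x2D2D2D2Du);
    }
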
void __sanitizer::BufferedStackTrace::UnwindImpl(
uptr pc, uptr bp, void *context, bool request_fast, u32 max_depth) {
@@ -307,7 +329,7 @@ void __sanitizer::BufferedStackTrace::UnwindImpl(
MsanThread *t = GetCurrentThread();
if (!t || !StackTrace::WillUseFastUnwind(request_fast)) {
// Block reports from our interceptors during _Unwind_Backtrace.
- SymbolizerScope sym_scope;
+ UnwinderScope sym_scope;
return Unwind(max_depth, pc, bp, context, t ? t->stack_top() : 0,
t ? t->stack_bottom() : 0, false);
}
@@ -323,8 +345,7 @@ using namespace __msan;
#define MSAN_MAYBE_WARNING(type, size) \
void __msan_maybe_warning_##size(type s, u32 o) { \
- GET_CALLER_PC_BP_SP; \
- (void) sp; \
+ GET_CALLER_PC_BP; \
if (UNLIKELY(s)) { \
PrintWarningWithOrigin(pc, bp, o); \
if (__msan::flags()->halt_on_error) { \
@@ -343,8 +364,7 @@ MSAN_MAYBE_WARNING(u64, 8)
void __msan_maybe_store_origin_##size(type s, void *p, u32 o) { \
if (UNLIKELY(s)) { \
if (__msan_get_track_origins() > 1) { \
- GET_CALLER_PC_BP_SP; \
- (void) sp; \
+ GET_CALLER_PC_BP; \
GET_STORE_STACK_TRACE_PC_BP(pc, bp); \
o = ChainOrigin(o, &stack); \
} \
@@ -358,9 +378,8 @@ MSAN_MAYBE_STORE_ORIGIN(u32, 4)
MSAN_MAYBE_STORE_ORIGIN(u64, 8)
void __msan_warning() {
- GET_CALLER_PC_BP_SP;
- (void)sp;
- PrintWarning(pc, bp);
+ GET_CALLER_PC_BP;
+ PrintWarningWithOrigin(pc, bp, 0);
if (__msan::flags()->halt_on_error) {
if (__msan::flags()->print_stats)
ReportStats();
@@ -370,9 +389,8 @@ void __msan_warning() {
}
void __msan_warning_noreturn() {
- GET_CALLER_PC_BP_SP;
- (void)sp;
- PrintWarning(pc, bp);
+ GET_CALLER_PC_BP;
+ PrintWarningWithOrigin(pc, bp, 0);
if (__msan::flags()->print_stats)
ReportStats();
Printf("Exiting\n");
@@ -380,8 +398,7 @@ void __msan_warning_noreturn() {
}
void __msan_warning_with_origin(u32 origin) {
- GET_CALLER_PC_BP_SP;
- (void)sp;
+ GET_CALLER_PC_BP;
PrintWarningWithOrigin(pc, bp, origin);
if (__msan::flags()->halt_on_error) {
if (__msan::flags()->print_stats)
@@ -392,8 +409,7 @@ void __msan_warning_with_origin(u32 origin) {
}
void __msan_warning_with_origin_noreturn(u32 origin) {
- GET_CALLER_PC_BP_SP;
- (void)sp;
+ GET_CALLER_PC_BP;
PrintWarningWithOrigin(pc, bp, origin);
if (__msan::flags()->print_stats)
ReportStats();
@@ -433,6 +449,7 @@ void __msan_init() {
__sanitizer_set_report_path(common_flags()->log_path);
InitializeInterceptors();
+ InstallAtForkHandler();
CheckASLR();
InitTlsSize();
InstallDeadlySignalHandlers(MsanOnDeadlySignal);
@@ -450,7 +467,7 @@ void __msan_init() {
__msan_clear_on_return();
if (__msan_get_track_origins())
VPrintf(1, "msan_track_origins\n");
- if (!InitShadow(__msan_get_track_origins())) {
+ if (!InitShadowWithReExec(__msan_get_track_origins())) {
Printf("FATAL: MemorySanitizer can not mmap the shadow memory.\n");
Printf("FATAL: Make sure to compile with -fPIE and to link with -pie.\n");
Printf("FATAL: Disabling ASLR is known to cause this error.\n");
@@ -460,7 +477,8 @@ void __msan_init() {
Die();
}
- Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
+ Symbolizer::GetOrInit()->AddHooks(EnterSymbolizerOrUnwider,
+ ExitSymbolizerOrUnwider);
InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);
@@ -470,7 +488,7 @@ void __msan_init() {
MsanThread *main_thread = MsanThread::Create(nullptr, nullptr);
SetCurrentThread(main_thread);
- main_thread->ThreadStart();
+ main_thread->Init();
#if MSAN_CONTAINS_UBSAN
__ubsan::InitAsPlugin();
@@ -490,8 +508,7 @@ void __msan_set_expect_umr(int expect_umr) {
if (expect_umr) {
msan_expected_umr_found = 0;
} else if (!msan_expected_umr_found) {
- GET_CALLER_PC_BP_SP;
- (void)sp;
+ GET_CALLER_PC_BP;
GET_FATAL_STACK_TRACE_PC_BP(pc, bp);
ReportExpectedUMRNotFound(&stack);
Die();
@@ -515,6 +532,7 @@ void __msan_dump_shadow(const void *x, uptr size) {
}
unsigned char *s = (unsigned char*)MEM_TO_SHADOW(x);
+ Printf("%p[%p] ", (void *)s, x);
for (uptr i = 0; i < size; i++)
Printf("%x%x ", s[i] >> 4, s[i] & 0xf);
Printf("\n");
@@ -538,8 +556,7 @@ void __msan_check_mem_is_initialized(const void *x, uptr size) {
if (offset < 0)
return;
- GET_CALLER_PC_BP_SP;
- (void)sp;
+ GET_CALLER_PC_BP;
ReportUMRInsideAddressRange(__func__, x, size, offset);
__msan::PrintWarningWithOrigin(pc, bp,
__msan_get_origin(((const char *)x) + offset));
@@ -575,45 +592,30 @@ void __msan_set_origin(const void *a, uptr size, u32 origin) {
if (__msan_get_track_origins()) SetOrigin(a, size, origin);
}
-// 'descr' is created at compile time and contains '----' in the beginning.
-// When we see descr for the first time we replace '----' with a uniq id
-// and set the origin to (id | (31-th bit)).
void __msan_set_alloca_origin(void *a, uptr size, char *descr) {
- __msan_set_alloca_origin4(a, size, descr, 0);
+ SetAllocaOrigin(a, size, reinterpret_cast<u32 *>(descr), descr + 4,
+ GET_CALLER_PC());
}
void __msan_set_alloca_origin4(void *a, uptr size, char *descr, uptr pc) {
- static const u32 dash = '-';
- static const u32 first_timer =
- dash + (dash << 8) + (dash << 16) + (dash << 24);
- u32 *id_ptr = (u32*)descr;
- bool print = false; // internal_strstr(descr + 4, "AllocaTOTest") != 0;
- u32 id = *id_ptr;
- if (id == first_timer) {
- u32 idx = atomic_fetch_add(&NumStackOriginDescrs, 1, memory_order_relaxed);
- CHECK_LT(idx, kNumStackOriginDescrs);
- StackOriginDescr[idx] = descr + 4;
-#if SANITIZER_PPC64V1
- // On PowerPC64 ELFv1, the address of a function actually points to a
- // three-doubleword data structure with the first field containing
- // the address of the function's code.
- if (pc)
- pc = *reinterpret_cast<uptr*>(pc);
-#endif
- StackOriginPC[idx] = pc;
- id = Origin::CreateStackOrigin(idx).raw_id();
- *id_ptr = id;
- if (print)
- Printf("First time: idx=%d id=%d %s %p \n", idx, id, descr + 4, pc);
- }
- if (print)
- Printf("__msan_set_alloca_origin: descr=%s id=%x\n", descr + 4, id);
- __msan_set_origin(a, size, id);
+ // Intentionally ignore pc and use the return address instead. This function
+ // is kept for compatibility, in case the program is linked with a library
+ // instrumented by an older clang.
+ SetAllocaOrigin(a, size, reinterpret_cast<u32 *>(descr), descr + 4,
+ GET_CALLER_PC());
+}
+
+void __msan_set_alloca_origin_with_descr(void *a, uptr size, u32 *id_ptr,
+ char *descr) {
+ SetAllocaOrigin(a, size, id_ptr, descr, GET_CALLER_PC());
+}
+
+void __msan_set_alloca_origin_no_descr(void *a, uptr size, u32 *id_ptr) {
+ SetAllocaOrigin(a, size, id_ptr, nullptr, GET_CALLER_PC());
}
u32 __msan_chain_origin(u32 id) {
- GET_CALLER_PC_BP_SP;
- (void)sp;
+ GET_CALLER_PC_BP;
GET_STORE_STACK_TRACE_PC_BP(pc, bp);
return ChainOrigin(id, &stack);
}
diff --git a/contrib/llvm-project/compiler-rt/lib/msan/msan.h b/contrib/llvm-project/compiler-rt/lib/msan/msan.h
index 963b94a54087..7fb58be67a02 100644
--- a/contrib/llvm-project/compiler-rt/lib/msan/msan.h
+++ b/contrib/llvm-project/compiler-rt/lib/msan/msan.h
@@ -33,12 +33,18 @@ struct MappingDesc {
uptr start;
uptr end;
enum Type {
- INVALID, APP, SHADOW, ORIGIN
+ INVALID = 1,
+ ALLOCATOR = 2,
+ APP = 4,
+ SHADOW = 8,
+ ORIGIN = 16,
} type;
const char *name;
};
-
+// Note: MappingDesc::ALLOCATOR entries are only used to check for memory
+// layout compatibility. The actual allocation settings live in
+// msan_allocator.cpp and need to be kept in sync.
#if SANITIZER_LINUX && defined(__mips64)
// MIPS64 maps:
@@ -65,98 +71,52 @@ const MappingDesc kMemoryLayout[] = {
#elif SANITIZER_LINUX && defined(__aarch64__)
-// The mapping describes both 39-bits, 42-bits, and 48-bits VMA. AArch64
-// maps:
-// - 0x0000000000000-0x0000010000000: 39/42/48-bits program own segments
-// - 0x0005500000000-0x0005600000000: 39-bits PIE program segments
-// - 0x0007f80000000-0x0007fffffffff: 39-bits libraries segments
-// - 0x002aa00000000-0x002ab00000000: 42-bits PIE program segments
-// - 0x003ff00000000-0x003ffffffffff: 42-bits libraries segments
-// - 0x0aaaaa0000000-0x0aaab00000000: 48-bits PIE program segments
-// - 0xffff000000000-0x1000000000000: 48-bits libraries segments
-// It is fragmented in multiples segments to increase the memory available
-// on 42-bits (12.21% of total VMA available for 42-bits and 13.28 for
-// 39 bits). The 48-bits segments only cover the usual PIE/default segments
-// plus some more segments (262144GB total, 0.39% total VMA).
+// The mapping assumes a 48-bit VMA. AArch64 maps:
+// - 0x0000000000000-0x0100000000000: the program's own segments (39/42/48-bit)
+// - 0x0a00000000000-0x0b00000000000: 48-bit PIE program segments
+//   Ideally, this would extend to 0x0c00000000000 (2^45 bytes, the maximum
+//   ASLR region for a 48-bit VMA), but it is too hard to fit into the
+//   larger app/shadow/origin regions.
+// - 0x0e00000000000-0x1000000000000: 48-bit libraries segments
const MappingDesc kMemoryLayout[] = {
- {0x00000000000ULL, 0x01000000000ULL, MappingDesc::INVALID, "invalid"},
- {0x01000000000ULL, 0x02000000000ULL, MappingDesc::SHADOW, "shadow-2"},
- {0x02000000000ULL, 0x03000000000ULL, MappingDesc::ORIGIN, "origin-2"},
- {0x03000000000ULL, 0x04000000000ULL, MappingDesc::SHADOW, "shadow-1"},
- {0x04000000000ULL, 0x05000000000ULL, MappingDesc::ORIGIN, "origin-1"},
- {0x05000000000ULL, 0x06000000000ULL, MappingDesc::APP, "app-1"},
- {0x06000000000ULL, 0x07000000000ULL, MappingDesc::INVALID, "invalid"},
- {0x07000000000ULL, 0x08000000000ULL, MappingDesc::APP, "app-2"},
- {0x08000000000ULL, 0x09000000000ULL, MappingDesc::INVALID, "invalid"},
- // The mappings below are used only for 42-bits VMA.
- {0x09000000000ULL, 0x0A000000000ULL, MappingDesc::SHADOW, "shadow-3"},
- {0x0A000000000ULL, 0x0B000000000ULL, MappingDesc::ORIGIN, "origin-3"},
- {0x0B000000000ULL, 0x0F000000000ULL, MappingDesc::INVALID, "invalid"},
- {0x0F000000000ULL, 0x10000000000ULL, MappingDesc::APP, "app-3"},
- {0x10000000000ULL, 0x11000000000ULL, MappingDesc::INVALID, "invalid"},
- {0x11000000000ULL, 0x12000000000ULL, MappingDesc::APP, "app-4"},
- {0x12000000000ULL, 0x17000000000ULL, MappingDesc::INVALID, "invalid"},
- {0x17000000000ULL, 0x18000000000ULL, MappingDesc::SHADOW, "shadow-4"},
- {0x18000000000ULL, 0x19000000000ULL, MappingDesc::ORIGIN, "origin-4"},
- {0x19000000000ULL, 0x20000000000ULL, MappingDesc::INVALID, "invalid"},
- {0x20000000000ULL, 0x21000000000ULL, MappingDesc::APP, "app-5"},
- {0x21000000000ULL, 0x26000000000ULL, MappingDesc::INVALID, "invalid"},
- {0x26000000000ULL, 0x27000000000ULL, MappingDesc::SHADOW, "shadow-5"},
- {0x27000000000ULL, 0x28000000000ULL, MappingDesc::ORIGIN, "origin-5"},
- {0x28000000000ULL, 0x29000000000ULL, MappingDesc::SHADOW, "shadow-7"},
- {0x29000000000ULL, 0x2A000000000ULL, MappingDesc::ORIGIN, "origin-7"},
- {0x2A000000000ULL, 0x2B000000000ULL, MappingDesc::APP, "app-6"},
- {0x2B000000000ULL, 0x2C000000000ULL, MappingDesc::INVALID, "invalid"},
- {0x2C000000000ULL, 0x2D000000000ULL, MappingDesc::SHADOW, "shadow-6"},
- {0x2D000000000ULL, 0x2E000000000ULL, MappingDesc::ORIGIN, "origin-6"},
- {0x2E000000000ULL, 0x2F000000000ULL, MappingDesc::APP, "app-7"},
- {0x2F000000000ULL, 0x39000000000ULL, MappingDesc::INVALID, "invalid"},
- {0x39000000000ULL, 0x3A000000000ULL, MappingDesc::SHADOW, "shadow-9"},
- {0x3A000000000ULL, 0x3B000000000ULL, MappingDesc::ORIGIN, "origin-9"},
- {0x3B000000000ULL, 0x3C000000000ULL, MappingDesc::APP, "app-8"},
- {0x3C000000000ULL, 0x3D000000000ULL, MappingDesc::INVALID, "invalid"},
- {0x3D000000000ULL, 0x3E000000000ULL, MappingDesc::SHADOW, "shadow-8"},
- {0x3E000000000ULL, 0x3F000000000ULL, MappingDesc::ORIGIN, "origin-8"},
- {0x3F000000000ULL, 0x40000000000ULL, MappingDesc::APP, "app-9"},
- // The mappings below are used only for 48-bits VMA.
- // TODO(unknown): 48-bit mapping ony covers the usual PIE, non-PIE
- // segments and some more segments totalizing 262144GB of VMA (which cover
- // only 0.32% of all 48-bit VMA). Memory avaliability can be increase by
- // adding multiple application segments like 39 and 42 mapping.
- {0x0040000000000ULL, 0x0041000000000ULL, MappingDesc::INVALID, "invalid"},
- {0x0041000000000ULL, 0x0042000000000ULL, MappingDesc::APP, "app-10"},
- {0x0042000000000ULL, 0x0047000000000ULL, MappingDesc::INVALID, "invalid"},
- {0x0047000000000ULL, 0x0048000000000ULL, MappingDesc::SHADOW, "shadow-10"},
- {0x0048000000000ULL, 0x0049000000000ULL, MappingDesc::ORIGIN, "origin-10"},
- {0x0049000000000ULL, 0x0050000000000ULL, MappingDesc::INVALID, "invalid"},
- {0x0050000000000ULL, 0x0051000000000ULL, MappingDesc::APP, "app-11"},
- {0x0051000000000ULL, 0x0056000000000ULL, MappingDesc::INVALID, "invalid"},
- {0x0056000000000ULL, 0x0057000000000ULL, MappingDesc::SHADOW, "shadow-11"},
- {0x0057000000000ULL, 0x0058000000000ULL, MappingDesc::ORIGIN, "origin-11"},
- {0x0058000000000ULL, 0x0059000000000ULL, MappingDesc::APP, "app-12"},
- {0x0059000000000ULL, 0x005E000000000ULL, MappingDesc::INVALID, "invalid"},
- {0x005E000000000ULL, 0x005F000000000ULL, MappingDesc::SHADOW, "shadow-12"},
- {0x005F000000000ULL, 0x0060000000000ULL, MappingDesc::ORIGIN, "origin-12"},
- {0x0060000000000ULL, 0x0061000000000ULL, MappingDesc::INVALID, "invalid"},
- {0x0061000000000ULL, 0x0062000000000ULL, MappingDesc::APP, "app-13"},
- {0x0062000000000ULL, 0x0067000000000ULL, MappingDesc::INVALID, "invalid"},
- {0x0067000000000ULL, 0x0068000000000ULL, MappingDesc::SHADOW, "shadow-13"},
- {0x0068000000000ULL, 0x0069000000000ULL, MappingDesc::ORIGIN, "origin-13"},
- {0x0069000000000ULL, 0x0AAAAA0000000ULL, MappingDesc::INVALID, "invalid"},
- {0x0AAAAA0000000ULL, 0x0AAAB00000000ULL, MappingDesc::APP, "app-14"},
- {0x0AAAB00000000ULL, 0x0AACAA0000000ULL, MappingDesc::INVALID, "invalid"},
- {0x0AACAA0000000ULL, 0x0AACB00000000ULL, MappingDesc::SHADOW, "shadow-14"},
- {0x0AACB00000000ULL, 0x0AADAA0000000ULL, MappingDesc::INVALID, "invalid"},
- {0x0AADAA0000000ULL, 0x0AADB00000000ULL, MappingDesc::ORIGIN, "origin-14"},
- {0x0AADB00000000ULL, 0x0FF9F00000000ULL, MappingDesc::INVALID, "invalid"},
- {0x0FF9F00000000ULL, 0x0FFA000000000ULL, MappingDesc::SHADOW, "shadow-15"},
- {0x0FFA000000000ULL, 0x0FFAF00000000ULL, MappingDesc::INVALID, "invalid"},
- {0x0FFAF00000000ULL, 0x0FFB000000000ULL, MappingDesc::ORIGIN, "origin-15"},
- {0x0FFB000000000ULL, 0x0FFFF00000000ULL, MappingDesc::INVALID, "invalid"},
- {0x0FFFF00000000ULL, 0x1000000000000ULL, MappingDesc::APP, "app-15"},
+ {0x0000000000000, 0x0100000000000, MappingDesc::APP, "app-10-13"},
+ {0x0100000000000, 0x0200000000000, MappingDesc::SHADOW, "shadow-14"},
+ {0x0200000000000, 0x0300000000000, MappingDesc::INVALID, "invalid"},
+ {0x0300000000000, 0x0400000000000, MappingDesc::ORIGIN, "origin-14"},
+ {0x0400000000000, 0x0600000000000, MappingDesc::SHADOW, "shadow-15"},
+ {0x0600000000000, 0x0800000000000, MappingDesc::ORIGIN, "origin-15"},
+ {0x0800000000000, 0x0A00000000000, MappingDesc::INVALID, "invalid"},
+ {0x0A00000000000, 0x0B00000000000, MappingDesc::APP, "app-14"},
+ {0x0B00000000000, 0x0C00000000000, MappingDesc::SHADOW, "shadow-10-13"},
+ {0x0C00000000000, 0x0D00000000000, MappingDesc::INVALID, "invalid"},
+ {0x0D00000000000, 0x0E00000000000, MappingDesc::ORIGIN, "origin-10-13"},
+ {0x0E00000000000, 0x0E40000000000, MappingDesc::ALLOCATOR, "allocator"},
+ {0x0E40000000000, 0x1000000000000, MappingDesc::APP, "app-15"},
};
-# define MEM_TO_SHADOW(mem) ((uptr)mem ^ 0x6000000000ULL)
-# define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x1000000000ULL)
+# define MEM_TO_SHADOW(mem) ((uptr)mem ^ 0xB00000000000ULL)
+# define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x200000000000ULL)
+
+#elif SANITIZER_LINUX && SANITIZER_LOONGARCH64
+// LoongArch64 maps:
+// - 0x000000000000-0x010000000000: Program own segments
+// - 0x555500000000-0x555600000000: PIE program segments
+// - 0x7fff00000000-0x7fffffffffff: libraries segments.
+const MappingDesc kMemoryLayout[] = {
+ {0x000000000000ULL, 0x010000000000ULL, MappingDesc::APP, "app-1"},
+ {0x010000000000ULL, 0x100000000000ULL, MappingDesc::SHADOW, "shadow-2"},
+ {0x100000000000ULL, 0x110000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x110000000000ULL, 0x200000000000ULL, MappingDesc::ORIGIN, "origin-2"},
+ {0x200000000000ULL, 0x300000000000ULL, MappingDesc::SHADOW, "shadow-3"},
+ {0x300000000000ULL, 0x400000000000ULL, MappingDesc::ORIGIN, "origin-3"},
+ {0x400000000000ULL, 0x500000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x500000000000ULL, 0x510000000000ULL, MappingDesc::SHADOW, "shadow-1"},
+ {0x510000000000ULL, 0x600000000000ULL, MappingDesc::APP, "app-2"},
+ {0x600000000000ULL, 0x610000000000ULL, MappingDesc::ORIGIN, "origin-1"},
+ {0x610000000000ULL, 0x700000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x700000000000ULL, 0x740000000000ULL, MappingDesc::ALLOCATOR, "allocator"},
+ {0x740000000000ULL, 0x800000000000ULL, MappingDesc::APP, "app-3"}};
+# define MEM_TO_SHADOW(mem) (((uptr)(mem)) ^ 0x500000000000ULL)
+# define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x100000000000ULL)
#elif SANITIZER_LINUX && SANITIZER_PPC64
const MappingDesc kMemoryLayout[] = {
@@ -166,7 +126,8 @@ const MappingDesc kMemoryLayout[] = {
{0x180200000000ULL, 0x1C0000000000ULL, MappingDesc::INVALID, "invalid"},
{0x1C0000000000ULL, 0x2C0200000000ULL, MappingDesc::ORIGIN, "origin"},
{0x2C0200000000ULL, 0x300000000000ULL, MappingDesc::INVALID, "invalid"},
- {0x300000000000ULL, 0x800000000000ULL, MappingDesc::APP, "high memory"}};
+ {0x300000000000ULL, 0x320000000000ULL, MappingDesc::ALLOCATOR, "allocator"},
+ {0x320000000000ULL, 0x800000000000ULL, MappingDesc::APP, "high memory"}};
// Various kernels use different low end ranges but we can combine them into one
// big range. They also use different high end ranges but we can map them all to
@@ -189,12 +150,34 @@ const MappingDesc kMemoryLayout[] = {
{0x180000000000ULL, 0x1C0000000000ULL, MappingDesc::INVALID, "invalid"},
{0x1C0000000000ULL, 0x2C0000000000ULL, MappingDesc::ORIGIN, "origin"},
{0x2C0000000000ULL, 0x440000000000ULL, MappingDesc::INVALID, "invalid"},
- {0x440000000000ULL, 0x500000000000ULL, MappingDesc::APP, "high memory"}};
+ {0x440000000000ULL, 0x460000000000ULL, MappingDesc::ALLOCATOR, "allocator"},
+ {0x460000000000ULL, 0x500000000000ULL, MappingDesc::APP, "high memory"}};
#define MEM_TO_SHADOW(mem) \
((((uptr)(mem)) & ~0xC00000000000ULL) + 0x080000000000ULL)
#define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x140000000000ULL)
+#elif SANITIZER_FREEBSD && defined(__aarch64__)
+
+// Low memory: main binary, MAP_32BIT mappings and modules
+// High memory: heap, modules and main thread stack
+const MappingDesc kMemoryLayout[] = {
+ {0x000000000000ULL, 0x020000000000ULL, MappingDesc::APP, "low memory"},
+ {0x020000000000ULL, 0x200000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x200000000000ULL, 0x620000000000ULL, MappingDesc::SHADOW, "shadow"},
+ {0x620000000000ULL, 0x700000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x700000000000ULL, 0xb20000000000ULL, MappingDesc::ORIGIN, "origin"},
+ {0xb20000000000ULL, 0xc00000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0xc00000000000ULL, 0x1000000000000ULL, MappingDesc::APP, "high memory"}};
+
+// Maps low and high app ranges to contiguous space with zero base:
+// Low: 0000 0000 0000 - 01ff ffff ffff -> 4000 0000 0000 - 41ff ffff ffff
+// High: c000 0000 0000 - ffff ffff ffff -> 0000 0000 0000 - 3fff ffff ffff
+#define LINEARIZE_MEM(mem) \
+ (((uptr)(mem) & ~0x1800000000000ULL) ^ 0x400000000000ULL)
+#define MEM_TO_SHADOW(mem) (LINEARIZE_MEM((mem)) + 0x200000000000ULL)
+#define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x500000000000)
+
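
The FreeBSD/AArch64 macros fold the two app ranges into one zero-based span before adding the shadow offset. A hedged standalone check of the constants, mirroring (not replacing) the macros above:

    #include <cassert>
    #include <cstdint>

    constexpr std::uint64_t Linearize(std::uint64_t M) {
      return (M & ~0x1800000000000ULL) ^ 0x400000000000ULL;
    }
    constexpr std::uint64_t MemToShadow(std::uint64_t M) {
      return Linearize(M) + 0x200000000000ULL;
    }

    int main() {
      // Low app [0x000000000000, 0x020000000000) lands above the high range...
      assert(Linearize(0x000000000000ULL) == 0x400000000000ULL);
      // ...while high app [0xc00000000000, 0x1000000000000) maps to zero base.
      assert(Linearize(0xc00000000000ULL) == 0x000000000000ULL);
      // Both shadows fall inside [0x200000000000, 0x620000000000).
      assert(MemToShadow(0xc00000000000ULL) == 0x200000000000ULL);
      assert(MemToShadow(0x01ffffffffffULL) == 0x61ffffffffffULL);
    }
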
#elif SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 64
// Low memory: main binary, MAP_32BIT mappings and modules
@@ -218,19 +201,6 @@ const MappingDesc kMemoryLayout[] = {
#elif SANITIZER_NETBSD || (SANITIZER_LINUX && SANITIZER_WORDSIZE == 64)
-#ifdef MSAN_LINUX_X86_64_OLD_MAPPING
-// Requries PIE binary and ASLR enabled.
-// Main thread stack and DSOs at 0x7f0000000000 (sometimes 0x7e0000000000).
-// Heap at 0x600000000000.
-const MappingDesc kMemoryLayout[] = {
- {0x000000000000ULL, 0x200000000000ULL, MappingDesc::INVALID, "invalid"},
- {0x200000000000ULL, 0x400000000000ULL, MappingDesc::SHADOW, "shadow"},
- {0x400000000000ULL, 0x600000000000ULL, MappingDesc::ORIGIN, "origin"},
- {0x600000000000ULL, 0x800000000000ULL, MappingDesc::APP, "app"}};
-
-#define MEM_TO_SHADOW(mem) (((uptr)(mem)) & ~0x400000000000ULL)
-#define SHADOW_TO_ORIGIN(mem) (((uptr)(mem)) + 0x200000000000ULL)
-#else // MSAN_LINUX_X86_64_OLD_MAPPING
// All of the following configurations are supported.
// ASLR disabled: main executable and DSOs at 0x555550000000
// PIE and ASLR: main executable and DSOs at 0x7f0000000000
@@ -248,10 +218,10 @@ const MappingDesc kMemoryLayout[] = {
{0x510000000000ULL, 0x600000000000ULL, MappingDesc::APP, "app-2"},
{0x600000000000ULL, 0x610000000000ULL, MappingDesc::ORIGIN, "origin-1"},
{0x610000000000ULL, 0x700000000000ULL, MappingDesc::INVALID, "invalid"},
- {0x700000000000ULL, 0x800000000000ULL, MappingDesc::APP, "app-3"}};
+ {0x700000000000ULL, 0x740000000000ULL, MappingDesc::ALLOCATOR, "allocator"},
+ {0x740000000000ULL, 0x800000000000ULL, MappingDesc::APP, "app-3"}};
#define MEM_TO_SHADOW(mem) (((uptr)(mem)) ^ 0x500000000000ULL)
#define SHADOW_TO_ORIGIN(mem) (((uptr)(mem)) + 0x100000000000ULL)
-#endif // MSAN_LINUX_X86_64_OLD_MAPPING
#else
#error "Unsupported platform"
@@ -264,20 +234,22 @@ const uptr kMemoryLayoutSize = sizeof(kMemoryLayout) / sizeof(kMemoryLayout[0]);
#ifndef __clang__
__attribute__((optimize("unroll-loops")))
#endif
-inline bool addr_is_type(uptr addr, MappingDesc::Type mapping_type) {
+inline bool
+addr_is_type(uptr addr, int mapping_types) {
// It is critical for performance that this loop is unrolled (because then it is
// simplified into just a few constant comparisons).
#ifdef __clang__
#pragma unroll
#endif
for (unsigned i = 0; i < kMemoryLayoutSize; ++i)
- if (kMemoryLayout[i].type == mapping_type &&
+ if ((kMemoryLayout[i].type & mapping_types) &&
addr >= kMemoryLayout[i].start && addr < kMemoryLayout[i].end)
return true;
return false;
}
-#define MEM_IS_APP(mem) addr_is_type((uptr)(mem), MappingDesc::APP)
+#define MEM_IS_APP(mem) \
+ (addr_is_type((uptr)(mem), MappingDesc::APP | MappingDesc::ALLOCATOR))
#define MEM_IS_SHADOW(mem) addr_is_type((uptr)(mem), MappingDesc::SHADOW)
#define MEM_IS_ORIGIN(mem) addr_is_type((uptr)(mem), MappingDesc::ORIGIN)
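
Turning the enum values into distinct bits is what lets addr_is_type test several categories in one pass, as MEM_IS_APP now does for APP | ALLOCATOR. A minimal illustration of the membership check:

    #include <cassert>

    enum Type { INVALID = 1, ALLOCATOR = 2, APP = 4, SHADOW = 8, ORIGIN = 16 };

    int main() {
      const int Query = APP | ALLOCATOR;  // the MEM_IS_APP query
      assert((ALLOCATOR & Query) != 0);   // allocator memory now counts as app
      assert((SHADOW & Query) == 0);
    }
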
@@ -291,37 +263,31 @@ extern bool msan_init_is_running;
extern int msan_report_count;
bool ProtectRange(uptr beg, uptr end);
-bool InitShadow(bool init_origins);
+bool InitShadowWithReExec(bool init_origins);
char *GetProcSelfMaps();
void InitializeInterceptors();
void MsanAllocatorInit();
-void MsanDeallocate(StackTrace *stack, void *ptr);
-
-void *msan_malloc(uptr size, StackTrace *stack);
-void *msan_calloc(uptr nmemb, uptr size, StackTrace *stack);
-void *msan_realloc(void *ptr, uptr size, StackTrace *stack);
-void *msan_reallocarray(void *ptr, uptr nmemb, uptr size, StackTrace *stack);
-void *msan_valloc(uptr size, StackTrace *stack);
-void *msan_pvalloc(uptr size, StackTrace *stack);
-void *msan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack);
-void *msan_memalign(uptr alignment, uptr size, StackTrace *stack);
+void MsanDeallocate(BufferedStackTrace *stack, void *ptr);
+
+void *msan_malloc(uptr size, BufferedStackTrace *stack);
+void *msan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack);
+void *msan_realloc(void *ptr, uptr size, BufferedStackTrace *stack);
+void *msan_reallocarray(void *ptr, uptr nmemb, uptr size,
+ BufferedStackTrace *stack);
+void *msan_valloc(uptr size, BufferedStackTrace *stack);
+void *msan_pvalloc(uptr size, BufferedStackTrace *stack);
+void *msan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack);
+void *msan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack);
int msan_posix_memalign(void **memptr, uptr alignment, uptr size,
- StackTrace *stack);
+ BufferedStackTrace *stack);
void InstallTrapHandler();
void InstallAtExitHandler();
const char *GetStackOriginDescr(u32 id, uptr *pc);
-void EnterSymbolizer();
-void ExitSymbolizer();
-bool IsInSymbolizer();
-
-struct SymbolizerScope {
- SymbolizerScope() { EnterSymbolizer(); }
- ~SymbolizerScope() { ExitSymbolizer(); }
-};
+bool IsInSymbolizerOrUnwider();
void PrintWarning(uptr pc, uptr bp);
void PrintWarningWithOrigin(uptr pc, uptr bp, u32 origin);
@@ -335,36 +301,51 @@ void UnpoisonThreadLocalState();
u32 ChainOrigin(u32 id, StackTrace *stack);
const int STACK_TRACE_TAG_POISON = StackTrace::TAG_CUSTOM + 1;
-
-#define GET_MALLOC_STACK_TRACE \
- BufferedStackTrace stack; \
- if (__msan_get_track_origins() && msan_inited) \
- stack.Unwind(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), \
- nullptr, common_flags()->fast_unwind_on_malloc, \
- common_flags()->malloc_context_size)
+const int STACK_TRACE_TAG_FIELDS = STACK_TRACE_TAG_POISON + 1;
+const int STACK_TRACE_TAG_VPTR = STACK_TRACE_TAG_FIELDS + 1;
+
+#define GET_MALLOC_STACK_TRACE \
+ UNINITIALIZED BufferedStackTrace stack; \
+ if (__msan_get_track_origins() && msan_inited) { \
+ stack.Unwind(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), nullptr, \
+ common_flags()->fast_unwind_on_malloc, \
+ common_flags()->malloc_context_size); \
+ }
// For platforms which support slow unwinder only, we restrict the store context
// size to 1, basically only storing the current pc. We do this because the slow
// unwinder which is based on libunwind is not async signal safe and causes
// random freezes in forking applications as well as in signal handlers.
-#define GET_STORE_STACK_TRACE_PC_BP(pc, bp) \
- BufferedStackTrace stack; \
- if (__msan_get_track_origins() > 1 && msan_inited) { \
- int size = flags()->store_context_size; \
- if (!SANITIZER_CAN_FAST_UNWIND) \
- size = Min(size, 1); \
- stack.Unwind(pc, bp, nullptr, common_flags()->fast_unwind_on_malloc, size);\
+#define GET_STORE_STACK_TRACE_PC_BP(pc, bp) \
+ UNINITIALIZED BufferedStackTrace stack; \
+ if (__msan_get_track_origins() > 1 && msan_inited) { \
+ int size = flags()->store_context_size; \
+ if (!SANITIZER_CAN_FAST_UNWIND) \
+ size = Min(size, 1); \
+ stack.Unwind(pc, bp, nullptr, common_flags()->fast_unwind_on_malloc, \
+ size); \
}
#define GET_STORE_STACK_TRACE \
GET_STORE_STACK_TRACE_PC_BP(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME())
#define GET_FATAL_STACK_TRACE_PC_BP(pc, bp) \
- BufferedStackTrace stack; \
+ UNINITIALIZED BufferedStackTrace stack; \
if (msan_inited) { \
stack.Unwind(pc, bp, nullptr, common_flags()->fast_unwind_on_fatal); \
}
+#define GET_FATAL_STACK_TRACE \
+ GET_FATAL_STACK_TRACE_PC_BP(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME())
+
+// Unwind the stack for a fatal error report, since the `stack` parameter
+// arrives empty when origin tracking is disabled.
+#define GET_FATAL_STACK_TRACE_IF_EMPTY(STACK) \
+ if (msan_inited && (STACK)->size == 0) { \
+ (STACK)->Unwind(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), nullptr, \
+ common_flags()->fast_unwind_on_fatal); \
+ }
+
class ScopedThreadLocalStateBackup {
public:
ScopedThreadLocalStateBackup() { Backup(); }
@@ -380,23 +361,8 @@ void *MsanTSDGet();
void MsanTSDSet(void *tsd);
void MsanTSDDtor(void *tsd);
-} // namespace __msan
+void InstallAtForkHandler();
-#define MSAN_MALLOC_HOOK(ptr, size) \
- do { \
- if (&__sanitizer_malloc_hook) { \
- UnpoisonParam(2); \
- __sanitizer_malloc_hook(ptr, size); \
- } \
- RunMallocHooks(ptr, size); \
- } while (false)
-#define MSAN_FREE_HOOK(ptr) \
- do { \
- if (&__sanitizer_free_hook) { \
- UnpoisonParam(1); \
- __sanitizer_free_hook(ptr); \
- } \
- RunFreeHooks(ptr); \
- } while (false)
+} // namespace __msan
#endif // MSAN_H
diff --git a/contrib/llvm-project/compiler-rt/lib/msan/msan_allocator.cpp b/contrib/llvm-project/compiler-rt/lib/msan/msan_allocator.cpp
index a97bd8371e08..b1bc5b9390f7 100644
--- a/contrib/llvm-project/compiler-rt/lib/msan/msan_allocator.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/msan/msan_allocator.cpp
@@ -11,16 +11,18 @@
// MemorySanitizer allocator.
//===----------------------------------------------------------------------===//
+#include "msan_allocator.h"
+
+#include "msan.h"
+#include "msan_interface_internal.h"
+#include "msan_origin.h"
+#include "msan_poisoning.h"
+#include "msan_thread.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_errno.h"
-#include "msan.h"
-#include "msan_allocator.h"
-#include "msan_origin.h"
-#include "msan_thread.h"
-#include "msan_poisoning.h"
namespace __msan {
@@ -30,6 +32,8 @@ struct Metadata {
struct MsanMapUnmapCallback {
void OnMap(uptr p, uptr size) const {}
+ void OnMapSecondary(uptr p, uptr size, uptr user_begin,
+ uptr user_size) const {}
void OnUnmap(uptr p, uptr size) const {
__msan_unpoison((void *)p, size);
@@ -44,6 +48,9 @@ struct MsanMapUnmapCallback {
}
};
+// Note: to ensure that the allocator is compatible with the application memory
+// layout (especially with high-entropy ASLR), kSpaceBeg and kSpaceSize must be
+// duplicated as MappingDesc::ALLOCATOR in msan.h.
#if defined(__mips64)
static const uptr kMaxAllowedMallocSize = 2UL << 30;
@@ -59,8 +66,7 @@ struct AP32 {
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#elif defined(__x86_64__)
-#if SANITIZER_NETBSD || \
- (SANITIZER_LINUX && !defined(MSAN_LINUX_X86_64_OLD_MAPPING))
+#if SANITIZER_NETBSD || SANITIZER_LINUX
static const uptr kAllocatorSpace = 0x700000000000ULL;
#else
static const uptr kAllocatorSpace = 0x600000000000ULL;
@@ -79,6 +85,22 @@ struct AP64 { // Allocator64 parameters. Deliberately using a short name.
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
+#elif defined(__loongarch_lp64)
+const uptr kAllocatorSpace = 0x700000000000ULL;
+const uptr kMaxAllowedMallocSize = 8UL << 30;
+
+struct AP64 { // Allocator64 parameters. Deliberately using a short name.
+ static const uptr kSpaceBeg = kAllocatorSpace;
+ static const uptr kSpaceSize = 0x40000000000; // 4T.
+ static const uptr kMetadataSize = sizeof(Metadata);
+ typedef DefaultSizeClassMap SizeClassMap;
+ typedef MsanMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+ using AddressSpaceView = LocalAddressSpaceView;
+};
+
+typedef SizeClassAllocator64<AP64> PrimaryAllocator;
+
#elif defined(__powerpc64__)
static const uptr kMaxAllowedMallocSize = 2UL << 30; // 2G
@@ -108,19 +130,18 @@ struct AP64 { // Allocator64 parameters. Deliberately using a short name.
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#elif defined(__aarch64__)
-static const uptr kMaxAllowedMallocSize = 2UL << 30; // 2G
+static const uptr kMaxAllowedMallocSize = 8UL << 30;
-struct AP32 {
- static const uptr kSpaceBeg = 0;
- static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
+struct AP64 {
+ static const uptr kSpaceBeg = 0xE00000000000ULL;
+ static const uptr kSpaceSize = 0x40000000000; // 4T.
static const uptr kMetadataSize = sizeof(Metadata);
- typedef __sanitizer::CompactSizeClassMap SizeClassMap;
- static const uptr kRegionSizeLog = 20;
- using AddressSpaceView = LocalAddressSpaceView;
+ typedef DefaultSizeClassMap SizeClassMap;
typedef MsanMapUnmapCallback MapUnmapCallback;
static const uptr kFlags = 0;
+ using AddressSpaceView = LocalAddressSpaceView;
};
-typedef SizeClassAllocator32<AP32> PrimaryAllocator;
+typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#endif
typedef CombinedAllocator<PrimaryAllocator> Allocator;
typedef Allocator::AllocatorCache AllocatorCache;
@@ -141,25 +162,41 @@ void MsanAllocatorInit() {
max_malloc_size = kMaxAllowedMallocSize;
}
+void LockAllocator() { allocator.ForceLock(); }
+
+void UnlockAllocator() { allocator.ForceUnlock(); }
+
AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
CHECK(ms);
CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator_cache));
return reinterpret_cast<AllocatorCache *>(ms->allocator_cache);
}
+void MsanThreadLocalMallocStorage::Init() {
+ allocator.InitCache(GetAllocatorCache(this));
+}
+
void MsanThreadLocalMallocStorage::CommitBack() {
allocator.SwallowCache(GetAllocatorCache(this));
+ allocator.DestroyCache(GetAllocatorCache(this));
}
-static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
+static void *MsanAllocate(BufferedStackTrace *stack, uptr size, uptr alignment,
bool zeroise) {
- if (size > max_malloc_size) {
+ if (UNLIKELY(size > max_malloc_size)) {
if (AllocatorMayReturnNull()) {
Report("WARNING: MemorySanitizer failed to allocate 0x%zx bytes\n", size);
return nullptr;
}
+ GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
ReportAllocationSizeTooBig(size, max_malloc_size, stack);
}
+ if (UNLIKELY(IsRssLimitExceeded())) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
+ ReportRssLimitExceeded(stack);
+ }
MsanThread *t = GetCurrentThread();
void *allocated;
if (t) {
@@ -174,13 +211,17 @@ static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
SetAllocatorOutOfMemory();
if (AllocatorMayReturnNull())
return nullptr;
+ GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
ReportOutOfMemory(size, stack);
}
Metadata *meta =
reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
meta->requested_size = size;
if (zeroise) {
- __msan_clear_and_unpoison(allocated, size);
+ if (allocator.FromPrimary(allocated))
+ __msan_clear_and_unpoison(allocated, size);
+ else
+ __msan_unpoison(allocated, size); // Mem is already zeroed.
} else if (flags()->poison_in_malloc) {
__msan_poison(allocated, size);
if (__msan_get_track_origins()) {
@@ -189,19 +230,23 @@ static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
__msan_set_origin(allocated, size, o.raw_id());
}
}
- MSAN_MALLOC_HOOK(allocated, size);
+ UnpoisonParam(2);
+ RunMallocHooks(allocated, size);
return allocated;
}
-void MsanDeallocate(StackTrace *stack, void *p) {
+void MsanDeallocate(BufferedStackTrace *stack, void *p) {
CHECK(p);
- MSAN_FREE_HOOK(p);
+ UnpoisonParam(1);
+ RunFreeHooks(p);
+
Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(p));
uptr size = meta->requested_size;
meta->requested_size = 0;
// This memory will not be reused by anyone else, so we are free to keep it
- // poisoned.
- if (flags()->poison_in_free) {
+ // poisoned. The secondary allocator will unmap and unpoison it via
+ // MsanMapUnmapCallback, so there is no need to poison it here.
+ if (flags()->poison_in_free && allocator.FromPrimary(p)) {
__msan_poison(p, size);
if (__msan_get_track_origins()) {
stack->tag = StackTrace::TAG_DEALLOC;
@@ -220,8 +265,8 @@ void MsanDeallocate(StackTrace *stack, void *p) {
}
}
-static void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
- uptr alignment) {
+static void *MsanReallocate(BufferedStackTrace *stack, void *old_p,
+ uptr new_size, uptr alignment) {
Metadata *meta = reinterpret_cast<Metadata*>(allocator.GetMetaData(old_p));
uptr old_size = meta->requested_size;
uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(old_p);
@@ -245,15 +290,31 @@ static void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
return new_p;
}
-static void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
+static void *MsanCalloc(BufferedStackTrace *stack, uptr nmemb, uptr size) {
if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
if (AllocatorMayReturnNull())
return nullptr;
+ GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
ReportCallocOverflow(nmemb, size, stack);
}
return MsanAllocate(stack, nmemb * size, sizeof(u64), true);
}
+static const void *AllocationBegin(const void *p) {
+ if (!p)
+ return nullptr;
+ void *beg = allocator.GetBlockBegin(p);
+ if (!beg)
+ return nullptr;
+ Metadata *b = (Metadata *)allocator.GetMetaData(beg);
+ if (!b)
+ return nullptr;
+ if (b->requested_size == 0)
+ return nullptr;
+
+ return (const void *)beg;
+}
+
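Usage sketch for the interface AllocationBegin feeds (exported as __sanitizer_get_allocated_begin further down); assumes a -fsanitize=memory build with a compiler-rt recent enough to ship this entry point:

#include <cstdlib>
#include <sanitizer/allocator_interface.h>

int main() {
  char *p = static_cast<char *>(std::malloc(64));
  const void *beg = __sanitizer_get_allocated_begin(p + 10);
  bool ok = (beg == p);  // interior pointer maps back to the chunk start
  std::free(p);
  return ok ? 0 : 1;     // non-heap pointers would yield nullptr
}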
static uptr AllocationSize(const void *p) {
if (!p) return 0;
const void *beg = allocator.GetBlockBegin(p);
@@ -262,15 +323,19 @@ static uptr AllocationSize(const void *p) {
return b->requested_size;
}
-void *msan_malloc(uptr size, StackTrace *stack) {
+static uptr AllocationSizeFast(const void *p) {
+ return reinterpret_cast<Metadata *>(allocator.GetMetaData(p))->requested_size;
+}
+
+void *msan_malloc(uptr size, BufferedStackTrace *stack) {
return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
}
-void *msan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
+void *msan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
return SetErrnoOnNull(MsanCalloc(stack, nmemb, size));
}
-void *msan_realloc(void *ptr, uptr size, StackTrace *stack) {
+void *msan_realloc(void *ptr, uptr size, BufferedStackTrace *stack) {
if (!ptr)
return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
if (size == 0) {
@@ -280,26 +345,29 @@ void *msan_realloc(void *ptr, uptr size, StackTrace *stack) {
return SetErrnoOnNull(MsanReallocate(stack, ptr, size, sizeof(u64)));
}
-void *msan_reallocarray(void *ptr, uptr nmemb, uptr size, StackTrace *stack) {
+void *msan_reallocarray(void *ptr, uptr nmemb, uptr size,
+ BufferedStackTrace *stack) {
if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
errno = errno_ENOMEM;
if (AllocatorMayReturnNull())
return nullptr;
+ GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
ReportReallocArrayOverflow(nmemb, size, stack);
}
return msan_realloc(ptr, nmemb * size, stack);
}
-void *msan_valloc(uptr size, StackTrace *stack) {
+void *msan_valloc(uptr size, BufferedStackTrace *stack) {
return SetErrnoOnNull(MsanAllocate(stack, size, GetPageSizeCached(), false));
}
-void *msan_pvalloc(uptr size, StackTrace *stack) {
+void *msan_pvalloc(uptr size, BufferedStackTrace *stack) {
uptr PageSize = GetPageSizeCached();
if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
errno = errno_ENOMEM;
if (AllocatorMayReturnNull())
return nullptr;
+ GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
ReportPvallocOverflow(size, stack);
}
// pvalloc(0) should allocate one page.
@@ -307,31 +375,34 @@ void *msan_pvalloc(uptr size, StackTrace *stack) {
return SetErrnoOnNull(MsanAllocate(stack, size, PageSize, false));
}
-void *msan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
+void *msan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack) {
if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
errno = errno_EINVAL;
if (AllocatorMayReturnNull())
return nullptr;
+ GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
ReportInvalidAlignedAllocAlignment(size, alignment, stack);
}
return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
}
-void *msan_memalign(uptr alignment, uptr size, StackTrace *stack) {
+void *msan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack) {
if (UNLIKELY(!IsPowerOfTwo(alignment))) {
errno = errno_EINVAL;
if (AllocatorMayReturnNull())
return nullptr;
+ GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
ReportInvalidAllocationAlignment(alignment, stack);
}
return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
}
int msan_posix_memalign(void **memptr, uptr alignment, uptr size,
- StackTrace *stack) {
+ BufferedStackTrace *stack) {
if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
if (AllocatorMayReturnNull())
return errno_EINVAL;
+ GET_FATAL_STACK_TRACE_IF_EMPTY(stack);
ReportInvalidPosixMemalignAlignment(alignment, stack);
}
void *ptr = MsanAllocate(stack, size, alignment, false);
@@ -367,4 +438,17 @@ uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }
+const void *__sanitizer_get_allocated_begin(const void *p) {
+ return AllocationBegin(p);
+}
+
uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }
+
+uptr __sanitizer_get_allocated_size_fast(const void *p) {
+ DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
+ uptr ret = AllocationSizeFast(p);
+ DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
+ return ret;
+}
+
+void __sanitizer_purge_allocator() { allocator.ForceReleaseToOS(); }
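A minimal wrapper showing the contract the DCHECKs above encode: the _fast variant is only valid on a pointer already known to be an allocation start. The helper name is hypothetical; assumes a sanitizer build:

#include <cstddef>
#include <sanitizer/allocator_interface.h>

size_t owned_size(const void *p) {
  const void *beg = __sanitizer_get_allocated_begin(p);
  return beg ? __sanitizer_get_allocated_size_fast(beg) : 0;
}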
diff --git a/contrib/llvm-project/compiler-rt/lib/msan/msan_allocator.h b/contrib/llvm-project/compiler-rt/lib/msan/msan_allocator.h
index 365af4d0c4dd..c2a38a401f3b 100644
--- a/contrib/llvm-project/compiler-rt/lib/msan/msan_allocator.h
+++ b/contrib/llvm-project/compiler-rt/lib/msan/msan_allocator.h
@@ -20,6 +20,7 @@ namespace __msan {
struct MsanThreadLocalMallocStorage {
// Allocator cache contains atomic_uint64_t which must be 8-byte aligned.
ALIGNED(8) uptr allocator_cache[96 * (512 * 8 + 16)]; // Opaque.
+ void Init();
void CommitBack();
private:
@@ -27,5 +28,8 @@ struct MsanThreadLocalMallocStorage {
MsanThreadLocalMallocStorage() {}
};
+void LockAllocator();
+void UnlockAllocator();
+
} // namespace __msan
#endif // MSAN_ALLOCATOR_H
diff --git a/contrib/llvm-project/compiler-rt/lib/msan/msan_chained_origin_depot.cpp b/contrib/llvm-project/compiler-rt/lib/msan/msan_chained_origin_depot.cpp
index 5dee80fd4692..b98b0e6b14b5 100644
--- a/contrib/llvm-project/compiler-rt/lib/msan/msan_chained_origin_depot.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/msan/msan_chained_origin_depot.cpp
@@ -19,7 +19,7 @@ namespace __msan {
static ChainedOriginDepot chainedOriginDepot;
-StackDepotStats *ChainedOriginDepotGetStats() {
+StackDepotStats ChainedOriginDepotGetStats() {
return chainedOriginDepot.GetStats();
}
@@ -31,12 +31,10 @@ u32 ChainedOriginDepotGet(u32 id, u32 *other) {
return chainedOriginDepot.Get(id, other);
}
-void ChainedOriginDepotLockAll() {
- chainedOriginDepot.LockAll();
-}
+void ChainedOriginDepotBeforeFork() { chainedOriginDepot.LockBeforeFork(); }
-void ChainedOriginDepotUnlockAll() {
- chainedOriginDepot.UnlockAll();
+void ChainedOriginDepotAfterFork(bool fork_child) {
+ chainedOriginDepot.UnlockAfterFork(fork_child);
}
} // namespace __msan
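A plain-pthreads model (not the depot itself) of why the old LockAll/UnlockAll pair becomes a BeforeFork/AfterFork pair: the lock is held across fork() so the child inherits consistent data, and afterwards the parent unlocks normally while the child, which inherited a locked mutex, re-initializes it instead:

#include <pthread.h>

static pthread_mutex_t depot_mu = PTHREAD_MUTEX_INITIALIZER;

void DepotBeforeFork() { pthread_mutex_lock(&depot_mu); }

void DepotAfterFork(bool fork_child) {
  if (fork_child)
    pthread_mutex_init(&depot_mu, nullptr);  // inherited lock: recreate
  else
    pthread_mutex_unlock(&depot_mu);
}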
diff --git a/contrib/llvm-project/compiler-rt/lib/msan/msan_chained_origin_depot.h b/contrib/llvm-project/compiler-rt/lib/msan/msan_chained_origin_depot.h
index 60ab182fa4c8..7518745dc852 100644
--- a/contrib/llvm-project/compiler-rt/lib/msan/msan_chained_origin_depot.h
+++ b/contrib/llvm-project/compiler-rt/lib/msan/msan_chained_origin_depot.h
@@ -19,7 +19,7 @@
namespace __msan {
// Gets the statistics of the origin chain storage.
-StackDepotStats *ChainedOriginDepotGetStats();
+StackDepotStats ChainedOriginDepotGetStats();
// Stores a chain with StackDepot ID here_id and previous chain ID prev_id.
// If successful, returns true and the new chain id new_id.
@@ -30,8 +30,8 @@ bool ChainedOriginDepotPut(u32 here_id, u32 prev_id, u32 *new_id);
// Retrieves the stored StackDepot ID for the given origin ID.
u32 ChainedOriginDepotGet(u32 id, u32 *other);
-void ChainedOriginDepotLockAll();
-void ChainedOriginDepotUnlockAll();
+void ChainedOriginDepotBeforeFork();
+void ChainedOriginDepotAfterFork(bool fork_child);
} // namespace __msan
diff --git a/contrib/llvm-project/compiler-rt/lib/msan/msan_dl.cpp b/contrib/llvm-project/compiler-rt/lib/msan/msan_dl.cpp
new file mode 100644
index 000000000000..4f9ba52cf47d
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/msan/msan_dl.cpp
@@ -0,0 +1,65 @@
+//===-- msan_dl.cpp -------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemorySanitizer.
+//
+// Helper functions for unpoisoning results of dladdr and dladdr1.
+//===----------------------------------------------------------------------===//
+
+#include "msan_dl.h"
+
+#include <dlfcn.h>
+#include <elf.h>
+#include <link.h>
+
+#include "msan_poisoning.h"
+
+namespace __msan {
+
+void UnpoisonDllAddrInfo(void *info) {
+ Dl_info *ptr = (Dl_info *)(info);
+ __msan_unpoison(ptr, sizeof(*ptr));
+ if (ptr->dli_fname)
+ __msan_unpoison(ptr->dli_fname, internal_strlen(ptr->dli_fname) + 1);
+ if (ptr->dli_sname)
+ __msan_unpoison(ptr->dli_sname, internal_strlen(ptr->dli_sname) + 1);
+}
+
+#if SANITIZER_GLIBC
+void UnpoisonDllAddr1ExtraInfo(void **extra_info, int flags) {
+ if (flags == RTLD_DL_SYMENT) {
+ __msan_unpoison(extra_info, sizeof(void *));
+
+ ElfW(Sym) *s = *((ElfW(Sym) **)(extra_info));
+ __msan_unpoison(s, sizeof(ElfW(Sym)));
+ } else if (flags == RTLD_DL_LINKMAP) {
+ __msan_unpoison(extra_info, sizeof(void *));
+
+ struct link_map *map = *((struct link_map **)(extra_info));
+
+ // Walk forward
+ for (auto *ptr = map; ptr; ptr = ptr->l_next) {
+ __msan_unpoison(ptr, sizeof(struct link_map));
+ if (ptr->l_name)
+ __msan_unpoison(ptr->l_name, internal_strlen(ptr->l_name) + 1);
+ }
+
+ if (!map)
+ return;
+
+ // Walk backward
+ for (auto *ptr = map->l_prev; ptr; ptr = ptr->l_prev) {
+ __msan_unpoison(ptr, sizeof(struct link_map));
+ if (ptr->l_name)
+ __msan_unpoison(ptr->l_name, internal_strlen(ptr->l_name) + 1);
+ }
+ }
+}
+#endif
+
+} // namespace __msan
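A usage sketch of the RTLD_DL_LINKMAP mode that UnpoisonDllAddr1ExtraInfo handles (glibc-only, link with -ldl; probe is just a local symbol to resolve):

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <dlfcn.h>
#include <link.h>
#include <cstdio>

static void probe() {}

int main() {
  Dl_info info;
  struct link_map *map = nullptr;
  if (dladdr1(reinterpret_cast<void *>(&probe), &info,
              reinterpret_cast<void **>(&map), RTLD_DL_LINKMAP) && map) {
    // Under MSan, info, every link_map node, and each l_name string are
    // exactly what the helpers above unpoison.
    std::printf("object: %s\n", map->l_name[0] ? map->l_name : "(main)");
  }
}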
diff --git a/contrib/llvm-project/compiler-rt/lib/msan/msan_dl.h b/contrib/llvm-project/compiler-rt/lib/msan/msan_dl.h
new file mode 100644
index 000000000000..c0105ac3c600
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/msan/msan_dl.h
@@ -0,0 +1,30 @@
+//===-- msan_dl.h ---------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemorySanitizer.
+//
+// Helper functions for unpoisoning results of dladdr and dladdr1.
+//===----------------------------------------------------------------------===//
+
+#ifndef MSAN_DL_H
+#define MSAN_DL_H
+
+#include "msan.h"
+#include "sanitizer_common/sanitizer_common.h"
+
+namespace __msan {
+
+void UnpoisonDllAddrInfo(void *info);
+
+#if SANITIZER_GLIBC
+void UnpoisonDllAddr1ExtraInfo(void **extra_info, int flags);
+#endif
+
+} // namespace __msan
+
+#endif // MSAN_DL_H
diff --git a/contrib/llvm-project/compiler-rt/lib/msan/msan_flags.inc b/contrib/llvm-project/compiler-rt/lib/msan/msan_flags.inc
index e6a26015a22a..16db26bd42ed 100644
--- a/contrib/llvm-project/compiler-rt/lib/msan/msan_flags.inc
+++ b/contrib/llvm-project/compiler-rt/lib/msan/msan_flags.inc
@@ -24,7 +24,7 @@ MSAN_FLAG(bool, poison_heap_with_zeroes, false, "")
MSAN_FLAG(bool, poison_stack_with_zeroes, false, "")
MSAN_FLAG(bool, poison_in_malloc, true, "")
MSAN_FLAG(bool, poison_in_free, true, "")
-MSAN_FLAG(bool, poison_in_dtor, false, "")
+MSAN_FLAG(bool, poison_in_dtor, true, "")
MSAN_FLAG(bool, report_umrs, true, "")
MSAN_FLAG(bool, wrap_signals, true, "")
MSAN_FLAG(bool, print_stats, false, "")
diff --git a/contrib/llvm-project/compiler-rt/lib/msan/msan_interceptors.cpp b/contrib/llvm-project/compiler-rt/lib/msan/msan_interceptors.cpp
index 760f74e927d0..2c9f2c01e14b 100644
--- a/contrib/llvm-project/compiler-rt/lib/msan/msan_interceptors.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/msan/msan_interceptors.cpp
@@ -14,25 +14,29 @@
// sanitizer_common/sanitizer_common_interceptors.h
//===----------------------------------------------------------------------===//
+#define SANITIZER_COMMON_NO_REDEFINE_BUILTINS
+
#include "interception/interception.h"
#include "msan.h"
#include "msan_chained_origin_depot.h"
+#include "msan_dl.h"
#include "msan_origin.h"
+#include "msan_poisoning.h"
#include "msan_report.h"
#include "msan_thread.h"
-#include "msan_poisoning.h"
-#include "sanitizer_common/sanitizer_errno_codes.h"
-#include "sanitizer_common/sanitizer_platform_limits_posix.h"
-#include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_dlsym.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
-#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_errno.h"
-#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_errno_codes.h"
+#include "sanitizer_common/sanitizer_glibc_version.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_linux.h"
+#include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
+#include "sanitizer_common/sanitizer_platform_limits_posix.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "sanitizer_common/sanitizer_vector.h"
@@ -74,22 +78,9 @@ bool IsInInterceptorScope() {
return in_interceptor_scope;
}
-static uptr allocated_for_dlsym;
-static const uptr kDlsymAllocPoolSize = 1024;
-static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize];
-
-static bool IsInDlsymAllocPool(const void *ptr) {
- uptr off = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
- return off < sizeof(alloc_memory_for_dlsym);
-}
-
-static void *AllocateFromLocalPool(uptr size_in_bytes) {
- uptr size_in_words = RoundUpTo(size_in_bytes, kWordSize) / kWordSize;
- void *mem = (void *)&alloc_memory_for_dlsym[allocated_for_dlsym];
- allocated_for_dlsym += size_in_words;
- CHECK_LT(allocated_for_dlsym, kDlsymAllocPoolSize);
- return mem;
-}
+struct DlsymAlloc : public DlSymAllocator<DlsymAlloc> {
+ static bool UseImpl() { return !msan_inited; }
+};
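A C++17 model of the DlSymAllocator CRTP hook that replaces the hand-rolled pool deleted above (the real class lives in sanitizer_allocator_dlsym.h; sizes and names here are illustrative): pre-init allocations triggered by dlsym() are served from a static pool, and PointerIsMine() lets free()/realloc() route such pointers back:

#include <cstddef>
#include <cstdint>

template <class Derived>
struct DlSymAllocatorModel {
  static inline uint64_t pool[1024];
  static inline size_t used = 0;

  static bool PointerIsMine(const void *p) {
    uintptr_t off = (uintptr_t)p - (uintptr_t)pool;
    return off < sizeof(pool);
  }
  static void *Allocate(size_t bytes) {
    if (!Derived::UseImpl())
      return nullptr;  // after init, the real allocator takes over
    size_t words = (bytes + 7) / 8;
    if (used + words > 1024)
      return nullptr;
    void *res = &pool[used];
    used += words;
    return res;
  }
  static void Free(void *) {}  // pool memory is simply leaked
};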
#define ENSURE_MSAN_INITED() do { \
CHECK(!msan_init_is_running); \
@@ -102,10 +93,10 @@ static void *AllocateFromLocalPool(uptr size_in_bytes) {
#define CHECK_UNPOISONED_0(x, n) \
do { \
sptr __offset = __msan_test_shadow(x, n); \
- if (__msan::IsInSymbolizer()) break; \
+ if (__msan::IsInSymbolizerOrUnwider()) \
+ break; \
if (__offset >= 0 && __msan::flags()->report_umrs) { \
- GET_CALLER_PC_BP_SP; \
- (void)sp; \
+ GET_CALLER_PC_BP; \
ReportUMRInsideAddressRange(__func__, x, n, __offset); \
__msan::PrintWarningWithOrigin( \
pc, bp, __msan_get_origin((const char *)x + __offset)); \
@@ -220,18 +211,24 @@ INTERCEPTOR(void *, pvalloc, SIZE_T size) {
#endif
INTERCEPTOR(void, free, void *ptr) {
+ if (UNLIKELY(!ptr))
+ return;
+ if (DlsymAlloc::PointerIsMine(ptr))
+ return DlsymAlloc::Free(ptr);
GET_MALLOC_STACK_TRACE;
- if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr))) return;
MsanDeallocate(&stack, ptr);
}
#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
INTERCEPTOR(void, cfree, void *ptr) {
+ if (UNLIKELY(!ptr))
+ return;
+ if (DlsymAlloc::PointerIsMine(ptr))
+ return DlsymAlloc::Free(ptr);
GET_MALLOC_STACK_TRACE;
- if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr))) return;
MsanDeallocate(&stack, ptr);
}
-#define MSAN_MAYBE_INTERCEPT_CFREE INTERCEPT_FUNCTION(cfree)
+# define MSAN_MAYBE_INTERCEPT_CFREE INTERCEPT_FUNCTION(cfree)
#else
#define MSAN_MAYBE_INTERCEPT_CFREE
#endif
@@ -246,23 +243,37 @@ INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
#define MSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE
#endif
+#if (!SANITIZER_FREEBSD && !SANITIZER_NETBSD) || __GLIBC_PREREQ(2, 33)
+template <class T>
+static NOINLINE void clear_mallinfo(T *sret) {
+ ENSURE_MSAN_INITED();
+ internal_memset(sret, 0, sizeof(*sret));
+ __msan_unpoison(sret, sizeof(*sret));
+}
+#endif
+
#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
-// This function actually returns a struct by value, but we can't unpoison a
-// temporary! The following is equivalent on all supported platforms but
-// aarch64 (which uses a different register for sret value). We have a test
-// to confirm that.
-INTERCEPTOR(void, mallinfo, __sanitizer_struct_mallinfo *sret) {
-#ifdef __aarch64__
- uptr r8;
- asm volatile("mov %0,x8" : "=r" (r8));
- sret = reinterpret_cast<__sanitizer_struct_mallinfo*>(r8);
+// Interceptors use NRVO and assume that sret will be pre-allocated in
+// the caller's frame.
+INTERCEPTOR(__sanitizer_struct_mallinfo, mallinfo) {
+ __sanitizer_struct_mallinfo sret;
+ clear_mallinfo(&sret);
+ return sret;
+}
+# define MSAN_MAYBE_INTERCEPT_MALLINFO INTERCEPT_FUNCTION(mallinfo)
+#else
+# define MSAN_MAYBE_INTERCEPT_MALLINFO
#endif
- REAL(memset)(sret, 0, sizeof(*sret));
- __msan_unpoison(sret, sizeof(*sret));
+
+#if __GLIBC_PREREQ(2, 33)
+INTERCEPTOR(__sanitizer_struct_mallinfo2, mallinfo2) {
+ __sanitizer_struct_mallinfo2 sret;
+ clear_mallinfo(&sret);
+ return sret;
}
-#define MSAN_MAYBE_INTERCEPT_MALLINFO INTERCEPT_FUNCTION(mallinfo)
+# define MSAN_MAYBE_INTERCEPT_MALLINFO2 INTERCEPT_FUNCTION(mallinfo2)
#else
-#define MSAN_MAYBE_INTERCEPT_MALLINFO
+# define MSAN_MAYBE_INTERCEPT_MALLINFO2
#endif
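A hedged model of the by-value scheme that replaces the old per-arch register hack: zeroing and unpoisoning the named local is visible to the caller because NRVO constructs it directly in the caller's return slot (NRVO is an assumption, as the comment above notes, not a standard guarantee). mallinfo_like is an illustrative stand-in:

#include <cstring>
#include <sanitizer/msan_interface.h>

struct mallinfo_like { long arena, ordblks, uordblks, fordblks; };

mallinfo_like intercepted_mallinfo() {
  mallinfo_like sret;
  std::memset(&sret, 0, sizeof(sret));
  __msan_unpoison(&sret, sizeof(sret));  // marks the caller's copy too
  return sret;  // NRVO: no intermediate temporary
}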
#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
@@ -286,7 +297,7 @@ INTERCEPTOR(void, malloc_stats, void) {
INTERCEPTOR(char *, strcpy, char *dest, const char *src) {
ENSURE_MSAN_INITED();
GET_STORE_STACK_TRACE;
- SIZE_T n = REAL(strlen)(src);
+ SIZE_T n = internal_strlen(src);
CHECK_UNPOISONED_STRING(src + n, 0);
char *res = REAL(strcpy)(dest, src);
CopyShadowAndOrigin(dest, src, n + 1, &stack);
@@ -296,7 +307,7 @@ INTERCEPTOR(char *, strcpy, char *dest, const char *src) {
INTERCEPTOR(char *, strncpy, char *dest, const char *src, SIZE_T n) {
ENSURE_MSAN_INITED();
GET_STORE_STACK_TRACE;
- SIZE_T copy_size = REAL(strnlen)(src, n);
+ SIZE_T copy_size = internal_strnlen(src, n);
if (copy_size < n)
copy_size++; // trailing \0
char *res = REAL(strncpy)(dest, src, n);
@@ -309,15 +320,27 @@ INTERCEPTOR(char *, strncpy, char *dest, const char *src, SIZE_T n) {
INTERCEPTOR(char *, stpcpy, char *dest, const char *src) {
ENSURE_MSAN_INITED();
GET_STORE_STACK_TRACE;
- SIZE_T n = REAL(strlen)(src);
+ SIZE_T n = internal_strlen(src);
CHECK_UNPOISONED_STRING(src + n, 0);
char *res = REAL(stpcpy)(dest, src);
CopyShadowAndOrigin(dest, src, n + 1, &stack);
return res;
}
-#define MSAN_MAYBE_INTERCEPT_STPCPY INTERCEPT_FUNCTION(stpcpy)
+
+INTERCEPTOR(char *, stpncpy, char *dest, const char *src, SIZE_T n) {
+ ENSURE_MSAN_INITED();
+ GET_STORE_STACK_TRACE;
+ SIZE_T copy_size = Min(n, internal_strnlen(src, n) + 1);
+ char *res = REAL(stpncpy)(dest, src, n);
+ CopyShadowAndOrigin(dest, src, copy_size, &stack);
+ __msan_unpoison(dest + copy_size, n - copy_size);
+ return res;
+}
+# define MSAN_MAYBE_INTERCEPT_STPCPY INTERCEPT_FUNCTION(stpcpy)
+# define MSAN_MAYBE_INTERCEPT_STPNCPY INTERCEPT_FUNCTION(stpncpy)
#else
#define MSAN_MAYBE_INTERCEPT_STPCPY
+# define MSAN_MAYBE_INTERCEPT_STPNCPY
#endif
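The stpncpy interceptor's sizing rule, extracted as a sketch: only min(n, strnlen(src, n) + 1) bytes come from src and inherit its shadow; the zero padding stpncpy writes after the terminator is always initialized, hence the trailing __msan_unpoison. Assumes a POSIX strnlen:

#include <algorithm>
#include <cstring>

size_t stpncpy_copy_size(const char *src, size_t n) {
  // Bytes copied from src, including the terminator if it fits.
  return std::min(n, ::strnlen(src, n) + 1);
}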
INTERCEPTOR(char *, strdup, char *src) {
@@ -325,7 +348,7 @@ INTERCEPTOR(char *, strdup, char *src) {
GET_STORE_STACK_TRACE;
// On FreeBSD strdup() leverages strlen().
InterceptorScope interceptor_scope;
- SIZE_T n = REAL(strlen)(src);
+ SIZE_T n = internal_strlen(src);
CHECK_UNPOISONED_STRING(src + n, 0);
char *res = REAL(strdup)(src);
CopyShadowAndOrigin(res, src, n + 1, &stack);
@@ -336,7 +359,7 @@ INTERCEPTOR(char *, strdup, char *src) {
INTERCEPTOR(char *, __strdup, char *src) {
ENSURE_MSAN_INITED();
GET_STORE_STACK_TRACE;
- SIZE_T n = REAL(strlen)(src);
+ SIZE_T n = internal_strlen(src);
CHECK_UNPOISONED_STRING(src + n, 0);
char *res = REAL(__strdup)(src);
CopyShadowAndOrigin(res, src, n + 1, &stack);
@@ -351,7 +374,7 @@ INTERCEPTOR(char *, __strdup, char *src) {
INTERCEPTOR(char *, gcvt, double number, SIZE_T ndigit, char *buf) {
ENSURE_MSAN_INITED();
char *res = REAL(gcvt)(number, ndigit, buf);
- SIZE_T n = REAL(strlen)(buf);
+ SIZE_T n = internal_strlen(buf);
__msan_unpoison(buf, n + 1);
return res;
}
@@ -363,8 +386,8 @@ INTERCEPTOR(char *, gcvt, double number, SIZE_T ndigit, char *buf) {
INTERCEPTOR(char *, strcat, char *dest, const char *src) {
ENSURE_MSAN_INITED();
GET_STORE_STACK_TRACE;
- SIZE_T src_size = REAL(strlen)(src);
- SIZE_T dest_size = REAL(strlen)(dest);
+ SIZE_T src_size = internal_strlen(src);
+ SIZE_T dest_size = internal_strlen(dest);
CHECK_UNPOISONED_STRING(src + src_size, 0);
CHECK_UNPOISONED_STRING(dest + dest_size, 0);
char *res = REAL(strcat)(dest, src);
@@ -375,8 +398,8 @@ INTERCEPTOR(char *, strcat, char *dest, const char *src) {
INTERCEPTOR(char *, strncat, char *dest, const char *src, SIZE_T n) {
ENSURE_MSAN_INITED();
GET_STORE_STACK_TRACE;
- SIZE_T dest_size = REAL(strlen)(dest);
- SIZE_T copy_size = REAL(strnlen)(src, n);
+ SIZE_T dest_size = internal_strlen(dest);
+ SIZE_T copy_size = internal_strnlen(src, n);
CHECK_UNPOISONED_STRING(dest + dest_size, 0);
char *res = REAL(strncat)(dest, src, n);
CopyShadowAndOrigin(dest + dest_size, src, copy_size, &stack);
@@ -392,11 +415,25 @@ INTERCEPTOR(char *, strncat, char *dest, const char *src, SIZE_T n) {
__msan_unpoison(endptr, sizeof(*endptr)); \
return res;
+// On s390x, long double return values are passed via implicit reference,
+// which needs to be unpoisoned. We make the implicit pointer explicit.
+#define INTERCEPTOR_STRTO_SRET_BODY(func, sret, ...) \
+ ENSURE_MSAN_INITED(); \
+ REAL(func)(sret, __VA_ARGS__); \
+ __msan_unpoison(sret, sizeof(*sret)); \
+ __msan_unpoison(endptr, sizeof(*endptr));
+
#define INTERCEPTOR_STRTO(ret_type, func, char_type) \
INTERCEPTOR(ret_type, func, const char_type *nptr, char_type **endptr) { \
INTERCEPTOR_STRTO_BODY(ret_type, func, nptr, endptr); \
}
+#define INTERCEPTOR_STRTO_SRET(ret_type, func, char_type) \
+ INTERCEPTOR(void, func, ret_type *sret, const char_type *nptr, \
+ char_type **endptr) { \
+ INTERCEPTOR_STRTO_SRET_BODY(func, sret, nptr, endptr); \
+ }
+
#define INTERCEPTOR_STRTO_BASE(ret_type, func, char_type) \
INTERCEPTOR(ret_type, func, const char_type *nptr, char_type **endptr, \
int base) { \
@@ -409,6 +446,12 @@ INTERCEPTOR(char *, strncat, char *dest, const char *src, SIZE_T n) {
INTERCEPTOR_STRTO_BODY(ret_type, func, nptr, endptr, loc); \
}
+#define INTERCEPTOR_STRTO_SRET_LOC(ret_type, func, char_type) \
+ INTERCEPTOR(void, func, ret_type *sret, const char_type *nptr, \
+ char_type **endptr, void *loc) { \
+ INTERCEPTOR_STRTO_SRET_BODY(func, sret, nptr, endptr, loc); \
+ }
+
#define INTERCEPTOR_STRTO_BASE_LOC(ret_type, func, char_type) \
INTERCEPTOR(ret_type, func, const char_type *nptr, char_type **endptr, \
int base, void *loc) { \
@@ -420,6 +463,10 @@ INTERCEPTOR(char *, strncat, char *dest, const char *src, SIZE_T n) {
INTERCEPTOR_STRTO(ret_type, func, char_type) \
INTERCEPTOR_STRTO_LOC(ret_type, func##_l, char_type)
+#define INTERCEPTORS_STRTO_SRET(ret_type, func, char_type) \
+ INTERCEPTOR_STRTO_SRET(ret_type, func, char_type) \
+ INTERCEPTOR_STRTO_SRET_LOC(ret_type, func##_l, char_type)
+
#define INTERCEPTORS_STRTO_BASE(ret_type, func, char_type) \
INTERCEPTOR_STRTO_BASE(ret_type, func, char_type) \
INTERCEPTOR_STRTO_BASE_LOC(ret_type, func##_l, char_type)
@@ -431,6 +478,12 @@ INTERCEPTOR(char *, strncat, char *dest, const char *src, SIZE_T n) {
INTERCEPTOR_STRTO_LOC(ret_type, __##func##_l, char_type) \
INTERCEPTOR_STRTO_LOC(ret_type, __##func##_internal, char_type)
+#define INTERCEPTORS_STRTO_SRET(ret_type, func, char_type) \
+ INTERCEPTOR_STRTO_SRET(ret_type, func, char_type) \
+ INTERCEPTOR_STRTO_SRET_LOC(ret_type, func##_l, char_type) \
+ INTERCEPTOR_STRTO_SRET_LOC(ret_type, __##func##_l, char_type) \
+ INTERCEPTOR_STRTO_SRET_LOC(ret_type, __##func##_internal, char_type)
+
#define INTERCEPTORS_STRTO_BASE(ret_type, func, char_type) \
INTERCEPTOR_STRTO_BASE(ret_type, func, char_type) \
INTERCEPTOR_STRTO_BASE_LOC(ret_type, func##_l, char_type) \
@@ -440,7 +493,11 @@ INTERCEPTOR(char *, strncat, char *dest, const char *src, SIZE_T n) {
INTERCEPTORS_STRTO(double, strtod, char)
INTERCEPTORS_STRTO(float, strtof, char)
+#ifdef __s390x__
+INTERCEPTORS_STRTO_SRET(long double, strtold, char)
+#else
INTERCEPTORS_STRTO(long double, strtold, char)
+#endif
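In spirit, what the _SRET macros expand to on s390x (simplified; __real_strtold is a hypothetical stand-in for the REAL() slot): the ABI returns long double through a hidden pointer, which the interceptor makes explicit so the pointee can be unpoisoned after the call:

#include <sanitizer/msan_interface.h>

extern "C" long double __real_strtold(const char *nptr, char **endptr);

extern "C" void wrapped_strtold(long double *sret, const char *nptr,
                                char **endptr) {
  *sret = __real_strtold(nptr, endptr);
  __msan_unpoison(sret, sizeof(*sret));
  __msan_unpoison(endptr, sizeof(*endptr));
}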
INTERCEPTORS_STRTO_BASE(long, strtol, char)
INTERCEPTORS_STRTO_BASE(long long, strtoll, char)
INTERCEPTORS_STRTO_BASE(unsigned long, strtoul, char)
@@ -449,12 +506,43 @@ INTERCEPTORS_STRTO_BASE(u64, strtouq, char)
INTERCEPTORS_STRTO(double, wcstod, wchar_t)
INTERCEPTORS_STRTO(float, wcstof, wchar_t)
+#ifdef __s390x__
+INTERCEPTORS_STRTO_SRET(long double, wcstold, wchar_t)
+#else
INTERCEPTORS_STRTO(long double, wcstold, wchar_t)
+#endif
INTERCEPTORS_STRTO_BASE(long, wcstol, wchar_t)
INTERCEPTORS_STRTO_BASE(long long, wcstoll, wchar_t)
INTERCEPTORS_STRTO_BASE(unsigned long, wcstoul, wchar_t)
INTERCEPTORS_STRTO_BASE(unsigned long long, wcstoull, wchar_t)
+#if SANITIZER_GLIBC
+INTERCEPTORS_STRTO(double, __isoc23_strtod, char)
+INTERCEPTORS_STRTO(float, __isoc23_strtof, char)
+#ifdef __s390x__
+INTERCEPTORS_STRTO_SRET(long double, __isoc23_strtold, char)
+#else
+INTERCEPTORS_STRTO(long double, __isoc23_strtold, char)
+#endif
+INTERCEPTORS_STRTO_BASE(long, __isoc23_strtol, char)
+INTERCEPTORS_STRTO_BASE(long long, __isoc23_strtoll, char)
+INTERCEPTORS_STRTO_BASE(unsigned long, __isoc23_strtoul, char)
+INTERCEPTORS_STRTO_BASE(unsigned long long, __isoc23_strtoull, char)
+INTERCEPTORS_STRTO_BASE(u64, __isoc23_strtouq, char)
+
+INTERCEPTORS_STRTO(double, __isoc23_wcstod, wchar_t)
+INTERCEPTORS_STRTO(float, __isoc23_wcstof, wchar_t)
+#ifdef __s390x__
+INTERCEPTORS_STRTO_SRET(long double, __isoc23_wcstold, wchar_t)
+#else
+INTERCEPTORS_STRTO(long double, __isoc23_wcstold, wchar_t)
+#endif
+INTERCEPTORS_STRTO_BASE(long, __isoc23_wcstol, wchar_t)
+INTERCEPTORS_STRTO_BASE(long long, __isoc23_wcstoll, wchar_t)
+INTERCEPTORS_STRTO_BASE(unsigned long, __isoc23_wcstoul, wchar_t)
+INTERCEPTORS_STRTO_BASE(unsigned long long, __isoc23_wcstoull, wchar_t)
+#endif
+
#if SANITIZER_NETBSD
#define INTERCEPT_STRTO(func) \
INTERCEPT_FUNCTION(func); \
@@ -465,6 +553,12 @@ INTERCEPTORS_STRTO_BASE(unsigned long long, wcstoull, wchar_t)
INTERCEPT_FUNCTION(func##_l); \
INTERCEPT_FUNCTION(__##func##_l); \
INTERCEPT_FUNCTION(__##func##_internal);
+
+#define INTERCEPT_STRTO_VER(func, ver) \
+ INTERCEPT_FUNCTION_VER(func, ver); \
+ INTERCEPT_FUNCTION_VER(func##_l, ver); \
+ INTERCEPT_FUNCTION_VER(__##func##_l, ver); \
+ INTERCEPT_FUNCTION_VER(__##func##_internal, ver);
#endif
@@ -612,7 +706,8 @@ INTERCEPTOR(char *, fcvt, double x, int a, int *b, int *c) {
char *res = REAL(fcvt)(x, a, b, c);
__msan_unpoison(b, sizeof(*b));
__msan_unpoison(c, sizeof(*c));
- if (res) __msan_unpoison(res, REAL(strlen)(res) + 1);
+ if (res)
+ __msan_unpoison(res, internal_strlen(res) + 1);
return res;
}
#define MSAN_MAYBE_INTERCEPT_FCVT INTERCEPT_FUNCTION(fcvt)
@@ -625,7 +720,8 @@ INTERCEPTOR(char *, getenv, char *name) {
return REAL(getenv)(name);
ENSURE_MSAN_INITED();
char *res = REAL(getenv)(name);
- if (res) __msan_unpoison(res, REAL(strlen)(res) + 1);
+ if (res)
+ __msan_unpoison(res, internal_strlen(res) + 1);
return res;
}
@@ -635,7 +731,7 @@ static void UnpoisonEnviron() {
char **envp = environ;
for (; *envp; ++envp) {
__msan_unpoison(envp, sizeof(*envp));
- __msan_unpoison(*envp, REAL(strlen)(*envp) + 1);
+ __msan_unpoison(*envp, internal_strlen(*envp) + 1);
}
// Trailing NULL pointer.
__msan_unpoison(envp, sizeof(*envp));
@@ -656,7 +752,8 @@ INTERCEPTOR(int, putenv, char *string) {
return res;
}
-#if SANITIZER_FREEBSD || SANITIZER_NETBSD
+#define SANITIZER_STAT_LINUX (SANITIZER_LINUX && __GLIBC_PREREQ(2, 33))
+#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_STAT_LINUX
INTERCEPTOR(int, fstat, int fd, void *buf) {
ENSURE_MSAN_INITED();
int res = REAL(fstat)(fd, buf);
@@ -664,12 +761,25 @@ INTERCEPTOR(int, fstat, int fd, void *buf) {
__msan_unpoison(buf, __sanitizer::struct_stat_sz);
return res;
}
-#define MSAN_MAYBE_INTERCEPT_FSTAT INTERCEPT_FUNCTION(fstat)
+# define MSAN_MAYBE_INTERCEPT_FSTAT MSAN_INTERCEPT_FUNC(fstat)
#else
#define MSAN_MAYBE_INTERCEPT_FSTAT
#endif
-#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
+#if SANITIZER_STAT_LINUX
+INTERCEPTOR(int, fstat64, int fd, void *buf) {
+ ENSURE_MSAN_INITED();
+ int res = REAL(fstat64)(fd, buf);
+ if (!res)
+ __msan_unpoison(buf, __sanitizer::struct_stat64_sz);
+ return res;
+}
+# define MSAN_MAYBE_INTERCEPT_FSTAT64 MSAN_INTERCEPT_FUNC(fstat64)
+#else
+# define MSAN_MAYBE_INTERCEPT_FSTAT64
+#endif
+
+#if SANITIZER_GLIBC
INTERCEPTOR(int, __fxstat, int magic, int fd, void *buf) {
ENSURE_MSAN_INITED();
int res = REAL(__fxstat)(magic, fd, buf);
@@ -677,12 +787,12 @@ INTERCEPTOR(int, __fxstat, int magic, int fd, void *buf) {
__msan_unpoison(buf, __sanitizer::struct_stat_sz);
return res;
}
-#define MSAN_MAYBE_INTERCEPT___FXSTAT INTERCEPT_FUNCTION(__fxstat)
+# define MSAN_MAYBE_INTERCEPT___FXSTAT MSAN_INTERCEPT_FUNC(__fxstat)
#else
#define MSAN_MAYBE_INTERCEPT___FXSTAT
#endif
-#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
+#if SANITIZER_GLIBC
INTERCEPTOR(int, __fxstat64, int magic, int fd, void *buf) {
ENSURE_MSAN_INITED();
int res = REAL(__fxstat64)(magic, fd, buf);
@@ -690,20 +800,37 @@ INTERCEPTOR(int, __fxstat64, int magic, int fd, void *buf) {
__msan_unpoison(buf, __sanitizer::struct_stat64_sz);
return res;
}
-#define MSAN_MAYBE_INTERCEPT___FXSTAT64 INTERCEPT_FUNCTION(__fxstat64)
+# define MSAN_MAYBE_INTERCEPT___FXSTAT64 MSAN_INTERCEPT_FUNC(__fxstat64)
#else
-#define MSAN_MAYBE_INTERCEPT___FXSTAT64
+# define MSAN_MAYBE_INTERCEPT___FXSTAT64
#endif
-#if SANITIZER_FREEBSD || SANITIZER_NETBSD
+#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_STAT_LINUX
INTERCEPTOR(int, fstatat, int fd, char *pathname, void *buf, int flags) {
ENSURE_MSAN_INITED();
int res = REAL(fstatat)(fd, pathname, buf, flags);
if (!res) __msan_unpoison(buf, __sanitizer::struct_stat_sz);
return res;
}
-# define MSAN_INTERCEPT_FSTATAT INTERCEPT_FUNCTION(fstatat)
+# define MSAN_MAYBE_INTERCEPT_FSTATAT MSAN_INTERCEPT_FUNC(fstatat)
+#else
+# define MSAN_MAYBE_INTERCEPT_FSTATAT
+#endif
+
+#if SANITIZER_STAT_LINUX
+INTERCEPTOR(int, fstatat64, int fd, char *pathname, void *buf, int flags) {
+ ENSURE_MSAN_INITED();
+ int res = REAL(fstatat64)(fd, pathname, buf, flags);
+ if (!res)
+ __msan_unpoison(buf, __sanitizer::struct_stat64_sz);
+ return res;
+}
+# define MSAN_MAYBE_INTERCEPT_FSTATAT64 MSAN_INTERCEPT_FUNC(fstatat64)
#else
+# define MSAN_MAYBE_INTERCEPT_FSTATAT64
+#endif
+
+#if SANITIZER_GLIBC
INTERCEPTOR(int, __fxstatat, int magic, int fd, char *pathname, void *buf,
int flags) {
ENSURE_MSAN_INITED();
@@ -711,10 +838,12 @@ INTERCEPTOR(int, __fxstatat, int magic, int fd, char *pathname, void *buf,
if (!res) __msan_unpoison(buf, __sanitizer::struct_stat_sz);
return res;
}
-# define MSAN_INTERCEPT_FSTATAT INTERCEPT_FUNCTION(__fxstatat)
+# define MSAN_MAYBE_INTERCEPT___FXSTATAT MSAN_INTERCEPT_FUNC(__fxstatat)
+#else
+# define MSAN_MAYBE_INTERCEPT___FXSTATAT
#endif
-#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
+#if SANITIZER_GLIBC
INTERCEPTOR(int, __fxstatat64, int magic, int fd, char *pathname, void *buf,
int flags) {
ENSURE_MSAN_INITED();
@@ -722,9 +851,9 @@ INTERCEPTOR(int, __fxstatat64, int magic, int fd, char *pathname, void *buf,
if (!res) __msan_unpoison(buf, __sanitizer::struct_stat64_sz);
return res;
}
-#define MSAN_MAYBE_INTERCEPT___FXSTATAT64 INTERCEPT_FUNCTION(__fxstatat64)
+# define MSAN_MAYBE_INTERCEPT___FXSTATAT64 MSAN_INTERCEPT_FUNC(__fxstatat64)
#else
-#define MSAN_MAYBE_INTERCEPT___FXSTATAT64
+# define MSAN_MAYBE_INTERCEPT___FXSTATAT64
#endif
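All the stat-family interceptors above share one shape, sketched here against plain libc (wrapper name illustrative; the runtime itself uses its struct_stat_sz constants rather than sizeof): call through, and on success mark the whole output struct initialized:

#include <sys/stat.h>
#include <sanitizer/msan_interface.h>

int checked_fstat(int fd, struct stat *buf) {
  int res = fstat(fd, buf);
  if (res == 0)
    __msan_unpoison(buf, sizeof(*buf));  // kernel filled the whole struct
  return res;
}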
INTERCEPTOR(int, pipe, int pipefd[2]) {
@@ -758,7 +887,7 @@ INTERCEPTOR(char *, fgets_unlocked, char *s, int size, void *stream) {
ENSURE_MSAN_INITED();
char *res = REAL(fgets_unlocked)(s, size, stream);
if (res)
- __msan_unpoison(s, REAL(strlen)(s) + 1);
+ __msan_unpoison(s, internal_strlen(s) + 1);
return res;
}
#define MSAN_MAYBE_INTERCEPT_FGETS_UNLOCKED INTERCEPT_FUNCTION(fgets_unlocked)
@@ -829,7 +958,7 @@ INTERCEPTOR(int, gethostname, char *name, SIZE_T len) {
ENSURE_MSAN_INITED();
int res = REAL(gethostname)(name, len);
if (!res || (res == -1 && errno == errno_ENAMETOOLONG)) {
- SIZE_T real_len = REAL(strnlen)(name, len);
+ SIZE_T real_len = internal_strnlen(name, len);
if (real_len < len)
++real_len;
__msan_unpoison(name, real_len);
@@ -869,27 +998,15 @@ INTERCEPTOR(int, epoll_pwait, int epfd, void *events, int maxevents,
INTERCEPTOR(void *, calloc, SIZE_T nmemb, SIZE_T size) {
GET_MALLOC_STACK_TRACE;
- if (UNLIKELY(!msan_inited))
- // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
- return AllocateFromLocalPool(nmemb * size);
+ if (DlsymAlloc::Use())
+ return DlsymAlloc::Callocate(nmemb, size);
return msan_calloc(nmemb, size, &stack);
}
INTERCEPTOR(void *, realloc, void *ptr, SIZE_T size) {
+ if (DlsymAlloc::Use() || DlsymAlloc::PointerIsMine(ptr))
+ return DlsymAlloc::Realloc(ptr, size);
GET_MALLOC_STACK_TRACE;
- if (UNLIKELY(IsInDlsymAllocPool(ptr))) {
- uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
- uptr copy_size = Min(size, kDlsymAllocPoolSize - offset);
- void *new_ptr;
- if (UNLIKELY(!msan_inited)) {
- new_ptr = AllocateFromLocalPool(copy_size);
- } else {
- copy_size = size;
- new_ptr = msan_malloc(copy_size, &stack);
- }
- internal_memcpy(new_ptr, ptr, copy_size);
- return new_ptr;
- }
return msan_realloc(ptr, size, &stack);
}
@@ -899,16 +1016,15 @@ INTERCEPTOR(void *, reallocarray, void *ptr, SIZE_T nmemb, SIZE_T size) {
}
INTERCEPTOR(void *, malloc, SIZE_T size) {
+ if (DlsymAlloc::Use())
+ return DlsymAlloc::Allocate(size);
GET_MALLOC_STACK_TRACE;
- if (UNLIKELY(!msan_inited))
- // Hack: dlsym calls malloc before REAL(malloc) is retrieved from dlsym.
- return AllocateFromLocalPool(size);
return msan_malloc(size, &stack);
}
void __msan_allocated_memory(const void *data, uptr size) {
- GET_MALLOC_STACK_TRACE;
if (flags()->poison_in_malloc) {
+ GET_MALLOC_STACK_TRACE;
stack.tag = STACK_TRACE_TAG_POISON;
PoisonMemory(data, size, &stack);
}
@@ -920,13 +1036,29 @@ void __msan_copy_shadow(void *dest, const void *src, uptr n) {
}
void __sanitizer_dtor_callback(const void *data, uptr size) {
- GET_MALLOC_STACK_TRACE;
if (flags()->poison_in_dtor) {
+ GET_MALLOC_STACK_TRACE;
stack.tag = STACK_TRACE_TAG_POISON;
PoisonMemory(data, size, &stack);
}
}
+void __sanitizer_dtor_callback_fields(const void *data, uptr size) {
+ if (flags()->poison_in_dtor) {
+ GET_MALLOC_STACK_TRACE;
+ stack.tag = STACK_TRACE_TAG_FIELDS;
+ PoisonMemory(data, size, &stack);
+ }
+}
+
+void __sanitizer_dtor_callback_vptr(const void *data) {
+ if (flags()->poison_in_dtor) {
+ GET_MALLOC_STACK_TRACE;
+ stack.tag = STACK_TRACE_TAG_VPTR;
+ PoisonMemory(data, sizeof(void *), &stack);
+ }
+}
+
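A hand-written illustration of who calls the two new hooks: with -fsanitize-memory-use-after-dtor, clang emits approximately these calls at the end of each destructor (prototypes declared locally for the sketch; linking requires the MSan runtime):

#include <cstddef>

extern "C" void __sanitizer_dtor_callback_fields(const void *, size_t);
extern "C" void __sanitizer_dtor_callback_vptr(const void *);

struct S {
  int field = 0;
  virtual ~S() {
    // Approximation of the instrumentation clang inserts:
    __sanitizer_dtor_callback_fields(&field, sizeof(field));
    __sanitizer_dtor_callback_vptr(this);  // first word: the vptr slot
  }
};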
template <class Mmap>
static void *mmap_interceptor(Mmap real_mmap, void *addr, SIZE_T length,
int prot, int flags, int fd, OFF64_T offset) {
@@ -1000,12 +1132,13 @@ static void SignalAction(int signo, void *si, void *uc) {
ScopedThreadLocalStateBackup stlsb;
UnpoisonParam(3);
__msan_unpoison(si, sizeof(__sanitizer_sigaction));
- __msan_unpoison(uc, __sanitizer::ucontext_t_sz);
+ __msan_unpoison(uc, ucontext_t_sz(uc));
typedef void (*sigaction_cb)(int, void *, void *);
sigaction_cb cb =
(sigaction_cb)atomic_load(&sigactions[signo], memory_order_relaxed);
cb(signo, si, uc);
+ CHECK_UNPOISONED(uc, ucontext_t_sz(uc));
}
static void read_sigaction(const __sanitizer_sigaction *act) {
@@ -1023,6 +1156,8 @@ extern "C" int pthread_attr_destroy(void *attr);
static void *MsanThreadStartFunc(void *arg) {
MsanThread *t = (MsanThread *)arg;
SetCurrentThread(t);
+ t->Init();
+ SetSigProcMask(&t->starting_sigset_, nullptr);
return t->ThreadStart();
}
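A plain-pthreads model of the ScopedBlockSignals / SetSigProcMask pairing above: the parent blocks all signals around pthread_create so the new thread cannot field a signal before its sanitizer state exists, and the thread restores the parent's saved mask once initialized. Names are illustrative; arg must outlive the thread:

#include <pthread.h>
#include <signal.h>

struct StartArg { sigset_t parent_set; };

static void *start(void *p) {
  StartArg *arg = static_cast<StartArg *>(p);
  // ... per-thread sanitizer init would run here, signal-free ...
  pthread_sigmask(SIG_SETMASK, &arg->parent_set, nullptr);
  return nullptr;
}

int create_with_blocked_signals(pthread_t *th, StartArg *arg) {
  sigset_t all;
  sigfillset(&all);
  pthread_sigmask(SIG_SETMASK, &all, &arg->parent_set);  // save + block
  int res = pthread_create(th, nullptr, start, arg);
  pthread_sigmask(SIG_SETMASK, &arg->parent_set, nullptr);  // restore
  return res;
}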
@@ -1038,7 +1173,7 @@ INTERCEPTOR(int, pthread_create, void *th, void *attr, void *(*callback)(void*),
AdjustStackSize(attr);
MsanThread *t = MsanThread::Create(callback, param);
-
+ ScopedBlockSignals block(&t->starting_sigset_);
int res = REAL(pthread_create)(th, attr, MsanThreadStartFunc, t);
if (attr == &myattr)
@@ -1062,17 +1197,37 @@ INTERCEPTOR(int, pthread_key_create, __sanitizer_pthread_key_t *key,
#if SANITIZER_NETBSD
INTERCEPTOR(int, __libc_thr_keycreate, __sanitizer_pthread_key_t *m,
void (*dtor)(void *value))
-ALIAS(WRAPPER_NAME(pthread_key_create));
+ALIAS(WRAP(pthread_key_create));
#endif
-INTERCEPTOR(int, pthread_join, void *th, void **retval) {
+INTERCEPTOR(int, pthread_join, void *thread, void **retval) {
ENSURE_MSAN_INITED();
- int res = REAL(pthread_join)(th, retval);
+ int res = REAL(pthread_join)(thread, retval);
if (!res && retval)
__msan_unpoison(retval, sizeof(*retval));
return res;
}
+#if SANITIZER_GLIBC
+INTERCEPTOR(int, pthread_tryjoin_np, void *thread, void **retval) {
+ ENSURE_MSAN_INITED();
+ int res = REAL(pthread_tryjoin_np)(thread, retval);
+ if (!res && retval)
+ __msan_unpoison(retval, sizeof(*retval));
+ return res;
+}
+
+INTERCEPTOR(int, pthread_timedjoin_np, void *thread, void **retval,
+ const struct timespec *abstime) {
+ int res = REAL(pthread_timedjoin_np)(thread, retval, abstime);
+ if (!res && retval)
+ __msan_unpoison(retval, sizeof(*retval));
+ return res;
+}
+#endif
+
+DEFINE_REAL_PTHREAD_FUNCTIONS
+
extern char *tzname[2];
INTERCEPTOR(void, tzset, int fake) {
@@ -1080,9 +1235,9 @@ INTERCEPTOR(void, tzset, int fake) {
InterceptorScope interceptor_scope;
REAL(tzset)(fake);
if (tzname[0])
- __msan_unpoison(tzname[0], REAL(strlen)(tzname[0]) + 1);
+ __msan_unpoison(tzname[0], internal_strlen(tzname[0]) + 1);
if (tzname[1])
- __msan_unpoison(tzname[1], REAL(strlen)(tzname[1]) + 1);
+ __msan_unpoison(tzname[1], internal_strlen(tzname[1]) + 1);
return;
}
@@ -1092,7 +1247,7 @@ struct MSanAtExitRecord {
};
struct InterceptorContext {
- BlockingMutex atexit_mu;
+ Mutex atexit_mu;
Vector<struct MSanAtExitRecord *> AtExitStack;
InterceptorContext()
@@ -1108,7 +1263,7 @@ InterceptorContext *interceptor_ctx() {
void MSanAtExitWrapper() {
MSanAtExitRecord *r;
{
- BlockingMutexLock l(&interceptor_ctx()->atexit_mu);
+ Lock l(&interceptor_ctx()->atexit_mu);
uptr element = interceptor_ctx()->AtExitStack.Size() - 1;
r = interceptor_ctx()->AtExitStack[element];
@@ -1142,7 +1297,7 @@ INTERCEPTOR(int, __cxa_atexit, void (*func)(void *), void *arg,
// Unpoison argument shadow for C++ module destructors.
INTERCEPTOR(int, atexit, void (*func)()) {
- // Avoid calling real atexit as it is unrechable on at least on Linux.
+ // Avoid calling the real atexit, as it is unreachable at least on Linux.
if (msan_init_is_running)
return REAL(__cxa_atexit)((void (*)(void *a))func, 0, 0);
return setup_at_exit_wrapper((void(*)())func, 0, 0);
@@ -1159,7 +1314,7 @@ static int setup_at_exit_wrapper(void(*f)(), void *arg, void *dso) {
// NetBSD does not preserve the 2nd argument if dso is equal to 0
// Store ctx in a local stack-like structure
- BlockingMutexLock l(&interceptor_ctx()->atexit_mu);
+ Lock l(&interceptor_ctx()->atexit_mu);
res = REAL(__cxa_atexit)((void (*)(void *a))MSanAtExitWrapper, 0, 0);
if (!res) {
@@ -1171,24 +1326,6 @@ static int setup_at_exit_wrapper(void(*f)(), void *arg, void *dso) {
return res;
}
-static void BeforeFork() {
- StackDepotLockAll();
- ChainedOriginDepotLockAll();
-}
-
-static void AfterFork() {
- ChainedOriginDepotUnlockAll();
- StackDepotUnlockAll();
-}
-
-INTERCEPTOR(int, fork, void) {
- ENSURE_MSAN_INITED();
- BeforeFork();
- int pid = REAL(fork)();
- AfterFork();
- return pid;
-}
-
// NetBSD ships with openpty(3) in -lutil, which needs to be prebuilt
// explicitly with MSan.
#if SANITIZER_LINUX
@@ -1256,13 +1393,13 @@ int OnExit() {
do { \
if (!INTERCEPT_FUNCTION_VER(name, ver)) \
VReport(1, "MemorySanitizer: failed to intercept '%s@@%s'\n", #name, \
- #ver); \
+ ver); \
} while (0)
#define MSAN_INTERCEPT_FUNC_VER_UNVERSIONED_FALLBACK(name, ver) \
do { \
if (!INTERCEPT_FUNCTION_VER(name, ver) && !INTERCEPT_FUNCTION(name)) \
VReport(1, "MemorySanitizer: failed to intercept '%s@@%s' or '%s'\n", \
- #name, #ver, #name); \
+ #name, ver, #name); \
} while (0)
#define COMMON_INTERCEPT_FUNCTION(name) MSAN_INTERCEPT_FUNC(name)
@@ -1278,14 +1415,15 @@ int OnExit() {
CHECK_UNPOISONED_CTX(ctx, ptr, size)
#define COMMON_INTERCEPTOR_INITIALIZE_RANGE(ptr, size) \
__msan_unpoison(ptr, size)
-#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
- if (msan_init_is_running) return REAL(func)(__VA_ARGS__); \
- ENSURE_MSAN_INITED(); \
- MSanInterceptorContext msan_ctx = {IsInInterceptorScope()}; \
- ctx = (void *)&msan_ctx; \
- (void)ctx; \
- InterceptorScope interceptor_scope; \
- __msan_unpoison(__errno_location(), sizeof(int)); /* NOLINT */
+#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
+ if (msan_init_is_running) \
+ return REAL(func)(__VA_ARGS__); \
+ ENSURE_MSAN_INITED(); \
+ MSanInterceptorContext msan_ctx = {IsInInterceptorScope()}; \
+ ctx = (void *)&msan_ctx; \
+ (void)ctx; \
+ InterceptorScope interceptor_scope; \
+ __msan_unpoison(__errno_location(), sizeof(int));
#define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
do { \
} while (false)
@@ -1353,6 +1491,7 @@ int OnExit() {
} while (false)
#include "sanitizer_common/sanitizer_platform_interceptors.h"
+#include "sanitizer_common/sanitizer_common_interceptors_memintrinsics.inc"
#include "sanitizer_common/sanitizer_common_interceptors.inc"
static uptr signal_impl(int signo, uptr cb);
@@ -1369,6 +1508,8 @@ static int sigaction_impl(int signo, const __sanitizer_sigaction *act,
return REAL(func)(signo, handler); \
}
+#define SIGNAL_INTERCEPTOR_ENTER() ENSURE_MSAN_INITED()
+
#include "sanitizer_common/sanitizer_signal_interceptors.inc"
static int sigaction_impl(int signo, const __sanitizer_sigaction *act,
@@ -1440,32 +1581,47 @@ static uptr signal_impl(int signo, uptr cb) {
#include "sanitizer_common/sanitizer_common_syscalls.inc"
#include "sanitizer_common/sanitizer_syscalls_netbsd.inc"
-struct dlinfo {
- char *dli_fname;
- void *dli_fbase;
- char *dli_sname;
- void *dli_saddr;
-};
+INTERCEPTOR(const char *, strsignal, int sig) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strsignal, sig);
+ const char *res = REAL(strsignal)(sig);
+ if (res)
+ __msan_unpoison(res, internal_strlen(res) + 1);
+ return res;
+}
-INTERCEPTOR(int, dladdr, void *addr, dlinfo *info) {
+INTERCEPTOR(int, dladdr, void *addr, void *info) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, dladdr, addr, info);
int res = REAL(dladdr)(addr, info);
+ if (res != 0)
+ UnpoisonDllAddrInfo(info);
+ return res;
+}
+
+#if SANITIZER_GLIBC
+INTERCEPTOR(int, dladdr1, void *addr, void *info, void **extra_info,
+ int flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, dladdr1, addr, info, extra_info, flags);
+ int res = REAL(dladdr1)(addr, info, extra_info, flags);
if (res != 0) {
- __msan_unpoison(info, sizeof(*info));
- if (info->dli_fname)
- __msan_unpoison(info->dli_fname, REAL(strlen)(info->dli_fname) + 1);
- if (info->dli_sname)
- __msan_unpoison(info->dli_sname, REAL(strlen)(info->dli_sname) + 1);
+ UnpoisonDllAddrInfo(info);
+ UnpoisonDllAddr1ExtraInfo(extra_info, flags);
}
return res;
}
+# define MSAN_MAYBE_INTERCEPT_DLADDR1 MSAN_INTERCEPT_FUNC(dladdr1)
+#else
+#define MSAN_MAYBE_INTERCEPT_DLADDR1
+#endif
INTERCEPTOR(char *, dlerror, int fake) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, dlerror, fake);
char *res = REAL(dlerror)(fake);
- if (res) __msan_unpoison(res, REAL(strlen)(res) + 1);
+ if (res)
+ __msan_unpoison(res, internal_strlen(res) + 1);
return res;
}
@@ -1483,7 +1639,7 @@ static int msan_dl_iterate_phdr_cb(__sanitizer_dl_phdr_info *info, SIZE_T size,
if (info->dlpi_phdr && info->dlpi_phnum)
__msan_unpoison(info->dlpi_phdr, struct_ElfW_Phdr_sz * info->dlpi_phnum);
if (info->dlpi_name)
- __msan_unpoison(info->dlpi_name, REAL(strlen)(info->dlpi_name) + 1);
+ __msan_unpoison(info->dlpi_name, internal_strlen(info->dlpi_name) + 1);
}
dl_iterate_phdr_data *cbdata = (dl_iterate_phdr_data *)data;
UnpoisonParam(3);
@@ -1525,7 +1681,7 @@ INTERCEPTOR(wchar_t *, wcscpy, wchar_t *dest, const wchar_t *src) {
ENSURE_MSAN_INITED();
GET_STORE_STACK_TRACE;
wchar_t *res = REAL(wcscpy)(dest, src);
- CopyShadowAndOrigin(dest, src, sizeof(wchar_t) * (REAL(wcslen)(src) + 1),
+ CopyShadowAndOrigin(dest, src, sizeof(wchar_t) * (internal_wcslen(src) + 1),
&stack);
return res;
}
@@ -1533,7 +1689,7 @@ INTERCEPTOR(wchar_t *, wcscpy, wchar_t *dest, const wchar_t *src) {
INTERCEPTOR(wchar_t *, wcsncpy, wchar_t *dest, const wchar_t *src, SIZE_T n) {
ENSURE_MSAN_INITED();
GET_STORE_STACK_TRACE;
- SIZE_T copy_size = REAL(wcsnlen)(src, n);
+ SIZE_T copy_size = internal_wcsnlen(src, n);
if (copy_size < n) copy_size++; // trailing \0
wchar_t *res = REAL(wcsncpy)(dest, src, n);
CopyShadowAndOrigin(dest, src, copy_size * sizeof(wchar_t), &stack);
@@ -1567,7 +1723,7 @@ void __msan_clear_and_unpoison(void *a, uptr size) {
void *__msan_memcpy(void *dest, const void *src, SIZE_T n) {
if (!msan_inited) return internal_memcpy(dest, src, n);
- if (msan_init_is_running || __msan::IsInSymbolizer())
+ if (msan_init_is_running || __msan::IsInSymbolizerOrUnwider())
return REAL(memcpy)(dest, src, n);
ENSURE_MSAN_INITED();
GET_STORE_STACK_TRACE;
@@ -1597,7 +1753,7 @@ void *__msan_memmove(void *dest, const void *src, SIZE_T n) {
void __msan_unpoison_string(const char* s) {
if (!MEM_IS_APP(s)) return;
- __msan_unpoison(s, REAL(strlen)(s) + 1);
+ __msan_unpoison(s, internal_strlen(s) + 1);
}
namespace __msan {
@@ -1624,6 +1780,7 @@ void InitializeInterceptors() {
MSAN_MAYBE_INTERCEPT_CFREE;
MSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE;
MSAN_MAYBE_INTERCEPT_MALLINFO;
+ MSAN_MAYBE_INTERCEPT_MALLINFO2;
MSAN_MAYBE_INTERCEPT_MALLOPT;
MSAN_MAYBE_INTERCEPT_MALLOC_STATS;
INTERCEPT_FUNCTION(fread);
@@ -1637,6 +1794,7 @@ void InitializeInterceptors() {
INTERCEPT_FUNCTION(wmemmove);
INTERCEPT_FUNCTION(strcpy);
MSAN_MAYBE_INTERCEPT_STPCPY;
+ MSAN_MAYBE_INTERCEPT_STPNCPY;
INTERCEPT_FUNCTION(strdup);
MSAN_MAYBE_INTERCEPT___STRDUP;
INTERCEPT_FUNCTION(strncpy);
@@ -1645,7 +1803,11 @@ void InitializeInterceptors() {
INTERCEPT_FUNCTION(strncat);
INTERCEPT_STRTO(strtod);
INTERCEPT_STRTO(strtof);
+#ifdef SANITIZER_NLDBL_VERSION
+ INTERCEPT_STRTO_VER(strtold, SANITIZER_NLDBL_VERSION);
+#else
INTERCEPT_STRTO(strtold);
+#endif
INTERCEPT_STRTO(strtol);
INTERCEPT_STRTO(strtoul);
INTERCEPT_STRTO(strtoll);
@@ -1653,11 +1815,33 @@ void InitializeInterceptors() {
INTERCEPT_STRTO(strtouq);
INTERCEPT_STRTO(wcstod);
INTERCEPT_STRTO(wcstof);
+#ifdef SANITIZER_NLDBL_VERSION
+ INTERCEPT_STRTO_VER(wcstold, SANITIZER_NLDBL_VERSION);
+#else
INTERCEPT_STRTO(wcstold);
+#endif
INTERCEPT_STRTO(wcstol);
INTERCEPT_STRTO(wcstoul);
INTERCEPT_STRTO(wcstoll);
INTERCEPT_STRTO(wcstoull);
+#if SANITIZER_GLIBC
+ INTERCEPT_STRTO(__isoc23_strtod);
+ INTERCEPT_STRTO(__isoc23_strtof);
+ INTERCEPT_STRTO(__isoc23_strtold);
+ INTERCEPT_STRTO(__isoc23_strtol);
+ INTERCEPT_STRTO(__isoc23_strtoul);
+ INTERCEPT_STRTO(__isoc23_strtoll);
+ INTERCEPT_STRTO(__isoc23_strtoull);
+ INTERCEPT_STRTO(__isoc23_strtouq);
+ INTERCEPT_STRTO(__isoc23_wcstod);
+ INTERCEPT_STRTO(__isoc23_wcstof);
+ INTERCEPT_STRTO(__isoc23_wcstold);
+ INTERCEPT_STRTO(__isoc23_wcstol);
+ INTERCEPT_STRTO(__isoc23_wcstoul);
+ INTERCEPT_STRTO(__isoc23_wcstoll);
+ INTERCEPT_STRTO(__isoc23_wcstoull);
+#endif
+
#ifdef SANITIZER_NLDBL_VERSION
INTERCEPT_FUNCTION_VER(vswprintf, SANITIZER_NLDBL_VERSION);
INTERCEPT_FUNCTION_VER(swprintf, SANITIZER_NLDBL_VERSION);
@@ -1685,8 +1869,11 @@ void InitializeInterceptors() {
INTERCEPT_FUNCTION(gettimeofday);
MSAN_MAYBE_INTERCEPT_FCVT;
MSAN_MAYBE_INTERCEPT_FSTAT;
+ MSAN_MAYBE_INTERCEPT_FSTAT64;
MSAN_MAYBE_INTERCEPT___FXSTAT;
- MSAN_INTERCEPT_FSTATAT;
+ MSAN_MAYBE_INTERCEPT_FSTATAT;
+ MSAN_MAYBE_INTERCEPT_FSTATAT64;
+ MSAN_MAYBE_INTERCEPT___FXSTATAT;
MSAN_MAYBE_INTERCEPT___FXSTAT64;
MSAN_MAYBE_INTERCEPT___FXSTATAT64;
INTERCEPT_FUNCTION(pipe);
@@ -1701,7 +1888,9 @@ void InitializeInterceptors() {
INTERCEPT_FUNCTION(gethostname);
MSAN_MAYBE_INTERCEPT_EPOLL_WAIT;
MSAN_MAYBE_INTERCEPT_EPOLL_PWAIT;
+ INTERCEPT_FUNCTION(strsignal);
INTERCEPT_FUNCTION(dladdr);
+ MSAN_MAYBE_INTERCEPT_DLADDR1;
INTERCEPT_FUNCTION(dlerror);
INTERCEPT_FUNCTION(dl_iterate_phdr);
INTERCEPT_FUNCTION(getrusage);
@@ -1710,7 +1899,12 @@ void InitializeInterceptors() {
#else
INTERCEPT_FUNCTION(pthread_create);
#endif
+ INTERCEPT_FUNCTION(pthread_join);
INTERCEPT_FUNCTION(pthread_key_create);
+#if SANITIZER_GLIBC
+ INTERCEPT_FUNCTION(pthread_tryjoin_np);
+ INTERCEPT_FUNCTION(pthread_timedjoin_np);
+#endif
#if SANITIZER_NETBSD
INTERCEPT_FUNCTION(__libc_thr_keycreate);
@@ -1721,7 +1915,6 @@ void InitializeInterceptors() {
INTERCEPT_FUNCTION(atexit);
INTERCEPT_FUNCTION(__cxa_atexit);
INTERCEPT_FUNCTION(shmat);
- INTERCEPT_FUNCTION(fork);
MSAN_MAYBE_INTERCEPT_OPENPTY;
MSAN_MAYBE_INTERCEPT_FORKPTY;
diff --git a/contrib/llvm-project/compiler-rt/lib/msan/msan_interface_internal.h b/contrib/llvm-project/compiler-rt/lib/msan/msan_interface_internal.h
index 1edacbc7504f..c2eead13c20c 100644
--- a/contrib/llvm-project/compiler-rt/lib/msan/msan_interface_internal.h
+++ b/contrib/llvm-project/compiler-rt/lib/msan/msan_interface_internal.h
@@ -31,7 +31,7 @@ SANITIZER_INTERFACE_ATTRIBUTE
void __msan_warning();
// Print a warning and die.
-// Intrumentation inserts calls to this function when building in "fast" mode
+// Instrumentation inserts calls to this function when building in "fast" mode
// (i.e. -mllvm -msan-keep-going)
SANITIZER_INTERFACE_ATTRIBUTE __attribute__((noreturn))
void __msan_warning_noreturn();
@@ -109,6 +109,11 @@ void __msan_set_alloca_origin(void *a, uptr size, char *descr);
SANITIZER_INTERFACE_ATTRIBUTE
void __msan_set_alloca_origin4(void *a, uptr size, char *descr, uptr pc);
SANITIZER_INTERFACE_ATTRIBUTE
+void __msan_set_alloca_origin_with_descr(void *a, uptr size, u32 *id_ptr,
+ char *descr);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __msan_set_alloca_origin_no_descr(void *a, uptr size, u32 *id_ptr);
+SANITIZER_INTERFACE_ATTRIBUTE
u32 __msan_chain_origin(u32 id);
SANITIZER_INTERFACE_ATTRIBUTE
u32 __msan_get_origin(const void *a);
@@ -157,6 +162,10 @@ void __msan_allocated_memory(const void* data, uptr size);
// uninitialized.
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_dtor_callback(const void* data, uptr size);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_dtor_callback_fields(const void *data, uptr size);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_dtor_callback_vptr(const void *data);
SANITIZER_INTERFACE_ATTRIBUTE
u16 __sanitizer_unaligned_load16(const uu16 *p);
diff --git a/contrib/llvm-project/compiler-rt/lib/msan/msan_linux.cpp b/contrib/llvm-project/compiler-rt/lib/msan/msan_linux.cpp
index d5baee38e710..cd2d9f5c720c 100644
--- a/contrib/llvm-project/compiler-rt/lib/msan/msan_linux.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/msan/msan_linux.cpp
@@ -14,38 +14,45 @@
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
-#include "msan.h"
-#include "msan_report.h"
-#include "msan_thread.h"
-
-#include <elf.h>
-#include <link.h>
-#include <pthread.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <signal.h>
-#include <unistd.h>
-#include <unwind.h>
-#include <sys/time.h>
-#include <sys/resource.h>
-
-#include "sanitizer_common/sanitizer_common.h"
-#include "sanitizer_common/sanitizer_procmaps.h"
+# include <elf.h>
+# include <link.h>
+# include <pthread.h>
+# include <signal.h>
+# include <stdio.h>
+# include <stdlib.h>
+# if SANITIZER_LINUX
+# include <sys/personality.h>
+# endif
+# include <sys/resource.h>
+# include <sys/time.h>
+# include <unistd.h>
+# include <unwind.h>
+
+# include "msan.h"
+# include "msan_allocator.h"
+# include "msan_chained_origin_depot.h"
+# include "msan_report.h"
+# include "msan_thread.h"
+# include "sanitizer_common/sanitizer_common.h"
+# include "sanitizer_common/sanitizer_procmaps.h"
+# include "sanitizer_common/sanitizer_stackdepot.h"
namespace __msan {
void ReportMapRange(const char *descr, uptr beg, uptr size) {
if (size > 0) {
uptr end = beg + size - 1;
- VPrintf(1, "%s : %p - %p\n", descr, beg, end);
+ VPrintf(1, "%s : 0x%zx - 0x%zx\n", descr, beg, end);
}
}
-static bool CheckMemoryRangeAvailability(uptr beg, uptr size) {
+static bool CheckMemoryRangeAvailability(uptr beg, uptr size, bool verbose) {
if (size > 0) {
uptr end = beg + size - 1;
if (!MemoryRangeIsAvailable(beg, end)) {
- Printf("FATAL: Memory range %p - %p is not available.\n", beg, end);
+ if (verbose)
+ Printf("FATAL: Memory range 0x%zx - 0x%zx is not available.\n", beg,
+ end);
return false;
}
}
@@ -65,8 +72,8 @@ static bool ProtectMemoryRange(uptr beg, uptr size, const char *name) {
}
if ((uptr)addr != beg) {
uptr end = beg + size - 1;
- Printf("FATAL: Cannot protect memory range %p - %p (%s).\n", beg, end,
- name);
+ Printf("FATAL: Cannot protect memory range 0x%zx - 0x%zx (%s).\n", beg,
+ end, name);
return false;
}
}
@@ -84,7 +91,7 @@ static void CheckMemoryLayoutSanity() {
CHECK(addr_is_type(start, type));
CHECK(addr_is_type((start + end) / 2, type));
CHECK(addr_is_type(end - 1, type));
- if (type == MappingDesc::APP) {
+ if (type == MappingDesc::APP || type == MappingDesc::ALLOCATOR) {
uptr addr = start;
CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
@@ -104,9 +111,9 @@ static void CheckMemoryLayoutSanity() {
}
}
-bool InitShadow(bool init_origins) {
+static bool InitShadow(bool init_origins, bool dry_run) {
// Let user know mapping parameters first.
- VPrintf(1, "__msan_init %p\n", &__msan_init);
+ VPrintf(1, "__msan_init %p\n", reinterpret_cast<void *>(&__msan_init));
for (unsigned i = 0; i < kMemoryLayoutSize; ++i)
VPrintf(1, "%s: %zx - %zx\n", kMemoryLayout[i].name, kMemoryLayout[i].start,
kMemoryLayout[i].end - 1);
@@ -114,8 +121,9 @@ bool InitShadow(bool init_origins) {
CheckMemoryLayoutSanity();
if (!MEM_IS_APP(&__msan_init)) {
- Printf("FATAL: Code %p is out of application range. Non-PIE build?\n",
- (uptr)&__msan_init);
+ if (!dry_run)
+ Printf("FATAL: Code %p is out of application range. Non-PIE build?\n",
+ reinterpret_cast<void *>(&__msan_init));
return false;
}
@@ -136,20 +144,26 @@ bool InitShadow(bool init_origins) {
bool protect = type == MappingDesc::INVALID ||
(!init_origins && type == MappingDesc::ORIGIN);
CHECK(!(map && protect));
- if (!map && !protect)
- CHECK(type == MappingDesc::APP);
+ if (!map && !protect) {
+ CHECK(type == MappingDesc::APP || type == MappingDesc::ALLOCATOR);
+
+ if (dry_run && type == MappingDesc::ALLOCATOR &&
+ !CheckMemoryRangeAvailability(start, size, !dry_run))
+ return false;
+ }
if (map) {
- if (!CheckMemoryRangeAvailability(start, size))
+ if (dry_run && !CheckMemoryRangeAvailability(start, size, !dry_run))
return false;
- if (!MmapFixedSuperNoReserve(start, size, kMemoryLayout[i].name))
+ if (!dry_run &&
+ !MmapFixedSuperNoReserve(start, size, kMemoryLayout[i].name))
return false;
- if (common_flags()->use_madv_dontdump)
+ if (!dry_run && common_flags()->use_madv_dontdump)
DontDumpShadowMemory(start, size);
}
if (protect) {
- if (!CheckMemoryRangeAvailability(start, size))
+ if (dry_run && !CheckMemoryRangeAvailability(start, size, !dry_run))
return false;
- if (!ProtectMemoryRange(start, size, kMemoryLayout[i].name))
+ if (!dry_run && !ProtectMemoryRange(start, size, kMemoryLayout[i].name))
return false;
}
}
@@ -157,6 +171,35 @@ bool InitShadow(bool init_origins) {
return true;
}
+bool InitShadowWithReExec(bool init_origins) {
+ // Start with a dry run: check that the layout is OK, but don't print
+ // warnings, because warning messages would cause tests to fail (even if
+ // we successfully re-exec after the warning).
+ bool success = InitShadow(__msan_get_track_origins(), true);
+ if (!success) {
+# if SANITIZER_LINUX
+ // Perhaps ASLR entropy is too high. If ASLR is enabled, re-exec without it.
+ int old_personality = personality(0xffffffff);
+ bool aslr_on =
+ (old_personality != -1) && ((old_personality & ADDR_NO_RANDOMIZE) == 0);
+
+ if (aslr_on) {
+ VReport(1,
+ "WARNING: MemorySanitizer: memory layout is incompatible, "
+ "possibly due to high-entropy ASLR.\n"
+ "Re-execing with fixed virtual address space.\n"
+ "N.B. reducing ASLR entropy is preferable.\n");
+ CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1);
+ ReExec();
+ }
+# endif
+ }
+
+ // The earlier dry run didn't actually map or protect anything. Run again in
+ // non-dry run mode.
+ return success && InitShadow(__msan_get_track_origins(), false);
+}
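The fallback above hinges on the Linux personality(2) API: query the current persona with personality(0xffffffff), set ADDR_NO_RANDOMIZE, and re-exec so the fresh process gets a low-entropy address space. A minimal standalone sketch of the same trick, assuming Linux and approximating the runtime's ReExec() with execv on /proc/self/exe:

    #include <sys/personality.h>
    #include <unistd.h>
    #include <cstdio>

    // Disable ASLR for this binary and restart it; no-op if ASLR is already off.
    static void ReExecWithoutAslr(char **argv) {
      int old = personality(0xffffffff);     // Query-only: returns current persona.
      if (old == -1 || (old & ADDR_NO_RANDOMIZE))
        return;                              // Query failed, or ASLR already disabled.
      personality(old | ADDR_NO_RANDOMIZE);  // Persona persists across execve().
      execv("/proc/self/exe", argv);         // Restart with fixed mappings.
      perror("execv");                       // Reached only if the exec failed.
    }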
+
static void MsanAtExit(void) {
if (flags()->print_stats && (flags()->atexit || msan_report_count > 0))
ReportStats();
@@ -254,7 +297,27 @@ void MsanTSDDtor(void *tsd) {
atomic_signal_fence(memory_order_seq_cst);
MsanThread::TSDDtor(tsd);
}
-#endif
+# endif
+
+static void BeforeFork() {
+ // Usually we lock ThreadRegistry, but msan does not have one.
+ LockAllocator();
+ StackDepotLockBeforeFork();
+ ChainedOriginDepotBeforeFork();
+}
+
+static void AfterFork(bool fork_child) {
+ ChainedOriginDepotAfterFork(fork_child);
+ StackDepotUnlockAfterFork(fork_child);
+ UnlockAllocator();
+ // Usually we unlock ThreadRegistry, but msan does not have one.
+}
+
+void InstallAtForkHandler() {
+ pthread_atfork(
+ &BeforeFork, []() { AfterFork(/* fork_child= */ false); },
+ []() { AfterFork(/* fork_child= */ true); });
+}
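The handlers above follow the standard pthread_atfork discipline: acquire every lock the child could need in the pre-fork hook, then release in both the parent and the child, so the single-threaded child never inherits a mutex held by a thread that no longer exists after fork. A generic sketch of the pattern (the mutex here is illustrative, not msan's allocator lock):

    #include <pthread.h>

    static pthread_mutex_t g_alloc_mu = PTHREAD_MUTEX_INITIALIZER;

    static void Prepare() { pthread_mutex_lock(&g_alloc_mu); }   // Pre-fork, in parent.
    static void Release() { pthread_mutex_unlock(&g_alloc_mu); } // Post-fork, both sides.

    static void InstallHandlers() {
      pthread_atfork(Prepare, /* parent= */ Release, /* child= */ Release);
    }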
} // namespace __msan
diff --git a/contrib/llvm-project/compiler-rt/lib/msan/msan_new_delete.cpp b/contrib/llvm-project/compiler-rt/lib/msan/msan_new_delete.cpp
index d4e95c0f6513..7daa55474b7d 100644
--- a/contrib/llvm-project/compiler-rt/lib/msan/msan_new_delete.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/msan/msan_new_delete.cpp
@@ -30,16 +30,22 @@ namespace std {
// TODO(alekseys): throw std::bad_alloc instead of dying on OOM.
-#define OPERATOR_NEW_BODY(nothrow) \
- GET_MALLOC_STACK_TRACE; \
- void *res = msan_malloc(size, &stack);\
- if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\
- return res
-#define OPERATOR_NEW_BODY_ALIGN(nothrow) \
- GET_MALLOC_STACK_TRACE;\
- void *res = msan_memalign((uptr)align, size, &stack);\
- if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\
- return res;
+# define OPERATOR_NEW_BODY(nothrow) \
+ GET_MALLOC_STACK_TRACE; \
+ void *res = msan_malloc(size, &stack); \
+ if (!nothrow && UNLIKELY(!res)) { \
+ GET_FATAL_STACK_TRACE_IF_EMPTY(&stack); \
+ ReportOutOfMemory(size, &stack); \
+ } \
+ return res
+# define OPERATOR_NEW_BODY_ALIGN(nothrow) \
+ GET_MALLOC_STACK_TRACE; \
+ void *res = msan_memalign((uptr)align, size, &stack); \
+ if (!nothrow && UNLIKELY(!res)) { \
+ GET_FATAL_STACK_TRACE_IF_EMPTY(&stack); \
+ ReportOutOfMemory(size, &stack); \
+ } \
+ return res;
INTERCEPTOR_ATTRIBUTE
void *operator new(size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); }
diff --git a/contrib/llvm-project/compiler-rt/lib/msan/msan_poisoning.cpp b/contrib/llvm-project/compiler-rt/lib/msan/msan_poisoning.cpp
index 15892392f74a..1889e980bfc0 100644
--- a/contrib/llvm-project/compiler-rt/lib/msan/msan_poisoning.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/msan/msan_poisoning.cpp
@@ -14,6 +14,7 @@
#include "interception/interception.h"
#include "msan_origin.h"
+#include "msan_thread.h"
#include "sanitizer_common/sanitizer_common.h"
DECLARE_REAL(void *, memset, void *dest, int c, uptr n)
@@ -215,6 +216,13 @@ void SetShadow(const void *ptr, uptr size, u8 value) {
}
if (!MmapFixedSuperNoReserve(page_beg, page_end - page_beg))
Die();
+
+ if (__msan_get_track_origins()) {
+ // No need to set origin for zero shadow, but we can release pages.
+ uptr origin_beg = RoundUpTo(MEM_TO_ORIGIN(ptr), PageSize);
+ if (!MmapFixedSuperNoReserve(origin_beg, page_end - page_beg))
+ Die();
+ }
}
}
}
@@ -241,6 +249,9 @@ void PoisonMemory(const void *dst, uptr size, StackTrace *stack) {
SetShadow(dst, size, (u8)-1);
if (__msan_get_track_origins()) {
+ MsanThread *t = GetCurrentThread();
+ if (t && t->InSignalHandler())
+ return;
Origin o = Origin::CreateHeapOrigin(stack);
SetOrigin(dst, size, o.raw_id());
}
diff --git a/contrib/llvm-project/compiler-rt/lib/msan/msan_report.cpp b/contrib/llvm-project/compiler-rt/lib/msan/msan_report.cpp
index e10d9eb62231..99bf81f66dc9 100644
--- a/contrib/llvm-project/compiler-rt/lib/msan/msan_report.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/msan/msan_report.cpp
@@ -11,16 +11,18 @@
// Error reporting.
//===----------------------------------------------------------------------===//
+#include "msan_report.h"
+
#include "msan.h"
#include "msan_chained_origin_depot.h"
#include "msan_origin.h"
-#include "msan_report.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_stacktrace_printer.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
using namespace __sanitizer;
@@ -36,24 +38,19 @@ class Decorator: public __sanitizer::SanitizerCommonDecorator {
static void DescribeStackOrigin(const char *so, uptr pc) {
Decorator d;
- char *s = internal_strdup(so);
- char *sep = internal_strchr(s, '@');
- CHECK(sep);
- *sep = '\0';
Printf("%s", d.Origin());
- Printf(
- " %sUninitialized value was created by an allocation of '%s%s%s'"
- " in the stack frame of function '%s%s%s'%s\n",
- d.Origin(), d.Name(), s, d.Origin(), d.Name(), sep + 1, d.Origin(),
- d.Default());
- InternalFree(s);
+ if (so) {
+ Printf(
+ " %sUninitialized value was created by an allocation of '%s%s%s'"
+ " in the stack frame%s\n",
+ d.Origin(), d.Name(), so, d.Origin(), d.Default());
+ } else {
+ Printf(" %sUninitialized value was created in the stack frame%s\n",
+ d.Origin(), d.Default());
+ }
- if (pc) {
- // For some reason function address in LLVM IR is 1 less then the address
- // of the first instruction.
- pc = StackTrace::GetNextInstructionPc(pc);
+ if (pc)
StackTrace(&pc, 1).Print();
- }
}
static void DescribeOrigin(u32 id) {
@@ -86,6 +83,13 @@ static void DescribeOrigin(u32 id) {
Printf(" %sMemory was marked as uninitialized%s\n", d.Origin(),
d.Default());
break;
+ case STACK_TRACE_TAG_FIELDS:
+ Printf(" %sMember fields were destroyed%s\n", d.Origin(), d.Default());
+ break;
+ case STACK_TRACE_TAG_VPTR:
+ Printf(" %sVirtual table ptr was destroyed%s\n", d.Origin(),
+ d.Default());
+ break;
default:
Printf(" %sUninitialized value was created%s\n", d.Origin(),
d.Default());
@@ -122,17 +126,17 @@ void ReportStats() {
ScopedErrorReportLock l;
if (__msan_get_track_origins() > 0) {
- StackDepotStats *stack_depot_stats = StackDepotGetStats();
+ StackDepotStats stack_depot_stats = StackDepotGetStats();
// FIXME: we want this at normal exit, too!
// FIXME: but only with verbosity=1 or something
- Printf("Unique heap origins: %zu\n", stack_depot_stats->n_uniq_ids);
- Printf("Stack depot allocated bytes: %zu\n", stack_depot_stats->allocated);
+ Printf("Unique heap origins: %zu\n", stack_depot_stats.n_uniq_ids);
+ Printf("Stack depot allocated bytes: %zu\n", stack_depot_stats.allocated);
- StackDepotStats *chained_origin_depot_stats = ChainedOriginDepotGetStats();
+ StackDepotStats chained_origin_depot_stats = ChainedOriginDepotGetStats();
Printf("Unique origin histories: %zu\n",
- chained_origin_depot_stats->n_uniq_ids);
+ chained_origin_depot_stats.n_uniq_ids);
Printf("History depot allocated bytes: %zu\n",
- chained_origin_depot_stats->allocated);
+ chained_origin_depot_stats.allocated);
}
}
@@ -201,13 +205,18 @@ void DescribeMemoryRange(const void *x, uptr size) {
Decorator d;
Printf("%s", d.Warning());
- Printf("Shadow map of [%p, %p), %zu bytes:\n", start, end, end - start);
+ uptr start_x = reinterpret_cast<uptr>(x);
+ Printf("Shadow map [%p, %p) of [%p, %p), %zu bytes:\n",
+ reinterpret_cast<void *>(start), reinterpret_cast<void *>(end),
+ reinterpret_cast<void *>(start_x),
+ reinterpret_cast<void *>(start_x + end - start), end - start);
Printf("%s", d.Default());
while (s < e) {
// Line start.
if (pos % 16 == 0) {
for (int i = 0; i < 4; ++i) origin_ids[i] = -1;
- Printf("%p:", s);
+ Printf("%p[%p]:", reinterpret_cast<void *>(s),
+ reinterpret_cast<void *>(start_x - start + s));
}
// Group start.
if (pos % 4 == 0) {
@@ -258,12 +267,13 @@ void DescribeMemoryRange(const void *x, uptr size) {
}
}
-void ReportUMRInsideAddressRange(const char *what, const void *start, uptr size,
- uptr offset) {
+void ReportUMRInsideAddressRange(const char *function, const void *start,
+ uptr size, uptr offset) {
+ function = StackTracePrinter::GetOrInit()->StripFunctionName(function);
Decorator d;
Printf("%s", d.Warning());
Printf("%sUninitialized bytes in %s%s%s at offset %zu inside [%p, %zu)%s\n",
- d.Warning(), d.Name(), what, d.Warning(), offset, start, size,
+ d.Warning(), d.Name(), function, d.Warning(), offset, start, size,
d.Default());
if (__sanitizer::Verbosity())
DescribeMemoryRange(start, size);
diff --git a/contrib/llvm-project/compiler-rt/lib/msan/msan_report.h b/contrib/llvm-project/compiler-rt/lib/msan/msan_report.h
index 0965b8cb6813..2ad81c37cf60 100644
--- a/contrib/llvm-project/compiler-rt/lib/msan/msan_report.h
+++ b/contrib/llvm-project/compiler-rt/lib/msan/msan_report.h
@@ -25,8 +25,8 @@ void ReportExpectedUMRNotFound(StackTrace *stack);
void ReportStats();
void ReportAtExitStatistics();
void DescribeMemoryRange(const void *x, uptr size);
-void ReportUMRInsideAddressRange(const char *what, const void *start, uptr size,
- uptr offset);
+void ReportUMRInsideAddressRange(const char *function, const void *start,
+ uptr size, uptr offset);
} // namespace __msan
diff --git a/contrib/llvm-project/compiler-rt/lib/msan/msan_thread.cpp b/contrib/llvm-project/compiler-rt/lib/msan/msan_thread.cpp
index 6ae012acd9a2..ff9b90bb81f0 100644
--- a/contrib/llvm-project/compiler-rt/lib/msan/msan_thread.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/msan/msan_thread.cpp
@@ -47,6 +47,7 @@ void MsanThread::Init() {
CHECK(MEM_IS_APP(stack_.bottom));
CHECK(MEM_IS_APP(stack_.top - 1));
ClearShadowForThreadStackAndTLS();
+ malloc_storage().Init();
}
void MsanThread::TSDDtor(void *tsd) {
@@ -66,8 +67,6 @@ void MsanThread::Destroy() {
}
thread_return_t MsanThread::ThreadStart() {
- Init();
-
if (!start_routine_) {
// start_routine_ == 0 if we're on the main thread or on one of the
// OS X libdispatch worker threads. But nobody is supposed to call
diff --git a/contrib/llvm-project/compiler-rt/lib/msan/msan_thread.h b/contrib/llvm-project/compiler-rt/lib/msan/msan_thread.h
index fe795e3a547a..f6ed1534cccd 100644
--- a/contrib/llvm-project/compiler-rt/lib/msan/msan_thread.h
+++ b/contrib/llvm-project/compiler-rt/lib/msan/msan_thread.h
@@ -15,7 +15,7 @@
#include "msan_allocator.h"
#include "sanitizer_common/sanitizer_common.h"
-
+#include "sanitizer_common/sanitizer_posix.h"
namespace __msan {
class MsanThread {
@@ -45,6 +45,7 @@ class MsanThread {
MsanThreadLocalMallocStorage &malloc_storage() { return malloc_storage_; }
int destructor_iterations_;
+ __sanitizer_sigset_t starting_sigset_;
private:
// NOTE: There is no MsanThread constructor. It is allocated
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/adt.h b/contrib/llvm-project/compiler-rt/lib/orc/adt.h
index 33b731082f88..8884cc8812be 100644
--- a/contrib/llvm-project/compiler-rt/lib/orc/adt.h
+++ b/contrib/llvm-project/compiler-rt/lib/orc/adt.h
@@ -15,6 +15,7 @@
#include <cstring>
#include <limits>
+#include <ostream>
#include <string>
namespace __orc_rt {
@@ -57,57 +58,6 @@ private:
size_type Size = 0;
};
-/// A substitue for std::string_view (and llvm::StringRef).
-/// FIXME: Remove in favor of std::string_view once we have c++17.
-class string_view {
-public:
- typedef char value_type;
- typedef char *pointer;
- typedef const char *const_pointer;
- typedef char &reference;
- typedef const char &const_reference;
- typedef std::size_t size_type;
- typedef std::ptrdiff_t difference_type;
-
- typedef const_pointer const_iterator;
- typedef const_iterator iterator;
-
- constexpr string_view() noexcept = default;
- constexpr string_view(const char *S, size_type Count)
- : Data(S), Size(Count) {}
- string_view(const char *S) : Data(S), Size(strlen(S)) {}
-
- constexpr const_iterator begin() const noexcept { return Data; }
- constexpr const_iterator end() const noexcept { return Data + Size; }
- constexpr const_pointer data() const noexcept { return Data; }
- constexpr const_reference operator[](size_type idx) { return Data[idx]; }
- constexpr size_type size() const noexcept { return Size; }
- constexpr bool empty() const noexcept { return Size == 0; }
-
- friend bool operator==(const string_view &LHS, const string_view &RHS) {
- if (LHS.Size != RHS.Size)
- return false;
- if (LHS.Data == RHS.Data)
- return true;
- for (size_t I = 0; I != LHS.Size; ++I)
- if (LHS.Data[I] != RHS.Data[I])
- return false;
- return true;
- }
-
- friend bool operator!=(const string_view &LHS, const string_view &RHS) {
- return !(LHS == RHS);
- }
-
-private:
- const char *Data = nullptr;
- size_type Size = 0;
-};
-
-inline std::string to_string(string_view SV) {
- return std::string(SV.data(), SV.size());
-}
-
} // end namespace __orc_rt
-#endif // ORC_RT_COMMON_H
+#endif // ORC_RT_ADT_H
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/bitmask_enum.h b/contrib/llvm-project/compiler-rt/lib/orc/bitmask_enum.h
new file mode 100644
index 000000000000..b9fb776bdf23
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/orc/bitmask_enum.h
@@ -0,0 +1,151 @@
+//===---- bitmask_enum.h - Enable bitmask operations on enums ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of the ORC runtime support library.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef ORC_RT_BITMASK_ENUM_H
+#define ORC_RT_BITMASK_ENUM_H
+
+#include "stl_extras.h"
+
+#include <cassert>
+#include <type_traits>
+
+namespace __orc_rt {
+
+/// ORC_RT_MARK_AS_BITMASK_ENUM lets you opt in an individual enum type so you
+/// can perform bitwise operations on it without putting static_cast everywhere.
+///
+/// \code
+/// enum MyEnum {
+/// E1 = 1, E2 = 2, E3 = 4, E4 = 8,
+/// ORC_RT_MARK_AS_BITMASK_ENUM(/* LargestValue = */ E4)
+/// };
+///
+/// void Foo() {
+/// MyEnum A = (E1 | E2) & E3 ^ ~E4; // Look, ma: No static_cast!
+/// }
+/// \endcode
+///
+/// Normally when you do a bitwise operation on an enum value, you get back an
+/// instance of the underlying type (e.g. int). But using this macro, bitwise
+/// ops on your enum will return you back instances of the enum. This is
+/// particularly useful for enums which represent a combination of flags.
+///
+/// The parameter to ORC_RT_MARK_AS_BITMASK_ENUM should be the largest
+/// individual value in your enum.
+///
+/// All of the enum's values must be non-negative.
+#define ORC_RT_MARK_AS_BITMASK_ENUM(LargestValue) \
+ ORC_RT_BITMASK_LARGEST_ENUMERATOR = LargestValue
+
+/// ORC_RT_DECLARE_ENUM_AS_BITMASK can be used to declare an enum type as a bit
+/// set, so that bitwise operation on such enum does not require static_cast.
+///
+/// \code
+/// enum MyEnum { E1 = 1, E2 = 2, E3 = 4, E4 = 8 };
+/// ORC_RT_DECLARE_ENUM_AS_BITMASK(MyEnum, E4);
+///
+/// void Foo() {
+/// MyEnum A = (E1 | E2) & E3 ^ ~E4; // No static_cast
+/// }
+/// \endcode
+///
+/// The second parameter to ORC_RT_DECLARE_ENUM_AS_BITMASK specifies the largest
+/// bit value of the enum type.
+///
+/// ORC_RT_DECLARE_ENUM_AS_BITMASK should be used in __orc_rt namespace.
+///
+/// This is a non-intrusive alternative to ORC_RT_MARK_AS_BITMASK_ENUM. It
+/// allows declaring more than one non-scoped enumeration as a bitmask type in
+/// the same scope. Otherwise it provides the same functionality as
+/// ORC_RT_MARK_AS_BITMASK_ENUM.
+#define ORC_RT_DECLARE_ENUM_AS_BITMASK(Enum, LargestValue) \
+ template <> struct is_bitmask_enum<Enum> : std::true_type {}; \
+ template <> struct largest_bitmask_enum_bit<Enum> { \
+ static constexpr std::underlying_type_t<Enum> value = LargestValue; \
+ }
+
+/// Traits class to determine whether an enum has been declared as a bitwise
+/// enum via ORC_RT_DECLARE_ENUM_AS_BITMASK.
+template <typename E, typename Enable = void>
+struct is_bitmask_enum : std::false_type {};
+
+template <typename E>
+struct is_bitmask_enum<
+ E, std::enable_if_t<sizeof(E::ORC_RT_BITMASK_LARGEST_ENUMERATOR) >= 0>>
+ : std::true_type {};
+
+template <typename E>
+inline constexpr bool is_bitmask_enum_v = is_bitmask_enum<E>::value;
+
+/// Traits class to determine the largest bitmask enum bit.
+template <typename E, typename Enable = void> struct largest_bitmask_enum_bit;
+
+template <typename E>
+struct largest_bitmask_enum_bit<
+ E, std::enable_if_t<sizeof(E::ORC_RT_BITMASK_LARGEST_ENUMERATOR) >= 0>> {
+ using UnderlyingTy = std::underlying_type_t<E>;
+ static constexpr UnderlyingTy value =
+ static_cast<UnderlyingTy>(E::ORC_RT_BITMASK_LARGEST_ENUMERATOR);
+};
+
+template <typename E> constexpr std::underlying_type_t<E> Mask() {
+ return bit_ceil(largest_bitmask_enum_bit<E>::value) - 1;
+}
+
+template <typename E> constexpr std::underlying_type_t<E> Underlying(E Val) {
+ auto U = static_cast<std::underlying_type_t<E>>(Val);
+ assert(U >= 0 && "Negative enum values are not allowed");
+  assert(U <= Mask<E>() && "Enum value too large (or largest val too small)");
+ return U;
+}
+
+template <typename E, typename = std::enable_if_t<is_bitmask_enum_v<E>>>
+constexpr E operator~(E Val) {
+ return static_cast<E>(~Underlying(Val) & Mask<E>());
+}
+
+template <typename E, typename = std::enable_if_t<is_bitmask_enum_v<E>>>
+constexpr E operator|(E LHS, E RHS) {
+ return static_cast<E>(Underlying(LHS) | Underlying(RHS));
+}
+
+template <typename E, typename = std::enable_if_t<is_bitmask_enum_v<E>>>
+constexpr E operator&(E LHS, E RHS) {
+ return static_cast<E>(Underlying(LHS) & Underlying(RHS));
+}
+
+template <typename E, typename = std::enable_if_t<is_bitmask_enum_v<E>>>
+constexpr E operator^(E LHS, E RHS) {
+ return static_cast<E>(Underlying(LHS) ^ Underlying(RHS));
+}
+
+template <typename E, typename = std::enable_if_t<is_bitmask_enum_v<E>>>
+E &operator|=(E &LHS, E RHS) {
+ LHS = LHS | RHS;
+ return LHS;
+}
+
+template <typename E, typename = std::enable_if_t<is_bitmask_enum_v<E>>>
+E &operator&=(E &LHS, E RHS) {
+ LHS = LHS & RHS;
+ return LHS;
+}
+
+template <typename E, typename = std::enable_if_t<is_bitmask_enum_v<E>>>
+E &operator^=(E &LHS, E RHS) {
+ LHS = LHS ^ RHS;
+ return LHS;
+}
+
+} // end namespace __orc_rt
+
+#endif // ORC_RT_BITMASK_ENUM_H
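A usage sketch for the header above, with a hypothetical Flags enum declared inside namespace __orc_rt so the SFINAE-constrained operators defined there are found by unqualified lookup:

    namespace __orc_rt {

    enum Flags {
      Read = 1,
      Write = 2,
      Append = 4,
      ORC_RT_MARK_AS_BITMASK_ENUM(/* LargestValue = */ Append)
    };

    inline Flags demo() {
      Flags F = Read | Write; // operator| yields Flags, no static_cast needed.
      F &= ~Append;           // operator~ masks down to the declared largest bit.
      return F;
    }

    } // end namespace __orc_rt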
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/c_api.h b/contrib/llvm-project/compiler-rt/lib/orc/c_api.h
deleted file mode 100644
index 6677da06ede5..000000000000
--- a/contrib/llvm-project/compiler-rt/lib/orc/c_api.h
+++ /dev/null
@@ -1,208 +0,0 @@
-/*===- c_api.h - C API for the ORC runtime ------------------------*- C -*-===*\
-|* *|
-|* Part of the LLVM Project, under the Apache License v2.0 with LLVM *|
-|* Exceptions. *|
-|* See https://llvm.org/LICENSE.txt for license information. *|
-|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception *|
-|* *|
-|*===----------------------------------------------------------------------===*|
-|* *|
-|* This file defines the C API for the ORC runtime *|
-|* *|
-|* NOTE: The OrtRTWrapperFunctionResult type must be kept in sync with the *|
-|* definition in llvm/include/llvm-c/OrcShared.h. *|
-|* *|
-\*===----------------------------------------------------------------------===*/
-
-#ifndef ORC_RT_C_API_H
-#define ORC_RT_C_API_H
-
-#include <assert.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-/* Helper to suppress strict prototype warnings. */
-#ifdef __clang__
-#define ORC_RT_C_STRICT_PROTOTYPES_BEGIN \
- _Pragma("clang diagnostic push") \
- _Pragma("clang diagnostic error \"-Wstrict-prototypes\"")
-#define ORC_RT_C_STRICT_PROTOTYPES_END _Pragma("clang diagnostic pop")
-#else
-#define ORC_RT_C_STRICT_PROTOTYPES_BEGIN
-#define ORC_RT_C_STRICT_PROTOTYPES_END
-#endif
-
-/* Helper to wrap C code for C++ */
-#ifdef __cplusplus
-#define ORC_RT_C_EXTERN_C_BEGIN \
- extern "C" { \
- ORC_RT_C_STRICT_PROTOTYPES_BEGIN
-#define ORC_RT_C_EXTERN_C_END \
- ORC_RT_C_STRICT_PROTOTYPES_END \
- }
-#else
-#define ORC_RT_C_EXTERN_C_BEGIN ORC_RT_C_STRICT_PROTOTYPES_BEGIN
-#define ORC_RT_C_EXTERN_C_END ORC_RT_C_STRICT_PROTOTYPES_END
-#endif
-
-ORC_RT_C_EXTERN_C_BEGIN
-
-typedef union {
- char *ValuePtr;
- char Value[sizeof(ValuePtr)];
-} __orc_rt_CWrapperFunctionResultDataUnion;
-
-/**
- * __orc_rt_CWrapperFunctionResult is a kind of C-SmallVector with an
- * out-of-band error state.
- *
- * If Size == 0 and Data.ValuePtr is non-zero then the value is in the
- * 'out-of-band error' state, and Data.ValuePtr points at a malloc-allocated,
- * null-terminated string error message.
- *
- * If Size <= sizeof(__orc_rt_CWrapperFunctionResultData) then the value is in
- * the 'small' state and the content is held in the first Size bytes of
- * Data.Value.
- *
- * If Size > sizeof(OrtRTCWrapperFunctionResultData) then the value is in the
- * 'large' state and the content is held in the first Size bytes of the
- * memory pointed to by Data.ValuePtr. This memory must have been allocated by
- * malloc, and will be freed with free when this value is destroyed.
- */
-typedef struct {
- __orc_rt_CWrapperFunctionResultDataUnion Data;
- size_t Size;
-} __orc_rt_CWrapperFunctionResult;
-
-typedef struct __orc_rt_CSharedOpaqueJITProcessControl
- *__orc_rt_SharedJITProcessControlRef;
-
-/**
- * Zero-initialize an __orc_rt_CWrapperFunctionResult.
- */
-static inline void
-__orc_rt_CWrapperFunctionResultInit(__orc_rt_CWrapperFunctionResult *R) {
- R->Size = 0;
- R->Data.ValuePtr = 0;
-}
-
-/**
- * Create an __orc_rt_CWrapperFunctionResult with an uninitialized buffer of
- * size Size. The buffer is returned via the DataPtr argument.
- */
-static inline char *
-__orc_rt_CWrapperFunctionResultAllocate(__orc_rt_CWrapperFunctionResult *R,
- size_t Size) {
- R->Size = Size;
- if (Size <= sizeof(R->Data.Value))
- return R->Data.Value;
-
- R->Data.ValuePtr = (char *)malloc(Size);
- return R->Data.ValuePtr;
-}
-
-/**
- * Create an __orc_rt_WrapperFunctionResult from the given data range.
- */
-static inline __orc_rt_CWrapperFunctionResult
-__orc_rt_CreateCWrapperFunctionResultFromRange(const char *Data, size_t Size) {
- __orc_rt_CWrapperFunctionResult R;
- R.Size = Size;
- if (R.Size > sizeof(R.Data.Value)) {
- char *Tmp = (char *)malloc(Size);
- memcpy(Tmp, Data, Size);
- R.Data.ValuePtr = Tmp;
- } else
- memcpy(R.Data.Value, Data, Size);
- return R;
-}
-
-/**
- * Create an __orc_rt_CWrapperFunctionResult by copying the given string,
- * including the null-terminator.
- *
- * This function copies the input string. The client is responsible for freeing
- * the ErrMsg arg.
- */
-static inline __orc_rt_CWrapperFunctionResult
-__orc_rt_CreateCWrapperFunctionResultFromString(const char *Source) {
- return __orc_rt_CreateCWrapperFunctionResultFromRange(Source,
- strlen(Source) + 1);
-}
-
-/**
- * Create an __orc_rt_CWrapperFunctionResult representing an out-of-band
- * error.
- *
- * This function takes ownership of the string argument which must have been
- * allocated with malloc.
- */
-static inline __orc_rt_CWrapperFunctionResult
-__orc_rt_CreateCWrapperFunctionResultFromOutOfBandError(const char *ErrMsg) {
- __orc_rt_CWrapperFunctionResult R;
- R.Size = 0;
- char *Tmp = (char *)malloc(strlen(ErrMsg) + 1);
- strcpy(Tmp, ErrMsg);
- R.Data.ValuePtr = Tmp;
- return R;
-}
-
-/**
- * This should be called to destroy __orc_rt_CWrapperFunctionResult values
- * regardless of their state.
- */
-static inline void
-__orc_rt_DisposeCWrapperFunctionResult(__orc_rt_CWrapperFunctionResult *R) {
- if (R->Size > sizeof(R->Data.Value) ||
- (R->Size == 0 && R->Data.ValuePtr))
- free(R->Data.ValuePtr);
-}
-
-/**
- * Get a pointer to the data contained in the given
- * __orc_rt_CWrapperFunctionResult.
- */
-static inline const char *
-__orc_rt_CWrapperFunctionResultData(const __orc_rt_CWrapperFunctionResult *R) {
- assert((R->Size != 0 || R->Data.ValuePtr == nullptr) &&
- "Cannot get data for out-of-band error value");
- return R->Size > sizeof(R->Data.Value) ? R->Data.ValuePtr : R->Data.Value;
-}
-
-/**
- * Safely get the size of the given __orc_rt_CWrapperFunctionResult.
- *
- * Asserts that we're not trying to access the size of an error value.
- */
-static inline size_t
-__orc_rt_CWrapperFunctionResultSize(const __orc_rt_CWrapperFunctionResult *R) {
- assert((R->Size != 0 || R->Data.ValuePtr == nullptr) &&
- "Cannot get size for out-of-band error value");
- return R->Size;
-}
-
-/**
- * Returns 1 if this value is equivalent to a value just initialized by
- * __orc_rt_CWrapperFunctionResultInit, 0 otherwise.
- */
-static inline size_t
-__orc_rt_CWrapperFunctionResultEmpty(const __orc_rt_CWrapperFunctionResult *R) {
- return R->Size == 0 && R->Data.ValuePtr == 0;
-}
-
-/**
- * Returns a pointer to the out-of-band error string for this
- * __orc_rt_CWrapperFunctionResult, or null if there is no error.
- *
- * The __orc_rt_CWrapperFunctionResult retains ownership of the error
- * string, so it should be copied if the caller wishes to preserve it.
- */
-static inline const char *__orc_rt_CWrapperFunctionResultGetOutOfBandError(
- const __orc_rt_CWrapperFunctionResult *R) {
- return R->Size == 0 ? R->Data.ValuePtr : 0;
-}
-
-ORC_RT_C_EXTERN_C_END
-
-#endif /* ORC_RT_C_API_H */
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/coff_platform.cpp b/contrib/llvm-project/compiler-rt/lib/orc/coff_platform.cpp
new file mode 100644
index 000000000000..9fe5c0b06289
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/orc/coff_platform.cpp
@@ -0,0 +1,775 @@
+//===- coff_platform.cpp --------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains code required to load the rest of the COFF runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#define NOMINMAX
+#include <windows.h>
+
+#include "coff_platform.h"
+
+#include "debug.h"
+#include "error.h"
+#include "wrapper_function_utils.h"
+
+#include <array>
+#include <list>
+#include <map>
+#include <mutex>
+#include <sstream>
+#include <string_view>
+#include <vector>
+
+#define DEBUG_TYPE "coff_platform"
+
+using namespace __orc_rt;
+
+namespace __orc_rt {
+
+using COFFJITDylibDepInfo = std::vector<ExecutorAddr>;
+using COFFJITDylibDepInfoMap =
+ std::unordered_map<ExecutorAddr, COFFJITDylibDepInfo>;
+
+using SPSCOFFObjectSectionsMap =
+ SPSSequence<SPSTuple<SPSString, SPSExecutorAddrRange>>;
+
+using SPSCOFFJITDylibDepInfo = SPSSequence<SPSExecutorAddr>;
+
+using SPSCOFFJITDylibDepInfoMap =
+ SPSSequence<SPSTuple<SPSExecutorAddr, SPSCOFFJITDylibDepInfo>>;
+
+} // namespace __orc_rt
+
+ORC_RT_JIT_DISPATCH_TAG(__orc_rt_coff_symbol_lookup_tag)
+ORC_RT_JIT_DISPATCH_TAG(__orc_rt_coff_push_initializers_tag)
+
+namespace {
+class COFFPlatformRuntimeState {
+private:
+ // Ctor/dtor section.
+ // Manage lists of *tor functions sorted by the last character of subsection
+ // name.
+ struct XtorSection {
+ void Register(char SubsectionChar, span<void (*)(void)> Xtors) {
+ Subsections[SubsectionChar - 'A'].push_back(Xtors);
+ SubsectionsNew[SubsectionChar - 'A'].push_back(Xtors);
+ }
+
+ void RegisterNoRun(char SubsectionChar, span<void (*)(void)> Xtors) {
+ Subsections[SubsectionChar - 'A'].push_back(Xtors);
+ }
+
+ void Reset() { SubsectionsNew = Subsections; }
+
+ void RunAllNewAndFlush();
+
+ private:
+ std::array<std::vector<span<void (*)(void)>>, 26> Subsections;
+ std::array<std::vector<span<void (*)(void)>>, 26> SubsectionsNew;
+ };
+
+ struct JITDylibState {
+ std::string Name;
+ void *Header = nullptr;
+ size_t LinkedAgainstRefCount = 0;
+ size_t DlRefCount = 0;
+ std::vector<JITDylibState *> Deps;
+ std::vector<void (*)(void)> AtExits;
+ XtorSection CInitSection; // XIA~XIZ
+ XtorSection CXXInitSection; // XCA~XCZ
+ XtorSection CPreTermSection; // XPA~XPZ
+ XtorSection CTermSection; // XTA~XTZ
+
+ bool referenced() const {
+ return LinkedAgainstRefCount != 0 || DlRefCount != 0;
+ }
+ };
+
+public:
+ static void initialize();
+ static COFFPlatformRuntimeState &get();
+ static bool isInitialized() { return CPS; }
+ static void destroy();
+
+ COFFPlatformRuntimeState() = default;
+
+ // Delete copy and move constructors.
+ COFFPlatformRuntimeState(const COFFPlatformRuntimeState &) = delete;
+ COFFPlatformRuntimeState &
+ operator=(const COFFPlatformRuntimeState &) = delete;
+ COFFPlatformRuntimeState(COFFPlatformRuntimeState &&) = delete;
+ COFFPlatformRuntimeState &operator=(COFFPlatformRuntimeState &&) = delete;
+
+ const char *dlerror();
+ void *dlopen(std::string_view Name, int Mode);
+ int dlclose(void *Header);
+ void *dlsym(void *Header, std::string_view Symbol);
+
+ Error registerJITDylib(std::string Name, void *Header);
+ Error deregisterJITDylib(void *Header);
+
+ Error registerAtExit(ExecutorAddr HeaderAddr, void (*AtExit)(void));
+
+ Error registerObjectSections(
+ ExecutorAddr HeaderAddr,
+ std::vector<std::pair<std::string_view, ExecutorAddrRange>> Secs,
+ bool RunInitializers);
+ Error deregisterObjectSections(
+ ExecutorAddr HeaderAddr,
+ std::vector<std::pair<std::string_view, ExecutorAddrRange>> Secs);
+
+ void *findJITDylibBaseByPC(uint64_t PC);
+
+private:
+ Error registerBlockRange(ExecutorAddr HeaderAddr, ExecutorAddrRange Range);
+ Error deregisterBlockRange(ExecutorAddr HeaderAddr, ExecutorAddrRange Range);
+
+ Error registerSEHFrames(ExecutorAddr HeaderAddr,
+ ExecutorAddrRange SEHFrameRange);
+ Error deregisterSEHFrames(ExecutorAddr HeaderAddr,
+ ExecutorAddrRange SEHFrameRange);
+
+ Expected<void *> dlopenImpl(std::string_view Path, int Mode);
+ Error dlopenFull(JITDylibState &JDS);
+ Error dlopenInitialize(JITDylibState &JDS, COFFJITDylibDepInfoMap &DepInfo);
+
+ Error dlcloseImpl(void *DSOHandle);
+ Error dlcloseDeinitialize(JITDylibState &JDS);
+
+ JITDylibState *getJITDylibStateByHeader(void *DSOHandle);
+ JITDylibState *getJITDylibStateByName(std::string_view Path);
+ Expected<ExecutorAddr> lookupSymbolInJITDylib(void *DSOHandle,
+ std::string_view Symbol);
+
+ static COFFPlatformRuntimeState *CPS;
+
+ std::recursive_mutex JDStatesMutex;
+ std::map<void *, JITDylibState> JDStates;
+ struct BlockRange {
+ void *Header;
+ size_t Size;
+ };
+ std::map<void *, BlockRange> BlockRanges;
+ std::unordered_map<std::string_view, void *> JDNameToHeader;
+ std::string DLFcnError;
+};
+
+} // namespace
+
+COFFPlatformRuntimeState *COFFPlatformRuntimeState::CPS = nullptr;
+
+COFFPlatformRuntimeState::JITDylibState *
+COFFPlatformRuntimeState::getJITDylibStateByHeader(void *Header) {
+ auto I = JDStates.find(Header);
+ if (I == JDStates.end())
+ return nullptr;
+ return &I->second;
+}
+
+COFFPlatformRuntimeState::JITDylibState *
+COFFPlatformRuntimeState::getJITDylibStateByName(std::string_view Name) {
+ // FIXME: Avoid creating string copy here.
+ auto I = JDNameToHeader.find(std::string(Name.data(), Name.size()));
+ if (I == JDNameToHeader.end())
+ return nullptr;
+ void *H = I->second;
+ auto J = JDStates.find(H);
+ assert(J != JDStates.end() &&
+ "JITDylib has name map entry but no header map entry");
+ return &J->second;
+}
+
+Error COFFPlatformRuntimeState::registerJITDylib(std::string Name,
+ void *Header) {
+ ORC_RT_DEBUG({
+ printdbg("Registering JITDylib %s: Header = %p\n", Name.c_str(), Header);
+ });
+ std::lock_guard<std::recursive_mutex> Lock(JDStatesMutex);
+ if (JDStates.count(Header)) {
+ std::ostringstream ErrStream;
+ ErrStream << "Duplicate JITDylib registration for header " << Header
+ << " (name = " << Name << ")";
+ return make_error<StringError>(ErrStream.str());
+ }
+ if (JDNameToHeader.count(Name)) {
+ std::ostringstream ErrStream;
+ ErrStream << "Duplicate JITDylib registration for header " << Header
+ << " (header = " << Header << ")";
+ return make_error<StringError>(ErrStream.str());
+ }
+
+ auto &JDS = JDStates[Header];
+ JDS.Name = std::move(Name);
+ JDS.Header = Header;
+ JDNameToHeader[JDS.Name] = Header;
+ return Error::success();
+}
+
+Error COFFPlatformRuntimeState::deregisterJITDylib(void *Header) {
+ std::lock_guard<std::recursive_mutex> Lock(JDStatesMutex);
+ auto I = JDStates.find(Header);
+ if (I == JDStates.end()) {
+ std::ostringstream ErrStream;
+ ErrStream << "Attempted to deregister unrecognized header " << Header;
+ return make_error<StringError>(ErrStream.str());
+ }
+
+ // Remove std::string construction once we can use C++20.
+ auto J = JDNameToHeader.find(
+ std::string(I->second.Name.data(), I->second.Name.size()));
+ assert(J != JDNameToHeader.end() &&
+ "Missing JDNameToHeader entry for JITDylib");
+
+ ORC_RT_DEBUG({
+ printdbg("Deregistering JITDylib %s: Header = %p\n", I->second.Name.c_str(),
+ Header);
+ });
+
+ JDNameToHeader.erase(J);
+ JDStates.erase(I);
+ return Error::success();
+}
+
+void COFFPlatformRuntimeState::XtorSection::RunAllNewAndFlush() {
+ for (auto &Subsection : SubsectionsNew) {
+ for (auto &XtorGroup : Subsection)
+ for (auto &Xtor : XtorGroup)
+ if (Xtor)
+ Xtor();
+ Subsection.clear();
+ }
+}
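These per-letter lists mirror the MSVC CRT convention: the linker sorts the .CRT$X?A through .CRT$X?Z subsections alphabetically, and the runtime walks them in that order. A hypothetical sketch (MSVC/clang-cl syntax) of how a translation unit plants a C++ initializer in .CRT$XCU, which this runtime would register under SubsectionChar 'U' of CXXInitSection:

    #include <cstdio>

    static void MyInit() { std::puts("runs during C++ initialization"); }

    // Place a function pointer in the .CRT$XCU subsection (read-only data).
    #pragma section(".CRT$XCU", read)
    __declspec(allocate(".CRT$XCU")) static void (*my_init_ptr)(void) = MyInit;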
+
+const char *COFFPlatformRuntimeState::dlerror() { return DLFcnError.c_str(); }
+
+void *COFFPlatformRuntimeState::dlopen(std::string_view Path, int Mode) {
+ ORC_RT_DEBUG({
+ std::string S(Path.data(), Path.size());
+ printdbg("COFFPlatform::dlopen(\"%s\")\n", S.c_str());
+ });
+ std::lock_guard<std::recursive_mutex> Lock(JDStatesMutex);
+ if (auto H = dlopenImpl(Path, Mode))
+ return *H;
+ else {
+ // FIXME: Make dlerror thread safe.
+ DLFcnError = toString(H.takeError());
+ return nullptr;
+ }
+}
+
+int COFFPlatformRuntimeState::dlclose(void *DSOHandle) {
+ ORC_RT_DEBUG({
+ auto *JDS = getJITDylibStateByHeader(DSOHandle);
+    if (JDS) {
+      std::string S = JDS->Name;
+      printdbg("COFFPlatform::dlclose(%p) (%s)\n", DSOHandle, S.c_str());
+ } else
+ printdbg("COFFPlatform::dlclose(%p) (%s)\n", DSOHandle, "invalid handle");
+ });
+ std::lock_guard<std::recursive_mutex> Lock(JDStatesMutex);
+ if (auto Err = dlcloseImpl(DSOHandle)) {
+ // FIXME: Make dlerror thread safe.
+ DLFcnError = toString(std::move(Err));
+ return -1;
+ }
+ return 0;
+}
+
+void *COFFPlatformRuntimeState::dlsym(void *Header, std::string_view Symbol) {
+ auto Addr = lookupSymbolInJITDylib(Header, Symbol);
+ if (!Addr) {
+ return 0;
+ }
+
+ return Addr->toPtr<void *>();
+}
+
+Expected<void *> COFFPlatformRuntimeState::dlopenImpl(std::string_view Path,
+ int Mode) {
+ // Try to find JITDylib state by name.
+ auto *JDS = getJITDylibStateByName(Path);
+
+ if (!JDS)
+ return make_error<StringError>("No registered JTIDylib for path " +
+ std::string(Path.data(), Path.size()));
+
+ if (auto Err = dlopenFull(*JDS))
+ return std::move(Err);
+
+ // Bump the ref-count on this dylib.
+ ++JDS->DlRefCount;
+
+ // Return the header address.
+ return JDS->Header;
+}
+
+Error COFFPlatformRuntimeState::dlopenFull(JITDylibState &JDS) {
+ // Call back to the JIT to push the initializers.
+ Expected<COFFJITDylibDepInfoMap> DepInfoMap((COFFJITDylibDepInfoMap()));
+ if (auto Err = WrapperFunction<SPSExpected<SPSCOFFJITDylibDepInfoMap>(
+ SPSExecutorAddr)>::call(&__orc_rt_coff_push_initializers_tag,
+ DepInfoMap,
+ ExecutorAddr::fromPtr(JDS.Header)))
+ return Err;
+ if (!DepInfoMap)
+ return DepInfoMap.takeError();
+
+ if (auto Err = dlopenInitialize(JDS, *DepInfoMap))
+ return Err;
+
+ if (!DepInfoMap->empty()) {
+ ORC_RT_DEBUG({
+ printdbg("Unrecognized dep-info key headers in dlopen of %s\n",
+ JDS.Name.c_str());
+ });
+ std::ostringstream ErrStream;
+ ErrStream << "Encountered unrecognized dep-info key headers "
+ "while processing dlopen of "
+ << JDS.Name;
+ return make_error<StringError>(ErrStream.str());
+ }
+
+ return Error::success();
+}
+
+Error COFFPlatformRuntimeState::dlopenInitialize(
+ JITDylibState &JDS, COFFJITDylibDepInfoMap &DepInfo) {
+ ORC_RT_DEBUG({
+ printdbg("COFFPlatformRuntimeState::dlopenInitialize(\"%s\")\n",
+ JDS.Name.c_str());
+ });
+
+ // Skip visited dependency.
+ auto I = DepInfo.find(ExecutorAddr::fromPtr(JDS.Header));
+ if (I == DepInfo.end())
+ return Error::success();
+
+ auto DI = std::move(I->second);
+ DepInfo.erase(I);
+
+ // Run initializers of dependencies in proper order by depth-first traversal
+ // of dependency graph.
+ std::vector<JITDylibState *> OldDeps;
+ std::swap(JDS.Deps, OldDeps);
+ JDS.Deps.reserve(DI.size());
+ for (auto DepHeaderAddr : DI) {
+ auto *DepJDS = getJITDylibStateByHeader(DepHeaderAddr.toPtr<void *>());
+ if (!DepJDS) {
+ std::ostringstream ErrStream;
+ ErrStream << "Encountered unrecognized dep header "
+ << DepHeaderAddr.toPtr<void *>() << " while initializing "
+ << JDS.Name;
+ return make_error<StringError>(ErrStream.str());
+ }
+ ++DepJDS->LinkedAgainstRefCount;
+ if (auto Err = dlopenInitialize(*DepJDS, DepInfo))
+ return Err;
+ }
+
+ // Run static initializers.
+ JDS.CInitSection.RunAllNewAndFlush();
+ JDS.CXXInitSection.RunAllNewAndFlush();
+
+ // Decrement old deps.
+ for (auto *DepJDS : OldDeps) {
+ --DepJDS->LinkedAgainstRefCount;
+ if (!DepJDS->referenced())
+ if (auto Err = dlcloseDeinitialize(*DepJDS))
+ return Err;
+ }
+
+ return Error::success();
+}
+
+Error COFFPlatformRuntimeState::dlcloseImpl(void *DSOHandle) {
+ // Try to find JITDylib state by header.
+ auto *JDS = getJITDylibStateByHeader(DSOHandle);
+
+ if (!JDS) {
+ std::ostringstream ErrStream;
+ ErrStream << "No registered JITDylib for " << DSOHandle;
+ return make_error<StringError>(ErrStream.str());
+ }
+
+  // Decrement the ref-count on this dylib.
+ --JDS->DlRefCount;
+
+ if (!JDS->referenced())
+ return dlcloseDeinitialize(*JDS);
+
+ return Error::success();
+}
+
+Error COFFPlatformRuntimeState::dlcloseDeinitialize(JITDylibState &JDS) {
+ ORC_RT_DEBUG({
+ printdbg("COFFPlatformRuntimeState::dlcloseDeinitialize(\"%s\")\n",
+ JDS.Name.c_str());
+ });
+
+ // Run atexits
+ for (auto AtExit : JDS.AtExits)
+ AtExit();
+ JDS.AtExits.clear();
+
+ // Run static terminators.
+ JDS.CPreTermSection.RunAllNewAndFlush();
+ JDS.CTermSection.RunAllNewAndFlush();
+
+ // Queue all xtors as new again.
+ JDS.CInitSection.Reset();
+ JDS.CXXInitSection.Reset();
+ JDS.CPreTermSection.Reset();
+ JDS.CTermSection.Reset();
+
+ // Deinitialize any dependencies.
+ for (auto *DepJDS : JDS.Deps) {
+ --DepJDS->LinkedAgainstRefCount;
+ if (!DepJDS->referenced())
+ if (auto Err = dlcloseDeinitialize(*DepJDS))
+ return Err;
+ }
+
+ return Error::success();
+}
+
+Expected<ExecutorAddr>
+COFFPlatformRuntimeState::lookupSymbolInJITDylib(void *header,
+ std::string_view Sym) {
+ Expected<ExecutorAddr> Result((ExecutorAddr()));
+ if (auto Err = WrapperFunction<SPSExpected<SPSExecutorAddr>(
+ SPSExecutorAddr, SPSString)>::call(&__orc_rt_coff_symbol_lookup_tag,
+ Result,
+ ExecutorAddr::fromPtr(header),
+ Sym))
+ return std::move(Err);
+ return Result;
+}
+
+Error COFFPlatformRuntimeState::registerObjectSections(
+ ExecutorAddr HeaderAddr,
+ std::vector<std::pair<std::string_view, ExecutorAddrRange>> Secs,
+ bool RunInitializers) {
+ std::lock_guard<std::recursive_mutex> Lock(JDStatesMutex);
+ auto I = JDStates.find(HeaderAddr.toPtr<void *>());
+ if (I == JDStates.end()) {
+ std::ostringstream ErrStream;
+ ErrStream << "Unrecognized header " << HeaderAddr.getValue();
+ return make_error<StringError>(ErrStream.str());
+ }
+ auto &JDState = I->second;
+ for (auto &KV : Secs) {
+ if (auto Err = registerBlockRange(HeaderAddr, KV.second))
+ return Err;
+ if (KV.first.empty())
+ continue;
+ char LastChar = KV.first.data()[KV.first.size() - 1];
+ if (KV.first == ".pdata") {
+ if (auto Err = registerSEHFrames(HeaderAddr, KV.second))
+ return Err;
+ } else if (KV.first >= ".CRT$XIA" && KV.first <= ".CRT$XIZ") {
+ if (RunInitializers)
+ JDState.CInitSection.Register(LastChar,
+ KV.second.toSpan<void (*)(void)>());
+ else
+ JDState.CInitSection.RegisterNoRun(LastChar,
+ KV.second.toSpan<void (*)(void)>());
+ } else if (KV.first >= ".CRT$XCA" && KV.first <= ".CRT$XCZ") {
+ if (RunInitializers)
+ JDState.CXXInitSection.Register(LastChar,
+ KV.second.toSpan<void (*)(void)>());
+ else
+ JDState.CXXInitSection.RegisterNoRun(
+ LastChar, KV.second.toSpan<void (*)(void)>());
+ } else if (KV.first >= ".CRT$XPA" && KV.first <= ".CRT$XPZ")
+ JDState.CPreTermSection.Register(LastChar,
+ KV.second.toSpan<void (*)(void)>());
+ else if (KV.first >= ".CRT$XTA" && KV.first <= ".CRT$XTZ")
+ JDState.CTermSection.Register(LastChar,
+ KV.second.toSpan<void (*)(void)>());
+ }
+ return Error::success();
+}
+
+Error COFFPlatformRuntimeState::deregisterObjectSections(
+ ExecutorAddr HeaderAddr,
+ std::vector<std::pair<std::string_view, ExecutorAddrRange>> Secs) {
+ std::lock_guard<std::recursive_mutex> Lock(JDStatesMutex);
+ auto I = JDStates.find(HeaderAddr.toPtr<void *>());
+ if (I == JDStates.end()) {
+ std::ostringstream ErrStream;
+ ErrStream << "Attempted to deregister unrecognized header "
+ << HeaderAddr.getValue();
+ return make_error<StringError>(ErrStream.str());
+ }
+ for (auto &KV : Secs) {
+ if (auto Err = deregisterBlockRange(HeaderAddr, KV.second))
+ return Err;
+ if (KV.first == ".pdata")
+ if (auto Err = deregisterSEHFrames(HeaderAddr, KV.second))
+ return Err;
+ }
+ return Error::success();
+}
+
+Error COFFPlatformRuntimeState::registerSEHFrames(
+ ExecutorAddr HeaderAddr, ExecutorAddrRange SEHFrameRange) {
+ int N = (SEHFrameRange.End.getValue() - SEHFrameRange.Start.getValue()) /
+ sizeof(RUNTIME_FUNCTION);
+ auto Func = SEHFrameRange.Start.toPtr<PRUNTIME_FUNCTION>();
+ if (!RtlAddFunctionTable(Func, N,
+ static_cast<DWORD64>(HeaderAddr.getValue())))
+ return make_error<StringError>("Failed to register SEH frames");
+ return Error::success();
+}
+
+Error COFFPlatformRuntimeState::deregisterSEHFrames(
+ ExecutorAddr HeaderAddr, ExecutorAddrRange SEHFrameRange) {
+ if (!RtlDeleteFunctionTable(SEHFrameRange.Start.toPtr<PRUNTIME_FUNCTION>()))
+ return make_error<StringError>("Failed to deregister SEH frames");
+ return Error::success();
+}
+
+Error COFFPlatformRuntimeState::registerBlockRange(ExecutorAddr HeaderAddr,
+ ExecutorAddrRange Range) {
+ assert(!BlockRanges.count(Range.Start.toPtr<void *>()) &&
+ "Block range address already registered");
+ BlockRange B = {HeaderAddr.toPtr<void *>(), Range.size()};
+ BlockRanges.emplace(Range.Start.toPtr<void *>(), B);
+ return Error::success();
+}
+
+Error COFFPlatformRuntimeState::deregisterBlockRange(ExecutorAddr HeaderAddr,
+ ExecutorAddrRange Range) {
+ assert(BlockRanges.count(Range.Start.toPtr<void *>()) &&
+ "Block range address not registered");
+ BlockRanges.erase(Range.Start.toPtr<void *>());
+ return Error::success();
+}
+
+Error COFFPlatformRuntimeState::registerAtExit(ExecutorAddr HeaderAddr,
+ void (*AtExit)(void)) {
+ std::lock_guard<std::recursive_mutex> Lock(JDStatesMutex);
+ auto I = JDStates.find(HeaderAddr.toPtr<void *>());
+ if (I == JDStates.end()) {
+ std::ostringstream ErrStream;
+ ErrStream << "Unrecognized header " << HeaderAddr.getValue();
+ return make_error<StringError>(ErrStream.str());
+ }
+ I->second.AtExits.push_back(AtExit);
+ return Error::success();
+}
+
+void COFFPlatformRuntimeState::initialize() {
+ assert(!CPS && "COFFPlatformRuntimeState should be null");
+ CPS = new COFFPlatformRuntimeState();
+}
+
+COFFPlatformRuntimeState &COFFPlatformRuntimeState::get() {
+ assert(CPS && "COFFPlatformRuntimeState not initialized");
+ return *CPS;
+}
+
+void COFFPlatformRuntimeState::destroy() {
+ assert(CPS && "COFFPlatformRuntimeState not initialized");
+ delete CPS;
+}
+
+void *COFFPlatformRuntimeState::findJITDylibBaseByPC(uint64_t PC) {
+ std::lock_guard<std::recursive_mutex> Lock(JDStatesMutex);
+ auto It = BlockRanges.upper_bound(reinterpret_cast<void *>(PC));
+ if (It == BlockRanges.begin())
+ return nullptr;
+ --It;
+ auto &Range = It->second;
+ if (PC >= reinterpret_cast<uint64_t>(It->first) + Range.Size)
+ return nullptr;
+ return Range.Header;
+}
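findJITDylibBaseByPC is the usual ordered-map interval lookup: upper_bound returns the first range starting strictly after PC, so its predecessor is the only candidate that could contain PC. A self-contained sketch of the same pattern, with range starts as keys and sizes as values:

    #include <cstdint>
    #include <map>

    static const void *FindRangeBase(const std::map<uint64_t, size_t> &Ranges,
                                     uint64_t Addr) {
      auto It = Ranges.upper_bound(Addr); // First range starting after Addr.
      if (It == Ranges.begin())
        return nullptr;                   // Addr precedes every range.
      --It;                               // Last range starting at or before Addr.
      if (Addr >= It->first + It->second)
        return nullptr;                   // Addr lies past that range's end.
      return reinterpret_cast<const void *>(It->first);
    }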
+
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
+__orc_rt_coff_platform_bootstrap(char *ArgData, size_t ArgSize) {
+ COFFPlatformRuntimeState::initialize();
+ return WrapperFunctionResult().release();
+}
+
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
+__orc_rt_coff_platform_shutdown(char *ArgData, size_t ArgSize) {
+ COFFPlatformRuntimeState::destroy();
+ return WrapperFunctionResult().release();
+}
+
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
+__orc_rt_coff_register_jitdylib(char *ArgData, size_t ArgSize) {
+ return WrapperFunction<SPSError(SPSString, SPSExecutorAddr)>::handle(
+ ArgData, ArgSize,
+ [](std::string &Name, ExecutorAddr HeaderAddr) {
+ return COFFPlatformRuntimeState::get().registerJITDylib(
+ std::move(Name), HeaderAddr.toPtr<void *>());
+ })
+ .release();
+}
+
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
+__orc_rt_coff_deregister_jitdylib(char *ArgData, size_t ArgSize) {
+ return WrapperFunction<SPSError(SPSExecutorAddr)>::handle(
+ ArgData, ArgSize,
+ [](ExecutorAddr HeaderAddr) {
+ return COFFPlatformRuntimeState::get().deregisterJITDylib(
+ HeaderAddr.toPtr<void *>());
+ })
+ .release();
+}
+
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
+__orc_rt_coff_register_object_sections(char *ArgData, size_t ArgSize) {
+ return WrapperFunction<SPSError(SPSExecutorAddr, SPSCOFFObjectSectionsMap,
+ bool)>::
+ handle(ArgData, ArgSize,
+ [](ExecutorAddr HeaderAddr,
+ std::vector<std::pair<std::string_view, ExecutorAddrRange>>
+ &Secs,
+ bool RunInitializers) {
+ return COFFPlatformRuntimeState::get().registerObjectSections(
+ HeaderAddr, std::move(Secs), RunInitializers);
+ })
+ .release();
+}
+
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
+__orc_rt_coff_deregister_object_sections(char *ArgData, size_t ArgSize) {
+ return WrapperFunction<SPSError(SPSExecutorAddr, SPSCOFFObjectSectionsMap)>::
+ handle(ArgData, ArgSize,
+ [](ExecutorAddr HeaderAddr,
+ std::vector<std::pair<std::string_view, ExecutorAddrRange>>
+ &Secs) {
+ return COFFPlatformRuntimeState::get().deregisterObjectSections(
+ HeaderAddr, std::move(Secs));
+ })
+ .release();
+}
+//------------------------------------------------------------------------------
+// JIT'd dlfcn alternatives.
+//------------------------------------------------------------------------------
+
+const char *__orc_rt_coff_jit_dlerror() {
+ return COFFPlatformRuntimeState::get().dlerror();
+}
+
+void *__orc_rt_coff_jit_dlopen(const char *path, int mode) {
+ return COFFPlatformRuntimeState::get().dlopen(path, mode);
+}
+
+int __orc_rt_coff_jit_dlclose(void *header) {
+ return COFFPlatformRuntimeState::get().dlclose(header);
+}
+
+void *__orc_rt_coff_jit_dlsym(void *header, const char *symbol) {
+ return COFFPlatformRuntimeState::get().dlsym(header, symbol);
+}
+
+//------------------------------------------------------------------------------
+// COFF SEH exception support
+//------------------------------------------------------------------------------
+
+struct ThrowInfo {
+ uint32_t attributes;
+ void *data;
+};
+
+ORC_RT_INTERFACE void __stdcall __orc_rt_coff_cxx_throw_exception(
+ void *pExceptionObject, ThrowInfo *pThrowInfo) {
+#ifdef __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wmultichar"
+#endif
+ constexpr uint32_t EH_EXCEPTION_NUMBER = 'msc' | 0xE0000000;
+#ifdef __clang__
+#pragma clang diagnostic pop
+#endif
+ constexpr uint32_t EH_MAGIC_NUMBER1 = 0x19930520;
+ auto BaseAddr = COFFPlatformRuntimeState::get().findJITDylibBaseByPC(
+ reinterpret_cast<uint64_t>(pThrowInfo));
+ if (!BaseAddr) {
+ // This is not from JIT'd region.
+ // FIXME: Use the default implementation like below when alias api is
+ // capable. _CxxThrowException(pExceptionObject, pThrowInfo);
+ fprintf(stderr, "Throwing exception from compiled callback into JIT'd "
+ "exception handler not supported yet.\n");
+ abort();
+ return;
+ }
+ const ULONG_PTR parameters[] = {
+ EH_MAGIC_NUMBER1,
+ reinterpret_cast<ULONG_PTR>(pExceptionObject),
+ reinterpret_cast<ULONG_PTR>(pThrowInfo),
+ reinterpret_cast<ULONG_PTR>(BaseAddr),
+ };
+ RaiseException(EH_EXCEPTION_NUMBER, EXCEPTION_NONCONTINUABLE,
+ _countof(parameters), parameters);
+}
+
+//------------------------------------------------------------------------------
+// COFF atexits
+//------------------------------------------------------------------------------
+
+typedef int (*OnExitFunction)(void);
+typedef void (*AtExitFunction)(void);
+
+ORC_RT_INTERFACE OnExitFunction __orc_rt_coff_onexit(void *Header,
+ OnExitFunction Func) {
+ if (auto Err = COFFPlatformRuntimeState::get().registerAtExit(
+ ExecutorAddr::fromPtr(Header), (void (*)(void))Func)) {
+ consumeError(std::move(Err));
+ return nullptr;
+ }
+ return Func;
+}
+
+ORC_RT_INTERFACE int __orc_rt_coff_atexit(void *Header, AtExitFunction Func) {
+ if (auto Err = COFFPlatformRuntimeState::get().registerAtExit(
+ ExecutorAddr::fromPtr(Header), (void (*)(void))Func)) {
+ consumeError(std::move(Err));
+ return -1;
+ }
+ return 0;
+}
+
+//------------------------------------------------------------------------------
+// COFF Run Program
+//------------------------------------------------------------------------------
+
+ORC_RT_INTERFACE int64_t __orc_rt_coff_run_program(const char *JITDylibName,
+ const char *EntrySymbolName,
+ int argc, char *argv[]) {
+ using MainTy = int (*)(int, char *[]);
+
+ void *H =
+ __orc_rt_coff_jit_dlopen(JITDylibName, __orc_rt::coff::ORC_RT_RTLD_LAZY);
+ if (!H) {
+ __orc_rt_log_error(__orc_rt_coff_jit_dlerror());
+ return -1;
+ }
+
+ auto *Main =
+ reinterpret_cast<MainTy>(__orc_rt_coff_jit_dlsym(H, EntrySymbolName));
+
+ if (!Main) {
+ __orc_rt_log_error(__orc_rt_coff_jit_dlerror());
+ return -1;
+ }
+
+ int Result = Main(argc, argv);
+
+ if (__orc_rt_coff_jit_dlclose(H) == -1)
+ __orc_rt_log_error(__orc_rt_coff_jit_dlerror());
+
+ return Result;
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/coff_platform.h b/contrib/llvm-project/compiler-rt/lib/orc/coff_platform.h
new file mode 100644
index 000000000000..c84185d40b60
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/orc/coff_platform.h
@@ -0,0 +1,39 @@
+//===- coff_platform.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// ORC Runtime support for dynamic loading features on COFF-based platforms.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef ORC_RT_COFF_PLATFORM_H
+#define ORC_RT_COFF_PLATFORM_H
+
+#include "common.h"
+#include "executor_address.h"
+
+// dlfcn functions.
+ORC_RT_INTERFACE const char *__orc_rt_coff_jit_dlerror();
+ORC_RT_INTERFACE void *__orc_rt_coff_jit_dlopen(const char *path, int mode);
+ORC_RT_INTERFACE int __orc_rt_coff_jit_dlclose(void *header);
+ORC_RT_INTERFACE void *__orc_rt_coff_jit_dlsym(void *header,
+ const char *symbol);
+
+namespace __orc_rt {
+namespace coff {
+
+enum dlopen_mode : int {
+ ORC_RT_RTLD_LAZY = 0x1,
+ ORC_RT_RTLD_NOW = 0x2,
+ ORC_RT_RTLD_LOCAL = 0x4,
+ ORC_RT_RTLD_GLOBAL = 0x8
+};
+
+} // end namespace coff
+} // end namespace __orc_rt
+
+#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/coff_platform.per_jd.cpp b/contrib/llvm-project/compiler-rt/lib/orc/coff_platform.per_jd.cpp
new file mode 100644
index 000000000000..6c208cb31858
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/orc/coff_platform.per_jd.cpp
@@ -0,0 +1,31 @@
+//===- coff_platform.per_jd.cpp -------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains code that will be loaded per each JITDylib.
+//
+//===----------------------------------------------------------------------===//
+#include "compiler.h"
+
+ORC_RT_INTERFACE void __orc_rt_coff_per_jd_marker() {}
+
+typedef int (*OnExitFunction)(void);
+typedef void (*AtExitFunction)(void);
+
+extern "C" void *__ImageBase;
+ORC_RT_INTERFACE OnExitFunction __orc_rt_coff_onexit(void *Header,
+ OnExitFunction Func);
+ORC_RT_INTERFACE int __orc_rt_coff_atexit(void *Header, AtExitFunction Func);
+
+ORC_RT_INTERFACE OnExitFunction
+__orc_rt_coff_onexit_per_jd(OnExitFunction Func) {
+ return __orc_rt_coff_onexit(&__ImageBase, Func);
+}
+
+ORC_RT_INTERFACE int __orc_rt_coff_atexit_per_jd(AtExitFunction Func) {
+ return __orc_rt_coff_atexit(&__ImageBase, Func);
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/common.h b/contrib/llvm-project/compiler-rt/lib/orc/common.h
index 54e613ecb42e..73c5c4a2bd8d 100644
--- a/contrib/llvm-project/compiler-rt/lib/orc/common.h
+++ b/contrib/llvm-project/compiler-rt/lib/orc/common.h
@@ -13,8 +13,8 @@
#ifndef ORC_RT_COMMON_H
#define ORC_RT_COMMON_H
-#include "c_api.h"
#include "compiler.h"
+#include "orc_rt/c_api.h"
#include <type_traits>
/// This macro should be used to define tags that will be associated with
@@ -34,14 +34,14 @@ extern "C" void __orc_rt_log_error(const char *ErrMsg);
/// This is declared for use by the runtime, but should be implemented in the
/// executor or provided by a definition added to the JIT before the runtime
/// is loaded.
-extern "C" __orc_rt_Opaque __orc_rt_jit_dispatch_ctx ORC_RT_WEAK_IMPORT;
+ORC_RT_IMPORT __orc_rt_Opaque __orc_rt_jit_dispatch_ctx ORC_RT_WEAK_IMPORT;
/// For dispatching calls to the JIT object.
///
/// This is declared for use by the runtime, but should be implemented in the
/// executor or provided by a definition added to the JIT before the runtime
/// is loaded.
-extern "C" __orc_rt_CWrapperFunctionResult
+ORC_RT_IMPORT orc_rt_CWrapperFunctionResult
__orc_rt_jit_dispatch(__orc_rt_Opaque *DispatchCtx, const void *FnTag,
const char *Data, size_t Size) ORC_RT_WEAK_IMPORT;
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/compiler.h b/contrib/llvm-project/compiler-rt/lib/orc/compiler.h
index 2e4cd144e335..88cb3d92b03b 100644
--- a/contrib/llvm-project/compiler-rt/lib/orc/compiler.h
+++ b/contrib/llvm-project/compiler-rt/lib/orc/compiler.h
@@ -15,8 +15,15 @@
#ifndef ORC_RT_COMPILER_H
#define ORC_RT_COMPILER_H
+#if defined(_WIN32)
+#define ORC_RT_INTERFACE extern "C"
+#define ORC_RT_HIDDEN
+#define ORC_RT_IMPORT extern "C" __declspec(dllimport)
+#else
#define ORC_RT_INTERFACE extern "C" __attribute__((visibility("default")))
#define ORC_RT_HIDDEN __attribute__((visibility("hidden")))
+#define ORC_RT_IMPORT extern "C"
+#endif
#ifndef __has_builtin
# define __has_builtin(x) 0
@@ -56,8 +63,10 @@
#define ORC_RT_UNLIKELY(EXPR) (EXPR)
#endif
-#ifdef __APPLE__
+#if defined(__APPLE__)
#define ORC_RT_WEAK_IMPORT __attribute__((weak_import))
+#elif defined(_WIN32)
+#define ORC_RT_WEAK_IMPORT
#else
#define ORC_RT_WEAK_IMPORT __attribute__((weak))
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/debug.cpp b/contrib/llvm-project/compiler-rt/lib/orc/debug.cpp
new file mode 100644
index 000000000000..af20fa4e6f4e
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/orc/debug.cpp
@@ -0,0 +1,83 @@
+//===- debug.cpp ----------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of the ORC runtime support library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "debug.h"
+
+#include <cassert>
+#include <cstdarg>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+
+
+
+#ifndef NDEBUG
+
+std::atomic<const char *> DebugTypes;
+char DebugTypesAll;
+char DebugTypesNone;
+
+/// Sets the DebugState and DebugTypes values -- this function may be called
+/// concurrently on multiple threads, but will always assign the same values so
+/// this should be safe.
+const char *initializeDebug() {
+ if (const char *DT = getenv("ORC_RT_DEBUG")) {
+ // If ORC_RT_DEBUG=1 then log everything.
+ if (strcmp(DT, "1") == 0) {
+ DebugTypes.store(&DebugTypesAll, std::memory_order_relaxed);
+ return &DebugTypesAll;
+ }
+
+ // If ORC_RT_DEBUG is non-empty then record the string for use in
+ // debugTypeEnabled.
+ if (strcmp(DT, "") != 0) {
+ DebugTypes.store(DT, std::memory_order_relaxed);
+ return DT;
+ }
+ }
+
+ // If ORC_RT_DEBUG is undefined or defined as empty then log nothing.
+ DebugTypes.store(&DebugTypesNone, std::memory_order_relaxed);
+ return &DebugTypesNone;
+}
+
+bool debugTypeEnabled(const char *Type, const char *Types) {
+ assert(Types && Types != &DebugTypesAll && Types != &DebugTypesNone &&
+ "Invalid Types value");
+ size_t TypeLen = strlen(Type);
+ const char *Start = Types;
+ const char *End = Start;
+
+ do {
+ if (*End == '\0' || *End == ',') {
+ size_t ItemLen = End - Start;
+ if (ItemLen == TypeLen && memcmp(Type, Start, TypeLen) == 0)
+ return true;
+ if (*End == '\0')
+ return false;
+ Start = End + 1;
+ }
+ ++End;
+ } while (true);
+}
+
+void printdbg(const char *format, ...) {
+ va_list Args;
+ va_start(Args, format);
+ vfprintf(stderr, format, Args);
+ va_end(Args);
+}
+
+#endif // !NDEBUG
+
+} // end namespace __orc_rt
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/debug.h b/contrib/llvm-project/compiler-rt/lib/orc/debug.h
new file mode 100644
index 000000000000..a0bc653d032e
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/orc/debug.h
@@ -0,0 +1,56 @@
+//===- debug.h - Debugging output utilities ---------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of the ORC runtime support library.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef ORC_RT_DEBUG_H
+#define ORC_RT_DEBUG_H
+
+#include <atomic>
+
+#ifndef NDEBUG
+
+namespace __orc_rt {
+
+extern std::atomic<const char *> DebugTypes;
+extern char DebugTypesAll;
+extern char DebugTypesNone;
+
+const char *initializeDebug();
+bool debugTypeEnabled(const char *Type, const char *Types);
+void printdbg(const char *format, ...);
+
+} // namespace __orc_rt
+
+#define ORC_RT_DEBUG_WITH_TYPE(TYPE, X) \
+ do { \
+ const char *Types = \
+ ::__orc_rt::DebugTypes.load(std::memory_order_relaxed); \
+ if (!Types) \
+ Types = ::__orc_rt::initializeDebug(); \
+ if (Types == &::__orc_rt::DebugTypesNone) \
+ break; \
+ if (Types == &::__orc_rt::DebugTypesAll || \
+ ::__orc_rt::debugTypeEnabled(TYPE, Types)) { \
+ X; \
+ } \
+ } while (false)
+
+#else
+
+#define ORC_RT_DEBUG_WITH_TYPE(TYPE, X) \
+ do { \
+ } while (false)
+
+#endif // !NDEBUG
+
+#define ORC_RT_DEBUG(X) ORC_RT_DEBUG_WITH_TYPE(DEBUG_TYPE, X)
+
+#endif // ORC_RT_DEBUG_H
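
A usage sketch for the machinery above: output is compiled in only when NDEBUG is not defined, and emitted only when the ORC_RT_DEBUG environment variable is set to 1 (all types) or to a comma-separated list containing the current DEBUG_TYPE. DEBUG_TYPE and notifyLoaded below are illustrative names, not part of the runtime:

    #define DEBUG_TYPE "elfnix_platform"

    void notifyLoaded(const char *Name) {
      ORC_RT_DEBUG({ __orc_rt::printdbg("loaded: %s\n", Name); });
    }

Running with ORC_RT_DEBUG=elfnix_platform (or ORC_RT_DEBUG=1) prints the message; with the variable unset or empty, nothing is printed.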
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/dlfcn_wrapper.cpp b/contrib/llvm-project/compiler-rt/lib/orc/dlfcn_wrapper.cpp
new file mode 100644
index 000000000000..ece63da2cb48
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/orc/dlfcn_wrapper.cpp
@@ -0,0 +1,52 @@
+//===- dlfcn_wrapper.cpp --------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of the ORC runtime support library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "adt.h"
+#include "common.h"
+#include "wrapper_function_utils.h"
+
+#include <vector>
+
+using namespace __orc_rt;
+
+extern "C" const char *__orc_rt_jit_dlerror();
+extern "C" void *__orc_rt_jit_dlopen(const char *path, int mode);
+extern "C" int __orc_rt_jit_dlclose(void *dso_handle);
+
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
+__orc_rt_jit_dlerror_wrapper(const char *ArgData, size_t ArgSize) {
+ return WrapperFunction<SPSString()>::handle(
+ ArgData, ArgSize,
+ []() { return std::string(__orc_rt_jit_dlerror()); })
+ .release();
+}
+
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
+__orc_rt_jit_dlopen_wrapper(const char *ArgData, size_t ArgSize) {
+ return WrapperFunction<SPSExecutorAddr(SPSString, int32_t)>::handle(
+ ArgData, ArgSize,
+ [](const std::string &Path, int32_t mode) {
+ return ExecutorAddr::fromPtr(
+ __orc_rt_jit_dlopen(Path.c_str(), mode));
+ })
+ .release();
+}
+
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
+__orc_rt_jit_dlclose_wrapper(const char *ArgData, size_t ArgSize) {
+ return WrapperFunction<int32_t(SPSExecutorAddr)>::handle(
+ ArgData, ArgSize,
+ [](ExecutorAddr &DSOHandle) {
+ return __orc_rt_jit_dlclose(DSOHandle.toPtr<void *>());
+ })
+ .release();
+}
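
Each wrapper above follows the same shape: WrapperFunction<...>::handle deserializes the SPS-packed arguments, invokes the lambda, and packages its result as an orc_rt_CWrapperFunctionResult that is released to the caller. A hypothetical dlsym wrapper in the same style (__orc_rt_jit_dlsym is an assumed executor-side symbol; this file does not define such a wrapper):

    extern "C" void *__orc_rt_jit_dlsym(void *dso_handle, const char *symbol);

    ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
    __orc_rt_jit_dlsym_wrapper(const char *ArgData, size_t ArgSize) {
      return WrapperFunction<SPSExecutorAddr(SPSExecutorAddr, SPSString)>::
                 handle(ArgData, ArgSize,
                        [](ExecutorAddr &DSOHandle, const std::string &Sym) {
                          return ExecutorAddr::fromPtr(__orc_rt_jit_dlsym(
                              DSOHandle.toPtr<void *>(), Sym.c_str()));
                        })
          .release();
    }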
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/elfnix_platform.cpp b/contrib/llvm-project/compiler-rt/lib/orc/elfnix_platform.cpp
new file mode 100644
index 000000000000..c087e71038f9
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/orc/elfnix_platform.cpp
@@ -0,0 +1,624 @@
+//===- elfnix_platform.cpp ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains code required to load the rest of the ELF-on-*IX runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#include "elfnix_platform.h"
+#include "common.h"
+#include "compiler.h"
+#include "error.h"
+#include "wrapper_function_utils.h"
+
+#include <algorithm>
+#include <map>
+#include <mutex>
+#include <sstream>
+#include <string_view>
+#include <unordered_map>
+#include <vector>
+
+using namespace __orc_rt;
+using namespace __orc_rt::elfnix;
+
+// Declare function tags for functions in the JIT process.
+ORC_RT_JIT_DISPATCH_TAG(__orc_rt_elfnix_get_initializers_tag)
+ORC_RT_JIT_DISPATCH_TAG(__orc_rt_elfnix_get_deinitializers_tag)
+ORC_RT_JIT_DISPATCH_TAG(__orc_rt_elfnix_symbol_lookup_tag)
+
+// eh-frame registration functions, made available via aliases
+// installed by the Platform
+extern "C" void __register_frame(const void *);
+extern "C" void __deregister_frame(const void *);
+
+extern "C" void
+__unw_add_dynamic_eh_frame_section(const void *) ORC_RT_WEAK_IMPORT;
+extern "C" void
+__unw_remove_dynamic_eh_frame_section(const void *) ORC_RT_WEAK_IMPORT;
+
+namespace {
+
+Error validatePointerSectionExtent(const char *SectionName,
+ const ExecutorAddrRange &SE) {
+ if (SE.size() % sizeof(uintptr_t)) {
+ std::ostringstream ErrMsg;
+ ErrMsg << std::hex << "Size of " << SectionName << " 0x"
+ << SE.Start.getValue() << " -- 0x" << SE.End.getValue()
+ << " is not a pointer multiple";
+ return make_error<StringError>(ErrMsg.str());
+ }
+ return Error::success();
+}
+
+Error runInitArray(const std::vector<ExecutorAddrRange> &InitArraySections,
+ const ELFNixJITDylibInitializers &MOJDIs) {
+
+ for (const auto &ModInits : InitArraySections) {
+ if (auto Err = validatePointerSectionExtent(".init_array", ModInits))
+ return Err;
+
+ using InitFunc = void (*)();
+ for (auto *Init : ModInits.toSpan<InitFunc>())
+ (*Init)();
+ }
+
+ return Error::success();
+}
+
+struct TLSInfoEntry {
+ unsigned long Key = 0;
+ unsigned long DataAddress = 0;
+};
+
+struct TLSDescriptor {
+ void (*Resolver)(void *);
+ TLSInfoEntry *InfoEntry;
+};
+
+class ELFNixPlatformRuntimeState {
+private:
+ struct AtExitEntry {
+ void (*Func)(void *);
+ void *Arg;
+ };
+
+ using AtExitsVector = std::vector<AtExitEntry>;
+
+ struct PerJITDylibState {
+ void *Header = nullptr;
+ size_t RefCount = 0;
+ bool AllowReinitialization = false;
+ AtExitsVector AtExits;
+ };
+
+public:
+ static void initialize(void *DSOHandle);
+ static ELFNixPlatformRuntimeState &get();
+ static void destroy();
+
+ ELFNixPlatformRuntimeState(void *DSOHandle);
+
+ // Delete copy and move constructors.
+ ELFNixPlatformRuntimeState(const ELFNixPlatformRuntimeState &) = delete;
+ ELFNixPlatformRuntimeState &
+ operator=(const ELFNixPlatformRuntimeState &) = delete;
+ ELFNixPlatformRuntimeState(ELFNixPlatformRuntimeState &&) = delete;
+ ELFNixPlatformRuntimeState &operator=(ELFNixPlatformRuntimeState &&) = delete;
+
+ Error registerObjectSections(ELFNixPerObjectSectionsToRegister POSR);
+ Error deregisterObjectSections(ELFNixPerObjectSectionsToRegister POSR);
+
+ const char *dlerror();
+ void *dlopen(std::string_view Name, int Mode);
+ int dlclose(void *DSOHandle);
+ void *dlsym(void *DSOHandle, std::string_view Symbol);
+
+ int registerAtExit(void (*F)(void *), void *Arg, void *DSOHandle);
+ void runAtExits(void *DSOHandle);
+
+ /// Returns the base address of the section containing ThreadData.
+ Expected<std::pair<const char *, size_t>>
+ getThreadDataSectionFor(const char *ThreadData);
+
+ void *getPlatformJDDSOHandle() { return PlatformJDDSOHandle; }
+
+private:
+ PerJITDylibState *getJITDylibStateByHeaderAddr(void *DSOHandle);
+ PerJITDylibState *getJITDylibStateByName(std::string_view Path);
+ PerJITDylibState &
+ getOrCreateJITDylibState(ELFNixJITDylibInitializers &MOJDIs);
+
+ Error registerThreadDataSection(span<const char> ThreadDataSection);
+
+ Expected<ExecutorAddr> lookupSymbolInJITDylib(void *DSOHandle,
+ std::string_view Symbol);
+
+ Expected<ELFNixJITDylibInitializerSequence>
+ getJITDylibInitializersByName(std::string_view Path);
+ Expected<void *> dlopenInitialize(std::string_view Path, int Mode);
+ Error initializeJITDylib(ELFNixJITDylibInitializers &MOJDIs);
+
+ static ELFNixPlatformRuntimeState *MOPS;
+
+ void *PlatformJDDSOHandle;
+
+ // Frame registration functions:
+ void (*registerEHFrameSection)(const void *) = nullptr;
+ void (*deregisterEHFrameSection)(const void *) = nullptr;
+
+ // FIXME: Move to thread-state.
+ std::string DLFcnError;
+
+ std::recursive_mutex JDStatesMutex;
+ std::unordered_map<void *, PerJITDylibState> JDStates;
+ std::unordered_map<std::string, void *> JDNameToHeader;
+
+ std::mutex ThreadDataSectionsMutex;
+ std::map<const char *, size_t> ThreadDataSections;
+};
+
+ELFNixPlatformRuntimeState *ELFNixPlatformRuntimeState::MOPS = nullptr;
+
+void ELFNixPlatformRuntimeState::initialize(void *DSOHandle) {
+ assert(!MOPS && "ELFNixPlatformRuntimeState should be null");
+ MOPS = new ELFNixPlatformRuntimeState(DSOHandle);
+}
+
+ELFNixPlatformRuntimeState &ELFNixPlatformRuntimeState::get() {
+ assert(MOPS && "ELFNixPlatformRuntimeState not initialized");
+ return *MOPS;
+}
+
+void ELFNixPlatformRuntimeState::destroy() {
+ assert(MOPS && "ELFNixPlatformRuntimeState not initialized");
+ delete MOPS;
+}
+
+ELFNixPlatformRuntimeState::ELFNixPlatformRuntimeState(void *DSOHandle)
+ : PlatformJDDSOHandle(DSOHandle) {
+ if (__unw_add_dynamic_eh_frame_section &&
+ __unw_remove_dynamic_eh_frame_section) {
+ registerEHFrameSection = __unw_add_dynamic_eh_frame_section;
+ deregisterEHFrameSection = __unw_remove_dynamic_eh_frame_section;
+ } else {
+ registerEHFrameSection = __register_frame;
+ deregisterEHFrameSection = __deregister_frame;
+ }
+}
+
+Error ELFNixPlatformRuntimeState::registerObjectSections(
+ ELFNixPerObjectSectionsToRegister POSR) {
+ if (POSR.EHFrameSection.Start)
+ registerEHFrameSection(POSR.EHFrameSection.Start.toPtr<const char *>());
+
+ if (POSR.ThreadDataSection.Start) {
+ if (auto Err = registerThreadDataSection(
+ POSR.ThreadDataSection.toSpan<const char>()))
+ return Err;
+ }
+
+ return Error::success();
+}
+
+Error ELFNixPlatformRuntimeState::deregisterObjectSections(
+ ELFNixPerObjectSectionsToRegister POSR) {
+ if (POSR.EHFrameSection.Start)
+ deregisterEHFrameSection(POSR.EHFrameSection.Start.toPtr<const char *>());
+
+ return Error::success();
+}
+
+const char *ELFNixPlatformRuntimeState::dlerror() { return DLFcnError.c_str(); }
+
+void *ELFNixPlatformRuntimeState::dlopen(std::string_view Path, int Mode) {
+ std::lock_guard<std::recursive_mutex> Lock(JDStatesMutex);
+
+ // Use fast path if all JITDylibs are already loaded and don't require
+ // re-running initializers.
+ if (auto *JDS = getJITDylibStateByName(Path)) {
+ if (!JDS->AllowReinitialization) {
+ ++JDS->RefCount;
+ return JDS->Header;
+ }
+ }
+
+ auto H = dlopenInitialize(Path, Mode);
+ if (!H) {
+ DLFcnError = toString(H.takeError());
+ return nullptr;
+ }
+
+ return *H;
+}
+
+int ELFNixPlatformRuntimeState::dlclose(void *DSOHandle) {
+ runAtExits(DSOHandle);
+ return 0;
+}
+
+void *ELFNixPlatformRuntimeState::dlsym(void *DSOHandle,
+ std::string_view Symbol) {
+ auto Addr = lookupSymbolInJITDylib(DSOHandle, Symbol);
+ if (!Addr) {
+ DLFcnError = toString(Addr.takeError());
+ return 0;
+ }
+
+ return Addr->toPtr<void *>();
+}
+
+int ELFNixPlatformRuntimeState::registerAtExit(void (*F)(void *), void *Arg,
+ void *DSOHandle) {
+ // FIXME: Handle out-of-memory errors, returning -1 if OOM.
+ std::lock_guard<std::recursive_mutex> Lock(JDStatesMutex);
+ auto *JDS = getJITDylibStateByHeaderAddr(DSOHandle);
+ assert(JDS && "JITDylib state not initialized");
+ JDS->AtExits.push_back({F, Arg});
+ return 0;
+}
+
+void ELFNixPlatformRuntimeState::runAtExits(void *DSOHandle) {
+ // FIXME: Should atexits be allowed to run concurrently with access to
+ // JDState?
+ AtExitsVector V;
+ {
+ std::lock_guard<std::recursive_mutex> Lock(JDStatesMutex);
+ auto *JDS = getJITDylibStateByHeaderAddr(DSOHandle);
+ assert(JDS && "JITDlybi state not initialized");
+ std::swap(V, JDS->AtExits);
+ }
+
+ while (!V.empty()) {
+ auto &AE = V.back();
+ AE.Func(AE.Arg);
+ V.pop_back();
+ }
+}
+
+Expected<std::pair<const char *, size_t>>
+ELFNixPlatformRuntimeState::getThreadDataSectionFor(const char *ThreadData) {
+ std::lock_guard<std::mutex> Lock(ThreadDataSectionsMutex);
+ auto I = ThreadDataSections.upper_bound(ThreadData);
+ // Check that we have a valid entry covering this address.
+ if (I == ThreadDataSections.begin())
+ return make_error<StringError>("No thread local data section for key");
+ I = std::prev(I);
+ if (ThreadData >= I->first + I->second)
+ return make_error<StringError>("No thread local data section for key");
+ return *I;
+}
+
+ELFNixPlatformRuntimeState::PerJITDylibState *
+ELFNixPlatformRuntimeState::getJITDylibStateByHeaderAddr(void *DSOHandle) {
+ auto I = JDStates.find(DSOHandle);
+ if (I == JDStates.end())
+ return nullptr;
+ return &I->second;
+}
+
+ELFNixPlatformRuntimeState::PerJITDylibState *
+ELFNixPlatformRuntimeState::getJITDylibStateByName(std::string_view Name) {
+ // FIXME: Avoid creating string copy here.
+ auto I = JDNameToHeader.find(std::string(Name.data(), Name.size()));
+ if (I == JDNameToHeader.end())
+ return nullptr;
+ void *H = I->second;
+ auto J = JDStates.find(H);
+ assert(J != JDStates.end() &&
+ "JITDylib has name map entry but no header map entry");
+ return &J->second;
+}
+
+ELFNixPlatformRuntimeState::PerJITDylibState &
+ELFNixPlatformRuntimeState::getOrCreateJITDylibState(
+ ELFNixJITDylibInitializers &MOJDIs) {
+ void *Header = MOJDIs.DSOHandleAddress.toPtr<void *>();
+
+ auto &JDS = JDStates[Header];
+
+ // If this entry hasn't been created yet.
+ if (!JDS.Header) {
+ assert(!JDNameToHeader.count(MOJDIs.Name) &&
+ "JITDylib has header map entry but no name map entry");
+ JDNameToHeader[MOJDIs.Name] = Header;
+ JDS.Header = Header;
+ }
+
+ return JDS;
+}
+
+Error ELFNixPlatformRuntimeState::registerThreadDataSection(
+ span<const char> ThreadDataSection) {
+ std::lock_guard<std::mutex> Lock(ThreadDataSectionsMutex);
+ auto I = ThreadDataSections.upper_bound(ThreadDataSection.data());
+ if (I != ThreadDataSections.begin()) {
+ auto J = std::prev(I);
+ if (J->first + J->second > ThreadDataSection.data())
+ return make_error<StringError>("Overlapping .tdata sections");
+ }
+ ThreadDataSections.insert(
+ I, std::make_pair(ThreadDataSection.data(), ThreadDataSection.size()));
+ return Error::success();
+}
+
+Expected<ExecutorAddr>
+ELFNixPlatformRuntimeState::lookupSymbolInJITDylib(void *DSOHandle,
+ std::string_view Sym) {
+ Expected<ExecutorAddr> Result((ExecutorAddr()));
+ if (auto Err = WrapperFunction<SPSExpected<SPSExecutorAddr>(
+ SPSExecutorAddr, SPSString)>::call(&__orc_rt_elfnix_symbol_lookup_tag,
+ Result,
+ ExecutorAddr::fromPtr(DSOHandle),
+ Sym))
+ return std::move(Err);
+ return Result;
+}
+
+Expected<ELFNixJITDylibInitializerSequence>
+ELFNixPlatformRuntimeState::getJITDylibInitializersByName(
+ std::string_view Path) {
+ Expected<ELFNixJITDylibInitializerSequence> Result(
+ (ELFNixJITDylibInitializerSequence()));
+ std::string PathStr(Path.data(), Path.size());
+ if (auto Err =
+ WrapperFunction<SPSExpected<SPSELFNixJITDylibInitializerSequence>(
+ SPSString)>::call(&__orc_rt_elfnix_get_initializers_tag, Result,
+ PathStr))
+ return std::move(Err);
+ return Result;
+}
+
+Expected<void *>
+ELFNixPlatformRuntimeState::dlopenInitialize(std::string_view Path, int Mode) {
+ // Either our JITDylib wasn't loaded, or it or one of its dependencies allows
+ // reinitialization. We need to call in to the JIT to see if there's any new
+ // work pending.
+ auto InitSeq = getJITDylibInitializersByName(Path);
+ if (!InitSeq)
+ return InitSeq.takeError();
+
+ // Init sequences should be non-empty.
+ if (InitSeq->empty())
+ return make_error<StringError>(
+ "__orc_rt_elfnix_get_initializers returned an "
+ "empty init sequence");
+
+ // Otherwise register and run initializers for each JITDylib.
+ for (auto &MOJDIs : *InitSeq)
+ if (auto Err = initializeJITDylib(MOJDIs))
+ return std::move(Err);
+
+ // Return the header for the last item in the list.
+ auto *JDS = getJITDylibStateByHeaderAddr(
+ InitSeq->back().DSOHandleAddress.toPtr<void *>());
+ assert(JDS && "Missing state entry for JD");
+ return JDS->Header;
+}
+
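+// Extract the numeric priority suffix used to order .init_array.N
+// sections, e.g. ".init_array.101" -> 101; a section with no suffix
+// (".init_array") gets the default priority 65535 and runs last.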
+long getPriority(const std::string &name) {
+ auto pos = name.find_last_not_of("0123456789");
+ if (pos == name.size() - 1)
+ return 65535;
+ else
+ return std::strtol(name.c_str() + pos + 1, nullptr, 10);
+}
+
+Error ELFNixPlatformRuntimeState::initializeJITDylib(
+ ELFNixJITDylibInitializers &MOJDIs) {
+
+ auto &JDS = getOrCreateJITDylibState(MOJDIs);
+ ++JDS.RefCount;
+
+ using SectionList = std::vector<ExecutorAddrRange>;
+ std::sort(MOJDIs.InitSections.begin(), MOJDIs.InitSections.end(),
+ [](const std::pair<std::string, SectionList> &LHS,
+ const std::pair<std::string, SectionList> &RHS) -> bool {
+ return getPriority(LHS.first) < getPriority(RHS.first);
+ });
+ for (auto &Entry : MOJDIs.InitSections)
+ if (auto Err = runInitArray(Entry.second, MOJDIs))
+ return Err;
+
+ return Error::success();
+}
+
+class ELFNixPlatformRuntimeTLVManager {
+public:
+ void *getInstance(const char *ThreadData);
+
+private:
+ std::unordered_map<const char *, char *> Instances;
+ std::unordered_map<const char *, std::unique_ptr<char[]>> AllocatedSections;
+};
+
+void *ELFNixPlatformRuntimeTLVManager::getInstance(const char *ThreadData) {
+ auto I = Instances.find(ThreadData);
+ if (I != Instances.end())
+ return I->second;
+ auto TDS =
+ ELFNixPlatformRuntimeState::get().getThreadDataSectionFor(ThreadData);
+ if (!TDS) {
+ __orc_rt_log_error(toString(TDS.takeError()).c_str());
+ return nullptr;
+ }
+
+ auto &Allocated = AllocatedSections[TDS->first];
+ if (!Allocated) {
+ Allocated = std::make_unique<char[]>(TDS->second);
+ memcpy(Allocated.get(), TDS->first, TDS->second);
+ }
+ size_t ThreadDataDelta = ThreadData - TDS->first;
+ assert(ThreadDataDelta <= TDS->second && "ThreadData outside section bounds");
+
+ char *Instance = Allocated.get() + ThreadDataDelta;
+ Instances[ThreadData] = Instance;
+ return Instance;
+}
+
+void destroyELFNixTLVMgr(void *ELFNixTLVMgr) {
+ delete static_cast<ELFNixPlatformRuntimeTLVManager *>(ELFNixTLVMgr);
+}
+
+} // end anonymous namespace
+
+//------------------------------------------------------------------------------
+// JIT entry points
+//------------------------------------------------------------------------------
+
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
+__orc_rt_elfnix_platform_bootstrap(char *ArgData, size_t ArgSize) {
+ return WrapperFunction<void(uint64_t)>::handle(
+ ArgData, ArgSize,
+ [](uint64_t &DSOHandle) {
+ ELFNixPlatformRuntimeState::initialize(
+ reinterpret_cast<void *>(DSOHandle));
+ })
+ .release();
+}
+
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
+__orc_rt_elfnix_platform_shutdown(char *ArgData, size_t ArgSize) {
+ ELFNixPlatformRuntimeState::destroy();
+ return WrapperFunctionResult().release();
+}
+
+/// Wrapper function for registering metadata on a per-object basis.
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
+__orc_rt_elfnix_register_object_sections(char *ArgData, size_t ArgSize) {
+ return WrapperFunction<SPSError(SPSELFNixPerObjectSectionsToRegister)>::
+ handle(ArgData, ArgSize,
+ [](ELFNixPerObjectSectionsToRegister &POSR) {
+ return ELFNixPlatformRuntimeState::get().registerObjectSections(
+ std::move(POSR));
+ })
+ .release();
+}
+
+/// Wrapper function for releasing per-object metadata.
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
+__orc_rt_elfnix_deregister_object_sections(char *ArgData, size_t ArgSize) {
+ return WrapperFunction<SPSError(SPSELFNixPerObjectSectionsToRegister)>::
+ handle(ArgData, ArgSize,
+ [](ELFNixPerObjectSectionsToRegister &POSR) {
+ return ELFNixPlatformRuntimeState::get()
+ .deregisterObjectSections(std::move(POSR));
+ })
+ .release();
+}
+
+//------------------------------------------------------------------------------
+// TLV support
+//------------------------------------------------------------------------------
+
+ORC_RT_INTERFACE void *__orc_rt_elfnix_tls_get_addr_impl(TLSInfoEntry *D) {
+ auto *TLVMgr = static_cast<ELFNixPlatformRuntimeTLVManager *>(
+ pthread_getspecific(D->Key));
+ if (!TLVMgr)
+ TLVMgr = new ELFNixPlatformRuntimeTLVManager();
+ if (pthread_setspecific(D->Key, TLVMgr)) {
+ __orc_rt_log_error("Call to pthread_setspecific failed");
+ return nullptr;
+ }
+
+ return TLVMgr->getInstance(
+ reinterpret_cast<char *>(static_cast<uintptr_t>(D->DataAddress)));
+}
+
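+// Per the TLSDESC ABI, the resolver returns the variable's offset from the
+// thread pointer; the caller adds it to the thread pointer (TPIDR_EL0 on
+// aarch64 -- see the register-preserving stub in elfnix_tls.aarch64.S).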
+ORC_RT_INTERFACE ptrdiff_t ___orc_rt_elfnix_tlsdesc_resolver_impl(
+ TLSDescriptor *D, const char *ThreadPointer) {
+ const char *TLVPtr = reinterpret_cast<const char *>(
+ __orc_rt_elfnix_tls_get_addr_impl(D->InfoEntry));
+ return TLVPtr - ThreadPointer;
+}
+
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
+__orc_rt_elfnix_create_pthread_key(char *ArgData, size_t ArgSize) {
+ return WrapperFunction<SPSExpected<uint64_t>(void)>::handle(
+ ArgData, ArgSize,
+ []() -> Expected<uint64_t> {
+ pthread_key_t Key;
+ if (int Err = pthread_key_create(&Key, destroyELFNixTLVMgr)) {
+ __orc_rt_log_error("Call to pthread_key_create failed");
+ return make_error<StringError>(strerror(Err));
+ }
+ return static_cast<uint64_t>(Key);
+ })
+ .release();
+}
+
+//------------------------------------------------------------------------------
+// cxa_atexit support
+//------------------------------------------------------------------------------
+
+int __orc_rt_elfnix_cxa_atexit(void (*func)(void *), void *arg,
+ void *dso_handle) {
+ return ELFNixPlatformRuntimeState::get().registerAtExit(func, arg,
+ dso_handle);
+}
+
+int __orc_rt_elfnix_atexit(void (*func)(void *)) {
+ auto &PlatformRTState = ELFNixPlatformRuntimeState::get();
+ return PlatformRTState.registerAtExit(
+ func, nullptr, PlatformRTState.getPlatformJDDSOHandle());
+}
+
+void __orc_rt_elfnix_cxa_finalize(void *dso_handle) {
+ ELFNixPlatformRuntimeState::get().runAtExits(dso_handle);
+}
+
+//------------------------------------------------------------------------------
+// JIT'd dlfcn alternatives.
+//------------------------------------------------------------------------------
+
+const char *__orc_rt_elfnix_jit_dlerror() {
+ return ELFNixPlatformRuntimeState::get().dlerror();
+}
+
+void *__orc_rt_elfnix_jit_dlopen(const char *path, int mode) {
+ return ELFNixPlatformRuntimeState::get().dlopen(path, mode);
+}
+
+int __orc_rt_elfnix_jit_dlclose(void *dso_handle) {
+ return ELFNixPlatformRuntimeState::get().dlclose(dso_handle);
+}
+
+void *__orc_rt_elfnix_jit_dlsym(void *dso_handle, const char *symbol) {
+ return ELFNixPlatformRuntimeState::get().dlsym(dso_handle, symbol);
+}
+
+//------------------------------------------------------------------------------
+// ELFNix Run Program
+//------------------------------------------------------------------------------
+
+ORC_RT_INTERFACE int64_t __orc_rt_elfnix_run_program(
+ const char *JITDylibName, const char *EntrySymbolName, int argc,
+ char *argv[]) {
+ using MainTy = int (*)(int, char *[]);
+
+ void *H = __orc_rt_elfnix_jit_dlopen(JITDylibName,
+ __orc_rt::elfnix::ORC_RT_RTLD_LAZY);
+ if (!H) {
+ __orc_rt_log_error(__orc_rt_elfnix_jit_dlerror());
+ return -1;
+ }
+
+ auto *Main =
+ reinterpret_cast<MainTy>(__orc_rt_elfnix_jit_dlsym(H, EntrySymbolName));
+
+ if (!Main) {
+ __orc_rt_log_error(__orc_rt_elfnix_jit_dlerror());
+ return -1;
+ }
+
+ int Result = Main(argc, argv);
+
+ if (__orc_rt_elfnix_jit_dlclose(H) == -1)
+ __orc_rt_log_error(__orc_rt_elfnix_jit_dlerror());
+
+ return Result;
+}
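
The cxa_atexit entry points above give JIT'd code the usual destructor semantics: registrations are recorded per JITDylib and run in LIFO order when the dylib is finalized or closed. A small sketch (Logger and the helper names are illustrative):

    struct Logger { /* ... */ };

    void destroyLogger(void *Arg) { delete static_cast<Logger *>(Arg); }

    // In JIT'd code, with H the DSO handle of the containing JITDylib:
    void initLogger(void *H) {
      __orc_rt_elfnix_cxa_atexit(destroyLogger, new Logger(), H);
    }

    // __orc_rt_elfnix_jit_dlclose(H) -- or __orc_rt_elfnix_cxa_finalize(H) --
    // later pops and runs the registered entries, newest first.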
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/elfnix_platform.h b/contrib/llvm-project/compiler-rt/lib/orc/elfnix_platform.h
new file mode 100644
index 000000000000..e0ee9591dfc6
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/orc/elfnix_platform.h
@@ -0,0 +1,131 @@
+//===- elfnix_platform.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// ORC Runtime support for dynamic loading features on ELF-based platforms.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef ORC_RT_ELFNIX_PLATFORM_H
+#define ORC_RT_ELFNIX_PLATFORM_H
+
+#include "common.h"
+#include "executor_address.h"
+
+// Atexit functions.
+ORC_RT_INTERFACE int __orc_rt_elfnix_cxa_atexit(void (*func)(void *), void *arg,
+ void *dso_handle);
+ORC_RT_INTERFACE int __orc_rt_elfnix_atexit(void (*func)(void *));
+ORC_RT_INTERFACE void __orc_rt_elfnix_cxa_finalize(void *dso_handle);
+
+// dlfcn functions.
+ORC_RT_INTERFACE const char *__orc_rt_elfnix_jit_dlerror();
+ORC_RT_INTERFACE void *__orc_rt_elfnix_jit_dlopen(const char *path, int mode);
+ORC_RT_INTERFACE int __orc_rt_elfnix_jit_dlclose(void *dso_handle);
+ORC_RT_INTERFACE void *__orc_rt_elfnix_jit_dlsym(void *dso_handle,
+ const char *symbol);
+
+namespace __orc_rt {
+namespace elfnix {
+
+struct ELFNixPerObjectSectionsToRegister {
+ ExecutorAddrRange EHFrameSection;
+ ExecutorAddrRange ThreadDataSection;
+};
+
+struct ELFNixJITDylibInitializers {
+ using SectionList = std::vector<ExecutorAddrRange>;
+
+ ELFNixJITDylibInitializers() = default;
+ ELFNixJITDylibInitializers(std::string Name, ExecutorAddr DSOHandleAddress)
+ : Name(std::move(Name)), DSOHandleAddress(std::move(DSOHandleAddress)) {}
+
+ std::string Name;
+ ExecutorAddr DSOHandleAddress;
+
+ std::vector<std::pair<std::string, SectionList>> InitSections;
+};
+
+class ELFNixJITDylibDeinitializers {};
+
+using ELFNixJITDylibInitializerSequence =
+ std::vector<ELFNixJITDylibInitializers>;
+
+using ELFNixJITDylibDeinitializerSequence =
+ std::vector<ELFNixJITDylibDeinitializers>;
+
+enum dlopen_mode : int {
+ ORC_RT_RTLD_LAZY = 0x1,
+ ORC_RT_RTLD_NOW = 0x2,
+ ORC_RT_RTLD_LOCAL = 0x4,
+ ORC_RT_RTLD_GLOBAL = 0x8
+};
+
+} // end namespace elfnix
+
+using SPSELFNixPerObjectSectionsToRegister =
+ SPSTuple<SPSExecutorAddrRange, SPSExecutorAddrRange>;
+
+template <>
+class SPSSerializationTraits<SPSELFNixPerObjectSectionsToRegister,
+ elfnix::ELFNixPerObjectSectionsToRegister> {
+
+public:
+ static size_t size(const elfnix::ELFNixPerObjectSectionsToRegister &MOPOSR) {
+ return SPSELFNixPerObjectSectionsToRegister::AsArgList::size(
+ MOPOSR.EHFrameSection, MOPOSR.ThreadDataSection);
+ }
+
+ static bool
+ serialize(SPSOutputBuffer &OB,
+ const elfnix::ELFNixPerObjectSectionsToRegister &MOPOSR) {
+ return SPSELFNixPerObjectSectionsToRegister::AsArgList::serialize(
+ OB, MOPOSR.EHFrameSection, MOPOSR.ThreadDataSection);
+ }
+
+ static bool deserialize(SPSInputBuffer &IB,
+ elfnix::ELFNixPerObjectSectionsToRegister &MOPOSR) {
+ return SPSELFNixPerObjectSectionsToRegister::AsArgList::deserialize(
+ IB, MOPOSR.EHFrameSection, MOPOSR.ThreadDataSection);
+ }
+};
+
+using SPSNamedExecutorAddrRangeSequenceMap =
+ SPSSequence<SPSTuple<SPSString, SPSExecutorAddrRangeSequence>>;
+
+using SPSELFNixJITDylibInitializers =
+ SPSTuple<SPSString, SPSExecutorAddr, SPSNamedExecutorAddrRangeSequenceMap>;
+
+using SPSELFNixJITDylibInitializerSequence =
+ SPSSequence<SPSELFNixJITDylibInitializers>;
+
+/// Serialization traits for ELFNixJITDylibInitializers.
+template <>
+class SPSSerializationTraits<SPSELFNixJITDylibInitializers,
+ elfnix::ELFNixJITDylibInitializers> {
+public:
+ static size_t size(const elfnix::ELFNixJITDylibInitializers &MOJDIs) {
+ return SPSELFNixJITDylibInitializers::AsArgList::size(
+ MOJDIs.Name, MOJDIs.DSOHandleAddress, MOJDIs.InitSections);
+ }
+
+ static bool serialize(SPSOutputBuffer &OB,
+ const elfnix::ELFNixJITDylibInitializers &MOJDIs) {
+ return SPSELFNixJITDylibInitializers::AsArgList::serialize(
+ OB, MOJDIs.Name, MOJDIs.DSOHandleAddress, MOJDIs.InitSections);
+ }
+
+ static bool deserialize(SPSInputBuffer &IB,
+ elfnix::ELFNixJITDylibInitializers &MOJDIs) {
+ return SPSELFNixJITDylibInitializers::AsArgList::deserialize(
+ IB, MOJDIs.Name, MOJDIs.DSOHandleAddress, MOJDIs.InitSections);
+ }
+};
+
+} // end namespace __orc_rt
+
+#endif // ORC_RT_ELFNIX_PLATFORM_H
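
A round-trip sketch for the serialization traits above, assuming the SPSOutputBuffer/SPSInputBuffer constructors (raw buffer plus size) and the SPSArgList entry points from simple_packed_serialization.h:

    elfnix::ELFNixPerObjectSectionsToRegister POSR;
    POSR.EHFrameSection = {ExecutorAddr(0x1000), ExecutorAddr(0x2000)};

    using SPS = SPSELFNixPerObjectSectionsToRegister;
    size_t Size = SPSArgList<SPS>::size(POSR);
    auto Buffer = std::make_unique<char[]>(Size);

    SPSOutputBuffer OB(Buffer.get(), Size);
    bool SerOK = SPSArgList<SPS>::serialize(OB, POSR);

    elfnix::ELFNixPerObjectSectionsToRegister Out;
    SPSInputBuffer IB(Buffer.get(), Size);
    bool DeserOK = SPSArgList<SPS>::deserialize(IB, Out);
    // On success, Out.EHFrameSection == POSR.EHFrameSection.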
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/elfnix_tls.aarch64.S b/contrib/llvm-project/compiler-rt/lib/orc/elfnix_tls.aarch64.S
new file mode 100644
index 000000000000..8dcdd535be8a
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/orc/elfnix_tls.aarch64.S
@@ -0,0 +1,94 @@
+//===-- elfnix_tls.aarch64.S ---------------------------------------*- ASM -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of the ORC runtime support library.
+//
+//===----------------------------------------------------------------------===//
+
+// The content of this file is aarch64-only
+#if defined(__arm64__) || defined(__aarch64__)
+
+#define REGISTER_SAVE_SPACE_SIZE 32 * 24
+
+ .text
+
+ // returns address of TLV in x0, all other registers preserved
+ // TODO: add fast-path for repeat access
+ .globl ___orc_rt_elfnix_tlsdesc_resolver
+___orc_rt_elfnix_tlsdesc_resolver:
+ sub sp, sp, #REGISTER_SAVE_SPACE_SIZE
+ stp x29, x30, [sp, #16 * 1]
+ stp x27, x28, [sp, #16 * 2]
+ stp x25, x26, [sp, #16 * 3]
+ stp x23, x24, [sp, #16 * 4]
+ stp x21, x22, [sp, #16 * 5]
+ stp x19, x20, [sp, #16 * 6]
+ stp x17, x18, [sp, #16 * 7]
+ stp x15, x16, [sp, #16 * 8]
+ stp x13, x14, [sp, #16 * 9]
+ stp x11, x12, [sp, #16 * 10]
+ stp x9, x10, [sp, #16 * 11]
+ stp x7, x8, [sp, #16 * 12]
+ stp x5, x6, [sp, #16 * 13]
+ stp x3, x4, [sp, #16 * 14]
+ stp x1, x2, [sp, #16 * 15]
+ stp q30, q31, [sp, #32 * 8]
+ stp q28, q29, [sp, #32 * 9]
+ stp q26, q27, [sp, #32 * 10]
+ stp q24, q25, [sp, #32 * 11]
+ stp q22, q23, [sp, #32 * 12]
+ stp q20, q21, [sp, #32 * 13]
+ stp q18, q19, [sp, #32 * 14]
+ stp q16, q17, [sp, #32 * 15]
+ stp q14, q15, [sp, #32 * 16]
+ stp q12, q13, [sp, #32 * 17]
+ stp q10, q11, [sp, #32 * 18]
+ stp q8, q9, [sp, #32 * 19]
+ stp q6, q7, [sp, #32 * 20]
+ stp q4, q5, [sp, #32 * 21]
+ stp q2, q3, [sp, #32 * 22]
+ stp q0, q1, [sp, #32 * 23]
+
+ mrs x1, TPIDR_EL0 // get thread pointer
+ bl ___orc_rt_elfnix_tlsdesc_resolver_impl
+
+ ldp q0, q1, [sp, #32 * 23]
+ ldp q2, q3, [sp, #32 * 22]
+ ldp q4, q5, [sp, #32 * 21]
+ ldp q6, q7, [sp, #32 * 20]
+ ldp q8, q9, [sp, #32 * 19]
+ ldp q10, q11, [sp, #32 * 18]
+ ldp q12, q13, [sp, #32 * 17]
+ ldp q14, q15, [sp, #32 * 16]
+ ldp q16, q17, [sp, #32 * 15]
+ ldp q18, q19, [sp, #32 * 14]
+ ldp q20, q21, [sp, #32 * 13]
+ ldp q22, q23, [sp, #32 * 12]
+ ldp q24, q25, [sp, #32 * 11]
+ ldp q26, q27, [sp, #32 * 10]
+ ldp q28, q29, [sp, #32 * 9]
+ ldp q30, q31, [sp, #32 * 8]
+ ldp x1, x2, [sp, #16 * 15]
+ ldp x3, x4, [sp, #16 * 14]
+ ldp x5, x6, [sp, #16 * 13]
+ ldp x7, x8, [sp, #16 * 12]
+ ldp x9, x10, [sp, #16 * 11]
+ ldp x11, x12, [sp, #16 * 10]
+ ldp x13, x14, [sp, #16 * 9]
+ ldp x15, x16, [sp, #16 * 8]
+ ldp x17, x18, [sp, #16 * 7]
+ ldp x19, x20, [sp, #16 * 6]
+ ldp x21, x22, [sp, #16 * 5]
+ ldp x23, x24, [sp, #16 * 4]
+ ldp x25, x26, [sp, #16 * 3]
+ ldp x27, x28, [sp, #16 * 2]
+ ldp x29, x30, [sp, #16 * 1]
+ add sp, sp, #REGISTER_SAVE_SPACE_SIZE
+ ret
+
+#endif // defined(__arm64__) || defined(__aarch64__)
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/elfnix_tls.ppc64.S b/contrib/llvm-project/compiler-rt/lib/orc/elfnix_tls.ppc64.S
new file mode 100644
index 000000000000..84854795dba1
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/orc/elfnix_tls.ppc64.S
@@ -0,0 +1,33 @@
+//===-- elfnix_tls.ppc64.S --------------------------------------*- ASM -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of the ORC runtime support library.
+//
+//===----------------------------------------------------------------------===//
+
+// The content of this file is PowerPC64 only.
+#if defined(__powerpc64__)
+
+ .text
+ // TODO: add fast-path for repeat access.
+ // See https://github.com/llvm/llvm-project/issues/51162.
+ .global ___orc_rt_elfnix_tls_get_addr
+___orc_rt_elfnix_tls_get_addr:
+ addis 2, 12, .TOC.-___orc_rt_elfnix_tls_get_addr@ha
+ addi 2, 2, .TOC.-___orc_rt_elfnix_tls_get_addr@l
+ mflr 0
+ std 0, 16(1)
+ stdu 1, -32(1)
+ bl __orc_rt_elfnix_tls_get_addr_impl
+ nop
+ addi 1, 1, 32
+ ld 0, 16(1)
+ mtlr 0
+ blr
+
+#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/elfnix_tls.x86-64.S b/contrib/llvm-project/compiler-rt/lib/orc/elfnix_tls.x86-64.S
new file mode 100644
index 000000000000..b3e0bef00867
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/orc/elfnix_tls.x86-64.S
@@ -0,0 +1,64 @@
+//===-- elfnix_tls.x86-64.S -------------------------------------*- ASM -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of the ORC runtime support library.
+//
+//===----------------------------------------------------------------------===//
+
+// The content of this file is x86_64-only
+#if defined(__x86_64__)
+
+#define REGISTER_SAVE_SPACE_SIZE 512
+
+ .text
+
+ // returns address of TLV in %rax, all other registers preserved
+ .globl ___orc_rt_elfnix_tls_get_addr
+___orc_rt_elfnix_tls_get_addr:
+ pushq %rbp
+ movq %rsp, %rbp
+ subq $REGISTER_SAVE_SPACE_SIZE, %rsp
+ movq %rcx, -16(%rbp)
+ movq %rdx, -24(%rbp)
+ movq %rsi, -32(%rbp)
+ movq %rdi, -40(%rbp)
+ movq %r8, -48(%rbp)
+ movq %r9, -56(%rbp)
+ movq %r10, -64(%rbp)
+ movq %r11, -72(%rbp)
+ movdqa %xmm0, -128(%rbp)
+ movdqa %xmm1, -144(%rbp)
+ movdqa %xmm2, -160(%rbp)
+ movdqa %xmm3, -176(%rbp)
+ movdqa %xmm4, -192(%rbp)
+ movdqa %xmm5, -208(%rbp)
+ movdqa %xmm6, -224(%rbp)
+ movdqa %xmm7, -240(%rbp)
+ call __orc_rt_elfnix_tls_get_addr_impl
+ movq -16(%rbp), %rcx
+ movq -24(%rbp), %rdx
+ movq -32(%rbp), %rsi
+ movq -40(%rbp), %rdi
+ movq -48(%rbp), %r8
+ movq -56(%rbp), %r9
+ movq -64(%rbp), %r10
+ movq -72(%rbp), %r11
+ movdqa -128(%rbp), %xmm0
+ movdqa -144(%rbp), %xmm1
+ movdqa -160(%rbp), %xmm2
+ movdqa -176(%rbp), %xmm3
+ movdqa -192(%rbp), %xmm4
+ movdqa -208(%rbp), %xmm5
+ movdqa -224(%rbp), %xmm6
+ movdqa -240(%rbp), %xmm7
+ addq $REGISTER_SAVE_SPACE_SIZE, %rsp
+ popq %rbp
+ ret
+
+#endif // defined(__x86_64__)
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/error.h b/contrib/llvm-project/compiler-rt/lib/orc/error.h
index 92ac5a884ac6..4c378ecc01c4 100644
--- a/contrib/llvm-project/compiler-rt/lib/orc/error.h
+++ b/contrib/llvm-project/compiler-rt/lib/orc/error.h
@@ -113,9 +113,7 @@ private:
bool isChecked() const { return ErrPtr & 0x1; }
- void setChecked(bool Checked) {
- ErrPtr = (reinterpret_cast<uintptr_t>(ErrPtr) & ~uintptr_t(1)) | Checked;
- }
+ void setChecked(bool Checked) { ErrPtr = (ErrPtr & ~uintptr_t(1)) | Checked; }
template <typename ErrT = ErrorInfoBase> std::unique_ptr<ErrT> takePayload() {
static_assert(std::is_base_of<ErrorInfoBase, ErrT>::value,
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/executor_address.h b/contrib/llvm-project/compiler-rt/lib/orc/executor_address.h
index cfe985bdb60f..1542ee96bd92 100644
--- a/contrib/llvm-project/compiler-rt/lib/orc/executor_address.h
+++ b/contrib/llvm-project/compiler-rt/lib/orc/executor_address.h
@@ -24,39 +24,76 @@
namespace __orc_rt {
-/// Represents the difference between two addresses in the executor process.
-class ExecutorAddrDiff {
-public:
- ExecutorAddrDiff() = default;
- explicit ExecutorAddrDiff(uint64_t Value) : Value(Value) {}
-
- uint64_t getValue() const { return Value; }
-
-private:
- int64_t Value = 0;
-};
+using ExecutorAddrDiff = uint64_t;
/// Represents an address in the executor process.
-class ExecutorAddress {
+class ExecutorAddr {
public:
- ExecutorAddress() = default;
- explicit ExecutorAddress(uint64_t Addr) : Addr(Addr) {}
+ /// A wrap/unwrap function that leaves pointers unmodified.
+ template <typename T> using rawPtr = __orc_rt::identity<T *>;
+
+ /// Default wrap function to use on this host.
+ template <typename T> using defaultWrap = rawPtr<T>;
+
+ /// Default unwrap function to use on this host.
+ template <typename T> using defaultUnwrap = rawPtr<T>;
+
+ /// Merges a tag into the raw address value:
+ /// P' = P | (TagValue << TagOffset).
+ class Tag {
+ public:
+ constexpr Tag(uintptr_t TagValue, uintptr_t TagOffset)
+ : TagMask(TagValue << TagOffset) {}
+
+ template <typename T> constexpr T *operator()(T *P) {
+ return reinterpret_cast<T *>(reinterpret_cast<uintptr_t>(P) | TagMask);
+ }
+
+ private:
+ uintptr_t TagMask;
+ };
+
+ /// Strips a tag of the given length from the given offset within the pointer:
+ /// P' = P & ~(((1 << TagLen) -1) << TagOffset)
+ class Untag {
+ public:
+ constexpr Untag(uintptr_t TagLen, uintptr_t TagOffset)
+ : UntagMask(~(((uintptr_t(1) << TagLen) - 1) << TagOffset)) {}
+
+ template <typename T> constexpr T *operator()(T *P) {
+ return reinterpret_cast<T *>(reinterpret_cast<uintptr_t>(P) & UntagMask);
+ }
+
+ private:
+ uintptr_t UntagMask;
+ };
+
+ ExecutorAddr() = default;
+ explicit ExecutorAddr(uint64_t Addr) : Addr(Addr) {}
+
+ /// Create an ExecutorAddr from the given pointer.
+ template <typename T, typename UnwrapFn = defaultUnwrap<T>>
+ static ExecutorAddr fromPtr(T *Ptr, UnwrapFn &&Unwrap = UnwrapFn()) {
+ return ExecutorAddr(
+ static_cast<uint64_t>(reinterpret_cast<uintptr_t>(Unwrap(Ptr))));
+ }
- /// Create an ExecutorAddress from the given pointer.
- /// Warning: This should only be used when JITing in-process.
- template <typename T> static ExecutorAddress fromPtr(T *Value) {
- return ExecutorAddress(
- static_cast<uint64_t>(reinterpret_cast<uintptr_t>(Value)));
+ /// Cast this ExecutorAddr to a pointer of the given type.
+ template <typename T, typename WrapFn = defaultWrap<std::remove_pointer_t<T>>>
+ std::enable_if_t<std::is_pointer<T>::value, T>
+ toPtr(WrapFn &&Wrap = WrapFn()) const {
+ uintptr_t IntPtr = static_cast<uintptr_t>(Addr);
+ assert(IntPtr == Addr && "ExecutorAddr value out of range for uintptr_t");
+ return Wrap(reinterpret_cast<T>(IntPtr));
}
- /// Cast this ExecutorAddress to a pointer of the given type.
- /// Warning: This should only be esude when JITing in-process.
- template <typename T> T toPtr() const {
- static_assert(std::is_pointer<T>::value, "T must be a pointer type");
+ /// Cast this ExecutorAddr to a pointer of the given function type.
+ template <typename T, typename WrapFn = defaultWrap<T>>
+ std::enable_if_t<std::is_function<T>::value, T *>
+ toPtr(WrapFn &&Wrap = WrapFn()) const {
uintptr_t IntPtr = static_cast<uintptr_t>(Addr);
- assert(IntPtr == Addr &&
- "JITTargetAddress value out of range for uintptr_t");
- return reinterpret_cast<T>(IntPtr);
+ assert(IntPtr == Addr && "ExecutorAddr value out of range for uintptr_t");
+ return Wrap(reinterpret_cast<T *>(IntPtr));
}
uint64_t getValue() const { return Addr; }
@@ -65,54 +102,48 @@ public:
explicit operator bool() const { return Addr != 0; }
- friend bool operator==(const ExecutorAddress &LHS,
- const ExecutorAddress &RHS) {
+ friend bool operator==(const ExecutorAddr &LHS, const ExecutorAddr &RHS) {
return LHS.Addr == RHS.Addr;
}
- friend bool operator!=(const ExecutorAddress &LHS,
- const ExecutorAddress &RHS) {
+ friend bool operator!=(const ExecutorAddr &LHS, const ExecutorAddr &RHS) {
return LHS.Addr != RHS.Addr;
}
- friend bool operator<(const ExecutorAddress &LHS,
- const ExecutorAddress &RHS) {
+ friend bool operator<(const ExecutorAddr &LHS, const ExecutorAddr &RHS) {
return LHS.Addr < RHS.Addr;
}
- friend bool operator<=(const ExecutorAddress &LHS,
- const ExecutorAddress &RHS) {
+ friend bool operator<=(const ExecutorAddr &LHS, const ExecutorAddr &RHS) {
return LHS.Addr <= RHS.Addr;
}
- friend bool operator>(const ExecutorAddress &LHS,
- const ExecutorAddress &RHS) {
+ friend bool operator>(const ExecutorAddr &LHS, const ExecutorAddr &RHS) {
return LHS.Addr > RHS.Addr;
}
- friend bool operator>=(const ExecutorAddress &LHS,
- const ExecutorAddress &RHS) {
+ friend bool operator>=(const ExecutorAddr &LHS, const ExecutorAddr &RHS) {
return LHS.Addr >= RHS.Addr;
}
- ExecutorAddress &operator++() {
+ ExecutorAddr &operator++() {
++Addr;
return *this;
}
- ExecutorAddress &operator--() {
+ ExecutorAddr &operator--() {
--Addr;
return *this;
}
- ExecutorAddress operator++(int) { return ExecutorAddress(Addr++); }
- ExecutorAddress operator--(int) { return ExecutorAddress(Addr++); }
+ ExecutorAddr operator++(int) { return ExecutorAddr(Addr++); }
+ ExecutorAddr operator--(int) { return ExecutorAddr(Addr--); }
- ExecutorAddress &operator+=(const ExecutorAddrDiff Delta) {
- Addr += Delta.getValue();
+ ExecutorAddr &operator+=(const ExecutorAddrDiff Delta) {
+ Addr += Delta;
return *this;
}
- ExecutorAddress &operator-=(const ExecutorAddrDiff Delta) {
- Addr -= Delta.getValue();
+ ExecutorAddr &operator-=(const ExecutorAddrDiff Delta) {
+ Addr -= Delta;
return *this;
}
@@ -121,88 +152,112 @@ private:
};
/// Subtracting two addresses yields an offset.
-inline ExecutorAddrDiff operator-(const ExecutorAddress &LHS,
- const ExecutorAddress &RHS) {
+inline ExecutorAddrDiff operator-(const ExecutorAddr &LHS,
+ const ExecutorAddr &RHS) {
return ExecutorAddrDiff(LHS.getValue() - RHS.getValue());
}
/// Adding an offset and an address yields an address.
-inline ExecutorAddress operator+(const ExecutorAddress &LHS,
- const ExecutorAddrDiff &RHS) {
- return ExecutorAddress(LHS.getValue() + RHS.getValue());
+inline ExecutorAddr operator+(const ExecutorAddr &LHS,
+ const ExecutorAddrDiff &RHS) {
+ return ExecutorAddr(LHS.getValue() + RHS);
}
/// Adding an address and an offset yields an address.
-inline ExecutorAddress operator+(const ExecutorAddrDiff &LHS,
- const ExecutorAddress &RHS) {
- return ExecutorAddress(LHS.getValue() + RHS.getValue());
+inline ExecutorAddr operator+(const ExecutorAddrDiff &LHS,
+ const ExecutorAddr &RHS) {
+ return ExecutorAddr(LHS + RHS.getValue());
}
/// Represents an address range in the executor process.
-struct ExecutorAddressRange {
- ExecutorAddressRange() = default;
- ExecutorAddressRange(ExecutorAddress StartAddress, ExecutorAddress EndAddress)
- : StartAddress(StartAddress), EndAddress(EndAddress) {}
-
- bool empty() const { return StartAddress == EndAddress; }
- ExecutorAddrDiff size() const { return EndAddress - StartAddress; }
+struct ExecutorAddrRange {
+ ExecutorAddrRange() = default;
+ ExecutorAddrRange(ExecutorAddr Start, ExecutorAddr End)
+ : Start(Start), End(End) {}
+ ExecutorAddrRange(ExecutorAddr Start, ExecutorAddrDiff Size)
+ : Start(Start), End(Start + Size) {}
+
+ bool empty() const { return Start == End; }
+ ExecutorAddrDiff size() const { return End - Start; }
+
+ friend bool operator==(const ExecutorAddrRange &LHS,
+ const ExecutorAddrRange &RHS) {
+ return LHS.Start == RHS.Start && LHS.End == RHS.End;
+ }
+ friend bool operator!=(const ExecutorAddrRange &LHS,
+ const ExecutorAddrRange &RHS) {
+ return !(LHS == RHS);
+ }
+ bool contains(ExecutorAddr Addr) const { return Start <= Addr && Addr < End; }
+ bool overlaps(const ExecutorAddrRange &Other) const {
+ return !(Other.End <= Start || End <= Other.Start);
+ }
template <typename T> span<T> toSpan() const {
- assert(size().getValue() % sizeof(T) == 0 &&
+ assert(size() % sizeof(T) == 0 &&
"AddressRange is not a multiple of sizeof(T)");
- return span<T>(StartAddress.toPtr<T *>(), size().getValue() / sizeof(T));
+ return span<T>(Start.toPtr<T *>(), size() / sizeof(T));
}
- ExecutorAddress StartAddress;
- ExecutorAddress EndAddress;
+ ExecutorAddr Start;
+ ExecutorAddr End;
};
-/// SPS serializatior for ExecutorAddress.
-template <> class SPSSerializationTraits<SPSExecutorAddress, ExecutorAddress> {
+/// SPS serializer for ExecutorAddr.
+template <> class SPSSerializationTraits<SPSExecutorAddr, ExecutorAddr> {
public:
- static size_t size(const ExecutorAddress &EA) {
+ static size_t size(const ExecutorAddr &EA) {
return SPSArgList<uint64_t>::size(EA.getValue());
}
- static bool serialize(SPSOutputBuffer &BOB, const ExecutorAddress &EA) {
+ static bool serialize(SPSOutputBuffer &BOB, const ExecutorAddr &EA) {
return SPSArgList<uint64_t>::serialize(BOB, EA.getValue());
}
- static bool deserialize(SPSInputBuffer &BIB, ExecutorAddress &EA) {
+ static bool deserialize(SPSInputBuffer &BIB, ExecutorAddr &EA) {
uint64_t Tmp;
if (!SPSArgList<uint64_t>::deserialize(BIB, Tmp))
return false;
- EA = ExecutorAddress(Tmp);
+ EA = ExecutorAddr(Tmp);
return true;
}
};
-using SPSExecutorAddressRange =
- SPSTuple<SPSExecutorAddress, SPSExecutorAddress>;
+using SPSExecutorAddrRange = SPSTuple<SPSExecutorAddr, SPSExecutorAddr>;
/// Serialization traits for address ranges.
template <>
-class SPSSerializationTraits<SPSExecutorAddressRange, ExecutorAddressRange> {
+class SPSSerializationTraits<SPSExecutorAddrRange, ExecutorAddrRange> {
public:
- static size_t size(const ExecutorAddressRange &Value) {
- return SPSArgList<SPSExecutorAddress, SPSExecutorAddress>::size(
- Value.StartAddress, Value.EndAddress);
+ static size_t size(const ExecutorAddrRange &Value) {
+ return SPSArgList<SPSExecutorAddr, SPSExecutorAddr>::size(Value.Start,
+ Value.End);
}
- static bool serialize(SPSOutputBuffer &BOB,
- const ExecutorAddressRange &Value) {
- return SPSArgList<SPSExecutorAddress, SPSExecutorAddress>::serialize(
- BOB, Value.StartAddress, Value.EndAddress);
+ static bool serialize(SPSOutputBuffer &BOB, const ExecutorAddrRange &Value) {
+ return SPSArgList<SPSExecutorAddr, SPSExecutorAddr>::serialize(
+ BOB, Value.Start, Value.End);
}
- static bool deserialize(SPSInputBuffer &BIB, ExecutorAddressRange &Value) {
- return SPSArgList<SPSExecutorAddress, SPSExecutorAddress>::deserialize(
- BIB, Value.StartAddress, Value.EndAddress);
+ static bool deserialize(SPSInputBuffer &BIB, ExecutorAddrRange &Value) {
+ return SPSArgList<SPSExecutorAddr, SPSExecutorAddr>::deserialize(
+ BIB, Value.Start, Value.End);
}
};
-using SPSExecutorAddressRangeSequence = SPSSequence<SPSExecutorAddressRange>;
+using SPSExecutorAddrRangeSequence = SPSSequence<SPSExecutorAddrRange>;
} // End namespace __orc_rt
+namespace std {
+
+// Make ExecutorAddr hashable.
+template <> struct hash<__orc_rt::ExecutorAddr> {
+ size_t operator()(const __orc_rt::ExecutorAddr &A) const {
+ return hash<uint64_t>()(A.getValue());
+ }
+};
+
+} // namespace std
+
#endif // ORC_RT_EXECUTOR_ADDRESS_H
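
A short sketch of the new wrap/unwrap hooks on ExecutorAddr. By default fromPtr/toPtr are identity conversions; Tag and Untag fold a pointer tag in and out (this example assumes bit 63 of the host pointer is unused):

    int X = 0;
    auto A = ExecutorAddr::fromPtr(&X); // Default unwrap: raw pointer.
    int *P = A.toPtr<int *>();          // P == &X.

    // Set a one-bit tag at bit 63 when materializing the pointer...
    int *TaggedP = A.toPtr<int *>(ExecutorAddr::Tag(1, 63));
    // ...and strip it again when converting back to an address.
    auto B = ExecutorAddr::fromPtr(TaggedP, ExecutorAddr::Untag(1, 63));
    assert(B == A);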
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/executor_symbol_def.h b/contrib/llvm-project/compiler-rt/lib/orc/executor_symbol_def.h
new file mode 100644
index 000000000000..454cefe525cf
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/orc/executor_symbol_def.h
@@ -0,0 +1,151 @@
+//===--------- executor_symbol_def.h - (Addr, Flags) pair -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Represents a defining location for a symbol in the executing program.
+//
+// This file was derived from
+// llvm/include/llvm/ExecutionEngine/Orc/Shared/ExecutorSymbolDef.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef ORC_RT_EXECUTOR_SYMBOL_DEF_H
+#define ORC_RT_EXECUTOR_SYMBOL_DEF_H
+
+#include "bitmask_enum.h"
+#include "executor_address.h"
+#include "simple_packed_serialization.h"
+
+namespace __orc_rt {
+
+/// Flags for symbols in the JIT.
+class JITSymbolFlags {
+public:
+ using UnderlyingType = uint8_t;
+ using TargetFlagsType = uint8_t;
+
+ /// These values must be kept in sync with \c JITSymbolFlags in the JIT.
+ enum FlagNames : UnderlyingType {
+ None = 0,
+ HasError = 1U << 0,
+ Weak = 1U << 1,
+ Common = 1U << 2,
+ Absolute = 1U << 3,
+ Exported = 1U << 4,
+ Callable = 1U << 5,
+ MaterializationSideEffectsOnly = 1U << 6,
+ ORC_RT_MARK_AS_BITMASK_ENUM( // LargestValue =
+ MaterializationSideEffectsOnly)
+ };
+
+ /// Default-construct a JITSymbolFlags instance.
+ JITSymbolFlags() = default;
+
+ /// Construct a JITSymbolFlags instance from the given flags and target
+ /// flags.
+ JITSymbolFlags(FlagNames Flags, TargetFlagsType TargetFlags)
+ : TargetFlags(TargetFlags), Flags(Flags) {}
+
+ bool operator==(const JITSymbolFlags &RHS) const {
+ return Flags == RHS.Flags && TargetFlags == RHS.TargetFlags;
+ }
+
+ /// Get the underlying flags value as an integer.
+ UnderlyingType getRawFlagsValue() const {
+ return static_cast<UnderlyingType>(Flags);
+ }
+
+ /// Return a reference to the target-specific flags.
+ TargetFlagsType &getTargetFlags() { return TargetFlags; }
+
+ /// Return a reference to the target-specific flags.
+ const TargetFlagsType &getTargetFlags() const { return TargetFlags; }
+
+private:
+ TargetFlagsType TargetFlags = 0;
+ FlagNames Flags = None;
+};
+
+/// Represents a defining location for a JIT symbol.
+class ExecutorSymbolDef {
+public:
+ ExecutorSymbolDef() = default;
+ ExecutorSymbolDef(ExecutorAddr Addr, JITSymbolFlags Flags)
+ : Addr(Addr), Flags(Flags) {}
+
+ const ExecutorAddr &getAddress() const { return Addr; }
+
+ const JITSymbolFlags &getFlags() const { return Flags; }
+
+ friend bool operator==(const ExecutorSymbolDef &LHS,
+ const ExecutorSymbolDef &RHS) {
+ return LHS.getAddress() == RHS.getAddress() &&
+ LHS.getFlags() == RHS.getFlags();
+ }
+
+private:
+ ExecutorAddr Addr;
+ JITSymbolFlags Flags;
+};
+
+using SPSJITSymbolFlags =
+ SPSTuple<JITSymbolFlags::UnderlyingType, JITSymbolFlags::TargetFlagsType>;
+
+/// SPS serializer for JITSymbolFlags.
+template <> class SPSSerializationTraits<SPSJITSymbolFlags, JITSymbolFlags> {
+ using FlagsArgList = SPSJITSymbolFlags::AsArgList;
+
+public:
+ static size_t size(const JITSymbolFlags &F) {
+ return FlagsArgList::size(F.getRawFlagsValue(), F.getTargetFlags());
+ }
+
+ static bool serialize(SPSOutputBuffer &BOB, const JITSymbolFlags &F) {
+ return FlagsArgList::serialize(BOB, F.getRawFlagsValue(),
+ F.getTargetFlags());
+ }
+
+ static bool deserialize(SPSInputBuffer &BIB, JITSymbolFlags &F) {
+ JITSymbolFlags::UnderlyingType RawFlags;
+ JITSymbolFlags::TargetFlagsType TargetFlags;
+ if (!FlagsArgList::deserialize(BIB, RawFlags, TargetFlags))
+ return false;
+ F = JITSymbolFlags{static_cast<JITSymbolFlags::FlagNames>(RawFlags),
+ TargetFlags};
+ return true;
+ }
+};
+
+using SPSExecutorSymbolDef = SPSTuple<SPSExecutorAddr, SPSJITSymbolFlags>;
+
+/// SPS serializer for ExecutorSymbolDef.
+template <>
+class SPSSerializationTraits<SPSExecutorSymbolDef, ExecutorSymbolDef> {
+ using DefArgList = SPSExecutorSymbolDef::AsArgList;
+
+public:
+ static size_t size(const ExecutorSymbolDef &ESD) {
+ return DefArgList::size(ESD.getAddress(), ESD.getFlags());
+ }
+
+ static bool serialize(SPSOutputBuffer &BOB, const ExecutorSymbolDef &ESD) {
+ return DefArgList::serialize(BOB, ESD.getAddress(), ESD.getFlags());
+ }
+
+ static bool deserialize(SPSInputBuffer &BIB, ExecutorSymbolDef &ESD) {
+ ExecutorAddr Addr;
+ JITSymbolFlags Flags;
+ if (!DefArgList::deserialize(BIB, Addr, Flags))
+ return false;
+ ESD = ExecutorSymbolDef{Addr, Flags};
+ return true;
+ }
+};
+
+} // End namespace __orc_rt
+
+#endif // ORC_RT_EXECUTOR_SYMBOL_DEF_H
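
A construction sketch for the types above, assuming the bitwise operators that ORC_RT_MARK_AS_BITMASK_ENUM generates via bitmask_enum.h (myEntryPoint is an illustrative symbol):

    extern "C" void myEntryPoint() {}

    JITSymbolFlags Flags(JITSymbolFlags::Exported | JITSymbolFlags::Callable,
                         /*TargetFlags=*/0);
    ExecutorSymbolDef Def(ExecutorAddr::fromPtr(&myEntryPoint), Flags);

    // Round-trips through SPSExecutorSymbolDef compare equal:
    // deserialize(serialize(Def)) == Def.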
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/interval_map.h b/contrib/llvm-project/compiler-rt/lib/orc/interval_map.h
new file mode 100644
index 000000000000..8c1609d72f57
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/orc/interval_map.h
@@ -0,0 +1,168 @@
+//===--------- interval_map.h - A sorted interval map -----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implements a coalescing interval map.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef ORC_RT_INTERVAL_MAP_H
+#define ORC_RT_INTERVAL_MAP_H
+
+#include "adt.h"
+#include <cassert>
+#include <map>
+
+namespace __orc_rt {
+
+enum class IntervalCoalescing { Enabled, Disabled };
+
+/// Maps intervals to keys with optional coalescing.
+///
+/// NOTE: The interface is kept mostly compatible with LLVM's IntervalMap
+/// collection to make it easy to swap over in the future if we choose
+/// to.
+template <typename KeyT, typename ValT> class IntervalMapBase {
+private:
+ using KeyPairT = std::pair<KeyT, KeyT>;
+
+ struct Compare {
+ using is_transparent = std::true_type;
+ bool operator()(const KeyPairT &LHS, const KeyPairT &RHS) const {
+ return LHS < RHS;
+ }
+ bool operator()(const KeyPairT &LHS, const KeyT &RHS) const {
+ return LHS.first < RHS;
+ }
+ bool operator()(const KeyT &LHS, const KeyPairT &RHS) const {
+ return LHS < RHS.first;
+ }
+ };
+
+ using ImplMap = std::map<KeyPairT, ValT, Compare>;
+
+public:
+ using iterator = typename ImplMap::iterator;
+ using const_iterator = typename ImplMap::const_iterator;
+ using size_type = typename ImplMap::size_type;
+
+ bool empty() const { return Impl.empty(); }
+
+ void clear() { Impl.clear(); }
+
+ iterator begin() { return Impl.begin(); }
+ iterator end() { return Impl.end(); }
+
+ const_iterator begin() const { return Impl.begin(); }
+ const_iterator end() const { return Impl.end(); }
+
+ iterator find(KeyT K) {
+ // Early out if the key is clearly outside the range.
+ if (empty() || K < begin()->first.first ||
+ K >= std::prev(end())->first.second)
+ return end();
+
+ auto I = Impl.upper_bound(K);
+ assert(I != begin() && "Should have hit early out above");
+ I = std::prev(I);
+ if (K < I->first.second)
+ return I;
+ return end();
+ }
+
+ const_iterator find(KeyT K) const {
+ return const_cast<IntervalMapBase<KeyT, ValT> *>(this)->find(K);
+ }
+
+ ValT lookup(KeyT K, ValT NotFound = ValT()) const {
+ auto I = find(K);
+ if (I == end())
+ return NotFound;
+ return I->second;
+ }
+
+ // Erase [KS, KE), which must be entirely contained within one existing
+ // range in the map. Removal is allowed to split the range.
+ void erase(KeyT KS, KeyT KE) {
+ if (empty())
+ return;
+
+ auto J = Impl.upper_bound(KS);
+
+ // Bail out if the range to remove starts before the first range in the
+ // map: std::prev(J) below would otherwise be undefined.
+ if (J == Impl.begin())
+ return;
+
+ // Check previous range. Bail out if the range to remove is entirely
+ // after it.
+ auto I = std::prev(J);
+ if (KS >= I->first.second)
+ return;
+
+ // Assert that range is wholly contained.
+ assert(KE <= I->first.second);
+
+ auto Tmp = std::move(*I);
+ Impl.erase(I);
+
+ // Split-right -- introduce right-split range.
+ if (KE < Tmp.first.second) {
+ Impl.insert(
+ J, std::make_pair(std::make_pair(KE, Tmp.first.second), Tmp.second));
+ J = std::prev(J);
+ }
+
+ // Split-left -- introduce left-split range.
+ if (KS > Tmp.first.first)
+ Impl.insert(
+ J, std::make_pair(std::make_pair(Tmp.first.first, KS), Tmp.second));
+ }
+
+protected:
+ ImplMap Impl;
+};
+
+template <typename KeyT, typename ValT, IntervalCoalescing Coalescing>
+class IntervalMap;
+
+template <typename KeyT, typename ValT>
+class IntervalMap<KeyT, ValT, IntervalCoalescing::Enabled>
+ : public IntervalMapBase<KeyT, ValT> {
+public:
+ // Coalescing insert. Requires that ValTs be equality-comparable.
+ void insert(KeyT KS, KeyT KE, ValT V) {
+ auto J = this->Impl.upper_bound(KS);
+
+ // Coalesce-right if possible. Either way, J points at our insertion
+ // point.
+ if (J != this->end() && KE == J->first.first && J->second == V) {
+ KE = J->first.second;
+ auto Tmp = J++;
+ this->Impl.erase(Tmp);
+ }
+
+ // Coalesce-left if possible.
+ if (J != this->begin()) {
+ auto I = std::prev(J);
+ if (I->first.second == KS && I->second == V) {
+ KS = I->first.first;
+ this->Impl.erase(I);
+ }
+ }
+ this->Impl.insert(J, std::make_pair(std::make_pair(KS, KE), std::move(V)));
+ }
+};
+
+template <typename KeyT, typename ValT>
+class IntervalMap<KeyT, ValT, IntervalCoalescing::Disabled>
+ : public IntervalMapBase<KeyT, ValT> {
+public:
+ // Non-coalescing insert. Does not require ValT to be equality-comparable.
+ void insert(KeyT KS, KeyT KE, ValT V) {
+ this->Impl.insert(std::make_pair(std::make_pair(KS, KE), std::move(V)));
+ }
+};
+
+} // End namespace __orc_rt
+
+#endif // ORC_RT_INTERVAL_MAP_H
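
A minimal usage sketch of the coalescing map above (hypothetical example, not part of the patch; assumes the orc-rt headers are on the include path). Adjacent equal-valued intervals merge on insert, and erase may split a covering interval:

    #include "interval_map.h"
    #include <cassert>

    using __orc_rt::IntervalCoalescing;
    using __orc_rt::IntervalMap;

    int main() {
      IntervalMap<unsigned, int, IntervalCoalescing::Enabled> M;
      M.insert(0, 8, 42);   // [0, 8) -> 42
      M.insert(8, 16, 42);  // coalesces left: single entry [0, 16) -> 42
      assert(M.lookup(12) == 42);
      M.erase(4, 8);        // splits [0, 16) into [0, 4) and [8, 16)
      assert(M.find(6) == M.end());
      assert(M.lookup(2) == 42 && M.lookup(10) == 42);
      return 0;
    }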
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/interval_set.h b/contrib/llvm-project/compiler-rt/lib/orc/interval_set.h
new file mode 100644
index 000000000000..20f40f9c7d37
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/orc/interval_set.h
@@ -0,0 +1,87 @@
+//===--------- interval_set.h - A sorted interval set -----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implements a coalescing interval set.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef ORC_RT_INTERVAL_SET_H
+#define ORC_RT_INTERVAL_SET_H
+
+#include "interval_map.h"
+
+namespace __orc_rt {
+
+/// Implements a sorted interval set.
+///
+/// Adjacent intervals are coalesced when Coalescing is enabled.
+///
+/// NOTE: The interface is kept mostly compatible with LLVM's IntervalMap
+/// collection to make it easy to swap over in the future if we choose
+/// to.
+template <typename KeyT, IntervalCoalescing Coalescing>
+class IntervalSet {
+private:
+ using ImplMap = IntervalMap<KeyT, std::monostate, Coalescing>;
+public:
+
+ using value_type = std::pair<KeyT, KeyT>;
+
+ class const_iterator {
+ friend class IntervalSet;
+ public:
+ using difference_type = typename ImplMap::iterator::difference_type;
+ using value_type = IntervalSet::value_type;
+ using pointer = const value_type *;
+ using reference = const value_type &;
+ using iterator_category = std::input_iterator_tag;
+
+ const_iterator() = default;
+ const value_type &operator*() const { return I->first; }
+ const value_type *operator->() const { return &I->first; }
+ const_iterator &operator++() { ++I; return *this; }
+ const_iterator operator++(int) { auto Tmp = I; ++I; return Tmp; }
+ friend bool operator==(const const_iterator &LHS,
+ const const_iterator &RHS) {
+ return LHS.I == RHS.I;
+ }
+ friend bool operator!=(const const_iterator &LHS,
+ const const_iterator &RHS) {
+ return LHS.I != RHS.I;
+ }
+ private:
+ const_iterator(typename ImplMap::const_iterator I) : I(std::move(I)) {}
+ typename ImplMap::const_iterator I;
+ };
+
+ bool empty() const { return Map.empty(); }
+
+ void clear() { Map.clear(); }
+
+ const_iterator begin() const { return const_iterator(Map.begin()); }
+ const_iterator end() const { return const_iterator(Map.end()); }
+
+ const_iterator find(KeyT K) const {
+ return const_iterator(Map.find(K));
+ }
+
+ void insert(KeyT KS, KeyT KE) {
+ Map.insert(std::move(KS), std::move(KE), std::monostate());
+ }
+
+ void erase(KeyT KS, KeyT KE) {
+ Map.erase(KS, KE);
+ }
+
+private:
+ ImplMap Map;
+};
+
+} // End namespace __orc_rt
+
+#endif // ORC_RT_INTERVAL_SET_H
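
The set is a thin wrapper mapping intervals to std::monostate. A usage sketch (hypothetical, not part of the patch; assumes the orc-rt headers are on the include path):

    #include "interval_set.h"
    #include <cassert>
    #include <variant>  // for std::monostate, the set's value type

    using __orc_rt::IntervalCoalescing;
    using __orc_rt::IntervalSet;

    int main() {
      IntervalSet<char *, IntervalCoalescing::Enabled> S;
      static char Buf[32];
      S.insert(Buf, Buf + 16);
      S.insert(Buf + 16, Buf + 32); // coalesces into [Buf, Buf + 32)
      assert(S.find(Buf + 20) != S.end());
      S.erase(Buf, Buf + 32);       // removes the whole coalesced range
      assert(S.empty());
      return 0;
    }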
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/macho_platform.cpp b/contrib/llvm-project/compiler-rt/lib/orc/macho_platform.cpp
index 2a960fb548fa..340846f5f900 100644
--- a/contrib/llvm-project/compiler-rt/lib/orc/macho_platform.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/orc/macho_platform.cpp
@@ -11,212 +11,137 @@
//===----------------------------------------------------------------------===//
#include "macho_platform.h"
+#include "bitmask_enum.h"
#include "common.h"
+#include "debug.h"
#include "error.h"
+#include "interval_map.h"
#include "wrapper_function_utils.h"
+#include <algorithm>
+#include <ios>
#include <map>
#include <mutex>
#include <sstream>
+#include <string_view>
#include <unordered_map>
+#include <unordered_set>
#include <vector>
+#define DEBUG_TYPE "macho_platform"
+
using namespace __orc_rt;
using namespace __orc_rt::macho;
// Declare function tags for functions in the JIT process.
-ORC_RT_JIT_DISPATCH_TAG(__orc_rt_macho_get_initializers_tag)
-ORC_RT_JIT_DISPATCH_TAG(__orc_rt_macho_get_deinitializers_tag)
-ORC_RT_JIT_DISPATCH_TAG(__orc_rt_macho_symbol_lookup_tag)
-
-// eh-frame registration functions.
-// We expect these to be available for all processes.
-extern "C" void __register_frame(const void *);
-extern "C" void __deregister_frame(const void *);
+ORC_RT_JIT_DISPATCH_TAG(__orc_rt_macho_push_initializers_tag)
+ORC_RT_JIT_DISPATCH_TAG(__orc_rt_macho_push_symbols_tag)
-// Objective-C types.
-struct objc_class;
struct objc_image_info;
-struct objc_object;
-struct objc_selector;
-
-using Class = objc_class *;
-using id = objc_object *;
-using SEL = objc_selector *;
+struct mach_header;
// Objective-C registration functions.
// These are weakly imported. If the Objective-C runtime has not been loaded
// then code containing Objective-C sections will generate an error.
-extern "C" id objc_msgSend(id, SEL, ...) ORC_RT_WEAK_IMPORT;
-extern "C" Class objc_readClassPair(Class,
- const objc_image_info *) ORC_RT_WEAK_IMPORT;
-extern "C" SEL sel_registerName(const char *) ORC_RT_WEAK_IMPORT;
-
-// Swift types.
-class ProtocolRecord;
-class ProtocolConformanceRecord;
-
extern "C" void
-swift_registerProtocols(const ProtocolRecord *begin,
- const ProtocolRecord *end) ORC_RT_WEAK_IMPORT;
+_objc_map_images(unsigned count, const char *const paths[],
+ const mach_header *const mhdrs[]) ORC_RT_WEAK_IMPORT;
+
+extern "C" void _objc_load_image(const char *path,
+ const mach_header *mh) ORC_RT_WEAK_IMPORT;
+
+// Libunwind prototypes.
+struct unw_dynamic_unwind_sections {
+ uintptr_t dso_base;
+ uintptr_t dwarf_section;
+ size_t dwarf_section_length;
+ uintptr_t compact_unwind_section;
+ size_t compact_unwind_section_length;
+};
-extern "C" void swift_registerProtocolConformances(
- const ProtocolConformanceRecord *begin,
- const ProtocolConformanceRecord *end) ORC_RT_WEAK_IMPORT;
+typedef int (*unw_find_dynamic_unwind_sections)(
+ uintptr_t addr, struct unw_dynamic_unwind_sections *info);
-namespace {
+extern "C" int __unw_add_find_dynamic_unwind_sections(
+ unw_find_dynamic_unwind_sections find_dynamic_unwind_sections)
+ ORC_RT_WEAK_IMPORT;
-template <typename HandleFDEFn>
-void walkEHFrameSection(span<const char> EHFrameSection,
- HandleFDEFn HandleFDE) {
- const char *CurCFIRecord = EHFrameSection.data();
- uint64_t Size = *reinterpret_cast<const uint32_t *>(CurCFIRecord);
+extern "C" int __unw_remove_find_dynamic_unwind_sections(
+ unw_find_dynamic_unwind_sections find_dynamic_unwind_sections)
+ ORC_RT_WEAK_IMPORT;
- while (CurCFIRecord != EHFrameSection.end() && Size != 0) {
- const char *OffsetField = CurCFIRecord + (Size == 0xffffffff ? 12 : 4);
- if (Size == 0xffffffff)
- Size = *reinterpret_cast<const uint64_t *>(CurCFIRecord + 4) + 12;
- else
- Size += 4;
- uint32_t Offset = *reinterpret_cast<const uint32_t *>(OffsetField);
+namespace {
- if (Offset != 0)
- HandleFDE(CurCFIRecord);
+struct MachOJITDylibDepInfo {
+ bool Sealed = false;
+ std::vector<ExecutorAddr> DepHeaders;
+};
- CurCFIRecord += Size;
- Size = *reinterpret_cast<const uint32_t *>(CurCFIRecord);
- }
-}
+using MachOJITDylibDepInfoMap =
+ std::unordered_map<ExecutorAddr, MachOJITDylibDepInfo>;
-Error validatePointerSectionExtent(const char *SectionName,
- const ExecutorAddressRange &SE) {
- if (SE.size().getValue() % sizeof(uintptr_t)) {
- std::ostringstream ErrMsg;
- ErrMsg << std::hex << "Size of " << SectionName << " 0x"
- << SE.StartAddress.getValue() << " -- 0x" << SE.EndAddress.getValue()
- << " is not a pointer multiple";
- return make_error<StringError>(ErrMsg.str());
- }
- return Error::success();
-}
+} // anonymous namespace
-Error registerObjCSelectors(
- const std::vector<ExecutorAddressRange> &ObjCSelRefsSections,
- const MachOJITDylibInitializers &MOJDIs) {
+namespace __orc_rt {
- if (ORC_RT_UNLIKELY(!sel_registerName))
- return make_error<StringError>("sel_registerName is not available");
+using SPSMachOObjectPlatformSectionsMap =
+ SPSSequence<SPSTuple<SPSString, SPSExecutorAddrRange>>;
- for (const auto &ObjCSelRefs : ObjCSelRefsSections) {
+using SPSMachOJITDylibDepInfo = SPSTuple<bool, SPSSequence<SPSExecutorAddr>>;
- if (auto Err = validatePointerSectionExtent("__objc_selrefs", ObjCSelRefs))
- return Err;
+using SPSMachOJITDylibDepInfoMap =
+ SPSSequence<SPSTuple<SPSExecutorAddr, SPSMachOJITDylibDepInfo>>;
- fprintf(stderr, "Processing selrefs section at 0x%llx\n",
- ObjCSelRefs.StartAddress.getValue());
- for (uintptr_t SelEntry : ObjCSelRefs.toSpan<uintptr_t>()) {
- const char *SelName = reinterpret_cast<const char *>(SelEntry);
- fprintf(stderr, "Registering selector \"%s\"\n", SelName);
- auto Sel = sel_registerName(SelName);
- *reinterpret_cast<SEL *>(SelEntry) = Sel;
- }
+template <>
+class SPSSerializationTraits<SPSMachOJITDylibDepInfo, MachOJITDylibDepInfo> {
+public:
+ static size_t size(const MachOJITDylibDepInfo &JDI) {
+ return SPSMachOJITDylibDepInfo::AsArgList::size(JDI.Sealed, JDI.DepHeaders);
}
- return Error::success();
-}
-
-Error registerObjCClasses(
- const std::vector<ExecutorAddressRange> &ObjCClassListSections,
- const MachOJITDylibInitializers &MOJDIs) {
-
- if (ObjCClassListSections.empty())
- return Error::success();
-
- if (ORC_RT_UNLIKELY(!objc_msgSend))
- return make_error<StringError>("objc_msgSend is not available");
- if (ORC_RT_UNLIKELY(!objc_readClassPair))
- return make_error<StringError>("objc_readClassPair is not available");
-
- struct ObjCClassCompiled {
- void *Metaclass;
- void *Parent;
- void *Cache1;
- void *Cache2;
- void *Data;
- };
-
- auto *ImageInfo =
- MOJDIs.ObjCImageInfoAddress.toPtr<const objc_image_info *>();
- auto ClassSelector = sel_registerName("class");
-
- for (const auto &ObjCClassList : ObjCClassListSections) {
-
- if (auto Err =
- validatePointerSectionExtent("__objc_classlist", ObjCClassList))
- return Err;
-
- for (uintptr_t ClassPtr : ObjCClassList.toSpan<uintptr_t>()) {
- auto *Cls = reinterpret_cast<Class>(ClassPtr);
- auto *ClassCompiled = reinterpret_cast<ObjCClassCompiled *>(ClassPtr);
- objc_msgSend(reinterpret_cast<id>(ClassCompiled->Parent), ClassSelector);
- auto Registered = objc_readClassPair(Cls, ImageInfo);
-
- // FIXME: Improve diagnostic by reporting the failed class's name.
- if (Registered != Cls)
- return make_error<StringError>("Unable to register Objective-C class");
- }
+ static bool serialize(SPSOutputBuffer &OB, const MachOJITDylibDepInfo &JDI) {
+ return SPSMachOJITDylibDepInfo::AsArgList::serialize(OB, JDI.Sealed,
+ JDI.DepHeaders);
}
- return Error::success();
-}
-
-Error registerSwift5Protocols(
- const std::vector<ExecutorAddressRange> &Swift5ProtocolSections,
- const MachOJITDylibInitializers &MOJDIs) {
- if (ORC_RT_UNLIKELY(!Swift5ProtocolSections.empty() &&
- !swift_registerProtocols))
- return make_error<StringError>("swift_registerProtocols is not available");
-
- for (const auto &Swift5Protocols : Swift5ProtocolSections)
- swift_registerProtocols(
- Swift5Protocols.StartAddress.toPtr<const ProtocolRecord *>(),
- Swift5Protocols.EndAddress.toPtr<const ProtocolRecord *>());
-
- return Error::success();
-}
-
-Error registerSwift5ProtocolConformances(
- const std::vector<ExecutorAddressRange> &Swift5ProtocolConformanceSections,
- const MachOJITDylibInitializers &MOJDIs) {
-
- if (ORC_RT_UNLIKELY(!Swift5ProtocolConformanceSections.empty() &&
- !swift_registerProtocolConformances))
- return make_error<StringError>(
- "swift_registerProtocolConformances is not available");
+ static bool deserialize(SPSInputBuffer &IB, MachOJITDylibDepInfo &JDI) {
+ return SPSMachOJITDylibDepInfo::AsArgList::deserialize(IB, JDI.Sealed,
+ JDI.DepHeaders);
+ }
+};
- for (const auto &ProtoConfSec : Swift5ProtocolConformanceSections)
- swift_registerProtocolConformances(
- ProtoConfSec.StartAddress.toPtr<const ProtocolConformanceRecord *>(),
- ProtoConfSec.EndAddress.toPtr<const ProtocolConformanceRecord *>());
+struct UnwindSectionInfo {
+ std::vector<ExecutorAddrRange> CodeRanges;
+ ExecutorAddrRange DwarfSection;
+ ExecutorAddrRange CompactUnwindSection;
+};
- return Error::success();
-}
+using SPSUnwindSectionInfo =
+ SPSTuple<SPSSequence<SPSExecutorAddrRange>, SPSExecutorAddrRange,
+ SPSExecutorAddrRange>;
-Error runModInits(const std::vector<ExecutorAddressRange> &ModInitsSections,
- const MachOJITDylibInitializers &MOJDIs) {
+template <>
+class SPSSerializationTraits<SPSUnwindSectionInfo, UnwindSectionInfo> {
+public:
+ static size_t size(const UnwindSectionInfo &USI) {
+ return SPSUnwindSectionInfo::AsArgList::size(
+ USI.CodeRanges, USI.DwarfSection, USI.CompactUnwindSection);
+ }
- for (const auto &ModInits : ModInitsSections) {
- if (auto Err = validatePointerSectionExtent("__mod_inits", ModInits))
- return Err;
+ static bool serialize(SPSOutputBuffer &OB, const UnwindSectionInfo &USI) {
+ return SPSUnwindSectionInfo::AsArgList::serialize(
+ OB, USI.CodeRanges, USI.DwarfSection, USI.CompactUnwindSection);
+ }
- using InitFunc = void (*)();
- for (auto *Init : ModInits.toSpan<InitFunc>())
- (*Init)();
+ static bool deserialize(SPSInputBuffer &IB, UnwindSectionInfo &USI) {
+ return SPSUnwindSectionInfo::AsArgList::deserialize(
+ IB, USI.CodeRanges, USI.DwarfSection, USI.CompactUnwindSection);
}
+};
- return Error::success();
-}
+} // namespace __orc_rt
+namespace {
struct TLVDescriptor {
void *(*Thunk)(TLVDescriptor *) = nullptr;
unsigned long Key = 0;
@@ -224,6 +149,16 @@ struct TLVDescriptor {
};
class MachOPlatformRuntimeState {
+public:
+ // Used internally by MachOPlatformRuntimeState, but made public to enable
+ // serialization.
+ enum class MachOExecutorSymbolFlags : uint8_t {
+ None = 0,
+ Weak = 1U << 0,
+ Callable = 1U << 1,
+ ORC_RT_MARK_AS_BITMASK_ENUM(/* LargestValue = */ Callable)
+ };
+
private:
struct AtExitEntry {
void (*Func)(void *);
@@ -232,17 +167,135 @@ private:
using AtExitsVector = std::vector<AtExitEntry>;
- struct PerJITDylibState {
+ /// Used to manage sections of fixed-sized metadata records (e.g. pointer
+ /// sections, selector refs, etc.)
+ template <typename RecordElement> class RecordSectionsTracker {
+ public:
+ /// Add a section to the "new" list.
+ void add(span<RecordElement> Sec) { New.push_back(std::move(Sec)); }
+
+ /// Returns true if there are new sections to process.
+ bool hasNewSections() const { return !New.empty(); }
+
+ /// Returns the number of new sections to process.
+ size_t numNewSections() const { return New.size(); }
+
+ /// Process all new sections.
+ template <typename ProcessSectionFunc>
+ std::enable_if_t<std::is_void_v<
+ std::invoke_result_t<ProcessSectionFunc, span<RecordElement>>>>
+ processNewSections(ProcessSectionFunc &&ProcessSection) {
+ for (auto &Sec : New)
+ ProcessSection(Sec);
+ moveNewToProcessed();
+ }
+
+    /// Process all new sections with a fallible handler.
+ ///
+ /// Successfully handled sections will be moved to the Processed
+ /// list.
+ template <typename ProcessSectionFunc>
+ std::enable_if_t<
+ std::is_same_v<Error, std::invoke_result_t<ProcessSectionFunc,
+ span<RecordElement>>>,
+ Error>
+ processNewSections(ProcessSectionFunc &&ProcessSection) {
+ for (size_t I = 0; I != New.size(); ++I) {
+ if (auto Err = ProcessSection(New[I])) {
+ for (size_t J = 0; J != I; ++J)
+ Processed.push_back(New[J]);
+ New.erase(New.begin(), New.begin() + I);
+ return Err;
+ }
+ }
+ moveNewToProcessed();
+ return Error::success();
+ }
+
+ /// Move all sections back to New for reprocessing.
+ void reset() {
+ moveNewToProcessed();
+ New = std::move(Processed);
+ }
+
+ /// Remove the section with the given range.
+ bool removeIfPresent(ExecutorAddrRange R) {
+ if (removeIfPresent(New, R))
+ return true;
+ return removeIfPresent(Processed, R);
+ }
+
+ private:
+ void moveNewToProcessed() {
+ if (Processed.empty())
+ Processed = std::move(New);
+ else {
+ Processed.reserve(Processed.size() + New.size());
+ std::copy(New.begin(), New.end(), std::back_inserter(Processed));
+ New.clear();
+ }
+ }
+
+ bool removeIfPresent(std::vector<span<RecordElement>> &V,
+ ExecutorAddrRange R) {
+ auto RI = std::find_if(
+ V.rbegin(), V.rend(),
+ [RS = R.toSpan<RecordElement>()](const span<RecordElement> &E) {
+ return E.data() == RS.data();
+ });
+ if (RI != V.rend()) {
+ V.erase(std::next(RI).base());
+ return true;
+ }
+ return false;
+ }
+
+ std::vector<span<RecordElement>> Processed;
+ std::vector<span<RecordElement>> New;
+ };
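+
+  // Sketch of the partial-failure contract of the fallible overload above
+  // (hypothetical, for illustration only): with sections A and B in New, a
+  // handler that fails on B moves A to Processed and leaves B in New for a
+  // later retry:
+  //
+  //   RecordSectionsTracker<void (*)()> T;
+  //   T.add(A);
+  //   T.add(B);
+  //   auto Err = T.processNewSections([&](span<void (*)()> S) -> Error {
+  //     return S.data() == B.data() ? make_error<StringError>("B failed")
+  //                                 : Error::success();
+  //   });
+  //   // Now: A is in Processed, B is in New, hasNewSections() == true.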
+
+ struct UnwindSections {
+ UnwindSections(const UnwindSectionInfo &USI)
+ : DwarfSection(USI.DwarfSection.toSpan<char>()),
+ CompactUnwindSection(USI.CompactUnwindSection.toSpan<char>()) {}
+
+ span<char> DwarfSection;
+ span<char> CompactUnwindSection;
+ };
+
+ using UnwindSectionsMap =
+ IntervalMap<char *, UnwindSections, IntervalCoalescing::Disabled>;
+
+ struct JITDylibState {
+
+ using SymbolTableMap =
+ std::unordered_map<std::string_view,
+ std::pair<ExecutorAddr, MachOExecutorSymbolFlags>>;
+
+ std::string Name;
void *Header = nullptr;
- size_t RefCount = 0;
- bool AllowReinitialization = false;
+ bool Sealed = false;
+ size_t LinkedAgainstRefCount = 0;
+ size_t DlRefCount = 0;
+ SymbolTableMap SymbolTable;
+ std::vector<JITDylibState *> Deps;
AtExitsVector AtExits;
+ const objc_image_info *ObjCImageInfo = nullptr;
+ std::unordered_map<void *, std::vector<char>> DataSectionContent;
+ std::unordered_map<void *, size_t> ZeroInitRanges;
+ UnwindSectionsMap UnwindSections;
+ RecordSectionsTracker<void (*)()> ModInitsSections;
+ RecordSectionsTracker<char> ObjCRuntimeRegistrationObjects;
+
+ bool referenced() const {
+ return LinkedAgainstRefCount != 0 || DlRefCount != 0;
+ }
};
public:
- static void initialize();
+ static Error create();
static MachOPlatformRuntimeState &get();
- static void destroy();
+ static Error destroy();
MachOPlatformRuntimeState() = default;
@@ -253,15 +306,36 @@ public:
MachOPlatformRuntimeState(MachOPlatformRuntimeState &&) = delete;
MachOPlatformRuntimeState &operator=(MachOPlatformRuntimeState &&) = delete;
- Error registerObjectSections(MachOPerObjectSectionsToRegister POSR);
- Error deregisterObjectSections(MachOPerObjectSectionsToRegister POSR);
+ Error initialize();
+ Error shutdown();
+
+ Error registerJITDylib(std::string Name, void *Header);
+ Error deregisterJITDylib(void *Header);
+ Error registerThreadDataSection(span<const char> ThreadDataSection);
+ Error deregisterThreadDataSection(span<const char> ThreadDataSection);
+ Error registerObjectSymbolTable(
+ ExecutorAddr HeaderAddr,
+ const std::vector<std::tuple<ExecutorAddr, ExecutorAddr,
+ MachOExecutorSymbolFlags>> &Entries);
+ Error deregisterObjectSymbolTable(
+ ExecutorAddr HeaderAddr,
+ const std::vector<std::tuple<ExecutorAddr, ExecutorAddr,
+ MachOExecutorSymbolFlags>> &Entries);
+ Error registerObjectPlatformSections(
+ ExecutorAddr HeaderAddr, std::optional<UnwindSectionInfo> UnwindSections,
+ std::vector<std::pair<std::string_view, ExecutorAddrRange>> Secs);
+ Error deregisterObjectPlatformSections(
+ ExecutorAddr HeaderAddr, std::optional<UnwindSectionInfo> UnwindSections,
+ std::vector<std::pair<std::string_view, ExecutorAddrRange>> Secs);
const char *dlerror();
- void *dlopen(string_view Name, int Mode);
+ void *dlopen(std::string_view Name, int Mode);
int dlclose(void *DSOHandle);
- void *dlsym(void *DSOHandle, string_view Symbol);
+ void *dlsym(void *DSOHandle, const char *Symbol);
int registerAtExit(void (*F)(void *), void *Arg, void *DSOHandle);
+ void runAtExits(std::unique_lock<std::mutex> &JDStatesLock,
+ JITDylibState &JDS);
void runAtExits(void *DSOHandle);
/// Returns the base address of the section containing ThreadData.
@@ -269,48 +343,112 @@ public:
getThreadDataSectionFor(const char *ThreadData);
private:
- PerJITDylibState *getJITDylibStateByHeaderAddr(void *DSOHandle);
- PerJITDylibState *getJITDylibStateByName(string_view Path);
- PerJITDylibState &getOrCreateJITDylibState(MachOJITDylibInitializers &MOJDIs);
-
- Error registerThreadDataSection(span<const char> ThreadDataSec);
-
- Expected<ExecutorAddress> lookupSymbolInJITDylib(void *DSOHandle,
- string_view Symbol);
-
- Expected<MachOJITDylibInitializerSequence>
- getJITDylibInitializersByName(string_view Path);
- Expected<void *> dlopenInitialize(string_view Path, int Mode);
- Error initializeJITDylib(MachOJITDylibInitializers &MOJDIs);
+ JITDylibState *getJITDylibStateByHeader(void *DSOHandle);
+ JITDylibState *getJITDylibStateByName(std::string_view Path);
+
+ /// Requests materialization of the given symbols. For each pair, the bool
+ /// element indicates whether the symbol is required (true) or weakly
+ /// referenced (false).
+ Error requestPushSymbols(JITDylibState &JDS,
+ span<std::pair<std::string_view, bool>> Symbols);
+
+ /// Attempts to look up the given symbols locally, requesting a push from the
+ /// remote if they're not found. Results are written to the Result span, which
+ /// must have the same size as the Symbols span.
+ Error
+ lookupSymbols(JITDylibState &JDS, std::unique_lock<std::mutex> &JDStatesLock,
+ span<std::pair<ExecutorAddr, MachOExecutorSymbolFlags>> Result,
+ span<std::pair<std::string_view, bool>> Symbols);
+
+ bool lookupUnwindSections(void *Addr, unw_dynamic_unwind_sections &Info);
+
+ static int findDynamicUnwindSections(uintptr_t addr,
+ unw_dynamic_unwind_sections *info);
+ static Error registerEHFrames(span<const char> EHFrameSection);
+ static Error deregisterEHFrames(span<const char> EHFrameSection);
+
+ static Error registerObjCRegistrationObjects(JITDylibState &JDS);
+ static Error runModInits(std::unique_lock<std::mutex> &JDStatesLock,
+ JITDylibState &JDS);
+
+ Expected<void *> dlopenImpl(std::string_view Path, int Mode);
+ Error dlopenFull(std::unique_lock<std::mutex> &JDStatesLock,
+ JITDylibState &JDS);
+ Error dlopenInitialize(std::unique_lock<std::mutex> &JDStatesLock,
+ JITDylibState &JDS, MachOJITDylibDepInfoMap &DepInfo);
+
+ Error dlcloseImpl(void *DSOHandle);
+ Error dlcloseDeinitialize(std::unique_lock<std::mutex> &JDStatesLock,
+ JITDylibState &JDS);
static MachOPlatformRuntimeState *MOPS;
- using InitSectionHandler =
- Error (*)(const std::vector<ExecutorAddressRange> &Sections,
- const MachOJITDylibInitializers &MOJDIs);
- const std::vector<std::pair<const char *, InitSectionHandler>> InitSections =
- {{"__DATA,__objc_selrefs", registerObjCSelectors},
- {"__DATA,__objc_classlist", registerObjCClasses},
- {"__TEXT,__swift5_protos", registerSwift5Protocols},
- {"__TEXT,__swift5_proto", registerSwift5ProtocolConformances},
- {"__DATA,__mod_init_func", runModInits}};
+ bool UseCallbackStyleUnwindInfo = false;
// FIXME: Move to thread-state.
std::string DLFcnError;
- std::recursive_mutex JDStatesMutex;
- std::unordered_map<void *, PerJITDylibState> JDStates;
- std::unordered_map<std::string, void *> JDNameToHeader;
+  // DyldAPIMutex guards against concurrent entry into key "dyld" API
+  // functions (e.g. dlopen, dlclose).
+ std::recursive_mutex DyldAPIMutex;
+ // JDStatesMutex guards the data structures that hold JITDylib state.
+ std::mutex JDStatesMutex;
+ std::unordered_map<void *, JITDylibState> JDStates;
+ std::unordered_map<std::string_view, void *> JDNameToHeader;
+
+ // ThreadDataSectionsMutex guards thread local data section state.
std::mutex ThreadDataSectionsMutex;
std::map<const char *, size_t> ThreadDataSections;
};
+} // anonymous namespace
+
+namespace __orc_rt {
+
+class SPSMachOExecutorSymbolFlags;
+
+template <>
+class SPSSerializationTraits<
+ SPSMachOExecutorSymbolFlags,
+ MachOPlatformRuntimeState::MachOExecutorSymbolFlags> {
+private:
+ using UT = std::underlying_type_t<
+ MachOPlatformRuntimeState::MachOExecutorSymbolFlags>;
+
+public:
+ static size_t
+ size(const MachOPlatformRuntimeState::MachOExecutorSymbolFlags &SF) {
+ return sizeof(UT);
+ }
+
+ static bool
+ serialize(SPSOutputBuffer &OB,
+ const MachOPlatformRuntimeState::MachOExecutorSymbolFlags &SF) {
+ return SPSArgList<UT>::serialize(OB, static_cast<UT>(SF));
+ }
+
+ static bool
+ deserialize(SPSInputBuffer &IB,
+ MachOPlatformRuntimeState::MachOExecutorSymbolFlags &SF) {
+ UT Tmp;
+ if (!SPSArgList<UT>::deserialize(IB, Tmp))
+ return false;
+ SF = static_cast<MachOPlatformRuntimeState::MachOExecutorSymbolFlags>(Tmp);
+ return true;
+ }
+};
+
+} // namespace __orc_rt
+
+namespace {
+
MachOPlatformRuntimeState *MachOPlatformRuntimeState::MOPS = nullptr;
-void MachOPlatformRuntimeState::initialize() {
+Error MachOPlatformRuntimeState::create() {
assert(!MOPS && "MachOPlatformRuntimeState should be null");
MOPS = new MachOPlatformRuntimeState();
+ return MOPS->initialize();
}
MachOPlatformRuntimeState &MachOPlatformRuntimeState::get() {
@@ -318,99 +456,424 @@ MachOPlatformRuntimeState &MachOPlatformRuntimeState::get() {
return *MOPS;
}
-void MachOPlatformRuntimeState::destroy() {
+Error MachOPlatformRuntimeState::destroy() {
assert(MOPS && "MachOPlatformRuntimeState not initialized");
+ auto Err = MOPS->shutdown();
delete MOPS;
+ return Err;
}
-Error MachOPlatformRuntimeState::registerObjectSections(
- MachOPerObjectSectionsToRegister POSR) {
- if (POSR.EHFrameSection.StartAddress)
- walkEHFrameSection(POSR.EHFrameSection.toSpan<const char>(),
- __register_frame);
+Error MachOPlatformRuntimeState::initialize() {
+ UseCallbackStyleUnwindInfo = __unw_add_find_dynamic_unwind_sections &&
+ __unw_remove_find_dynamic_unwind_sections;
+ if (UseCallbackStyleUnwindInfo) {
+ ORC_RT_DEBUG({
+ printdbg("__unw_add/remove_find_dynamic_unwind_sections available."
+ " Using callback-based frame info lookup.\n");
+ });
+ if (__unw_add_find_dynamic_unwind_sections(&findDynamicUnwindSections))
+ return make_error<StringError>(
+ "Could not register findDynamicUnwindSections");
+ } else {
+ ORC_RT_DEBUG({
+ printdbg("__unw_add/remove_find_dynamic_unwind_sections not available."
+ " Using classic frame info registration.\n");
+ });
+ }
+ return Error::success();
+}
- if (POSR.ThreadDataSection.StartAddress) {
- if (auto Err = registerThreadDataSection(
- POSR.ThreadDataSection.toSpan<const char>()))
- return Err;
+Error MachOPlatformRuntimeState::shutdown() {
+ if (UseCallbackStyleUnwindInfo) {
+ if (__unw_remove_find_dynamic_unwind_sections(&findDynamicUnwindSections)) {
+ ORC_RT_DEBUG(
+ { printdbg("__unw_remove_find_dynamic_unwind_sections failed.\n"); });
+ }
+ }
+ return Error::success();
+}
+
+Error MachOPlatformRuntimeState::registerJITDylib(std::string Name,
+ void *Header) {
+ ORC_RT_DEBUG({
+ printdbg("Registering JITDylib %s: Header = %p\n", Name.c_str(), Header);
+ });
+ std::lock_guard<std::mutex> Lock(JDStatesMutex);
+ if (JDStates.count(Header)) {
+ std::ostringstream ErrStream;
+ ErrStream << "Duplicate JITDylib registration for header " << Header
+ << " (name = " << Name << ")";
+ return make_error<StringError>(ErrStream.str());
+ }
+ if (JDNameToHeader.count(Name)) {
+ std::ostringstream ErrStream;
+ ErrStream << "Duplicate JITDylib registration for header " << Header
+ << " (header = " << Header << ")";
+ return make_error<StringError>(ErrStream.str());
}
+ auto &JDS = JDStates[Header];
+ JDS.Name = std::move(Name);
+ JDS.Header = Header;
+ JDNameToHeader[JDS.Name] = Header;
return Error::success();
}
-Error MachOPlatformRuntimeState::deregisterObjectSections(
- MachOPerObjectSectionsToRegister POSR) {
- if (POSR.EHFrameSection.StartAddress)
- walkEHFrameSection(POSR.EHFrameSection.toSpan<const char>(),
- __deregister_frame);
+Error MachOPlatformRuntimeState::deregisterJITDylib(void *Header) {
+ std::lock_guard<std::mutex> Lock(JDStatesMutex);
+ auto I = JDStates.find(Header);
+ if (I == JDStates.end()) {
+ std::ostringstream ErrStream;
+ ErrStream << "Attempted to deregister unrecognized header " << Header;
+ return make_error<StringError>(ErrStream.str());
+ }
+
+ // Remove std::string construction once we can use C++20.
+ auto J = JDNameToHeader.find(
+ std::string(I->second.Name.data(), I->second.Name.size()));
+ assert(J != JDNameToHeader.end() &&
+ "Missing JDNameToHeader entry for JITDylib");
+
+ ORC_RT_DEBUG({
+ printdbg("Deregistering JITDylib %s: Header = %p\n", I->second.Name.c_str(),
+ Header);
+ });
+ JDNameToHeader.erase(J);
+ JDStates.erase(I);
return Error::success();
}
-const char *MachOPlatformRuntimeState::dlerror() { return DLFcnError.c_str(); }
+Error MachOPlatformRuntimeState::registerThreadDataSection(
+ span<const char> ThreadDataSection) {
+ std::lock_guard<std::mutex> Lock(ThreadDataSectionsMutex);
+ auto I = ThreadDataSections.upper_bound(ThreadDataSection.data());
+ if (I != ThreadDataSections.begin()) {
+ auto J = std::prev(I);
+ if (J->first + J->second > ThreadDataSection.data())
+ return make_error<StringError>("Overlapping __thread_data sections");
+ }
+ ThreadDataSections.insert(
+ I, std::make_pair(ThreadDataSection.data(), ThreadDataSection.size()));
+ return Error::success();
+}
+
+Error MachOPlatformRuntimeState::deregisterThreadDataSection(
+ span<const char> ThreadDataSection) {
+ std::lock_guard<std::mutex> Lock(ThreadDataSectionsMutex);
+ auto I = ThreadDataSections.find(ThreadDataSection.data());
+ if (I == ThreadDataSections.end())
+ return make_error<StringError>("Attempt to deregister unknown thread data "
+ "section");
+ ThreadDataSections.erase(I);
+ return Error::success();
+}
+
+Error MachOPlatformRuntimeState::registerObjectSymbolTable(
+ ExecutorAddr HeaderAddr,
+ const std::vector<std::tuple<ExecutorAddr, ExecutorAddr,
+ MachOExecutorSymbolFlags>> &Entries) {
+
+ std::lock_guard<std::mutex> Lock(JDStatesMutex);
+ auto *JDS = getJITDylibStateByHeader(HeaderAddr.toPtr<void *>());
+ if (!JDS) {
+ std::ostringstream ErrStream;
+ ErrStream << "Could not register object platform sections for "
+ "unrecognized header "
+ << HeaderAddr.toPtr<void *>();
+ return make_error<StringError>(ErrStream.str());
+ }
+
+ for (auto &[NameAddr, SymAddr, Flags] : Entries)
+ JDS->SymbolTable[NameAddr.toPtr<const char *>()] = {SymAddr, Flags};
+
+ return Error::success();
+}
+
+Error MachOPlatformRuntimeState::deregisterObjectSymbolTable(
+ ExecutorAddr HeaderAddr,
+ const std::vector<std::tuple<ExecutorAddr, ExecutorAddr,
+ MachOExecutorSymbolFlags>> &Entries) {
+
+ std::lock_guard<std::mutex> Lock(JDStatesMutex);
+ auto *JDS = getJITDylibStateByHeader(HeaderAddr.toPtr<void *>());
+ if (!JDS) {
+ std::ostringstream ErrStream;
+ ErrStream << "Could not register object platform sections for "
+ "unrecognized header "
+ << HeaderAddr.toPtr<void *>();
+ return make_error<StringError>(ErrStream.str());
+ }
-void *MachOPlatformRuntimeState::dlopen(string_view Path, int Mode) {
- std::lock_guard<std::recursive_mutex> Lock(JDStatesMutex);
+ for (auto &[NameAddr, SymAddr, Flags] : Entries)
+ JDS->SymbolTable.erase(NameAddr.toPtr<const char *>());
- // Use fast path if all JITDylibs are already loaded and don't require
- // re-running initializers.
- if (auto *JDS = getJITDylibStateByName(Path)) {
- if (!JDS->AllowReinitialization) {
- ++JDS->RefCount;
- return JDS->Header;
+ return Error::success();
+}
+
+Error MachOPlatformRuntimeState::registerObjectPlatformSections(
+ ExecutorAddr HeaderAddr, std::optional<UnwindSectionInfo> UnwindInfo,
+ std::vector<std::pair<std::string_view, ExecutorAddrRange>> Secs) {
+
+ // FIXME: Reject platform section registration after the JITDylib is
+ // sealed?
+
+ ORC_RT_DEBUG({
+ printdbg("MachOPlatform: Registering object sections for %p.\n",
+ HeaderAddr.toPtr<void *>());
+ });
+
+ std::lock_guard<std::mutex> Lock(JDStatesMutex);
+ auto *JDS = getJITDylibStateByHeader(HeaderAddr.toPtr<void *>());
+ if (!JDS) {
+ std::ostringstream ErrStream;
+ ErrStream << "Could not register object platform sections for "
+ "unrecognized header "
+ << HeaderAddr.toPtr<void *>();
+ return make_error<StringError>(ErrStream.str());
+ }
+
+ if (UnwindInfo && UseCallbackStyleUnwindInfo) {
+ ORC_RT_DEBUG({
+ printdbg(" Registering new-style unwind info for:\n"
+ " DWARF: %p -- %p\n"
+ " Compact-unwind: %p -- %p\n"
+ " for:\n",
+ UnwindInfo->DwarfSection.Start.toPtr<void *>(),
+ UnwindInfo->DwarfSection.End.toPtr<void *>(),
+ UnwindInfo->CompactUnwindSection.Start.toPtr<void *>(),
+ UnwindInfo->CompactUnwindSection.End.toPtr<void *>());
+ });
+ for (auto &CodeRange : UnwindInfo->CodeRanges) {
+ JDS->UnwindSections.insert(CodeRange.Start.toPtr<char *>(),
+ CodeRange.End.toPtr<char *>(), *UnwindInfo);
+ ORC_RT_DEBUG({
+ printdbg(" [ %p -- %p ]\n", CodeRange.Start.toPtr<void *>(),
+ CodeRange.End.toPtr<void *>());
+ });
}
}
- auto H = dlopenInitialize(Path, Mode);
- if (!H) {
+ for (auto &KV : Secs) {
+ // FIXME: Validate section ranges?
+ if (KV.first == "__TEXT,__eh_frame") {
+ if (!UseCallbackStyleUnwindInfo) {
+ // Use classic libunwind registration.
+ if (auto Err = registerEHFrames(KV.second.toSpan<const char>()))
+ return Err;
+ }
+ } else if (KV.first == "__DATA,__data") {
+ assert(!JDS->DataSectionContent.count(KV.second.Start.toPtr<char *>()) &&
+ "Address already registered.");
+ auto S = KV.second.toSpan<char>();
+ JDS->DataSectionContent[KV.second.Start.toPtr<char *>()] =
+ std::vector<char>(S.begin(), S.end());
+ } else if (KV.first == "__DATA,__common") {
+ JDS->ZeroInitRanges[KV.second.Start.toPtr<char *>()] = KV.second.size();
+ } else if (KV.first == "__DATA,__thread_data") {
+ if (auto Err = registerThreadDataSection(KV.second.toSpan<const char>()))
+ return Err;
+ } else if (KV.first == "__llvm_jitlink_ObjCRuntimeRegistrationObject")
+ JDS->ObjCRuntimeRegistrationObjects.add(KV.second.toSpan<char>());
+ else if (KV.first == "__DATA,__mod_init_func")
+ JDS->ModInitsSections.add(KV.second.toSpan<void (*)()>());
+ else {
+ // Should this be a warning instead?
+ return make_error<StringError>(
+ "Encountered unexpected section " +
+ std::string(KV.first.data(), KV.first.size()) +
+ " while registering object platform sections");
+ }
+ }
+
+ return Error::success();
+}
+
+Error MachOPlatformRuntimeState::deregisterObjectPlatformSections(
+ ExecutorAddr HeaderAddr, std::optional<UnwindSectionInfo> UnwindInfo,
+ std::vector<std::pair<std::string_view, ExecutorAddrRange>> Secs) {
+ // TODO: Make this more efficient? (maybe unnecessary if removal is rare?)
+ // TODO: Add a JITDylib prepare-for-teardown operation that clears all
+ // registered sections, causing this function to take the fast-path.
+ ORC_RT_DEBUG({
+ printdbg("MachOPlatform: Deregistering object sections for %p.\n",
+ HeaderAddr.toPtr<void *>());
+ });
+
+ std::lock_guard<std::mutex> Lock(JDStatesMutex);
+ auto *JDS = getJITDylibStateByHeader(HeaderAddr.toPtr<void *>());
+ if (!JDS) {
+ std::ostringstream ErrStream;
+ ErrStream << "Could not register object platform sections for unrecognized "
+ "header "
+ << HeaderAddr.toPtr<void *>();
+ return make_error<StringError>(ErrStream.str());
+ }
+
+ // FIXME: Implement faster-path by returning immediately if JDS is being
+ // torn down entirely?
+
+ // TODO: Make library permanent (i.e. not able to be dlclosed) if it contains
+ // any Swift or ObjC. Once this happens we can clear (and no longer record)
+ // data section content, as the library could never be re-initialized.
+
+ if (UnwindInfo && UseCallbackStyleUnwindInfo) {
+ ORC_RT_DEBUG({
+ printdbg(" Deregistering new-style unwind info for:\n"
+ " DWARF: %p -- %p\n"
+ " Compact-unwind: %p -- %p\n"
+ " for:\n",
+ UnwindInfo->DwarfSection.Start.toPtr<void *>(),
+ UnwindInfo->DwarfSection.End.toPtr<void *>(),
+ UnwindInfo->CompactUnwindSection.Start.toPtr<void *>(),
+ UnwindInfo->CompactUnwindSection.End.toPtr<void *>());
+ });
+ for (auto &CodeRange : UnwindInfo->CodeRanges) {
+ JDS->UnwindSections.erase(CodeRange.Start.toPtr<char *>(),
+ CodeRange.End.toPtr<char *>());
+ ORC_RT_DEBUG({
+ printdbg(" [ %p -- %p ]\n", CodeRange.Start.toPtr<void *>(),
+ CodeRange.End.toPtr<void *>());
+ });
+ }
+ }
+
+ for (auto &KV : Secs) {
+ // FIXME: Validate section ranges?
+ if (KV.first == "__TEXT,__eh_frame") {
+ if (!UseCallbackStyleUnwindInfo) {
+ // Use classic libunwind registration.
+ if (auto Err = deregisterEHFrames(KV.second.toSpan<const char>()))
+ return Err;
+ }
+ } else if (KV.first == "__DATA,__data") {
+ JDS->DataSectionContent.erase(KV.second.Start.toPtr<char *>());
+ } else if (KV.first == "__DATA,__common") {
+ JDS->ZeroInitRanges.erase(KV.second.Start.toPtr<char *>());
+ } else if (KV.first == "__DATA,__thread_data") {
+ if (auto Err =
+ deregisterThreadDataSection(KV.second.toSpan<const char>()))
+ return Err;
+ } else if (KV.first == "__llvm_jitlink_ObjCRuntimeRegistrationObject")
+ JDS->ObjCRuntimeRegistrationObjects.removeIfPresent(KV.second);
+ else if (KV.first == "__DATA,__mod_init_func")
+ JDS->ModInitsSections.removeIfPresent(KV.second);
+ else {
+ // Should this be a warning instead?
+ return make_error<StringError>(
+ "Encountered unexpected section " +
+ std::string(KV.first.data(), KV.first.size()) +
+ " while deregistering object platform sections");
+ }
+ }
+ return Error::success();
+}
+
+const char *MachOPlatformRuntimeState::dlerror() { return DLFcnError.c_str(); }
+
+void *MachOPlatformRuntimeState::dlopen(std::string_view Path, int Mode) {
+ ORC_RT_DEBUG({
+ std::string S(Path.data(), Path.size());
+ printdbg("MachOPlatform::dlopen(\"%s\")\n", S.c_str());
+ });
+ std::lock_guard<std::recursive_mutex> Lock(DyldAPIMutex);
+ if (auto H = dlopenImpl(Path, Mode))
+ return *H;
+ else {
+ // FIXME: Make dlerror thread safe.
DLFcnError = toString(H.takeError());
return nullptr;
}
-
- return *H;
}
int MachOPlatformRuntimeState::dlclose(void *DSOHandle) {
- runAtExits(DSOHandle);
+ ORC_RT_DEBUG({
+    auto *JDS = getJITDylibStateByHeader(DSOHandle);
+    if (JDS)
+      printdbg("MachOPlatform::dlclose(%p) (%s)\n", DSOHandle,
+               JDS->Name.c_str());
+    else
+      printdbg("MachOPlatform::dlclose(%p) (%s)\n", DSOHandle,
+               "invalid handle");
+ });
+ std::lock_guard<std::recursive_mutex> Lock(DyldAPIMutex);
+ if (auto Err = dlcloseImpl(DSOHandle)) {
+ // FIXME: Make dlerror thread safe.
+ DLFcnError = toString(std::move(Err));
+ return -1;
+ }
return 0;
}
-void *MachOPlatformRuntimeState::dlsym(void *DSOHandle, string_view Symbol) {
- auto Addr = lookupSymbolInJITDylib(DSOHandle, Symbol);
- if (!Addr) {
- DLFcnError = toString(Addr.takeError());
- return 0;
+void *MachOPlatformRuntimeState::dlsym(void *DSOHandle, const char *Symbol) {
+ std::unique_lock<std::mutex> Lock(JDStatesMutex);
+ auto *JDS = getJITDylibStateByHeader(DSOHandle);
+ if (!JDS) {
+ std::ostringstream ErrStream;
+ ErrStream << "In call to dlsym, unrecognized header address " << DSOHandle;
+ DLFcnError = ErrStream.str();
+ return nullptr;
}
- return Addr->toPtr<void *>();
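+  // Mach-O mangles C-level names with a leading underscore, so prepend one
+  // to let callers pass the unmangled name, as they would to ::dlsym.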
+ std::string MangledName = std::string("_") + Symbol;
+ std::pair<std::string_view, bool> Lookup(MangledName, false);
+ std::pair<ExecutorAddr, MachOExecutorSymbolFlags> Result;
+
+ if (auto Err = lookupSymbols(*JDS, Lock, {&Result, 1}, {&Lookup, 1})) {
+ DLFcnError = toString(std::move(Err));
+ return nullptr;
+ }
+
+ // Sign callable symbols as functions, to match dyld.
+ if ((Result.second & MachOExecutorSymbolFlags::Callable) ==
+ MachOExecutorSymbolFlags::Callable)
+ return reinterpret_cast<void *>(Result.first.toPtr<void(void)>());
+ return Result.first.toPtr<void *>();
}
int MachOPlatformRuntimeState::registerAtExit(void (*F)(void *), void *Arg,
void *DSOHandle) {
// FIXME: Handle out-of-memory errors, returning -1 if OOM.
- std::lock_guard<std::recursive_mutex> Lock(JDStatesMutex);
- auto *JDS = getJITDylibStateByHeaderAddr(DSOHandle);
- assert(JDS && "JITDylib state not initialized");
+ std::lock_guard<std::mutex> Lock(JDStatesMutex);
+ auto *JDS = getJITDylibStateByHeader(DSOHandle);
+ if (!JDS) {
+ ORC_RT_DEBUG({
+ printdbg("MachOPlatformRuntimeState::registerAtExit called with "
+ "unrecognized dso handle %p\n",
+ DSOHandle);
+ });
+ return -1;
+ }
JDS->AtExits.push_back({F, Arg});
return 0;
}
-void MachOPlatformRuntimeState::runAtExits(void *DSOHandle) {
- // FIXME: Should atexits be allowed to run concurrently with access to
- // JDState?
- AtExitsVector V;
- {
- std::lock_guard<std::recursive_mutex> Lock(JDStatesMutex);
- auto *JDS = getJITDylibStateByHeaderAddr(DSOHandle);
- assert(JDS && "JITDlybi state not initialized");
- std::swap(V, JDS->AtExits);
- }
-
- while (!V.empty()) {
- auto &AE = V.back();
+void MachOPlatformRuntimeState::runAtExits(
+ std::unique_lock<std::mutex> &JDStatesLock, JITDylibState &JDS) {
+ auto AtExits = std::move(JDS.AtExits);
+
+ // Unlock while running atexits, as they may trigger operations that modify
+ // JDStates.
+ JDStatesLock.unlock();
+ while (!AtExits.empty()) {
+ auto &AE = AtExits.back();
AE.Func(AE.Arg);
- V.pop_back();
+ AtExits.pop_back();
}
+ JDStatesLock.lock();
+}
+
+void MachOPlatformRuntimeState::runAtExits(void *DSOHandle) {
+ std::unique_lock<std::mutex> Lock(JDStatesMutex);
+ auto *JDS = getJITDylibStateByHeader(DSOHandle);
+  if (JDS)
+    runAtExits(Lock, *JDS);
+  else {
+    ORC_RT_DEBUG({
+      printdbg("MachOPlatformRuntimeState::runAtExits called on unrecognized "
+               "dso_handle %p\n",
+               DSOHandle);
+    });
+  }
}
Expected<std::pair<const char *, size_t>>
@@ -426,125 +889,399 @@ MachOPlatformRuntimeState::getThreadDataSectionFor(const char *ThreadData) {
return *I;
}
-MachOPlatformRuntimeState::PerJITDylibState *
-MachOPlatformRuntimeState::getJITDylibStateByHeaderAddr(void *DSOHandle) {
+MachOPlatformRuntimeState::JITDylibState *
+MachOPlatformRuntimeState::getJITDylibStateByHeader(void *DSOHandle) {
auto I = JDStates.find(DSOHandle);
- if (I == JDStates.end())
- return nullptr;
+ if (I == JDStates.end()) {
+ I = JDStates.insert(std::make_pair(DSOHandle, JITDylibState())).first;
+ I->second.Header = DSOHandle;
+ }
return &I->second;
}
-MachOPlatformRuntimeState::PerJITDylibState *
-MachOPlatformRuntimeState::getJITDylibStateByName(string_view Name) {
- // FIXME: Avoid creating string copy here.
+MachOPlatformRuntimeState::JITDylibState *
+MachOPlatformRuntimeState::getJITDylibStateByName(std::string_view Name) {
+ // FIXME: Avoid creating string once we have C++20.
auto I = JDNameToHeader.find(std::string(Name.data(), Name.size()));
- if (I == JDNameToHeader.end())
- return nullptr;
- void *H = I->second;
- auto J = JDStates.find(H);
- assert(J != JDStates.end() &&
- "JITDylib has name map entry but no header map entry");
- return &J->second;
+ if (I != JDNameToHeader.end())
+ return getJITDylibStateByHeader(I->second);
+ return nullptr;
+}
+
+Error MachOPlatformRuntimeState::requestPushSymbols(
+ JITDylibState &JDS, span<std::pair<std::string_view, bool>> Symbols) {
+ Error OpErr = Error::success();
+ if (auto Err = WrapperFunction<SPSError(
+ SPSExecutorAddr, SPSSequence<SPSTuple<SPSString, bool>>)>::
+ call(&__orc_rt_macho_push_symbols_tag, OpErr,
+ ExecutorAddr::fromPtr(JDS.Header), Symbols)) {
+ cantFail(std::move(OpErr));
+ return std::move(Err);
+ }
+ return OpErr;
}
-MachOPlatformRuntimeState::PerJITDylibState &
-MachOPlatformRuntimeState::getOrCreateJITDylibState(
- MachOJITDylibInitializers &MOJDIs) {
- void *Header = MOJDIs.MachOHeaderAddress.toPtr<void *>();
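+
+// lookupSymbols is two-phase: it first resolves what it can from the
+// JITDylib's local symbol table, then (with the state lock dropped) asks the
+// controller to push any missing symbols, and finally re-checks the table.
+// Only symbols marked required produce an error if still missing; weak
+// references resolve to a null ExecutorAddr.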
+Error MachOPlatformRuntimeState::lookupSymbols(
+ JITDylibState &JDS, std::unique_lock<std::mutex> &JDStatesLock,
+ span<std::pair<ExecutorAddr, MachOExecutorSymbolFlags>> Result,
+ span<std::pair<std::string_view, bool>> Symbols) {
+ assert(JDStatesLock.owns_lock() &&
+ "JDStatesLock should be locked at call-site");
+ assert(Result.size() == Symbols.size() &&
+ "Results and Symbols span sizes should match");
+
+ // Make an initial pass over the local symbol table.
+ std::vector<size_t> MissingSymbolIndexes;
+ for (size_t Idx = 0; Idx != Symbols.size(); ++Idx) {
+ auto I = JDS.SymbolTable.find(Symbols[Idx].first);
+ if (I != JDS.SymbolTable.end())
+ Result[Idx] = I->second;
+ else
+ MissingSymbolIndexes.push_back(Idx);
+ }
- auto &JDS = JDStates[Header];
+ // If everything has been resolved already then bail out early.
+ if (MissingSymbolIndexes.empty())
+ return Error::success();
+
+ // Otherwise call back to the controller to try to request that the symbol
+ // be materialized.
+ std::vector<std::pair<std::string_view, bool>> MissingSymbols;
+ MissingSymbols.reserve(MissingSymbolIndexes.size());
+ ORC_RT_DEBUG({
+ printdbg("requesting push of %i missing symbols...\n",
+ MissingSymbolIndexes.size());
+ });
+ for (auto MissingIdx : MissingSymbolIndexes)
+ MissingSymbols.push_back(Symbols[MissingIdx]);
+
+ JDStatesLock.unlock();
+ if (auto Err = requestPushSymbols(
+ JDS, {MissingSymbols.data(), MissingSymbols.size()}))
+ return Err;
+ JDStatesLock.lock();
+
+ // Try to resolve the previously missing symbols locally.
+ std::vector<size_t> MissingRequiredSymbols;
+ for (auto MissingIdx : MissingSymbolIndexes) {
+ auto I = JDS.SymbolTable.find(Symbols[MissingIdx].first);
+ if (I != JDS.SymbolTable.end())
+ Result[MissingIdx] = I->second;
+ else {
+ if (Symbols[MissingIdx].second)
+ MissingRequiredSymbols.push_back(MissingIdx);
+ else
+ Result[MissingIdx] = {ExecutorAddr(), {}};
+ }
+ }
- // If this entry hasn't been created yet.
- if (!JDS.Header) {
- assert(!JDNameToHeader.count(MOJDIs.Name) &&
- "JITDylib has header map entry but no name map entry");
- JDNameToHeader[MOJDIs.Name] = Header;
- JDS.Header = Header;
+ // Error out if any missing symbols could not be resolved.
+ if (!MissingRequiredSymbols.empty()) {
+ std::ostringstream ErrStream;
+ ErrStream << "Lookup could not find required symbols: [ ";
+ for (auto MissingIdx : MissingRequiredSymbols)
+ ErrStream << "\"" << Symbols[MissingIdx].first << "\" ";
+ ErrStream << "]";
+ return make_error<StringError>(ErrStream.str());
}
- return JDS;
+ return Error::success();
}
-Error MachOPlatformRuntimeState::registerThreadDataSection(
- span<const char> ThreadDataSection) {
- std::lock_guard<std::mutex> Lock(ThreadDataSectionsMutex);
- auto I = ThreadDataSections.upper_bound(ThreadDataSection.data());
- if (I != ThreadDataSections.begin()) {
- auto J = std::prev(I);
- if (J->first + J->second > ThreadDataSection.data())
- return make_error<StringError>("Overlapping __thread_data sections");
+// eh-frame registration functions.
+// We expect these to be available for all processes.
+extern "C" void __register_frame(const void *);
+extern "C" void __deregister_frame(const void *);
+
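+// Each CFI record in __eh_frame (as walked below) is laid out as:
+//
+//   uint32_t Length;            // 0xffffffff means a 64-bit length follows
+//   [uint64_t ExtendedLength;]  // present only if Length == 0xffffffff
+//   uint32_t CIEPointerOrZero;  // 0 for a CIE, nonzero offset for an FDE
+//   ...                         // remainder of the CIE/FDE body
+//
+// A zero Length terminates the section. Only FDE records (nonzero offset
+// field) are passed to HandleFDE; CIEs are skipped.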
+template <typename HandleFDEFn>
+void walkEHFrameSection(span<const char> EHFrameSection,
+ HandleFDEFn HandleFDE) {
+ const char *CurCFIRecord = EHFrameSection.data();
+ uint64_t Size = *reinterpret_cast<const uint32_t *>(CurCFIRecord);
+
+ while (CurCFIRecord != EHFrameSection.end() && Size != 0) {
+ const char *OffsetField = CurCFIRecord + (Size == 0xffffffff ? 12 : 4);
+ if (Size == 0xffffffff)
+ Size = *reinterpret_cast<const uint64_t *>(CurCFIRecord + 4) + 12;
+ else
+ Size += 4;
+ uint32_t Offset = *reinterpret_cast<const uint32_t *>(OffsetField);
+
+ if (Offset != 0)
+ HandleFDE(CurCFIRecord);
+
+ CurCFIRecord += Size;
+ Size = *reinterpret_cast<const uint32_t *>(CurCFIRecord);
}
- ThreadDataSections.insert(
- I, std::make_pair(ThreadDataSection.data(), ThreadDataSection.size()));
- return Error::success();
}
-Expected<ExecutorAddress>
-MachOPlatformRuntimeState::lookupSymbolInJITDylib(void *DSOHandle,
- string_view Sym) {
- Expected<ExecutorAddress> Result((ExecutorAddress()));
- if (auto Err = WrapperFunction<SPSExpected<SPSExecutorAddress>(
- SPSExecutorAddress,
- SPSString)>::call(&__orc_rt_macho_symbol_lookup_tag, Result,
- ExecutorAddress::fromPtr(DSOHandle), Sym))
- return std::move(Err);
- return Result;
+bool MachOPlatformRuntimeState::lookupUnwindSections(
+ void *Addr, unw_dynamic_unwind_sections &Info) {
+ ORC_RT_DEBUG(
+ { printdbg("Tried to lookup unwind-info via new lookup call.\n"); });
+ std::lock_guard<std::mutex> Lock(JDStatesMutex);
+ for (auto &KV : JDStates) {
+ auto &JD = KV.second;
+ auto I = JD.UnwindSections.find(reinterpret_cast<char *>(Addr));
+ if (I != JD.UnwindSections.end()) {
+ Info.dso_base = reinterpret_cast<uintptr_t>(JD.Header);
+ Info.dwarf_section =
+ reinterpret_cast<uintptr_t>(I->second.DwarfSection.data());
+ Info.dwarf_section_length = I->second.DwarfSection.size();
+ Info.compact_unwind_section =
+ reinterpret_cast<uintptr_t>(I->second.CompactUnwindSection.data());
+ Info.compact_unwind_section_length =
+ I->second.CompactUnwindSection.size();
+ return true;
+ }
+ }
+ return false;
}
-Expected<MachOJITDylibInitializerSequence>
-MachOPlatformRuntimeState::getJITDylibInitializersByName(string_view Path) {
- Expected<MachOJITDylibInitializerSequence> Result(
- (MachOJITDylibInitializerSequence()));
- std::string PathStr(Path.data(), Path.size());
- if (auto Err =
- WrapperFunction<SPSExpected<SPSMachOJITDylibInitializerSequence>(
- SPSString)>::call(&__orc_rt_macho_get_initializers_tag, Result,
- Path))
- return std::move(Err);
- return Result;
+int MachOPlatformRuntimeState::findDynamicUnwindSections(
+ uintptr_t addr, unw_dynamic_unwind_sections *info) {
+ if (!info)
+ return 0;
+ return MachOPlatformRuntimeState::get().lookupUnwindSections((void *)addr,
+ *info);
+}
+
+Error MachOPlatformRuntimeState::registerEHFrames(
+ span<const char> EHFrameSection) {
+ walkEHFrameSection(EHFrameSection, __register_frame);
+ return Error::success();
+}
+
+Error MachOPlatformRuntimeState::deregisterEHFrames(
+ span<const char> EHFrameSection) {
+ walkEHFrameSection(EHFrameSection, __deregister_frame);
+ return Error::success();
}
-Expected<void *> MachOPlatformRuntimeState::dlopenInitialize(string_view Path,
- int Mode) {
- // Either our JITDylib wasn't loaded, or it or one of its dependencies allows
- // reinitialization. We need to call in to the JIT to see if there's any new
- // work pending.
- auto InitSeq = getJITDylibInitializersByName(Path);
- if (!InitSeq)
- return InitSeq.takeError();
+Error MachOPlatformRuntimeState::registerObjCRegistrationObjects(
+ JITDylibState &JDS) {
+ ORC_RT_DEBUG(printdbg("Registering Objective-C / Swift metadata.\n"));
- // Init sequences should be non-empty.
- if (InitSeq->empty())
+ std::vector<char *> RegObjBases;
+ JDS.ObjCRuntimeRegistrationObjects.processNewSections(
+ [&](span<char> RegObj) { RegObjBases.push_back(RegObj.data()); });
+
+ if (RegObjBases.empty())
+ return Error::success();
+
+ if (!_objc_map_images || !_objc_load_image)
return make_error<StringError>(
- "__orc_rt_macho_get_initializers returned an "
- "empty init sequence");
+ "Could not register Objective-C / Swift metadata: _objc_map_images / "
+ "_objc_load_image not found");
+
+ std::vector<char *> Paths;
+ Paths.resize(RegObjBases.size());
+ _objc_map_images(RegObjBases.size(), Paths.data(),
+ reinterpret_cast<mach_header **>(RegObjBases.data()));
+
+ for (void *RegObjBase : RegObjBases)
+ _objc_load_image(nullptr, reinterpret_cast<mach_header *>(RegObjBase));
+
+ return Error::success();
+}
+
+Error MachOPlatformRuntimeState::runModInits(
+ std::unique_lock<std::mutex> &JDStatesLock, JITDylibState &JDS) {
+ std::vector<span<void (*)()>> InitSections;
+ InitSections.reserve(JDS.ModInitsSections.numNewSections());
+
+ // Copy initializer sections: If the JITDylib is unsealed then the
+ // initializers could reach back into the JIT and cause more initializers to
+ // be added.
+ // FIXME: Skip unlock and run in-place on sealed JITDylibs?
+ JDS.ModInitsSections.processNewSections(
+ [&](span<void (*)()> Inits) { InitSections.push_back(Inits); });
- // Otherwise register and run initializers for each JITDylib.
- for (auto &MOJDIs : *InitSeq)
- if (auto Err = initializeJITDylib(MOJDIs))
+ JDStatesLock.unlock();
+ for (auto InitSec : InitSections)
+ for (auto *Init : InitSec)
+ Init();
+ JDStatesLock.lock();
+
+ return Error::success();
+}
+
+Expected<void *> MachOPlatformRuntimeState::dlopenImpl(std::string_view Path,
+ int Mode) {
+ std::unique_lock<std::mutex> Lock(JDStatesMutex);
+
+ // Try to find JITDylib state by name.
+ auto *JDS = getJITDylibStateByName(Path);
+
+ if (!JDS)
+ return make_error<StringError>("No registered JTIDylib for path " +
+ std::string(Path.data(), Path.size()));
+
+  // If this JITDylib is unsealed, or if this is the first dlopen, then run
+  // the full dlopen path (update deps, push and run initializers, update ref
+ // counts on all JITDylibs in the dep tree).
+ if (!JDS->referenced() || !JDS->Sealed) {
+ if (auto Err = dlopenFull(Lock, *JDS))
return std::move(Err);
+ }
+
+ // Bump the ref-count on this dylib.
+ ++JDS->DlRefCount;
- // Return the header for the last item in the list.
- auto *JDS = getJITDylibStateByHeaderAddr(
- InitSeq->back().MachOHeaderAddress.toPtr<void *>());
- assert(JDS && "Missing state entry for JD");
+ // Return the header address.
return JDS->Header;
}
-Error MachOPlatformRuntimeState::initializeJITDylib(
- MachOJITDylibInitializers &MOJDIs) {
+Error MachOPlatformRuntimeState::dlopenFull(
+ std::unique_lock<std::mutex> &JDStatesLock, JITDylibState &JDS) {
+ // Call back to the JIT to push the initializers.
+ Expected<MachOJITDylibDepInfoMap> DepInfo((MachOJITDylibDepInfoMap()));
+ // Unlock so that we can accept the initializer update.
+ JDStatesLock.unlock();
+ if (auto Err = WrapperFunction<SPSExpected<SPSMachOJITDylibDepInfoMap>(
+ SPSExecutorAddr)>::call(&__orc_rt_macho_push_initializers_tag,
+ DepInfo, ExecutorAddr::fromPtr(JDS.Header)))
+ return Err;
+ JDStatesLock.lock();
+
+ if (!DepInfo)
+ return DepInfo.takeError();
+
+ if (auto Err = dlopenInitialize(JDStatesLock, JDS, *DepInfo))
+ return Err;
+
+ if (!DepInfo->empty()) {
+ ORC_RT_DEBUG({
+ printdbg("Unrecognized dep-info key headers in dlopen of %s\n",
+ JDS.Name.c_str());
+ });
+ std::ostringstream ErrStream;
+ ErrStream << "Encountered unrecognized dep-info key headers "
+ "while processing dlopen of "
+ << JDS.Name;
+ return make_error<StringError>(ErrStream.str());
+ }
- auto &JDS = getOrCreateJITDylibState(MOJDIs);
- ++JDS.RefCount;
+ return Error::success();
+}
- for (auto &KV : InitSections) {
- const auto &Name = KV.first;
- const auto &Handler = KV.second;
- auto I = MOJDIs.InitSections.find(Name);
- if (I != MOJDIs.InitSections.end()) {
- if (auto Err = Handler(I->second, MOJDIs))
- return Err;
+Error MachOPlatformRuntimeState::dlopenInitialize(
+ std::unique_lock<std::mutex> &JDStatesLock, JITDylibState &JDS,
+ MachOJITDylibDepInfoMap &DepInfo) {
+ ORC_RT_DEBUG({
+ printdbg("MachOPlatformRuntimeState::dlopenInitialize(\"%s\")\n",
+ JDS.Name.c_str());
+ });
+
+ // If the header is not present in the dep map then assume that we
+ // already processed it earlier in the dlopenInitialize traversal and
+ // return.
+ // TODO: Keep a visited set instead so that we can error out on missing
+ // entries?
+ auto I = DepInfo.find(ExecutorAddr::fromPtr(JDS.Header));
+ if (I == DepInfo.end())
+ return Error::success();
+
+ auto DI = std::move(I->second);
+ DepInfo.erase(I);
+
+ // We don't need to re-initialize sealed JITDylibs that have already been
+ // initialized. Just check that their dep-map entry is empty as expected.
+ if (JDS.Sealed) {
+ if (!DI.DepHeaders.empty()) {
+ std::ostringstream ErrStream;
+ ErrStream << "Sealed JITDylib " << JDS.Header
+ << " already has registered dependencies";
+ return make_error<StringError>(ErrStream.str());
+ }
+ if (JDS.referenced())
+ return Error::success();
+ } else
+ JDS.Sealed = DI.Sealed;
+
+ // This is an unsealed or newly sealed JITDylib. Run initializers.
+ std::vector<JITDylibState *> OldDeps;
+ std::swap(JDS.Deps, OldDeps);
+ JDS.Deps.reserve(DI.DepHeaders.size());
+ for (auto DepHeaderAddr : DI.DepHeaders) {
+ auto *DepJDS = getJITDylibStateByHeader(DepHeaderAddr.toPtr<void *>());
+ if (!DepJDS) {
+ std::ostringstream ErrStream;
+ ErrStream << "Encountered unrecognized dep header "
+ << DepHeaderAddr.toPtr<void *>() << " while initializing "
+ << JDS.Name;
+ return make_error<StringError>(ErrStream.str());
}
+ ++DepJDS->LinkedAgainstRefCount;
+ if (auto Err = dlopenInitialize(JDStatesLock, *DepJDS, DepInfo))
+ return Err;
+ }
+
+ // Initialize this JITDylib.
+ if (auto Err = registerObjCRegistrationObjects(JDS))
+ return Err;
+ if (auto Err = runModInits(JDStatesLock, JDS))
+ return Err;
+
+ // Decrement old deps.
+ // FIXME: We should probably continue and just report deinitialize errors
+ // here.
+ for (auto *DepJDS : OldDeps) {
+ --DepJDS->LinkedAgainstRefCount;
+ if (!DepJDS->referenced())
+ if (auto Err = dlcloseDeinitialize(JDStatesLock, *DepJDS))
+ return Err;
+ }
+
+ return Error::success();
+}
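// Note on the two counters used above: DlRefCount counts explicit
// dlopen/dlclose pairs, while LinkedAgainstRefCount counts incoming
// dependency edges from other JITDylibs. A plausible sketch of the
// referenced() predicate they feed (hypothetical -- the real definition
// lives in the JITDylibState declaration, which is not shown in this
// diff):
//
//   bool referenced() const {
//     return DlRefCount > 0 || LinkedAgainstRefCount > 0;
//   }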
+
+Error MachOPlatformRuntimeState::dlcloseImpl(void *DSOHandle) {
+ std::unique_lock<std::mutex> Lock(JDStatesMutex);
+
+ // Try to find JITDylib state by header.
+ auto *JDS = getJITDylibStateByHeader(DSOHandle);
+
+ if (!JDS) {
+ std::ostringstream ErrStream;
+ ErrStream << "No registered JITDylib for " << DSOHandle;
+ return make_error<StringError>(ErrStream.str());
+ }
+
+ // Decrement the ref-count.
+ --JDS->DlRefCount;
+
+ if (!JDS->referenced())
+ return dlcloseDeinitialize(Lock, *JDS);
+
+ return Error::success();
+}
+
+Error MachOPlatformRuntimeState::dlcloseDeinitialize(
+ std::unique_lock<std::mutex> &JDStatesLock, JITDylibState &JDS) {
+
+ ORC_RT_DEBUG({
+ printdbg("MachOPlatformRuntimeState::dlcloseDeinitialize(\"%s\")\n",
+ JDS.Name.c_str());
+ });
+
+ runAtExits(JDStatesLock, JDS);
+
+ // Reset mod-inits
+ JDS.ModInitsSections.reset();
+
+ // Reset data section contents.
+ for (auto &KV : JDS.DataSectionContent)
+ memcpy(KV.first, KV.second.data(), KV.second.size());
+ for (auto &KV : JDS.ZeroInitRanges)
+ memset(KV.first, 0, KV.second);
+
+ // Deinitialize any dependencies.
+ for (auto *DepJDS : JDS.Deps) {
+ --DepJDS->LinkedAgainstRefCount;
+ if (!DepJDS->referenced())
+ if (auto Err = dlcloseDeinitialize(JDStatesLock, *DepJDS))
+ return Err;
}
return Error::success();
@@ -589,48 +1326,130 @@ void destroyMachOTLVMgr(void *MachOTLVMgr) {
delete static_cast<MachOPlatformRuntimeTLVManager *>(MachOTLVMgr);
}
+Error runWrapperFunctionCalls(std::vector<WrapperFunctionCall> WFCs) {
+ for (auto &WFC : WFCs)
+ if (auto Err = WFC.runWithSPSRet<void>())
+ return Err;
+ return Error::success();
+}
+
} // end anonymous namespace
//------------------------------------------------------------------------------
// JIT entry points
//------------------------------------------------------------------------------
-ORC_RT_INTERFACE __orc_rt_CWrapperFunctionResult
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
__orc_rt_macho_platform_bootstrap(char *ArgData, size_t ArgSize) {
- MachOPlatformRuntimeState::initialize();
- return WrapperFunctionResult().release();
+ return WrapperFunction<SPSError()>::handle(
+ ArgData, ArgSize,
+ []() { return MachOPlatformRuntimeState::create(); })
+ .release();
}
-ORC_RT_INTERFACE __orc_rt_CWrapperFunctionResult
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
__orc_rt_macho_platform_shutdown(char *ArgData, size_t ArgSize) {
- MachOPlatformRuntimeState::destroy();
- return WrapperFunctionResult().release();
+ return WrapperFunction<SPSError()>::handle(
+ ArgData, ArgSize,
+ []() { return MachOPlatformRuntimeState::destroy(); })
+ .release();
}
-/// Wrapper function for registering metadata on a per-object basis.
-ORC_RT_INTERFACE __orc_rt_CWrapperFunctionResult
-__orc_rt_macho_register_object_sections(char *ArgData, size_t ArgSize) {
- return WrapperFunction<SPSError(SPSMachOPerObjectSectionsToRegister)>::handle(
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
+__orc_rt_macho_register_jitdylib(char *ArgData, size_t ArgSize) {
+ return WrapperFunction<SPSError(SPSString, SPSExecutorAddr)>::handle(
ArgData, ArgSize,
- [](MachOPerObjectSectionsToRegister &POSR) {
- return MachOPlatformRuntimeState::get().registerObjectSections(
- std::move(POSR));
+ [](std::string &Name, ExecutorAddr HeaderAddr) {
+ return MachOPlatformRuntimeState::get().registerJITDylib(
+ std::move(Name), HeaderAddr.toPtr<void *>());
})
.release();
}
-/// Wrapper for releasing per-object metadat.
-ORC_RT_INTERFACE __orc_rt_CWrapperFunctionResult
-__orc_rt_macho_deregister_object_sections(char *ArgData, size_t ArgSize) {
- return WrapperFunction<SPSError(SPSMachOPerObjectSectionsToRegister)>::handle(
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
+__orc_rt_macho_deregister_jitdylib(char *ArgData, size_t ArgSize) {
+ return WrapperFunction<SPSError(SPSExecutorAddr)>::handle(
ArgData, ArgSize,
- [](MachOPerObjectSectionsToRegister &POSR) {
- return MachOPlatformRuntimeState::get().deregisterObjectSections(
- std::move(POSR));
+ [](ExecutorAddr HeaderAddr) {
+ return MachOPlatformRuntimeState::get().deregisterJITDylib(
+ HeaderAddr.toPtr<void *>());
})
.release();
}
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
+__orc_rt_macho_register_object_platform_sections(char *ArgData,
+ size_t ArgSize) {
+ return WrapperFunction<SPSError(SPSExecutorAddr,
+ SPSOptional<SPSUnwindSectionInfo>,
+ SPSMachOObjectPlatformSectionsMap)>::
+ handle(ArgData, ArgSize,
+ [](ExecutorAddr HeaderAddr, std::optional<UnwindSectionInfo> USI,
+ std::vector<std::pair<std::string_view, ExecutorAddrRange>>
+ &Secs) {
+ return MachOPlatformRuntimeState::get()
+ .registerObjectPlatformSections(HeaderAddr, std::move(USI),
+ std::move(Secs));
+ })
+ .release();
+}
+
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
+__orc_rt_macho_register_object_symbol_table(char *ArgData, size_t ArgSize) {
+ using SymtabContainer = std::vector<
+ std::tuple<ExecutorAddr, ExecutorAddr,
+ MachOPlatformRuntimeState::MachOExecutorSymbolFlags>>;
+ return WrapperFunction<SPSError(
+ SPSExecutorAddr, SPSSequence<SPSTuple<SPSExecutorAddr, SPSExecutorAddr,
+ SPSMachOExecutorSymbolFlags>>)>::
+ handle(ArgData, ArgSize,
+ [](ExecutorAddr HeaderAddr, SymtabContainer &Symbols) {
+ return MachOPlatformRuntimeState::get()
+ .registerObjectSymbolTable(HeaderAddr, Symbols);
+ })
+ .release();
+}
+
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
+__orc_rt_macho_deregister_object_symbol_table(char *ArgData, size_t ArgSize) {
+ using SymtabContainer = std::vector<
+ std::tuple<ExecutorAddr, ExecutorAddr,
+ MachOPlatformRuntimeState::MachOExecutorSymbolFlags>>;
+ return WrapperFunction<SPSError(
+ SPSExecutorAddr, SPSSequence<SPSTuple<SPSExecutorAddr, SPSExecutorAddr,
+ SPSMachOExecutorSymbolFlags>>)>::
+ handle(ArgData, ArgSize,
+ [](ExecutorAddr HeaderAddr, SymtabContainer &Symbols) {
+ return MachOPlatformRuntimeState::get()
+ .deregisterObjectSymbolTable(HeaderAddr, Symbols);
+ })
+ .release();
+}
+
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
+__orc_rt_macho_deregister_object_platform_sections(char *ArgData,
+ size_t ArgSize) {
+ return WrapperFunction<SPSError(SPSExecutorAddr,
+ SPSOptional<SPSUnwindSectionInfo>,
+ SPSMachOObjectPlatformSectionsMap)>::
+ handle(ArgData, ArgSize,
+ [](ExecutorAddr HeaderAddr, std::optional<UnwindSectionInfo> USI,
+ std::vector<std::pair<std::string_view, ExecutorAddrRange>>
+ &Secs) {
+ return MachOPlatformRuntimeState::get()
+ .deregisterObjectPlatformSections(HeaderAddr, std::move(USI),
+ std::move(Secs));
+ })
+ .release();
+}
+
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
+__orc_rt_macho_run_wrapper_function_calls(char *ArgData, size_t ArgSize) {
+ return WrapperFunction<SPSError(SPSSequence<SPSWrapperFunctionCall>)>::handle(
+ ArgData, ArgSize, runWrapperFunctionCalls)
+ .release();
+}
+
//------------------------------------------------------------------------------
// TLV support
//------------------------------------------------------------------------------
@@ -650,7 +1469,7 @@ ORC_RT_INTERFACE void *__orc_rt_macho_tlv_get_addr_impl(TLVDescriptor *D) {
reinterpret_cast<char *>(static_cast<uintptr_t>(D->DataAddress)));
}
-ORC_RT_INTERFACE __orc_rt_CWrapperFunctionResult
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
__orc_rt_macho_create_pthread_key(char *ArgData, size_t ArgSize) {
return WrapperFunction<SPSExpected<uint64_t>(void)>::handle(
ArgData, ArgSize,
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/macho_platform.h b/contrib/llvm-project/compiler-rt/lib/orc/macho_platform.h
index 6c05e844b0cd..3b2242ab27ce 100644
--- a/contrib/llvm-project/compiler-rt/lib/orc/macho_platform.h
+++ b/contrib/llvm-project/compiler-rt/lib/orc/macho_platform.h
@@ -31,34 +31,6 @@ ORC_RT_INTERFACE void *__orc_rt_macho_jit_dlsym(void *dso_handle,
namespace __orc_rt {
namespace macho {
-struct MachOPerObjectSectionsToRegister {
- ExecutorAddressRange EHFrameSection;
- ExecutorAddressRange ThreadDataSection;
-};
-
-struct MachOJITDylibInitializers {
- using SectionList = std::vector<ExecutorAddressRange>;
-
- MachOJITDylibInitializers() = default;
- MachOJITDylibInitializers(std::string Name,
- ExecutorAddress MachOHeaderAddress)
- : Name(std::move(Name)),
- MachOHeaderAddress(std::move(MachOHeaderAddress)) {}
-
- std::string Name;
- ExecutorAddress MachOHeaderAddress;
- ExecutorAddress ObjCImageInfoAddress;
-
- std::unordered_map<std::string, SectionList> InitSections;
-};
-
-class MachOJITDylibDeinitializers {};
-
-using MachOJITDylibInitializerSequence = std::vector<MachOJITDylibInitializers>;
-
-using MachOJITDylibDeinitializerSequence =
- std::vector<MachOJITDylibDeinitializers>;
-
enum dlopen_mode : int {
ORC_RT_RTLD_LAZY = 0x1,
ORC_RT_RTLD_NOW = 0x2,
@@ -67,69 +39,6 @@ enum dlopen_mode : int {
};
} // end namespace macho
-
-using SPSMachOPerObjectSectionsToRegister =
- SPSTuple<SPSExecutorAddressRange, SPSExecutorAddressRange>;
-
-template <>
-class SPSSerializationTraits<SPSMachOPerObjectSectionsToRegister,
- macho::MachOPerObjectSectionsToRegister> {
-
-public:
- static size_t size(const macho::MachOPerObjectSectionsToRegister &MOPOSR) {
- return SPSMachOPerObjectSectionsToRegister::AsArgList::size(
- MOPOSR.EHFrameSection, MOPOSR.ThreadDataSection);
- }
-
- static bool serialize(SPSOutputBuffer &OB,
- const macho::MachOPerObjectSectionsToRegister &MOPOSR) {
- return SPSMachOPerObjectSectionsToRegister::AsArgList::serialize(
- OB, MOPOSR.EHFrameSection, MOPOSR.ThreadDataSection);
- }
-
- static bool deserialize(SPSInputBuffer &IB,
- macho::MachOPerObjectSectionsToRegister &MOPOSR) {
- return SPSMachOPerObjectSectionsToRegister::AsArgList::deserialize(
- IB, MOPOSR.EHFrameSection, MOPOSR.ThreadDataSection);
- }
-};
-
-using SPSNamedExecutorAddressRangeSequenceMap =
- SPSSequence<SPSTuple<SPSString, SPSExecutorAddressRangeSequence>>;
-
-using SPSMachOJITDylibInitializers =
- SPSTuple<SPSString, SPSExecutorAddress, SPSExecutorAddress,
- SPSNamedExecutorAddressRangeSequenceMap>;
-
-using SPSMachOJITDylibInitializerSequence =
- SPSSequence<SPSMachOJITDylibInitializers>;
-
-/// Serialization traits for MachOJITDylibInitializers.
-template <>
-class SPSSerializationTraits<SPSMachOJITDylibInitializers,
- macho::MachOJITDylibInitializers> {
-public:
- static size_t size(const macho::MachOJITDylibInitializers &MOJDIs) {
- return SPSMachOJITDylibInitializers::AsArgList::size(
- MOJDIs.Name, MOJDIs.MachOHeaderAddress, MOJDIs.ObjCImageInfoAddress,
- MOJDIs.InitSections);
- }
-
- static bool serialize(SPSOutputBuffer &OB,
- const macho::MachOJITDylibInitializers &MOJDIs) {
- return SPSMachOJITDylibInitializers::AsArgList::serialize(
- OB, MOJDIs.Name, MOJDIs.MachOHeaderAddress, MOJDIs.ObjCImageInfoAddress,
- MOJDIs.InitSections);
- }
-
- static bool deserialize(SPSInputBuffer &IB,
- macho::MachOJITDylibInitializers &MOJDIs) {
- return SPSMachOJITDylibInitializers::AsArgList::deserialize(
- IB, MOJDIs.Name, MOJDIs.MachOHeaderAddress, MOJDIs.ObjCImageInfoAddress,
- MOJDIs.InitSections);
- }
-};
-
} // end namespace __orc_rt
#endif // ORC_RT_MACHO_PLATFORM_H
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/macho_tlv.arm64.S b/contrib/llvm-project/compiler-rt/lib/orc/macho_tlv.arm64.S
new file mode 100644
index 000000000000..f6eb9fc4da39
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/orc/macho_tlv.arm64.S
@@ -0,0 +1,92 @@
+//===-- macho_tlv.arm64.s ---------------------------------------*- ASM -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of the ORC runtime support library.
+//
+//===----------------------------------------------------------------------===//
+
+// The content of this file is arm64-only
+#if defined(__arm64__) || defined(__aarch64__)
+
+#define REGISTER_SAVE_SPACE_SIZE 32 * 24
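// Frame layout used below (a sketch inferred from the offsets): the
// 768-byte (32 * 24) save area holds x1-x30 as 16-byte pairs in bytes
// 16-255 and q0-q31 as 32-byte pairs in bytes 256-767; bytes 0-15 are
// unused. x0 is deliberately not saved: it carries the TLV descriptor
// argument in and the TLV address out.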
+
+ .text
+
+ // returns address of TLV in x0, all other registers preserved
+ .globl ___orc_rt_macho_tlv_get_addr
+___orc_rt_macho_tlv_get_addr:
+ sub sp, sp, #REGISTER_SAVE_SPACE_SIZE
+ stp x29, x30, [sp, #16 * 1]
+ stp x27, x28, [sp, #16 * 2]
+ stp x25, x26, [sp, #16 * 3]
+ stp x23, x24, [sp, #16 * 4]
+ stp x21, x22, [sp, #16 * 5]
+ stp x19, x20, [sp, #16 * 6]
+ stp x17, x18, [sp, #16 * 7]
+ stp x15, x16, [sp, #16 * 8]
+ stp x13, x14, [sp, #16 * 9]
+ stp x11, x12, [sp, #16 * 10]
+ stp x9, x10, [sp, #16 * 11]
+ stp x7, x8, [sp, #16 * 12]
+ stp x5, x6, [sp, #16 * 13]
+ stp x3, x4, [sp, #16 * 14]
+ stp x1, x2, [sp, #16 * 15]
+ stp q30, q31, [sp, #32 * 8]
+ stp q28, q29, [sp, #32 * 9]
+ stp q26, q27, [sp, #32 * 10]
+ stp q24, q25, [sp, #32 * 11]
+ stp q22, q23, [sp, #32 * 12]
+ stp q20, q21, [sp, #32 * 13]
+ stp q18, q19, [sp, #32 * 14]
+ stp q16, q17, [sp, #32 * 15]
+ stp q14, q15, [sp, #32 * 16]
+ stp q12, q13, [sp, #32 * 17]
+ stp q10, q11, [sp, #32 * 18]
+ stp q8, q9, [sp, #32 * 19]
+ stp q6, q7, [sp, #32 * 20]
+ stp q4, q5, [sp, #32 * 21]
+ stp q2, q3, [sp, #32 * 22]
+ stp q0, q1, [sp, #32 * 23]
+
+ bl ___orc_rt_macho_tlv_get_addr_impl
+
+ ldp q0, q1, [sp, #32 * 23]
+ ldp q2, q3, [sp, #32 * 22]
+ ldp q4, q5, [sp, #32 * 21]
+ ldp q6, q7, [sp, #32 * 20]
+ ldp q8, q9, [sp, #32 * 19]
+ ldp q10, q11, [sp, #32 * 18]
+ ldp q12, q13, [sp, #32 * 17]
+ ldp q14, q15, [sp, #32 * 16]
+ ldp q16, q17, [sp, #32 * 15]
+ ldp q18, q19, [sp, #32 * 14]
+ ldp q20, q21, [sp, #32 * 13]
+ ldp q22, q23, [sp, #32 * 12]
+ ldp q24, q25, [sp, #32 * 11]
+ ldp q26, q27, [sp, #32 * 10]
+ ldp q28, q29, [sp, #32 * 9]
+ ldp q30, q31, [sp, #32 * 8]
+ ldp x1, x2, [sp, #16 * 15]
+ ldp x3, x4, [sp, #16 * 14]
+ ldp x5, x6, [sp, #16 * 13]
+ ldp x7, x8, [sp, #16 * 12]
+ ldp x9, x10, [sp, #16 * 11]
+ ldp x11, x12, [sp, #16 * 10]
+ ldp x13, x14, [sp, #16 * 9]
+ ldp x15, x16, [sp, #16 * 8]
+ ldp x17, x18, [sp, #16 * 7]
+ ldp x19, x20, [sp, #16 * 6]
+ ldp x21, x22, [sp, #16 * 5]
+ ldp x23, x24, [sp, #16 * 4]
+ ldp x25, x26, [sp, #16 * 3]
+ ldp x27, x28, [sp, #16 * 2]
+ ldp x29, x30, [sp, #16 * 1]
+ add sp, sp, #REGISTER_SAVE_SPACE_SIZE
+ ret
+
+#endif // defined(__arm64__) || defined(__aarch64__)
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/macho_tlv.x86-64.S b/contrib/llvm-project/compiler-rt/lib/orc/macho_tlv.x86-64.S
index 0affe403eec2..e3daf23e3029 100644
--- a/contrib/llvm-project/compiler-rt/lib/orc/macho_tlv.x86-64.S
+++ b/contrib/llvm-project/compiler-rt/lib/orc/macho_tlv.x86-64.S
@@ -10,6 +10,9 @@
//
//===----------------------------------------------------------------------===//
+// The content of this file is x86_64-only
+#if defined(__x86_64__)
+
#define REGISTER_SAVE_SPACE_SIZE 512
.text
@@ -66,3 +69,5 @@ ___orc_rt_macho_tlv_get_addr:
addq $REGISTER_SAVE_SPACE_SIZE, %rsp
popq %rbp
ret
+
+#endif // defined(__x86_64__)
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/run_program_wrapper.cpp b/contrib/llvm-project/compiler-rt/lib/orc/run_program_wrapper.cpp
index d0f88534aa9c..24a7b4fc3cbe 100644
--- a/contrib/llvm-project/compiler-rt/lib/orc/run_program_wrapper.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/orc/run_program_wrapper.cpp
@@ -22,14 +22,14 @@ extern "C" int64_t __orc_rt_run_program(const char *JITDylibName,
const char *EntrySymbolName, int argc,
char *argv[]);
-ORC_RT_INTERFACE __orc_rt_CWrapperFunctionResult
+ORC_RT_INTERFACE orc_rt_CWrapperFunctionResult
__orc_rt_run_program_wrapper(const char *ArgData, size_t ArgSize) {
return WrapperFunction<int64_t(SPSString, SPSString,
SPSSequence<SPSString>)>::
handle(ArgData, ArgSize,
[](const std::string &JITDylibName,
const std::string &EntrySymbolName,
- const std::vector<string_view> &Args) {
+ const std::vector<std::string_view> &Args) {
std::vector<std::unique_ptr<char[]>> ArgVStorage;
ArgVStorage.reserve(Args.size());
for (auto &Arg : Args) {
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/simple_packed_serialization.h b/contrib/llvm-project/compiler-rt/lib/orc/simple_packed_serialization.h
index b561a19d8f04..488d2407ddd4 100644
--- a/contrib/llvm-project/compiler-rt/lib/orc/simple_packed_serialization.h
+++ b/contrib/llvm-project/compiler-rt/lib/orc/simple_packed_serialization.h
@@ -39,7 +39,9 @@
#include "error.h"
#include "stl_extras.h"
+#include <limits>
+#include <optional>
#include <string>
+#include <string_view>
#include <tuple>
#include <type_traits>
#include <unordered_map>
@@ -176,7 +178,7 @@ public:
class SPSEmpty {};
/// Represents an address in the executor.
-class SPSExecutorAddress {};
+class SPSExecutorAddr {};
/// SPS tag type for tuples.
///
@@ -188,6 +190,14 @@ public:
typedef SPSArgList<SPSTagTs...> AsArgList;
};
+/// SPS tag type for optionals.
+///
+/// SPSOptionals should be serialized as a bool with true indicating that an
+/// SPSTagT value is present, and false indicating that there is no value.
+/// If the boolean is true then the serialized SPSTagT will follow immediately
+/// after it.
+template <typename SPSTagT> class SPSOptional {};
+
/// SPS tag type for sequences.
///
/// SPSSequences should be serialized as a uint64_t sequence length,
@@ -271,6 +281,13 @@ public:
static constexpr bool available = true;
};
+/// Trivial span<T> -> SPSSequence<SPSElementTagT> serialization.
+template <typename SPSElementTagT, typename T>
+class TrivialSPSSequenceSerialization<SPSElementTagT, span<T>> {
+public:
+ static constexpr bool available = true;
+};
+
/// Trivial SPSSequence<SPSElementTagT> -> std::vector<T> deserialization.
template <typename SPSElementTagT, typename T>
class TrivialSPSSequenceDeserialization<SPSElementTagT, std::vector<T>> {
@@ -354,6 +371,65 @@ public:
}
};
+/// Trivial serialization / deserialization for span<char>
+template <> class SPSSerializationTraits<SPSSequence<char>, span<const char>> {
+public:
+ static size_t size(const span<const char> &S) {
+ return SPSArgList<uint64_t>::size(static_cast<uint64_t>(S.size())) +
+ S.size();
+ }
+ static bool serialize(SPSOutputBuffer &OB, const span<const char> &S) {
+ if (!SPSArgList<uint64_t>::serialize(OB, static_cast<uint64_t>(S.size())))
+ return false;
+ return OB.write(S.data(), S.size());
+ }
+ static bool deserialize(SPSInputBuffer &IB, span<const char> &S) {
+ uint64_t Size;
+ if (!SPSArgList<uint64_t>::deserialize(IB, Size))
+ return false;
+ S = span<const char>(IB.data(), Size);
+ return IB.skip(Size);
+ }
+};
+
+/// SPSTuple serialization for std::tuple.
+template <typename... SPSTagTs, typename... Ts>
+class SPSSerializationTraits<SPSTuple<SPSTagTs...>, std::tuple<Ts...>> {
+private:
+ using TupleArgList = typename SPSTuple<SPSTagTs...>::AsArgList;
+ using ArgIndices = std::make_index_sequence<sizeof...(Ts)>;
+
+ template <std::size_t... I>
+ static size_t size(const std::tuple<Ts...> &T, std::index_sequence<I...>) {
+ return TupleArgList::size(std::get<I>(T)...);
+ }
+
+ template <std::size_t... I>
+ static bool serialize(SPSOutputBuffer &OB, const std::tuple<Ts...> &T,
+ std::index_sequence<I...>) {
+ return TupleArgList::serialize(OB, std::get<I>(T)...);
+ }
+
+ template <std::size_t... I>
+ static bool deserialize(SPSInputBuffer &IB, std::tuple<Ts...> &T,
+ std::index_sequence<I...>) {
+ return TupleArgList::deserialize(IB, std::get<I>(T)...);
+ }
+
+public:
+ static size_t size(const std::tuple<Ts...> &T) {
+ return size(T, ArgIndices{});
+ }
+
+ static bool serialize(SPSOutputBuffer &OB, const std::tuple<Ts...> &T) {
+ return serialize(OB, T, ArgIndices{});
+ }
+
+ static bool deserialize(SPSInputBuffer &IB, std::tuple<Ts...> &T) {
+ return deserialize(IB, T, ArgIndices{});
+ }
+};
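// The std::index_sequence dispatch used by the traits above, shown in
// isolation -- a minimal, self-contained C++17 sketch (not part of the
// SPS API): expand a std::tuple into an argument pack at compile time.

#include <cstddef>
#include <tuple>
#include <utility>

template <typename Tuple, std::size_t... I>
constexpr auto sumImpl(const Tuple &T, std::index_sequence<I...>) {
  return (std::get<I>(T) + ...); // fold over the expanded indices
}

template <typename... Ts>
constexpr auto sumTuple(const std::tuple<Ts...> &T) {
  return sumImpl(T, std::make_index_sequence<sizeof...(Ts)>{});
}

static_assert(sumTuple(std::tuple{1, 2, 3}) == 6);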
+
/// SPSTuple serialization for std::pair.
template <typename SPSTagT1, typename SPSTagT2, typename T1, typename T2>
class SPSSerializationTraits<SPSTuple<SPSTagT1, SPSTagT2>, std::pair<T1, T2>> {
@@ -374,32 +450,66 @@ public:
}
};
+/// SPSOptional serialization for std::optional.
+template <typename SPSTagT, typename T>
+class SPSSerializationTraits<SPSOptional<SPSTagT>, std::optional<T>> {
+public:
+ static size_t size(const std::optional<T> &Value) {
+ size_t Size = SPSArgList<bool>::size(!!Value);
+ if (Value)
+ Size += SPSArgList<SPSTagT>::size(*Value);
+ return Size;
+ }
+
+ static bool serialize(SPSOutputBuffer &OB, const std::optional<T> &Value) {
+ if (!SPSArgList<bool>::serialize(OB, !!Value))
+ return false;
+ if (Value)
+ return SPSArgList<SPSTagT>::serialize(OB, *Value);
+ return true;
+ }
+
+ static bool deserialize(SPSInputBuffer &IB, std::optional<T> &Value) {
+ bool HasValue;
+ if (!SPSArgList<bool>::deserialize(IB, HasValue))
+ return false;
+ if (HasValue) {
+ Value = T();
+ return SPSArgList<SPSTagT>::deserialize(IB, *Value);
+ } else
+ Value = std::optional<T>();
+ return true;
+ }
+};
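// Wire-format illustration for SPSOptional: a standalone sketch that
// mimics the bool-then-value layout with raw bytes. The real payload
// encoding is delegated to SPSArgList<SPSTagT> above and may differ in
// detail (e.g. integer endianness).

#include <cstdint>
#include <cstring>
#include <optional>
#include <vector>

inline std::vector<char> encodeOptionalU32(std::optional<uint32_t> V) {
  std::vector<char> Buf;
  Buf.push_back(V.has_value() ? 1 : 0); // presence flag comes first
  if (V) {
    char Tmp[sizeof(uint32_t)];
    std::memcpy(Tmp, &*V, sizeof(Tmp)); // host byte order, for illustration
    Buf.insert(Buf.end(), Tmp, Tmp + sizeof(Tmp));
  }
  return Buf; // 1 byte when empty, 5 bytes when a value is present
}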
+
/// Serialization for string_views.
///
/// Serialization is as for regular strings. Deserialization points directly
/// into the blob.
-template <> class SPSSerializationTraits<SPSString, __orc_rt::string_view> {
+template <> class SPSSerializationTraits<SPSString, std::string_view> {
public:
- static size_t size(const __orc_rt::string_view &S) {
+ static size_t size(const std::string_view &S) {
return SPSArgList<uint64_t>::size(static_cast<uint64_t>(S.size())) +
S.size();
}
- static bool serialize(SPSOutputBuffer &OB, const __orc_rt::string_view &S) {
+ static bool serialize(SPSOutputBuffer &OB, const std::string_view &S) {
if (!SPSArgList<uint64_t>::serialize(OB, static_cast<uint64_t>(S.size())))
return false;
return OB.write(S.data(), S.size());
}
- static bool deserialize(SPSInputBuffer &IB, __orc_rt::string_view &S) {
+ static bool deserialize(SPSInputBuffer &IB, std::string_view &S) {
const char *Data = nullptr;
uint64_t Size;
if (!SPSArgList<uint64_t>::deserialize(IB, Size))
return false;
+ if (Size > std::numeric_limits<size_t>::max())
+ return false;
Data = IB.data();
if (!IB.skip(Size))
return false;
- S = {Data, Size};
+ S = {Data, static_cast<size_t>(Size)};
return true;
}
};
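// Note that string_view deserialization above is zero-copy: the view
// aliases the input blob, so the blob must outlive any deserialized
// string_views.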
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/stl_extras.h b/contrib/llvm-project/compiler-rt/lib/orc/stl_extras.h
index ad7286e87ae3..80a6cd13ac28 100644
--- a/contrib/llvm-project/compiler-rt/lib/orc/stl_extras.h
+++ b/contrib/llvm-project/compiler-rt/lib/orc/stl_extras.h
@@ -13,32 +13,31 @@
#ifndef ORC_RT_STL_EXTRAS_H
#define ORC_RT_STL_EXTRAS_H
+#include <cstdint>
#include <utility>
#include <tuple>
namespace __orc_rt {
-namespace detail {
-
-template <typename F, typename Tuple, std::size_t... I>
-decltype(auto) apply_tuple_impl(F &&f, Tuple &&t, std::index_sequence<I...>) {
- return std::forward<F>(f)(std::get<I>(std::forward<Tuple>(t))...);
-}
-
-} // end namespace detail
-
-/// Given an input tuple (a1, a2, ..., an), pass the arguments of the
-/// tuple variadically to f as if by calling f(a1, a2, ..., an) and
-/// return the result.
-///
-/// FIXME: Switch to std::apply once we can use c++17.
-template <typename F, typename Tuple>
-decltype(auto) apply_tuple(F &&f, Tuple &&t) {
- using Indices = std::make_index_sequence<
- std::tuple_size<typename std::decay<Tuple>::type>::value>;
-
- return detail::apply_tuple_impl(std::forward<F>(f), std::forward<Tuple>(t),
- Indices{});
+/// Substitute for std::identity.
+/// Switch to std::identity once we can use c++20.
+template <class Ty> struct identity {
+ using is_transparent = void;
+ using argument_type = Ty;
+
+ Ty &operator()(Ty &self) const { return self; }
+ const Ty &operator()(const Ty &self) const { return self; }
+};
+
+/// Substitute for std::bit_ceil: rounds up to the nearest power of two.
+constexpr uint64_t bit_ceil(uint64_t Val) noexcept {
+  if (Val < 2)
+    return 1;
+  --Val; // Decrement first so that exact powers of two map to themselves.
+  Val |= (Val >> 1);
+  Val |= (Val >> 2);
+  Val |= (Val >> 4);
+  Val |= (Val >> 8);
+  Val |= (Val >> 16);
+  Val |= (Val >> 32);
+  return Val + 1;
}
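// Spot checks of the rounding behavior above, verified at compile time:
static_assert(bit_ceil(0) == 1);
static_assert(bit_ceil(1) == 1);
static_assert(bit_ceil(5) == 8);
static_assert(bit_ceil(64) == 64);
static_assert(bit_ceil(65) == 128);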
} // namespace __orc_rt
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/string_pool.h b/contrib/llvm-project/compiler-rt/lib/orc/string_pool.h
new file mode 100644
index 000000000000..c0ba4ea8980e
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/orc/string_pool.h
@@ -0,0 +1,172 @@
+//===------- string_pool.h - Thread-safe pool for strings -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Contains a thread-safe string pool. Strings are ref-counted, but not
+// automatically deallocated. Unused entries can be cleared by calling
+// StringPool::clearDeadEntries.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef ORC_RT_STRING_POOL_H
+#define ORC_RT_STRING_POOL_H
+
+#include <atomic>
+#include <cassert>
+#include <functional>
+#include <mutex>
+#include <string>
+#include <unordered_map>
+
+namespace __orc_rt {
+
+class PooledStringPtr;
+
+/// String pool for strings names used by the ORC runtime.
+class StringPool {
+ friend class PooledStringPtr;
+
+public:
+ /// Destroy a StringPool.
+ ~StringPool();
+
+ /// Create a string pointer from the given string.
+ PooledStringPtr intern(std::string S);
+
+ /// Remove from the pool any entries that are no longer referenced.
+ void clearDeadEntries();
+
+ /// Returns true if the pool is empty.
+ bool empty() const;
+
+private:
+ using RefCountType = std::atomic<size_t>;
+ using PoolMap = std::unordered_map<std::string, RefCountType>;
+ using PoolMapEntry = PoolMap::value_type;
+ mutable std::mutex PoolMutex;
+ PoolMap Pool;
+};
+
+/// Pointer to a pooled string.
+class PooledStringPtr {
+ friend class StringPool;
+ friend struct std::hash<PooledStringPtr>;
+
+public:
+ PooledStringPtr() = default;
+ PooledStringPtr(std::nullptr_t) {}
+ PooledStringPtr(const PooledStringPtr &Other) : S(Other.S) {
+ if (S)
+ ++S->second;
+ }
+
+ PooledStringPtr &operator=(const PooledStringPtr &Other) {
+ if (S) {
+ assert(S->second && "Releasing PooledStringPtr with zero ref count");
+ --S->second;
+ }
+ S = Other.S;
+ if (S)
+ ++S->second;
+ return *this;
+ }
+
+ PooledStringPtr(PooledStringPtr &&Other) : S(nullptr) {
+ std::swap(S, Other.S);
+ }
+
+ PooledStringPtr &operator=(PooledStringPtr &&Other) {
+ if (S) {
+ assert(S->second && "Releasing PooledStringPtr with zero ref count");
+ --S->second;
+ }
+ S = nullptr;
+ std::swap(S, Other.S);
+ return *this;
+ }
+
+ ~PooledStringPtr() {
+ if (S) {
+ assert(S->second && "Releasing PooledStringPtr with zero ref count");
+ --S->second;
+ }
+ }
+
+ explicit operator bool() const { return S; }
+
+ const std::string &operator*() const { return S->first; }
+
+ friend bool operator==(const PooledStringPtr &LHS,
+ const PooledStringPtr &RHS) {
+ return LHS.S == RHS.S;
+ }
+
+ friend bool operator!=(const PooledStringPtr &LHS,
+ const PooledStringPtr &RHS) {
+ return !(LHS == RHS);
+ }
+
+ friend bool operator<(const PooledStringPtr &LHS,
+ const PooledStringPtr &RHS) {
+ return LHS.S < RHS.S;
+ }
+
+private:
+ using PoolEntry = StringPool::PoolMapEntry;
+ using PoolEntryPtr = PoolEntry *;
+
+ PooledStringPtr(StringPool::PoolMapEntry *S) : S(S) {
+ if (S)
+ ++S->second;
+ }
+
+ PoolEntryPtr S = nullptr;
+};
+
+inline StringPool::~StringPool() {
+#ifndef NDEBUG
+ clearDeadEntries();
+ assert(Pool.empty() && "Dangling references at pool destruction time");
+#endif // NDEBUG
+}
+
+inline PooledStringPtr StringPool::intern(std::string S) {
+ std::lock_guard<std::mutex> Lock(PoolMutex);
+ PoolMap::iterator I;
+ bool Added;
+ std::tie(I, Added) = Pool.try_emplace(std::move(S), 0);
+ return PooledStringPtr(&*I);
+}
+
+inline void StringPool::clearDeadEntries() {
+ std::lock_guard<std::mutex> Lock(PoolMutex);
+ for (auto I = Pool.begin(), E = Pool.end(); I != E;) {
+ auto Tmp = I++;
+ if (Tmp->second == 0)
+ Pool.erase(Tmp);
+ }
+}
+
+inline bool StringPool::empty() const {
+ std::lock_guard<std::mutex> Lock(PoolMutex);
+ return Pool.empty();
+}
+
+} // end namespace __orc_rt
+
+namespace std {
+
+// Make PooledStringPtrs hashable.
+template <> struct hash<__orc_rt::PooledStringPtr> {
+ size_t operator()(const __orc_rt::PooledStringPtr &A) const {
+ return hash<__orc_rt::PooledStringPtr::PoolEntryPtr>()(A.S);
+ }
+};
+
+} // namespace std
+
+#endif // ORC_RT_STRING_POOL_H
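// A minimal usage sketch for the pool above (assumes this header is
// available as "string_pool.h"):

#include "string_pool.h"
#include <cassert>

int main() {
  __orc_rt::StringPool P;
  {
    auto A = P.intern("hello");
    auto B = P.intern("hello");
    assert(A == B && "Equal strings share one pool entry");
    assert(*A == "hello");
  } // Both PooledStringPtrs released here; the entry's count drops to 0.
  P.clearDeadEntries(); // Reclaims the now-unreferenced entry.
  assert(P.empty());
  return 0;
}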
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/tests/tools/orc-rt-executor.cpp b/contrib/llvm-project/compiler-rt/lib/orc/tests/tools/orc-rt-executor.cpp
new file mode 100644
index 000000000000..da45a2d64d68
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/orc/tests/tools/orc-rt-executor.cpp
@@ -0,0 +1,49 @@
+//===- orc-rt-executor.cpp ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Contains the orc-rt-executor test tool. This is a "blank executable" that
+// links the ORC runtime and can accept code from a JIT controller like lli or
+// llvm-jitlink.
+//
+//===----------------------------------------------------------------------===//
+
+#include <cstring>
+#include <iostream>
+#include <optional>
+#include <string_view>
+
+void printHelp(std::string_view ProgName, std::ostream &OS) {
+ OS << "usage: " << ProgName << " [help] [<mode>] <program arguments>...\n"
+ << " <mode> -- specify how to listen for JIT'd program\n"
+ << " filedesc=<in>,<out> -- read from <in> filedesc, write to out\n"
+ << " tcp=<host>:<port> -- listen on the given host/port\n"
+ << " help -- print help and exit\n"
+ << "\n"
+ << " Notes:\n"
+ << " Program arguments will be made available to the JIT controller.\n"
+ << " When running a JIT'd program containing a main function the\n"
+ << " controller may choose to pass these on to main, however\n"
+ << " orc-rt-executor does not enforce this.\n";
+}
+
+int main(int argc, char *argv[]) {
+ if (argc < 2) {
+ printHelp("orc-rt-executor", std::cerr);
+ std::cerr << "error: insufficient arguments.\n";
+ exit(1);
+ }
+
+ if (!strcmp(argv[1], "help")) {
+ printHelp(argv[0], std::cerr);
+ exit(0);
+ }
+
+ std::cerr << "error: One day I will be a real program, but I am not yet.\n";
+
+ return 0;
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/adt_test.cpp b/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/adt_test.cpp
new file mode 100644
index 000000000000..6625a590e363
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/adt_test.cpp
@@ -0,0 +1,50 @@
+//===-- adt_test.cpp ------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of the ORC runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#include "adt.h"
+#include "gtest/gtest.h"
+
+#include <sstream>
+#include <string>
+
+using namespace __orc_rt;
+
+TEST(ADTTest, SpanDefaultConstruction) {
+ span<int> S;
+ EXPECT_TRUE(S.empty()) << "Default constructed span not empty";
+ EXPECT_EQ(S.size(), 0U) << "Default constructed span size not zero";
+ EXPECT_EQ(S.begin(), S.end()) << "Default constructed span begin != end";
+}
+
+TEST(ADTTest, SpanConstructFromFixedArray) {
+ int A[] = {1, 2, 3, 4, 5};
+ span<int> S(A);
+ EXPECT_FALSE(S.empty()) << "Span should be non-empty";
+ EXPECT_EQ(S.size(), 5U) << "Span has unexpected size";
+ EXPECT_EQ(std::distance(S.begin(), S.end()), 5U)
+ << "Unexpected iterator range size";
+ EXPECT_EQ(S.data(), &A[0]) << "Span data has unexpected value";
+ for (unsigned I = 0; I != S.size(); ++I)
+ EXPECT_EQ(S[I], A[I]) << "Unexpected span element value";
+}
+
+TEST(ADTTest, SpanConstructFromIteratorAndSize) {
+ int A[] = {1, 2, 3, 4, 5};
+ span<int> S(&A[0], 5);
+ EXPECT_FALSE(S.empty()) << "Span should be non-empty";
+ EXPECT_EQ(S.size(), 5U) << "Span has unexpected size";
+ EXPECT_EQ(std::distance(S.begin(), S.end()), 5U)
+ << "Unexpected iterator range size";
+ EXPECT_EQ(S.data(), &A[0]) << "Span data has unexpected value";
+ for (unsigned I = 0; I != S.size(); ++I)
+ EXPECT_EQ(S[I], A[I]) << "Unexpected span element value";
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/bitmask_enum_test.cpp b/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/bitmask_enum_test.cpp
new file mode 100644
index 000000000000..4c27d54fb4a9
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/bitmask_enum_test.cpp
@@ -0,0 +1,143 @@
+//===-- bitmask_enum_test.cpp ----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of the ORC runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#include "bitmask_enum.h"
+#include "gtest/gtest.h"
+
+#include <sstream>
+#include <string>
+
+using namespace __orc_rt;
+
+namespace {
+
+enum Flags { F0 = 0, F1 = 1, F2 = 2, F3 = 4, F4 = 8 };
+
+} // namespace
+
+namespace __orc_rt {
+ORC_RT_DECLARE_ENUM_AS_BITMASK(Flags, F4);
+} // namespace __orc_rt
+
+static_assert(is_bitmask_enum<Flags>::value != 0);
+static_assert(largest_bitmask_enum_bit<Flags>::value == Flags::F4);
+
+namespace {
+
+static_assert(is_bitmask_enum<Flags>::value != 0);
+static_assert(largest_bitmask_enum_bit<Flags>::value == Flags::F4);
+
+TEST(BitmaskEnumTest, BitwiseOr) {
+ Flags f = F1 | F2;
+ EXPECT_EQ(3, f);
+
+ f = f | F3;
+ EXPECT_EQ(7, f);
+}
+
+TEST(BitmaskEnumTest, BitwiseOrEquals) {
+ Flags f = F1;
+ f |= F3;
+ EXPECT_EQ(5, f);
+
+ // |= should return a reference to the LHS.
+ f = F2;
+ (f |= F3) = F1;
+ EXPECT_EQ(F1, f);
+}
+
+TEST(BitmaskEnumTest, BitwiseAnd) {
+ Flags f = static_cast<Flags>(3) & F2;
+ EXPECT_EQ(F2, f);
+
+ f = (f | F3) & (F1 | F2 | F3);
+ EXPECT_EQ(6, f);
+}
+
+TEST(BitmaskEnumTest, BitwiseAndEquals) {
+ Flags f = F1 | F2 | F3;
+ f &= F1 | F2;
+ EXPECT_EQ(3, f);
+
+ // &= should return a reference to the LHS.
+ (f &= F1) = F3;
+ EXPECT_EQ(F3, f);
+}
+
+TEST(BitmaskEnumTest, BitwiseXor) {
+ Flags f = (F1 | F2) ^ (F2 | F3);
+ EXPECT_EQ(5, f);
+
+ f = f ^ F1;
+ EXPECT_EQ(4, f);
+}
+
+TEST(BitmaskEnumTest, BitwiseXorEquals) {
+ Flags f = (F1 | F2);
+ f ^= (F2 | F4);
+ EXPECT_EQ(9, f);
+
+ // ^= should return a reference to the LHS.
+ (f ^= F4) = F3;
+ EXPECT_EQ(F3, f);
+}
+
+TEST(BitmaskEnumTest, ConstantExpression) {
+ constexpr Flags f1 = ~F1;
+ constexpr Flags f2 = F1 | F2;
+ constexpr Flags f3 = F1 & F2;
+ constexpr Flags f4 = F1 ^ F2;
+ EXPECT_EQ(f1, ~F1);
+ EXPECT_EQ(f2, F1 | F2);
+ EXPECT_EQ(f3, F1 & F2);
+ EXPECT_EQ(f4, F1 ^ F2);
+}
+
+TEST(BitmaskEnumTest, BitwiseNot) {
+ Flags f = ~F1;
+ EXPECT_EQ(14, f); // Largest value for f is 15.
+ EXPECT_EQ(15, ~F0);
+}
+
+enum class FlagsClass {
+ F0 = 0,
+ F1 = 1,
+ F2 = 2,
+ F3 = 4,
+ ORC_RT_MARK_AS_BITMASK_ENUM(F3)
+};
+
+TEST(BitmaskEnumTest, ScopedEnum) {
+ FlagsClass f = (FlagsClass::F1 & ~FlagsClass::F0) | FlagsClass::F2;
+ f |= FlagsClass::F3;
+ EXPECT_EQ(7, static_cast<int>(f));
+}
+
+struct Container {
+ enum Flags {
+ F0 = 0,
+ F1 = 1,
+ F2 = 2,
+ F3 = 4,
+ ORC_RT_MARK_AS_BITMASK_ENUM(F3)
+ };
+
+ static Flags getFlags() {
+ Flags f = F0 | F1;
+ f |= F2;
+ return f;
+ }
+};
+
+TEST(BitmaskEnumTest, EnumInStruct) { EXPECT_EQ(3, Container::getFlags()); }
+
+} // namespace
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/c_api_test.cpp b/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/c_api_test.cpp
new file mode 100644
index 000000000000..497cb937e2af
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/c_api_test.cpp
@@ -0,0 +1,200 @@
+//===-- c_api_test.cpp ----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of the ORC runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#include "orc_rt/c_api.h"
+#include "gtest/gtest.h"
+
+TEST(CAPITest, CWrapperFunctionResultInit) {
+ orc_rt_CWrapperFunctionResult R;
+ orc_rt_CWrapperFunctionResultInit(&R);
+
+ EXPECT_EQ(R.Size, 0U);
+ EXPECT_EQ(R.Data.ValuePtr, nullptr);
+
+ // Check that this value isn't treated as an out-of-band error.
+ EXPECT_EQ(orc_rt_CWrapperFunctionResultGetOutOfBandError(&R), nullptr);
+
+ // Check that we can dispose of the value.
+ orc_rt_DisposeCWrapperFunctionResult(&R);
+}
+
+TEST(CAPITest, CWrapperFunctionResultAllocSmall) {
+ constexpr size_t SmallAllocSize = sizeof(const char *);
+
+ auto R = orc_rt_CWrapperFunctionResultAllocate(SmallAllocSize);
+ char *DataPtr = orc_rt_CWrapperFunctionResultData(&R);
+
+ for (size_t I = 0; I != SmallAllocSize; ++I)
+ DataPtr[I] = 0x55 + I;
+
+ // Check that the inline storage in R.Data.Value contains the expected
+ // sequence.
+ EXPECT_EQ(R.Size, SmallAllocSize);
+ for (size_t I = 0; I != SmallAllocSize; ++I)
+ EXPECT_EQ(R.Data.Value[I], (char)(0x55 + I))
+ << "Unexpected value at index " << I;
+
+ // Check that this value isn't treated as an out-of-band error.
+ EXPECT_EQ(orc_rt_CWrapperFunctionResultGetOutOfBandError(&R), nullptr);
+
+ // Check that orc_rt_CWrapperFunctionResult(Data|Result|Size) and
+ // orc_rt_CWrapperFunctionResultGetOutOfBandError behave as expected.
+ EXPECT_EQ(orc_rt_CWrapperFunctionResultData(&R), R.Data.Value);
+ EXPECT_EQ(orc_rt_CWrapperFunctionResultSize(&R), SmallAllocSize);
+ EXPECT_FALSE(orc_rt_CWrapperFunctionResultEmpty(&R));
+ EXPECT_EQ(orc_rt_CWrapperFunctionResultGetOutOfBandError(&R), nullptr);
+
+ // Check that we can dispose of the value.
+ orc_rt_DisposeCWrapperFunctionResult(&R);
+}
+
+TEST(CAPITest, CWrapperFunctionResultAllocLarge) {
+ constexpr size_t LargeAllocSize = sizeof(const char *) + 1;
+
+ auto R = orc_rt_CWrapperFunctionResultAllocate(LargeAllocSize);
+ char *DataPtr = orc_rt_CWrapperFunctionResultData(&R);
+
+ for (size_t I = 0; I != LargeAllocSize; ++I)
+ DataPtr[I] = 0x55 + I;
+
+ // Check that the inline storage in R.Data.Value contains the expected
+ // sequence.
+ EXPECT_EQ(R.Size, LargeAllocSize);
+ EXPECT_EQ(R.Data.ValuePtr, DataPtr);
+ for (size_t I = 0; I != LargeAllocSize; ++I)
+ EXPECT_EQ(R.Data.ValuePtr[I], (char)(0x55 + I))
+ << "Unexpected value at index " << I;
+
+ // Check that this value isn't treated as an out-of-band error.
+ EXPECT_EQ(orc_rt_CWrapperFunctionResultGetOutOfBandError(&R), nullptr);
+
+ // Check that orc_rt_CWrapperFunctionResult(Data|Result|Size) and
+ // orc_rt_CWrapperFunctionResultGetOutOfBandError behave as expected.
+ EXPECT_EQ(orc_rt_CWrapperFunctionResultData(&R), R.Data.ValuePtr);
+ EXPECT_EQ(orc_rt_CWrapperFunctionResultSize(&R), LargeAllocSize);
+ EXPECT_FALSE(orc_rt_CWrapperFunctionResultEmpty(&R));
+ EXPECT_EQ(orc_rt_CWrapperFunctionResultGetOutOfBandError(&R), nullptr);
+
+ // Check that we can dispose of the value.
+ orc_rt_DisposeCWrapperFunctionResult(&R);
+}
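// Approximate layout exercised by these tests (a sketch inferred from
// the field accesses; see orc_rt/c_api.h for the authoritative
// definition): results up to sizeof(char *) bytes live inline in
// Data.Value, while larger ones are heap-allocated and referenced
// through Data.ValuePtr.
//
//   typedef union {
//     char *ValuePtr;
//     char Value[sizeof(char *)];
//   } orc_rt_CWrapperFunctionResultDataUnion;
//
//   typedef struct {
//     orc_rt_CWrapperFunctionResultDataUnion Data;
//     size_t Size;
//   } orc_rt_CWrapperFunctionResult;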
+
+TEST(CAPITest, CWrapperFunctionResultFromRangeSmall) {
+ constexpr size_t SmallAllocSize = sizeof(const char *);
+
+ char Source[SmallAllocSize];
+ for (size_t I = 0; I != SmallAllocSize; ++I)
+ Source[I] = 0x55 + I;
+
+ orc_rt_CWrapperFunctionResult R =
+ orc_rt_CreateCWrapperFunctionResultFromRange(Source, SmallAllocSize);
+
+ // Check that the inline storage in R.Data.Value contains the expected
+ // sequence.
+ EXPECT_EQ(R.Size, SmallAllocSize);
+ for (size_t I = 0; I != SmallAllocSize; ++I)
+ EXPECT_EQ(R.Data.Value[I], (char)(0x55 + I))
+ << "Unexpected value at index " << I;
+
+ // Check that we can dispose of the value.
+ orc_rt_DisposeCWrapperFunctionResult(&R);
+}
+
+TEST(CAPITest, CWrapperFunctionResultFromRangeLarge) {
+ constexpr size_t LargeAllocSize = sizeof(const char *) + 1;
+
+ char Source[LargeAllocSize];
+ for (size_t I = 0; I != LargeAllocSize; ++I)
+ Source[I] = 0x55 + I;
+
+ orc_rt_CWrapperFunctionResult R =
+ orc_rt_CreateCWrapperFunctionResultFromRange(Source, LargeAllocSize);
+
+ // Check that the inline storage in R.Data.Value contains the expected
+ // sequence.
+ EXPECT_EQ(R.Size, LargeAllocSize);
+ for (size_t I = 0; I != LargeAllocSize; ++I)
+ EXPECT_EQ(R.Data.ValuePtr[I], (char)(0x55 + I))
+ << "Unexpected value at index " << I;
+
+ // Check that we can dispose of the value.
+ orc_rt_DisposeCWrapperFunctionResult(&R);
+}
+
+TEST(CAPITest, CWrapperFunctionResultFromStringSmall) {
+ constexpr size_t SmallAllocSize = sizeof(const char *);
+
+ char Source[SmallAllocSize];
+ for (size_t I = 0; I != SmallAllocSize - 1; ++I)
+ Source[I] = 'a' + I;
+ Source[SmallAllocSize - 1] = '\0';
+
+ orc_rt_CWrapperFunctionResult R =
+ orc_rt_CreateCWrapperFunctionResultFromString(Source);
+
+ // Check that the inline storage in R.Data.Value contains the expected
+ // sequence.
+ EXPECT_EQ(R.Size, SmallAllocSize);
+ for (size_t I = 0; I != SmallAllocSize - 1; ++I)
+ EXPECT_EQ(R.Data.Value[I], (char)('a' + I))
+ << "Unexpected value at index " << I;
+ EXPECT_EQ(R.Data.Value[SmallAllocSize - 1], '\0')
+ << "Unexpected value at index " << (SmallAllocSize - 1);
+
+ // Check that we can dispose of the value.
+ orc_rt_DisposeCWrapperFunctionResult(&R);
+}
+
+TEST(CAPITest, CWrapperFunctionResultFromStringLarge) {
+ constexpr size_t LargeAllocSize = sizeof(const char *) + 1;
+
+ char Source[LargeAllocSize];
+ for (size_t I = 0; I != LargeAllocSize - 1; ++I)
+ Source[I] = 'a' + I;
+ Source[LargeAllocSize - 1] = '\0';
+
+ orc_rt_CWrapperFunctionResult R =
+ orc_rt_CreateCWrapperFunctionResultFromString(Source);
+
+ // Check that the inline storage in R.Data.Value contains the expected
+ // sequence.
+ EXPECT_EQ(R.Size, LargeAllocSize);
+ for (size_t I = 0; I != LargeAllocSize - 1; ++I)
+ EXPECT_EQ(R.Data.ValuePtr[I], (char)('a' + I))
+ << "Unexpected value at index " << I;
+ EXPECT_EQ(R.Data.ValuePtr[LargeAllocSize - 1], '\0')
+ << "Unexpected value at index " << (LargeAllocSize - 1);
+
+ // Check that we can dispose of the value.
+ orc_rt_DisposeCWrapperFunctionResult(&R);
+}
+
+TEST(CAPITest, CWrapperFunctionResultFromOutOfBandError) {
+ constexpr const char *ErrMsg = "test error message";
+ orc_rt_CWrapperFunctionResult R =
+ orc_rt_CreateCWrapperFunctionResultFromOutOfBandError(ErrMsg);
+
+#ifndef NDEBUG
+ EXPECT_DEATH({ orc_rt_CWrapperFunctionResultData(&R); },
+ "Cannot get data for out-of-band error value");
+ EXPECT_DEATH({ orc_rt_CWrapperFunctionResultSize(&R); },
+ "Cannot get size for out-of-band error value");
+#endif
+
+ EXPECT_FALSE(orc_rt_CWrapperFunctionResultEmpty(&R));
+ const char *OOBErrMsg = orc_rt_CWrapperFunctionResultGetOutOfBandError(&R);
+ EXPECT_NE(OOBErrMsg, nullptr);
+ EXPECT_NE(OOBErrMsg, ErrMsg);
+ EXPECT_TRUE(strcmp(OOBErrMsg, ErrMsg) == 0);
+
+ orc_rt_DisposeCWrapperFunctionResult(&R);
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/endian_test.cpp b/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/endian_test.cpp
new file mode 100644
index 000000000000..71b677af694c
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/endian_test.cpp
@@ -0,0 +1,174 @@
+//===- endian_test.cpp ------------------------- swap byte order test -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of the ORC runtime.
+//
+// Adapted from the llvm/unittests/Support/SwapByteOrderTest.cpp LLVM unit test.
+//
+//===----------------------------------------------------------------------===//
+
+#include "endianness.h"
+#include "gtest/gtest.h"
+
+using namespace __orc_rt;
+
+TEST(Endian, ByteSwap_32) {
+ EXPECT_EQ(0x44332211u, ByteSwap_32(0x11223344));
+ EXPECT_EQ(0xDDCCBBAAu, ByteSwap_32(0xAABBCCDD));
+}
+
+TEST(Endian, ByteSwap_64) {
+ EXPECT_EQ(0x8877665544332211ULL, ByteSwap_64(0x1122334455667788LL));
+ EXPECT_EQ(0x1100FFEEDDCCBBAAULL, ByteSwap_64(0xAABBCCDDEEFF0011LL));
+}
+
+// In these first two tests all of the original_uintN values are truncated
+// except for the 64-bit one. We could avoid this, but there's really no
+// point.
+TEST(Endian, getSwappedBytes_UnsignedRoundTrip) {
+ // The bit-twiddling of the magic value ensures we test with and without
+ // set bits in every byte.
+ uint64_t value = 1;
+ for (std::size_t i = 0; i <= sizeof(value); ++i) {
+ uint8_t original_uint8 = static_cast<uint8_t>(value);
+ EXPECT_EQ(original_uint8, getSwappedBytes(getSwappedBytes(original_uint8)));
+
+ uint16_t original_uint16 = static_cast<uint16_t>(value);
+ EXPECT_EQ(original_uint16,
+ getSwappedBytes(getSwappedBytes(original_uint16)));
+
+ uint32_t original_uint32 = static_cast<uint32_t>(value);
+ EXPECT_EQ(original_uint32,
+ getSwappedBytes(getSwappedBytes(original_uint32)));
+
+ uint64_t original_uint64 = static_cast<uint64_t>(value);
+ EXPECT_EQ(original_uint64,
+ getSwappedBytes(getSwappedBytes(original_uint64)));
+
+ value = (value << 8) | 0x55; // binary 0101 0101.
+ }
+}
+
+TEST(Endian, getSwappedBytes_SignedRoundTrip) {
+ // The bit-twiddling of the magic value ensures we test with and without
+ // set bits in every byte.
+ uint64_t value = 1;
+ for (std::size_t i = 0; i <= sizeof(value); ++i) {
+ int8_t original_int8 = static_cast<int8_t>(value);
+ EXPECT_EQ(original_int8, getSwappedBytes(getSwappedBytes(original_int8)));
+
+ int16_t original_int16 = static_cast<int16_t>(value);
+ EXPECT_EQ(original_int16, getSwappedBytes(getSwappedBytes(original_int16)));
+
+ int32_t original_int32 = static_cast<int32_t>(value);
+ EXPECT_EQ(original_int32, getSwappedBytes(getSwappedBytes(original_int32)));
+
+ int64_t original_int64 = static_cast<int64_t>(value);
+ EXPECT_EQ(original_int64, getSwappedBytes(getSwappedBytes(original_int64)));
+
+ // Test other sign.
+ value *= -1;
+
+ original_int8 = static_cast<int8_t>(value);
+ EXPECT_EQ(original_int8, getSwappedBytes(getSwappedBytes(original_int8)));
+
+ original_int16 = static_cast<int16_t>(value);
+ EXPECT_EQ(original_int16, getSwappedBytes(getSwappedBytes(original_int16)));
+
+ original_int32 = static_cast<int32_t>(value);
+ EXPECT_EQ(original_int32, getSwappedBytes(getSwappedBytes(original_int32)));
+
+ original_int64 = static_cast<int64_t>(value);
+ EXPECT_EQ(original_int64, getSwappedBytes(getSwappedBytes(original_int64)));
+
+ // Return to normal sign and twiddle.
+ value *= -1;
+ value = (value << 8) | 0x55; // binary 0101 0101.
+ }
+}
+
+TEST(Endian, getSwappedBytes_uint8_t) {
+ EXPECT_EQ(uint8_t(0x11), getSwappedBytes(uint8_t(0x11)));
+}
+
+TEST(Endian, getSwappedBytes_uint16_t) {
+ EXPECT_EQ(uint16_t(0x1122), getSwappedBytes(uint16_t(0x2211)));
+}
+
+TEST(Endian, getSwappedBytes_uint32_t) {
+ EXPECT_EQ(uint32_t(0x11223344), getSwappedBytes(uint32_t(0x44332211)));
+}
+
+TEST(Endian, getSwappedBytes_uint64_t) {
+ EXPECT_EQ(uint64_t(0x1122334455667788ULL),
+ getSwappedBytes(uint64_t(0x8877665544332211ULL)));
+}
+
+TEST(Endian, getSwappedBytes_int8_t) {
+ EXPECT_EQ(int8_t(0x11), getSwappedBytes(int8_t(0x11)));
+}
+
+TEST(Endian, getSwappedBytes_int16_t) {
+ EXPECT_EQ(int16_t(0x1122), getSwappedBytes(int16_t(0x2211)));
+}
+
+TEST(Endian, getSwappedBytes_int32_t) {
+ EXPECT_EQ(int32_t(0x11223344), getSwappedBytes(int32_t(0x44332211)));
+}
+
+TEST(Endian, getSwappedBytes_int64_t) {
+ EXPECT_EQ(int64_t(0x1122334455667788LL),
+ getSwappedBytes(int64_t(0x8877665544332211LL)));
+}
+
+TEST(Endian, swapByteOrder_uint8_t) {
+ uint8_t value = 0x11;
+ swapByteOrder(value);
+ EXPECT_EQ(uint8_t(0x11), value);
+}
+
+TEST(Endian, swapByteOrder_uint16_t) {
+ uint16_t value = 0x2211;
+ swapByteOrder(value);
+ EXPECT_EQ(uint16_t(0x1122), value);
+}
+
+TEST(Endian, swapByteOrder_uint32_t) {
+ uint32_t value = 0x44332211;
+ swapByteOrder(value);
+ EXPECT_EQ(uint32_t(0x11223344), value);
+}
+
+TEST(Endian, swapByteOrder_uint64_t) {
+ uint64_t value = 0x8877665544332211ULL;
+ swapByteOrder(value);
+ EXPECT_EQ(uint64_t(0x1122334455667788ULL), value);
+}
+
+TEST(Endian, swapByteOrder_int8_t) {
+ int8_t value = 0x11;
+ swapByteOrder(value);
+ EXPECT_EQ(int8_t(0x11), value);
+}
+
+TEST(Endian, swapByteOrder_int16_t) {
+ int16_t value = 0x2211;
+ swapByteOrder(value);
+ EXPECT_EQ(int16_t(0x1122), value);
+}
+
+TEST(Endian, swapByteOrder_int32_t) {
+ int32_t value = 0x44332211;
+ swapByteOrder(value);
+ EXPECT_EQ(int32_t(0x11223344), value);
+}
+
+TEST(Endian, swapByteOrder_int64_t) {
+ int64_t value = 0x8877665544332211LL;
+ swapByteOrder(value);
+ EXPECT_EQ(int64_t(0x1122334455667788LL), value);
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/error_test.cpp b/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/error_test.cpp
new file mode 100644
index 000000000000..5251d788e01b
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/error_test.cpp
@@ -0,0 +1,295 @@
+//===-- error_test.cpp -----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of the ORC runtime.
+//
+// Note:
+// This unit test was adapted from
+// llvm/unittests/Support/ExtensibleRTTITest.cpp
+//
+//===----------------------------------------------------------------------===//
+
+#include "error.h"
+#include "gtest/gtest.h"
+
+using namespace __orc_rt;
+
+namespace {
+
+class CustomError : public RTTIExtends<CustomError, ErrorInfoBase> {
+public:
+ CustomError(int V1) : V1(V1) {}
+ std::string toString() const override {
+ return "CustomError V1 = " + std::to_string(V1);
+ }
+ int getV1() const { return V1; }
+
+protected:
+ int V1;
+};
+
+class CustomSubError : public RTTIExtends<CustomSubError, CustomError> {
+public:
+ CustomSubError(int V1, std::string V2)
+ : RTTIExtends<CustomSubError, CustomError>(V1), V2(std::move(V2)) {}
+ std::string toString() const override {
+ return "CustomSubError V1 = " + std::to_string(V1) + ", " + V2;
+ }
+ const std::string &getV2() const { return V2; }
+
+protected:
+ std::string V2;
+};
+
+} // end anonymous namespace
+
+// Test that a checked success value doesn't cause any issues.
+TEST(Error, CheckedSuccess) {
+ Error E = Error::success();
+ EXPECT_FALSE(E) << "Unexpected error while testing Error 'Success'";
+}
+
+// Check that a consumed success value doesn't cause any issues.
+TEST(Error, ConsumeSuccess) { consumeError(Error::success()); }
+
+TEST(Error, ConsumeError) {
+ Error E = make_error<CustomError>(42);
+ if (E) {
+ consumeError(std::move(E));
+ } else
+ ADD_FAILURE() << "Error failure value should convert to true";
+}
+
+// Test that unchecked success values cause an abort.
+TEST(Error, UncheckedSuccess) {
+ EXPECT_DEATH({ Error E = Error::success(); },
+ "Error must be checked prior to destruction")
+ << "Unchecked Error Succes value did not cause abort()";
+}
+
+// Test that a checked but unhandled error causes an abort.
+TEST(Error, CheckedButUnhandledError) {
+ auto DropUnhandledError = []() {
+ Error E = make_error<CustomError>(42);
+ (void)!E;
+ };
+ EXPECT_DEATH(DropUnhandledError(),
+ "Error must be checked prior to destruction")
+ << "Unhandled Error failure value did not cause an abort()";
+}
+
+// Test that error_cast works as expected.
+TEST(Error, BasicErrorCast) {
+ {
+ // Check casting base error value to base error type.
+ auto E = make_error<CustomError>(42);
+ if (auto CSE = error_cast<CustomSubError>(E)) {
+ ADD_FAILURE() << "Derived cast incorrectly matched base error";
+ } else if (auto CE = error_cast<CustomError>(E)) {
+ EXPECT_EQ(CE->getV1(), 42) << "Unexpected wrapped value";
+ } else
+ ADD_FAILURE() << "Unexpected error value";
+ }
+
+ {
+ // Check casting derived error value to base error type.
+ auto E = make_error<CustomSubError>(42, "foo");
+ if (auto CE = error_cast<CustomError>(E)) {
+ EXPECT_EQ(CE->getV1(), 42) << "Unexpected wrapped value";
+ } else
+ ADD_FAILURE() << "Unexpected error value";
+ }
+
+ {
+ // Check casting derived error value to derived error type.
+ auto E = make_error<CustomSubError>(42, "foo");
+ if (auto CSE = error_cast<CustomSubError>(E)) {
+ EXPECT_EQ(CSE->getV1(), 42) << "Unexpected wrapped value";
+ EXPECT_EQ(CSE->getV2(), "foo") << "Unexpected wrapped value";
+ } else
+ ADD_FAILURE() << "Unexpected error value";
+ }
+}
+
+// ErrorAsOutParameter tester.
+static void errAsOutParamHelper(Error &Err) {
+ ErrorAsOutParameter ErrAsOutParam(&Err);
+ // Verify that checked flag is raised - assignment should not crash.
+ Err = Error::success();
+ // Raise the checked bit manually - caller should still have to test the
+ // error.
+ (void)!!Err;
+}
+
+// Test that ErrorAsOutParameter sets the checked flag on construction.
+TEST(Error, ErrorAsOutParameterChecked) {
+ Error E = Error::success();
+ errAsOutParamHelper(E);
+ (void)!!E;
+}
+
+// Test that ErrorAsOutParameter clears the checked flag on destruction.
+TEST(Error, ErrorAsOutParameterUnchecked) {
+ EXPECT_DEATH(
+ {
+ Error E = Error::success();
+ errAsOutParamHelper(E);
+ },
+ "Error must be checked prior to destruction")
+ << "ErrorAsOutParameter did not clear the checked flag on destruction.";
+}
+
+// Check 'Error::isA<T>' method handling.
+TEST(Error, IsAHandling) {
+ // Check 'isA' handling.
+ Error E = make_error<CustomError>(42);
+ Error F = make_error<CustomSubError>(42, "foo");
+ Error G = Error::success();
+
+ EXPECT_TRUE(E.isA<CustomError>());
+ EXPECT_FALSE(E.isA<CustomSubError>());
+ EXPECT_TRUE(F.isA<CustomError>());
+ EXPECT_TRUE(F.isA<CustomSubError>());
+ EXPECT_FALSE(G.isA<CustomError>());
+
+ consumeError(std::move(E));
+ consumeError(std::move(F));
+ consumeError(std::move(G));
+}
+
+TEST(Error, StringError) {
+ auto E = make_error<StringError>("foo");
+ if (auto SE = error_cast<StringError>(E)) {
+ EXPECT_EQ(SE->toString(), "foo") << "Unexpected StringError value";
+ } else
+ ADD_FAILURE() << "Expected StringError value";
+}
+
+// Test Checked Expected<T> in success mode.
+TEST(Error, CheckedExpectedInSuccessMode) {
+ Expected<int> A = 7;
+ EXPECT_TRUE(!!A) << "Expected with non-error value doesn't convert to 'true'";
+ // Access is safe in second test, since we checked the error in the first.
+ EXPECT_EQ(*A, 7) << "Incorrect Expected non-error value";
+}
+
+// Test Expected with reference type.
+TEST(Error, ExpectedWithReferenceType) {
+ int A = 7;
+ Expected<int &> B = A;
+ // 'Check' B.
+ (void)!!B;
+ int &C = *B;
+ EXPECT_EQ(&A, &C) << "Expected failed to propagate reference";
+}
+
+// Test Unchecked Expected<T> in success mode.
+// We expect this to blow up the same way Error would.
+// Test runs in debug mode only.
+TEST(Error, UncheckedExpectedInSuccessModeDestruction) {
+ EXPECT_DEATH({ Expected<int> A = 7; },
+ "Expected<T> must be checked before access or destruction.")
+ << "Unchecekd Expected<T> success value did not cause an abort().";
+}
+
+// Test Unchecked Expected<T> in success mode.
+// We expect this to blow up the same way Error would.
+// Test runs in debug mode only.
+TEST(Error, UncheckedExpectedInSuccessModeAccess) {
+ EXPECT_DEATH(
+ {
+ Expected<int> A = 7;
+ *A;
+ },
+ "Expected<T> must be checked before access or destruction.")
+ << "Unchecekd Expected<T> success value did not cause an abort().";
+}
+
+// Test Unchecked Expected<T> in success mode.
+// We expect this to blow up the same way Error would.
+// Test runs in debug mode only.
+TEST(Error, UncheckedExpectedInSuccessModeAssignment) {
+ EXPECT_DEATH(
+ {
+ Expected<int> A = 7;
+ A = 7;
+ },
+ "Expected<T> must be checked before access or destruction.")
+ << "Unchecekd Expected<T> success value did not cause an abort().";
+}
+
+// Test Expected<T> in failure mode.
+TEST(Error, ExpectedInFailureMode) {
+ Expected<int> A = make_error<CustomError>(42);
+ EXPECT_FALSE(!!A) << "Expected with error value doesn't convert to 'false'";
+ Error E = A.takeError();
+ EXPECT_TRUE(E.isA<CustomError>()) << "Incorrect Expected error value";
+ consumeError(std::move(E));
+}
+
+// Check that an Expected instance with an error value doesn't allow access to
+// operator*.
+// Test runs in debug mode only.
+TEST(Error, AccessExpectedInFailureMode) {
+ Expected<int> A = make_error<CustomError>(42);
+ EXPECT_DEATH(*A, "Expected<T> must be checked before access or destruction.")
+ << "Incorrect Expected error value";
+ consumeError(A.takeError());
+}
+
+// Check that an Expected instance with an error triggers an abort if
+// unhandled.
+// Test runs in debug mode only.
+TEST(Error, UnhandledExpectedInFailureMode) {
+ EXPECT_DEATH({ Expected<int> A = make_error<CustomError>(42); },
+ "Expected<T> must be checked before access or destruction.")
+ << "Unchecked Expected<T> failure value did not cause an abort()";
+}
+
+// Test covariance of Expected.
+TEST(Error, ExpectedCovariance) {
+ class B {};
+ class D : public B {};
+
+ Expected<B *> A1(Expected<D *>(nullptr));
+ // Check A1 by converting to bool before assigning to it.
+ (void)!!A1;
+ A1 = Expected<D *>(nullptr);
+ // Check A1 again before destruction.
+ (void)!!A1;
+
+ Expected<std::unique_ptr<B>> A2(Expected<std::unique_ptr<D>>(nullptr));
+ // Check A2 by converting to bool before assigning to it.
+ (void)!!A2;
+ A2 = Expected<std::unique_ptr<D>>(nullptr);
+ // Check A2 again before destruction.
+ (void)!!A2;
+}
+
+// Test that the cantFail utility works as expected on success values.
+TEST(Error, CantFailSuccess) {
+ cantFail(Error::success());
+
+ int X = cantFail(Expected<int>(42));
+ EXPECT_EQ(X, 42) << "Expected value modified by cantFail";
+
+ int Dummy = 42;
+ int &Y = cantFail(Expected<int &>(Dummy));
+ EXPECT_EQ(&Dummy, &Y) << "Reference mangled by cantFail";
+}
+
+// Test that cantFail results in a crash if you pass it a failure value.
+TEST(Error, CantFailDeath) {
+ EXPECT_DEATH(cantFail(make_error<StringError>("foo")),
+ "cantFail called on failure value")
+ << "cantFail(Error) did not cause an abort for failure value";
+
+ EXPECT_DEATH(cantFail(Expected<int>(make_error<StringError>("foo"))),
+ "cantFail called on failure value")
+ << "cantFail(Expected<int>) did not cause an abort for failure value";
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/executor_address_test.cpp b/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/executor_address_test.cpp
new file mode 100644
index 000000000000..05b91f3f8609
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/executor_address_test.cpp
@@ -0,0 +1,115 @@
+//===-- executor_address_test.cpp -----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of the ORC runtime.
+//
+// Note:
+// This unit test was adapted from
+// llvm/unittests/Support/ExecutorAddressTest.cpp
+//
+//===----------------------------------------------------------------------===//
+
+#include "executor_address.h"
+#include "gtest/gtest.h"
+
+using namespace __orc_rt;
+
+TEST(ExecutorAddrTest, DefaultAndNull) {
+ // Check that default constructed values and isNull behave as expected.
+
+ ExecutorAddr Default;
+ ExecutorAddr Null(0);
+ ExecutorAddr NonNull(1);
+
+ EXPECT_TRUE(Null.isNull());
+ EXPECT_EQ(Default, Null);
+
+ EXPECT_FALSE(NonNull.isNull());
+ EXPECT_NE(Default, NonNull);
+}
+
+TEST(ExecutorAddrTest, Ordering) {
+ // Check that the ordering operations behave as expected.
+ ExecutorAddr A1(1), A2(2);
+
+ EXPECT_LE(A1, A1);
+ EXPECT_LT(A1, A2);
+ EXPECT_GT(A2, A1);
+ EXPECT_GE(A2, A2);
+}
+
+TEST(ExecutorAddrTest, PtrConversion) {
+ // Test toPtr / fromPtr round-tripping.
+ int X = 0;
+ auto XAddr = ExecutorAddr::fromPtr(&X);
+ int *XPtr = XAddr.toPtr<int *>();
+
+ EXPECT_EQ(XPtr, &X);
+}
+
+static void F() {}
+
+TEST(ExecutorAddrTest, PtrConversionWithFunctionType) {
+ // Test that function types (as opposed to function pointer types) can be
+ // used with toPtr.
+ auto FAddr = ExecutorAddr::fromPtr(F);
+ void (*FPtr)() = FAddr.toPtr<void()>();
+
+ EXPECT_EQ(FPtr, &F);
+}
+
+TEST(ExecutorAddrTest, WrappingAndUnwrapping) {
+ constexpr uintptr_t RawAddr = 0x123456;
+ int *RawPtr = (int *)RawAddr;
+
+ constexpr uintptr_t TagOffset = 8 * (sizeof(uintptr_t) - 1);
+ uintptr_t TagVal = 0xA5;
+ uintptr_t TagBits = TagVal << TagOffset;
+ void *TaggedPtr = (void *)((uintptr_t)RawPtr | TagBits);
+
+ ExecutorAddr EA =
+ ExecutorAddr::fromPtr(TaggedPtr, ExecutorAddr::Untag(8, TagOffset));
+
+ EXPECT_EQ(EA.getValue(), RawAddr);
+
+ void *ReconstitutedTaggedPtr =
+ EA.toPtr<void *>(ExecutorAddr::Tag(TagVal, TagOffset));
+
+ EXPECT_EQ(TaggedPtr, ReconstitutedTaggedPtr);
+}
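+
+// Layout assumed by the test above: an 8-bit tag stored in the top byte of a
+// 64-bit pointer. Untag(8, TagOffset) masks the tag bits off on import and
+// Tag(TagVal, TagOffset) ORs them back in on export, so fromPtr/toPtr
+// round-trip tagged pointers exactly.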
+
+TEST(ExecutorAddrTest, AddrRanges) {
+ ExecutorAddr A0(0), A1(1), A2(2), A3(3);
+ ExecutorAddrRange R0(A0, A1), R1(A1, A2), R2(A2, A3), R3(A0, A2), R4(A1, A3);
+ // 012
+ // R0: # -- Before R1
+ // R1: # --
+ // R2: # -- After R1
+ // R3: ## -- Overlaps R1 start
+ // R4: ## -- Overlaps R1 end
+
+ EXPECT_EQ(R1, ExecutorAddrRange(A1, A2));
+ EXPECT_EQ(R1, ExecutorAddrRange(A1, ExecutorAddrDiff(1)));
+ EXPECT_NE(R1, R2);
+
+ EXPECT_TRUE(R1.contains(A1));
+ EXPECT_FALSE(R1.contains(A0));
+ EXPECT_FALSE(R1.contains(A2));
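+ // (ExecutorAddrRange is half-open: [Start, End) contains Start, not End.)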
+
+ EXPECT_FALSE(R1.overlaps(R0));
+ EXPECT_FALSE(R1.overlaps(R2));
+ EXPECT_TRUE(R1.overlaps(R3));
+ EXPECT_TRUE(R1.overlaps(R4));
+}
+
+TEST(ExecutorAddrTest, Hashable) {
+ uint64_t RawAddr = 0x1234567890ABCDEF;
+ ExecutorAddr Addr(RawAddr);
+
+ EXPECT_EQ(std::hash<uint64_t>()(RawAddr), std::hash<ExecutorAddr>()(Addr));
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/executor_symbol_def_test.cpp b/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/executor_symbol_def_test.cpp
new file mode 100644
index 000000000000..181091ca1e60
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/executor_symbol_def_test.cpp
@@ -0,0 +1,19 @@
+//===-- executor_symbol_def_test.cpp --------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "executor_symbol_def.h"
+#include "simple_packed_serialization_utils.h"
+#include "gtest/gtest.h"
+
+using namespace __orc_rt;
+
+TEST(ExecutorSymbolDefTest, Serialization) {
+ blobSerializationRoundTrip<SPSExecutorSymbolDef>(ExecutorSymbolDef{});
+ blobSerializationRoundTrip<SPSExecutorSymbolDef>(
+ ExecutorSymbolDef{ExecutorAddr{0x70}, {JITSymbolFlags::Callable, 9}});
+} \ No newline at end of file
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/extensible_rtti_test.cpp b/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/extensible_rtti_test.cpp
new file mode 100644
index 000000000000..feca1ec1d18c
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/extensible_rtti_test.cpp
@@ -0,0 +1,54 @@
+//===-- extensible_rtti_test.cpp ------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of the ORC runtime.
+//
+// Note:
+// This unit test was adapted from
+// llvm/unittests/Support/ExtensibleRTTITest.cpp
+//
+//===----------------------------------------------------------------------===//
+
+#include "extensible_rtti.h"
+#include "gtest/gtest.h"
+
+using namespace __orc_rt;
+
+namespace {
+
+class MyBase : public RTTIExtends<MyBase, RTTIRoot> {};
+
+class MyDerivedA : public RTTIExtends<MyDerivedA, MyBase> {};
+
+class MyDerivedB : public RTTIExtends<MyDerivedB, MyBase> {};
+
+} // end anonymous namespace
+
+TEST(ExtensibleRTTITest, BaseCheck) {
+ MyBase MB;
+ MyDerivedA MDA;
+ MyDerivedB MDB;
+
+ // Check MB properties.
+ EXPECT_TRUE(isa<RTTIRoot>(MB));
+ EXPECT_TRUE(isa<MyBase>(MB));
+ EXPECT_FALSE(isa<MyDerivedA>(MB));
+ EXPECT_FALSE(isa<MyDerivedB>(MB));
+
+ // Check MDA properties.
+ EXPECT_TRUE(isa<RTTIRoot>(MDA));
+ EXPECT_TRUE(isa<MyBase>(MDA));
+ EXPECT_TRUE(isa<MyDerivedA>(MDA));
+ EXPECT_FALSE(isa<MyDerivedB>(MDA));
+
+ // Check MDB properties.
+ EXPECT_TRUE(isa<RTTIRoot>(MDB));
+ EXPECT_TRUE(isa<MyBase>(MDB));
+ EXPECT_FALSE(isa<MyDerivedA>(MDB));
+ EXPECT_TRUE(isa<MyDerivedB>(MDB));
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/interval_map_test.cpp b/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/interval_map_test.cpp
new file mode 100644
index 000000000000..a1c6958fcd52
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/interval_map_test.cpp
@@ -0,0 +1,204 @@
+//===-- interval_map_test.cpp ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of the ORC runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#include "interval_map.h"
+#include "gtest/gtest.h"
+
+using namespace __orc_rt;
+
+TEST(IntervalMapTest, DefaultConstructed) {
+ // Check that a default-constructed IntervalMap behaves as expected.
+ IntervalMap<unsigned, unsigned, IntervalCoalescing::Enabled> M;
+
+ EXPECT_TRUE(M.empty());
+ EXPECT_TRUE(M.begin() == M.end());
+ EXPECT_TRUE(M.find(0) == M.end());
+}
+
+TEST(IntervalMapTest, InsertSingleElement) {
+ // Check that a map with a single element inserted behaves as expected.
+ IntervalMap<unsigned, unsigned, IntervalCoalescing::Enabled> M;
+
+ M.insert(7, 8, 42);
+
+ EXPECT_FALSE(M.empty());
+ EXPECT_EQ(std::next(M.begin()), M.end());
+ EXPECT_EQ(M.find(7), M.begin());
+ EXPECT_EQ(M.find(8), M.end());
+ EXPECT_EQ(M.lookup(7), 42U);
+ EXPECT_EQ(M.lookup(8), 0U); // 8 not present, so should return unsigned().
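+ // Intervals are half-open: insert(7, 8, 42) covers [7, 8), so only key 7
+ // is mapped.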
+}
+
+TEST(IntervalMapTest, InsertCoalesceWithPrevious) {
+ // Check that insertions coalesce with previous ranges that share the same
+ // value. Also check that they _don't_ coalesce if the values are different.
+
+ // Check that insertion coalesces with previous range when values are equal.
+ IntervalMap<unsigned, unsigned, IntervalCoalescing::Enabled> M1;
+
+ M1.insert(7, 8, 42);
+ M1.insert(8, 9, 42);
+
+ EXPECT_FALSE(M1.empty());
+ EXPECT_EQ(std::next(M1.begin()), M1.end()); // Should see just one range.
+ EXPECT_EQ(M1.find(7), M1.find(8)); // 7 and 8 should point to same range.
+ EXPECT_EQ(M1.lookup(7), 42U); // Value should be preserved.
+
+ // Check that insertion does not coalesce with previous range when values are
+ // not equal.
+ IntervalMap<unsigned, unsigned, IntervalCoalescing::Enabled> M2;
+
+ M2.insert(7, 8, 42);
+ M2.insert(8, 9, 7);
+
+ EXPECT_FALSE(M2.empty());
+ EXPECT_EQ(std::next(std::next(M2.begin())), M2.end()); // Expect two ranges.
+ EXPECT_NE(M2.find(7), M2.find(8)); // 7 and 8 should be different ranges.
+ EXPECT_EQ(M2.lookup(7), 42U); // Keys 7 and 8 should map to different values.
+ EXPECT_EQ(M2.lookup(8), 7U);
+}
+
+TEST(IntervalMapTest, InsertCoalesceWithFollowing) {
+ // Check that insertions coalesce with following ranges that share the same
+ // value. Also check that they _don't_ coalesce if the values are different.
+
+ // Check that insertion coalesces with following range when values are equal.
+ IntervalMap<unsigned, unsigned, IntervalCoalescing::Enabled> M1;
+
+ M1.insert(8, 9, 42);
+ M1.insert(7, 8, 42);
+
+ EXPECT_FALSE(M1.empty());
+ EXPECT_EQ(std::next(M1.begin()), M1.end()); // Should see just one range.
+ EXPECT_EQ(M1.find(7), M1.find(8)); // 7 and 8 should point to same range.
+ EXPECT_EQ(M1.lookup(7), 42U); // Value should be preserved.
+
+ // Check that insertion does not coalesce with previous range when values are
+ // not equal.
+ IntervalMap<unsigned, unsigned, IntervalCoalescing::Enabled> M2;
+
+ M2.insert(8, 9, 42);
+ M2.insert(7, 8, 7);
+
+ EXPECT_FALSE(M2.empty());
+ EXPECT_EQ(std::next(std::next(M2.begin())), M2.end()); // Expect two ranges.
+ EXPECT_EQ(M2.lookup(7), 7U); // Keys 7 and 8 should map to different values.
+ EXPECT_EQ(M2.lookup(8), 42U);
+}
+
+TEST(IntervalMapTest, InsertCoalesceBoth) {
+ // Check that insertions coalesce with ranges on both sides where possible.
+ // Also check that they _don't_ coalesce if the values are different.
+
+ // Check that insertion coalesces with both previous and following ranges
+ // when values are equal.
+ IntervalMap<unsigned, unsigned, IntervalCoalescing::Enabled> M1;
+
+ M1.insert(7, 8, 42);
+ M1.insert(9, 10, 42);
+
+ // Check no coalescing yet.
+ EXPECT_NE(M1.find(7), M1.find(9));
+
+ // Insert a 3rd range to trigger coalescing on both sides.
+ M1.insert(8, 9, 42);
+
+ EXPECT_FALSE(M1.empty());
+ EXPECT_EQ(std::next(M1.begin()), M1.end()); // Should see just one range.
+ EXPECT_EQ(M1.find(7), M1.find(8)); // 7, 8, and 9 should point to same range.
+ EXPECT_EQ(M1.find(8), M1.find(9));
+ EXPECT_EQ(M1.lookup(7), 42U); // Value should be preserved.
+
+ // Check that insertion does not coalesce with previous range when values are
+ // not equal.
+ IntervalMap<unsigned, unsigned, IntervalCoalescing::Enabled> M2;
+
+ M2.insert(7, 8, 42);
+ M2.insert(8, 9, 7);
+ M2.insert(9, 10, 42);
+
+ EXPECT_FALSE(M2.empty());
+ // Expect three ranges.
+ EXPECT_EQ(std::next(std::next(std::next(M2.begin()))), M2.end());
+ EXPECT_NE(M2.find(7), M2.find(8)); // All keys should map to different ranges.
+ EXPECT_NE(M2.find(8), M2.find(9));
+ EXPECT_EQ(M2.lookup(7), 42U); // Keys 7, 8, and 9 should map to different values.
+ EXPECT_EQ(M2.lookup(8), 7U);
+ EXPECT_EQ(M2.lookup(9), 42U);
+}
+
+TEST(IntervalMapTest, EraseSingleElement) {
+ // Check that we can insert and then remove a single range.
+ IntervalMap<unsigned, unsigned, IntervalCoalescing::Enabled> M;
+
+ M.insert(7, 10, 42);
+ EXPECT_FALSE(M.empty());
+ M.erase(7, 10);
+ EXPECT_TRUE(M.empty());
+}
+
+TEST(IntervalMapTest, EraseSplittingLeft) {
+ // Check that removal of a trailing subrange succeeds, but leaves the
+ // residual range in-place.
+ IntervalMap<unsigned, unsigned, IntervalCoalescing::Enabled> M;
+
+ M.insert(7, 10, 42);
+ EXPECT_FALSE(M.empty());
+ M.erase(9, 10);
+ EXPECT_EQ(std::next(M.begin()), M.end());
+ EXPECT_EQ(M.begin()->first.first, 7U);
+ EXPECT_EQ(M.begin()->first.second, 9U);
+}
+
+TEST(IntervalMapTest, EraseSplittingRight) {
+ // Check that removal of a leading subrange succeeds, but leaves the
+ // residual range in-place.
+ IntervalMap<unsigned, unsigned, IntervalCoalescing::Enabled> M;
+
+ M.insert(7, 10, 42);
+ EXPECT_FALSE(M.empty());
+ M.erase(7, 8);
+ EXPECT_EQ(std::next(M.begin()), M.end());
+ EXPECT_EQ(M.begin()->first.first, 8U);
+ EXPECT_EQ(M.begin()->first.second, 10U);
+}
+
+TEST(IntervalMapTest, EraseSplittingBoth) {
+ // Check that removal of an interior subrange leaves both the leading and
+ // trailing residual subranges in-place.
+ IntervalMap<unsigned, unsigned, IntervalCoalescing::Enabled> M;
+
+ M.insert(7, 10, 42);
+ EXPECT_FALSE(M.empty());
+ M.erase(8, 9);
+ EXPECT_EQ(std::next(std::next(M.begin())), M.end());
+ EXPECT_EQ(M.begin()->first.first, 7U);
+ EXPECT_EQ(M.begin()->first.second, 8U);
+ EXPECT_EQ(std::next(M.begin())->first.first, 9U);
+ EXPECT_EQ(std::next(M.begin())->first.second, 10U);
+}
+
+TEST(IntervalMapTest, NonCoalescingMapPermitsNonComparableKeys) {
+ // Test that values that can't be equality-compared are still usable when
+ // coalescing is disabled and behave as expected.
+
+ struct S {}; // Struct with no equality comparison.
+
+ IntervalMap<unsigned, S, IntervalCoalescing::Disabled> M;
+
+ M.insert(7, 8, S());
+
+ EXPECT_FALSE(M.empty());
+ EXPECT_EQ(std::next(M.begin()), M.end());
+ EXPECT_EQ(M.find(7), M.begin());
+ EXPECT_EQ(M.find(8), M.end());
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/interval_set_test.cpp b/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/interval_set_test.cpp
new file mode 100644
index 000000000000..7971a55f271f
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/interval_set_test.cpp
@@ -0,0 +1,121 @@
+//===-- interval_set_test.cpp ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of the ORC runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#include "interval_set.h"
+#include "gtest/gtest.h"
+
+using namespace __orc_rt;
+
+TEST(IntervalSetTest, DefaultConstructed) {
+ // Check that a default-constructed IntervalSet behaves as expected.
+ IntervalSet<unsigned, IntervalCoalescing::Enabled> S;
+
+ EXPECT_TRUE(S.empty());
+ EXPECT_TRUE(S.begin() == S.end());
+ EXPECT_TRUE(S.find(0) == S.end());
+}
+
+TEST(IntervalSetTest, InsertSingleElement) {
+ // Check that a set with a single element inserted behaves as expected.
+ IntervalSet<unsigned, IntervalCoalescing::Enabled> S;
+
+ S.insert(7, 8);
+
+ EXPECT_FALSE(S.empty());
+ EXPECT_EQ(std::next(S.begin()), S.end());
+ EXPECT_EQ(S.find(7), S.begin());
+ EXPECT_EQ(S.find(8), S.end());
+}
+
+TEST(IntervalSetTest, InsertCoalesceWithPrevious) {
+ // Check that insertions coalesce with previous ranges.
+ IntervalSet<unsigned, IntervalCoalescing::Enabled> S;
+
+ S.insert(7, 8);
+ S.insert(8, 9);
+
+ EXPECT_FALSE(S.empty());
+ EXPECT_EQ(std::next(S.begin()), S.end()); // Should see just one range.
+ EXPECT_EQ(S.find(7), S.find(8)); // 7 and 8 should point to same range.
+}
+
+TEST(IntervalSetTest, InsertCoalesceWithFollowing) {
+ // Check that insertions coalesce with following ranges.
+ IntervalSet<unsigned, IntervalCoalescing::Enabled> S;
+
+ S.insert(8, 9);
+ S.insert(7, 8);
+
+ EXPECT_FALSE(S.empty());
+ EXPECT_EQ(std::next(S.begin()), S.end()); // Should see just one range.
+ EXPECT_EQ(S.find(7), S.find(8)); // 7 and 8 should point to same range.
+}
+
+TEST(IntervalSetTest, InsertCoalesceBoth) {
+ // Check that insertions coalesce with ranges on both sides.
+ IntervalSet<unsigned, IntervalCoalescing::Enabled> S;
+
+ S.insert(7, 8);
+ S.insert(9, 10);
+
+ // Check no coalescing yet.
+ EXPECT_NE(S.find(7), S.find(9));
+
+ // Insert a 3rd range to trigger coalescing on both sides.
+ S.insert(8, 9);
+
+ EXPECT_FALSE(S.empty());
+ EXPECT_EQ(std::next(S.begin()), S.end()); // Should see just one range.
+ EXPECT_EQ(S.find(7), S.find(8)); // 7, 8, and 9 should point to same range.
+ EXPECT_EQ(S.find(8), S.find(9));
+}
+
+TEST(IntervalSetTest, EraseSplittingLeft) {
+ // Check that removal of a trailing subrange succeeds, but leaves the
+ // residual range in-place.
+ IntervalSet<unsigned, IntervalCoalescing::Enabled> S;
+
+ S.insert(7, 10);
+ EXPECT_FALSE(S.empty());
+ S.erase(9, 10);
+ EXPECT_EQ(std::next(S.begin()), S.end());
+ EXPECT_EQ(S.begin()->first, 7U);
+ EXPECT_EQ(S.begin()->second, 9U);
+}
+
+TEST(IntervalSetTest, EraseSplittingRight) {
+ // Check that removal of a leading subrange succeeds, but leaves the
+ // residual range in-place.
+ IntervalSet<unsigned, IntervalCoalescing::Enabled> S;
+
+ S.insert(7, 10);
+ EXPECT_FALSE(S.empty());
+ S.erase(7, 8);
+ EXPECT_EQ(std::next(S.begin()), S.end());
+ EXPECT_EQ(S.begin()->first, 8U);
+ EXPECT_EQ(S.begin()->second, 10U);
+}
+
+TEST(IntervalSetTest, EraseSplittingBoth) {
+ // Check that removal of an interior subrange leaves both the leading and
+ // trailing residual subranges in-place.
+ IntervalSet<unsigned, IntervalCoalescing::Enabled> S;
+
+ S.insert(7, 10);
+ EXPECT_FALSE(S.empty());
+ S.erase(8, 9);
+ EXPECT_EQ(std::next(std::next(S.begin())), S.end());
+ EXPECT_EQ(S.begin()->first, 7U);
+ EXPECT_EQ(S.begin()->second, 8U);
+ EXPECT_EQ(std::next(S.begin())->first, 9U);
+ EXPECT_EQ(std::next(S.begin())->second, 10U);
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_persistent_allocator.cpp b/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/orc_unit_test_main.cpp
index 1ca0375b8a54..d02101704d65 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_persistent_allocator.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/orc_unit_test_main.cpp
@@ -1,4 +1,4 @@
-//===-- sanitizer_persistent_allocator.cpp ----------------------*- C++ -*-===//
+//===-- orc_unit_test_main.cpp --------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -6,13 +6,13 @@
//
//===----------------------------------------------------------------------===//
//
-// This file is shared between AddressSanitizer and ThreadSanitizer
-// run-time libraries.
+// This file is a part of the ORC runtime.
+//
//===----------------------------------------------------------------------===//
-#include "sanitizer_persistent_allocator.h"
-
-namespace __sanitizer {
-
-PersistentAllocator thePersistentAllocator;
+#include "gtest/gtest.h"
-} // namespace __sanitizer
+int main(int argc, char **argv) {
+ testing::GTEST_FLAG(death_test_style) = "threadsafe";
+ testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/simple_packed_serialization_test.cpp b/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/simple_packed_serialization_test.cpp
new file mode 100644
index 000000000000..397114b4017e
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/simple_packed_serialization_test.cpp
@@ -0,0 +1,184 @@
+//===-- simple_packed_serialization_test.cpp ------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of the ORC runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#include "simple_packed_serialization.h"
+#include "simple_packed_serialization_utils.h"
+#include "gtest/gtest.h"
+
+using namespace __orc_rt;
+
+TEST(SimplePackedSerializationTest, SPSOutputBuffer) {
+ constexpr unsigned NumBytes = 8;
+ char Buffer[NumBytes];
+ char Zero = 0;
+ SPSOutputBuffer OB(Buffer, NumBytes);
+
+ // Expect that we can write NumBytes of content.
+ for (unsigned I = 0; I != NumBytes; ++I) {
+ char C = I;
+ EXPECT_TRUE(OB.write(&C, 1));
+ }
+
+ // Expect an error when we attempt to write an extra byte.
+ EXPECT_FALSE(OB.write(&Zero, 1));
+
+ // Check that the buffer contains the expected content.
+ for (unsigned I = 0; I != NumBytes; ++I)
+ EXPECT_EQ(Buffer[I], (char)I);
+}
+
+TEST(SimplePackedSerializationTest, SPSInputBuffer) {
+ char Buffer[] = {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07};
+ SPSInputBuffer IB(Buffer, sizeof(Buffer));
+
+ char C;
+ for (unsigned I = 0; I != sizeof(Buffer); ++I) {
+ EXPECT_TRUE(IB.read(&C, 1));
+ EXPECT_EQ(C, (char)I);
+ }
+
+ EXPECT_FALSE(IB.read(&C, 1));
+}
+
+template <typename T> static void testFixedIntegralTypeSerialization() {
+ blobSerializationRoundTrip<T, T>(0);
+ blobSerializationRoundTrip<T, T>(static_cast<T>(1));
+ if (std::is_signed<T>::value) {
+ blobSerializationRoundTrip<T, T>(static_cast<T>(-1));
+ blobSerializationRoundTrip<T, T>(std::numeric_limits<T>::min());
+ }
+ blobSerializationRoundTrip<T, T>(std::numeric_limits<T>::max());
+}
+
+TEST(SimplePackedSerializationTest, BoolSerialization) {
+ blobSerializationRoundTrip<bool, bool>(true);
+ blobSerializationRoundTrip<bool, bool>(false);
+}
+
+TEST(SimplePackedSerializationTest, CharSerialization) {
+ blobSerializationRoundTrip<char, char>((char)0x00);
+ blobSerializationRoundTrip<char, char>((char)0xAA);
+ blobSerializationRoundTrip<char, char>((char)0xFF);
+}
+
+TEST(SimplePackedSerializationTest, Int8Serialization) {
+ testFixedIntegralTypeSerialization<int8_t>();
+}
+
+TEST(SimplePackedSerializationTest, UInt8Serialization) {
+ testFixedIntegralTypeSerialization<uint8_t>();
+}
+
+TEST(SimplePackedSerializationTest, Int16Serialization) {
+ testFixedIntegralTypeSerialization<int16_t>();
+}
+
+TEST(SimplePackedSerializationTest, UInt16Serialization) {
+ testFixedIntegralTypeSerialization<uint16_t>();
+}
+
+TEST(SimplePackedSerializationTest, Int32Serialization) {
+ testFixedIntegralTypeSerialization<int32_t>();
+}
+
+TEST(SimplePackedSerializationTest, UInt32Serialization) {
+ testFixedIntegralTypeSerialization<uint32_t>();
+}
+
+TEST(SimplePackedSerializationTest, Int64Serialization) {
+ testFixedIntegralTypeSerialization<int64_t>();
+}
+
+TEST(SimplePackedSerializationTest, UInt64Serialization) {
+ testFixedIntegralTypeSerialization<uint64_t>();
+}
+
+TEST(SimplePackedSerializationTest, SequenceSerialization) {
+ std::vector<int32_t> V({1, 2, -47, 139});
+ blobSerializationRoundTrip<SPSSequence<int32_t>, std::vector<int32_t>>(V);
+}
+
+TEST(SimplePackedSerializationTest, StringViewCharSequenceSerialization) {
+ const char *HW = "Hello, world!";
+ blobSerializationRoundTrip<SPSString, std::string_view>(std::string_view(HW));
+}
+
+TEST(SimplePackedSerializationTest, SpanSerialization) {
+ const char Data[] = {3, 2, 1, 0, 1, 2, 3}; // Span should handle nulls.
+ span<const char> OutS(Data, sizeof(Data));
+
+ size_t Size = SPSArgList<SPSSequence<char>>::size(OutS);
+ auto Buffer = std::make_unique<char[]>(Size);
+ SPSOutputBuffer OB(Buffer.get(), Size);
+
+ EXPECT_TRUE(SPSArgList<SPSSequence<char>>::serialize(OB, OutS));
+
+ SPSInputBuffer IB(Buffer.get(), Size);
+
+ span<const char> InS;
+
+ EXPECT_TRUE(SPSArgList<SPSSequence<char>>::deserialize(IB, InS));
+
+ // Check that the serialized and deserialized values match.
+ EXPECT_EQ(InS.size(), OutS.size());
+ EXPECT_EQ(memcmp(OutS.data(), InS.data(), InS.size()), 0);
+
+ // Check that the span points directly to the input buffer.
+ EXPECT_EQ(InS.data(), Buffer.get() + sizeof(uint64_t));
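+ // (SPS encodes a sequence as a uint64_t size field followed by the raw
+ // elements, hence the sizeof(uint64_t) offset into the buffer.)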
+}
+
+TEST(SimplePackedSerializationTest, StdTupleSerialization) {
+ std::tuple<int32_t, std::string, bool> P(42, "foo", true);
+ blobSerializationRoundTrip<SPSTuple<int32_t, SPSString, bool>>(P);
+}
+
+TEST(SimplePackedSerializationTest, StdPairSerialization) {
+ std::pair<int32_t, std::string> P(42, "foo");
+ blobSerializationRoundTrip<SPSTuple<int32_t, SPSString>,
+ std::pair<int32_t, std::string>>(P);
+}
+
+TEST(SimplePackedSerializationTest, StdOptionalNoValueSerialization) {
+ std::optional<int64_t> NoValue;
+ blobSerializationRoundTrip<SPSOptional<int64_t>>(NoValue);
+}
+
+TEST(SimplePackedSerializationTest, StdOptionalValueSerialization) {
+ std::optional<int64_t> Value(42);
+ blobSerializationRoundTrip<SPSOptional<int64_t>>(Value);
+}
+
+TEST(SimplePackedSerializationTest, ArgListSerialization) {
+ using BAL = SPSArgList<bool, int32_t, SPSString>;
+
+ bool Arg1 = true;
+ int32_t Arg2 = 42;
+ std::string Arg3 = "foo";
+
+ size_t Size = BAL::size(Arg1, Arg2, Arg3);
+ auto Buffer = std::make_unique<char[]>(Size);
+ SPSOutputBuffer OB(Buffer.get(), Size);
+
+ EXPECT_TRUE(BAL::serialize(OB, Arg1, Arg2, Arg3));
+
+ SPSInputBuffer IB(Buffer.get(), Size);
+
+ bool ArgOut1;
+ int32_t ArgOut2;
+ std::string ArgOut3;
+
+ EXPECT_TRUE(BAL::deserialize(IB, ArgOut1, ArgOut2, ArgOut3));
+
+ EXPECT_EQ(Arg1, ArgOut1);
+ EXPECT_EQ(Arg2, ArgOut2);
+ EXPECT_EQ(Arg3, ArgOut3);
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/simple_packed_serialization_utils.h b/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/simple_packed_serialization_utils.h
new file mode 100644
index 000000000000..746be43d250b
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/simple_packed_serialization_utils.h
@@ -0,0 +1,34 @@
+//===-- simple_packed_serialization_utils.h -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef ORC_RT_TEST_SIMPLE_PACKED_SERIALIZATION_UTILS_H
+#define ORC_RT_TEST_SIMPLE_PACKED_SERIALIZATION_UTILS_H
+
+#include "simple_packed_serialization.h"
+#include "gtest/gtest.h"
+
+template <typename SPSTagT, typename T>
+static void blobSerializationRoundTrip(const T &Value) {
+ using BST = __orc_rt::SPSSerializationTraits<SPSTagT, T>;
+
+ size_t Size = BST::size(Value);
+ auto Buffer = std::make_unique<char[]>(Size);
+ __orc_rt::SPSOutputBuffer OB(Buffer.get(), Size);
+
+ EXPECT_TRUE(BST::serialize(OB, Value));
+
+ __orc_rt::SPSInputBuffer IB(Buffer.get(), Size);
+
+ T DSValue;
+ EXPECT_TRUE(BST::deserialize(IB, DSValue));
+
+ EXPECT_EQ(Value, DSValue)
+ << "Incorrect value after serialization/deserialization round-trip";
+}
+
+#endif // ORC_RT_TEST_SIMPLE_PACKED_SERIALIZATION_UTILS_H \ No newline at end of file
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/string_pool_test.cpp b/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/string_pool_test.cpp
new file mode 100644
index 000000000000..15ee2ce7d24d
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/string_pool_test.cpp
@@ -0,0 +1,66 @@
+//===---------- string_pool_test.cpp - Unit tests for StringPool ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "string_pool.h"
+#include "gtest/gtest.h"
+
+using namespace __orc_rt;
+
+namespace {
+
+TEST(StringPool, UniquingAndComparisons) {
+ StringPool SP;
+ auto P1 = SP.intern("hello");
+
+ std::string S("hel");
+ S += "lo";
+ auto P2 = SP.intern(S);
+
+ auto P3 = SP.intern("goodbye");
+
+ EXPECT_EQ(P1, P2) << "Failed to unique entries";
+ EXPECT_NE(P1, P3) << "Unequal pooled strings comparing equal";
+
+ // We want to test that less-than comparison of PooledStringPtrs compiles;
+ // however, we can't test the actual result, since this is a pointer
+ // comparison and PooledStringPtr doesn't expose the underlying address of
+ // the string.
+ (void)(P1 < P3);
+}
+
+TEST(StringPool, Dereference) {
+ StringPool SP;
+ auto Foo = SP.intern("foo");
+ EXPECT_EQ(*Foo, "foo") << "Equality on dereferenced string failed";
+}
+
+TEST(StringPool, ClearDeadEntries) {
+ StringPool SP;
+ {
+ auto P1 = SP.intern("s1");
+ SP.clearDeadEntries();
+ EXPECT_FALSE(SP.empty()) << "\"s1\" entry in pool should still be retained";
+ }
+ SP.clearDeadEntries();
+ EXPECT_TRUE(SP.empty()) << "pool should be empty";
+}
+
+TEST(StringPool, NullPtr) {
+ // Make sure that we can default construct and then destroy a null
+ // PooledStringPtr.
+ PooledStringPtr Null;
+}
+
+TEST(StringPool, Hashable) {
+ StringPool SP;
+ PooledStringPtr P1 = SP.intern("s1");
+ PooledStringPtr Null;
+ EXPECT_NE(std::hash<PooledStringPtr>()(P1),
+ std::hash<PooledStringPtr>()(Null));
+}
+
+} // end anonymous namespace
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/wrapper_function_utils_test.cpp b/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/wrapper_function_utils_test.cpp
new file mode 100644
index 000000000000..f10c5093046d
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/orc/tests/unit/wrapper_function_utils_test.cpp
@@ -0,0 +1,184 @@
+//===-- wrapper_function_utils_test.cpp -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of the ORC runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#include "wrapper_function_utils.h"
+#include "gtest/gtest.h"
+
+using namespace __orc_rt;
+
+namespace {
+constexpr const char *TestString = "test string";
+} // end anonymous namespace
+
+TEST(WrapperFunctionUtilsTest, DefaultWrapperFunctionResult) {
+ WrapperFunctionResult R;
+ EXPECT_TRUE(R.empty());
+ EXPECT_EQ(R.size(), 0U);
+ EXPECT_EQ(R.getOutOfBandError(), nullptr);
+}
+
+TEST(WrapperFunctionUtilsTest, WrapperFunctionResultFromCStruct) {
+ orc_rt_CWrapperFunctionResult CR =
+ orc_rt_CreateCWrapperFunctionResultFromString(TestString);
+ WrapperFunctionResult R(CR);
+ EXPECT_EQ(R.size(), strlen(TestString) + 1);
+ EXPECT_TRUE(strcmp(R.data(), TestString) == 0);
+ EXPECT_FALSE(R.empty());
+ EXPECT_EQ(R.getOutOfBandError(), nullptr);
+}
+
+TEST(WrapperFunctionUtilsTest, WrapperFunctionResultFromRange) {
+ auto R = WrapperFunctionResult::copyFrom(TestString, strlen(TestString) + 1);
+ EXPECT_EQ(R.size(), strlen(TestString) + 1);
+ EXPECT_TRUE(strcmp(R.data(), TestString) == 0);
+ EXPECT_FALSE(R.empty());
+ EXPECT_EQ(R.getOutOfBandError(), nullptr);
+}
+
+TEST(WrapperFunctionUtilsTest, WrapperFunctionResultFromCString) {
+ auto R = WrapperFunctionResult::copyFrom(TestString);
+ EXPECT_EQ(R.size(), strlen(TestString) + 1);
+ EXPECT_TRUE(strcmp(R.data(), TestString) == 0);
+ EXPECT_FALSE(R.empty());
+ EXPECT_EQ(R.getOutOfBandError(), nullptr);
+}
+
+TEST(WrapperFunctionUtilsTest, WrapperFunctionResultFromStdString) {
+ auto R = WrapperFunctionResult::copyFrom(std::string(TestString));
+ EXPECT_EQ(R.size(), strlen(TestString) + 1);
+ EXPECT_TRUE(strcmp(R.data(), TestString) == 0);
+ EXPECT_FALSE(R.empty());
+ EXPECT_EQ(R.getOutOfBandError(), nullptr);
+}
+
+TEST(WrapperFunctionUtilsTest, WrapperFunctionResultFromOutOfBandError) {
+ auto R = WrapperFunctionResult::createOutOfBandError(TestString);
+ EXPECT_FALSE(R.empty());
+ EXPECT_TRUE(strcmp(R.getOutOfBandError(), TestString) == 0);
+}
+
+TEST(WrapperFunctionUtilsTest, WrapperFunctionCCallCreateEmpty) {
+ EXPECT_TRUE(!!WrapperFunctionCall::Create<SPSArgList<>>(ExecutorAddr()));
+}
+
+static void voidNoop() {}
+
+static orc_rt_CWrapperFunctionResult voidNoopWrapper(const char *ArgData,
+ size_t ArgSize) {
+ return WrapperFunction<void()>::handle(ArgData, ArgSize, voidNoop).release();
+}
+
+static orc_rt_CWrapperFunctionResult addWrapper(const char *ArgData,
+ size_t ArgSize) {
+ return WrapperFunction<int32_t(int32_t, int32_t)>::handle(
+ ArgData, ArgSize,
+ [](int32_t X, int32_t Y) -> int32_t { return X + Y; })
+ .release();
+}
+
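+// In-process stand-ins for the JIT dispatch machinery: these tests pass the
+// wrapper function's own address as the tag, so dispatch reduces to a direct
+// call through that pointer.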
+extern "C" __orc_rt_Opaque __orc_rt_jit_dispatch_ctx{};
+
+extern "C" orc_rt_CWrapperFunctionResult
+__orc_rt_jit_dispatch(__orc_rt_Opaque *Ctx, const void *FnTag,
+ const char *ArgData, size_t ArgSize) {
+ using WrapperFunctionType =
+ orc_rt_CWrapperFunctionResult (*)(const char *, size_t);
+
+ return reinterpret_cast<WrapperFunctionType>(const_cast<void *>(FnTag))(
+ ArgData, ArgSize);
+}
+
+TEST(WrapperFunctionUtilsTest, WrapperFunctionCallVoidNoopAndHandle) {
+ EXPECT_FALSE(!!WrapperFunction<void()>::call((void *)&voidNoopWrapper));
+}
+
+TEST(WrapperFunctionUtilsTest, WrapperFunctionCallAddWrapperAndHandle) {
+ int32_t Result;
+ EXPECT_FALSE(!!WrapperFunction<int32_t(int32_t, int32_t)>::call(
+ (void *)&addWrapper, Result, 1, 2));
+ EXPECT_EQ(Result, (int32_t)3);
+}
+
+class AddClass {
+public:
+ AddClass(int32_t X) : X(X) {}
+ int32_t addMethod(int32_t Y) { return X + Y; }
+
+private:
+ int32_t X;
+};
+
+static orc_rt_CWrapperFunctionResult addMethodWrapper(const char *ArgData,
+ size_t ArgSize) {
+ return WrapperFunction<int32_t(SPSExecutorAddr, int32_t)>::handle(
+ ArgData, ArgSize, makeMethodWrapperHandler(&AddClass::addMethod))
+ .release();
+}
+
+TEST(WrapperFunctionUtilsTest, WrapperFunctionMethodCallAndHandleRet) {
+ int32_t Result;
+ AddClass AddObj(1);
+ EXPECT_FALSE(!!WrapperFunction<int32_t(SPSExecutorAddr, int32_t)>::call(
+ (void *)&addMethodWrapper, Result, ExecutorAddr::fromPtr(&AddObj), 2));
+ EXPECT_EQ(Result, (int32_t)3);
+}
+
+static orc_rt_CWrapperFunctionResult sumArrayWrapper(const char *ArgData,
+ size_t ArgSize) {
+ return WrapperFunction<int8_t(SPSExecutorAddrRange)>::handle(
+ ArgData, ArgSize,
+ [](ExecutorAddrRange R) {
+ int8_t Sum = 0;
+ for (char C : R.toSpan<char>())
+ Sum += C;
+ return Sum;
+ })
+ .release();
+}
+
+TEST(WrapperFunctionUtilsTest, SerializedWrapperFunctionCallTest) {
+ {
+ // Check wrapper function calls.
+ char A[] = {1, 2, 3, 4};
+
+ auto WFC =
+ cantFail(WrapperFunctionCall::Create<SPSArgList<SPSExecutorAddrRange>>(
+ ExecutorAddr::fromPtr(sumArrayWrapper),
+ ExecutorAddrRange(ExecutorAddr::fromPtr(A),
+ ExecutorAddrDiff(sizeof(A)))));
+
+ WrapperFunctionResult WFR(WFC.run());
+ EXPECT_EQ(WFR.size(), 1U);
+ EXPECT_EQ(WFR.data()[0], 10);
+ }
+
+ {
+ // Check calls to void functions.
+ auto WFC =
+ cantFail(WrapperFunctionCall::Create<SPSArgList<SPSExecutorAddrRange>>(
+ ExecutorAddr::fromPtr(voidNoopWrapper), ExecutorAddrRange()));
+ auto Err = WFC.runWithSPSRet<void>();
+ EXPECT_FALSE(!!Err);
+ }
+
+ {
+ // Check calls with arguments and return values.
+ auto WFC =
+ cantFail(WrapperFunctionCall::Create<SPSArgList<int32_t, int32_t>>(
+ ExecutorAddr::fromPtr(addWrapper), 2, 4));
+
+ int32_t Result = 0;
+ auto Err = WFC.runWithSPSRet<int32_t>(Result);
+ EXPECT_FALSE(!!Err);
+ EXPECT_EQ(Result, 6);
+ }
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/orc/wrapper_function_utils.h b/contrib/llvm-project/compiler-rt/lib/orc/wrapper_function_utils.h
index 49faa03e5eb8..8009438547a3 100644
--- a/contrib/llvm-project/compiler-rt/lib/orc/wrapper_function_utils.h
+++ b/contrib/llvm-project/compiler-rt/lib/orc/wrapper_function_utils.h
@@ -13,9 +13,10 @@
#ifndef ORC_RT_WRAPPER_FUNCTION_UTILS_H
#define ORC_RT_WRAPPER_FUNCTION_UTILS_H
-#include "c_api.h"
+#include "orc_rt/c_api.h"
#include "common.h"
#include "error.h"
+#include "executor_address.h"
#include "simple_packed_serialization.h"
#include <type_traits>
@@ -26,66 +27,66 @@ namespace __orc_rt {
class WrapperFunctionResult {
public:
/// Create a default WrapperFunctionResult.
- WrapperFunctionResult() { __orc_rt_CWrapperFunctionResultInit(&R); }
+ WrapperFunctionResult() { orc_rt_CWrapperFunctionResultInit(&R); }
/// Create a WrapperFunctionResult from a CWrapperFunctionResult. This
/// instance takes ownership of the result object and will automatically
/// call dispose on the result upon destruction.
- WrapperFunctionResult(__orc_rt_CWrapperFunctionResult R) : R(R) {}
+ WrapperFunctionResult(orc_rt_CWrapperFunctionResult R) : R(R) {}
WrapperFunctionResult(const WrapperFunctionResult &) = delete;
WrapperFunctionResult &operator=(const WrapperFunctionResult &) = delete;
WrapperFunctionResult(WrapperFunctionResult &&Other) {
- __orc_rt_CWrapperFunctionResultInit(&R);
+ orc_rt_CWrapperFunctionResultInit(&R);
std::swap(R, Other.R);
}
WrapperFunctionResult &operator=(WrapperFunctionResult &&Other) {
- __orc_rt_CWrapperFunctionResult Tmp;
- __orc_rt_CWrapperFunctionResultInit(&Tmp);
+ orc_rt_CWrapperFunctionResult Tmp;
+ orc_rt_CWrapperFunctionResultInit(&Tmp);
std::swap(Tmp, Other.R);
std::swap(R, Tmp);
return *this;
}
- ~WrapperFunctionResult() { __orc_rt_DisposeCWrapperFunctionResult(&R); }
+ ~WrapperFunctionResult() { orc_rt_DisposeCWrapperFunctionResult(&R); }
/// Relinquish ownership of and return the
- /// __orc_rt_CWrapperFunctionResult.
- __orc_rt_CWrapperFunctionResult release() {
- __orc_rt_CWrapperFunctionResult Tmp;
- __orc_rt_CWrapperFunctionResultInit(&Tmp);
+ /// orc_rt_CWrapperFunctionResult.
+ orc_rt_CWrapperFunctionResult release() {
+ orc_rt_CWrapperFunctionResult Tmp;
+ orc_rt_CWrapperFunctionResultInit(&Tmp);
std::swap(R, Tmp);
return Tmp;
}
/// Get a pointer to the data contained in this instance.
- const char *data() const { return __orc_rt_CWrapperFunctionResultData(&R); }
+ char *data() { return orc_rt_CWrapperFunctionResultData(&R); }
/// Returns the size of the data contained in this instance.
- size_t size() const { return __orc_rt_CWrapperFunctionResultSize(&R); }
+ size_t size() const { return orc_rt_CWrapperFunctionResultSize(&R); }
/// Returns true if this value is equivalent to a default-constructed
/// WrapperFunctionResult.
- bool empty() const { return __orc_rt_CWrapperFunctionResultEmpty(&R); }
+ bool empty() const { return orc_rt_CWrapperFunctionResultEmpty(&R); }
/// Create a WrapperFunctionResult with the given size and return a pointer
/// to the underlying memory.
- static char *allocate(WrapperFunctionResult &R, size_t Size) {
- __orc_rt_DisposeCWrapperFunctionResult(&R.R);
- __orc_rt_CWrapperFunctionResultInit(&R.R);
- return __orc_rt_CWrapperFunctionResultAllocate(&R.R, Size);
+ static WrapperFunctionResult allocate(size_t Size) {
+ WrapperFunctionResult R;
+ R.R = orc_rt_CWrapperFunctionResultAllocate(Size);
+ return R;
}
/// Copy from the given char range.
static WrapperFunctionResult copyFrom(const char *Source, size_t Size) {
- return __orc_rt_CreateCWrapperFunctionResultFromRange(Source, Size);
+ return orc_rt_CreateCWrapperFunctionResultFromRange(Source, Size);
}
/// Copy from the given null-terminated string (includes the null-terminator).
static WrapperFunctionResult copyFrom(const char *Source) {
- return __orc_rt_CreateCWrapperFunctionResultFromString(Source);
+ return orc_rt_CreateCWrapperFunctionResultFromString(Source);
}
/// Copy from the given std::string (includes the null terminator).
@@ -95,7 +96,7 @@ public:
/// Create an out-of-band error by copying the given string.
static WrapperFunctionResult createOutOfBandError(const char *Msg) {
- return __orc_rt_CreateCWrapperFunctionResultFromOutOfBandError(Msg);
+ return orc_rt_CreateCWrapperFunctionResultFromOutOfBandError(Msg);
}
/// Create an out-of-band error by copying the given string.
@@ -103,31 +104,28 @@ public:
return createOutOfBandError(Msg.c_str());
}
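+ /// Serialize the given arguments via SPSArgListT into a newly allocated
+ /// WrapperFunctionResult. On serialization failure the returned result
+ /// carries an out-of-band error instead.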
+ template <typename SPSArgListT, typename... ArgTs>
+ static WrapperFunctionResult fromSPSArgs(const ArgTs &...Args) {
+ auto Result = allocate(SPSArgListT::size(Args...));
+ SPSOutputBuffer OB(Result.data(), Result.size());
+ if (!SPSArgListT::serialize(OB, Args...))
+ return createOutOfBandError(
+ "Error serializing arguments to blob in call");
+ return Result;
+ }
+
/// If this value is an out-of-band error then this returns the error message,
/// otherwise returns nullptr.
const char *getOutOfBandError() const {
- return __orc_rt_CWrapperFunctionResultGetOutOfBandError(&R);
+ return orc_rt_CWrapperFunctionResultGetOutOfBandError(&R);
}
private:
- __orc_rt_CWrapperFunctionResult R;
+ orc_rt_CWrapperFunctionResult R;
};
namespace detail {
-template <typename SPSArgListT, typename... ArgTs>
-Expected<WrapperFunctionResult>
-serializeViaSPSToWrapperFunctionResult(const ArgTs &...Args) {
- WrapperFunctionResult Result;
- char *DataPtr =
- WrapperFunctionResult::allocate(Result, SPSArgListT::size(Args...));
- SPSOutputBuffer OB(DataPtr, Result.size());
- if (!SPSArgListT::serialize(OB, Args...))
- return make_error<StringError>(
- "Error serializing arguments to blob in call");
- return std::move(Result);
-}
-
template <typename RetT> class WrapperFunctionHandlerCaller {
public:
template <typename HandlerT, typename ArgTupleT, std::size_t... I>
@@ -173,12 +171,8 @@ public:
auto HandlerResult = WrapperFunctionHandlerCaller<RetT>::call(
std::forward<HandlerT>(H), Args, ArgIndices{});
- if (auto Result = ResultSerializer<decltype(HandlerResult)>::serialize(
- std::move(HandlerResult)))
- return std::move(*Result);
- else
- return WrapperFunctionResult::createOutOfBandError(
- toString(Result.takeError()));
+ return ResultSerializer<decltype(HandlerResult)>::serialize(
+ std::move(HandlerResult));
}
private:
@@ -188,13 +182,12 @@ private:
SPSInputBuffer IB(ArgData, ArgSize);
return SPSArgList<SPSTagTs...>::deserialize(IB, std::get<I>(Args)...);
}
-
};
-// Map function references to function types.
+// Map function pointers to function types.
template <typename RetT, typename... ArgTs,
template <typename> class ResultSerializer, typename... SPSTagTs>
-class WrapperFunctionHandlerHelper<RetT (&)(ArgTs...), ResultSerializer,
+class WrapperFunctionHandlerHelper<RetT (*)(ArgTs...), ResultSerializer,
SPSTagTs...>
: public WrapperFunctionHandlerHelper<RetT(ArgTs...), ResultSerializer,
SPSTagTs...> {};
@@ -217,16 +210,15 @@ class WrapperFunctionHandlerHelper<RetT (ClassT::*)(ArgTs...) const,
template <typename SPSRetTagT, typename RetT> class ResultSerializer {
public:
- static Expected<WrapperFunctionResult> serialize(RetT Result) {
- return serializeViaSPSToWrapperFunctionResult<SPSArgList<SPSRetTagT>>(
- Result);
+ static WrapperFunctionResult serialize(RetT Result) {
+ return WrapperFunctionResult::fromSPSArgs<SPSArgList<SPSRetTagT>>(Result);
}
};
template <typename SPSRetTagT> class ResultSerializer<SPSRetTagT, Error> {
public:
- static Expected<WrapperFunctionResult> serialize(Error Err) {
- return serializeViaSPSToWrapperFunctionResult<SPSArgList<SPSRetTagT>>(
+ static WrapperFunctionResult serialize(Error Err) {
+ return WrapperFunctionResult::fromSPSArgs<SPSArgList<SPSRetTagT>>(
toSPSSerializable(std::move(Err)));
}
};
@@ -234,8 +226,8 @@ public:
template <typename SPSRetTagT, typename T>
class ResultSerializer<SPSRetTagT, Expected<T>> {
public:
- static Expected<WrapperFunctionResult> serialize(Expected<T> E) {
- return serializeViaSPSToWrapperFunctionResult<SPSArgList<SPSRetTagT>>(
+ static WrapperFunctionResult serialize(Expected<T> E) {
+ return WrapperFunctionResult::fromSPSArgs<SPSArgList<SPSRetTagT>>(
toSPSSerializable(std::move(E)));
}
};
@@ -304,20 +296,22 @@ public:
// operation fails.
detail::ResultDeserializer<SPSRetTagT, RetT>::makeSafe(Result);
+ // On Windows these symbols cannot be null/unresolved, so the checks below
+ // would always pass and taking the addresses would trigger a compiler
+ // warning.
+#if !defined(_WIN32)
if (ORC_RT_UNLIKELY(!&__orc_rt_jit_dispatch_ctx))
return make_error<StringError>("__orc_rt_jit_dispatch_ctx not set");
if (ORC_RT_UNLIKELY(!&__orc_rt_jit_dispatch))
return make_error<StringError>("__orc_rt_jit_dispatch not set");
-
+#endif
auto ArgBuffer =
- detail::serializeViaSPSToWrapperFunctionResult<SPSArgList<SPSTagTs...>>(
- Args...);
- if (!ArgBuffer)
- return ArgBuffer.takeError();
-
- WrapperFunctionResult ResultBuffer =
- __orc_rt_jit_dispatch(&__orc_rt_jit_dispatch_ctx, FnTag,
- ArgBuffer->data(), ArgBuffer->size());
+ WrapperFunctionResult::fromSPSArgs<SPSArgList<SPSTagTs...>>(Args...);
+ if (const char *ErrMsg = ArgBuffer.getOutOfBandError())
+ return make_error<StringError>(ErrMsg);
+
+ WrapperFunctionResult ResultBuffer = __orc_rt_jit_dispatch(
+ &__orc_rt_jit_dispatch_ctx, FnTag, ArgBuffer.data(), ArgBuffer.size());
if (auto ErrMsg = ResultBuffer.getOutOfBandError())
return make_error<StringError>(ErrMsg);
@@ -329,8 +323,8 @@ public:
static WrapperFunctionResult handle(const char *ArgData, size_t ArgSize,
HandlerT &&Handler) {
using WFHH =
- detail::WrapperFunctionHandlerHelper<HandlerT, ResultSerializer,
- SPSTagTs...>;
+ detail::WrapperFunctionHandlerHelper<std::remove_reference_t<HandlerT>,
+ ResultSerializer, SPSTagTs...>;
return WFHH::apply(std::forward<HandlerT>(Handler), ArgData, ArgSize);
}
@@ -362,6 +356,154 @@ public:
using WrapperFunction<SPSEmpty(SPSTagTs...)>::handle;
};
+/// A function object that takes an ExecutorAddr as its first argument,
+/// casts that address to a ClassT*, then calls the given method on that
+/// pointer passing in the remaining function arguments. This utility
+/// removes some of the boilerplate from writing wrappers for method calls.
+///
+/// @code{.cpp}
+/// class MyClass {
+/// public:
+/// void myMethod(uint32_t, bool) { ... }
+/// };
+///
+/// // SPS Method signature -- note MyClass object address as first argument.
+/// using SPSMyMethodWrapperSignature =
+/// SPSTuple<SPSExecutorAddr, uint32_t, bool>;
+///
+/// WrapperFunctionResult
+/// myMethodCallWrapper(const char *ArgData, size_t ArgSize) {
+/// return WrapperFunction<SPSMyMethodWrapperSignature>::handle(
+/// ArgData, ArgSize, makeMethodWrapperHandler(&MyClass::myMethod));
+/// }
+/// @endcode
+///
+template <typename RetT, typename ClassT, typename... ArgTs>
+class MethodWrapperHandler {
+public:
+ using MethodT = RetT (ClassT::*)(ArgTs...);
+ MethodWrapperHandler(MethodT M) : M(M) {}
+ RetT operator()(ExecutorAddr ObjAddr, ArgTs &...Args) {
+ return (ObjAddr.toPtr<ClassT *>()->*M)(std::forward<ArgTs>(Args)...);
+ }
+
+private:
+ MethodT M;
+};
+
+/// Create a MethodWrapperHandler object from the given method pointer.
+template <typename RetT, typename ClassT, typename... ArgTs>
+MethodWrapperHandler<RetT, ClassT, ArgTs...>
+makeMethodWrapperHandler(RetT (ClassT::*Method)(ArgTs...)) {
+ return MethodWrapperHandler<RetT, ClassT, ArgTs...>(Method);
+}
+
+/// Represents a call to a wrapper function.
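+///
+/// A typical round-trip looks like the following sketch (sumWrapper is a
+/// hypothetical function following the
+/// orc_rt_CWrapperFunctionResult(const char *, size_t) convention):
+///
+/// @code{.cpp}
+/// auto WFC = cantFail(WrapperFunctionCall::Create<SPSArgList<int32_t>>(
+///     ExecutorAddr::fromPtr(&sumWrapper), 42));
+/// int32_t Result = 0;
+/// if (auto Err = WFC.runWithSPSRet<int32_t>(Result))
+///   return Err;
+/// @endcode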
+class WrapperFunctionCall {
+public:
+ // FIXME: Switch to a SmallVector<char, 24> once ORC runtime has a
+ // smallvector.
+ using ArgDataBufferType = std::vector<char>;
+
+ /// Create a WrapperFunctionCall using the given SPS serializer to serialize
+ /// the arguments.
+ template <typename SPSSerializer, typename... ArgTs>
+ static Expected<WrapperFunctionCall> Create(ExecutorAddr FnAddr,
+ const ArgTs &...Args) {
+ ArgDataBufferType ArgData;
+ ArgData.resize(SPSSerializer::size(Args...));
+ SPSOutputBuffer OB(ArgData.empty() ? nullptr : ArgData.data(),
+ ArgData.size());
+ if (SPSSerializer::serialize(OB, Args...))
+ return WrapperFunctionCall(FnAddr, std::move(ArgData));
+ return make_error<StringError>("Cannot serialize arguments for "
+ "AllocActionCall");
+ }
+
+ WrapperFunctionCall() = default;
+
+ /// Create a WrapperFunctionCall from a target function and arg buffer.
+ WrapperFunctionCall(ExecutorAddr FnAddr, ArgDataBufferType ArgData)
+ : FnAddr(FnAddr), ArgData(std::move(ArgData)) {}
+
+ /// Returns the address to be called.
+ const ExecutorAddr &getCallee() const { return FnAddr; }
+
+ /// Returns the argument data.
+ const ArgDataBufferType &getArgData() const { return ArgData; }
+
+ /// WrapperFunctionCalls convert to true if the callee is non-null.
+ explicit operator bool() const { return !!FnAddr; }
+
+ /// Run call returning raw WrapperFunctionResult.
+ WrapperFunctionResult run() const {
+ using FnTy =
+ orc_rt_CWrapperFunctionResult(const char *ArgData, size_t ArgSize);
+ return WrapperFunctionResult(
+ FnAddr.toPtr<FnTy *>()(ArgData.data(), ArgData.size()));
+ }
+
+ /// Run call and deserialize result using SPS.
+ template <typename SPSRetT, typename RetT>
+ std::enable_if_t<!std::is_same<SPSRetT, void>::value, Error>
+ runWithSPSRet(RetT &RetVal) const {
+ auto WFR = run();
+ if (const char *ErrMsg = WFR.getOutOfBandError())
+ return make_error<StringError>(ErrMsg);
+ SPSInputBuffer IB(WFR.data(), WFR.size());
+ if (!SPSSerializationTraits<SPSRetT, RetT>::deserialize(IB, RetVal))
+ return make_error<StringError>("Could not deserialize result from "
+ "serialized wrapper function call");
+ return Error::success();
+ }
+
+ /// Overload for SPS functions returning void.
+ template <typename SPSRetT>
+ std::enable_if_t<std::is_same<SPSRetT, void>::value, Error>
+ runWithSPSRet() const {
+ SPSEmpty E;
+ return runWithSPSRet<SPSEmpty>(E);
+ }
+
+ /// Run the call and deserialize an SPSError result. Errors returned via
+ /// the SPSError value and deserialization failures are merged into the
+ /// returned Error.
+ Error runWithSPSRetErrorMerged() const {
+ detail::SPSSerializableError RetErr;
+ if (auto Err = runWithSPSRet<SPSError>(RetErr))
+ return Err;
+ return detail::fromSPSSerializable(std::move(RetErr));
+ }
+
+private:
+ ExecutorAddr FnAddr;
+ std::vector<char> ArgData;
+};
+
+using SPSWrapperFunctionCall = SPSTuple<SPSExecutorAddr, SPSSequence<char>>;
+
+template <>
+class SPSSerializationTraits<SPSWrapperFunctionCall, WrapperFunctionCall> {
+public:
+ static size_t size(const WrapperFunctionCall &WFC) {
+ return SPSArgList<SPSExecutorAddr, SPSSequence<char>>::size(
+ WFC.getCallee(), WFC.getArgData());
+ }
+
+ static bool serialize(SPSOutputBuffer &OB, const WrapperFunctionCall &WFC) {
+ return SPSArgList<SPSExecutorAddr, SPSSequence<char>>::serialize(
+ OB, WFC.getCallee(), WFC.getArgData());
+ }
+
+ static bool deserialize(SPSInputBuffer &IB, WrapperFunctionCall &WFC) {
+ ExecutorAddr FnAddr;
+ WrapperFunctionCall::ArgDataBufferType ArgData;
+ if (!SPSWrapperFunctionCall::AsArgList::deserialize(IB, FnAddr, ArgData))
+ return false;
+ WFC = WrapperFunctionCall(FnAddr, std::move(ArgData));
+ return true;
+ }
+};
+
} // end namespace __orc_rt
#endif // ORC_RT_WRAPPER_FUNCTION_UTILS_H
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/GCDAProfiling.c b/contrib/llvm-project/compiler-rt/lib/profile/GCDAProfiling.c
index 8e51f57b09ff..4f46fd2839b9 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/GCDAProfiling.c
+++ b/contrib/llvm-project/compiler-rt/lib/profile/GCDAProfiling.c
@@ -3,9 +3,9 @@
|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|* See https://llvm.org/LICENSE.txt for license information.
|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-|*
+|*
|*===----------------------------------------------------------------------===*|
-|*
+|*
|* This file implements the call back routines for the gcov profiling
|* instrumentation pass. Link against this library when running code through
|* the -insert-gcov-profiling LLVM pass.
@@ -65,7 +65,7 @@ static char *filename = NULL;
/*
* The current file we're outputting.
- */
+ */
static FILE *output_file = NULL;
/*
@@ -83,7 +83,7 @@ static HANDLE mmap_handle = NULL;
#endif
static int fd = -1;
-typedef void (*fn_ptr)();
+typedef void (*fn_ptr)(void);
typedef void* dynamic_object_id;
// The address of this variable identifies a given dynamic object.
@@ -183,7 +183,7 @@ static void write_64bit_value(uint64_t i) {
write_32bit_value(hi);
}
-static uint32_t read_32bit_value() {
+static uint32_t read_32bit_value(void) {
uint32_t val;
if (new_file)
@@ -194,7 +194,7 @@ static uint32_t read_32bit_value() {
return val;
}
-static uint64_t read_64bit_value() {
+static uint64_t read_64bit_value(void) {
// GCOV uses a lo-/hi-word format even on big-endian systems.
// See also GCOVBuffer::readInt64 in LLVM.
uint32_t lo = read_32bit_value();
@@ -218,7 +218,7 @@ static char *mangle_filename(const char *orig_filename) {
return new_filename;
}
-static int map_file() {
+static int map_file(void) {
fseek(output_file, 0L, SEEK_END);
file_size = ftell(output_file);
@@ -262,13 +262,8 @@ static int map_file() {
return 0;
}
-static void unmap_file() {
+static void unmap_file(void) {
#if defined(_WIN32)
- if (!FlushViewOfFile(write_buffer, file_size)) {
- fprintf(stderr, "profiling: %s: cannot flush mapped view: %lu\n", filename,
- GetLastError());
- }
-
if (!UnmapViewOfFile(write_buffer)) {
fprintf(stderr, "profiling: %s: cannot unmap mapped view: %lu\n", filename,
GetLastError());
@@ -449,7 +444,7 @@ void llvm_gcda_emit_arcs(uint32_t num_counters, uint64_t *counters) {
}
COMPILER_RT_VISIBILITY
-void llvm_gcda_summary_info() {
+void llvm_gcda_summary_info(void) {
uint32_t runs = 1;
static uint32_t run_counted = 0; // We only want to increase the run count once.
uint32_t val = 0;
@@ -513,7 +508,7 @@ void llvm_gcda_summary_info() {
}
COMPILER_RT_VISIBILITY
-void llvm_gcda_end_file() {
+void llvm_gcda_end_file(void) {
/* Write out EOF record. */
if (output_file) {
write_bytes("\0\0\0\0\0\0\0\0", 8);
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfiling.c b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfiling.c
index 6df65f66df73..da04d8ebdec9 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfiling.c
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfiling.c
@@ -20,12 +20,20 @@
#define INSTR_PROF_VALUE_PROF_DATA
#include "profile/InstrProfData.inc"
+static uint32_t __llvm_profile_global_timestamp = 1;
+
+COMPILER_RT_VISIBILITY
+void INSTR_PROF_PROFILE_SET_TIMESTAMP(uint64_t *Probe) {
+ if (*Probe == 0 || *Probe == (uint64_t)-1)
+ *Probe = __llvm_profile_global_timestamp++;
+}
+
COMPILER_RT_VISIBILITY uint64_t __llvm_profile_get_magic(void) {
return sizeof(void *) == sizeof(uint64_t) ? (INSTR_PROF_RAW_MAGIC_64)
: (INSTR_PROF_RAW_MAGIC_32);
}
-COMPILER_RT_VISIBILITY void __llvm_profile_set_dumped() {
+COMPILER_RT_VISIBILITY void __llvm_profile_set_dumped(void) {
lprofSetProfileDumped(1);
}
@@ -38,14 +46,23 @@ __llvm_profile_get_num_padding_bytes(uint64_t SizeInBytes) {
}
COMPILER_RT_VISIBILITY uint64_t __llvm_profile_get_version(void) {
- return __llvm_profile_raw_version;
+ return INSTR_PROF_RAW_VERSION_VAR;
}
COMPILER_RT_VISIBILITY void __llvm_profile_reset_counters(void) {
- uint64_t *I = __llvm_profile_begin_counters();
- uint64_t *E = __llvm_profile_end_counters();
+ if (__llvm_profile_get_version() & VARIANT_MASK_TEMPORAL_PROF)
+ __llvm_profile_global_timestamp = 1;
+
+ char *I = __llvm_profile_begin_counters();
+ char *E = __llvm_profile_end_counters();
+
+ char ResetValue =
+ (__llvm_profile_get_version() & VARIANT_MASK_BYTE_COVERAGE) ? 0xFF : 0;
+ memset(I, ResetValue, E - I);
- memset(I, 0, sizeof(uint64_t) * (E - I));
+ I = __llvm_profile_begin_bitmap();
+ E = __llvm_profile_end_bitmap();
+ memset(I, 0x0, E - I);
const __llvm_profile_data *DataBegin = __llvm_profile_begin_data();
const __llvm_profile_data *DataEnd = __llvm_profile_end_data();
@@ -62,11 +79,11 @@ COMPILER_RT_VISIBILITY void __llvm_profile_reset_counters(void) {
CurrentVSiteCount += DI->NumValueSites[VKI];
for (i = 0; i < CurrentVSiteCount; ++i) {
- ValueProfNode *CurrentVNode = ValueCounters[i];
+ ValueProfNode *CurrVNode = ValueCounters[i];
- while (CurrentVNode) {
- CurrentVNode->Count = 0;
- CurrentVNode = CurrentVNode->Next;
+ while (CurrVNode) {
+ CurrVNode->Count = 0;
+ CurrVNode = CurrVNode->Next;
}
}
}
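A hedged sketch (simplified, outside the runtime) of the timestamp-probe
semantics introduced above: only the first execution stamps a probe, so the
probes record the order in which functions first run (temporal profiling):

#include <stdint.h>
#include <stdio.h>

static uint32_t global_timestamp = 1;

static void set_timestamp(uint64_t *probe) {
  if (*probe == 0 || *probe == (uint64_t)-1) /* unset, or value after reset */
    *probe = global_timestamp++;
}

int main(void) {
  uint64_t probe_a = 0, probe_b = 0;
  set_timestamp(&probe_b); /* b executes first */
  set_timestamp(&probe_a);
  set_timestamp(&probe_b); /* re-execution does not restamp */
  printf("a=%llu b=%llu\n", (unsigned long long)probe_a,
         (unsigned long long)probe_b); /* prints a=2 b=1 */
  return 0;
}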
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfiling.h b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfiling.h
index 237acb33ffa1..012390833691 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfiling.h
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfiling.h
@@ -12,6 +12,17 @@
#include "InstrProfilingPort.h"
#include <stdio.h>
+// Make sure __LLVM_INSTR_PROFILE_GENERATE is always defined before
+// including instr_prof_interface.h so the interface functions are
+// declared correctly for the runtime.
+// __LLVM_INSTR_PROFILE_GENERATE is always `#undef`ed after the header,
+// because compiler-rt does not support profiling the profiling runtime itself.
+#ifndef __LLVM_INSTR_PROFILE_GENERATE
+#define __LLVM_INSTR_PROFILE_GENERATE
+#endif
+#include "profile/instr_prof_interface.h"
+#undef __LLVM_INSTR_PROFILE_GENERATE
+
#define INSTR_PROF_VISIBILITY COMPILER_RT_VISIBILITY
#include "profile/InstrProfData.inc"
@@ -55,6 +66,12 @@ int __llvm_profile_is_continuous_mode_enabled(void);
void __llvm_profile_enable_continuous_mode(void);
/*!
+ * \brief Disable continuous mode.
+ *
+ */
+void __llvm_profile_disable_continuous_mode(void);
+
+/*!
* \brief Set the page size.
*
* This is a pre-requisite for enabling continuous mode. The buffer size
@@ -86,26 +103,22 @@ const __llvm_profile_data *__llvm_profile_begin_data(void);
const __llvm_profile_data *__llvm_profile_end_data(void);
const char *__llvm_profile_begin_names(void);
const char *__llvm_profile_end_names(void);
-uint64_t *__llvm_profile_begin_counters(void);
-uint64_t *__llvm_profile_end_counters(void);
+char *__llvm_profile_begin_counters(void);
+char *__llvm_profile_end_counters(void);
+char *__llvm_profile_begin_bitmap(void);
+char *__llvm_profile_end_bitmap(void);
ValueProfNode *__llvm_profile_begin_vnodes();
ValueProfNode *__llvm_profile_end_vnodes();
uint32_t *__llvm_profile_begin_orderfile();
/*!
- * \brief Clear profile counters to zero.
- *
- */
-void __llvm_profile_reset_counters(void);
-
-/*!
* \brief Merge profile data from buffer.
*
- * Read profile data form buffer \p Profile and merge with in-process profile
- * counters. The client is expected to have checked or already knows the profile
- * data in the buffer matches the in-process counter structure before calling
- * it. Returns 0 (success) if the profile data is valid. Upon reading
- * invalid/corrupted profile data, returns 1 (failure).
+ * Read profile data from buffer \p Profile and merge with in-process profile
+ * counters and bitmaps. The client is expected to have checked, or to already
+ * know, that the profile data in the buffer matches the in-process counter
+ * structure before calling it. Returns 0 (success) if the profile data is
+ * valid. Upon reading invalid/corrupted profile data, returns 1 (failure).
*/
int __llvm_profile_merge_from_buffer(const char *Profile, uint64_t Size);
@@ -113,8 +126,8 @@ int __llvm_profile_merge_from_buffer(const char *Profile, uint64_t Size);
*
* Returns 0 (success) if the profile data in buffer \p Profile with size
* \p Size was generated by the same binary and therefore matches
- * structurally the in-process counters. If the profile data in buffer is
- * not compatible, the interface returns 1 (failure).
+ * structurally the in-process counters and bitmaps. If the profile data in
+ * buffer is not compatible, the interface returns 1 (failure).
*/
int __llvm_profile_check_compatibility(const char *Profile,
uint64_t Size);
@@ -148,53 +161,10 @@ void __llvm_profile_instrument_target_value(uint64_t TargetValue, void *Data,
int __llvm_profile_write_file(void);
int __llvm_orderfile_write_file(void);
-/*!
- * \brief this is a wrapper interface to \c __llvm_profile_write_file.
- * After this interface is invoked, a arleady dumped flag will be set
- * so that profile won't be dumped again during program exit.
- * Invocation of interface __llvm_profile_reset_counters will clear
- * the flag. This interface is designed to be used to collect profile
- * data from user selected hot regions. The use model is
- * __llvm_profile_reset_counters();
- * ... hot region 1
- * __llvm_profile_dump();
- * .. some other code
- * __llvm_profile_reset_counters();
- * ... hot region 2
- * __llvm_profile_dump();
- *
- * It is expected that on-line profile merging is on with \c %m specifier
- * used in profile filename . If merging is not turned on, user is expected
- * to invoke __llvm_profile_set_filename to specify different profile names
- * for different regions before dumping to avoid profile write clobbering.
- */
-int __llvm_profile_dump(void);
-
-int __llvm_orderfile_dump(void);
/*!
- * \brief Set the filename for writing instrumentation data.
- *
- * Sets the filename to be used for subsequent calls to
- * \a __llvm_profile_write_file().
- *
- * \c Name is not copied, so it must remain valid. Passing NULL resets the
- * filename logic to the default behaviour.
- *
- * Note: There may be multiple copies of the profile runtime (one for each
- * instrumented image/DSO). This API only modifies the filename within the
- * copy of the runtime available to the calling image.
- *
- * Warning: This is a no-op if continuous mode (\ref
- * __llvm_profile_is_continuous_mode_enabled) is on. The reason for this is
- * that in continuous mode, profile counters are mmap()'d to the profile at
- * program initialization time. Support for transferring the mmap'd profile
- * counts to a new file has not been implemented.
- */
-void __llvm_profile_set_filename(const char *Name);
-
-/*!
- * \brief Set the FILE object for writing instrumentation data.
+ * \brief Set the FILE object for writing instrumentation data. Returns 0 if
+ * set successfully, or 1 on failure.
*
* Sets the FILE object to be used for subsequent calls to
* \a __llvm_profile_write_file(). The profile file name set by environment
@@ -213,13 +183,12 @@ void __llvm_profile_set_filename(const char *Name);
* instrumented image/DSO). This API only modifies the file object within the
* copy of the runtime available to the calling image.
*
- * Warning: This is a no-op if continuous mode (\ref
- * __llvm_profile_is_continuous_mode_enabled) is on. The reason for this is
- * that in continuous mode, profile counters are mmap()'d to the profile at
- * program initialization time. Support for transferring the mmap'd profile
- * counts to a new file has not been implemented.
+ * Warning: This is a no-op if EnableMerge is 0 in continuous mode (\ref
+ * __llvm_profile_is_continuous_mode_enabled), because disabling merging
+ * requires copying the old profile file to the new one, and this function is
+ * usually used when the process doesn't have permission to open the file.
*/
-void __llvm_profile_set_file_object(FILE *File, int EnableMerge);
+int __llvm_profile_set_file_object(FILE *File, int EnableMerge);
/*! \brief Register to write instrumentation data to file at exit. */
int __llvm_profile_register_write_file_atexit(void);
@@ -260,25 +229,42 @@ uint64_t __llvm_profile_get_magic(void);
uint64_t __llvm_profile_get_version(void);
/*! \brief Get the number of entries in the profile data section. */
+uint64_t __llvm_profile_get_num_data(const __llvm_profile_data *Begin,
+ const __llvm_profile_data *End);
+
+/*! \brief Get the size of the profile data section in bytes. */
uint64_t __llvm_profile_get_data_size(const __llvm_profile_data *Begin,
const __llvm_profile_data *End);
+/*! \brief Get the size in bytes of a single counter entry. */
+size_t __llvm_profile_counter_entry_size(void);
+
+/*! \brief Get the number of entries in the profile counters section. */
+uint64_t __llvm_profile_get_num_counters(const char *Begin, const char *End);
+
+/*! \brief Get the size of the profile counters section in bytes. */
+uint64_t __llvm_profile_get_counters_size(const char *Begin, const char *End);
+
+/*! \brief Get the number of bytes in the profile bitmap section. */
+uint64_t __llvm_profile_get_num_bitmap_bytes(const char *Begin,
+ const char *End);
+
+/*! \brief Get the size of the profile name section in bytes. */
+uint64_t __llvm_profile_get_name_size(const char *Begin, const char *End);
+
/*! \brief Given the sizes of the data and counter information, return the
* number of padding bytes before and after the counters, and after the names,
* in the raw profile.
*
- * Note: In this context, "size" means "number of entries", i.e. the first two
- * arguments must be the result of __llvm_profile_get_data_size() and of
- * (__llvm_profile_end_counters() - __llvm_profile_begin_counters()) resp.
- *
* Note: When mmap() mode is disabled, no padding bytes before/after counters
* are needed. However, in mmap() mode, the counter section in the raw profile
* must be page-aligned: this API computes the number of padding bytes
* needed to achieve that.
*/
void __llvm_profile_get_padding_sizes_for_counters(
- uint64_t DataSize, uint64_t CountersSize, uint64_t NamesSize,
- uint64_t *PaddingBytesBeforeCounters, uint64_t *PaddingBytesAfterCounters,
+ uint64_t DataSize, uint64_t CountersSize, uint64_t NumBitmapBytes,
+ uint64_t NamesSize, uint64_t *PaddingBytesBeforeCounters,
+ uint64_t *PaddingBytesAfterCounters, uint64_t *PaddingBytesAfterBitmap,
uint64_t *PaddingBytesAfterNames);
/*!
@@ -301,14 +287,12 @@ void __llvm_profile_set_dumped();
COMPILER_RT_VISIBILITY extern int INSTR_PROF_PROFILE_RUNTIME_VAR;
/*!
- * This variable is defined in InstrProfiling.c. Its main purpose is to
- * encode the raw profile version value and other format related information
- * such as whether the profile is from IR based instrumentation. The variable
- * is defined as weak so that compiler can emit an overriding definition
- * depending on user option. Since we don't support mixing FE and IR based
- * data in the same raw profile data file (in other words, shared libs and
- * main program are expected to be instrumented in the same way), there is
- * no need for this variable to be hidden.
+ * This variable is defined in InstrProfilingVersionVar.c as a hidden symbol
+ * (except on Apple platforms where this symbol is checked by TAPI). Its main
+ * purpose is to encode the raw profile version value and other format related
+ * information such as whether the profile is from IR based instrumentation. The
+ * variable is defined as weak so that compiler can emit an overriding
+ * definition depending on user option.
*/
extern uint64_t INSTR_PROF_RAW_VERSION_VAR; /* __llvm_profile_raw_version */
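The region-profiling usage model from the removed __llvm_profile_dump comment
still applies; the declarations simply moved behind the
profile/instr_prof_interface.h include added above. A sketch, assuming on-line
merging (%m in the filename pattern) is enabled:

#include "profile/instr_prof_interface.h"

void profile_hot_regions(void) {
  __llvm_profile_reset_counters();
  /* ... hot region 1 ... */
  __llvm_profile_dump();
  /* ... some other code ... */
  __llvm_profile_reset_counters();
  /* ... hot region 2 ... */
  __llvm_profile_dump();
}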
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingBuffer.c b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingBuffer.c
index 68b4f5cd6f52..af52804b2b53 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingBuffer.c
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingBuffer.c
@@ -33,6 +33,10 @@ COMPILER_RT_VISIBILITY void __llvm_profile_enable_continuous_mode(void) {
ContinuouslySyncProfile = 1;
}
+COMPILER_RT_VISIBILITY void __llvm_profile_disable_continuous_mode(void) {
+ ContinuouslySyncProfile = 0;
+}
+
COMPILER_RT_VISIBILITY void __llvm_profile_set_page_size(unsigned PS) {
PageSize = PS;
}
@@ -41,23 +45,62 @@ COMPILER_RT_VISIBILITY
uint64_t __llvm_profile_get_size_for_buffer(void) {
const __llvm_profile_data *DataBegin = __llvm_profile_begin_data();
const __llvm_profile_data *DataEnd = __llvm_profile_end_data();
- const uint64_t *CountersBegin = __llvm_profile_begin_counters();
- const uint64_t *CountersEnd = __llvm_profile_end_counters();
+ const char *CountersBegin = __llvm_profile_begin_counters();
+ const char *CountersEnd = __llvm_profile_end_counters();
+ const char *BitmapBegin = __llvm_profile_begin_bitmap();
+ const char *BitmapEnd = __llvm_profile_end_bitmap();
const char *NamesBegin = __llvm_profile_begin_names();
const char *NamesEnd = __llvm_profile_end_names();
return __llvm_profile_get_size_for_buffer_internal(
- DataBegin, DataEnd, CountersBegin, CountersEnd, NamesBegin, NamesEnd);
+ DataBegin, DataEnd, CountersBegin, CountersEnd, BitmapBegin, BitmapEnd,
+ NamesBegin, NamesEnd);
}
COMPILER_RT_VISIBILITY
-uint64_t __llvm_profile_get_data_size(const __llvm_profile_data *Begin,
- const __llvm_profile_data *End) {
+uint64_t __llvm_profile_get_num_data(const __llvm_profile_data *Begin,
+ const __llvm_profile_data *End) {
intptr_t BeginI = (intptr_t)Begin, EndI = (intptr_t)End;
return ((EndI + sizeof(__llvm_profile_data) - 1) - BeginI) /
sizeof(__llvm_profile_data);
}
+COMPILER_RT_VISIBILITY
+uint64_t __llvm_profile_get_data_size(const __llvm_profile_data *Begin,
+ const __llvm_profile_data *End) {
+ return __llvm_profile_get_num_data(Begin, End) * sizeof(__llvm_profile_data);
+}
+
+COMPILER_RT_VISIBILITY size_t __llvm_profile_counter_entry_size(void) {
+ if (__llvm_profile_get_version() & VARIANT_MASK_BYTE_COVERAGE)
+ return sizeof(uint8_t);
+ return sizeof(uint64_t);
+}
+
+COMPILER_RT_VISIBILITY
+uint64_t __llvm_profile_get_num_counters(const char *Begin, const char *End) {
+ intptr_t BeginI = (intptr_t)Begin, EndI = (intptr_t)End;
+ return ((EndI + __llvm_profile_counter_entry_size() - 1) - BeginI) /
+ __llvm_profile_counter_entry_size();
+}
+
+COMPILER_RT_VISIBILITY
+uint64_t __llvm_profile_get_counters_size(const char *Begin, const char *End) {
+ return __llvm_profile_get_num_counters(Begin, End) *
+ __llvm_profile_counter_entry_size();
+}
+
+COMPILER_RT_VISIBILITY
+uint64_t __llvm_profile_get_num_bitmap_bytes(const char *Begin,
+ const char *End) {
+ return (End - Begin);
+}
+
+COMPILER_RT_VISIBILITY
+uint64_t __llvm_profile_get_name_size(const char *Begin, const char *End) {
+ return End - Begin;
+}
+
/// Calculate the number of padding bytes needed to add to \p Offset in order
/// for (\p Offset + Padding) to be page-aligned.
static uint64_t calculateBytesNeededToPageAlign(uint64_t Offset) {
@@ -77,49 +120,56 @@ static int needsCounterPadding(void) {
COMPILER_RT_VISIBILITY
void __llvm_profile_get_padding_sizes_for_counters(
- uint64_t DataSize, uint64_t CountersSize, uint64_t NamesSize,
- uint64_t *PaddingBytesBeforeCounters, uint64_t *PaddingBytesAfterCounters,
+ uint64_t DataSize, uint64_t CountersSize, uint64_t NumBitmapBytes,
+ uint64_t NamesSize, uint64_t *PaddingBytesBeforeCounters,
+ uint64_t *PaddingBytesAfterCounters, uint64_t *PaddingBytesAfterBitmapBytes,
uint64_t *PaddingBytesAfterNames) {
if (!needsCounterPadding()) {
*PaddingBytesBeforeCounters = 0;
- *PaddingBytesAfterCounters = 0;
+ *PaddingBytesAfterCounters =
+ __llvm_profile_get_num_padding_bytes(CountersSize);
+ *PaddingBytesAfterBitmapBytes =
+ __llvm_profile_get_num_padding_bytes(NumBitmapBytes);
*PaddingBytesAfterNames = __llvm_profile_get_num_padding_bytes(NamesSize);
return;
}
// In continuous mode, the file offsets for headers and for the start of
// counter sections need to be page-aligned.
- uint64_t DataSizeInBytes = DataSize * sizeof(__llvm_profile_data);
- uint64_t CountersSizeInBytes = CountersSize * sizeof(uint64_t);
- *PaddingBytesBeforeCounters = calculateBytesNeededToPageAlign(
- sizeof(__llvm_profile_header) + DataSizeInBytes);
- *PaddingBytesAfterCounters =
- calculateBytesNeededToPageAlign(CountersSizeInBytes);
+ *PaddingBytesBeforeCounters =
+ calculateBytesNeededToPageAlign(sizeof(__llvm_profile_header) + DataSize);
+ *PaddingBytesAfterCounters = calculateBytesNeededToPageAlign(CountersSize);
+ *PaddingBytesAfterBitmapBytes =
+ calculateBytesNeededToPageAlign(NumBitmapBytes);
*PaddingBytesAfterNames = calculateBytesNeededToPageAlign(NamesSize);
}
COMPILER_RT_VISIBILITY
uint64_t __llvm_profile_get_size_for_buffer_internal(
const __llvm_profile_data *DataBegin, const __llvm_profile_data *DataEnd,
- const uint64_t *CountersBegin, const uint64_t *CountersEnd,
- const char *NamesBegin, const char *NamesEnd) {
+ const char *CountersBegin, const char *CountersEnd, const char *BitmapBegin,
+ const char *BitmapEnd, const char *NamesBegin, const char *NamesEnd) {
/* Match logic in __llvm_profile_write_buffer(). */
const uint64_t NamesSize = (NamesEnd - NamesBegin) * sizeof(char);
uint64_t DataSize = __llvm_profile_get_data_size(DataBegin, DataEnd);
- uint64_t CountersSize = CountersEnd - CountersBegin;
+ uint64_t CountersSize =
+ __llvm_profile_get_counters_size(CountersBegin, CountersEnd);
+ const uint64_t NumBitmapBytes =
+ __llvm_profile_get_num_bitmap_bytes(BitmapBegin, BitmapEnd);
/* Determine how much padding is needed before/after the counters and after
* the names. */
uint64_t PaddingBytesBeforeCounters, PaddingBytesAfterCounters,
- PaddingBytesAfterNames;
+ PaddingBytesAfterNames, PaddingBytesAfterBitmapBytes;
__llvm_profile_get_padding_sizes_for_counters(
- DataSize, CountersSize, NamesSize, &PaddingBytesBeforeCounters,
- &PaddingBytesAfterCounters, &PaddingBytesAfterNames);
+ DataSize, CountersSize, NumBitmapBytes, NamesSize,
+ &PaddingBytesBeforeCounters, &PaddingBytesAfterCounters,
+ &PaddingBytesAfterBitmapBytes, &PaddingBytesAfterNames);
return sizeof(__llvm_profile_header) + __llvm_write_binary_ids(NULL) +
- (DataSize * sizeof(__llvm_profile_data)) + PaddingBytesBeforeCounters +
- (CountersSize * sizeof(uint64_t)) + PaddingBytesAfterCounters +
- NamesSize + PaddingBytesAfterNames;
+ DataSize + PaddingBytesBeforeCounters + CountersSize +
+ PaddingBytesAfterCounters + NumBitmapBytes +
+ PaddingBytesAfterBitmapBytes + NamesSize + PaddingBytesAfterNames;
}
COMPILER_RT_VISIBILITY
@@ -136,10 +186,12 @@ COMPILER_RT_VISIBILITY int __llvm_profile_write_buffer(char *Buffer) {
COMPILER_RT_VISIBILITY int __llvm_profile_write_buffer_internal(
char *Buffer, const __llvm_profile_data *DataBegin,
- const __llvm_profile_data *DataEnd, const uint64_t *CountersBegin,
- const uint64_t *CountersEnd, const char *NamesBegin, const char *NamesEnd) {
+ const __llvm_profile_data *DataEnd, const char *CountersBegin,
+ const char *CountersEnd, const char *BitmapBegin, const char *BitmapEnd,
+ const char *NamesBegin, const char *NamesEnd) {
ProfDataWriter BufferWriter;
initBufferWriter(&BufferWriter, Buffer);
return lprofWriteDataImpl(&BufferWriter, DataBegin, DataEnd, CountersBegin,
- CountersEnd, 0, NamesBegin, NamesEnd, 0);
+ CountersEnd, BitmapBegin, BitmapEnd, 0, NamesBegin,
+ NamesEnd, 0);
}
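For callers, the two public buffer entry points whose internals change above
still pair the same way; a minimal sketch:

#include <stdint.h>
#include <stdlib.h>

extern uint64_t __llvm_profile_get_size_for_buffer(void);
extern int __llvm_profile_write_buffer(char *Buffer);

int snapshot_profile(char **Out, uint64_t *OutSize) {
  uint64_t Size = __llvm_profile_get_size_for_buffer();
  char *Buf = (char *)malloc(Size);
  if (!Buf)
    return 1;
  /* Buf receives header, data, counters, bitmap, and names, padded as
   * computed by __llvm_profile_get_padding_sizes_for_counters(). */
  if (__llvm_profile_write_buffer(Buf) != 0) {
    free(Buf);
    return 1;
  }
  *Out = Buf;
  *OutSize = Size;
  return 0;
}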
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingFile.c b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingFile.c
index 2e91f16a2158..f3b457d786e6 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingFile.c
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingFile.c
@@ -92,30 +92,199 @@ static lprofFilename lprofCurFilename = {0, 0, 0, {0}, NULL,
{0}, 0, 0, 0, PNS_unknown};
static int ProfileMergeRequested = 0;
-static int isProfileMergeRequested() { return ProfileMergeRequested; }
+static int getProfileFileSizeForMerging(FILE *ProfileFile,
+ uint64_t *ProfileFileSize);
+
+#if defined(__APPLE__)
+static const int ContinuousModeSupported = 1;
+static const int UseBiasVar = 0;
+static const char *FileOpenMode = "a+b";
+static void *BiasAddr = NULL;
+static void *BiasDefaultAddr = NULL;
+static int mmapForContinuousMode(uint64_t CurrentFileOffset, FILE *File) {
+ /* Get the sizes of various profile data sections. Taken from
+ * __llvm_profile_get_size_for_buffer(). */
+ const __llvm_profile_data *DataBegin = __llvm_profile_begin_data();
+ const __llvm_profile_data *DataEnd = __llvm_profile_end_data();
+ const char *CountersBegin = __llvm_profile_begin_counters();
+ const char *CountersEnd = __llvm_profile_end_counters();
+ const char *BitmapBegin = __llvm_profile_begin_bitmap();
+ const char *BitmapEnd = __llvm_profile_end_bitmap();
+ const char *NamesBegin = __llvm_profile_begin_names();
+ const char *NamesEnd = __llvm_profile_end_names();
+ const uint64_t NamesSize = (NamesEnd - NamesBegin) * sizeof(char);
+ uint64_t DataSize = __llvm_profile_get_data_size(DataBegin, DataEnd);
+ uint64_t CountersSize =
+ __llvm_profile_get_counters_size(CountersBegin, CountersEnd);
+ uint64_t NumBitmapBytes =
+ __llvm_profile_get_num_bitmap_bytes(BitmapBegin, BitmapEnd);
+
+ /* Check that the counter, bitmap, and data sections in this image are
+ * page-aligned. */
+ unsigned PageSize = getpagesize();
+ if ((intptr_t)CountersBegin % PageSize != 0) {
+ PROF_ERR("Counters section not page-aligned (start = %p, pagesz = %u).\n",
+ CountersBegin, PageSize);
+ return 1;
+ }
+ if ((intptr_t)BitmapBegin % PageSize != 0) {
+ PROF_ERR("Bitmap section not page-aligned (start = %p, pagesz = %u).\n",
+ BitmapBegin, PageSize);
+ return 1;
+ }
+ if ((intptr_t)DataBegin % PageSize != 0) {
+ PROF_ERR("Data section not page-aligned (start = %p, pagesz = %u).\n",
+ DataBegin, PageSize);
+ return 1;
+ }
+ int Fileno = fileno(File);
+ /* Determine how much padding is needed before/after the counters and
+ * after the names. */
+ uint64_t PaddingBytesBeforeCounters, PaddingBytesAfterCounters,
+ PaddingBytesAfterNames, PaddingBytesAfterBitmapBytes;
+ __llvm_profile_get_padding_sizes_for_counters(
+ DataSize, CountersSize, NumBitmapBytes, NamesSize,
+ &PaddingBytesBeforeCounters, &PaddingBytesAfterCounters,
+ &PaddingBytesAfterBitmapBytes, &PaddingBytesAfterNames);
+
+ uint64_t PageAlignedCountersLength = CountersSize + PaddingBytesAfterCounters;
+ uint64_t FileOffsetToCounters = CurrentFileOffset +
+ sizeof(__llvm_profile_header) + DataSize +
+ PaddingBytesBeforeCounters;
+ void *CounterMmap = mmap((void *)CountersBegin, PageAlignedCountersLength,
+ PROT_READ | PROT_WRITE, MAP_FIXED | MAP_SHARED,
+ Fileno, FileOffsetToCounters);
+ if (CounterMmap != CountersBegin) {
+ PROF_ERR(
+ "Continuous counter sync mode is enabled, but mmap() failed (%s).\n"
+ " - CountersBegin: %p\n"
+ " - PageAlignedCountersLength: %" PRIu64 "\n"
+ " - Fileno: %d\n"
+ " - FileOffsetToCounters: %" PRIu64 "\n",
+ strerror(errno), CountersBegin, PageAlignedCountersLength, Fileno,
+ FileOffsetToCounters);
+ return 1;
+ }
+
+ /* Also mmap MCDC bitmap bytes. If there aren't any bitmap bytes, mmap()
+ * will fail with EINVAL. */
+ if (NumBitmapBytes == 0)
+ return 0;
+
+ uint64_t PageAlignedBitmapLength =
+ NumBitmapBytes + PaddingBytesAfterBitmapBytes;
+ uint64_t FileOffsetToBitmap =
+ CurrentFileOffset + sizeof(__llvm_profile_header) + DataSize +
+ PaddingBytesBeforeCounters + CountersSize + PaddingBytesAfterCounters;
+ void *BitmapMmap =
+ mmap((void *)BitmapBegin, PageAlignedBitmapLength, PROT_READ | PROT_WRITE,
+ MAP_FIXED | MAP_SHARED, Fileno, FileOffsetToBitmap);
+ if (BitmapMmap != BitmapBegin) {
+ PROF_ERR(
+ "Continuous counter sync mode is enabled, but mmap() failed (%s).\n"
+ " - BitmapBegin: %p\n"
+ " - PageAlignedBitmapLength: %" PRIu64 "\n"
+ " - Fileno: %d\n"
+ " - FileOffsetToBitmap: %" PRIu64 "\n",
+ strerror(errno), BitmapBegin, PageAlignedBitmapLength, Fileno,
+ FileOffsetToBitmap);
+ return 1;
+ }
+ return 0;
+}
+#elif defined(__ELF__) || defined(_WIN32)
+
+#define INSTR_PROF_PROFILE_COUNTER_BIAS_DEFAULT_VAR \
+ INSTR_PROF_CONCAT(INSTR_PROF_PROFILE_COUNTER_BIAS_VAR, _default)
+COMPILER_RT_VISIBILITY intptr_t INSTR_PROF_PROFILE_COUNTER_BIAS_DEFAULT_VAR = 0;
+
+/* This variable is a weak external reference which could be used to detect
+ * whether or not the compiler defined this symbol. */
+#if defined(_MSC_VER)
+COMPILER_RT_VISIBILITY extern intptr_t INSTR_PROF_PROFILE_COUNTER_BIAS_VAR;
+#if defined(_M_IX86) || defined(__i386__)
+#define WIN_SYM_PREFIX "_"
+#else
+#define WIN_SYM_PREFIX
+#endif
+#pragma comment( \
+ linker, "/alternatename:" WIN_SYM_PREFIX INSTR_PROF_QUOTE( \
+ INSTR_PROF_PROFILE_COUNTER_BIAS_VAR) "=" WIN_SYM_PREFIX \
+ INSTR_PROF_QUOTE(INSTR_PROF_PROFILE_COUNTER_BIAS_DEFAULT_VAR))
+#else
+COMPILER_RT_VISIBILITY extern intptr_t INSTR_PROF_PROFILE_COUNTER_BIAS_VAR
+ __attribute__((weak, alias(INSTR_PROF_QUOTE(
+ INSTR_PROF_PROFILE_COUNTER_BIAS_DEFAULT_VAR))));
+#endif
+static const int ContinuousModeSupported = 1;
+static const int UseBiasVar = 1;
+/* TODO: If there are two DSOs, the second DSO initialization will truncate
+ * the first profile file. */
+static const char *FileOpenMode = "w+b";
+/* This symbol is defined by the compiler when runtime counter relocation is
+ * used and runtime provides a weak alias so we can check if it's defined. */
+static void *BiasAddr = &INSTR_PROF_PROFILE_COUNTER_BIAS_VAR;
+static void *BiasDefaultAddr = &INSTR_PROF_PROFILE_COUNTER_BIAS_DEFAULT_VAR;
+static int mmapForContinuousMode(uint64_t CurrentFileOffset, FILE *File) {
+ /* Get the sizes of various profile data sections. Taken from
+ * __llvm_profile_get_size_for_buffer(). */
+ const __llvm_profile_data *DataBegin = __llvm_profile_begin_data();
+ const __llvm_profile_data *DataEnd = __llvm_profile_end_data();
+ const char *CountersBegin = __llvm_profile_begin_counters();
+ const char *CountersEnd = __llvm_profile_end_counters();
+ const char *BitmapBegin = __llvm_profile_begin_bitmap();
+ const char *BitmapEnd = __llvm_profile_end_bitmap();
+ uint64_t DataSize = __llvm_profile_get_data_size(DataBegin, DataEnd);
+ /* Get the file size. */
+ uint64_t FileSize = 0;
+ if (getProfileFileSizeForMerging(File, &FileSize))
+ return 1;
+
+ /* Map the profile. */
+ char *Profile = (char *)mmap(NULL, FileSize, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fileno(File), 0);
+ if (Profile == MAP_FAILED) {
+ PROF_ERR("Unable to mmap profile: %s\n", strerror(errno));
+ return 1;
+ }
+ const uint64_t CountersOffsetInBiasMode =
+ sizeof(__llvm_profile_header) + __llvm_write_binary_ids(NULL) + DataSize;
+ /* Update the profile fields based on the current mapping. */
+ INSTR_PROF_PROFILE_COUNTER_BIAS_VAR =
+ (intptr_t)Profile - (uintptr_t)CountersBegin + CountersOffsetInBiasMode;
+
+ /* Return the memory allocated for counters to OS. */
+ lprofReleaseMemoryPagesToOS((uintptr_t)CountersBegin, (uintptr_t)CountersEnd);
+
+ /* BIAS MODE not supported yet for Bitmap (MCDC). */
+
+  /* Return the memory allocated for the bitmap to OS. */
+ lprofReleaseMemoryPagesToOS((uintptr_t)BitmapBegin, (uintptr_t)BitmapEnd);
+ return 0;
+}
+#else
+static const int ContinuousModeSupported = 0;
+static const int UseBiasVar = 0;
+static const char *FileOpenMode = "a+b";
+static void *BiasAddr = NULL;
+static void *BiasDefaultAddr = NULL;
+static int mmapForContinuousMode(uint64_t CurrentFileOffset, FILE *File) {
+ return 0;
+}
+#endif
+
+static int isProfileMergeRequested(void) { return ProfileMergeRequested; }
static void setProfileMergeRequested(int EnableMerge) {
ProfileMergeRequested = EnableMerge;
}
static FILE *ProfileFile = NULL;
-static FILE *getProfileFile() { return ProfileFile; }
+static FILE *getProfileFile(void) { return ProfileFile; }
static void setProfileFile(FILE *File) { ProfileFile = File; }
-COMPILER_RT_VISIBILITY void __llvm_profile_set_file_object(FILE *File,
- int EnableMerge) {
- if (__llvm_profile_is_continuous_mode_enabled()) {
- PROF_WARN("__llvm_profile_set_file_object(fd=%d) not supported, because "
- "continuous sync mode (%%c) is enabled",
- fileno(File));
- return;
- }
- setProfileFile(File);
- setProfileMergeRequested(EnableMerge);
-}
-
-static int getCurFilenameLength();
+static int getCurFilenameLength(void);
static const char *getCurFilename(char *FilenameBuf, int ForceUseBuf);
-static unsigned doMerging() {
+static unsigned doMerging(void) {
return lprofCurFilename.MergePoolSize || isProfileMergeRequested();
}
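A hedged sketch of the bias relocation used by the __ELF__/_WIN32 branch above
(names are illustrative): instrumented code updates counter_address + bias, so
the bias must map each in-image counter onto its slot in the mmap()ed profile:

#include <stdint.h>

/* Mirrors the arithmetic in mmapForContinuousMode(); illustration only. */
static intptr_t compute_counter_bias(const char *MappedProfile,
                                     const char *CountersBegin,
                                     uint64_t CountersOffsetInFile) {
  /* slot in file = MappedProfile + CountersOffsetInFile
   * bias         = slot - in-image counter start address */
  return (intptr_t)(MappedProfile + CountersOffsetInFile) -
         (intptr_t)CountersBegin;
}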
@@ -166,17 +335,17 @@ static void initFileWriter(ProfDataWriter *This, FILE *File) {
COMPILER_RT_VISIBILITY ProfBufferIO *
lprofCreateBufferIOInternal(void *File, uint32_t BufferSz) {
FreeHook = &free;
- DynamicBufferIOBuffer = (uint8_t *)calloc(BufferSz, 1);
+ DynamicBufferIOBuffer = (uint8_t *)calloc(1, BufferSz);
VPBufferSize = BufferSz;
ProfDataWriter *fileWriter =
- (ProfDataWriter *)calloc(sizeof(ProfDataWriter), 1);
+ (ProfDataWriter *)calloc(1, sizeof(ProfDataWriter));
initFileWriter(fileWriter, File);
ProfBufferIO *IO = lprofCreateBufferIO(fileWriter);
IO->OwnFileWriter = 1;
return IO;
}
-static void setupIOBuffer() {
+static void setupIOBuffer(void) {
const char *BufferSzStr = 0;
BufferSzStr = getenv("LLVM_VP_BUFFER_SIZE");
if (BufferSzStr && BufferSzStr[0]) {
@@ -297,13 +466,15 @@ static void createProfileDir(const char *Filename) {
* its instrumented shared libraries dump profile data into their own data file.
*/
static FILE *openFileForMerging(const char *ProfileFileName, int *MergeDone) {
- FILE *ProfileFile = NULL;
+ FILE *ProfileFile = getProfileFile();
int rc;
-
- ProfileFile = getProfileFile();
- if (ProfileFile) {
+  // initializeProfileForContinuousMode will lock the profile, but if
+  // ProfileFile is set by the user via __llvm_profile_set_file_object, it is
+  // assumed to be unlocked at this point.
+ if (ProfileFile && !__llvm_profile_is_continuous_mode_enabled()) {
lprofLockFileHandle(ProfileFile);
- } else {
+ }
+ if (!ProfileFile) {
createProfileDir(ProfileFileName);
ProfileFile = lprofOpenFileEx(ProfileFileName);
}
@@ -354,7 +525,7 @@ static int writeFile(const char *OutputName) {
if (OutputFile == getProfileFile()) {
fflush(OutputFile);
- if (doMerging()) {
+ if (doMerging() && !__llvm_profile_is_continuous_mode_enabled()) {
lprofUnlockFileHandle(OutputFile);
}
} else {
@@ -426,13 +597,6 @@ static void truncateCurrentFile(void) {
fclose(File);
}
-// TODO: Move these functions into InstrProfilingPlatform* files.
-#if defined(__APPLE__)
-static void assertIsZero(int *i) {
- if (*i)
- PROF_WARN("Expected flag to be 0, but got: %d\n", *i);
-}
-
/* Write a partial profile to \p Filename, which is required to be backed by
* the open file object \p File. */
static int writeProfileWithFileObject(const char *Filename, FILE *File) {
@@ -444,45 +608,22 @@ static int writeProfileWithFileObject(const char *Filename, FILE *File) {
return rc;
}
-/* Unlock the profile \p File and clear the unlock flag. */
-static void unlockProfile(int *ProfileRequiresUnlock, FILE *File) {
- if (!*ProfileRequiresUnlock) {
- PROF_WARN("%s", "Expected to require profile unlock\n");
- }
-
- lprofUnlockFileHandle(File);
- *ProfileRequiresUnlock = 0;
-}
-
static void initializeProfileForContinuousMode(void) {
if (!__llvm_profile_is_continuous_mode_enabled())
return;
-
- /* Get the sizes of various profile data sections. Taken from
- * __llvm_profile_get_size_for_buffer(). */
- const __llvm_profile_data *DataBegin = __llvm_profile_begin_data();
- const __llvm_profile_data *DataEnd = __llvm_profile_end_data();
- const uint64_t *CountersBegin = __llvm_profile_begin_counters();
- const uint64_t *CountersEnd = __llvm_profile_end_counters();
- const char *NamesBegin = __llvm_profile_begin_names();
- const char *NamesEnd = __llvm_profile_end_names();
- const uint64_t NamesSize = (NamesEnd - NamesBegin) * sizeof(char);
- uint64_t DataSize = __llvm_profile_get_data_size(DataBegin, DataEnd);
- uint64_t CountersSize = CountersEnd - CountersBegin;
-
- /* Check that the counter and data sections in this image are page-aligned. */
- unsigned PageSize = getpagesize();
- if ((intptr_t)CountersBegin % PageSize != 0) {
- PROF_ERR("Counters section not page-aligned (start = %p, pagesz = %u).\n",
- CountersBegin, PageSize);
+ if (!ContinuousModeSupported) {
+ PROF_ERR("%s\n", "continuous mode is unsupported on this platform");
return;
}
- if ((intptr_t)DataBegin % PageSize != 0) {
- PROF_ERR("Data section not page-aligned (start = %p, pagesz = %u).\n",
- DataBegin, PageSize);
+ if (UseBiasVar && BiasAddr == BiasDefaultAddr) {
+ PROF_ERR("%s\n", "__llvm_profile_counter_bias is undefined");
return;
}
+ /* Get the sizes of counter section. */
+ uint64_t CountersSize = __llvm_profile_get_counters_size(
+ __llvm_profile_begin_counters(), __llvm_profile_end_counters());
+
int Length = getCurFilenameLength();
char *FilenameBuf = (char *)COMPILER_RT_ALLOCA(Length + 1);
const char *Filename = getCurFilename(FilenameBuf, 0);
@@ -490,34 +631,8 @@ static void initializeProfileForContinuousMode(void) {
return;
FILE *File = NULL;
- off_t CurrentFileOffset = 0;
- off_t OffsetModPage = 0;
-
- /* Whether an exclusive lock on the profile must be dropped after init.
- * Use a cleanup to warn if the unlock does not occur. */
- COMPILER_RT_CLEANUP(assertIsZero) int ProfileRequiresUnlock = 0;
-
- if (!doMerging()) {
- /* We are not merging profiles, so open the raw profile in append mode. */
- File = fopen(Filename, "a+b");
- if (!File)
- return;
-
- /* Check that the offset within the file is page-aligned. */
- CurrentFileOffset = ftello(File);
- OffsetModPage = CurrentFileOffset % PageSize;
- if (OffsetModPage != 0) {
- PROF_ERR("Continuous counter sync mode is enabled, but raw profile is not"
- "page-aligned. CurrentFileOffset = %" PRIu64 ", pagesz = %u.\n",
- (uint64_t)CurrentFileOffset, PageSize);
- return;
- }
-
- /* Grow the profile so that mmap() can succeed. Leak the file handle, as
- * the file should stay open. */
- if (writeProfileWithFileObject(Filename, File) != 0)
- return;
- } else {
+ uint64_t CurrentFileOffset = 0;
+ if (doMerging()) {
/* We are merging profiles. Map the counter section as shared memory into
* the profile, i.e. into each participating process. An increment in one
* process should be visible to every other process with the same counter
@@ -526,206 +641,80 @@ static void initializeProfileForContinuousMode(void) {
if (!File)
return;
- ProfileRequiresUnlock = 1;
-
- uint64_t ProfileFileSize;
- if (getProfileFileSizeForMerging(File, &ProfileFileSize) == -1)
- return unlockProfile(&ProfileRequiresUnlock, File);
-
+ uint64_t ProfileFileSize = 0;
+ if (getProfileFileSizeForMerging(File, &ProfileFileSize) == -1) {
+ lprofUnlockFileHandle(File);
+ fclose(File);
+ return;
+ }
if (ProfileFileSize == 0) {
/* Grow the profile so that mmap() can succeed. Leak the file handle, as
* the file should stay open. */
- if (writeProfileWithFileObject(Filename, File) != 0)
- return unlockProfile(&ProfileRequiresUnlock, File);
+ if (writeProfileWithFileObject(Filename, File) != 0) {
+ lprofUnlockFileHandle(File);
+ fclose(File);
+ return;
+ }
} else {
/* The merged profile has a non-zero length. Check that it is compatible
* with the data in this process. */
char *ProfileBuffer;
- if (mmapProfileForMerging(File, ProfileFileSize, &ProfileBuffer) == -1 ||
- munmap(ProfileBuffer, ProfileFileSize) == -1)
- return unlockProfile(&ProfileRequiresUnlock, File);
- }
- }
-
- /* mmap() the profile counters so long as there is at least one counter.
- * If there aren't any counters, mmap() would fail with EINVAL. */
- if (CountersSize > 0) {
- int Fileno = fileno(File);
-
- /* Determine how much padding is needed before/after the counters and after
- * the names. */
- uint64_t PaddingBytesBeforeCounters, PaddingBytesAfterCounters,
- PaddingBytesAfterNames;
- __llvm_profile_get_padding_sizes_for_counters(
- DataSize, CountersSize, NamesSize, &PaddingBytesBeforeCounters,
- &PaddingBytesAfterCounters, &PaddingBytesAfterNames);
-
- uint64_t PageAlignedCountersLength =
- (CountersSize * sizeof(uint64_t)) + PaddingBytesAfterCounters;
- uint64_t FileOffsetToCounters =
- CurrentFileOffset + sizeof(__llvm_profile_header) +
- (DataSize * sizeof(__llvm_profile_data)) + PaddingBytesBeforeCounters;
-
- uint64_t *CounterMmap = (uint64_t *)mmap(
- (void *)CountersBegin, PageAlignedCountersLength, PROT_READ | PROT_WRITE,
- MAP_FIXED | MAP_SHARED, Fileno, FileOffsetToCounters);
- if (CounterMmap != CountersBegin) {
- PROF_ERR(
- "Continuous counter sync mode is enabled, but mmap() failed (%s).\n"
- " - CountersBegin: %p\n"
- " - PageAlignedCountersLength: %" PRIu64 "\n"
- " - Fileno: %d\n"
- " - FileOffsetToCounters: %" PRIu64 "\n",
- strerror(errno), CountersBegin, PageAlignedCountersLength, Fileno,
- FileOffsetToCounters);
+ if (mmapProfileForMerging(File, ProfileFileSize, &ProfileBuffer) == -1) {
+ lprofUnlockFileHandle(File);
+ fclose(File);
+ return;
+ }
+ (void)munmap(ProfileBuffer, ProfileFileSize);
}
- }
-
- if (ProfileRequiresUnlock)
- unlockProfile(&ProfileRequiresUnlock, File);
-}
-#elif defined(__ELF__) || defined(_WIN32)
-
-#define INSTR_PROF_PROFILE_COUNTER_BIAS_DEFAULT_VAR \
- INSTR_PROF_CONCAT(INSTR_PROF_PROFILE_COUNTER_BIAS_VAR, _default)
-intptr_t INSTR_PROF_PROFILE_COUNTER_BIAS_DEFAULT_VAR = 0;
-
-/* This variable is a weak external reference which could be used to detect
- * whether or not the compiler defined this symbol. */
-#if defined(_MSC_VER)
-COMPILER_RT_VISIBILITY extern intptr_t INSTR_PROF_PROFILE_COUNTER_BIAS_VAR;
-#if defined(_M_IX86) || defined(__i386__)
-#define WIN_SYM_PREFIX "_"
-#else
-#define WIN_SYM_PREFIX
-#endif
-#pragma comment( \
- linker, "/alternatename:" WIN_SYM_PREFIX INSTR_PROF_QUOTE( \
- INSTR_PROF_PROFILE_COUNTER_BIAS_VAR) "=" WIN_SYM_PREFIX \
- INSTR_PROF_QUOTE(INSTR_PROF_PROFILE_COUNTER_BIAS_DEFAULT_VAR))
-#else
-COMPILER_RT_VISIBILITY extern intptr_t INSTR_PROF_PROFILE_COUNTER_BIAS_VAR
- __attribute__((weak, alias(INSTR_PROF_QUOTE(
- INSTR_PROF_PROFILE_COUNTER_BIAS_DEFAULT_VAR))));
-#endif
-
-static int writeMMappedFile(FILE *OutputFile, char **Profile) {
- if (!OutputFile)
- return -1;
-
- /* Write the data into a file. */
- setupIOBuffer();
- ProfDataWriter fileWriter;
- initFileWriter(&fileWriter, OutputFile);
- if (lprofWriteData(&fileWriter, NULL, 0)) {
- PROF_ERR("Failed to write profile: %s\n", strerror(errno));
- return -1;
- }
- fflush(OutputFile);
-
- /* Get the file size. */
- uint64_t FileSize = ftell(OutputFile);
-
- /* Map the profile. */
- *Profile = (char *)mmap(
- NULL, FileSize, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(OutputFile), 0);
- if (*Profile == MAP_FAILED) {
- PROF_ERR("Unable to mmap profile: %s\n", strerror(errno));
- return -1;
- }
-
- return 0;
-}
-
-static void initializeProfileForContinuousMode(void) {
- if (!__llvm_profile_is_continuous_mode_enabled())
- return;
-
- /* This symbol is defined by the compiler when runtime counter relocation is
- * used and runtime provides a weak alias so we can check if it's defined. */
- void *BiasAddr = &INSTR_PROF_PROFILE_COUNTER_BIAS_VAR;
- void *BiasDefaultAddr = &INSTR_PROF_PROFILE_COUNTER_BIAS_DEFAULT_VAR;
- if (BiasAddr == BiasDefaultAddr) {
- PROF_ERR("%s\n", "__llvm_profile_counter_bias is undefined");
- return;
- }
-
- /* Get the sizes of various profile data sections. Taken from
- * __llvm_profile_get_size_for_buffer(). */
- const __llvm_profile_data *DataBegin = __llvm_profile_begin_data();
- const __llvm_profile_data *DataEnd = __llvm_profile_end_data();
- const uint64_t *CountersBegin = __llvm_profile_begin_counters();
- const uint64_t *CountersEnd = __llvm_profile_end_counters();
- uint64_t DataSize = __llvm_profile_get_data_size(DataBegin, DataEnd);
- const uint64_t CountersOffset = sizeof(__llvm_profile_header) +
- __llvm_write_binary_ids(NULL) +
- (DataSize * sizeof(__llvm_profile_data));
-
- int Length = getCurFilenameLength();
- char *FilenameBuf = (char *)COMPILER_RT_ALLOCA(Length + 1);
- const char *Filename = getCurFilename(FilenameBuf, 0);
- if (!Filename)
- return;
-
- FILE *File = NULL;
- char *Profile = NULL;
-
- if (!doMerging()) {
- File = fopen(Filename, "w+b");
+ } else {
+ File = fopen(Filename, FileOpenMode);
if (!File)
return;
-
- if (writeMMappedFile(File, &Profile) == -1) {
+ /* Check that the offset within the file is page-aligned. */
+ CurrentFileOffset = ftell(File);
+ unsigned PageSize = getpagesize();
+ if (CurrentFileOffset % PageSize != 0) {
+ PROF_ERR("Continuous counter sync mode is enabled, but raw profile is not"
+ "page-aligned. CurrentFileOffset = %" PRIu64 ", pagesz = %u.\n",
+ (uint64_t)CurrentFileOffset, PageSize);
fclose(File);
return;
}
- } else {
- File = lprofOpenFileEx(Filename);
- if (!File)
- return;
-
- uint64_t ProfileFileSize = 0;
- if (getProfileFileSizeForMerging(File, &ProfileFileSize) == -1) {
- lprofUnlockFileHandle(File);
+ if (writeProfileWithFileObject(Filename, File) != 0) {
fclose(File);
return;
}
+ }
- if (!ProfileFileSize) {
- if (writeMMappedFile(File, &Profile) == -1) {
- fclose(File);
- return;
- }
- } else {
- /* The merged profile has a non-zero length. Check that it is compatible
- * with the data in this process. */
- if (mmapProfileForMerging(File, ProfileFileSize, &Profile) == -1) {
- fclose(File);
- return;
- }
- }
+ /* mmap() the profile counters so long as there is at least one counter.
+ * If there aren't any counters, mmap() would fail with EINVAL. */
+ if (CountersSize > 0)
+ mmapForContinuousMode(CurrentFileOffset, File);
+ if (doMerging()) {
lprofUnlockFileHandle(File);
}
-
- /* Update the profile fields based on the current mapping. */
- INSTR_PROF_PROFILE_COUNTER_BIAS_VAR =
- (intptr_t)Profile - (uintptr_t)CountersBegin +
- CountersOffset;
-
- /* Return the memory allocated for counters to OS. */
- lprofReleaseMemoryPagesToOS((uintptr_t)CountersBegin, (uintptr_t)CountersEnd);
-}
-#else
-static void initializeProfileForContinuousMode(void) {
- PROF_ERR("%s\n", "continuous mode is unsupported on this platform");
+ if (File != NULL) {
+ fclose(File);
+ }
}
-#endif
static const char *DefaultProfileName = "default.profraw";
static void resetFilenameToDefault(void) {
if (lprofCurFilename.FilenamePat && lprofCurFilename.OwnsFilenamePat) {
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#elif defined(__clang__)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wcast-qual"
+#endif
free((void *)lprofCurFilename.FilenamePat);
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#elif defined(__clang__)
+#pragma clang diagnostic pop
+#endif
}
memset(&lprofCurFilename, 0, sizeof(lprofCurFilename));
lprofCurFilename.FilenamePat = DefaultProfileName;
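The pragma blocks above (and in parseFilenamePattern below) exist because
lprofCurFilename stores owned strings behind `const char *`, so freeing them
means casting the qualifier away, which -Wcast-qual flags. A minimal
reproduction:

#include <stdlib.h>
#include <string.h>

int main(void) {
  const char *Pat = strdup("default_%m.profraw"); /* owned, stored as const */
  free((void *)Pat); /* -Wcast-qual warns here without the pragmas */
  return 0;
}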
@@ -767,6 +756,13 @@ static int parseFilenamePattern(const char *FilenamePat,
int MergingEnabled = 0;
int FilenamePatLen = strlen(FilenamePat);
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#elif defined(__clang__)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wcast-qual"
+#endif
/* Clean up cached prefix and filename. */
if (lprofCurFilename.ProfilePathPrefix)
free((void *)lprofCurFilename.ProfilePathPrefix);
@@ -774,6 +770,11 @@ static int parseFilenamePattern(const char *FilenamePat,
if (lprofCurFilename.FilenamePat && lprofCurFilename.OwnsFilenamePat) {
free((void *)lprofCurFilename.FilenamePat);
}
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#elif defined(__clang__)
+#pragma clang diagnostic pop
+#endif
memset(&lprofCurFilename, 0, sizeof(lprofCurFilename));
@@ -818,6 +819,7 @@ static int parseFilenamePattern(const char *FilenamePat,
if (__llvm_profile_is_continuous_mode_enabled()) {
PROF_WARN("%%c specifier can only be specified once in %s.\n",
FilenamePat);
+ __llvm_profile_disable_continuous_mode();
return -1;
}
#if defined(__APPLE__) || defined(__ELF__) || defined(_WIN32)
@@ -892,7 +894,7 @@ static void parseAndSetFilename(const char *FilenamePat,
* filename with PID and hostname substitutions. */
/* The length to hold uint64_t followed by 3 digits pool id including '_' */
#define SIGLEN 24
-static int getCurFilenameLength() {
+static int getCurFilenameLength(void) {
int Len;
if (!lprofCurFilename.FilenamePat || !lprofCurFilename.FilenamePat[0])
return 0;
@@ -1088,10 +1090,14 @@ int __llvm_profile_write_file(void) {
int rc, Length;
const char *Filename;
char *FilenameBuf;
- int PDeathSig = 0;
+
+ // Temporarily suspend getting SIGKILL when the parent exits.
+ int PDeathSig = lprofSuspendSigKill();
if (lprofProfileDumped() || __llvm_profile_is_continuous_mode_enabled()) {
PROF_NOTE("Profile data not written to file: %s.\n", "already written");
+ if (PDeathSig == 1)
+ lprofRestoreSigKill();
return 0;
}
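A hedged sketch of what lprofSuspendSigKill()/lprofRestoreSigKill() plausibly
do on Linux, inferred from the call sites above rather than quoted from the
runtime: park a SIGKILL parent-death signal around the write so a dying parent
cannot kill the writer mid-file:

#if defined(__linux__)
#include <signal.h>
#include <sys/prctl.h>

/* Returns 1 if SIGKILL-on-parent-death was armed and is now suspended. */
static int suspend_sigkill(void) {
  int Sig = 0;
  if (prctl(PR_GET_PDEATHSIG, &Sig) != 0)
    return 0;
  if (Sig == SIGKILL)
    prctl(PR_SET_PDEATHSIG, 0);
  return Sig == SIGKILL;
}

static void restore_sigkill(void) { prctl(PR_SET_PDEATHSIG, SIGKILL); }
#endif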
@@ -1102,6 +1108,8 @@ int __llvm_profile_write_file(void) {
/* Check the filename. */
if (!Filename) {
PROF_ERR("Failed to write file : %s\n", "Filename not set");
+ if (PDeathSig == 1)
+ lprofRestoreSigKill();
return -1;
}
@@ -1111,12 +1119,11 @@ int __llvm_profile_write_file(void) {
"expected %d, but get %d\n",
INSTR_PROF_RAW_VERSION,
(int)GET_VERSION(__llvm_profile_get_version()));
+ if (PDeathSig == 1)
+ lprofRestoreSigKill();
return -1;
}
- // Temporarily suspend getting SIGKILL when the parent exits.
- PDeathSig = lprofSuspendSigKill();
-
/* Write profile data to the file. */
rc = writeFile(Filename);
if (rc)
@@ -1149,7 +1156,9 @@ int __llvm_orderfile_write_file(void) {
int rc, Length, LengthBeforeAppend, SuffixLength;
const char *Filename;
char *FilenameBuf;
- int PDeathSig = 0;
+
+ // Temporarily suspend getting SIGKILL when the parent exits.
+ int PDeathSig = lprofSuspendSigKill();
SuffixLength = strlen(OrderFileSuffix);
Length = getCurFilenameLength() + SuffixLength;
@@ -1159,6 +1168,8 @@ int __llvm_orderfile_write_file(void) {
/* Check the filename. */
if (!Filename) {
PROF_ERR("Failed to write file : %s\n", "Filename not set");
+ if (PDeathSig == 1)
+ lprofRestoreSigKill();
return -1;
}
@@ -1173,12 +1184,11 @@ int __llvm_orderfile_write_file(void) {
"expected %d, but get %d\n",
INSTR_PROF_RAW_VERSION,
(int)GET_VERSION(__llvm_profile_get_version()));
+ if (PDeathSig == 1)
+ lprofRestoreSigKill();
return -1;
}
- // Temporarily suspend getting SIGKILL when the parent exits.
- PDeathSig = lprofSuspendSigKill();
-
/* Write order data to the file. */
rc = writeOrderFile(Filename);
if (rc)
@@ -1212,4 +1222,53 @@ int __llvm_profile_register_write_file_atexit(void) {
return atexit(writeFileWithoutReturn);
}
+COMPILER_RT_VISIBILITY int __llvm_profile_set_file_object(FILE *File,
+ int EnableMerge) {
+ if (__llvm_profile_is_continuous_mode_enabled()) {
+ if (!EnableMerge) {
+ PROF_WARN("__llvm_profile_set_file_object(fd=%d) not supported in "
+ "continuous sync mode when merging is disabled\n",
+ fileno(File));
+ return 1;
+ }
+ if (lprofLockFileHandle(File) != 0) {
+ PROF_WARN("Data may be corrupted during profile merging : %s\n",
+ "Fail to obtain file lock due to system limit.");
+ }
+ uint64_t ProfileFileSize = 0;
+ if (getProfileFileSizeForMerging(File, &ProfileFileSize) == -1) {
+ lprofUnlockFileHandle(File);
+ return 1;
+ }
+ if (ProfileFileSize == 0) {
+ FreeHook = &free;
+ setupIOBuffer();
+ ProfDataWriter fileWriter;
+ initFileWriter(&fileWriter, File);
+ if (lprofWriteData(&fileWriter, 0, 0)) {
+ lprofUnlockFileHandle(File);
+ PROF_ERR("Failed to write file \"%d\": %s\n", fileno(File),
+ strerror(errno));
+ return 1;
+ }
+ fflush(File);
+ } else {
+ /* The merged profile has a non-zero length. Check that it is compatible
+ * with the data in this process. */
+ char *ProfileBuffer;
+ if (mmapProfileForMerging(File, ProfileFileSize, &ProfileBuffer) == -1) {
+ lprofUnlockFileHandle(File);
+ return 1;
+ }
+ (void)munmap(ProfileBuffer, ProfileFileSize);
+ }
+ mmapForContinuousMode(0, File);
+ lprofUnlockFileHandle(File);
+ } else {
+ setProfileFile(File);
+ setProfileMergeRequested(EnableMerge);
+ }
+ return 0;
+}
+
#endif
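A usage sketch for the relocated __llvm_profile_set_file_object(); here
inherited_fd is a hypothetical descriptor handed to a sandboxed child that
cannot open files itself:

#include <stdio.h>

extern int __llvm_profile_set_file_object(FILE *File, int EnableMerge);

int adopt_profile_fd(int inherited_fd) {
  FILE *F = fdopen(inherited_fd, "r+b");
  if (!F)
    return 1;
  /* Nonzero return: rejected, e.g. continuous mode with merging off. */
  return __llvm_profile_set_file_object(F, /*EnableMerge=*/1);
}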
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingInternal.c b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingInternal.c
index edd38ad765c5..3dd659f90510 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingInternal.c
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingInternal.c
@@ -15,7 +15,7 @@
static unsigned ProfileDumped = 0;
-COMPILER_RT_VISIBILITY unsigned lprofProfileDumped() {
+COMPILER_RT_VISIBILITY unsigned lprofProfileDumped(void) {
return ProfileDumped;
}
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingInternal.h b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingInternal.h
index ffa790a4cb66..03ed67fcfa76 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingInternal.h
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingInternal.h
@@ -21,8 +21,8 @@
*/
uint64_t __llvm_profile_get_size_for_buffer_internal(
const __llvm_profile_data *DataBegin, const __llvm_profile_data *DataEnd,
- const uint64_t *CountersBegin, const uint64_t *CountersEnd,
- const char *NamesBegin, const char *NamesEnd);
+ const char *CountersBegin, const char *CountersEnd, const char *BitmapBegin,
+ const char *BitmapEnd, const char *NamesBegin, const char *NamesEnd);
/*!
* \brief Write instrumentation data to the given buffer, given explicit
@@ -35,8 +35,9 @@ uint64_t __llvm_profile_get_size_for_buffer_internal(
*/
int __llvm_profile_write_buffer_internal(
char *Buffer, const __llvm_profile_data *DataBegin,
- const __llvm_profile_data *DataEnd, const uint64_t *CountersBegin,
- const uint64_t *CountersEnd, const char *NamesBegin, const char *NamesEnd);
+ const __llvm_profile_data *DataEnd, const char *CountersBegin,
+ const char *CountersEnd, const char *BitmapBegin, const char *BitmapEnd,
+ const char *NamesBegin, const char *NamesEnd);
/*!
* The data structure describing the data to be written by the
@@ -145,15 +146,15 @@ typedef struct VPDataReaderType {
uint32_t N);
} VPDataReaderType;
-/* Write profile data to destinitation. If SkipNameDataWrite is set to 1,
- the name data is already in destintation, we just skip over it. */
+/* Write profile data to destination. If SkipNameDataWrite is set to 1,
+ the name data is already in destination, we just skip over it. */
int lprofWriteData(ProfDataWriter *Writer, VPDataReaderType *VPDataReader,
int SkipNameDataWrite);
int lprofWriteDataImpl(ProfDataWriter *Writer,
const __llvm_profile_data *DataBegin,
const __llvm_profile_data *DataEnd,
- const uint64_t *CountersBegin,
- const uint64_t *CountersEnd,
+ const char *CountersBegin, const char *CountersEnd,
+ const char *BitmapBegin, const char *BitmapEnd,
VPDataReaderType *VPDataReader, const char *NamesBegin,
const char *NamesEnd, int SkipNameDataWrite);
@@ -199,4 +200,12 @@ extern void (*VPMergeHook)(struct ValueProfData *, __llvm_profile_data *);
*/
int __llvm_write_binary_ids(ProfDataWriter *Writer);
+/*
+ * Write binary id length and then its data, because binary id does not
+ * have a fixed length.
+ */
+int lprofWriteOneBinaryId(ProfDataWriter *Writer, uint64_t BinaryIdLen,
+ const uint8_t *BinaryIdData,
+ uint64_t BinaryIdPadding);
+
#endif
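A hedged sketch of the record shape the lprofWriteOneBinaryId() comment
implies: a u64 length prefix, the id bytes, then zero padding, since binary
ids (ELF build-ids, for example) have no fixed length:

#include <stdint.h>
#include <string.h>

/* Illustrative layout only; the runtime writes through ProfDataWriter. */
static size_t emit_binary_id(uint8_t *Out, uint64_t IdLen,
                             const uint8_t *IdData, uint64_t Padding) {
  memcpy(Out, &IdLen, sizeof(IdLen));              /* length prefix */
  memcpy(Out + sizeof(IdLen), IdData, IdLen);      /* id bytes      */
  memset(Out + sizeof(IdLen) + IdLen, 0, Padding); /* 8-byte align  */
  return (size_t)(sizeof(IdLen) + IdLen + Padding);
}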
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingMerge.c b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingMerge.c
index 16ebc2f8b2a9..b5850e99ee37 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingMerge.c
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingMerge.c
@@ -20,34 +20,42 @@ COMPILER_RT_VISIBILITY
void (*VPMergeHook)(ValueProfData *, __llvm_profile_data *);
COMPILER_RT_VISIBILITY
-uint64_t lprofGetLoadModuleSignature() {
+uint64_t lprofGetLoadModuleSignature(void) {
/* A very fast way to compute a module signature. */
uint64_t Version = __llvm_profile_get_version();
- uint64_t CounterSize = (uint64_t)(__llvm_profile_end_counters() -
- __llvm_profile_begin_counters());
- uint64_t DataSize = __llvm_profile_get_data_size(__llvm_profile_begin_data(),
- __llvm_profile_end_data());
+ uint64_t NumCounters = __llvm_profile_get_num_counters(
+ __llvm_profile_begin_counters(), __llvm_profile_end_counters());
+ uint64_t NumData = __llvm_profile_get_num_data(__llvm_profile_begin_data(),
+ __llvm_profile_end_data());
uint64_t NamesSize =
(uint64_t)(__llvm_profile_end_names() - __llvm_profile_begin_names());
uint64_t NumVnodes =
(uint64_t)(__llvm_profile_end_vnodes() - __llvm_profile_begin_vnodes());
const __llvm_profile_data *FirstD = __llvm_profile_begin_data();
- return (NamesSize << 40) + (CounterSize << 30) + (DataSize << 20) +
- (NumVnodes << 10) + (DataSize > 0 ? FirstD->NameRef : 0) + Version;
+ return (NamesSize << 40) + (NumCounters << 30) + (NumData << 20) +
+ (NumVnodes << 10) + (NumData > 0 ? FirstD->NameRef : 0) + Version +
+ __llvm_profile_get_magic();
}
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#elif defined(__clang__)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wcast-qual"
+#endif
+
/* Returns 1 if profile is not structurally compatible. */
COMPILER_RT_VISIBILITY
int __llvm_profile_check_compatibility(const char *ProfileData,
uint64_t ProfileSize) {
- /* Check profile header only for now */
__llvm_profile_header *Header = (__llvm_profile_header *)ProfileData;
__llvm_profile_data *SrcDataStart, *SrcDataEnd, *SrcData, *DstData;
SrcDataStart =
(__llvm_profile_data *)(ProfileData + sizeof(__llvm_profile_header) +
Header->BinaryIdsSize);
- SrcDataEnd = SrcDataStart + Header->DataSize;
+ SrcDataEnd = SrcDataStart + Header->NumData;
if (ProfileSize < sizeof(__llvm_profile_header))
return 1;
@@ -55,19 +63,26 @@ int __llvm_profile_check_compatibility(const char *ProfileData,
/* Check the header first. */
if (Header->Magic != __llvm_profile_get_magic() ||
Header->Version != __llvm_profile_get_version() ||
- Header->DataSize !=
- __llvm_profile_get_data_size(__llvm_profile_begin_data(),
- __llvm_profile_end_data()) ||
- Header->CountersSize != (uint64_t)(__llvm_profile_end_counters() -
- __llvm_profile_begin_counters()) ||
- Header->NamesSize != (uint64_t)(__llvm_profile_end_names() -
- __llvm_profile_begin_names()) ||
+ Header->NumData !=
+ __llvm_profile_get_num_data(__llvm_profile_begin_data(),
+ __llvm_profile_end_data()) ||
+ Header->NumCounters !=
+ __llvm_profile_get_num_counters(__llvm_profile_begin_counters(),
+ __llvm_profile_end_counters()) ||
+ Header->NumBitmapBytes !=
+ __llvm_profile_get_num_bitmap_bytes(__llvm_profile_begin_bitmap(),
+ __llvm_profile_end_bitmap()) ||
+ Header->NamesSize !=
+ __llvm_profile_get_name_size(__llvm_profile_begin_names(),
+ __llvm_profile_end_names()) ||
Header->ValueKindLast != IPVK_Last)
return 1;
- if (ProfileSize < sizeof(__llvm_profile_header) + Header->BinaryIdsSize +
- Header->DataSize * sizeof(__llvm_profile_data) +
- Header->NamesSize + Header->CountersSize)
+ if (ProfileSize <
+ sizeof(__llvm_profile_header) + Header->BinaryIdsSize +
+ Header->NumData * sizeof(__llvm_profile_data) + Header->NamesSize +
+ Header->NumCounters * __llvm_profile_counter_entry_size() +
+ Header->NumBitmapBytes)
return 1;
for (SrcData = SrcDataStart,
@@ -75,7 +90,8 @@ int __llvm_profile_check_compatibility(const char *ProfileData,
SrcData < SrcDataEnd; ++SrcData, ++DstData) {
if (SrcData->NameRef != DstData->NameRef ||
SrcData->FuncHash != DstData->FuncHash ||
- SrcData->NumCounters != DstData->NumCounters)
+ SrcData->NumCounters != DstData->NumCounters ||
+ SrcData->NumBitmapBytes != DstData->NumBitmapBytes)
return 1;
}
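Tying the two buffer entry points together, as the per-record checks above
suggest; a sketch where Buf/Size hold a previously saved raw profile:

#include <stdint.h>

extern int __llvm_profile_check_compatibility(const char *Profile,
                                              uint64_t Size);
extern int __llvm_profile_merge_from_buffer(const char *Profile,
                                            uint64_t Size);

int merge_saved_profile(const char *Buf, uint64_t Size) {
  /* Reject structurally incompatible data before touching counters. */
  if (__llvm_profile_check_compatibility(Buf, Size) != 0)
    return 1;
  return __llvm_profile_merge_from_buffer(Buf, Size);
}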
@@ -83,46 +99,121 @@ int __llvm_profile_check_compatibility(const char *ProfileData,
return 0;
}
+static uintptr_t signextIfWin64(void *V) {
+#ifdef _WIN64
+ return (uintptr_t)(int32_t)(uintptr_t)V;
+#else
+ return (uintptr_t)V;
+#endif
+}
+
COMPILER_RT_VISIBILITY
int __llvm_profile_merge_from_buffer(const char *ProfileData,
uint64_t ProfileSize) {
+ if (__llvm_profile_get_version() & VARIANT_MASK_TEMPORAL_PROF) {
+ PROF_ERR("%s\n",
+ "Temporal profiles do not support profile merging at runtime. "
+ "Instead, merge raw profiles using the llvm-profdata tool.");
+ return 1;
+ }
+
__llvm_profile_data *SrcDataStart, *SrcDataEnd, *SrcData, *DstData;
__llvm_profile_header *Header = (__llvm_profile_header *)ProfileData;
- uint64_t *SrcCountersStart;
+ char *SrcCountersStart, *DstCounter;
+ const char *SrcCountersEnd, *SrcCounter;
+ const char *SrcBitmapStart;
const char *SrcNameStart;
const char *SrcValueProfDataStart, *SrcValueProfData;
+ uintptr_t CountersDelta = Header->CountersDelta;
+ uintptr_t BitmapDelta = Header->BitmapDelta;
SrcDataStart =
(__llvm_profile_data *)(ProfileData + sizeof(__llvm_profile_header) +
Header->BinaryIdsSize);
- SrcDataEnd = SrcDataStart + Header->DataSize;
- SrcCountersStart = (uint64_t *)SrcDataEnd;
- SrcNameStart = (const char *)(SrcCountersStart + Header->CountersSize);
+ SrcDataEnd = SrcDataStart + Header->NumData;
+ SrcCountersStart = (char *)SrcDataEnd;
+ SrcCountersEnd = SrcCountersStart +
+ Header->NumCounters * __llvm_profile_counter_entry_size();
+ SrcBitmapStart = SrcCountersEnd;
+ SrcNameStart = SrcBitmapStart + Header->NumBitmapBytes;
SrcValueProfDataStart =
SrcNameStart + Header->NamesSize +
__llvm_profile_get_num_padding_bytes(Header->NamesSize);
- if (SrcNameStart < (const char *)SrcCountersStart)
+ if (SrcNameStart < SrcCountersStart || SrcNameStart < SrcBitmapStart)
return 1;
+ // Merge counters by iterating the entire counter section when the data
+ // section is empty due to profile correlation.
+ if (Header->NumData == 0) {
+ for (SrcCounter = SrcCountersStart,
+ DstCounter = __llvm_profile_begin_counters();
+ SrcCounter < SrcCountersEnd;) {
+ if (__llvm_profile_get_version() & VARIANT_MASK_BYTE_COVERAGE) {
+ *DstCounter &= *SrcCounter;
+ } else {
+ *(uint64_t *)DstCounter += *(uint64_t *)SrcCounter;
+ }
+ SrcCounter += __llvm_profile_counter_entry_size();
+ DstCounter += __llvm_profile_counter_entry_size();
+ }
+ return 0;
+ }
+
for (SrcData = SrcDataStart,
DstData = (__llvm_profile_data *)__llvm_profile_begin_data(),
SrcValueProfData = SrcValueProfDataStart;
SrcData < SrcDataEnd; ++SrcData, ++DstData) {
- uint64_t *DstCounters = (uint64_t *)DstData->CounterPtr;
+ // For the in-memory destination, CounterPtr is the distance from the start
+ // address of the data to the start address of the counter. On WIN64,
+ // CounterPtr is a truncated 32-bit value due to COFF limitation. Sign
+ // extend CounterPtr to get the original value.
+ char *DstCounters =
+ (char *)((uintptr_t)DstData + signextIfWin64(DstData->CounterPtr));
+ char *DstBitmap =
+ (char *)((uintptr_t)DstData + signextIfWin64(DstData->BitmapPtr));
unsigned NVK = 0;
+ // SrcData is a serialized representation of the memory image. We need to
+ // compute the in-buffer counter offset from the in-memory address distance.
+ // The initial CountersDelta is the in-memory address difference
+ // start(__llvm_prf_cnts)-start(__llvm_prf_data), so SrcData->CounterPtr -
+ // CountersDelta computes the offset into the in-buffer counter section.
+ //
+ // On WIN64, CountersDelta is truncated as well, so no need for signext.
+ char *SrcCounters =
+ SrcCountersStart + ((uintptr_t)SrcData->CounterPtr - CountersDelta);
+ // CountersDelta needs to be decreased as we advance to the next data
+ // record.
+ CountersDelta -= sizeof(*SrcData);
unsigned NC = SrcData->NumCounters;
if (NC == 0)
return 1;
- uint64_t *SrcCounters = SrcCountersStart + ((size_t)SrcData->CounterPtr -
- Header->CountersDelta) /
- sizeof(uint64_t);
- if (SrcCounters < SrcCountersStart ||
- (const char *)SrcCounters >= SrcNameStart ||
- (const char *)(SrcCounters + NC) > SrcNameStart)
+ if (SrcCounters < SrcCountersStart || SrcCounters >= SrcNameStart ||
+ (SrcCounters + __llvm_profile_counter_entry_size() * NC) > SrcNameStart)
return 1;
- for (unsigned I = 0; I < NC; I++)
- DstCounters[I] += SrcCounters[I];
+ for (unsigned I = 0; I < NC; I++) {
+ if (__llvm_profile_get_version() & VARIANT_MASK_BYTE_COVERAGE) {
+ // A value of zero signifies the function is covered.
+ DstCounters[I] &= SrcCounters[I];
+ } else {
+ ((uint64_t *)DstCounters)[I] += ((uint64_t *)SrcCounters)[I];
+ }
+ }
+
+ const char *SrcBitmap =
+ SrcBitmapStart + ((uintptr_t)SrcData->BitmapPtr - BitmapDelta);
+ // BitmapDelta also needs to be decreased as we advance to the next data
+ // record.
+ BitmapDelta -= sizeof(*SrcData);
+ unsigned NB = SrcData->NumBitmapBytes;
+ // NumBitmapBytes may legitimately be 0. Just keep going.
+ if (NB != 0) {
+ if (SrcBitmap < SrcBitmapStart || (SrcBitmap + NB) > SrcNameStart)
+ return 1;
+ // Merge Src and Dst Bitmap bytes by simply ORing them together.
+ for (unsigned I = 0; I < NB; I++)
+ DstBitmap[I] |= SrcBitmap[I];
+ }
/* Now merge value profile data. */
if (!VPMergeHook)
@@ -143,3 +234,9 @@ int __llvm_profile_merge_from_buffer(const char *ProfileData,
return 0;
}
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#elif defined(__clang__)
+#pragma clang diagnostic pop
+#endif
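For readers tracing the pointer arithmetic in the merge loop above, here is a minimal standalone sketch, using hypothetical addresses and record size, of how a serialized CounterPtr plus the shrinking CountersDelta yields an offset into the buffer's counter section (not part of the patch):

#include <stdint.h>
#include <stdio.h>

int main(void) {
  /* Assumed in-memory layout at serialization time. */
  uint64_t DataStart = 0x1000; /* start(__llvm_prf_data) */
  uint64_t CntsStart = 0x5000; /* start(__llvm_prf_cnts) */
  uint64_t RecSize = 64;       /* assumed sizeof(__llvm_profile_data) */
  uint64_t CountersDelta = CntsStart - DataStart; /* header value */

  /* Record 1 lives at 0x1040 and its counters begin at 0x5010; its
   * serialized CounterPtr is the distance between the two. */
  uint64_t CounterPtr = 0x5010 - (DataStart + 1 * RecSize);

  /* The merge loop decrements the delta once per record it walks. */
  CountersDelta -= RecSize; /* after processing record 0 */

  /* In-buffer offset of record 1's counters: 0x10. */
  printf("offset = 0x%llx\n",
         (unsigned long long)(CounterPtr - CountersDelta));
  return 0;
}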
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingNameVar.c b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingNameVar.c
index 2d67a55b985c..407272806ba3 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingNameVar.c
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingNameVar.c
@@ -14,4 +14,4 @@
* user has not specified one. Set this up by moving the runtime's copy of this
* symbol to an object file within the archive.
*/
-COMPILER_RT_WEAK char INSTR_PROF_PROFILE_NAME_VAR[1] = {0};
+COMPILER_RT_WEAK COMPILER_RT_VISIBILITY char INSTR_PROF_PROFILE_NAME_VAR[1] = {0};
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformAIX.c b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformAIX.c
new file mode 100644
index 000000000000..002bec164d7e
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformAIX.c
@@ -0,0 +1,215 @@
+/*===- InstrProfilingPlatformAIX.c - Profile data AIX platform ------------===*\
+|*
+|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+|* See https://llvm.org/LICENSE.txt for license information.
+|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+|*
+\*===----------------------------------------------------------------------===*/
+
+#if defined(_AIX)
+
+#ifdef __64BIT__
+#define __XCOFF64__
+#endif
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ldr.h>
+#include <xcoff.h>
+
+#include "InstrProfiling.h"
+#include "InstrProfilingInternal.h"
+
+#define BIN_ID_PREFIX "xcoff_binary_id:"
+
+// If found, write the build-id into the Result buffer.
+static size_t FindBinaryId(char *Result, size_t Size) {
+ unsigned long EntryAddr = (unsigned long)__builtin_return_address(0);
+
+ // Use loadquery to get information about loaded modules; loadquery writes
+ // its result into a buffer of unknown size.
+ char Buf[1024];
+ size_t BufSize = sizeof(Buf);
+ char *BufPtr = Buf;
+ int RC = -1;
+
+ errno = 0;
+ RC = loadquery(L_GETXINFO | L_IGNOREUNLOAD, BufPtr, (unsigned int)BufSize);
+ if (RC == -1 && errno == ENOMEM) {
+ BufSize = 64000; // should be plenty for any program.
+ BufPtr = malloc(BufSize);
+ if (BufPtr != 0)
+ RC = loadquery(L_GETXINFO | L_IGNOREUNLOAD, BufPtr, (unsigned int)BufSize);
+ }
+
+ if (RC == -1)
+ goto done;
+
+ // Locate the ld_xinfo corresponding to this module.
+ struct ld_xinfo *CurInfo = (struct ld_xinfo *)BufPtr;
+ while (1) {
+ unsigned long CurTextStart = (uint64_t)CurInfo->ldinfo_textorg;
+ unsigned long CurTextEnd = CurTextStart + CurInfo->ldinfo_textsize;
+ if (CurTextStart <= EntryAddr && EntryAddr < CurTextEnd) {
+ // Found my slot. Now search for the build-id.
+ char *p = (char *)CurInfo->ldinfo_textorg;
+
+ FILHDR *f = (FILHDR *)p;
+ AOUTHDR *a = (AOUTHDR *)(p + FILHSZ);
+ SCNHDR *s =
+ (SCNHDR *)(p + FILHSZ + f->f_opthdr + SCNHSZ * (a->o_snloader - 1));
+ LDHDR *ldhdr = (LDHDR *)(p + s->s_scnptr);
+ // This is the loader string table
+ char *lstr = (char *)ldhdr + ldhdr->l_stoff;
+
+ // If the build-id exists, it's the first entry.
+ // Each entry consists of a 2-byte size field followed by
+ // the data.
+ size_t len = *(short *)lstr;
+ char *str = (char *)(lstr + 2);
+ size_t PrefixLen = sizeof(BIN_ID_PREFIX) - 1;
+ if (len > PrefixLen && (len - PrefixLen) <= Size &&
+ strncmp(str, BIN_ID_PREFIX, PrefixLen) == 0) {
+ memcpy(Result, str + PrefixLen, len - PrefixLen);
+ RC = len - PrefixLen;
+ goto done;
+ }
+ break;
+ }
+ if (CurInfo->ldinfo_next == 0u)
+ break;
+ CurInfo = (struct ld_xinfo *)((char *)CurInfo + CurInfo->ldinfo_next);
+ }
+done:
+ if (BufSize != sizeof(Buf) && BufPtr != 0)
+ free(BufPtr);
+ return RC;
+}
+
+static int StrToHexError = 0;
+static uint8_t StrToHex(char c) {
+ if (c >= '0' && c <= '9')
+ return c - '0';
+ if (c >= 'a' && c <= 'f')
+ return c - 'a' + 0xa;
+ if (c >= 'A' && c <= 'F')
+ return c - 'A' + 0xa;
+ StrToHexError = 1;
+ return 0;
+}
+
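The hex decoding performed in __llvm_write_binary_ids below packs two ASCII digits per output byte via StrToHex. A self-contained sketch of the same transformation, assuming valid input and a hypothetical build-id string (not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Same digit mapping as StrToHex above, minus the error flag. */
static uint8_t HexVal(char c) {
  return (c <= '9') ? (uint8_t)(c - '0') : (uint8_t)((c | 0x20) - 'a' + 0xa);
}

int main(void) {
  const char *Ascii = "0a1b"; /* hypothetical build-id text */
  uint8_t Bytes[2];
  for (int i = 0; i < 2; i++)
    Bytes[i] = (uint8_t)((HexVal(Ascii[2 * i]) << 4) | HexVal(Ascii[2 * i + 1]));
  printf("%02x %02x\n", Bytes[0], Bytes[1]); /* prints: 0a 1b */
  return 0;
}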
+COMPILER_RT_VISIBILITY int __llvm_write_binary_ids(ProfDataWriter *Writer) {
+ // 200 bytes should be enough for the build-id hex string.
+ static char Buf[200];
+ // Profile reading tools expect this to be 8 bytes long.
+ static int64_t BinaryIdLen = 0;
+ static uint8_t *BinaryIdData = 0;
+
+ // -1 means we already checked for a BinaryId and didn't find one.
+ if (BinaryIdLen == -1)
+ return 0;
+
+ // Are we being called for the first time?
+ if (BinaryIdLen == 0) {
+ if (getenv("LLVM_PROFILE_NO_BUILD_ID"))
+ goto fail;
+
+ int BuildIdLen = FindBinaryId(Buf, sizeof(Buf));
+ if (BuildIdLen <= 0)
+ goto fail;
+
+ if (Buf[BuildIdLen - 1] == '\0')
+ BuildIdLen--;
+
+ // Assume an even number of hex digits, so 0xabc must be written as 0x0abc.
+ if ((BuildIdLen % 2) != 0 || BuildIdLen == 0)
+ goto fail;
+
+ // The numeric ID is represented as an ASCII string in the loader section,
+ // so convert it to raw binary.
+ BinaryIdLen = BuildIdLen / 2;
+ BinaryIdData = (uint8_t *)Buf;
+
+ // Skip "0x" prefix if it exists.
+ if (Buf[0] == '0' && Buf[1] == 'x') {
+ BinaryIdLen -= 1;
+ BinaryIdData += 2;
+ }
+
+ StrToHexError = 0;
+ for (int i = 0; i < BinaryIdLen; i++)
+ BinaryIdData[i] = (StrToHex(BinaryIdData[2 * i]) << 4) +
+ StrToHex(BinaryIdData[2 * i + 1]);
+
+ if (StrToHexError)
+ goto fail;
+
+ if (getenv("LLVM_PROFILE_VERBOSE")) {
+ char *StrBuf = (char *)COMPILER_RT_ALLOCA(2 * BinaryIdLen + 1);
+ for (int i = 0; i < (int)BinaryIdLen; i++)
+ sprintf(&StrBuf[2 * i], "%02x", BinaryIdData[i]);
+ PROF_NOTE("Writing binary id: %s\n", StrBuf);
+ }
+ }
+
+ uint8_t BinaryIdPadding = __llvm_profile_get_num_padding_bytes(BinaryIdLen);
+ if (Writer && lprofWriteOneBinaryId(Writer, BinaryIdLen, BinaryIdData,
+ BinaryIdPadding) == -1)
+ return -1; // Return -1 rather than goto fail to match the NT_GNU_BUILD_ID path.
+
+ return sizeof(BinaryIdLen) + BinaryIdLen + BinaryIdPadding;
+
+fail:
+ if (getenv("LLVM_PROFILE_VERBOSE"))
+ fprintf(stderr, "no or invalid binary id: %.*s\n", (int)sizeof(Buf), Buf);
+ BinaryIdLen = -1;
+ return 0;
+}
+
+// Empty stubs to allow linking object files using the registration-based scheme
+COMPILER_RT_VISIBILITY
+void __llvm_profile_register_function(void *Data_) {}
+
+COMPILER_RT_VISIBILITY
+void __llvm_profile_register_names_function(void *NamesStart,
+ uint64_t NamesSize) {}
+
+// The __start_SECNAME and __stop_SECNAME symbols (for SECNAME \in
+// {"__llvm_prf_cnts", "__llvm_prf_data", "__llvm_prf_name", "__llvm_prf_vnds"})
+// are always live when linking on AIX, regardless of whether the .o's being
+// linked reference symbols from the profile library (for example when no files
+// were compiled with -fprofile-generate). That's because these symbols are kept
+// alive through references in constructor functions that are always live in the
+// default linking model on AIX (-bcdtors:all). The __start_SECNAME and
+// __stop_SECNAME symbols are only resolved by the linker when the SECNAME
+// section exists. So for the scenario where the user objects have no such
+// section (i.e. when they are compiled with -fno-profile-generate), we always
+// define these zero-length variables in each of the above 4 sections.
+static int dummy_cnts[0] COMPILER_RT_SECTION(
+ COMPILER_RT_SEG INSTR_PROF_CNTS_SECT_NAME);
+static int dummy_bits[0] COMPILER_RT_SECTION(
+ COMPILER_RT_SEG INSTR_PROF_BITS_SECT_NAME);
+static int dummy_data[0] COMPILER_RT_SECTION(
+ COMPILER_RT_SEG INSTR_PROF_DATA_SECT_NAME);
+static const int dummy_name[0] COMPILER_RT_SECTION(
+ COMPILER_RT_SEG INSTR_PROF_NAME_SECT_NAME);
+static int dummy_vnds[0] COMPILER_RT_SECTION(
+ COMPILER_RT_SEG INSTR_PROF_VNODES_SECT_NAME);
+static int dummy_orderfile[0] COMPILER_RT_SECTION(
+ COMPILER_RT_SEG INSTR_PROF_ORDERFILE_SECT_NAME);
+
+// To keep the linker from GC'ing the dummy variables, reference them in an
+// array and reference the array in the runtime registration code
+// (InstrProfilingRuntime.cpp)
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+COMPILER_RT_VISIBILITY
+void *__llvm_profile_keep[] = {(void *)&dummy_cnts, (void *)&dummy_bits,
+ (void *)&dummy_data, (void *)&dummy_name,
+ (void *)&dummy_vnds, (void *)&dummy_orderfile};
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
+#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformDarwin.c b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformDarwin.c
index c2e7fad98386..2154d242a817 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformDarwin.c
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformDarwin.c
@@ -26,11 +26,15 @@ extern char
COMPILER_RT_VISIBILITY
extern char NamesEnd __asm("section$end$__DATA$" INSTR_PROF_NAME_SECT_NAME);
COMPILER_RT_VISIBILITY
-extern uint64_t
+extern char
CountersStart __asm("section$start$__DATA$" INSTR_PROF_CNTS_SECT_NAME);
COMPILER_RT_VISIBILITY
-extern uint64_t
- CountersEnd __asm("section$end$__DATA$" INSTR_PROF_CNTS_SECT_NAME);
+extern char CountersEnd __asm("section$end$__DATA$" INSTR_PROF_CNTS_SECT_NAME);
+COMPILER_RT_VISIBILITY
+extern char
+ BitmapStart __asm("section$start$__DATA$" INSTR_PROF_BITS_SECT_NAME);
+COMPILER_RT_VISIBILITY
+extern char BitmapEnd __asm("section$end$__DATA$" INSTR_PROF_BITS_SECT_NAME);
COMPILER_RT_VISIBILITY
extern uint32_t
OrderFileStart __asm("section$start$__DATA$" INSTR_PROF_ORDERFILE_SECT_NAME);
@@ -53,9 +57,13 @@ const char *__llvm_profile_begin_names(void) { return &NamesStart; }
COMPILER_RT_VISIBILITY
const char *__llvm_profile_end_names(void) { return &NamesEnd; }
COMPILER_RT_VISIBILITY
-uint64_t *__llvm_profile_begin_counters(void) { return &CountersStart; }
+char *__llvm_profile_begin_counters(void) { return &CountersStart; }
+COMPILER_RT_VISIBILITY
+char *__llvm_profile_end_counters(void) { return &CountersEnd; }
+COMPILER_RT_VISIBILITY
+char *__llvm_profile_begin_bitmap(void) { return &BitmapStart; }
COMPILER_RT_VISIBILITY
-uint64_t *__llvm_profile_end_counters(void) { return &CountersEnd; }
+char *__llvm_profile_end_bitmap(void) { return &BitmapEnd; }
COMPILER_RT_VISIBILITY
uint32_t *__llvm_profile_begin_orderfile(void) { return &OrderFileStart; }
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformFuchsia.c b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformFuchsia.c
index 1be0ef36a288..fdcb82e4d72b 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformFuchsia.c
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformFuchsia.c
@@ -37,7 +37,7 @@
/* This variable is an external reference to symbol defined by the compiler. */
COMPILER_RT_VISIBILITY extern intptr_t INSTR_PROF_PROFILE_COUNTER_BIAS_VAR;
-COMPILER_RT_VISIBILITY unsigned lprofProfileDumped() {
+COMPILER_RT_VISIBILITY unsigned lprofProfileDumped(void) {
return 1;
}
COMPILER_RT_VISIBILITY void lprofSetProfileDumped(unsigned Value) {}
@@ -52,7 +52,7 @@ static inline void lprofWrite(const char *fmt, ...) {
int ret = vsnprintf(s, sizeof(s), fmt, ap);
va_end(ap);
- __sanitizer_log_write(s, ret + 1);
+ __sanitizer_log_write(s, ret);
}
struct lprofVMOWriterCtx {
@@ -116,13 +116,13 @@ void __llvm_profile_initialize(void) {
const __llvm_profile_data *DataBegin = __llvm_profile_begin_data();
const __llvm_profile_data *DataEnd = __llvm_profile_end_data();
- const uint64_t *CountersBegin = __llvm_profile_begin_counters();
- const uint64_t *CountersEnd = __llvm_profile_end_counters();
+ const char *CountersBegin = __llvm_profile_begin_counters();
+ const char *CountersEnd = __llvm_profile_end_counters();
const uint64_t DataSize = __llvm_profile_get_data_size(DataBegin, DataEnd);
- const uint64_t CountersOffset = sizeof(__llvm_profile_header) +
- __llvm_write_binary_ids(NULL) +
- (DataSize * sizeof(__llvm_profile_data));
- uint64_t CountersSize = CountersEnd - CountersBegin;
+ const uint64_t CountersOffset =
+ sizeof(__llvm_profile_header) + __llvm_write_binary_ids(NULL) + DataSize;
+ uint64_t CountersSize =
+ __llvm_profile_get_counters_size(CountersBegin, CountersEnd);
/* Don't publish a VMO if there are no counters. */
if (!CountersSize)
@@ -179,9 +179,6 @@ void __llvm_profile_initialize(void) {
* also consumes the VMO handle. */
__sanitizer_publish_data(ProfileSinkName, Vmo);
- /* Use the dumpfile symbolizer markup element to write the name of VMO. */
- lprofWrite("LLVM Profile: {{{dumpfile:%s:%s}}}\n", ProfileSinkName, VmoName);
-
/* Update the profile fields based on the current mapping. */
INSTR_PROF_PROFILE_COUNTER_BIAS_VAR =
(intptr_t)Mapping - (uintptr_t)CountersBegin + CountersOffset;
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformLinux.c b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformLinux.c
index 5d47083b8bfe..19266ab6c6fb 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformLinux.c
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformLinux.c
@@ -7,31 +7,27 @@
\*===----------------------------------------------------------------------===*/
#if defined(__linux__) || defined(__FreeBSD__) || defined(__Fuchsia__) || \
- (defined(__sun__) && defined(__svr4__)) || defined(__NetBSD__)
+ (defined(__sun__) && defined(__svr4__)) || defined(__NetBSD__) || \
+ defined(_AIX)
+#if !defined(_AIX)
#include <elf.h>
#include <link.h>
+#endif
#include <stdlib.h>
#include <string.h>
#include "InstrProfiling.h"
#include "InstrProfilingInternal.h"
-#if defined(__FreeBSD__) && !defined(ElfW)
-/*
- * FreeBSD's elf.h and link.h headers do not define the ElfW(type) macro yet.
- * If this is added to all supported FreeBSD versions in the future, this
- * compatibility macro can be removed.
- */
-#define ElfW(type) __ElfN(type)
-#endif
-
#define PROF_DATA_START INSTR_PROF_SECT_START(INSTR_PROF_DATA_COMMON)
#define PROF_DATA_STOP INSTR_PROF_SECT_STOP(INSTR_PROF_DATA_COMMON)
#define PROF_NAME_START INSTR_PROF_SECT_START(INSTR_PROF_NAME_COMMON)
#define PROF_NAME_STOP INSTR_PROF_SECT_STOP(INSTR_PROF_NAME_COMMON)
#define PROF_CNTS_START INSTR_PROF_SECT_START(INSTR_PROF_CNTS_COMMON)
#define PROF_CNTS_STOP INSTR_PROF_SECT_STOP(INSTR_PROF_CNTS_COMMON)
+#define PROF_BITS_START INSTR_PROF_SECT_START(INSTR_PROF_BITS_COMMON)
+#define PROF_BITS_STOP INSTR_PROF_SECT_STOP(INSTR_PROF_BITS_COMMON)
#define PROF_ORDERFILE_START INSTR_PROF_SECT_START(INSTR_PROF_ORDERFILE_COMMON)
#define PROF_VNODES_START INSTR_PROF_SECT_START(INSTR_PROF_VNODES_COMMON)
#define PROF_VNODES_STOP INSTR_PROF_SECT_STOP(INSTR_PROF_VNODES_COMMON)
@@ -43,8 +39,10 @@ extern __llvm_profile_data PROF_DATA_START COMPILER_RT_VISIBILITY
COMPILER_RT_WEAK;
extern __llvm_profile_data PROF_DATA_STOP COMPILER_RT_VISIBILITY
COMPILER_RT_WEAK;
-extern uint64_t PROF_CNTS_START COMPILER_RT_VISIBILITY COMPILER_RT_WEAK;
-extern uint64_t PROF_CNTS_STOP COMPILER_RT_VISIBILITY COMPILER_RT_WEAK;
+extern char PROF_CNTS_START COMPILER_RT_VISIBILITY COMPILER_RT_WEAK;
+extern char PROF_CNTS_STOP COMPILER_RT_VISIBILITY COMPILER_RT_WEAK;
+extern char PROF_BITS_START COMPILER_RT_VISIBILITY COMPILER_RT_WEAK;
+extern char PROF_BITS_STOP COMPILER_RT_VISIBILITY COMPILER_RT_WEAK;
extern uint32_t PROF_ORDERFILE_START COMPILER_RT_VISIBILITY COMPILER_RT_WEAK;
extern char PROF_NAME_START COMPILER_RT_VISIBILITY COMPILER_RT_WEAK;
extern char PROF_NAME_STOP COMPILER_RT_VISIBILITY COMPILER_RT_WEAK;
@@ -65,12 +63,18 @@ COMPILER_RT_VISIBILITY const char *__llvm_profile_begin_names(void) {
COMPILER_RT_VISIBILITY const char *__llvm_profile_end_names(void) {
return &PROF_NAME_STOP;
}
-COMPILER_RT_VISIBILITY uint64_t *__llvm_profile_begin_counters(void) {
+COMPILER_RT_VISIBILITY char *__llvm_profile_begin_counters(void) {
return &PROF_CNTS_START;
}
-COMPILER_RT_VISIBILITY uint64_t *__llvm_profile_end_counters(void) {
+COMPILER_RT_VISIBILITY char *__llvm_profile_end_counters(void) {
return &PROF_CNTS_STOP;
}
+COMPILER_RT_VISIBILITY char *__llvm_profile_begin_bitmap(void) {
+ return &PROF_BITS_START;
+}
+COMPILER_RT_VISIBILITY char *__llvm_profile_end_bitmap(void) {
+ return &PROF_BITS_STOP;
+}
COMPILER_RT_VISIBILITY uint32_t *__llvm_profile_begin_orderfile(void) {
return &PROF_ORDERFILE_START;
}
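The PROF_*_START/STOP externs above rely on a linker convention: for any section whose name is a valid C identifier, GNU-style linkers (ld, gold, lld) synthesize __start_<section> and __stop_<section> symbols bounding it. A minimal standalone demonstration of that convention, with a hypothetical section name (not part of the patch):

#include <stdio.h>

__attribute__((used, section("my_sec"))) static int ValA = 1;
__attribute__((used, section("my_sec"))) static int ValB = 2;

extern int __start_my_sec;
extern int __stop_my_sec;

int main(void) {
  /* Walk every int the linker placed in my_sec. */
  for (int *P = &__start_my_sec; P != &__stop_my_sec; ++P)
    printf("%d\n", *P);
  return 0;
}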
@@ -91,23 +95,6 @@ static size_t RoundUp(size_t size, size_t align) {
}
/*
- * Write binary id length and then its data, because binary id does not
- * have a fixed length.
- */
-static int WriteOneBinaryId(ProfDataWriter *Writer, uint64_t BinaryIdLen,
- const uint8_t *BinaryIdData) {
- ProfDataIOVec BinaryIdIOVec[] = {
- {&BinaryIdLen, sizeof(uint64_t), 1, 0},
- {BinaryIdData, sizeof(uint8_t), BinaryIdLen, 0}};
- if (Writer->Write(Writer, BinaryIdIOVec,
- sizeof(BinaryIdIOVec) / sizeof(*BinaryIdIOVec)))
- return -1;
-
- /* Successfully wrote binary id, report success. */
- return 0;
-}
-
-/*
* Look for the note that has the name "GNU\0" and type NT_GNU_BUILD_ID
* that contains build id. If build id exists, write binary id.
*
@@ -122,19 +109,19 @@ static int WriteOneBinaryId(ProfDataWriter *Writer, uint64_t BinaryIdLen,
static int WriteBinaryIdForNote(ProfDataWriter *Writer,
const ElfW(Nhdr) * Note) {
int BinaryIdSize = 0;
-
const char *NoteName = (const char *)Note + sizeof(ElfW(Nhdr));
if (Note->n_type == NT_GNU_BUILD_ID && Note->n_namesz == 4 &&
memcmp(NoteName, "GNU\0", 4) == 0) {
-
uint64_t BinaryIdLen = Note->n_descsz;
const uint8_t *BinaryIdData =
(const uint8_t *)(NoteName + RoundUp(Note->n_namesz, 4));
+ uint8_t BinaryIdPadding = __llvm_profile_get_num_padding_bytes(BinaryIdLen);
if (Writer != NULL &&
- WriteOneBinaryId(Writer, BinaryIdLen, BinaryIdData) == -1)
+ lprofWriteOneBinaryId(Writer, BinaryIdLen, BinaryIdData,
+ BinaryIdPadding) == -1)
return -1;
- BinaryIdSize = sizeof(BinaryIdLen) + BinaryIdLen;
+ BinaryIdSize = sizeof(BinaryIdLen) + BinaryIdLen + BinaryIdPadding;
}
return BinaryIdSize;
@@ -147,12 +134,12 @@ static int WriteBinaryIdForNote(ProfDataWriter *Writer,
*/
static int WriteBinaryIds(ProfDataWriter *Writer, const ElfW(Nhdr) * Note,
const ElfW(Nhdr) * NotesEnd) {
- int TotalBinaryIdsSize = 0;
+ int BinaryIdsSize = 0;
while (Note < NotesEnd) {
- int Result = WriteBinaryIdForNote(Writer, Note);
- if (Result == -1)
+ int OneBinaryIdSize = WriteBinaryIdForNote(Writer, Note);
+ if (OneBinaryIdSize == -1)
return -1;
- TotalBinaryIdsSize += Result;
+ BinaryIdsSize += OneBinaryIdSize;
/* Calculate the offset of the next note in notes section. */
size_t NoteOffset = sizeof(ElfW(Nhdr)) + RoundUp(Note->n_namesz, 4) +
@@ -160,7 +147,7 @@ static int WriteBinaryIds(ProfDataWriter *Writer, const ElfW(Nhdr) * Note,
Note = (const ElfW(Nhdr) *)((const char *)(Note) + NoteOffset);
}
- return TotalBinaryIdsSize;
+ return BinaryIdsSize;
}
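The offset stepping above follows the standard ELF note layout: an Nhdr, the name padded to 4-byte alignment, then the descriptor padded likewise. A small sketch of the arithmetic for a GNU build-id note, assuming a 20-byte SHA-1 descriptor (not part of the patch):

#include <elf.h>
#include <stdio.h>

static size_t RoundUp4(size_t V) { return (V + 3) & ~(size_t)3; }

int main(void) {
  Elf64_Nhdr N = {4 /* "GNU\0" */, 20 /* descriptor */, NT_GNU_BUILD_ID};
  size_t Next = sizeof(N) + RoundUp4(N.n_namesz) + RoundUp4(N.n_descsz);
  printf("next note at +%zu bytes\n", Next); /* 12 + 4 + 20 = 36 */
  return 0;
}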
/*
@@ -174,23 +161,48 @@ COMPILER_RT_VISIBILITY int __llvm_write_binary_ids(ProfDataWriter *Writer) {
const ElfW(Phdr) *ProgramHeader =
(const ElfW(Phdr) *)((uintptr_t)ElfHeader + ElfHeader->e_phoff);
+ int TotalBinaryIdsSize = 0;
uint32_t I;
/* Iterate through entries in the program header. */
for (I = 0; I < ElfHeader->e_phnum; I++) {
- /* Look for the notes section in program header entries. */
+ /* Look for the notes segment in program header entries. */
if (ProgramHeader[I].p_type != PT_NOTE)
continue;
- const ElfW(Nhdr) *Note =
- (const ElfW(Nhdr) *)((uintptr_t)ElfHeader + ProgramHeader[I].p_offset);
- const ElfW(Nhdr) *NotesEnd =
- (const ElfW(Nhdr) *)((const char *)(Note) + ProgramHeader[I].p_filesz);
- return WriteBinaryIds(Writer, Note, NotesEnd);
+ /* There can be multiple note segments; examine each of them. */
+ const ElfW(Nhdr) * Note;
+ const ElfW(Nhdr) * NotesEnd;
+ /*
+ * When examining notes in the file, use p_offset, which is the offset within
+ * the ELF file, to find the start of the notes.
+ */
+ if (ProgramHeader[I].p_memsz == 0 ||
+ ProgramHeader[I].p_memsz == ProgramHeader[I].p_filesz) {
+ Note = (const ElfW(Nhdr) *)((uintptr_t)ElfHeader +
+ ProgramHeader[I].p_offset);
+ NotesEnd = (const ElfW(Nhdr) *)((const char *)(Note) +
+ ProgramHeader[I].p_filesz);
+ } else {
+ /*
+ * When examining notes in memory, use p_vaddr, which is the address of the
+ * section after it is loaded into memory, to find the start of the notes.
+ */
+ Note =
+ (const ElfW(Nhdr) *)((uintptr_t)ElfHeader + ProgramHeader[I].p_vaddr);
+ NotesEnd =
+ (const ElfW(Nhdr) *)((const char *)(Note) + ProgramHeader[I].p_memsz);
+ }
+
+ int BinaryIdsSize = WriteBinaryIds(Writer, Note, NotesEnd);
+ if (BinaryIdsSize == -1)
+ return -1;
+
+ TotalBinaryIdsSize += BinaryIdsSize;
}
- return 0;
+ return TotalBinaryIdsSize;
}
-#else /* !NT_GNU_BUILD_ID */
+#elif !defined(_AIX) /* !NT_GNU_BUILD_ID */
/*
* Fallback implementation for targets that don't support the GNU
* extensions NT_GNU_BUILD_ID and __ehdr_start.
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformOther.c b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformOther.c
index 0e59148e2044..5319ca813b43 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformOther.c
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformOther.c
@@ -7,8 +7,8 @@
\*===----------------------------------------------------------------------===*/
#if !defined(__APPLE__) && !defined(__linux__) && !defined(__FreeBSD__) && \
- !(defined(__sun__) && defined(__svr4__)) && !defined(__NetBSD__) && \
- !defined(_WIN32)
+ !defined(__Fuchsia__) && !(defined(__sun__) && defined(__svr4__)) && \
+ !defined(__NetBSD__) && !defined(_WIN32) && !defined(_AIX)
#include <stdlib.h>
#include <stdio.h>
@@ -20,8 +20,8 @@ static const __llvm_profile_data *DataFirst = NULL;
static const __llvm_profile_data *DataLast = NULL;
static const char *NamesFirst = NULL;
static const char *NamesLast = NULL;
-static uint64_t *CountersFirst = NULL;
-static uint64_t *CountersLast = NULL;
+static char *CountersFirst = NULL;
+static char *CountersLast = NULL;
static uint32_t *OrderFileFirst = NULL;
static const void *getMinAddr(const void *A1, const void *A2) {
@@ -46,17 +46,21 @@ void __llvm_profile_register_function(void *Data_) {
if (!DataFirst) {
DataFirst = Data;
DataLast = Data + 1;
- CountersFirst = Data->CounterPtr;
- CountersLast = (uint64_t *)Data->CounterPtr + Data->NumCounters;
+ CountersFirst = (char *)((uintptr_t)Data_ + Data->CounterPtr);
+ CountersLast =
+ CountersFirst + Data->NumCounters * __llvm_profile_counter_entry_size();
return;
}
DataFirst = (const __llvm_profile_data *)getMinAddr(DataFirst, Data);
- CountersFirst = (uint64_t *)getMinAddr(CountersFirst, Data->CounterPtr);
+ CountersFirst = (char *)getMinAddr(
+ CountersFirst, (char *)((uintptr_t)Data_ + Data->CounterPtr));
DataLast = (const __llvm_profile_data *)getMaxAddr(DataLast, Data + 1);
- CountersLast = (uint64_t *)getMaxAddr(
- CountersLast, (uint64_t *)Data->CounterPtr + Data->NumCounters);
+ CountersLast = (char *)getMaxAddr(
+ CountersLast,
+ (char *)((uintptr_t)Data_ + Data->CounterPtr) +
+ Data->NumCounters * __llvm_profile_counter_entry_size());
}
COMPILER_RT_VISIBILITY
@@ -81,9 +85,13 @@ const char *__llvm_profile_begin_names(void) { return NamesFirst; }
COMPILER_RT_VISIBILITY
const char *__llvm_profile_end_names(void) { return NamesLast; }
COMPILER_RT_VISIBILITY
-uint64_t *__llvm_profile_begin_counters(void) { return CountersFirst; }
+char *__llvm_profile_begin_counters(void) { return CountersFirst; }
COMPILER_RT_VISIBILITY
-uint64_t *__llvm_profile_end_counters(void) { return CountersLast; }
+char *__llvm_profile_end_counters(void) { return CountersLast; }
+COMPILER_RT_VISIBILITY
+char *__llvm_profile_begin_bitmap(void) { return BitmapFirst; }
+COMPILER_RT_VISIBILITY
+char *__llvm_profile_end_bitmap(void) { return BitmapLast; }
/* TODO: correctly set up OrderFileFirst. */
COMPILER_RT_VISIBILITY
uint32_t *__llvm_profile_begin_orderfile(void) { return OrderFileFirst; }
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformWindows.c b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformWindows.c
index a0192ced4f26..0751b28f81d0 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformWindows.c
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingPlatformWindows.c
@@ -13,12 +13,14 @@
#if defined(_MSC_VER)
/* Merge read-write sections into .data. */
-#pragma comment(linker, "/MERGE:.lprfc=.data")
+#pragma comment(linker, "/MERGE:.lprfb=.data")
#pragma comment(linker, "/MERGE:.lprfd=.data")
#pragma comment(linker, "/MERGE:.lprfv=.data")
#pragma comment(linker, "/MERGE:.lprfnd=.data")
/* Do *NOT* merge .lprfn and .lcovmap into .rdata. llvm-cov must be able to find
* after the fact.
+ * Do *NOT* merge .lprfc into .rdata. When binary profile correlation is
+ * enabled, llvm-cov must be able to find it after the fact.
*/
/* Allocate read-only section bounds. */
@@ -30,6 +32,8 @@
#pragma section(".lprfd$Z", read, write)
#pragma section(".lprfc$A", read, write)
#pragma section(".lprfc$Z", read, write)
+#pragma section(".lprfb$A", read, write)
+#pragma section(".lprfb$Z", read, write)
#pragma section(".lorderfile$A", read, write)
#pragma section(".lprfnd$A", read, write)
#pragma section(".lprfnd$Z", read, write)
@@ -41,8 +45,10 @@ __llvm_profile_data COMPILER_RT_SECTION(".lprfd$Z") DataEnd = {0};
const char COMPILER_RT_SECTION(".lprfn$A") NamesStart = '\0';
const char COMPILER_RT_SECTION(".lprfn$Z") NamesEnd = '\0';
-uint64_t COMPILER_RT_SECTION(".lprfc$A") CountersStart;
-uint64_t COMPILER_RT_SECTION(".lprfc$Z") CountersEnd;
+char COMPILER_RT_SECTION(".lprfc$A") CountersStart;
+char COMPILER_RT_SECTION(".lprfc$Z") CountersEnd;
+char COMPILER_RT_SECTION(".lprfb$A") BitmapStart;
+char COMPILER_RT_SECTION(".lprfb$Z") BitmapEnd;
uint32_t COMPILER_RT_SECTION(".lorderfile$A") OrderFileStart;
ValueProfNode COMPILER_RT_SECTION(".lprfnd$A") VNodesStart;
@@ -56,8 +62,10 @@ const __llvm_profile_data *__llvm_profile_end_data(void) { return &DataEnd; }
const char *__llvm_profile_begin_names(void) { return &NamesStart + 1; }
const char *__llvm_profile_end_names(void) { return &NamesEnd; }
-uint64_t *__llvm_profile_begin_counters(void) { return &CountersStart + 1; }
-uint64_t *__llvm_profile_end_counters(void) { return &CountersEnd; }
+char *__llvm_profile_begin_counters(void) { return &CountersStart + 1; }
+char *__llvm_profile_end_counters(void) { return &CountersEnd; }
+char *__llvm_profile_begin_bitmap(void) { return &BitmapStart + 1; }
+char *__llvm_profile_end_bitmap(void) { return &BitmapEnd; }
uint32_t *__llvm_profile_begin_orderfile(void) { return &OrderFileStart; }
ValueProfNode *__llvm_profile_begin_vnodes(void) { return &VNodesStart + 1; }
@@ -66,7 +74,17 @@ ValueProfNode *__llvm_profile_end_vnodes(void) { return &VNodesEnd; }
ValueProfNode *CurrentVNode = &VNodesStart + 1;
ValueProfNode *EndVNode = &VNodesEnd;
+/* lld-link provides the __buildid symbol, which points to the 16-byte build id
+ * when the /build-id flag is used. https://lld.llvm.org/windows_support.html#lld-flags */
+#define BUILD_ID_LEN 16
+COMPILER_RT_WEAK uint8_t __buildid[BUILD_ID_LEN];
COMPILER_RT_VISIBILITY int __llvm_write_binary_ids(ProfDataWriter *Writer) {
+ if (*__buildid) {
+ if (Writer &&
+ lprofWriteOneBinaryId(Writer, BUILD_ID_LEN, __buildid, 0) == -1)
+ return -1;
+ return sizeof(uint64_t) + BUILD_ID_LEN;
+ }
return 0;
}
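The *__buildid test above relies on weak-symbol semantics: when lld-link is not given /build-id, the runtime's weak zero-initialized array is what gets linked, so the first byte stays 0 (note the test assumes a real build id never starts with a zero byte). A GCC-style standalone sketch of the idiom; the real code uses COMPILER_RT_WEAK, which maps to each platform's weak-definition mechanism:

#include <stdint.h>
#include <stdio.h>

__attribute__((weak)) uint8_t __buildid[16]; /* fallback, all zeroes */

int main(void) {
  if (*__buildid)
    puts("linker-provided build id present");
  else
    puts("no build id: weak zero definition in effect");
  return 0;
}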
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingRuntime.cpp b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingRuntime.cpp
index 4ea2bb263f5a..6b2ce9700173 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingRuntime.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingRuntime.cpp
@@ -10,19 +10,15 @@ extern "C" {
#include "InstrProfiling.h"
-/* int __llvm_profile_runtime */
-COMPILER_RT_VISIBILITY int INSTR_PROF_PROFILE_RUNTIME_VAR;
+static int RegisterRuntime() {
+ __llvm_profile_initialize();
+#ifdef _AIX
+ extern COMPILER_RT_VISIBILITY void *__llvm_profile_keep[];
+ (void)*(void *volatile *)__llvm_profile_keep;
+#endif
+ return 0;
}
-namespace {
-
-class RegisterRuntime {
-public:
- RegisterRuntime() {
- __llvm_profile_initialize();
- }
-};
-
-RegisterRuntime Registration;
-
+/* int __llvm_profile_runtime */
+COMPILER_RT_VISIBILITY int INSTR_PROF_PROFILE_RUNTIME_VAR = RegisterRuntime();
}
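The rewrite above drops the C++ static-object wrapper: initializing INSTR_PROF_PROFILE_RUNTIME_VAR with a call to RegisterRuntime() makes any reference to that variable both pull in the runtime and run __llvm_profile_initialize() during static initialization. A minimal sketch of the idiom with illustrative names (not part of the patch):

// Initializing a global from a function call runs it before main().
#include <cstdio>

static int RegisterOnce() {
  std::puts("registered during static initialization");
  return 0;
}

int RuntimeVar = RegisterOnce();

int main() { std::puts("main runs after registration"); }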
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingUtil.c b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingUtil.c
index 4fa792b72eac..cd18cba3e268 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingUtil.c
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingUtil.c
@@ -34,13 +34,19 @@
#endif
#if defined(__Fuchsia__)
+#include <zircon/process.h>
#include <zircon/syscalls.h>
#endif
+#if defined(__FreeBSD__)
+#include <signal.h>
+#include <sys/procctl.h>
+#endif
+
#include "InstrProfiling.h"
#include "InstrProfilingUtil.h"
-COMPILER_RT_WEAK unsigned lprofDirMode = 0755;
+COMPILER_RT_VISIBILITY unsigned lprofDirMode = 0755;
COMPILER_RT_VISIBILITY
void __llvm_profile_recursive_mkdir(char *path) {
@@ -318,26 +324,39 @@ COMPILER_RT_VISIBILITY const char *lprofFindLastDirSeparator(const char *Path) {
return Sep;
}
-COMPILER_RT_VISIBILITY int lprofSuspendSigKill() {
+COMPILER_RT_VISIBILITY int lprofSuspendSigKill(void) {
#if defined(__linux__)
int PDeachSig = 0;
/* Temporarily suspend getting SIGKILL upon exit of the parent process. */
if (prctl(PR_GET_PDEATHSIG, &PDeachSig) == 0 && PDeachSig == SIGKILL)
prctl(PR_SET_PDEATHSIG, 0);
return (PDeachSig == SIGKILL);
+#elif defined(__FreeBSD__)
+ int PDeachSig = 0, PDisableSig = 0;
+ if (procctl(P_PID, 0, PROC_PDEATHSIG_STATUS, &PDeachSig) == 0 &&
+ PDeachSig == SIGKILL)
+ procctl(P_PID, 0, PROC_PDEATHSIG_CTL, &PDisableSig);
+ return (PDeachSig == SIGKILL);
#else
return 0;
#endif
}
-COMPILER_RT_VISIBILITY void lprofRestoreSigKill() {
+COMPILER_RT_VISIBILITY void lprofRestoreSigKill(void) {
#if defined(__linux__)
prctl(PR_SET_PDEATHSIG, SIGKILL);
+#elif defined(__FreeBSD__)
+ int PEnableSig = SIGKILL;
+ procctl(P_PID, 0, PROC_PDEATHSIG_CTL, &PEnableSig);
#endif
}
COMPILER_RT_VISIBILITY int lprofReleaseMemoryPagesToOS(uintptr_t Begin,
uintptr_t End) {
+#if defined(__ve__)
+ // VE doesn't support madvise.
+ return 0;
+#else
size_t PageSize = getpagesize();
uintptr_t BeginAligned = lprofRoundUpTo((uintptr_t)Begin, PageSize);
uintptr_t EndAligned = lprofRoundDownTo((uintptr_t)End, PageSize);
@@ -352,4 +371,5 @@ COMPILER_RT_VISIBILITY int lprofReleaseMemoryPagesToOS(uintptr_t Begin,
#endif
}
return 0;
+#endif
}
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingValue.c b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingValue.c
index 7f368b9f8d4e..a608d41d39e7 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingValue.c
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingValue.c
@@ -39,7 +39,7 @@ COMPILER_RT_VISIBILITY ValueProfNode
COMPILER_RT_VISIBILITY uint32_t VPMaxNumValsPerSite =
INSTR_PROF_DEFAULT_NUM_VAL_PER_SITE;
-COMPILER_RT_VISIBILITY void lprofSetupValueProfiler() {
+COMPILER_RT_VISIBILITY void lprofSetupValueProfiler(void) {
const char *Str = 0;
Str = getenv("LLVM_VP_MAX_NUM_VALS_PER_SITE");
if (Str && Str[0]) {
@@ -59,7 +59,19 @@ COMPILER_RT_VISIBILITY void lprofSetMaxValsPerSite(uint32_t MaxVals) {
COMPILER_RT_VISIBILITY void
__llvm_profile_set_num_value_sites(__llvm_profile_data *Data,
uint32_t ValueKind, uint16_t NumValueSites) {
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#elif defined(__clang__)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wcast-qual"
+#endif
*((uint16_t *)&Data->NumValueSites[ValueKind]) = NumValueSites;
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#elif defined(__clang__)
+#pragma clang diagnostic pop
+#endif
}
/* This method is only used in value profiler mock testing. */
@@ -253,7 +265,7 @@ __llvm_profile_instrument_memop(uint64_t TargetValue, void *Data,
/*
* A wrapper struct that represents value profile runtime data.
* Like InstrProfRecord class which is used by profiling host tools,
- * ValueProfRuntimeRecord also implements the abstract intefaces defined in
+ * ValueProfRuntimeRecord also implements the abstract interfaces defined in
* ValueProfRecordClosure so that the runtime data can be serialized using
* shared C implementation.
*/
@@ -353,6 +365,6 @@ static VPDataReaderType TheVPDataReader = {
getFirstValueProfRecord, getNumValueDataForSiteWrapper,
getValueProfDataSizeWrapper, getNextNValueData};
-COMPILER_RT_VISIBILITY VPDataReaderType *lprofGetVPDataReader() {
+COMPILER_RT_VISIBILITY VPDataReaderType *lprofGetVPDataReader(void) {
return &TheVPDataReader;
}
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingVersionVar.c b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingVersionVar.c
index a6f222150794..21400bfb2caa 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingVersionVar.c
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingVersionVar.c
@@ -14,4 +14,5 @@
* user has not specified one. Set this up by moving the runtime's copy of this
* symbol to an object file within the archive.
*/
-COMPILER_RT_WEAK uint64_t INSTR_PROF_RAW_VERSION_VAR = INSTR_PROF_RAW_VERSION;
+COMPILER_RT_VISIBILITY COMPILER_RT_WEAK uint64_t INSTR_PROF_RAW_VERSION_VAR =
+ INSTR_PROF_RAW_VERSION;
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingWriter.c b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingWriter.c
index 25f630293227..4d767d138514 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingWriter.c
+++ b/contrib/llvm-project/compiler-rt/lib/profile/InstrProfilingWriter.c
@@ -32,7 +32,7 @@ static uint32_t VPDataArraySize = sizeof(VPDataArray) / sizeof(*VPDataArray);
COMPILER_RT_VISIBILITY uint8_t *DynamicBufferIOBuffer = 0;
COMPILER_RT_VISIBILITY uint32_t VPBufferSize = 0;
-/* The buffer writer is reponsponsible in keeping writer state
+/* The buffer writer is responsible in keeping writer state
* across the call.
*/
COMPILER_RT_VISIBILITY uint32_t lprofBufferWriter(ProfDataWriter *This,
@@ -244,44 +244,66 @@ COMPILER_RT_VISIBILITY int lprofWriteData(ProfDataWriter *Writer,
/* Match logic in __llvm_profile_write_buffer(). */
const __llvm_profile_data *DataBegin = __llvm_profile_begin_data();
const __llvm_profile_data *DataEnd = __llvm_profile_end_data();
- const uint64_t *CountersBegin = __llvm_profile_begin_counters();
- const uint64_t *CountersEnd = __llvm_profile_end_counters();
+ const char *CountersBegin = __llvm_profile_begin_counters();
+ const char *CountersEnd = __llvm_profile_end_counters();
+ const char *BitmapBegin = __llvm_profile_begin_bitmap();
+ const char *BitmapEnd = __llvm_profile_end_bitmap();
const char *NamesBegin = __llvm_profile_begin_names();
const char *NamesEnd = __llvm_profile_end_names();
return lprofWriteDataImpl(Writer, DataBegin, DataEnd, CountersBegin,
- CountersEnd, VPDataReader, NamesBegin, NamesEnd,
- SkipNameDataWrite);
+ CountersEnd, BitmapBegin, BitmapEnd, VPDataReader,
+ NamesBegin, NamesEnd, SkipNameDataWrite);
}
COMPILER_RT_VISIBILITY int
lprofWriteDataImpl(ProfDataWriter *Writer, const __llvm_profile_data *DataBegin,
const __llvm_profile_data *DataEnd,
- const uint64_t *CountersBegin, const uint64_t *CountersEnd,
+ const char *CountersBegin, const char *CountersEnd,
+ const char *BitmapBegin, const char *BitmapEnd,
VPDataReaderType *VPDataReader, const char *NamesBegin,
const char *NamesEnd, int SkipNameDataWrite) {
-
/* Calculate size of sections. */
- const uint64_t DataSize = __llvm_profile_get_data_size(DataBegin, DataEnd);
- const uint64_t CountersSize = CountersEnd - CountersBegin;
- const uint64_t NamesSize = NamesEnd - NamesBegin;
+ const uint64_t DataSectionSize =
+ __llvm_profile_get_data_size(DataBegin, DataEnd);
+ const uint64_t NumData = __llvm_profile_get_num_data(DataBegin, DataEnd);
+ const uint64_t CountersSectionSize =
+ __llvm_profile_get_counters_size(CountersBegin, CountersEnd);
+ const uint64_t NumCounters =
+ __llvm_profile_get_num_counters(CountersBegin, CountersEnd);
+ const uint64_t NumBitmapBytes =
+ __llvm_profile_get_num_bitmap_bytes(BitmapBegin, BitmapEnd);
+ const uint64_t NamesSize = __llvm_profile_get_name_size(NamesBegin, NamesEnd);
/* Create the header. */
__llvm_profile_header Header;
- if (!DataSize)
- return 0;
-
/* Determine how much padding is needed before/after the counters and after
* the names. */
uint64_t PaddingBytesBeforeCounters, PaddingBytesAfterCounters,
- PaddingBytesAfterNames;
+ PaddingBytesAfterNames, PaddingBytesAfterBitmapBytes;
__llvm_profile_get_padding_sizes_for_counters(
- DataSize, CountersSize, NamesSize, &PaddingBytesBeforeCounters,
- &PaddingBytesAfterCounters, &PaddingBytesAfterNames);
+ DataSectionSize, CountersSectionSize, NumBitmapBytes, NamesSize,
+ &PaddingBytesBeforeCounters, &PaddingBytesAfterCounters,
+ &PaddingBytesAfterBitmapBytes, &PaddingBytesAfterNames);
+ {
/* Initialize header structure. */
#define INSTR_PROF_RAW_HEADER(Type, Name, Init) Header.Name = Init;
#include "profile/InstrProfData.inc"
+ }
+
+ /* On WIN64, label differences are truncated 32-bit values. Truncate
+ * CountersDelta to match. */
+#ifdef _WIN64
+ Header.CountersDelta = (uint32_t)Header.CountersDelta;
+ Header.BitmapDelta = (uint32_t)Header.BitmapDelta;
+#endif
+
+ /* The data and names sections are omitted in lightweight mode. */
+ if (NumData == 0 && NamesSize == 0) {
+ Header.CountersDelta = 0;
+ Header.NamesDelta = 0;
+ }
/* Write the profile header. */
ProfDataIOVec IOVec[] = {{&Header, sizeof(__llvm_profile_header), 1, 0}};
@@ -294,18 +316,43 @@ lprofWriteDataImpl(ProfDataWriter *Writer, const __llvm_profile_data *DataBegin,
/* Write the profile data. */
ProfDataIOVec IOVecData[] = {
- {DataBegin, sizeof(__llvm_profile_data), DataSize, 0},
+ {DataBegin, sizeof(uint8_t), DataSectionSize, 0},
{NULL, sizeof(uint8_t), PaddingBytesBeforeCounters, 1},
- {CountersBegin, sizeof(uint64_t), CountersSize, 0},
+ {CountersBegin, sizeof(uint8_t), CountersSectionSize, 0},
{NULL, sizeof(uint8_t), PaddingBytesAfterCounters, 1},
+ {BitmapBegin, sizeof(uint8_t), NumBitmapBytes, 0},
+ {NULL, sizeof(uint8_t), PaddingBytesAfterBitmapBytes, 1},
{SkipNameDataWrite ? NULL : NamesBegin, sizeof(uint8_t), NamesSize, 0},
{NULL, sizeof(uint8_t), PaddingBytesAfterNames, 1}};
if (Writer->Write(Writer, IOVecData, sizeof(IOVecData) / sizeof(*IOVecData)))
return -1;
- /* Value profiling is not yet supported in continuous mode. */
- if (__llvm_profile_is_continuous_mode_enabled())
+ /* Value profiling is not yet supported in continuous mode or in profile
+ * correlation mode. */
+ if (__llvm_profile_is_continuous_mode_enabled() ||
+ (NumData == 0 && NamesSize == 0))
return 0;
return writeValueProfData(Writer, VPDataReader, DataBegin, DataEnd);
}
+
+/*
+ * Write binary id length and then its data, because binary id does not
+ * have a fixed length.
+ */
+COMPILER_RT_VISIBILITY
+int lprofWriteOneBinaryId(ProfDataWriter *Writer, uint64_t BinaryIdLen,
+ const uint8_t *BinaryIdData,
+ uint64_t BinaryIdPadding) {
+ ProfDataIOVec BinaryIdIOVec[] = {
+ {&BinaryIdLen, sizeof(uint64_t), 1, 0},
+ {BinaryIdData, sizeof(uint8_t), BinaryIdLen, 0},
+ {NULL, sizeof(uint8_t), BinaryIdPadding, 1},
+ };
+ if (Writer->Write(Writer, BinaryIdIOVec,
+ sizeof(BinaryIdIOVec) / sizeof(*BinaryIdIOVec)))
+ return -1;
+
+ /* Successfully wrote binary id, report success. */
+ return 0;
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/WindowsMMap.c b/contrib/llvm-project/compiler-rt/lib/profile/WindowsMMap.c
index 07c0a689feae..9d7da835b1ed 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/WindowsMMap.c
+++ b/contrib/llvm-project/compiler-rt/lib/profile/WindowsMMap.c
@@ -124,8 +124,7 @@ int madvise(void *addr, size_t length, int advice)
return 0;
}
-COMPILER_RT_VISIBILITY
-int lock(HANDLE handle, DWORD lockType, BOOL blocking) {
+static int lock(HANDLE handle, DWORD lockType, BOOL blocking) {
DWORD flags = lockType;
if (!blocking)
flags |= LOCKFILE_FAIL_IMMEDIATELY;
diff --git a/contrib/llvm-project/compiler-rt/lib/profile/WindowsMMap.h b/contrib/llvm-project/compiler-rt/lib/profile/WindowsMMap.h
index 68b8de2398d6..1df1a0be0b02 100644
--- a/contrib/llvm-project/compiler-rt/lib/profile/WindowsMMap.h
+++ b/contrib/llvm-project/compiler-rt/lib/profile/WindowsMMap.h
@@ -60,6 +60,12 @@
# define DWORD_LO(x) (x)
#endif
+#define mmap __llvm_profile_mmap
+#define munmap __llvm_profile_munmap
+#define msync __llvm_profile_msync
+#define madvise __llvm_profile_madvise
+#define flock __llvm_profile_flock
+
void *mmap(void *start, size_t length, int prot, int flags, int fd,
off_t offset);
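The #define block above renames the runtime's private POSIX shims so they cannot collide with other definitions of mmap and friends in the same link: the macro rewrites both the declarations below and every call site. A runnable sketch of the pattern with a hypothetical function name (not part of the patch):

#include <stdio.h>

#define greet impl_greet      /* renames declaration, definition, and calls */
void greet(const char *who);  /* really declares impl_greet */

int main(void) {
  greet("profile runtime");   /* really calls impl_greet */
  return 0;
}

void greet(const char *who) { /* really defines impl_greet */
  printf("hello, %s\n", who);
}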
diff --git a/contrib/llvm-project/compiler-rt/lib/safestack/safestack_platform.h b/contrib/llvm-project/compiler-rt/lib/safestack/safestack_platform.h
index 81e4c2645ce2..2b1fc139baa9 100644
--- a/contrib/llvm-project/compiler-rt/lib/safestack/safestack_platform.h
+++ b/contrib/llvm-project/compiler-rt/lib/safestack/safestack_platform.h
@@ -94,7 +94,7 @@ inline void *Mmap(void *addr, size_t length, int prot, int flags, int fd,
off_t offset) {
#if SANITIZER_NETBSD
return __mmap(addr, length, prot, flags, fd, 0, offset);
-#elif defined(__x86_64__) && (SANITIZER_FREEBSD)
+#elif SANITIZER_FREEBSD && (defined(__aarch64__) || defined(__x86_64__))
return (void *)__syscall(SYS_mmap, addr, length, prot, flags, fd, offset);
#else
return (void *)syscall(SYS_mmap, addr, length, prot, flags, fd, offset);
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sancov_flags.inc b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sancov_flags.inc
index cca33fc359f4..de9ede217fc3 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sancov_flags.inc
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sancov_flags.inc
@@ -14,7 +14,7 @@
#endif
SANCOV_FLAG(bool, symbolize, true,
- "If set, converage information will be symbolized by sancov tool "
+ "If set, coverage information will be symbolized by sancov tool "
"after dumping.")
SANCOV_FLAG(bool, help, false, "Print flags help.")
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_addrhashmap.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_addrhashmap.h
index 15f81a04350f..fe48b9caf067 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_addrhashmap.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_addrhashmap.h
@@ -39,6 +39,11 @@ namespace __sanitizer {
// the current thread has exclusive access to the data
// if !h.exists() then the element never existed
// }
+// {
+// Map::Handle h(&m, addr, false, true);
+// this will create a new element or return a handle to an existing element
+// if !h.created() this thread does *not* have exclusive access to the data
+// }
template<typename T, uptr kSize>
class AddrHashMap {
private:
@@ -56,7 +61,7 @@ class AddrHashMap {
static const uptr kBucketSize = 3;
struct Bucket {
- RWMutex mtx;
+ Mutex mtx;
atomic_uintptr_t add;
Cell cells[kBucketSize];
};
@@ -89,6 +94,12 @@ class AddrHashMap {
bool create_;
};
+ typedef void (*ForEachCallback)(const uptr key, const T &val, void *arg);
+ // ForEach acquires a lock on each bucket while iterating over
+ // elements. Note that this only ensures that the structure of the hashmap is
+ // unchanged; there may still be a data race on the element itself.
+ void ForEach(ForEachCallback cb, void *arg);
+
private:
friend class Handle;
Bucket *table_;
@@ -98,6 +109,33 @@ class AddrHashMap {
uptr calcHash(uptr addr);
};
+template <typename T, uptr kSize>
+void AddrHashMap<T, kSize>::ForEach(ForEachCallback cb, void *arg) {
+ for (uptr n = 0; n < kSize; n++) {
+ Bucket *bucket = &table_[n];
+
+ ReadLock lock(&bucket->mtx);
+
+ for (uptr i = 0; i < kBucketSize; i++) {
+ Cell *c = &bucket->cells[i];
+ uptr addr1 = atomic_load(&c->addr, memory_order_acquire);
+ if (addr1 != 0)
+ cb(addr1, c->val, arg);
+ }
+
+ // Iterate over any additional cells.
+ if (AddBucket *add =
+ (AddBucket *)atomic_load(&bucket->add, memory_order_acquire)) {
+ for (uptr i = 0; i < add->size; i++) {
+ Cell *c = &add->cells[i];
+ uptr addr1 = atomic_load(&c->addr, memory_order_acquire);
+ if (addr1 != 0)
+ cb(addr1, c->val, arg);
+ }
+ }
+ }
+}
+
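To make the new ForEach contract concrete, here is a standalone analogue with the same callback shape; a plain array stands in for the hash map, and the types are simplified stand-ins (not part of the patch):

#include <cstdio>

typedef unsigned long uptr; // stand-in for __sanitizer::uptr

// Matches the shape of AddrHashMap<int, kSize>::ForEachCallback.
static void CountEntry(const uptr key, const int &val, void *arg) {
  (void)key;
  (void)val;
  ++*static_cast<uptr *>(arg);
}

int main() {
  const uptr keys[] = {0x1000, 0x2000, 0x3000};
  const int vals[] = {1, 2, 3};
  uptr n = 0;
  // The real ForEach walks each bucket under a read lock.
  for (int i = 0; i < 3; i++)
    CountEntry(keys[i], vals[i], &n);
  std::printf("%lu entries\n", n); // prints: 3 entries
  return 0;
}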
template<typename T, uptr kSize>
AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr) {
map_ = map;
@@ -163,7 +201,8 @@ AddrHashMap<T, kSize>::AddrHashMap() {
}
template <typename T, uptr kSize>
-void AddrHashMap<T, kSize>::acquire(Handle *h) NO_THREAD_SAFETY_ANALYSIS {
+void AddrHashMap<T, kSize>::acquire(Handle *h)
+ SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
uptr addr = h->addr_;
uptr hash = calcHash(addr);
Bucket *b = &table_[hash];
@@ -292,7 +331,8 @@ void AddrHashMap<T, kSize>::acquire(Handle *h) NO_THREAD_SAFETY_ANALYSIS {
}
template <typename T, uptr kSize>
- void AddrHashMap<T, kSize>::release(Handle *h) NO_THREAD_SAFETY_ANALYSIS {
+ void AddrHashMap<T, kSize>::release(Handle *h)
+ SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
if (!h->cell_)
return;
Bucket *b = h->bucket_;
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp
index bcb7370a7906..0513ae36fbc7 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp
@@ -17,6 +17,7 @@
#include "sanitizer_allocator_internal.h"
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
+#include "sanitizer_platform.h"
namespace __sanitizer {
@@ -24,66 +25,6 @@ namespace __sanitizer {
const char *PrimaryAllocatorName = "SizeClassAllocator";
const char *SecondaryAllocatorName = "LargeMmapAllocator";
-// ThreadSanitizer for Go uses libc malloc/free.
-#if defined(SANITIZER_USE_MALLOC)
-# if SANITIZER_LINUX && !SANITIZER_ANDROID
-extern "C" void *__libc_malloc(uptr size);
-# if !SANITIZER_GO
-extern "C" void *__libc_memalign(uptr alignment, uptr size);
-# endif
-extern "C" void *__libc_realloc(void *ptr, uptr size);
-extern "C" void __libc_free(void *ptr);
-# else
-# include <stdlib.h>
-# define __libc_malloc malloc
-# if !SANITIZER_GO
-static void *__libc_memalign(uptr alignment, uptr size) {
- void *p;
- uptr error = posix_memalign(&p, alignment, size);
- if (error) return nullptr;
- return p;
-}
-# endif
-# define __libc_realloc realloc
-# define __libc_free free
-# endif
-
-static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
- uptr alignment) {
- (void)cache;
-#if !SANITIZER_GO
- if (alignment == 0)
- return __libc_malloc(size);
- else
- return __libc_memalign(alignment, size);
-#else
- // Windows does not provide __libc_memalign/posix_memalign. It provides
- // __aligned_malloc, but the allocated blocks can't be passed to free,
- // they need to be passed to __aligned_free. InternalAlloc interface does
- // not account for such requirement. Alignemnt does not seem to be used
- // anywhere in runtime, so just call __libc_malloc for now.
- DCHECK_EQ(alignment, 0);
- return __libc_malloc(size);
-#endif
-}
-
-static void *RawInternalRealloc(void *ptr, uptr size,
- InternalAllocatorCache *cache) {
- (void)cache;
- return __libc_realloc(ptr, size);
-}
-
-static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
- (void)cache;
- __libc_free(ptr);
-}
-
-InternalAllocator *internal_allocator() {
- return 0;
-}
-
-#else // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
-
static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
static atomic_uint8_t internal_allocator_initialized;
static StaticSpinMutex internal_alloc_init_mu;
@@ -135,8 +76,6 @@ static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
internal_allocator()->Deallocate(cache, ptr);
}
-#endif // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
-
static void NORETURN ReportInternalAllocatorOutOfMemory(uptr requested_size) {
SetAllocatorOutOfMemory();
Report("FATAL: %s: internal allocator is out of memory trying to allocate "
@@ -187,22 +126,36 @@ void InternalFree(void *addr, InternalAllocatorCache *cache) {
RawInternalFree(addr, cache);
}
+void InternalAllocatorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
+ internal_allocator_cache_mu.Lock();
+ internal_allocator()->ForceLock();
+}
+
+void InternalAllocatorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
+ internal_allocator()->ForceUnlock();
+ internal_allocator_cache_mu.Unlock();
+}
+
// LowLevelAllocator
constexpr uptr kLowLevelAllocatorDefaultAlignment = 8;
+constexpr uptr kMinNumPagesRounded = 16;
+constexpr uptr kMinRoundedSize = 65536;
static uptr low_level_alloc_min_alignment = kLowLevelAllocatorDefaultAlignment;
static LowLevelAllocateCallback low_level_alloc_callback;
+static LowLevelAllocator Alloc;
+LowLevelAllocator &GetGlobalLowLevelAllocator() { return Alloc; }
+
void *LowLevelAllocator::Allocate(uptr size) {
// Align allocation size.
size = RoundUpTo(size, low_level_alloc_min_alignment);
if (allocated_end_ - allocated_current_ < (sptr)size) {
- uptr size_to_allocate = RoundUpTo(size, GetPageSizeCached());
- allocated_current_ =
- (char*)MmapOrDie(size_to_allocate, __func__);
+ uptr size_to_allocate = RoundUpTo(
+ size, Min(GetPageSizeCached() * kMinNumPagesRounded, kMinRoundedSize));
+ allocated_current_ = (char *)MmapOrDie(size_to_allocate, __func__);
allocated_end_ = allocated_current_ + size_to_allocate;
if (low_level_alloc_callback) {
- low_level_alloc_callback((uptr)allocated_current_,
- size_to_allocate);
+ low_level_alloc_callback((uptr)allocated_current_, size_to_allocate);
}
}
CHECK(allocated_end_ - allocated_current_ >= (sptr)size);
@@ -247,4 +200,14 @@ void PrintHintAllocatorCannotReturnNull() {
"allocator_may_return_null=1\n");
}
+static atomic_uint8_t rss_limit_exceeded;
+
+bool IsRssLimitExceeded() {
+ return atomic_load(&rss_limit_exceeded, memory_order_relaxed);
+}
+
+void SetRssLimitExceeded(bool limit_exceeded) {
+ atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed);
+}
+
} // namespace __sanitizer
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h
index 5ec47416fe0c..0b28f86d1408 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h
@@ -14,6 +14,7 @@
#define SANITIZER_ALLOCATOR_H
#include "sanitizer_common.h"
+#include "sanitizer_flat_map.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_lfstack.h"
#include "sanitizer_libc.h"
@@ -43,12 +44,6 @@ void SetAllocatorOutOfMemory();
void PrintHintAllocatorCannotReturnNull();
-// Allocators call these callbacks on mmap/munmap.
-struct NoOpMapUnmapCallback {
- void OnMap(uptr p, uptr size) const { }
- void OnUnmap(uptr p, uptr size) const { }
-};
-
// Callback type for iterating over chunks.
typedef void (*ForEachChunkCallback)(uptr chunk, void *arg);
@@ -67,15 +62,24 @@ inline void RandomShuffle(T *a, u32 n, u32 *rand_state) {
*rand_state = state;
}
+struct NoOpMapUnmapCallback {
+ void OnMap(uptr p, uptr size) const {}
+ void OnMapSecondary(uptr p, uptr size, uptr user_begin,
+ uptr user_size) const {}
+ void OnUnmap(uptr p, uptr size) const {}
+};
+
#include "sanitizer_allocator_size_class_map.h"
#include "sanitizer_allocator_stats.h"
#include "sanitizer_allocator_primary64.h"
-#include "sanitizer_allocator_bytemap.h"
#include "sanitizer_allocator_primary32.h"
#include "sanitizer_allocator_local_cache.h"
#include "sanitizer_allocator_secondary.h"
#include "sanitizer_allocator_combined.h"
+bool IsRssLimitExceeded();
+void SetRssLimitExceeded(bool limit_exceeded);
+
} // namespace __sanitizer
#endif // SANITIZER_ALLOCATOR_H
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_bytemap.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_bytemap.h
deleted file mode 100644
index 0084bb62c83c..000000000000
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_bytemap.h
+++ /dev/null
@@ -1,107 +0,0 @@
-//===-- sanitizer_allocator_bytemap.h ---------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// Part of the Sanitizer Allocator.
-//
-//===----------------------------------------------------------------------===//
-#ifndef SANITIZER_ALLOCATOR_H
-#error This file must be included inside sanitizer_allocator.h
-#endif
-
-// Maps integers in range [0, kSize) to u8 values.
-template <u64 kSize, typename AddressSpaceViewTy = LocalAddressSpaceView>
-class FlatByteMap {
- public:
- using AddressSpaceView = AddressSpaceViewTy;
- void Init() {
- internal_memset(map_, 0, sizeof(map_));
- }
-
- void set(uptr idx, u8 val) {
- CHECK_LT(idx, kSize);
- CHECK_EQ(0U, map_[idx]);
- map_[idx] = val;
- }
- u8 operator[] (uptr idx) {
- CHECK_LT(idx, kSize);
- // FIXME: CHECK may be too expensive here.
- return map_[idx];
- }
- private:
- u8 map_[kSize];
-};
-
-// TwoLevelByteMap maps integers in range [0, kSize1*kSize2) to u8 values.
-// It is implemented as a two-dimensional array: array of kSize1 pointers
-// to kSize2-byte arrays. The secondary arrays are mmaped on demand.
-// Each value is initially zero and can be set to something else only once.
-// Setting and getting values from multiple threads is safe w/o extra locking.
-template <u64 kSize1, u64 kSize2,
- typename AddressSpaceViewTy = LocalAddressSpaceView,
- class MapUnmapCallback = NoOpMapUnmapCallback>
-class TwoLevelByteMap {
- public:
- using AddressSpaceView = AddressSpaceViewTy;
- void Init() {
- internal_memset(map1_, 0, sizeof(map1_));
- mu_.Init();
- }
-
- void TestOnlyUnmap() {
- for (uptr i = 0; i < kSize1; i++) {
- u8 *p = Get(i);
- if (!p) continue;
- MapUnmapCallback().OnUnmap(reinterpret_cast<uptr>(p), kSize2);
- UnmapOrDie(p, kSize2);
- }
- }
-
- uptr size() const { return kSize1 * kSize2; }
- uptr size1() const { return kSize1; }
- uptr size2() const { return kSize2; }
-
- void set(uptr idx, u8 val) {
- CHECK_LT(idx, kSize1 * kSize2);
- u8 *map2 = GetOrCreate(idx / kSize2);
- CHECK_EQ(0U, map2[idx % kSize2]);
- map2[idx % kSize2] = val;
- }
-
- u8 operator[] (uptr idx) const {
- CHECK_LT(idx, kSize1 * kSize2);
- u8 *map2 = Get(idx / kSize2);
- if (!map2) return 0;
- auto value_ptr = AddressSpaceView::Load(&map2[idx % kSize2]);
- return *value_ptr;
- }
-
- private:
- u8 *Get(uptr idx) const {
- CHECK_LT(idx, kSize1);
- return reinterpret_cast<u8 *>(
- atomic_load(&map1_[idx], memory_order_acquire));
- }
-
- u8 *GetOrCreate(uptr idx) {
- u8 *res = Get(idx);
- if (!res) {
- SpinMutexLock l(&mu_);
- if (!(res = Get(idx))) {
- res = (u8*)MmapOrDie(kSize2, "TwoLevelByteMap");
- MapUnmapCallback().OnMap(reinterpret_cast<uptr>(res), kSize2);
- atomic_store(&map1_[idx], reinterpret_cast<uptr>(res),
- memory_order_release);
- }
- }
- return res;
- }
-
- atomic_uintptr_t map1_[kSize1];
- StaticSpinMutex mu_;
-};
-
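The deleted maps are superseded by the containers in sanitizer_flat_map.h included above; the primary32 hunks below switch to its contains()/operator[] API. The underlying idea of TwoLevelByteMap — a first-level array of atomic pointers to lazily allocated second-level byte arrays, published with release/acquire so readers never lock — survives there. A standalone sketch of that scheme, using std::calloc and std::mutex in place of the internal MmapOrDie and StaticSpinMutex:

    #include <atomic>
    #include <cstdint>
    #include <cstdlib>
    #include <mutex>

    template <uint64_t kSize1, uint64_t kSize2>
    class TwoLevelByteMapSketch {
     public:
      void set(uint64_t idx, uint8_t val) {
        GetOrCreate(idx / kSize2)[idx % kSize2] = val;
      }
      uint8_t get(uint64_t idx) const {
        uint8_t *leaf = map1_[idx / kSize2].load(std::memory_order_acquire);
        return leaf ? leaf[idx % kSize2] : 0;  // unset entries read as zero
      }

     private:
      uint8_t *GetOrCreate(uint64_t i) {
        uint8_t *leaf = map1_[i].load(std::memory_order_acquire);
        if (!leaf) {
          std::lock_guard<std::mutex> l(mu_);
          leaf = map1_[i].load(std::memory_order_relaxed);
          if (!leaf) {  // double-checked: allocate the leaf on first use
            leaf = static_cast<uint8_t *>(std::calloc(kSize2, 1));
            map1_[i].store(leaf, std::memory_order_release);
          }
        }
        return leaf;
      }
      std::atomic<uint8_t *> map1_[kSize1] = {};  // zero-initialized roots
      std::mutex mu_;
    };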
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
index 0e81e6764f9a..49940d9b5d50 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
@@ -29,9 +29,9 @@ class CombinedAllocator {
LargeMmapAllocatorPtrArray,
typename PrimaryAllocator::AddressSpaceView>;
- void InitLinkerInitialized(s32 release_to_os_interval_ms) {
- stats_.InitLinkerInitialized();
- primary_.Init(release_to_os_interval_ms);
+ void InitLinkerInitialized(s32 release_to_os_interval_ms,
+ uptr heap_start = 0) {
+ primary_.Init(release_to_os_interval_ms, heap_start);
secondary_.InitLinkerInitialized();
}
@@ -112,15 +112,13 @@ class CombinedAllocator {
return new_p;
}
- bool PointerIsMine(void *p) {
+ bool PointerIsMine(const void *p) const {
if (primary_.PointerIsMine(p))
return true;
return secondary_.PointerIsMine(p);
}
- bool FromPrimary(void *p) {
- return primary_.PointerIsMine(p);
- }
+ bool FromPrimary(const void *p) const { return primary_.PointerIsMine(p); }
void *GetMetaData(const void *p) {
if (primary_.PointerIsMine(p))
@@ -136,7 +134,7 @@ class CombinedAllocator {
// This function does the same as GetBlockBegin, but is much faster.
// Must be called with the allocator locked.
- void *GetBlockBeginFastLocked(void *p) {
+ void *GetBlockBeginFastLocked(const void *p) {
if (primary_.PointerIsMine(p))
return primary_.GetBlockBegin(p);
return secondary_.GetBlockBeginFastLocked(p);
@@ -177,12 +175,12 @@ class CombinedAllocator {
// ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
// introspection API.
- void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
+ void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
primary_.ForceLock();
secondary_.ForceLock();
}
- void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
+ void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
secondary_.ForceUnlock();
primary_.ForceUnlock();
}
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_dlsym.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_dlsym.h
new file mode 100644
index 000000000000..92b1373ef84d
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_dlsym.h
@@ -0,0 +1,79 @@
+//===-- sanitizer_allocator_dlsym.h -----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Hack: during initialization the sanitizer calls dlsym, which may need to
+// allocate memory and call back into the not-yet-initialized sanitizer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ALLOCATOR_DLSYM_H
+#define SANITIZER_ALLOCATOR_DLSYM_H
+
+#include "sanitizer_allocator_internal.h"
+
+namespace __sanitizer {
+
+template <typename Details>
+struct DlSymAllocator {
+ static bool Use() {
+ // Fuchsia doesn't use dlsym-based interceptors.
+ return !SANITIZER_FUCHSIA && UNLIKELY(Details::UseImpl());
+ }
+
+ static bool PointerIsMine(const void *ptr) {
+ // Fuchsia doesn't use dlsym-based interceptors.
+ return !SANITIZER_FUCHSIA &&
+ UNLIKELY(internal_allocator()->FromPrimary(ptr));
+ }
+
+ static void *Allocate(uptr size_in_bytes) {
+ void *ptr = InternalAlloc(size_in_bytes, nullptr, kWordSize);
+ CHECK(internal_allocator()->FromPrimary(ptr));
+ Details::OnAllocate(ptr,
+ internal_allocator()->GetActuallyAllocatedSize(ptr));
+ return ptr;
+ }
+
+ static void *Callocate(SIZE_T nmemb, SIZE_T size) {
+ void *ptr = InternalCalloc(nmemb, size);
+ CHECK(internal_allocator()->FromPrimary(ptr));
+ Details::OnAllocate(ptr,
+ internal_allocator()->GetActuallyAllocatedSize(ptr));
+ return ptr;
+ }
+
+ static void Free(void *ptr) {
+ uptr size = internal_allocator()->GetActuallyAllocatedSize(ptr);
+ Details::OnFree(ptr, size);
+ InternalFree(ptr);
+ }
+
+ static void *Realloc(void *ptr, uptr new_size) {
+ if (!ptr)
+ return Allocate(new_size);
+ CHECK(internal_allocator()->FromPrimary(ptr));
+ if (!new_size) {
+ Free(ptr);
+ return nullptr;
+ }
+ uptr size = internal_allocator()->GetActuallyAllocatedSize(ptr);
+ uptr memcpy_size = Min(new_size, size);
+ void *new_ptr = Allocate(new_size);
+ if (new_ptr)
+ internal_memcpy(new_ptr, ptr, memcpy_size);
+ Free(ptr);
+ return new_ptr;
+ }
+
+ static void OnAllocate(const void *ptr, uptr size) {}
+ static void OnFree(const void *ptr, uptr size) {}
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_ALLOCATOR_DLSYM_H
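The Details parameter is a CRTP hook: UseImpl() decides when the early (pre-init) path is live, and OnAllocate/OnFree default to the no-ops above. A sketch of a tool-side instantiation; the names DlsymAlloc and runtime_inited are hypothetical, though the pattern mirrors how the malloc interceptors consume this header:

    #include "sanitizer_allocator_dlsym.h"

    using namespace __sanitizer;

    struct DlsymAlloc : public DlSymAllocator<DlsymAlloc> {
      // Route allocations through the internal allocator until the real
      // interceptors are ready.
      static bool UseImpl() { return !runtime_inited; }
      static bool runtime_inited;
    };
    bool DlsymAlloc::runtime_inited = false;

    // In a malloc interceptor, before the runtime is up:
    //   if (DlsymAlloc::Use())
    //     return DlsymAlloc::Allocate(size);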
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_interface.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_interface.h
index c1b27563e2fc..de2b271fb0ed 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_interface.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_interface.h
@@ -21,8 +21,12 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_estimated_allocated_size(uptr size);
SANITIZER_INTERFACE_ATTRIBUTE int __sanitizer_get_ownership(const void *p);
+SANITIZER_INTERFACE_ATTRIBUTE const void *__sanitizer_get_allocated_begin(
+ const void *p);
SANITIZER_INTERFACE_ATTRIBUTE uptr
__sanitizer_get_allocated_size(const void *p);
+SANITIZER_INTERFACE_ATTRIBUTE uptr
+__sanitizer_get_allocated_size_fast(const void *p);
SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_current_allocated_bytes();
SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_heap_size();
SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_free_bytes();
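The two new entry points pair up: __sanitizer_get_allocated_begin maps any interior pointer of a live chunk back to its start (or returns null), while __sanitizer_get_allocated_size_fast is the cheaper size query that is only valid on such a chunk start. A usage sketch against the public <sanitizer/allocator_interface.h> header, assuming a runtime new enough to export both:

    #include <sanitizer/allocator_interface.h>

    #include <cstdio>
    #include <cstdlib>

    int main() {
      char *p = static_cast<char *>(std::malloc(64));
      // Map an interior pointer back to the chunk start...
      const void *begin = __sanitizer_get_allocated_begin(p + 10);
      // ...then query the size; the _fast variant requires the chunk start.
      size_t size = __sanitizer_get_allocated_size_fast(begin);
      std::printf("begin=%p size=%zu\n", begin, size);
      std::free(p);
      return 0;
    }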
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_internal.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_internal.h
index 32849036fd04..62523c7ae187 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_internal.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_internal.h
@@ -48,8 +48,9 @@ void *InternalReallocArray(void *p, uptr count, uptr size,
void *InternalCalloc(uptr count, uptr size,
InternalAllocatorCache *cache = nullptr);
void InternalFree(void *p, InternalAllocatorCache *cache = nullptr);
+void InternalAllocatorLock();
+void InternalAllocatorUnlock();
InternalAllocator *internal_allocator();
-
} // namespace __sanitizer
#endif // SANITIZER_ALLOCATOR_INTERNAL_H
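InternalAllocatorLock/InternalAllocatorUnlock exist so the internal allocator can be held across fork(), keeping the child from inheriting a mutex owned by a thread that no longer exists there. A hypothetical sketch of the wiring; the runtimes do the equivalent inside their fork handling, so the registration point here is illustrative only:

    #include <pthread.h>

    namespace __sanitizer {
    void InternalAllocatorLock();
    void InternalAllocatorUnlock();
    }  // namespace __sanitizer

    void InstallForkHandlers() {
      // prepare: lock before fork; parent/child: unlock once fork returns.
      pthread_atfork(__sanitizer::InternalAllocatorLock,
                     __sanitizer::InternalAllocatorUnlock,
                     __sanitizer::InternalAllocatorUnlock);
    }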
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h
index 38d2a7d117fb..52fe3fe3d15b 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h
@@ -189,7 +189,7 @@ class SizeClassAllocator32 {
sci->free_list.push_front(b);
}
- bool PointerIsMine(const void *p) {
+ bool PointerIsMine(const void *p) const {
uptr mem = reinterpret_cast<uptr>(p);
if (SANITIZER_SIGN_EXTENDED_ADDRESSES)
mem &= (kSpaceSize - 1);
@@ -198,8 +198,9 @@ class SizeClassAllocator32 {
return GetSizeClass(p) != 0;
}
- uptr GetSizeClass(const void *p) {
- return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
+ uptr GetSizeClass(const void *p) const {
+ uptr id = ComputeRegionId(reinterpret_cast<uptr>(p));
+ return possible_regions.contains(id) ? possible_regions[id] : 0;
}
void *GetBlockBegin(const void *p) {
@@ -237,13 +238,13 @@ class SizeClassAllocator32 {
// ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
// introspection API.
- void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
+ void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
for (uptr i = 0; i < kNumClasses; i++) {
GetSizeClassInfo(i)->mutex.Lock();
}
}
- void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
+ void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
for (int i = kNumClasses - 1; i >= 0; i--) {
GetSizeClassInfo(i)->mutex.Unlock();
}
@@ -251,9 +252,9 @@ class SizeClassAllocator32 {
// Iterate over all existing chunks.
// The allocator must be locked when calling this function.
- void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+ void ForEachChunk(ForEachChunkCallback callback, void *arg) const {
for (uptr region = 0; region < kNumPossibleRegions; region++)
- if (possible_regions[region]) {
+ if (possible_regions.contains(region) && possible_regions[region]) {
uptr chunk_size = ClassIdToSize(possible_regions[region]);
uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
uptr region_beg = region * kRegionSize;
@@ -292,9 +293,7 @@ class SizeClassAllocator32 {
return res;
}
- uptr ComputeRegionBeg(uptr mem) {
- return mem & ~(kRegionSize - 1);
- }
+ uptr ComputeRegionBeg(uptr mem) const { return mem & ~(kRegionSize - 1); }
uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
DCHECK_LT(class_id, kNumClasses);
@@ -305,7 +304,7 @@ class SizeClassAllocator32 {
MapUnmapCallback().OnMap(res, kRegionSize);
stat->Add(AllocatorStatMapped, kRegionSize);
CHECK(IsAligned(res, kRegionSize));
- possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
+ possible_regions[ComputeRegionId(res)] = class_id;
return res;
}
@@ -354,7 +353,7 @@ class SizeClassAllocator32 {
DCHECK_GT(max_count, 0);
TransferBatch *b = nullptr;
constexpr uptr kShuffleArraySize = 48;
- uptr shuffle_array[kShuffleArraySize];
+ UNINITIALIZED uptr shuffle_array[kShuffleArraySize];
uptr count = 0;
for (uptr i = region; i < region + n_chunks * size; i += size) {
shuffle_array[count++] = i;
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
index b142ee0131b2..34a64f26478f 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
@@ -161,7 +161,7 @@ class SizeClassAllocator64 {
void ForceReleaseToOS() {
MemoryMapperT memory_mapper(*this);
for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
- BlockingMutexLock l(&GetRegionInfo(class_id)->mutex);
+ Lock l(&GetRegionInfo(class_id)->mutex);
MaybeReleaseToOS(&memory_mapper, class_id, true /*force*/);
}
}
@@ -178,7 +178,7 @@ class SizeClassAllocator64 {
uptr region_beg = GetRegionBeginBySizeClass(class_id);
CompactPtrT *free_array = GetFreeArray(region_beg);
- BlockingMutexLock l(&region->mutex);
+ Lock l(&region->mutex);
uptr old_num_chunks = region->num_freed_chunks;
uptr new_num_freed_chunks = old_num_chunks + n_chunks;
// Failure to allocate free array space while releasing memory is non
@@ -204,7 +204,7 @@ class SizeClassAllocator64 {
uptr region_beg = GetRegionBeginBySizeClass(class_id);
CompactPtrT *free_array = GetFreeArray(region_beg);
- BlockingMutexLock l(&region->mutex);
+ Lock l(&region->mutex);
#if SANITIZER_WINDOWS
/* On Windows unmapping of memory during __sanitizer_purge_allocator is
explicit and immediate, so unmapped regions must be explicitly mapped back
@@ -282,6 +282,8 @@ class SizeClassAllocator64 {
CHECK(kMetadataSize);
uptr class_id = GetSizeClass(p);
uptr size = ClassIdToSize(class_id);
+ if (!size)
+ return nullptr;
uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
uptr region_beg = GetRegionBeginBySizeClass(class_id);
return reinterpret_cast<void *>(GetMetadataEnd(region_beg) -
@@ -300,9 +302,8 @@ class SizeClassAllocator64 {
UnmapWithCallbackOrDie((uptr)address_range.base(), address_range.size());
}
- static void FillMemoryProfile(uptr start, uptr rss, bool file, uptr *stats,
- uptr stats_size) {
- for (uptr class_id = 0; class_id < stats_size; class_id++)
+ static void FillMemoryProfile(uptr start, uptr rss, bool file, uptr *stats) {
+ for (uptr class_id = 0; class_id < kNumClasses; class_id++)
if (stats[class_id] == start)
stats[class_id] = rss;
}
@@ -315,7 +316,7 @@ class SizeClassAllocator64 {
Printf(
"%s %02zd (%6zd): mapped: %6zdK allocs: %7zd frees: %7zd inuse: %6zd "
"num_freed_chunks %7zd avail: %6zd rss: %6zdK releases: %6zd "
- "last released: %6zdK region: 0x%zx\n",
+ "last released: %6lldK region: 0x%zx\n",
region->exhausted ? "F" : " ", class_id, ClassIdToSize(class_id),
region->mapped_user >> 10, region->stats.n_allocated,
region->stats.n_freed, in_use, region->num_freed_chunks, avail_chunks,
@@ -328,7 +329,7 @@ class SizeClassAllocator64 {
uptr rss_stats[kNumClasses];
for (uptr class_id = 0; class_id < kNumClasses; class_id++)
rss_stats[class_id] = SpaceBeg() + kRegionSize * class_id;
- GetMemoryProfile(FillMemoryProfile, rss_stats, kNumClasses);
+ GetMemoryProfile(FillMemoryProfile, rss_stats);
uptr total_mapped = 0;
uptr total_rss = 0;
@@ -353,13 +354,13 @@ class SizeClassAllocator64 {
// ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
// introspection API.
- void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
+ void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
for (uptr i = 0; i < kNumClasses; i++) {
GetRegionInfo(i)->mutex.Lock();
}
}
- void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
+ void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
for (int i = (int)kNumClasses - 1; i >= 0; i--) {
GetRegionInfo(i)->mutex.Unlock();
}
@@ -623,7 +624,7 @@ class SizeClassAllocator64 {
static const uptr kRegionSize = kSpaceSize / kNumClassesRounded;
// FreeArray is the array of free-d chunks (stored as 4-byte offsets).
- // In the worst case it may reguire kRegionSize/SizeClassMap::kMinSize
+ // In the worst case it may require kRegionSize/SizeClassMap::kMinSize
// elements, but in reality this will not happen. For simplicity we
// dedicate 1/8 of the region's virtual space to FreeArray.
static const uptr kFreeArraySize = kRegionSize / 8;
@@ -634,16 +635,17 @@ class SizeClassAllocator64 {
return kUsingConstantSpaceBeg ? kSpaceBeg : NonConstSpaceBeg;
}
uptr SpaceEnd() const { return SpaceBeg() + kSpaceSize; }
- // kRegionSize must be >= 2^32.
- COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
+ // kRegionSize should be able to satisfy the largest size class.
+ static_assert(kRegionSize >= SizeClassMap::kMaxSize,
+ "Region size exceed largest size");
// kRegionSize must be <= 2^36, see CompactPtrT.
COMPILER_CHECK((kRegionSize) <= (1ULL << (SANITIZER_WORDSIZE / 2 + 4)));
// Call mmap for user memory with at least this size.
- static const uptr kUserMapSize = 1 << 16;
+ static const uptr kUserMapSize = 1 << 18;
// Call mmap for metadata memory with at least this size.
static const uptr kMetaMapSize = 1 << 16;
// Call mmap for free array memory with at least this size.
- static const uptr kFreeArrayMapSize = 1 << 16;
+ static const uptr kFreeArrayMapSize = 1 << 18;
atomic_sint32_t release_to_os_interval_ms_;
@@ -665,7 +667,7 @@ class SizeClassAllocator64 {
};
struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) RegionInfo {
- BlockingMutex mutex;
+ Mutex mutex;
uptr num_freed_chunks; // Number of elements in the freearray.
uptr mapped_free_array; // Bytes mapped for freearray.
uptr allocated_user; // Bytes allocated for user memory.
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_report.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_report.cpp
index 1c6520819ef9..129f925e6fb6 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_report.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_report.cpp
@@ -128,8 +128,7 @@ void NORETURN ReportAllocationSizeTooBig(uptr user_size, uptr max_size,
void NORETURN ReportOutOfMemory(uptr requested_size, const StackTrace *stack) {
{
ScopedAllocatorErrorReport report("out-of-memory", stack);
- Report("ERROR: %s: allocator is out of memory trying to allocate 0x%zx "
- "bytes\n", SanitizerToolName, requested_size);
+ ERROR_OOM("allocator is trying to allocate 0x%zx bytes\n", requested_size);
}
Die();
}
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h
index dd34fe85cc3a..0607819e7ef7 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h
@@ -82,7 +82,7 @@ class LargeMmapAllocator {
InitLinkerInitialized();
}
- void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
+ void *Allocate(AllocatorStats *stat, const uptr size, uptr alignment) {
CHECK(IsPowerOfTwo(alignment));
uptr map_size = RoundUpMapSize(size);
if (alignment > page_size_)
@@ -99,11 +99,11 @@ class LargeMmapAllocator {
if (!map_beg)
return nullptr;
CHECK(IsAligned(map_beg, page_size_));
- MapUnmapCallback().OnMap(map_beg, map_size);
uptr map_end = map_beg + map_size;
uptr res = map_beg + page_size_;
if (res & (alignment - 1)) // Align.
res += alignment - (res & (alignment - 1));
+ MapUnmapCallback().OnMapSecondary(map_beg, map_size, res, size);
CHECK(IsAligned(res, alignment));
CHECK(IsAligned(res, page_size_));
CHECK_GE(res + size, map_beg);
@@ -161,7 +161,7 @@ class LargeMmapAllocator {
return res;
}
- bool PointerIsMine(const void *p) {
+ bool PointerIsMine(const void *p) const {
return GetBlockBegin(p) != nullptr;
}
@@ -179,7 +179,7 @@ class LargeMmapAllocator {
return GetHeader(p) + 1;
}
- void *GetBlockBegin(const void *ptr) {
+ void *GetBlockBegin(const void *ptr) const {
uptr p = reinterpret_cast<uptr>(ptr);
SpinMutexLock l(&mutex_);
uptr nearest_chunk = 0;
@@ -215,7 +215,7 @@ class LargeMmapAllocator {
// This function does the same as GetBlockBegin, but is much faster.
// Must be called with the allocator locked.
- void *GetBlockBeginFastLocked(void *ptr) {
+ void *GetBlockBeginFastLocked(const void *ptr) {
mutex_.CheckLocked();
uptr p = reinterpret_cast<uptr>(ptr);
uptr n = n_chunks_;
@@ -267,9 +267,9 @@ class LargeMmapAllocator {
// ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
// introspection API.
- void ForceLock() ACQUIRE(mutex_) { mutex_.Lock(); }
+ void ForceLock() SANITIZER_ACQUIRE(mutex_) { mutex_.Lock(); }
- void ForceUnlock() RELEASE(mutex_) { mutex_.Unlock(); }
+ void ForceUnlock() SANITIZER_RELEASE(mutex_) { mutex_.Unlock(); }
// Iterate over all existing chunks.
// The allocator must be locked when calling this function.
@@ -301,7 +301,7 @@ class LargeMmapAllocator {
return GetHeader(reinterpret_cast<uptr>(p));
}
- void *GetUser(const Header *h) {
+ void *GetUser(const Header *h) const {
CHECK(IsAligned((uptr)h, page_size_));
return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
}
@@ -318,5 +318,5 @@ class LargeMmapAllocator {
struct Stats {
uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
} stats;
- StaticSpinMutex mutex_;
+ mutable StaticSpinMutex mutex_;
};
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_size_class_map.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_size_class_map.h
index c50d13303ede..361793f2490a 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_size_class_map.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_size_class_map.h
@@ -193,13 +193,13 @@ class SizeClassMap {
uptr cached = MaxCachedHint(s) * s;
if (i == kBatchClassID)
d = p = l = 0;
- Printf("c%02zd => s: %zd diff: +%zd %02zd%% l %zd "
- "cached: %zd %zd; id %zd\n",
- i, Size(i), d, p, l, MaxCachedHint(s), cached, ClassID(s));
+ Printf(
+ "c%02zu => s: %zu diff: +%zu %02zu%% l %zu cached: %zu %zu; id %zu\n",
+ i, Size(i), d, p, l, MaxCachedHint(s), cached, ClassID(s));
total_cached += cached;
prev_s = s;
}
- Printf("Total cached: %zd\n", total_cached);
+ Printf("Total cached: %zu\n", total_cached);
}
static void Validate() {
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_stats.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_stats.h
index 6f14e3863c31..ae4dac9c8c96 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_stats.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_allocator_stats.h
@@ -25,19 +25,13 @@ typedef uptr AllocatorStatCounters[AllocatorStatCount];
// Per-thread stats, live in per-thread cache.
class AllocatorStats {
public:
- void Init() {
- internal_memset(this, 0, sizeof(*this));
- }
- void InitLinkerInitialized() {}
-
+ void Init() { internal_memset(this, 0, sizeof(*this)); }
void Add(AllocatorStat i, uptr v) {
- v += atomic_load(&stats_[i], memory_order_relaxed);
- atomic_store(&stats_[i], v, memory_order_relaxed);
+ atomic_fetch_add(&stats_[i], v, memory_order_relaxed);
}
void Sub(AllocatorStat i, uptr v) {
- v = atomic_load(&stats_[i], memory_order_relaxed) - v;
- atomic_store(&stats_[i], v, memory_order_relaxed);
+ atomic_fetch_sub(&stats_[i], v, memory_order_relaxed);
}
void Set(AllocatorStat i, uptr v) {
@@ -58,17 +52,13 @@ class AllocatorStats {
// Global stats, used for aggregation and querying.
class AllocatorGlobalStats : public AllocatorStats {
public:
- void InitLinkerInitialized() {
- next_ = this;
- prev_ = this;
- }
void Init() {
internal_memset(this, 0, sizeof(*this));
- InitLinkerInitialized();
}
void Register(AllocatorStats *s) {
SpinMutexLock l(&mu_);
+ LazyInit();
s->next_ = next_;
s->prev_ = this;
next_->prev_ = s;
@@ -87,7 +77,7 @@ class AllocatorGlobalStats : public AllocatorStats {
internal_memset(s, 0, AllocatorStatCount * sizeof(uptr));
SpinMutexLock l(&mu_);
const AllocatorStats *stats = this;
- for (;;) {
+ for (; stats;) {
for (int i = 0; i < AllocatorStatCount; i++)
s[i] += stats->Get(AllocatorStat(i));
stats = stats->next_;
@@ -100,6 +90,13 @@ class AllocatorGlobalStats : public AllocatorStats {
}
private:
+ void LazyInit() {
+ if (!next_) {
+ next_ = this;
+ prev_ = this;
+ }
+ }
+
mutable StaticSpinMutex mu_;
};
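The move to atomic_fetch_add/atomic_fetch_sub matters because the old load-then-store pair could lose updates when two threads bumped the same counter concurrently. A standalone demonstration of the repaired pattern:

    #include <atomic>
    #include <cstdio>
    #include <thread>

    std::atomic<unsigned long> counter{0};

    // Old pattern (not called): two threads can read the same value and
    // one increment is lost.
    void racy_add(unsigned long v) {
      unsigned long cur = counter.load(std::memory_order_relaxed);
      counter.store(cur + v, std::memory_order_relaxed);
    }
    // New pattern: a single read-modify-write, never loses updates.
    void atomic_add(unsigned long v) {
      counter.fetch_add(v, std::memory_order_relaxed);
    }

    int main() {
      auto work = [] { for (int i = 0; i < 100000; i++) atomic_add(1); };
      std::thread t1(work), t2(work);
      t1.join(); t2.join();
      std::printf("%lu\n", counter.load());  // always 200000
      return 0;
    }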
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_array_ref.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_array_ref.h
new file mode 100644
index 000000000000..28d125383da4
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_array_ref.h
@@ -0,0 +1,123 @@
+//===-- sanitizer_array_ref.h -----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ARRAY_REF_H
+#define SANITIZER_ARRAY_REF_H
+
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+
+/// ArrayRef - Represent a constant reference to an array (0 or more elements
+/// consecutively in memory), i.e. a start pointer and a length. It allows
+/// various APIs to take consecutive elements easily and conveniently.
+///
+/// This class does not own the underlying data, it is expected to be used in
+/// situations where the data resides in some other buffer, whose lifetime
+/// extends past that of the ArrayRef. For this reason, it is not in general
+/// safe to store an ArrayRef.
+///
+/// This is intended to be trivially copyable, so it should be passed by
+/// value.
+template <typename T>
+class ArrayRef {
+ public:
+ constexpr ArrayRef() {}
+ constexpr ArrayRef(const T *begin, const T *end) : begin_(begin), end_(end) {
+ DCHECK(empty() || begin);
+ }
+ constexpr ArrayRef(const T *data, uptr length)
+ : ArrayRef(data, data + length) {}
+ template <uptr N>
+ constexpr ArrayRef(const T (&src)[N]) : ArrayRef(src, src + N) {}
+ template <typename C>
+ constexpr ArrayRef(const C &src)
+ : ArrayRef(src.data(), src.data() + src.size()) {}
+ ArrayRef(const T &one_elt) : ArrayRef(&one_elt, &one_elt + 1) {}
+
+ const T *data() const { return empty() ? nullptr : begin_; }
+
+ const T *begin() const { return begin_; }
+ const T *end() const { return end_; }
+
+ bool empty() const { return begin_ == end_; }
+
+ uptr size() const { return end_ - begin_; }
+
+ /// equals - Check for element-wise equality.
+ bool equals(ArrayRef rhs) const {
+ if (size() != rhs.size())
+ return false;
+ auto r = rhs.begin();
+ for (auto &l : *this) {
+ if (!(l == *r))
+ return false;
+ ++r;
+ }
+ return true;
+ }
+
+ /// slice(n, m) - Chop off the first N elements of the array, and keep M
+ /// elements in the array.
+ ArrayRef<T> slice(uptr N, uptr M) const {
+ DCHECK_LE(N + M, size());
+ return ArrayRef<T>(data() + N, M);
+ }
+
+ /// slice(n) - Chop off the first N elements of the array.
+ ArrayRef<T> slice(uptr N) const { return slice(N, size() - N); }
+
+ /// Drop the first \p N elements of the array.
+ ArrayRef<T> drop_front(uptr N = 1) const {
+ DCHECK_GE(size(), N);
+ return slice(N, size() - N);
+ }
+
+ /// Drop the last \p N elements of the array.
+ ArrayRef<T> drop_back(uptr N = 1) const {
+ DCHECK_GE(size(), N);
+ return slice(0, size() - N);
+ }
+
+ /// Return a copy of *this with only the first \p N elements.
+ ArrayRef<T> take_front(uptr N = 1) const {
+ if (N >= size())
+ return *this;
+ return drop_back(size() - N);
+ }
+
+ /// Return a copy of *this with only the last \p N elements.
+ ArrayRef<T> take_back(uptr N = 1) const {
+ if (N >= size())
+ return *this;
+ return drop_front(size() - N);
+ }
+
+ const T &operator[](uptr index) const {
+ DCHECK_LT(index, size());
+ return begin_[index];
+ }
+
+ private:
+ const T *begin_ = nullptr;
+ const T *end_ = nullptr;
+};
+
+template <typename T>
+inline bool operator==(ArrayRef<T> lhs, ArrayRef<T> rhs) {
+ return lhs.equals(rhs);
+}
+
+template <typename T>
+inline bool operator!=(ArrayRef<T> lhs, ArrayRef<T> rhs) {
+ return !(lhs == rhs);
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_ARRAY_REF_H
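A short usage sketch of the slicing API defined above; illustrative only, with the header included the way in-tree sanitizer_common code would:

    #include "sanitizer_array_ref.h"

    using namespace __sanitizer;

    void Demo() {
      int data[] = {1, 2, 3, 4, 5};
      ArrayRef<int> all(data);                 // non-owning view, no copy
      ArrayRef<int> mid = all.slice(1, 3);     // {2, 3, 4}
      ArrayRef<int> tail = all.drop_front(2);  // {3, 4, 5}
      bool same = (mid == all.slice(1, 3));    // element-wise equality
      (void)tail;
      (void)same;
    }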
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_asm.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_asm.h
index 803af3285e18..3af66a4e4499 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_asm.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_asm.h
@@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
//
-// Various support for assemebler.
+// Various support for assembler.
//
//===----------------------------------------------------------------------===//
@@ -42,13 +42,84 @@
# define CFI_RESTORE(reg)
#endif
+#if defined(__x86_64__) || defined(__i386__) || defined(__sparc__)
+# define ASM_TAIL_CALL jmp
+#elif defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+ defined(__powerpc__) || defined(__loongarch_lp64)
+# define ASM_TAIL_CALL b
+#elif defined(__s390__)
+# define ASM_TAIL_CALL jg
+#elif defined(__riscv)
+# define ASM_TAIL_CALL tail
+#endif
+
+// Currently, almost all of the shared libraries rely on the value of
+// $t9 to get the address of the current function, instead of PCREL, even
+// on MIPSr6. To be compatible with them, we have to set $t9 properly.
+// MIPS uses GOT to get the address of preemptible functions.
+#if defined(__mips64)
+# define C_ASM_TAIL_CALL(t_func, i_func) \
+ "lui $t8, %hi(%neg(%gp_rel(" t_func ")))\n" \
+ "daddu $t8, $t8, $t9\n" \
+ "daddiu $t8, $t8, %lo(%neg(%gp_rel(" t_func ")))\n" \
+ "ld $t9, %got_disp(" i_func ")($t8)\n" \
+ "jr $t9\n"
+#elif defined(__mips__)
+# define C_ASM_TAIL_CALL(t_func, i_func) \
+ ".set noreorder\n" \
+ ".cpload $t9\n" \
+ ".set reorder\n" \
+ "lw $t9, %got(" i_func ")($gp)\n" \
+ "jr $t9\n"
+#elif defined(ASM_TAIL_CALL)
+# define C_ASM_TAIL_CALL(t_func, i_func) \
+ SANITIZER_STRINGIFY(ASM_TAIL_CALL) " " i_func
+#endif
+
+#if defined(__ELF__) && (defined(__x86_64__) || defined(__i386__) || \
+                         defined(__riscv))
+# define ASM_PREEMPTIBLE_SYM(sym) sym@plt
+#else
+# define ASM_PREEMPTIBLE_SYM(sym) sym
+#endif
+
#if !defined(__APPLE__)
# define ASM_HIDDEN(symbol) .hidden symbol
-# define ASM_TYPE_FUNCTION(symbol) .type symbol, %function
+# if defined(__arm__) || defined(__aarch64__)
+# define ASM_TYPE_FUNCTION(symbol) .type symbol, %function
+# else
+# define ASM_TYPE_FUNCTION(symbol) .type symbol, @function
+# endif
# define ASM_SIZE(symbol) .size symbol, .-symbol
# define ASM_SYMBOL(symbol) symbol
# define ASM_SYMBOL_INTERCEPTOR(symbol) symbol
-# define ASM_WRAPPER_NAME(symbol) __interceptor_##symbol
+# if defined(__i386__) || defined(__powerpc__) || defined(__s390__) || \
+ defined(__sparc__)
+// For details, see interception.h
+# define ASM_WRAPPER_NAME(symbol) __interceptor_##symbol
+# define ASM_TRAMPOLINE_ALIAS(symbol, name) \
+ .weak symbol; \
+ .set symbol, ASM_WRAPPER_NAME(name)
+# define ASM_INTERCEPTOR_TRAMPOLINE(name)
+# define ASM_INTERCEPTOR_TRAMPOLINE_SUPPORT 0
+# else // Architecture supports interceptor trampoline
+// Keep trampoline implementation in sync with interception/interception.h
+# define ASM_WRAPPER_NAME(symbol) ___interceptor_##symbol
+# define ASM_TRAMPOLINE_ALIAS(symbol, name) \
+ .weak symbol; \
+ .set symbol, __interceptor_trampoline_##name
+# define ASM_INTERCEPTOR_TRAMPOLINE(name) \
+ .weak __interceptor_##name; \
+ .set __interceptor_##name, ASM_WRAPPER_NAME(name); \
+ .globl __interceptor_trampoline_##name; \
+ ASM_TYPE_FUNCTION(__interceptor_trampoline_##name); \
+ __interceptor_trampoline_##name: \
+ CFI_STARTPROC; \
+ ASM_TAIL_CALL ASM_PREEMPTIBLE_SYM(__interceptor_##name); \
+ CFI_ENDPROC; \
+ ASM_SIZE(__interceptor_trampoline_##name)
+# define ASM_INTERCEPTOR_TRAMPOLINE_SUPPORT 1
+# endif // Architecture supports interceptor trampoline
#else
# define ASM_HIDDEN(symbol)
# define ASM_TYPE_FUNCTION(symbol)
@@ -61,8 +132,15 @@
#if defined(__ELF__) && (defined(__GNU__) || defined(__FreeBSD__) || \
defined(__Fuchsia__) || defined(__linux__))
// clang-format off
-#define NO_EXEC_STACK_DIRECTIVE .section .note.GNU-stack,"",%progbits // NOLINT
+#define NO_EXEC_STACK_DIRECTIVE .section .note.GNU-stack,"",%progbits
// clang-format on
#else
#define NO_EXEC_STACK_DIRECTIVE
#endif
+
+#if (defined(__x86_64__) || defined(__i386__)) && defined(__has_include) && __has_include(<cet.h>)
+#include <cet.h>
+#endif
+#ifndef _CET_ENDBR
+#define _CET_ENDBR
+#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang.h
index fc13ca52dda7..4318d64d16cf 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang.h
@@ -74,13 +74,12 @@ template <typename T>
inline bool atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp,
typename T::Type xchg,
memory_order mo) {
- typedef typename T::Type Type;
- Type cmpv = *cmp;
- Type prev;
- prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg);
- if (prev == cmpv) return true;
- *cmp = prev;
- return false;
+ // Transitioned from __sync_val_compare_and_swap to support targets like
+ // SPARC V8 that cannot inline atomic cmpxchg. __atomic_compare_exchange
+ // can then be resolved from libatomic. __ATOMIC_SEQ_CST is used to best
+ // match the __sync builtin memory order.
+ return __atomic_compare_exchange(&a->val_dont_use, cmp, &xchg, false,
+ __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}
template<typename T>
@@ -96,8 +95,8 @@ inline bool atomic_compare_exchange_weak(volatile T *a,
// This include provides explicit template instantiations for atomic_uint64_t
// on MIPS32, which does not directly support 8 byte atomics. It has to
// follow the template definitions above.
-#if defined(_MIPS_SIM) && defined(_ABIO32)
- #include "sanitizer_atomic_clang_mips.h"
+#if defined(_MIPS_SIM) && defined(_ABIO32) && _MIPS_SIM == _ABIO32
+# include "sanitizer_atomic_clang_mips.h"
#endif
#undef ATOMIC_ORDER
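The rewritten helper keeps the usual CAS contract: on failure the expected value is refreshed in place, which is what makes lock-free retry loops cheap. A standalone illustration with std::atomic; fetch_max is a hypothetical helper, not part of the runtime:

    #include <atomic>

    unsigned long fetch_max(std::atomic<unsigned long> &a, unsigned long v) {
      unsigned long cur = a.load(std::memory_order_relaxed);
      // On CAS failure, `cur` is reloaded with the freshly observed value,
      // so the loop condition is re-evaluated against current state.
      while (cur < v &&
             !a.compare_exchange_strong(cur, v, std::memory_order_seq_cst))
        ;
      return cur;
    }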
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_mips.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_mips.h
index 2b39097112d4..f3d3052e5b7c 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_mips.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_mips.h
@@ -18,7 +18,7 @@ namespace __sanitizer {
// MIPS32 does not support atomics > 4 bytes. To address this lack of
// functionality, the sanitizer library provides helper methods which use an
-// internal spin lock mechanism to emulate atomic oprations when the size is
+// internal spin lock mechanism to emulate atomic operations when the size is
// 8 bytes.
static void __spin_lock(volatile int *lock) {
while (__sync_lock_test_and_set(lock, 1))
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_chained_origin_depot.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_chained_origin_depot.cpp
index 250ac39e1301..df2b2eb23df2 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_chained_origin_depot.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_chained_origin_depot.cpp
@@ -11,16 +11,57 @@
#include "sanitizer_chained_origin_depot.h"
+#include "sanitizer_stackdepotbase.h"
+
namespace __sanitizer {
-bool ChainedOriginDepot::ChainedOriginDepotNode::eq(
- u32 hash, const args_type &args) const {
- return here_id == args.here_id && prev_id == args.prev_id;
-}
+namespace {
+struct ChainedOriginDepotDesc {
+ u32 here_id;
+ u32 prev_id;
+};
-uptr ChainedOriginDepot::ChainedOriginDepotNode::storage_size(
- const args_type &args) {
- return sizeof(ChainedOriginDepotNode);
+struct ChainedOriginDepotNode {
+ using hash_type = u32;
+ u32 link;
+ u32 here_id;
+ u32 prev_id;
+
+ typedef ChainedOriginDepotDesc args_type;
+
+ bool eq(hash_type hash, const args_type &args) const;
+
+ static uptr allocated() { return 0; }
+
+ static hash_type hash(const args_type &args);
+
+ static bool is_valid(const args_type &args);
+
+ void store(u32 id, const args_type &args, hash_type other_hash);
+
+ args_type load(u32 id) const;
+
+ struct Handle {
+ const ChainedOriginDepotNode *node_ = nullptr;
+ u32 id_ = 0;
+ Handle(const ChainedOriginDepotNode *node, u32 id) : node_(node), id_(id) {}
+ bool valid() const { return node_; }
+ u32 id() const { return id_; }
+ int here_id() const { return node_->here_id; }
+ int prev_id() const { return node_->prev_id; }
+ };
+
+ static Handle get_handle(u32 id);
+
+ typedef Handle handle_type;
+};
+
+} // namespace
+
+static StackDepotBase<ChainedOriginDepotNode, 4, 20> depot;
+
+bool ChainedOriginDepotNode::eq(hash_type hash, const args_type &args) const {
+ return here_id == args.here_id && prev_id == args.prev_id;
}
/* This is murmur2 hash for the 64->32 bit case.
@@ -36,7 +77,8 @@ uptr ChainedOriginDepot::ChainedOriginDepotNode::storage_size(
split, or one of two reserved values (-1) or (-2). Either case can
dominate depending on the workload.
*/
-u32 ChainedOriginDepot::ChainedOriginDepotNode::hash(const args_type &args) {
+ChainedOriginDepotNode::hash_type ChainedOriginDepotNode::hash(
+ const args_type &args) {
const u32 m = 0x5bd1e995;
const u32 seed = 0x9747b28c;
const u32 r = 24;
@@ -61,37 +103,33 @@ u32 ChainedOriginDepot::ChainedOriginDepotNode::hash(const args_type &args) {
return h;
}
-bool ChainedOriginDepot::ChainedOriginDepotNode::is_valid(
- const args_type &args) {
- return true;
-}
+bool ChainedOriginDepotNode::is_valid(const args_type &args) { return true; }
-void ChainedOriginDepot::ChainedOriginDepotNode::store(const args_type &args,
- u32 other_hash) {
+void ChainedOriginDepotNode::store(u32 id, const args_type &args,
+ hash_type other_hash) {
here_id = args.here_id;
prev_id = args.prev_id;
}
-ChainedOriginDepot::ChainedOriginDepotNode::args_type
-ChainedOriginDepot::ChainedOriginDepotNode::load() const {
+ChainedOriginDepotNode::args_type ChainedOriginDepotNode::load(u32 id) const {
args_type ret = {here_id, prev_id};
return ret;
}
-ChainedOriginDepot::ChainedOriginDepotNode::Handle
-ChainedOriginDepot::ChainedOriginDepotNode::get_handle() {
- return Handle(this);
+ChainedOriginDepotNode::Handle ChainedOriginDepotNode::get_handle(u32 id) {
+ return Handle(&depot.nodes[id], id);
}
ChainedOriginDepot::ChainedOriginDepot() {}
-StackDepotStats *ChainedOriginDepot::GetStats() { return depot.GetStats(); }
+StackDepotStats ChainedOriginDepot::GetStats() const {
+ return depot.GetStats();
+}
bool ChainedOriginDepot::Put(u32 here_id, u32 prev_id, u32 *new_id) {
ChainedOriginDepotDesc desc = {here_id, prev_id};
bool inserted;
- ChainedOriginDepotNode::Handle h = depot.Put(desc, &inserted);
- *new_id = h.valid() ? h.id() : 0;
+ *new_id = depot.Put(desc, &inserted);
return inserted;
}
@@ -101,8 +139,12 @@ u32 ChainedOriginDepot::Get(u32 id, u32 *other) {
return desc.here_id;
}
-void ChainedOriginDepot::LockAll() { depot.LockAll(); }
+void ChainedOriginDepot::LockBeforeFork() { depot.LockBeforeFork(); }
+
+void ChainedOriginDepot::UnlockAfterFork(bool fork_child) {
+ depot.UnlockAfterFork(fork_child);
+}
-void ChainedOriginDepot::UnlockAll() { depot.UnlockAll(); }
+void ChainedOriginDepot::TestOnlyUnmap() { depot.TestOnlyUnmap(); }
} // namespace __sanitizer
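After the refactor the depot's public surface is small: Put interns a (here_id, prev_id) pair and reports whether it was newly inserted; Get inverts it. A hedged sketch against the header above, with u32 being the sanitizer-internal 32-bit type:

    #include "sanitizer_chained_origin_depot.h"

    using __sanitizer::u32;

    static __sanitizer::ChainedOriginDepot origin_depot;

    void Demo(u32 stack_id, u32 prev_origin) {
      u32 new_id;
      bool inserted = origin_depot.Put(stack_id, prev_origin, &new_id);
      u32 prev;
      u32 here = origin_depot.Get(new_id, &prev);
      // here == stack_id and prev == prev_origin, whether or not the
      // pair was freshly interned.
      (void)inserted;
      (void)here;
    }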
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_chained_origin_depot.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_chained_origin_depot.h
index 453cdf6b5449..f3da28129e6b 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_chained_origin_depot.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_chained_origin_depot.h
@@ -13,7 +13,6 @@
#define SANITIZER_CHAINED_ORIGIN_DEPOT_H
#include "sanitizer_common.h"
-#include "sanitizer_stackdepotbase.h"
namespace __sanitizer {
@@ -22,7 +21,7 @@ class ChainedOriginDepot {
ChainedOriginDepot();
// Gets the statistic of the origin chain storage.
- StackDepotStats *GetStats();
+ StackDepotStats GetStats() const;
// Stores a chain with StackDepot ID here_id and previous chain ID prev_id.
// If successful, returns true and the new chain id new_id.
@@ -33,52 +32,11 @@ class ChainedOriginDepot {
// Retrieves the stored StackDepot ID for the given origin ID.
u32 Get(u32 id, u32 *other);
- void LockAll();
- void UnlockAll();
+ void LockBeforeFork();
+ void UnlockAfterFork(bool fork_child);
+ void TestOnlyUnmap();
private:
- struct ChainedOriginDepotDesc {
- u32 here_id;
- u32 prev_id;
- };
-
- struct ChainedOriginDepotNode {
- ChainedOriginDepotNode *link;
- u32 id;
- u32 here_id;
- u32 prev_id;
-
- typedef ChainedOriginDepotDesc args_type;
-
- bool eq(u32 hash, const args_type &args) const;
-
- static uptr storage_size(const args_type &args);
-
- static u32 hash(const args_type &args);
-
- static bool is_valid(const args_type &args);
-
- void store(const args_type &args, u32 other_hash);
-
- args_type load() const;
-
- struct Handle {
- ChainedOriginDepotNode *node_;
- Handle() : node_(nullptr) {}
- explicit Handle(ChainedOriginDepotNode *node) : node_(node) {}
- bool valid() { return node_; }
- u32 id() { return node_->id; }
- int here_id() { return node_->here_id; }
- int prev_id() { return node_->prev_id; }
- };
-
- Handle get_handle();
-
- typedef Handle handle_type;
- };
-
- StackDepotBase<ChainedOriginDepotNode, 4, 20> depot;
-
ChainedOriginDepot(const ChainedOriginDepot &) = delete;
void operator=(const ChainedOriginDepot &) = delete;
};
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common.cpp
index 5fae8e33b905..5efdd864295b 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common.cpp
@@ -11,10 +11,12 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common.h"
+
#include "sanitizer_allocator_interface.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_atomic.h"
#include "sanitizer_flags.h"
+#include "sanitizer_interface_internal.h"
#include "sanitizer_libc.h"
#include "sanitizer_placement_new.h"
@@ -44,15 +46,41 @@ void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
Die();
}
recursion_count++;
- Report("ERROR: %s failed to "
- "%s 0x%zx (%zd) bytes of %s (error code: %d)\n",
- SanitizerToolName, mmap_type, size, size, mem_type, err);
+ if (ErrorIsOOM(err)) {
+ ERROR_OOM("failed to %s 0x%zx (%zd) bytes of %s (error code: %d)\n",
+ mmap_type, size, size, mem_type, err);
+ } else {
+ Report(
+ "ERROR: %s failed to "
+ "%s 0x%zx (%zd) bytes of %s (error code: %d)\n",
+ SanitizerToolName, mmap_type, size, size, mem_type, err);
+ }
#if !SANITIZER_GO
DumpProcessMap();
#endif
UNREACHABLE("unable to mmap");
}
+void NORETURN ReportMunmapFailureAndDie(void *addr, uptr size, error_t err,
+ bool raw_report) {
+ static int recursion_count;
+ if (raw_report || recursion_count) {
+ // If raw report is requested or we went into recursion just die. The
+ // Report() and CHECK calls below may call munmap recursively and fail.
+ RawWrite("ERROR: Failed to munmap\n");
+ Die();
+ }
+ recursion_count++;
+ Report(
+ "ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p (error "
+ "code: %d)\n",
+ SanitizerToolName, size, size, addr, err);
+#if !SANITIZER_GO
+ DumpProcessMap();
+#endif
+ UNREACHABLE("unable to unmmap");
+}
+
typedef bool UptrComparisonFunction(const uptr &a, const uptr &b);
typedef bool U32ComparisonFunction(const u32 &a, const u32 &b);
@@ -87,8 +115,9 @@ void ReportErrorSummary(const char *error_message, const char *alt_tool_name) {
if (!common_flags()->print_summary)
return;
InternalScopedString buff;
- buff.append("SUMMARY: %s: %s",
- alt_tool_name ? alt_tool_name : SanitizerToolName, error_message);
+ buff.AppendF("SUMMARY: %s: %s",
+ alt_tool_name ? alt_tool_name : SanitizerToolName,
+ error_message);
__sanitizer_report_error_summary(buff.data());
}
@@ -138,13 +167,21 @@ void LoadedModule::set(const char *module_name, uptr base_address,
set(module_name, base_address);
arch_ = arch;
internal_memcpy(uuid_, uuid, sizeof(uuid_));
+ uuid_size_ = kModuleUUIDSize;
instrumented_ = instrumented;
}
+void LoadedModule::setUuid(const char *uuid, uptr size) {
+ if (size > kModuleUUIDSize)
+ size = kModuleUUIDSize;
+ internal_memcpy(uuid_, uuid, size);
+ uuid_size_ = size;
+}
+
void LoadedModule::clear() {
InternalFree(full_name_);
base_address_ = 0;
- max_executable_address_ = 0;
+ max_address_ = 0;
full_name_ = nullptr;
arch_ = kModuleArchUnknown;
internal_memset(uuid_, 0, kModuleUUIDSize);
@@ -162,8 +199,7 @@ void LoadedModule::addAddressRange(uptr beg, uptr end, bool executable,
AddressRange *r =
new(mem) AddressRange(beg, end, executable, writable, name);
ranges_.push_back(r);
- if (executable && end > max_executable_address_)
- max_executable_address_ = end;
+ max_address_ = Max(max_address_, end);
}
bool LoadedModule::containsAddress(uptr address) const {
@@ -301,18 +337,22 @@ struct MallocFreeHook {
static MallocFreeHook MFHooks[kMaxMallocFreeHooks];
-void RunMallocHooks(const void *ptr, uptr size) {
+void RunMallocHooks(void *ptr, uptr size) {
+ __sanitizer_malloc_hook(ptr, size);
for (int i = 0; i < kMaxMallocFreeHooks; i++) {
auto hook = MFHooks[i].malloc_hook;
- if (!hook) return;
+ if (!hook)
+ break;
hook(ptr, size);
}
}
-void RunFreeHooks(const void *ptr) {
+void RunFreeHooks(void *ptr) {
+ __sanitizer_free_hook(ptr);
for (int i = 0; i < kMaxMallocFreeHooks; i++) {
auto hook = MFHooks[i].free_hook;
- if (!hook) return;
+ if (!hook)
+ break;
hook(ptr);
}
}
@@ -338,6 +378,13 @@ void SleepForSeconds(unsigned seconds) {
}
void SleepForMillis(unsigned millis) { internal_usleep((u64)millis * 1000); }
+void WaitForDebugger(unsigned seconds, const char *label) {
+ if (seconds) {
+ Report("Sleeping for %u second(s) %s\n", seconds, label);
+ SleepForSeconds(seconds);
+ }
+}
+
} // namespace __sanitizer
using namespace __sanitizer;
@@ -360,4 +407,16 @@ int __sanitizer_install_malloc_and_free_hooks(void (*malloc_hook)(const void *,
void (*free_hook)(const void *)) {
return InstallMallocFreeHooks(malloc_hook, free_hook);
}
+
+// Provide default (no-op) implementations of the malloc hooks.
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook, void *ptr,
+ uptr size) {
+ (void)ptr;
+ (void)size;
+}
+
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *ptr) {
+ (void)ptr;
+}
+
} // extern "C"
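User code reaches this plumbing through __sanitizer_install_malloc_and_free_hooks, and with the weak __sanitizer_malloc_hook/__sanitizer_free_hook defaults now provided a tool may instead simply override those symbols. A usage sketch of the installed-hook route; note the public <sanitizer/allocator_interface.h> header spells the pointer parameters as const volatile void *:

    #include <sanitizer/allocator_interface.h>

    #include <cstdio>
    #include <cstdlib>

    static void MyMallocHook(const volatile void *ptr, size_t size) {
      std::fprintf(stderr, "alloc %p (%zu bytes)\n",
                   const_cast<void *>(ptr), size);
    }
    static void MyFreeHook(const volatile void *ptr) {
      std::fprintf(stderr, "free  %p\n", const_cast<void *>(ptr));
    }

    int main() {
      __sanitizer_install_malloc_and_free_hooks(MyMallocHook, MyFreeHook);
      void *p = std::malloc(16);
      std::free(p);
      return 0;
    }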
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common.h
index cbdbb0c4c4bd..b99c0cffcbb1 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common.h
@@ -16,7 +16,6 @@
#define SANITIZER_COMMON_H
#include "sanitizer_flags.h"
-#include "sanitizer_interface_internal.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
@@ -33,6 +32,7 @@ struct AddressInfo;
struct BufferedStackTrace;
struct SignalContext;
struct StackTrace;
+struct SymbolizedStack;
// Constants.
const uptr kWordSize = SANITIZER_WORDSIZE / 8;
@@ -118,9 +118,15 @@ void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
// unaccessible memory.
bool MprotectNoAccess(uptr addr, uptr size);
bool MprotectReadOnly(uptr addr, uptr size);
+bool MprotectReadWrite(uptr addr, uptr size);
void MprotectMallocZones(void *addr, int prot);
+#if SANITIZER_WINDOWS
+// Zero previously mmap'd memory. Currently used only on Windows.
+bool ZeroMmapFixedRegion(uptr fixed_addr, uptr size) WARN_UNUSED_RESULT;
+#endif
+
#if SANITIZER_LINUX
// Unmap memory. Currently only used on Linux.
void UnmapFromTo(uptr from, uptr to);
@@ -171,8 +177,8 @@ void SetShadowRegionHugePageMode(uptr addr, uptr length);
bool DontDumpShadowMemory(uptr addr, uptr length);
// Check if the built VMA size matches the runtime one.
void CheckVMASize();
-void RunMallocHooks(const void *ptr, uptr size);
-void RunFreeHooks(const void *ptr);
+void RunMallocHooks(void *ptr, uptr size);
+void RunFreeHooks(void *ptr);
class ReservedAddressRange {
public:
@@ -192,20 +198,27 @@ class ReservedAddressRange {
};
typedef void (*fill_profile_f)(uptr start, uptr rss, bool file,
- /*out*/uptr *stats, uptr stats_size);
+ /*out*/ uptr *stats);
// Parse the contents of /proc/self/smaps and generate a memory profile.
-// |cb| is a tool-specific callback that fills the |stats| array containing
-// |stats_size| elements.
-void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size);
+// |cb| is a tool-specific callback that fills the |stats| array.
+void GetMemoryProfile(fill_profile_f cb, uptr *stats);
+void ParseUnixMemoryProfile(fill_profile_f cb, uptr *stats, char *smaps,
+ uptr smaps_len);
// Simple low-level (mmap-based) allocator for internal use. Doesn't have
// constructor, so all instances of LowLevelAllocator should be
// linker initialized.
+//
+// NOTE: Prefer the singleton returned by `GetGlobalLowLevelAllocator()` over
+// creating a new instance: sharing it reduces the number of mmap fragments,
+// since all users draw from the same contiguous mapping.
class LowLevelAllocator {
public:
// Requires an external lock.
void *Allocate(uptr size);
+
private:
char *allocated_end_;
char *allocated_current_;
@@ -217,13 +230,15 @@ typedef void (*LowLevelAllocateCallback)(uptr ptr, uptr size);
// Passing NULL removes the callback.
void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);
+LowLevelAllocator &GetGlobalLowLevelAllocator();
+
// IO
void CatastrophicErrorWrite(const char *buffer, uptr length);
void RawWrite(const char *buffer);
bool ColorizeReports();
void RemoveANSIEscapeSequencesFromString(char *buffer);
-void Printf(const char *format, ...);
-void Report(const char *format, ...);
+void Printf(const char *format, ...) FORMAT(1, 2);
+void Report(const char *format, ...) FORMAT(1, 2);
void SetPrintfAndReportCallback(void (*callback)(const char *));
#define VReport(level, ...) \
do { \
@@ -237,12 +252,12 @@ void SetPrintfAndReportCallback(void (*callback)(const char *));
// Lock sanitizer error reporting and protects against nested errors.
class ScopedErrorReportLock {
public:
- ScopedErrorReportLock() ACQUIRE(mutex_) { Lock(); }
- ~ScopedErrorReportLock() RELEASE(mutex_) { Unlock(); }
+ ScopedErrorReportLock() SANITIZER_ACQUIRE(mutex_) { Lock(); }
+ ~ScopedErrorReportLock() SANITIZER_RELEASE(mutex_) { Unlock(); }
- static void Lock() ACQUIRE(mutex_);
- static void Unlock() RELEASE(mutex_);
- static void CheckLocked() CHECK_LOCKED(mutex_);
+ static void Lock() SANITIZER_ACQUIRE(mutex_);
+ static void Unlock() SANITIZER_RELEASE(mutex_);
+ static void CheckLocked() SANITIZER_CHECK_LOCKED(mutex_);
private:
static atomic_uintptr_t reporting_thread_;
@@ -285,7 +300,7 @@ void SetStackSizeLimitInBytes(uptr limit);
bool AddressSpaceIsUnlimited();
void SetAddressSpaceUnlimited();
void AdjustStackSize(void *attr);
-void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args);
+void PlatformPrepareForSandboxing(void *args);
void SetSandboxingCallback(void (*f)());
void InitializeCoverage(bool enabled, const char *coverage_dir);
@@ -294,6 +309,7 @@ void InitTlsSize();
uptr GetTlsSize();
// Other
+void WaitForDebugger(unsigned seconds, const char *label);
void SleepForSeconds(unsigned seconds);
void SleepForMillis(unsigned millis);
u64 NanoTime();
@@ -309,6 +325,20 @@ CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
const char *mmap_type, error_t err,
bool raw_report = false);
+void NORETURN ReportMunmapFailureAndDie(void *ptr, uptr size, error_t err,
+ bool raw_report = false);
+
+// Returns true if the platform-specific error reported is an OOM error.
+bool ErrorIsOOM(error_t err);
+
+// This reports an error in the form:
+//
+// `ERROR: {{SanitizerToolName}}: out of memory: {{err_msg}}`
+//
+// Downstream tools that read sanitizer output will know that errors starting
+// in this format are specifically OOM errors.
+#define ERROR_OOM(err_msg, ...) \
+ Report("ERROR: %s: out of memory: " err_msg, SanitizerToolName, __VA_ARGS__)
// Specific tools may override behavior of "Die" function to do tool-specific
// job.
@@ -325,12 +355,6 @@ void SetUserDieCallback(DieCallbackType callback);
void SetCheckUnwindCallback(void (*callback)());
-// Callback will be called if soft_rss_limit_mb is given and the limit is
-// exceeded (exceeded==true) or if rss went down below the limit
-// (exceeded==false).
-// The callback should be registered once at the tool init time.
-void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded));
-
// Functions related to signal handling.
typedef void (*SignalHandlerType)(int, void *, void *);
HandleSignalMode GetHandleSignalMode(int signum);
@@ -370,8 +394,10 @@ void ReportErrorSummary(const char *error_type, const AddressInfo &info,
// Same as above, but obtains AddressInfo by symbolizing top stack trace frame.
void ReportErrorSummary(const char *error_type, const StackTrace *trace,
const char *alt_tool_name = nullptr);
+// Skips frames which we consider internal and not useful to the users.
+const SymbolizedStack *SkipInternalFrames(const SymbolizedStack *frames);
-void ReportMmapWriteExec(int prot);
+void ReportMmapWriteExec(int prot, int mflags);
// Math
#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
@@ -419,9 +445,7 @@ inline uptr LeastSignificantSetBitIndex(uptr x) {
return up;
}
-inline bool IsPowerOfTwo(uptr x) {
- return (x & (x - 1)) == 0;
-}
+inline constexpr bool IsPowerOfTwo(uptr x) { return (x & (x - 1)) == 0; }
inline uptr RoundUpToPowerOfTwo(uptr size) {
CHECK(size);
@@ -433,16 +457,16 @@ inline uptr RoundUpToPowerOfTwo(uptr size) {
return 1ULL << (up + 1);
}
-inline uptr RoundUpTo(uptr size, uptr boundary) {
+inline constexpr uptr RoundUpTo(uptr size, uptr boundary) {
RAW_CHECK(IsPowerOfTwo(boundary));
return (size + boundary - 1) & ~(boundary - 1);
}
-inline uptr RoundDownTo(uptr x, uptr boundary) {
+inline constexpr uptr RoundDownTo(uptr x, uptr boundary) {
return x & ~(boundary - 1);
}
-inline bool IsAligned(uptr a, uptr alignment) {
+inline constexpr bool IsAligned(uptr a, uptr alignment) {
return (a & (alignment - 1)) == 0;
}
@@ -461,6 +485,10 @@ template <class T>
constexpr T Max(T a, T b) {
return a > b ? a : b;
}
+template <class T>
+constexpr T Abs(T a) {
+ return a < 0 ? -a : a;
+}
template<class T> void Swap(T& a, T& b) {
T tmp = a;
a = b;
@@ -502,8 +530,8 @@ class InternalMmapVectorNoCtor {
return data_[i];
}
void push_back(const T &element) {
- CHECK_LE(size_, capacity());
- if (size_ == capacity()) {
+ if (UNLIKELY(size_ >= capacity())) {
+ CHECK_EQ(size_, capacity());
uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
Realloc(new_capacity);
}
@@ -563,7 +591,7 @@ class InternalMmapVectorNoCtor {
}
private:
- void Realloc(uptr new_capacity) {
+ NOINLINE void Realloc(uptr new_capacity) {
CHECK_GT(new_capacity, 0);
CHECK_LE(size_, new_capacity);
uptr new_capacity_bytes =
@@ -618,7 +646,8 @@ class InternalScopedString {
buffer_.resize(1);
buffer_[0] = '\0';
}
- void append(const char *format, ...);
+ void Append(const char *str);
+ void AppendF(const char *format, ...) FORMAT(2, 3);
const char *data() const { return buffer_.data(); }
char *data() { return buffer_.data(); }
@@ -670,11 +699,9 @@ void Sort(T *v, uptr size, Compare comp = {}) {
// Works like std::lower_bound: finds the first element that is not less
// than val.
-template <class Container,
+template <class Container, class T,
class Compare = CompareLess<typename Container::value_type>>
-uptr InternalLowerBound(const Container &v,
- const typename Container::value_type &val,
- Compare comp = {}) {
+uptr InternalLowerBound(const Container &v, const T &val, Compare comp = {}) {
uptr first = 0;
uptr last = v.size();
while (last > first) {
@@ -697,7 +724,9 @@ enum ModuleArch {
kModuleArchARMV7S,
kModuleArchARMV7K,
kModuleArchARM64,
- kModuleArchRISCV64
+ kModuleArchLoongArch64,
+ kModuleArchRISCV64,
+ kModuleArchHexagon
};
// Sorts and removes duplicates from the container.
@@ -721,12 +750,15 @@ void SortAndDedup(Container &v, Compare comp = {}) {
v.resize(last + 1);
}
+constexpr uptr kDefaultFileMaxSize = FIRST_32_SECOND_64(1 << 26, 1 << 28);
+
// Opens the file 'file_name' and reads up to 'max_len' bytes.
// The resulting buffer is mmaped and stored in '*buff'.
// Returns true if file was successfully opened and read.
bool ReadFileToVector(const char *file_name,
InternalMmapVectorNoCtor<char> *buff,
- uptr max_len = 1 << 26, error_t *errno_p = nullptr);
+ uptr max_len = kDefaultFileMaxSize,
+ error_t *errno_p = nullptr);
// Opens the file 'file_name' and reads up to 'max_len' bytes.
// This function is less I/O efficient than ReadFileToVector as it may reread
@@ -737,9 +769,12 @@ bool ReadFileToVector(const char *file_name,
// The total number of read bytes is stored in '*read_len'.
// Returns true if file was successfully opened and read.
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
- uptr *read_len, uptr max_len = 1 << 26,
+ uptr *read_len, uptr max_len = kDefaultFileMaxSize,
error_t *errno_p = nullptr);
+int GetModuleAndOffsetForPc(uptr pc, char *module_name, uptr module_name_len,
+ uptr *pc_offset);
+
// When adding a new architecture, don't forget to also update
// script/asan_symbolize.py and sanitizer_symbolizer_libcdep.cpp.
inline const char *ModuleArchToString(ModuleArch arch) {
@@ -762,14 +797,22 @@ inline const char *ModuleArchToString(ModuleArch arch) {
return "armv7k";
case kModuleArchARM64:
return "arm64";
+ case kModuleArchLoongArch64:
+ return "loongarch64";
case kModuleArchRISCV64:
return "riscv64";
+ case kModuleArchHexagon:
+ return "hexagon";
}
CHECK(0 && "Invalid module arch");
return "";
}
+#if SANITIZER_APPLE
const uptr kModuleUUIDSize = 16;
+#else
+const uptr kModuleUUIDSize = 32;
+#endif
const uptr kMaxSegName = 16;
// Represents a binary loaded into virtual memory (e.g. this can be an
@@ -779,8 +822,9 @@ class LoadedModule {
LoadedModule()
: full_name_(nullptr),
base_address_(0),
- max_executable_address_(0),
+ max_address_(0),
arch_(kModuleArchUnknown),
+ uuid_size_(0),
instrumented_(false) {
internal_memset(uuid_, 0, kModuleUUIDSize);
ranges_.clear();
@@ -788,6 +832,7 @@ class LoadedModule {
void set(const char *module_name, uptr base_address);
void set(const char *module_name, uptr base_address, ModuleArch arch,
u8 uuid[kModuleUUIDSize], bool instrumented);
+ void setUuid(const char *uuid, uptr size);
void clear();
void addAddressRange(uptr beg, uptr end, bool executable, bool writable,
const char *name = nullptr);
@@ -795,9 +840,10 @@ class LoadedModule {
const char *full_name() const { return full_name_; }
uptr base_address() const { return base_address_; }
- uptr max_executable_address() const { return max_executable_address_; }
+ uptr max_address() const { return max_address_; }
ModuleArch arch() const { return arch_; }
const u8 *uuid() const { return uuid_; }
+ uptr uuid_size() const { return uuid_size_; }
bool instrumented() const { return instrumented_; }
struct AddressRange {
@@ -824,8 +870,9 @@ class LoadedModule {
private:
char *full_name_; // Owned.
uptr base_address_;
- uptr max_executable_address_;
+ uptr max_address_;
ModuleArch arch_;
+ uptr uuid_size_;
u8 uuid_[kModuleUUIDSize];
bool instrumented_;
IntrusiveList<AddressRange> ranges_;
@@ -883,13 +930,13 @@ void WriteToSyslog(const char *buffer);
#define SANITIZER_WIN_TRACE 0
#endif
-#if SANITIZER_MAC || SANITIZER_WIN_TRACE
+#if SANITIZER_APPLE || SANITIZER_WIN_TRACE
void LogFullErrorReport(const char *buffer);
#else
inline void LogFullErrorReport(const char *buffer) {}
#endif
-#if SANITIZER_LINUX || SANITIZER_MAC
+#if SANITIZER_LINUX || SANITIZER_APPLE
void WriteOneLineToSyslog(const char *s);
void LogMessageOnPrintf(const char *str);
#else
@@ -951,7 +998,7 @@ struct SignalContext {
uptr sp;
uptr bp;
bool is_memory_access;
- enum WriteFlag { UNKNOWN, READ, WRITE } write_flag;
+ enum WriteFlag { Unknown, Read, Write } write_flag;
// In some cases the kernel cannot provide the true faulting address; `addr`
// will be zero then. This field allows us to distinguish between these cases
@@ -996,7 +1043,6 @@ struct SignalContext {
};
void InitializePlatformEarly();
-void MaybeReexec();
template <typename Fn>
class RunOnDestruction {
@@ -1049,31 +1095,10 @@ inline u32 GetNumberOfCPUsCached() {
return NumberOfCPUsCached;
}
-template <typename T>
-class ArrayRef {
- public:
- ArrayRef() {}
- ArrayRef(T *begin, T *end) : begin_(begin), end_(end) {}
-
- T *begin() { return begin_; }
- T *end() { return end_; }
-
- private:
- T *begin_ = nullptr;
- T *end_ = nullptr;
-};
-
-#define PRINTF_128(v) \
- (*((u8 *)&v + 0)), (*((u8 *)&v + 1)), (*((u8 *)&v + 2)), (*((u8 *)&v + 3)), \
- (*((u8 *)&v + 4)), (*((u8 *)&v + 5)), (*((u8 *)&v + 6)), \
- (*((u8 *)&v + 7)), (*((u8 *)&v + 8)), (*((u8 *)&v + 9)), \
- (*((u8 *)&v + 10)), (*((u8 *)&v + 11)), (*((u8 *)&v + 12)), \
- (*((u8 *)&v + 13)), (*((u8 *)&v + 14)), (*((u8 *)&v + 15))
-
} // namespace __sanitizer
inline void *operator new(__sanitizer::operator_new_size_type size,
- __sanitizer::LowLevelAllocator &alloc) { // NOLINT
+ __sanitizer::LowLevelAllocator &alloc) {
return alloc.Allocate(size);
}
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc
index 6205d853a4c9..3ecdb55cdbf7 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors.inc
@@ -21,34 +21,29 @@
// COMMON_INTERCEPTOR_FD_RELEASE
// COMMON_INTERCEPTOR_FD_ACCESS
// COMMON_INTERCEPTOR_SET_THREAD_NAME
-// COMMON_INTERCEPTOR_ON_DLOPEN
+// COMMON_INTERCEPTOR_DLOPEN
// COMMON_INTERCEPTOR_ON_EXIT
-// COMMON_INTERCEPTOR_MUTEX_PRE_LOCK
-// COMMON_INTERCEPTOR_MUTEX_POST_LOCK
-// COMMON_INTERCEPTOR_MUTEX_UNLOCK
-// COMMON_INTERCEPTOR_MUTEX_REPAIR
// COMMON_INTERCEPTOR_SET_PTHREAD_NAME
// COMMON_INTERCEPTOR_HANDLE_RECVMSG
// COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED
-// COMMON_INTERCEPTOR_MEMSET_IMPL
-// COMMON_INTERCEPTOR_MEMMOVE_IMPL
-// COMMON_INTERCEPTOR_MEMCPY_IMPL
// COMMON_INTERCEPTOR_MMAP_IMPL
+// COMMON_INTERCEPTOR_MUNMAP_IMPL
// COMMON_INTERCEPTOR_COPY_STRING
// COMMON_INTERCEPTOR_STRNDUP_IMPL
// COMMON_INTERCEPTOR_STRERROR
//===----------------------------------------------------------------------===//
+#include <stdarg.h>
+
#include "interception/interception.h"
#include "sanitizer_addrhashmap.h"
+#include "sanitizer_dl.h"
#include "sanitizer_errno.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_platform_interceptors.h"
#include "sanitizer_symbolizer.h"
#include "sanitizer_tls_get_addr.h"
-#include <stdarg.h>
-
#if SANITIZER_INTERCEPTOR_HOOKS
#define CALL_WEAK_INTERCEPTOR_HOOK(f, ...) f(__VA_ARGS__);
#define DECLARE_WEAK_INTERCEPTOR_HOOK(f, ...) \
@@ -132,14 +127,75 @@ extern const short *_toupper_tab_;
extern const short *_tolower_tab_;
#endif
-// Platform-specific options.
-#if SANITIZER_MAC
-#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 0
-#elif SANITIZER_WINDOWS64
-#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 0
-#else
-#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 1
-#endif // SANITIZER_MAC
+#if SANITIZER_MUSL && \
+ (defined(__i386__) || defined(__arm__) || SANITIZER_MIPS32 || SANITIZER_PPC32)
+// musl 1.2.0 on existing 32-bit architectures uses new symbol names for the
+// time-related functions that take 64-bit time_t values. See
+// https://musl.libc.org/time64.html
+#define adjtime __adjtime64
+#define adjtimex __adjtimex_time64
+#define aio_suspend __aio_suspend_time64
+#define clock_adjtime __clock_adjtime64
+#define clock_getres __clock_getres_time64
+#define clock_gettime __clock_gettime64
+#define clock_nanosleep __clock_nanosleep_time64
+#define clock_settime __clock_settime64
+#define cnd_timedwait __cnd_timedwait_time64
+#define ctime __ctime64
+#define ctime_r __ctime64_r
+#define difftime __difftime64
+#define dlsym __dlsym_time64
+#define fstatat __fstatat_time64
+#define fstat __fstat_time64
+#define ftime __ftime64
+#define futimens __futimens_time64
+#define futimesat __futimesat_time64
+#define futimes __futimes_time64
+#define getitimer __getitimer_time64
+#define getrusage __getrusage_time64
+#define gettimeofday __gettimeofday_time64
+#define gmtime __gmtime64
+#define gmtime_r __gmtime64_r
+#define localtime __localtime64
+#define localtime_r __localtime64_r
+#define lstat __lstat_time64
+#define lutimes __lutimes_time64
+#define mktime __mktime64
+#define mq_timedreceive __mq_timedreceive_time64
+#define mq_timedsend __mq_timedsend_time64
+#define mtx_timedlock __mtx_timedlock_time64
+#define nanosleep __nanosleep_time64
+#define ppoll __ppoll_time64
+#define pselect __pselect_time64
+#define pthread_cond_timedwait __pthread_cond_timedwait_time64
+#define pthread_mutex_timedlock __pthread_mutex_timedlock_time64
+#define pthread_rwlock_timedrdlock __pthread_rwlock_timedrdlock_time64
+#define pthread_rwlock_timedwrlock __pthread_rwlock_timedwrlock_time64
+#define pthread_timedjoin_np __pthread_timedjoin_np_time64
+#define recvmmsg __recvmmsg_time64
+#define sched_rr_get_interval __sched_rr_get_interval_time64
+#define select __select_time64
+#define semtimedop __semtimedop_time64
+#define sem_timedwait __sem_timedwait_time64
+#define setitimer __setitimer_time64
+#define settimeofday __settimeofday_time64
+#define sigtimedwait __sigtimedwait_time64
+#define stat __stat_time64
+#define stime __stime64
+#define thrd_sleep __thrd_sleep_time64
+#define timegm __timegm_time64
+#define timerfd_gettime __timerfd_gettime64
+#define timerfd_settime __timerfd_settime64
+#define timer_gettime __timer_gettime64
+#define timer_settime __timer_settime64
+#define timespec_get __timespec_get_time64
+#define time __time64
+#define utimensat __utimensat_time64
+#define utimes __utimes_time64
+#define utime __utime64
+#define wait3 __wait3_time64
+#define wait4 __wait4_time64
+#endif
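+
+// Editor's note: with these remappings in place, an interceptor written
+// against the POSIX name is transparently bound to musl's time64 symbol;
+// e.g. INTERCEPTOR(int, clock_gettime, ...) elsewhere in this file actually
+// defines and hooks __clock_gettime64 on the 32-bit musl targets above.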
#ifndef COMMON_INTERCEPTOR_INITIALIZE_RANGE
#define COMMON_INTERCEPTOR_INITIALIZE_RANGE(p, size) {}
@@ -153,26 +209,6 @@ extern const short *_tolower_tab_;
#define COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd) {}
#endif
-#ifndef COMMON_INTERCEPTOR_MUTEX_PRE_LOCK
-#define COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m) {}
-#endif
-
-#ifndef COMMON_INTERCEPTOR_MUTEX_POST_LOCK
-#define COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m) {}
-#endif
-
-#ifndef COMMON_INTERCEPTOR_MUTEX_UNLOCK
-#define COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m) {}
-#endif
-
-#ifndef COMMON_INTERCEPTOR_MUTEX_REPAIR
-#define COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m) {}
-#endif
-
-#ifndef COMMON_INTERCEPTOR_MUTEX_INVALID
-#define COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m) {}
-#endif
-
#ifndef COMMON_INTERCEPTOR_HANDLE_RECVMSG
#define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) ((void)(msg))
#endif
@@ -204,11 +240,11 @@ extern const short *_tolower_tab_;
#define COMMON_INTERCEPTOR_READ_STRING(ctx, s, n) \
COMMON_INTERCEPTOR_READ_RANGE((ctx), (s), \
- common_flags()->strict_string_checks ? (REAL(strlen)(s)) + 1 : (n) )
+ common_flags()->strict_string_checks ? (internal_strlen(s)) + 1 : (n) )
-#ifndef COMMON_INTERCEPTOR_ON_DLOPEN
-#define COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag) \
- CheckNoDeepBind(filename, flag);
+#ifndef COMMON_INTERCEPTOR_DLOPEN
+#define COMMON_INTERCEPTOR_DLOPEN(filename, flag) \
+ ({ CheckNoDeepBind(filename, flag); REAL(dlopen)(filename, flag); })
#endif
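// Editor's note: unlike the removed ON_DLOPEN hook, this macro performs the
// real call itself (via a GNU statement expression), so a tool overriding it
// can run code both before and after dlopen. A hypothetical override:
//   #define COMMON_INTERCEPTOR_DLOPEN(filename, flag) \
//     ({ PreDlopenHook(filename); void *h = REAL(dlopen)(filename, flag); \
//        PostDlopenHook(h); h; })
// where PreDlopenHook/PostDlopenHook are illustrative names only.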
#ifndef COMMON_INTERCEPTOR_GET_TLS_RANGE
@@ -256,53 +292,17 @@ extern const short *_tolower_tab_;
COMMON_INTERCEPT_FUNCTION(fn)
#endif
-#ifndef COMMON_INTERCEPTOR_MEMSET_IMPL
-#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size) \
- { \
- if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) \
- return internal_memset(dst, v, size); \
- COMMON_INTERCEPTOR_ENTER(ctx, memset, dst, v, size); \
- if (common_flags()->intercept_intrin) \
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
- return REAL(memset)(dst, v, size); \
- }
-#endif
-
-#ifndef COMMON_INTERCEPTOR_MEMMOVE_IMPL
-#define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size) \
- { \
- if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) \
- return internal_memmove(dst, src, size); \
- COMMON_INTERCEPTOR_ENTER(ctx, memmove, dst, src, size); \
- if (common_flags()->intercept_intrin) { \
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
- COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size); \
- } \
- return REAL(memmove)(dst, src, size); \
- }
-#endif
-
-#ifndef COMMON_INTERCEPTOR_MEMCPY_IMPL
-#define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size) \
- { \
- if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) { \
- return internal_memmove(dst, src, size); \
- } \
- COMMON_INTERCEPTOR_ENTER(ctx, memcpy, dst, src, size); \
- if (common_flags()->intercept_intrin) { \
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
- COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size); \
- } \
- return REAL(memcpy)(dst, src, size); \
- }
-#endif
-
#ifndef COMMON_INTERCEPTOR_MMAP_IMPL
#define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, sz, prot, flags, fd, \
off) \
{ return REAL(mmap)(addr, sz, prot, flags, fd, off); }
#endif
+#ifndef COMMON_INTERCEPTOR_MUNMAP_IMPL
+#define COMMON_INTERCEPTOR_MUNMAP_IMPL(ctx, addr, sz) \
+ { return REAL(munmap)(addr, sz); }
+#endif
+
#ifndef COMMON_INTERCEPTOR_COPY_STRING
#define COMMON_INTERCEPTOR_COPY_STRING(ctx, to, from, size) {}
#endif
@@ -315,9 +315,11 @@ extern const short *_tolower_tab_;
if (common_flags()->intercept_strndup) { \
COMMON_INTERCEPTOR_READ_STRING(ctx, s, Min(size, copy_length + 1)); \
} \
- COMMON_INTERCEPTOR_COPY_STRING(ctx, new_mem, s, copy_length); \
- internal_memcpy(new_mem, s, copy_length); \
- new_mem[copy_length] = '\0'; \
+ if (new_mem) { \
+ COMMON_INTERCEPTOR_COPY_STRING(ctx, new_mem, s, copy_length); \
+ internal_memcpy(new_mem, s, copy_length); \
+ new_mem[copy_length] = '\0'; \
+ } \
return new_mem;
#endif
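// Editor's note: the 'if (new_mem)' guard added above mirrors strndup's
// contract of returning NULL when allocation fails, so the interceptor no
// longer writes the copied bytes or the terminator through a null pointer.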
@@ -435,7 +437,7 @@ INTERCEPTOR(char*, textdomain, const char *domainname) {
if (domainname) COMMON_INTERCEPTOR_READ_STRING(ctx, domainname, 0);
char *domain = REAL(textdomain)(domainname);
if (domain) {
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(domain, REAL(strlen)(domain) + 1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(domain, internal_strlen(domain) + 1);
}
return domain;
}
@@ -444,11 +446,13 @@ INTERCEPTOR(char*, textdomain, const char *domainname) {
#define INIT_TEXTDOMAIN
#endif
-#if SANITIZER_INTERCEPT_STRCMP
+#if SANITIZER_INTERCEPT_STRCMP || SANITIZER_INTERCEPT_MEMCMP
static inline int CharCmpX(unsigned char c1, unsigned char c2) {
return (c1 == c2) ? 0 : (c1 < c2) ? -1 : 1;
}
+#endif
+#if SANITIZER_INTERCEPT_STRCMP
DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcmp, uptr called_pc,
const char *s1, const char *s2, int result)
@@ -575,8 +579,8 @@ INTERCEPTOR(int, strncasecmp, const char *s1, const char *s2, SIZE_T size) {
#if SANITIZER_INTERCEPT_STRSTR || SANITIZER_INTERCEPT_STRCASESTR
static inline void StrstrCheck(void *ctx, char *r, const char *s1,
const char *s2) {
- uptr len1 = REAL(strlen)(s1);
- uptr len2 = REAL(strlen)(s2);
+ uptr len1 = internal_strlen(s1);
+ uptr len2 = internal_strlen(s2);
COMMON_INTERCEPTOR_READ_STRING(ctx, s1, r ? r - s1 + len2 : len1 + 1);
COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, len2 + 1);
}
@@ -640,10 +644,10 @@ INTERCEPTOR(char*, strtok, char *str, const char *delimiters) {
// for subsequent calls). We do not need to check strtok's result.
// As the delimiters can change, we check them every call.
if (str != nullptr) {
- COMMON_INTERCEPTOR_READ_RANGE(ctx, str, REAL(strlen)(str) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, str, internal_strlen(str) + 1);
}
COMMON_INTERCEPTOR_READ_RANGE(ctx, delimiters,
- REAL(strlen)(delimiters) + 1);
+ internal_strlen(delimiters) + 1);
return REAL(strtok)(str, delimiters);
} else {
// However, when strict_string_checks is disabled we cannot check the
@@ -657,11 +661,11 @@ INTERCEPTOR(char*, strtok, char *str, const char *delimiters) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, delimiters, 1);
char *result = REAL(strtok)(str, delimiters);
if (result != nullptr) {
- COMMON_INTERCEPTOR_READ_RANGE(ctx, result, REAL(strlen)(result) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, result, internal_strlen(result) + 1);
} else if (str != nullptr) {
// No delimiter was found; it's safe to assume that the entire str was
// scanned.
- COMMON_INTERCEPTOR_READ_RANGE(ctx, str, REAL(strlen)(str) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, str, internal_strlen(str) + 1);
}
return result;
}
@@ -706,7 +710,7 @@ INTERCEPTOR(char*, strchr, const char *s, int c) {
if (common_flags()->intercept_strchr) {
// Keep strlen as a macro argument, as the macro may ignore it.
COMMON_INTERCEPTOR_READ_STRING(ctx, s,
- (result ? result - s : REAL(strlen)(s)) + 1);
+ (result ? result - s : internal_strlen(s)) + 1);
}
return result;
}
@@ -737,7 +741,7 @@ INTERCEPTOR(char*, strrchr, const char *s, int c) {
return internal_strrchr(s, c);
COMMON_INTERCEPTOR_ENTER(ctx, strrchr, s, c);
if (common_flags()->intercept_strchr)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, s, REAL(strlen)(s) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s, internal_strlen(s) + 1);
return REAL(strrchr)(s, c);
}
#define INIT_STRRCHR COMMON_INTERCEPT_FUNCTION(strrchr)
@@ -751,7 +755,7 @@ INTERCEPTOR(SIZE_T, strspn, const char *s1, const char *s2) {
COMMON_INTERCEPTOR_ENTER(ctx, strspn, s1, s2);
SIZE_T r = REAL(strspn)(s1, s2);
if (common_flags()->intercept_strspn) {
- COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, REAL(strlen)(s2) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, internal_strlen(s2) + 1);
COMMON_INTERCEPTOR_READ_STRING(ctx, s1, r + 1);
}
return r;
@@ -762,7 +766,7 @@ INTERCEPTOR(SIZE_T, strcspn, const char *s1, const char *s2) {
COMMON_INTERCEPTOR_ENTER(ctx, strcspn, s1, s2);
SIZE_T r = REAL(strcspn)(s1, s2);
if (common_flags()->intercept_strspn) {
- COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, REAL(strlen)(s2) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, internal_strlen(s2) + 1);
COMMON_INTERCEPTOR_READ_STRING(ctx, s1, r + 1);
}
return r;
@@ -781,9 +785,9 @@ INTERCEPTOR(char *, strpbrk, const char *s1, const char *s2) {
COMMON_INTERCEPTOR_ENTER(ctx, strpbrk, s1, s2);
char *r = REAL(strpbrk)(s1, s2);
if (common_flags()->intercept_strpbrk) {
- COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, REAL(strlen)(s2) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, internal_strlen(s2) + 1);
COMMON_INTERCEPTOR_READ_STRING(ctx, s1,
- r ? r - s1 + 1 : REAL(strlen)(s1) + 1);
+ r ? r - s1 + 1 : internal_strlen(s1) + 1);
}
return r;
}
@@ -793,57 +797,6 @@ INTERCEPTOR(char *, strpbrk, const char *s1, const char *s2) {
#define INIT_STRPBRK
#endif
-#if SANITIZER_INTERCEPT_MEMSET
-INTERCEPTOR(void *, memset, void *dst, int v, uptr size) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size);
-}
-
-#define INIT_MEMSET COMMON_INTERCEPT_FUNCTION(memset)
-#else
-#define INIT_MEMSET
-#endif
-
-#if SANITIZER_INTERCEPT_MEMMOVE
-INTERCEPTOR(void *, memmove, void *dst, const void *src, uptr size) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
-}
-
-#define INIT_MEMMOVE COMMON_INTERCEPT_FUNCTION(memmove)
-#else
-#define INIT_MEMMOVE
-#endif
-
-#if SANITIZER_INTERCEPT_MEMCPY
-INTERCEPTOR(void *, memcpy, void *dst, const void *src, uptr size) {
- // On OS X, calling internal_memcpy here will cause memory corruptions,
- // because memcpy and memmove are actually aliases of the same
- // implementation. We need to use internal_memmove here.
- // N.B.: If we switch this to internal_ we'll have to use internal_memmove
- // due to memcpy being an alias of memmove on OS X.
- void *ctx;
-#if PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE
- COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size);
-#else
- COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
-#endif
-}
-
-#define INIT_MEMCPY \
- do { \
- if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) { \
- COMMON_INTERCEPT_FUNCTION(memcpy); \
- } else { \
- ASSIGN_REAL(memcpy, memmove); \
- } \
- CHECK(REAL(memcpy)); \
- } while (false)
-
-#else
-#define INIT_MEMCPY
-#endif
-
#if SANITIZER_INTERCEPT_MEMCMP
DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_memcmp, uptr called_pc,
const void *s1, const void *s2, uptr n,
@@ -1251,7 +1204,7 @@ INTERCEPTOR(char *, fgets, char *s, SIZE_T size, void *file) {
// https://github.com/google/sanitizers/issues/321.
char *res = REAL(fgets)(s, size, file);
if (res)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, s, REAL(strlen)(s) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, s, internal_strlen(s) + 1);
return res;
}
#define INIT_FGETS COMMON_INTERCEPT_FUNCTION(fgets)
@@ -1264,8 +1217,8 @@ INTERCEPTOR_WITH_SUFFIX(int, fputs, char *s, void *file) {
// libc file streams can call user-supplied functions, see fopencookie.
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, fputs, s, file);
- if (!SANITIZER_MAC || s) { // `fputs(NULL, file)` is supported on Darwin.
- COMMON_INTERCEPTOR_READ_RANGE(ctx, s, REAL(strlen)(s) + 1);
+ if (!SANITIZER_APPLE || s) { // `fputs(NULL, file)` is supported on Darwin.
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s, internal_strlen(s) + 1);
}
return REAL(fputs)(s, file);
}
@@ -1279,8 +1232,8 @@ INTERCEPTOR(int, puts, char *s) {
// libc file streams can call user-supplied functions, see fopencookie.
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, puts, s);
- if (!SANITIZER_MAC || s) { // `puts(NULL)` is supported on Darwin.
- COMMON_INTERCEPTOR_READ_RANGE(ctx, s, REAL(strlen)(s) + 1);
+ if (!SANITIZER_APPLE || s) { // `puts(NULL)` is supported on Darwin.
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s, internal_strlen(s) + 1);
}
return REAL(puts)(s);
}
@@ -1295,12 +1248,21 @@ INTERCEPTOR(int, prctl, int option, unsigned long arg2, unsigned long arg3,
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, prctl, option, arg2, arg3, arg4, arg5);
static const int PR_SET_NAME = 15;
- int res = REAL(prctl(option, arg2, arg3, arg4, arg5));
+ static const int PR_SET_VMA = 0x53564d41;
+ static const int PR_SCHED_CORE = 62;
+ static const int PR_SCHED_CORE_GET = 0;
+ if (option == PR_SET_VMA && arg2 == 0UL) {
+ char *name = (char *)arg5;
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
+ }
+ int res = REAL(prctl)(option, arg2, arg3, arg4, arg5);
if (option == PR_SET_NAME) {
char buff[16];
internal_strncpy(buff, (char *)arg2, 15);
buff[15] = 0;
COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, buff);
+ } else if (res != -1 && option == PR_SCHED_CORE && arg2 == PR_SCHED_CORE_GET) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, (u64*)(arg5), sizeof(u64));
}
return res;
}
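// Editor's note: arg2 == 0 corresponds to Linux's PR_SET_VMA_ANON_NAME, as in
//   prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, addr, size, "my vma");
// so the name string in arg5 is checked for readability before the real
// call, and a successful PR_SCHED_CORE_GET writes a u64 cookie through arg5.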
@@ -1334,7 +1296,7 @@ static void unpoison_tm(void *ctx, __sanitizer_tm *tm) {
// Cannot use COMMON_INTERCEPTOR_WRITE_RANGE here, because tm->tm_zone
// can point to shared memory and tsan would report a data race.
COMMON_INTERCEPTOR_INITIALIZE_RANGE(tm->tm_zone,
- REAL(strlen(tm->tm_zone)) + 1);
+ internal_strlen(tm->tm_zone) + 1);
}
#endif
}
@@ -1387,7 +1349,7 @@ INTERCEPTOR(char *, ctime, unsigned long *timep) {
char *res = REAL(ctime)(timep);
if (res) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep));
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);
}
return res;
}
@@ -1400,7 +1362,7 @@ INTERCEPTOR(char *, ctime_r, unsigned long *timep, char *result) {
char *res = REAL(ctime_r)(timep, result);
if (res) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep));
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);
}
return res;
}
@@ -1413,7 +1375,7 @@ INTERCEPTOR(char *, asctime, __sanitizer_tm *tm) {
char *res = REAL(asctime)(tm);
if (res) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, tm, sizeof(*tm));
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);
}
return res;
}
@@ -1426,7 +1388,7 @@ INTERCEPTOR(char *, asctime_r, __sanitizer_tm *tm, char *result) {
char *res = REAL(asctime_r)(tm, result);
if (res) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, tm, sizeof(*tm));
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);
}
return res;
}
@@ -1463,7 +1425,7 @@ INTERCEPTOR(char *, strptime, char *s, char *format, __sanitizer_tm *tm) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strptime, s, format, tm);
if (format)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, format, REAL(strlen)(format) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, format, internal_strlen(format) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
@@ -1532,6 +1494,16 @@ VSCANF_INTERCEPTOR_IMPL(__isoc99_vsscanf, false, str, format, ap)
INTERCEPTOR(int, __isoc99_vfscanf, void *stream, const char *format, va_list ap)
VSCANF_INTERCEPTOR_IMPL(__isoc99_vfscanf, false, stream, format, ap)
+
+INTERCEPTOR(int, __isoc23_vscanf, const char *format, va_list ap)
+VSCANF_INTERCEPTOR_IMPL(__isoc23_vscanf, false, format, ap)
+
+INTERCEPTOR(int, __isoc23_vsscanf, const char *str, const char *format,
+ va_list ap)
+VSCANF_INTERCEPTOR_IMPL(__isoc23_vsscanf, false, str, format, ap)
+
+INTERCEPTOR(int, __isoc23_vfscanf, void *stream, const char *format, va_list ap)
+VSCANF_INTERCEPTOR_IMPL(__isoc23_vfscanf, false, stream, format, ap)
#endif // SANITIZER_INTERCEPT_ISOC99_SCANF
INTERCEPTOR(int, scanf, const char *format, ...)
@@ -1552,6 +1524,15 @@ FORMAT_INTERCEPTOR_IMPL(__isoc99_fscanf, __isoc99_vfscanf, stream, format)
INTERCEPTOR(int, __isoc99_sscanf, const char *str, const char *format, ...)
FORMAT_INTERCEPTOR_IMPL(__isoc99_sscanf, __isoc99_vsscanf, str, format)
+
+INTERCEPTOR(int, __isoc23_scanf, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(__isoc23_scanf, __isoc23_vscanf, format)
+
+INTERCEPTOR(int, __isoc23_fscanf, void *stream, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(__isoc23_fscanf, __isoc23_vfscanf, stream, format)
+
+INTERCEPTOR(int, __isoc23_sscanf, const char *str, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(__isoc23_sscanf, __isoc23_vsscanf, str, format)
#endif
#endif
@@ -1575,7 +1556,13 @@ FORMAT_INTERCEPTOR_IMPL(__isoc99_sscanf, __isoc99_vsscanf, str, format)
COMMON_INTERCEPT_FUNCTION(__isoc99_fscanf); \
COMMON_INTERCEPT_FUNCTION(__isoc99_vscanf); \
COMMON_INTERCEPT_FUNCTION(__isoc99_vsscanf); \
- COMMON_INTERCEPT_FUNCTION(__isoc99_vfscanf);
+ COMMON_INTERCEPT_FUNCTION(__isoc99_vfscanf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc23_scanf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc23_sscanf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc23_fscanf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc23_vscanf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc23_vsscanf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc23_vfscanf);
#else
#define INIT_ISOC99_SCANF
#endif
@@ -1843,9 +1830,9 @@ INTERCEPTOR(int, ioctl, int d, unsigned long request, ...) {
const ioctl_desc *desc = ioctl_lookup(request);
ioctl_desc decoded_desc;
if (!desc) {
- VPrintf(2, "Decoding unknown ioctl 0x%x\n", request);
+ VPrintf(2, "Decoding unknown ioctl 0x%lx\n", request);
if (!ioctl_decode(request, &decoded_desc))
- Printf("WARNING: failed decoding unknown ioctl 0x%x\n", request);
+ Printf("WARNING: failed decoding unknown ioctl 0x%lx\n", request);
else
desc = &decoded_desc;
}
@@ -1869,26 +1856,26 @@ UNUSED static void unpoison_passwd(void *ctx, __sanitizer_passwd *pwd) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd, sizeof(*pwd));
if (pwd->pw_name)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd->pw_name,
- REAL(strlen)(pwd->pw_name) + 1);
+ internal_strlen(pwd->pw_name) + 1);
if (pwd->pw_passwd)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd->pw_passwd,
- REAL(strlen)(pwd->pw_passwd) + 1);
+ internal_strlen(pwd->pw_passwd) + 1);
#if !SANITIZER_ANDROID
if (pwd->pw_gecos)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd->pw_gecos,
- REAL(strlen)(pwd->pw_gecos) + 1);
+ internal_strlen(pwd->pw_gecos) + 1);
#endif
-#if SANITIZER_MAC || SANITIZER_FREEBSD || SANITIZER_NETBSD
+#if SANITIZER_APPLE || SANITIZER_FREEBSD || SANITIZER_NETBSD
if (pwd->pw_class)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd->pw_class,
- REAL(strlen)(pwd->pw_class) + 1);
+ internal_strlen(pwd->pw_class) + 1);
#endif
if (pwd->pw_dir)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd->pw_dir,
- REAL(strlen)(pwd->pw_dir) + 1);
+ internal_strlen(pwd->pw_dir) + 1);
if (pwd->pw_shell)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd->pw_shell,
- REAL(strlen)(pwd->pw_shell) + 1);
+ internal_strlen(pwd->pw_shell) + 1);
}
}
@@ -1897,13 +1884,13 @@ UNUSED static void unpoison_group(void *ctx, __sanitizer_group *grp) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, grp, sizeof(*grp));
if (grp->gr_name)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, grp->gr_name,
- REAL(strlen)(grp->gr_name) + 1);
+ internal_strlen(grp->gr_name) + 1);
if (grp->gr_passwd)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, grp->gr_passwd,
- REAL(strlen)(grp->gr_passwd) + 1);
+ internal_strlen(grp->gr_passwd) + 1);
char **p = grp->gr_mem;
for (; *p; ++p) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, REAL(strlen)(*p) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, internal_strlen(*p) + 1);
}
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, grp->gr_mem,
(p - grp->gr_mem + 1) * sizeof(*p));
@@ -1916,7 +1903,7 @@ INTERCEPTOR(__sanitizer_passwd *, getpwnam, const char *name) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getpwnam, name);
if (name)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
__sanitizer_passwd *res = REAL(getpwnam)(name);
unpoison_passwd(ctx, res);
return res;
@@ -1931,7 +1918,7 @@ INTERCEPTOR(__sanitizer_passwd *, getpwuid, u32 uid) {
INTERCEPTOR(__sanitizer_group *, getgrnam, const char *name) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getgrnam, name);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
__sanitizer_group *res = REAL(getgrnam)(name);
unpoison_group(ctx, res);
return res;
@@ -1957,7 +1944,7 @@ INTERCEPTOR(int, getpwnam_r, const char *name, __sanitizer_passwd *pwd,
char *buf, SIZE_T buflen, __sanitizer_passwd **result) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getpwnam_r, name, pwd, buf, buflen, result);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
@@ -1984,7 +1971,7 @@ INTERCEPTOR(int, getgrnam_r, const char *name, __sanitizer_group *grp,
char *buf, SIZE_T buflen, __sanitizer_group **result) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getgrnam_r, name, grp, buf, buflen, result);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
@@ -2229,8 +2216,20 @@ INTERCEPTOR(int, clock_getcpuclockid, pid_t pid,
return res;
}
-#define INIT_CLOCK_GETCPUCLOCKID \
- COMMON_INTERCEPT_FUNCTION(clock_getcpuclockid);
+INTERCEPTOR(int, pthread_getcpuclockid, uptr thread,
+ __sanitizer_clockid_t *clockid) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pthread_getcpuclockid, thread, clockid);
+ int res = REAL(pthread_getcpuclockid)(thread, clockid);
+ if (!res && clockid) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, clockid, sizeof *clockid);
+ }
+ return res;
+}
+
+#define INIT_CLOCK_GETCPUCLOCKID \
+ COMMON_INTERCEPT_FUNCTION(clock_getcpuclockid); \
+ COMMON_INTERCEPT_FUNCTION(pthread_getcpuclockid);
#else
#define INIT_CLOCK_GETCPUCLOCKID
#endif
@@ -2289,7 +2288,7 @@ static void unpoison_glob_t(void *ctx, __sanitizer_glob_t *pglob) {
ctx, pglob->gl_pathv, (pglob->gl_pathc + 1) * sizeof(*pglob->gl_pathv));
for (SIZE_T i = 0; i < pglob->gl_pathc; ++i) {
char *p = pglob->gl_pathv[i];
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, REAL(strlen)(p) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, internal_strlen(p) + 1);
}
}
@@ -2319,19 +2318,19 @@ static void *wrapped_gl_readdir(void *dir) {
static void *wrapped_gl_opendir(const char *s) {
COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, REAL(strlen)(s) + 1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, internal_strlen(s) + 1);
return pglob_copy->gl_opendir(s);
}
static int wrapped_gl_lstat(const char *s, void *st) {
COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, REAL(strlen)(s) + 1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, internal_strlen(s) + 1);
return pglob_copy->gl_lstat(s, st);
}
static int wrapped_gl_stat(const char *s, void *st) {
COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, REAL(strlen)(s) + 1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, internal_strlen(s) + 1);
return pglob_copy->gl_stat(s, st);
}
@@ -2410,6 +2409,136 @@ INTERCEPTOR(int, glob64, const char *pattern, int flags,
#define INIT_GLOB64
#endif // SANITIZER_INTERCEPT_GLOB64
+#if SANITIZER_INTERCEPT___B64_TO
+INTERCEPTOR(int, __b64_ntop, unsigned char const *src, SIZE_T srclength,
+ char *target, SIZE_T targsize) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __b64_ntop, src, srclength, target, targsize);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, srclength);
+ int res = REAL(__b64_ntop)(src, srclength, target, targsize);
+ if (res >= 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, target, res + 1);
+ return res;
+}
+INTERCEPTOR(int, __b64_pton, char const *src, char *target, SIZE_T targsize) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __b64_pton, src, target, targsize);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, internal_strlen(src) + 1);
+ int res = REAL(__b64_pton)(src, target, targsize);
+ if (res >= 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, target, res);
+ return res;
+}
+#define INIT___B64_TO \
+ COMMON_INTERCEPT_FUNCTION(__b64_ntop); \
+ COMMON_INTERCEPT_FUNCTION(__b64_pton);
+#else // SANITIZER_INTERCEPT___B64_TO
+#define INIT___B64_TO
+#endif // SANITIZER_INTERCEPT___B64_TO
+
+#if SANITIZER_INTERCEPT_DN_COMP_EXPAND
+# if __GLIBC_PREREQ(2, 34)
+// Changed with https://sourceware.org/git/?p=glibc.git;h=640bbdf
+# define DN_COMP_INTERCEPTOR_NAME dn_comp
+# define DN_EXPAND_INTERCEPTOR_NAME dn_expand
+# else
+# define DN_COMP_INTERCEPTOR_NAME __dn_comp
+# define DN_EXPAND_INTERCEPTOR_NAME __dn_expand
+# endif
+INTERCEPTOR(int, DN_COMP_INTERCEPTOR_NAME, unsigned char *exp_dn,
+ unsigned char *comp_dn, int length, unsigned char **dnptrs,
+ unsigned char **lastdnptr) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, DN_COMP_INTERCEPTOR_NAME, exp_dn, comp_dn,
+ length, dnptrs, lastdnptr);
+ int res = REAL(DN_COMP_INTERCEPTOR_NAME)(exp_dn, comp_dn, length, dnptrs,
+ lastdnptr);
+ if (res >= 0) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, comp_dn, res);
+ if (dnptrs && lastdnptr) {
+ unsigned char **p = dnptrs;
+ for (; p != lastdnptr && *p; ++p)
+ ;
+ if (p != lastdnptr)
+ ++p;
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dnptrs, (p - dnptrs) * sizeof(*p));
+ }
+ }
+ return res;
+}
+INTERCEPTOR(int, DN_EXPAND_INTERCEPTOR_NAME, unsigned char const *base,
+ unsigned char const *end, unsigned char const *src, char *dest,
+ int space) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, DN_EXPAND_INTERCEPTOR_NAME, base, end, src,
+ dest, space);
+ // TODO: add a read check if a __dn_comp interceptor is added
+ int res = REAL(DN_EXPAND_INTERCEPTOR_NAME)(base, end, src, dest, space);
+ if (res >= 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, internal_strlen(dest) + 1);
+ return res;
+}
+# define INIT_DN_COMP_EXPAND \
+ COMMON_INTERCEPT_FUNCTION(DN_COMP_INTERCEPTOR_NAME); \
+ COMMON_INTERCEPT_FUNCTION(DN_EXPAND_INTERCEPTOR_NAME);
+#else // SANITIZER_INTERCEPT_DN_COMP_EXPAND
+# define INIT_DN_COMP_EXPAND
+#endif // SANITIZER_INTERCEPT_DN_COMP_EXPAND
+
+#if SANITIZER_INTERCEPT_POSIX_SPAWN
+
+template <class RealSpawnPtr>
+static int PosixSpawnImpl(void *ctx, RealSpawnPtr *real_posix_spawn, pid_t *pid,
+ const char *file_or_path, const void *file_actions,
+ const void *attrp, char *const argv[],
+ char *const envp[]) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, file_or_path,
+ internal_strlen(file_or_path) + 1);
+ if (argv) {
+ for (char *const *s = argv; ; ++s) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s, sizeof(*s));
+ if (!*s) break;
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, *s, internal_strlen(*s) + 1);
+ }
+ }
+ if (envp) {
+ for (char *const *s = envp; ; ++s) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s, sizeof(*s));
+ if (!*s) break;
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, *s, internal_strlen(*s) + 1);
+ }
+ }
+ int res =
+ real_posix_spawn(pid, file_or_path, file_actions, attrp, argv, envp);
+ if (res == 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pid, sizeof(*pid));
+ return res;
+}
+INTERCEPTOR(int, posix_spawn, pid_t *pid, const char *path,
+ const void *file_actions, const void *attrp, char *const argv[],
+ char *const envp[]) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, posix_spawn, pid, path, file_actions, attrp,
+ argv, envp);
+ return PosixSpawnImpl(ctx, REAL(posix_spawn), pid, path, file_actions, attrp,
+ argv, envp);
+}
+INTERCEPTOR(int, posix_spawnp, pid_t *pid, const char *file,
+ const void *file_actions, const void *attrp, char *const argv[],
+ char *const envp[]) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, posix_spawnp, pid, file, file_actions, attrp,
+ argv, envp);
+ return PosixSpawnImpl(ctx, REAL(posix_spawnp), pid, file, file_actions, attrp,
+ argv, envp);
+}
+# define INIT_POSIX_SPAWN \
+ COMMON_INTERCEPT_FUNCTION(posix_spawn); \
+ COMMON_INTERCEPT_FUNCTION(posix_spawnp);
+#else // SANITIZER_INTERCEPT_POSIX_SPAWN
+# define INIT_POSIX_SPAWN
+#endif // SANITIZER_INTERCEPT_POSIX_SPAWN
+
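+// Editor's note: PosixSpawnImpl walks argv and envp as NULL-terminated
+// pointer arrays, checking each slot before dereferencing it; e.g. for
+//   char *argv[] = {strdup("ls"), strdup("-l"), nullptr};
+//   posix_spawn(&pid, "/bin/ls", nullptr, nullptr, argv, environ);
+// every array slot and both strings must be addressable and initialized.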
#if SANITIZER_INTERCEPT_WAIT
// According to sys/wait.h, wait(), waitid(), waitpid() may have symbol version
// suffixes on Darwin. See the declaration of INTERCEPTOR_WITH_SUFFIX for
@@ -2519,7 +2648,7 @@ INTERCEPTOR(char *, inet_ntop, int af, const void *src, char *dst, u32 size) {
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
char *res = REAL(inet_ntop)(af, src, dst, size);
- if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);
return res;
}
INTERCEPTOR(int, inet_pton, int af, const char *src, void *dst) {
@@ -2548,7 +2677,7 @@ INTERCEPTOR(int, inet_pton, int af, const char *src, void *dst) {
INTERCEPTOR(int, inet_aton, const char *cp, void *dst) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, inet_aton, cp, dst);
- if (cp) COMMON_INTERCEPTOR_READ_RANGE(ctx, cp, REAL(strlen)(cp) + 1);
+ if (cp) COMMON_INTERCEPTOR_READ_RANGE(ctx, cp, internal_strlen(cp) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
@@ -2590,9 +2719,9 @@ INTERCEPTOR(int, getaddrinfo, char *node, char *service,
struct __sanitizer_addrinfo **out) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getaddrinfo, node, service, hints, out);
- if (node) COMMON_INTERCEPTOR_READ_RANGE(ctx, node, REAL(strlen)(node) + 1);
+ if (node) COMMON_INTERCEPTOR_READ_RANGE(ctx, node, internal_strlen(node) + 1);
if (service)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, service, REAL(strlen)(service) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, service, internal_strlen(service) + 1);
if (hints)
COMMON_INTERCEPTOR_READ_RANGE(ctx, hints, sizeof(__sanitizer_addrinfo));
// FIXME: under ASan the call below may write to freed memory and corrupt
@@ -2608,7 +2737,7 @@ INTERCEPTOR(int, getaddrinfo, char *node, char *service,
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ai_addr, p->ai_addrlen);
if (p->ai_canonname)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ai_canonname,
- REAL(strlen)(p->ai_canonname) + 1);
+ internal_strlen(p->ai_canonname) + 1);
p = p->ai_next;
}
}
@@ -2634,9 +2763,9 @@ INTERCEPTOR(int, getnameinfo, void *sockaddr, unsigned salen, char *host,
REAL(getnameinfo)(sockaddr, salen, host, hostlen, serv, servlen, flags);
if (res == 0) {
if (host && hostlen)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, host, REAL(strlen)(host) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, host, internal_strlen(host) + 1);
if (serv && servlen)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, serv, REAL(strlen)(serv) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, serv, internal_strlen(serv) + 1);
}
return res;
}
@@ -2646,17 +2775,20 @@ INTERCEPTOR(int, getnameinfo, void *sockaddr, unsigned salen, char *host,
#endif
#if SANITIZER_INTERCEPT_GETSOCKNAME
-INTERCEPTOR(int, getsockname, int sock_fd, void *addr, int *addrlen) {
+INTERCEPTOR(int, getsockname, int sock_fd, void *addr, unsigned *addrlen) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getsockname, sock_fd, addr, addrlen);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, addrlen, sizeof(*addrlen));
- int addrlen_in = *addrlen;
+ unsigned addr_sz;
+ if (addrlen) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, addrlen, sizeof(*addrlen));
+ addr_sz = *addrlen;
+ }
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
int res = REAL(getsockname)(sock_fd, addr, addrlen);
- if (res == 0) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, Min(addrlen_in, *addrlen));
+ if (!res && addr && addrlen) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, Min(addr_sz, *addrlen));
}
return res;
}
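// Editor's note: getsockname truncates the address to the caller's buffer
// but may still report the untruncated length through *addrlen, hence the
// Min(addr_sz, *addrlen) above: only bytes actually written are unpoisoned.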
@@ -2669,10 +2801,10 @@ INTERCEPTOR(int, getsockname, int sock_fd, void *addr, int *addrlen) {
static void write_hostent(void *ctx, struct __sanitizer_hostent *h) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h, sizeof(__sanitizer_hostent));
if (h->h_name)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h->h_name, REAL(strlen)(h->h_name) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h->h_name, internal_strlen(h->h_name) + 1);
char **p = h->h_aliases;
while (*p) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, REAL(strlen)(*p) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, internal_strlen(*p) + 1);
++p;
}
COMMON_INTERCEPTOR_WRITE_RANGE(
@@ -3161,13 +3293,17 @@ INTERCEPTOR(int, getpeername, int sockfd, void *addr, unsigned *addrlen) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getpeername, sockfd, addr, addrlen);
unsigned addr_sz;
- if (addrlen) addr_sz = *addrlen;
+ if (addrlen) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, addrlen, sizeof(*addrlen));
+ addr_sz = *addrlen;
+ }
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
int res = REAL(getpeername)(sockfd, addr, addrlen);
- if (!res && addr && addrlen)
+ if (!res && addr && addrlen) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, Min(addr_sz, *addrlen));
+ }
return res;
}
#define INIT_GETPEERNAME COMMON_INTERCEPT_FUNCTION(getpeername);
@@ -3196,7 +3332,7 @@ INTERCEPTOR(int, sysinfo, void *info) {
INTERCEPTOR(__sanitizer_dirent *, opendir, const char *path) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, opendir, path);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
__sanitizer_dirent *res = REAL(opendir)(path);
if (res)
COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path);
@@ -3210,7 +3346,8 @@ INTERCEPTOR(__sanitizer_dirent *, readdir, void *dirp) {
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
__sanitizer_dirent *res = REAL(readdir)(dirp);
- if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, res->d_reclen);
+ if (res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, __sanitizer_dirsiz(res));
return res;
}
@@ -3225,7 +3362,7 @@ INTERCEPTOR(int, readdir_r, void *dirp, __sanitizer_dirent *entry,
if (!res) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
if (*result)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *result, (*result)->d_reclen);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *result, __sanitizer_dirsiz(*result));
}
return res;
}
@@ -3246,7 +3383,8 @@ INTERCEPTOR(__sanitizer_dirent64 *, readdir64, void *dirp) {
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
__sanitizer_dirent64 *res = REAL(readdir64)(dirp);
- if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, res->d_reclen);
+ if (res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, __sanitizer_dirsiz(res));
return res;
}
@@ -3261,7 +3399,7 @@ INTERCEPTOR(int, readdir64_r, void *dirp, __sanitizer_dirent64 *entry,
if (!res) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
if (*result)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *result, (*result)->d_reclen);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *result, __sanitizer_dirsiz(*result));
}
return res;
}
@@ -3351,10 +3489,10 @@ INTERCEPTOR(char *, setlocale, int category, char *locale) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, setlocale, category, locale);
if (locale)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, locale, REAL(strlen)(locale) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, locale, internal_strlen(locale) + 1);
char *res = REAL(setlocale)(category, locale);
if (res) {
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);
unpoison_ctype_arrays(ctx);
}
return res;
@@ -3373,7 +3511,7 @@ INTERCEPTOR(char *, getcwd, char *buf, SIZE_T size) {
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
char *res = REAL(getcwd)(buf, size);
- if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);
return res;
}
#define INIT_GETCWD COMMON_INTERCEPT_FUNCTION(getcwd);
@@ -3389,7 +3527,7 @@ INTERCEPTOR(char *, get_current_dir_name, int fake) {
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
char *res = REAL(get_current_dir_name)(fake);
- if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);
return res;
}
@@ -3429,30 +3567,26 @@ UNUSED static inline void StrtolFixAndCheck(void *ctx, const char *nptr,
(real_endptr - nptr) + 1 : 0);
}
-
#if SANITIZER_INTERCEPT_STRTOIMAX
-INTERCEPTOR(INTMAX_T, strtoimax, const char *nptr, char **endptr, int base) {
- void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, strtoimax, nptr, endptr, base);
- // FIXME: under ASan the call below may write to freed memory and corrupt
- // its metadata. See
- // https://github.com/google/sanitizers/issues/321.
+template <typename Fn>
+static ALWAYS_INLINE auto StrtoimaxImpl(void *ctx, Fn real, const char *nptr,
+ char **endptr, int base)
+ -> decltype(real(nullptr, nullptr, 0)) {
char *real_endptr;
- INTMAX_T res = REAL(strtoimax)(nptr, &real_endptr, base);
+ auto res = real(nptr, &real_endptr, base);
StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
return res;
}
+INTERCEPTOR(INTMAX_T, strtoimax, const char *nptr, char **endptr, int base) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strtoimax, nptr, endptr, base);
+ return StrtoimaxImpl(ctx, REAL(strtoimax), nptr, endptr, base);
+}
INTERCEPTOR(UINTMAX_T, strtoumax, const char *nptr, char **endptr, int base) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strtoumax, nptr, endptr, base);
- // FIXME: under ASan the call below may write to freed memory and corrupt
- // its metadata. See
- // https://github.com/google/sanitizers/issues/321.
- char *real_endptr;
- UINTMAX_T res = REAL(strtoumax)(nptr, &real_endptr, base);
- StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
- return res;
+ return StrtoimaxImpl(ctx, REAL(strtoumax), nptr, endptr, base);
}
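// Editor's note: the trailing return type decltype(real(nullptr, nullptr, 0))
// lets StrtoimaxImpl serve both strtoimax (INTMAX_T) and strtoumax
// (UINTMAX_T) without duplicating the endptr fix-up logic.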
#define INIT_STRTOIMAX \
@@ -3462,6 +3596,25 @@ INTERCEPTOR(UINTMAX_T, strtoumax, const char *nptr, char **endptr, int base) {
#define INIT_STRTOIMAX
#endif
+#if SANITIZER_INTERCEPT_STRTOIMAX && SANITIZER_GLIBC
+INTERCEPTOR(INTMAX_T, __isoc23_strtoimax, const char *nptr, char **endptr, int base) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __isoc23_strtoimax, nptr, endptr, base);
+ return StrtoimaxImpl(ctx, REAL(__isoc23_strtoimax), nptr, endptr, base);
+}
+INTERCEPTOR(UINTMAX_T, __isoc23_strtoumax, const char *nptr, char **endptr, int base) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __isoc23_strtoumax, nptr, endptr, base);
+ return StrtoimaxImpl(ctx, REAL(__isoc23_strtoumax), nptr, endptr, base);
+}
+
+# define INIT_STRTOIMAX_C23 \
+ COMMON_INTERCEPT_FUNCTION(__isoc23_strtoimax); \
+ COMMON_INTERCEPT_FUNCTION(__isoc23_strtoumax);
+#else
+# define INIT_STRTOIMAX_C23
+#endif
+
#if SANITIZER_INTERCEPT_MBSTOWCS
INTERCEPTOR(SIZE_T, mbstowcs, wchar_t *dest, const char *src, SIZE_T len) {
void *ctx;
@@ -3663,7 +3816,7 @@ INTERCEPTOR(int, tcgetattr, int fd, void *termios_p) {
INTERCEPTOR(char *, realpath, const char *path, char *resolved_path) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, realpath, path, resolved_path);
- if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
// Work around a bug in glibc where dlsym(RTLD_NEXT, ...) returns the oldest
// version of a versioned symbol. For realpath(), this gives us something
@@ -3674,11 +3827,12 @@ INTERCEPTOR(char *, realpath, const char *path, char *resolved_path) {
allocated_path = resolved_path = (char *)WRAP(malloc)(path_max + 1);
char *res = REAL(realpath)(path, resolved_path);
- if (allocated_path && !res) WRAP(free)(allocated_path);
- if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ if (allocated_path && !res)
+ WRAP(free)(allocated_path);
+ if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);
return res;
}
-#define INIT_REALPATH COMMON_INTERCEPT_FUNCTION(realpath);
+# define INIT_REALPATH COMMON_INTERCEPT_FUNCTION(realpath);
#else
#define INIT_REALPATH
#endif
@@ -3687,9 +3841,9 @@ INTERCEPTOR(char *, realpath, const char *path, char *resolved_path) {
INTERCEPTOR(char *, canonicalize_file_name, const char *path) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, canonicalize_file_name, path);
- if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
char *res = REAL(canonicalize_file_name)(path);
- if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);
return res;
}
#define INIT_CANONICALIZE_FILE_NAME \
@@ -3750,7 +3904,7 @@ INTERCEPTOR(char *, strerror, int errnum) {
COMMON_INTERCEPTOR_ENTER(ctx, strerror, errnum);
COMMON_INTERCEPTOR_STRERROR();
char *res = REAL(strerror)(errnum);
- if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+ if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);
return res;
}
#define INIT_STRERROR COMMON_INTERCEPT_FUNCTION(strerror);
@@ -3765,7 +3919,7 @@ INTERCEPTOR(char *, strerror, int errnum) {
// * GNU version returns message pointer, which points to either buf or some
// static storage.
#if ((_POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600) && !_GNU_SOURCE) || \
- SANITIZER_MAC || SANITIZER_ANDROID || SANITIZER_NETBSD || \
+ SANITIZER_APPLE || SANITIZER_ANDROID || SANITIZER_NETBSD || \
SANITIZER_FREEBSD
// POSIX version. Spec is not clear on whether buf is NULL-terminated.
// At least on OSX, buf contents are valid even when the call fails.
@@ -3792,13 +3946,13 @@ INTERCEPTOR(char *, strerror_r, int errnum, char *buf, SIZE_T buflen) {
// https://github.com/google/sanitizers/issues/321.
char *res = REAL(strerror_r)(errnum, buf, buflen);
if (res == buf)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);
else
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);
return res;
}
#endif //(_POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600) && !_GNU_SOURCE ||
- //SANITIZER_MAC
+ //SANITIZER_APPLE
#define INIT_STRERROR_R COMMON_INTERCEPT_FUNCTION(strerror_r);
#else
#define INIT_STRERROR_R
@@ -3814,7 +3968,7 @@ INTERCEPTOR(int, __xpg_strerror_r, int errnum, char *buf, SIZE_T buflen) {
int res = REAL(__xpg_strerror_r)(errnum, buf, buflen);
// This version always returns a null-terminated string.
if (buf && buflen)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, REAL(strlen)(buf) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, internal_strlen(buf) + 1);
return res;
}
#define INIT_XPG_STRERROR_R COMMON_INTERCEPT_FUNCTION(__xpg_strerror_r);
@@ -3832,7 +3986,7 @@ static THREADLOCAL scandir_compar_f scandir_compar;
static int wrapped_scandir_filter(const struct __sanitizer_dirent *dir) {
COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(dir, dir->d_reclen);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(dir, __sanitizer_dirsiz(dir));
return scandir_filter(dir);
}
@@ -3840,9 +3994,9 @@ static int wrapped_scandir_compar(const struct __sanitizer_dirent **a,
const struct __sanitizer_dirent **b) {
COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
COMMON_INTERCEPTOR_INITIALIZE_RANGE(a, sizeof(*a));
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(*a, (*a)->d_reclen);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(*a, __sanitizer_dirsiz(*a));
COMMON_INTERCEPTOR_INITIALIZE_RANGE(b, sizeof(*b));
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(*b, (*b)->d_reclen);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(*b, __sanitizer_dirsiz(*b));
return scandir_compar(a, b);
}
@@ -3850,7 +4004,7 @@ INTERCEPTOR(int, scandir, char *dirp, __sanitizer_dirent ***namelist,
scandir_filter_f filter, scandir_compar_f compar) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, scandir, dirp, namelist, filter, compar);
- if (dirp) COMMON_INTERCEPTOR_READ_RANGE(ctx, dirp, REAL(strlen)(dirp) + 1);
+ if (dirp) COMMON_INTERCEPTOR_READ_RANGE(ctx, dirp, internal_strlen(dirp) + 1);
scandir_filter = filter;
scandir_compar = compar;
// FIXME: under ASan the call below may write to freed memory and corrupt
@@ -3866,7 +4020,7 @@ INTERCEPTOR(int, scandir, char *dirp, __sanitizer_dirent ***namelist,
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *namelist, sizeof(**namelist) * res);
for (int i = 0; i < res; ++i)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, (*namelist)[i],
- (*namelist)[i]->d_reclen);
+ __sanitizer_dirsiz((*namelist)[i]));
}
return res;
}
@@ -3885,7 +4039,7 @@ static THREADLOCAL scandir64_compar_f scandir64_compar;
static int wrapped_scandir64_filter(const struct __sanitizer_dirent64 *dir) {
COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(dir, dir->d_reclen);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(dir, __sanitizer_dirsiz(dir));
return scandir64_filter(dir);
}
@@ -3893,9 +4047,9 @@ static int wrapped_scandir64_compar(const struct __sanitizer_dirent64 **a,
const struct __sanitizer_dirent64 **b) {
COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
COMMON_INTERCEPTOR_INITIALIZE_RANGE(a, sizeof(*a));
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(*a, (*a)->d_reclen);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(*a, __sanitizer_dirsiz(*a));
COMMON_INTERCEPTOR_INITIALIZE_RANGE(b, sizeof(*b));
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(*b, (*b)->d_reclen);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(*b, __sanitizer_dirsiz(*b));
return scandir64_compar(a, b);
}
@@ -3903,7 +4057,7 @@ INTERCEPTOR(int, scandir64, char *dirp, __sanitizer_dirent64 ***namelist,
scandir64_filter_f filter, scandir64_compar_f compar) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, scandir64, dirp, namelist, filter, compar);
- if (dirp) COMMON_INTERCEPTOR_READ_RANGE(ctx, dirp, REAL(strlen)(dirp) + 1);
+ if (dirp) COMMON_INTERCEPTOR_READ_RANGE(ctx, dirp, internal_strlen(dirp) + 1);
scandir64_filter = filter;
scandir64_compar = compar;
// FIXME: under ASan the call below may write to freed memory and corrupt
@@ -3920,7 +4074,7 @@ INTERCEPTOR(int, scandir64, char *dirp, __sanitizer_dirent64 ***namelist,
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *namelist, sizeof(**namelist) * res);
for (int i = 0; i < res; ++i)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, (*namelist)[i],
- (*namelist)[i]->d_reclen);
+ __sanitizer_dirsiz((*namelist)[i]));
}
return res;
}
@@ -3999,19 +4153,20 @@ INTERCEPTOR(int, ppoll, __sanitizer_pollfd *fds, __sanitizer_nfds_t nfds,
INTERCEPTOR(int, wordexp, char *s, __sanitizer_wordexp_t *p, int flags) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, wordexp, s, p, flags);
- if (s) COMMON_INTERCEPTOR_READ_RANGE(ctx, s, REAL(strlen)(s) + 1);
+ if (s) COMMON_INTERCEPTOR_READ_RANGE(ctx, s, internal_strlen(s) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
int res = REAL(wordexp)(s, p, flags);
if (!res && p) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
- if (p->we_wordc)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->we_wordv,
- sizeof(*p->we_wordv) * p->we_wordc);
- for (uptr i = 0; i < p->we_wordc; ++i) {
+ uptr we_wordc =
+ ((flags & wordexp_wrde_dooffs) ? p->we_offs : 0) + p->we_wordc;
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->we_wordv,
+ sizeof(*p->we_wordv) * (we_wordc + 1));
+ for (uptr i = 0; i < we_wordc; ++i) {
char *w = p->we_wordv[i];
- if (w) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, w, REAL(strlen)(w) + 1);
+ if (w) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, w, internal_strlen(w) + 1);
}
}
return res;
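
The recomputed word count handles WRDE_DOOFFS: with that flag, we_wordv starts with we_offs reserved NULL slots before the we_wordc words, and the array carries a trailing NULL, hence the "+ 1" in the annotated range. For reference, the flag's contract looks like this:

    #include <stdio.h>
    #include <wordexp.h>

    int main() {
      wordexp_t we;
      we.we_offs = 2;  // reserve two leading NULL slots
      if (wordexp("foo bar", &we, WRDE_DOOFFS) == 0) {
        // we_wordv[0..we_offs-1] are NULL; words follow, then a NULL.
        for (size_t i = we.we_offs; i < we.we_offs + we.we_wordc; ++i)
          printf("%s\n", we.we_wordv[i]);
        wordfree(&we);
      }
    }
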
@@ -4196,12 +4351,16 @@ INTERCEPTOR(int, pthread_sigmask, int how, __sanitizer_sigset_t *set,
INTERCEPTOR(int, backtrace, void **buffer, int size) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, backtrace, buffer, size);
- // FIXME: under ASan the call below may write to freed memory and corrupt
- // its metadata. See
- // https://github.com/google/sanitizers/issues/321.
- int res = REAL(backtrace)(buffer, size);
- if (res && buffer)
+ // 'buffer' might be freed memory, hence it is unsafe to directly call
+ // REAL(backtrace)(buffer, size). Instead, we use our own known-good
+ // scratch buffer.
+ void **scratch = (void**)InternalAlloc(sizeof(void*) * size);
+ int res = REAL(backtrace)(scratch, size);
+ if (res && buffer) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buffer, res * sizeof(*buffer));
+ internal_memcpy(buffer, scratch, res * sizeof(*buffer));
+ }
+ InternalFree(scratch);
return res;
}
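
The scratch-buffer rewrite is a general interceptor pattern: let libc write only into memory the runtime owns, then copy into the caller's pointer with an instrumented copy, so a stale 'buffer' is caught and reported instead of silently corrupting allocator metadata. A standalone sketch of the pattern, using plain malloc where the interceptor uses InternalAlloc:

    #include <execinfo.h>
    #include <stdlib.h>
    #include <string.h>

    // Illustrative: stage libc's output through a known-good buffer.
    int checked_backtrace(void **user_buf, int size) {
      void **scratch = (void **)malloc(sizeof(void *) * size);
      int n = backtrace(scratch, size);
      if (n > 0 && user_buf)
        memcpy(user_buf, scratch, n * sizeof(void *));  // instrumented copy
      free(scratch);
      return n;
    }
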
@@ -4210,14 +4369,13 @@ INTERCEPTOR(char **, backtrace_symbols, void **buffer, int size) {
COMMON_INTERCEPTOR_ENTER(ctx, backtrace_symbols, buffer, size);
if (buffer && size)
COMMON_INTERCEPTOR_READ_RANGE(ctx, buffer, size * sizeof(*buffer));
- // FIXME: under ASan the call below may write to freed memory and corrupt
- // its metadata. See
- // https://github.com/google/sanitizers/issues/321.
+ // The COMMON_INTERCEPTOR_READ_RANGE above ensures that 'buffer' is
+ // valid for reading.
char **res = REAL(backtrace_symbols)(buffer, size);
if (res && size) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, size * sizeof(*res));
for (int i = 0; i < size; ++i)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res[i], REAL(strlen(res[i])) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res[i], internal_strlen(res[i]) + 1);
}
return res;
}
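
A detail worth remembering when annotating backtrace_symbols: the returned array and all the strings it points to live in a single malloc'd block, so the caller frees only the array pointer. Typical usage:

    #include <execinfo.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main() {
      void *frames[16];
      int n = backtrace(frames, 16);
      char **names = backtrace_symbols(frames, n);  // one malloc'd block
      if (names) {
        for (int i = 0; i < n; ++i)
          puts(names[i]);
        free(names);  // frees the strings as well
      }
    }
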
@@ -4243,90 +4401,13 @@ INTERCEPTOR(void, _exit, int status) {
#define INIT__EXIT
#endif
-#if SANITIZER_INTERCEPT_PTHREAD_MUTEX
-INTERCEPTOR(int, pthread_mutex_lock, void *m) {
- void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, pthread_mutex_lock, m);
- COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m);
- int res = REAL(pthread_mutex_lock)(m);
- if (res == errno_EOWNERDEAD)
- COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m);
- if (res == 0 || res == errno_EOWNERDEAD)
- COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m);
- if (res == errno_EINVAL)
- COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m);
- return res;
-}
-
-INTERCEPTOR(int, pthread_mutex_unlock, void *m) {
- void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, pthread_mutex_unlock, m);
- COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m);
- int res = REAL(pthread_mutex_unlock)(m);
- if (res == errno_EINVAL)
- COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m);
- return res;
-}
-
-#define INIT_PTHREAD_MUTEX_LOCK COMMON_INTERCEPT_FUNCTION(pthread_mutex_lock)
-#define INIT_PTHREAD_MUTEX_UNLOCK \
- COMMON_INTERCEPT_FUNCTION(pthread_mutex_unlock)
-#else
-#define INIT_PTHREAD_MUTEX_LOCK
-#define INIT_PTHREAD_MUTEX_UNLOCK
-#endif
-
-#if SANITIZER_INTERCEPT___PTHREAD_MUTEX
-INTERCEPTOR(int, __pthread_mutex_lock, void *m) {
- void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, __pthread_mutex_lock, m);
- COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m);
- int res = REAL(__pthread_mutex_lock)(m);
- if (res == errno_EOWNERDEAD)
- COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m);
- if (res == 0 || res == errno_EOWNERDEAD)
- COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m);
- if (res == errno_EINVAL)
- COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m);
- return res;
-}
-
-INTERCEPTOR(int, __pthread_mutex_unlock, void *m) {
- void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, __pthread_mutex_unlock, m);
- COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m);
- int res = REAL(__pthread_mutex_unlock)(m);
- if (res == errno_EINVAL)
- COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m);
- return res;
-}
-
-#define INIT___PTHREAD_MUTEX_LOCK \
- COMMON_INTERCEPT_FUNCTION(__pthread_mutex_lock)
-#define INIT___PTHREAD_MUTEX_UNLOCK \
- COMMON_INTERCEPT_FUNCTION(__pthread_mutex_unlock)
-#else
-#define INIT___PTHREAD_MUTEX_LOCK
-#define INIT___PTHREAD_MUTEX_UNLOCK
-#endif
-
#if SANITIZER_INTERCEPT___LIBC_MUTEX
-INTERCEPTOR(int, __libc_mutex_lock, void *m)
-ALIAS(WRAPPER_NAME(pthread_mutex_lock));
-
-INTERCEPTOR(int, __libc_mutex_unlock, void *m)
-ALIAS(WRAPPER_NAME(pthread_mutex_unlock));
-
INTERCEPTOR(int, __libc_thr_setcancelstate, int state, int *oldstate)
-ALIAS(WRAPPER_NAME(pthread_setcancelstate));
+ALIAS(WRAP(pthread_setcancelstate));
-#define INIT___LIBC_MUTEX_LOCK COMMON_INTERCEPT_FUNCTION(__libc_mutex_lock)
-#define INIT___LIBC_MUTEX_UNLOCK COMMON_INTERCEPT_FUNCTION(__libc_mutex_unlock)
#define INIT___LIBC_THR_SETCANCELSTATE \
COMMON_INTERCEPT_FUNCTION(__libc_thr_setcancelstate)
#else
-#define INIT___LIBC_MUTEX_LOCK
-#define INIT___LIBC_MUTEX_UNLOCK
#define INIT___LIBC_THR_SETCANCELSTATE
#endif
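
ALIAS(WRAP(...)) expands, via interception.h, to the GNU alias attribute: __libc_thr_setcancelstate becomes a second exported name for the pthread_setcancelstate wrapper rather than a separately compiled body. A standalone sketch of the mechanism (the names here are illustrative):

    // Illustrative: two exported names, one definition.
    extern "C" int impl(int x) { return x + 1; }
    extern "C" int second_name(int x) __attribute__((alias("impl")));
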
@@ -4335,16 +4416,16 @@ static void write_mntent(void *ctx, __sanitizer_mntent *mnt) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mnt, sizeof(*mnt));
if (mnt->mnt_fsname)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mnt->mnt_fsname,
- REAL(strlen)(mnt->mnt_fsname) + 1);
+ internal_strlen(mnt->mnt_fsname) + 1);
if (mnt->mnt_dir)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mnt->mnt_dir,
- REAL(strlen)(mnt->mnt_dir) + 1);
+ internal_strlen(mnt->mnt_dir) + 1);
if (mnt->mnt_type)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mnt->mnt_type,
- REAL(strlen)(mnt->mnt_type) + 1);
+ internal_strlen(mnt->mnt_type) + 1);
if (mnt->mnt_opts)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mnt->mnt_opts,
- REAL(strlen)(mnt->mnt_opts) + 1);
+ internal_strlen(mnt->mnt_opts) + 1);
}
#endif
@@ -4379,7 +4460,7 @@ INTERCEPTOR(__sanitizer_mntent *, getmntent_r, void *fp,
INTERCEPTOR(int, statfs, char *path, void *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, statfs, path, buf);
- if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
@@ -4408,7 +4489,7 @@ INTERCEPTOR(int, fstatfs, int fd, void *buf) {
INTERCEPTOR(int, statfs64, char *path, void *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, statfs64, path, buf);
- if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
@@ -4437,7 +4518,7 @@ INTERCEPTOR(int, fstatfs64, int fd, void *buf) {
INTERCEPTOR(int, statvfs, char *path, void *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, statvfs, path, buf);
- if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
@@ -4471,7 +4552,7 @@ INTERCEPTOR(int, fstatvfs, int fd, void *buf) {
INTERCEPTOR(int, statvfs64, char *path, void *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, statvfs64, path, buf);
- if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
@@ -4500,7 +4581,7 @@ INTERCEPTOR(int, fstatvfs64, int fd, void *buf) {
INTERCEPTOR(int, initgroups, char *user, u32 group) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, initgroups, user, group);
- if (user) COMMON_INTERCEPTOR_READ_RANGE(ctx, user, REAL(strlen)(user) + 1);
+ if (user) COMMON_INTERCEPTOR_READ_RANGE(ctx, user, internal_strlen(user) + 1);
int res = REAL(initgroups)(user, group);
return res;
}
@@ -4515,13 +4596,13 @@ INTERCEPTOR(char *, ether_ntoa, __sanitizer_ether_addr *addr) {
COMMON_INTERCEPTOR_ENTER(ctx, ether_ntoa, addr);
if (addr) COMMON_INTERCEPTOR_READ_RANGE(ctx, addr, sizeof(*addr));
char *res = REAL(ether_ntoa)(addr);
- if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+ if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);
return res;
}
INTERCEPTOR(__sanitizer_ether_addr *, ether_aton, char *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, ether_aton, buf);
- if (buf) COMMON_INTERCEPTOR_READ_RANGE(ctx, buf, REAL(strlen)(buf) + 1);
+ if (buf) COMMON_INTERCEPTOR_READ_RANGE(ctx, buf, internal_strlen(buf) + 1);
__sanitizer_ether_addr *res = REAL(ether_aton)(buf);
if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, sizeof(*res));
return res;
@@ -4543,14 +4624,14 @@ INTERCEPTOR(int, ether_ntohost, char *hostname, __sanitizer_ether_addr *addr) {
// https://github.com/google/sanitizers/issues/321.
int res = REAL(ether_ntohost)(hostname, addr);
if (!res && hostname)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, hostname, REAL(strlen)(hostname) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, hostname, internal_strlen(hostname) + 1);
return res;
}
INTERCEPTOR(int, ether_hostton, char *hostname, __sanitizer_ether_addr *addr) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, ether_hostton, hostname, addr);
if (hostname)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, hostname, REAL(strlen)(hostname) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, hostname, internal_strlen(hostname) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
@@ -4562,7 +4643,7 @@ INTERCEPTOR(int, ether_line, char *line, __sanitizer_ether_addr *addr,
char *hostname) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, ether_line, line, addr, hostname);
- if (line) COMMON_INTERCEPTOR_READ_RANGE(ctx, line, REAL(strlen)(line) + 1);
+ if (line) COMMON_INTERCEPTOR_READ_RANGE(ctx, line, internal_strlen(line) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
@@ -4570,7 +4651,7 @@ INTERCEPTOR(int, ether_line, char *line, __sanitizer_ether_addr *addr,
if (!res) {
if (addr) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, sizeof(*addr));
if (hostname)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, hostname, REAL(strlen)(hostname) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, hostname, internal_strlen(hostname) + 1);
}
return res;
}
@@ -4591,14 +4672,14 @@ INTERCEPTOR(char *, ether_ntoa_r, __sanitizer_ether_addr *addr, char *buf) {
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
char *res = REAL(ether_ntoa_r)(addr, buf);
- if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);
return res;
}
INTERCEPTOR(__sanitizer_ether_addr *, ether_aton_r, char *buf,
__sanitizer_ether_addr *addr) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, ether_aton_r, buf, addr);
- if (buf) COMMON_INTERCEPTOR_READ_RANGE(ctx, buf, REAL(strlen)(buf) + 1);
+ if (buf) COMMON_INTERCEPTOR_READ_RANGE(ctx, buf, internal_strlen(buf) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
@@ -4766,6 +4847,27 @@ INTERCEPTOR(int, pthread_attr_getaffinity_np, void *attr, SIZE_T cpusetsize,
#define INIT_PTHREAD_ATTR_GETAFFINITY_NP
#endif
+#if SANITIZER_INTERCEPT_PTHREAD_GETAFFINITY_NP
+INTERCEPTOR(int, pthread_getaffinity_np, void *attr, SIZE_T cpusetsize,
+ void *cpuset) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pthread_getaffinity_np, attr, cpusetsize,
+ cpuset);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(pthread_getaffinity_np)(attr, cpusetsize, cpuset);
+ if (!res && cpusetsize && cpuset)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cpuset, cpusetsize);
+ return res;
+}
+
+#define INIT_PTHREAD_GETAFFINITY_NP \
+ COMMON_INTERCEPT_FUNCTION(pthread_getaffinity_np);
+#else
+#define INIT_PTHREAD_GETAFFINITY_NP
+#endif
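
The new interceptor marks cpusetsize bytes of cpuset as written only when the call succeeds, since the mask contents are meaningful only on success. Typical call shape, for reference:

    #define _GNU_SOURCE 1
    #include <pthread.h>
    #include <sched.h>
    #include <stdio.h>

    int main() {
      cpu_set_t set;
      CPU_ZERO(&set);
      if (pthread_getaffinity_np(pthread_self(), sizeof(set), &set) == 0)
        printf("CPUs in affinity mask: %d\n", CPU_COUNT(&set));
    }
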
+
#if SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPSHARED
INTERCEPTOR_PTHREAD_MUTEXATTR_GET(pshared, sizeof(int))
#define INIT_PTHREAD_MUTEXATTR_GETPSHARED \
@@ -4864,9 +4966,9 @@ INTERCEPTOR(char *, tmpnam, char *s) {
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, s, REAL(strlen)(s) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, s, internal_strlen(s) + 1);
else
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);
}
return res;
}
@@ -4883,7 +4985,7 @@ INTERCEPTOR(char *, tmpnam_r, char *s) {
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
char *res = REAL(tmpnam_r)(s);
- if (res && s) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, s, REAL(strlen)(s) + 1);
+ if (res && s) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, s, internal_strlen(s) + 1);
return res;
}
#define INIT_TMPNAM_R COMMON_INTERCEPT_FUNCTION(tmpnam_r);
@@ -4897,7 +4999,7 @@ INTERCEPTOR(char *, ptsname, int fd) {
COMMON_INTERCEPTOR_ENTER(ctx, ptsname, fd);
char *res = REAL(ptsname)(fd);
if (res != nullptr)
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);
return res;
}
#define INIT_PTSNAME COMMON_INTERCEPT_FUNCTION(ptsname);
@@ -4911,7 +5013,7 @@ INTERCEPTOR(int, ptsname_r, int fd, char *name, SIZE_T namesize) {
COMMON_INTERCEPTOR_ENTER(ctx, ptsname_r, fd, name, namesize);
int res = REAL(ptsname_r)(fd, name, namesize);
if (res == 0)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, internal_strlen(name) + 1);
return res;
}
#define INIT_PTSNAME_R COMMON_INTERCEPT_FUNCTION(ptsname_r);
@@ -4925,7 +5027,7 @@ INTERCEPTOR(char *, ttyname, int fd) {
COMMON_INTERCEPTOR_ENTER(ctx, ttyname, fd);
char *res = REAL(ttyname)(fd);
if (res != nullptr)
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);
return res;
}
#define INIT_TTYNAME COMMON_INTERCEPT_FUNCTION(ttyname);
@@ -4939,7 +5041,7 @@ INTERCEPTOR(int, ttyname_r, int fd, char *name, SIZE_T namesize) {
COMMON_INTERCEPTOR_ENTER(ctx, ttyname_r, fd, name, namesize);
int res = REAL(ttyname_r)(fd, name, namesize);
if (res == 0)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, internal_strlen(name) + 1);
return res;
}
#define INIT_TTYNAME_R COMMON_INTERCEPT_FUNCTION(ttyname_r);
@@ -4951,10 +5053,10 @@ INTERCEPTOR(int, ttyname_r, int fd, char *name, SIZE_T namesize) {
INTERCEPTOR(char *, tempnam, char *dir, char *pfx) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, tempnam, dir, pfx);
- if (dir) COMMON_INTERCEPTOR_READ_RANGE(ctx, dir, REAL(strlen)(dir) + 1);
- if (pfx) COMMON_INTERCEPTOR_READ_RANGE(ctx, pfx, REAL(strlen)(pfx) + 1);
+ if (dir) COMMON_INTERCEPTOR_READ_RANGE(ctx, dir, internal_strlen(dir) + 1);
+ if (pfx) COMMON_INTERCEPTOR_READ_RANGE(ctx, pfx, internal_strlen(pfx) + 1);
char *res = REAL(tempnam)(dir, pfx);
- if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+ if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);
return res;
}
#define INIT_TEMPNAM COMMON_INTERCEPT_FUNCTION(tempnam);
@@ -5332,9 +5434,7 @@ INTERCEPTOR(void *, __tls_get_addr, void *arg) {
// On PowerPC, we also need to intercept __tls_get_addr_opt, which has
// mostly the same semantics as __tls_get_addr, but its presence enables
// some optimizations in the linker (which are safe to ignore here).
-extern "C" __attribute__((alias("__interceptor___tls_get_addr"),
- visibility("default")))
-void *__tls_get_addr_opt(void *arg);
+INTERCEPTOR(void *, __tls_get_addr_opt, void *arg) ALIAS(WRAP(__tls_get_addr));
#endif
#else // SANITIZER_S390
// On s390, we have to intercept two functions here:
@@ -5368,21 +5468,20 @@ INTERCEPTOR(uptr, __tls_get_addr_internal, void *arg) {
#if SANITIZER_S390 && \
(SANITIZER_INTERCEPT_TLS_GET_ADDR || SANITIZER_INTERCEPT_TLS_GET_OFFSET)
-extern "C" uptr __tls_get_offset(void *arg);
-extern "C" uptr __interceptor___tls_get_offset(void *arg);
// We need a hidden symbol aliasing the above, so that we can jump
// directly to it from the assembly below.
-extern "C" __attribute__((alias("__interceptor___tls_get_addr_internal"),
- visibility("hidden")))
-uptr __tls_get_addr_hidden(void *arg);
+extern "C" __attribute__((visibility("hidden"))) uptr __tls_get_addr_hidden(
+ void *arg) ALIAS(WRAP(__tls_get_addr_internal));
+extern "C" uptr __tls_get_offset(void *arg);
+extern "C" uptr TRAMPOLINE(__tls_get_offset)(void *arg);
+extern "C" uptr WRAP(__tls_get_offset)(void *arg);
// Now carefully intercept __tls_get_offset.
asm(
".text\n"
// The __interceptor_ version has to exist, so that gen_dynamic_list.py
// exports our symbol.
".weak __tls_get_offset\n"
- ".type __tls_get_offset, @function\n"
- "__tls_get_offset:\n"
+ ".set __tls_get_offset, __interceptor___tls_get_offset\n"
".global __interceptor___tls_get_offset\n"
".type __interceptor___tls_get_offset, @function\n"
"__interceptor___tls_get_offset:\n"
@@ -5414,7 +5513,7 @@ asm(
INTERCEPTOR(SSIZE_T, listxattr, const char *path, char *list, SIZE_T size) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, listxattr, path, list, size);
- if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
@@ -5427,7 +5526,7 @@ INTERCEPTOR(SSIZE_T, listxattr, const char *path, char *list, SIZE_T size) {
INTERCEPTOR(SSIZE_T, llistxattr, const char *path, char *list, SIZE_T size) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, llistxattr, path, list, size);
- if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
@@ -5458,8 +5557,8 @@ INTERCEPTOR(SSIZE_T, getxattr, const char *path, const char *name, char *value,
SIZE_T size) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getxattr, path, name, value, size);
- if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
- if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
+ if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
@@ -5471,8 +5570,8 @@ INTERCEPTOR(SSIZE_T, lgetxattr, const char *path, const char *name, char *value,
SIZE_T size) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, lgetxattr, path, name, value, size);
- if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
- if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
+ if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
@@ -5484,7 +5583,7 @@ INTERCEPTOR(SSIZE_T, fgetxattr, int fd, const char *name, char *value,
SIZE_T size) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, fgetxattr, fd, name, value, size);
- if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
@@ -5554,7 +5653,7 @@ INTERCEPTOR(int, getifaddrs, __sanitizer_ifaddrs **ifap) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(__sanitizer_ifaddrs));
if (p->ifa_name)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ifa_name,
- REAL(strlen)(p->ifa_name) + 1);
+ internal_strlen(p->ifa_name) + 1);
if (p->ifa_addr)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ifa_addr, struct_sockaddr_sz);
if (p->ifa_netmask)
@@ -5584,14 +5683,14 @@ INTERCEPTOR(char *, if_indextoname, unsigned int ifindex, char* ifname) {
// https://github.com/google/sanitizers/issues/321.
char *res = REAL(if_indextoname)(ifindex, ifname);
if (res && ifname)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ifname, REAL(strlen)(ifname) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ifname, internal_strlen(ifname) + 1);
return res;
}
INTERCEPTOR(unsigned int, if_nametoindex, const char* ifname) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, if_nametoindex, ifname);
if (ifname)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, ifname, REAL(strlen)(ifname) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, ifname, internal_strlen(ifname) + 1);
return REAL(if_nametoindex)(ifname);
}
#define INIT_IF_INDEXTONAME \
@@ -5611,8 +5710,10 @@ INTERCEPTOR(int, capget, void *hdrp, void *datap) {
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
int res = REAL(capget)(hdrp, datap);
- if (res == 0 && datap)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, datap, __user_cap_data_struct_sz);
+ if (res == 0 && datap) {
+ unsigned datasz = __user_cap_data_struct_sz(hdrp);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, datap, datasz);
+ }
// We can also return -1 and write to hdrp->version if the version passed in
// hdrp->version is unsupported. But that's not a trivial condition to check,
// and anyway COMMON_INTERCEPTOR_READ_RANGE protects us to some extent.
@@ -5623,8 +5724,10 @@ INTERCEPTOR(int, capset, void *hdrp, const void *datap) {
COMMON_INTERCEPTOR_ENTER(ctx, capset, hdrp, datap);
if (hdrp)
COMMON_INTERCEPTOR_READ_RANGE(ctx, hdrp, __user_cap_header_struct_sz);
- if (datap)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, datap, __user_cap_data_struct_sz);
+ if (datap) {
+ unsigned datasz = __user_cap_data_struct_sz(hdrp);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, datap, datasz);
+ }
return REAL(capset)(hdrp, datap);
}
#define INIT_CAPGET \
@@ -5634,105 +5737,6 @@ INTERCEPTOR(int, capset, void *hdrp, const void *datap) {
#define INIT_CAPGET
#endif
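
Making the datap size depend on hdrp is the point of the capget/capset hunk above: Linux capability version 1 carries a single __user_cap_data_struct, while versions 2 and 3 carry an array of two, so any fixed size is wrong for one of them. A sketch of version-dependent sizing with the constants from <linux/capability.h> (the helper body is an assumption; only its role comes from the diff):

    #include <linux/capability.h>

    // Assumption: approximates what __user_cap_data_struct_sz(hdrp)
    // computes in the runtime.
    static unsigned cap_data_size(const void *hdrp) {
      const struct __user_cap_header_struct *h =
          (const struct __user_cap_header_struct *)hdrp;
      int n = (h && h->version == _LINUX_CAPABILITY_VERSION_1)
                  ? _LINUX_CAPABILITY_U32S_1   // one data struct
                  : _LINUX_CAPABILITY_U32S_3;  // two (versions 2 and 3)
      return n * sizeof(struct __user_cap_data_struct);
    }
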
-#if SANITIZER_INTERCEPT_AEABI_MEM
-INTERCEPTOR(void *, __aeabi_memmove, void *to, const void *from, uptr size) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
-}
-
-INTERCEPTOR(void *, __aeabi_memmove4, void *to, const void *from, uptr size) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
-}
-
-INTERCEPTOR(void *, __aeabi_memmove8, void *to, const void *from, uptr size) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
-}
-
-INTERCEPTOR(void *, __aeabi_memcpy, void *to, const void *from, uptr size) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
-}
-
-INTERCEPTOR(void *, __aeabi_memcpy4, void *to, const void *from, uptr size) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
-}
-
-INTERCEPTOR(void *, __aeabi_memcpy8, void *to, const void *from, uptr size) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
-}
-
-// Note the argument order.
-INTERCEPTOR(void *, __aeabi_memset, void *block, uptr size, int c) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
-}
-
-INTERCEPTOR(void *, __aeabi_memset4, void *block, uptr size, int c) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
-}
-
-INTERCEPTOR(void *, __aeabi_memset8, void *block, uptr size, int c) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
-}
-
-INTERCEPTOR(void *, __aeabi_memclr, void *block, uptr size) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
-}
-
-INTERCEPTOR(void *, __aeabi_memclr4, void *block, uptr size) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
-}
-
-INTERCEPTOR(void *, __aeabi_memclr8, void *block, uptr size) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
-}
-
-#define INIT_AEABI_MEM \
- COMMON_INTERCEPT_FUNCTION(__aeabi_memmove); \
- COMMON_INTERCEPT_FUNCTION(__aeabi_memmove4); \
- COMMON_INTERCEPT_FUNCTION(__aeabi_memmove8); \
- COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy); \
- COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy4); \
- COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy8); \
- COMMON_INTERCEPT_FUNCTION(__aeabi_memset); \
- COMMON_INTERCEPT_FUNCTION(__aeabi_memset4); \
- COMMON_INTERCEPT_FUNCTION(__aeabi_memset8); \
- COMMON_INTERCEPT_FUNCTION(__aeabi_memclr); \
- COMMON_INTERCEPT_FUNCTION(__aeabi_memclr4); \
- COMMON_INTERCEPT_FUNCTION(__aeabi_memclr8);
-#else
-#define INIT_AEABI_MEM
-#endif // SANITIZER_INTERCEPT_AEABI_MEM
-
-#if SANITIZER_INTERCEPT___BZERO
-INTERCEPTOR(void *, __bzero, void *block, uptr size) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
-}
-#define INIT___BZERO COMMON_INTERCEPT_FUNCTION(__bzero);
-#else
-#define INIT___BZERO
-#endif // SANITIZER_INTERCEPT___BZERO
-
-#if SANITIZER_INTERCEPT_BZERO
-INTERCEPTOR(void *, bzero, void *block, uptr size) {
- void *ctx;
- COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
-}
-#define INIT_BZERO COMMON_INTERCEPT_FUNCTION(bzero);
-#else
-#define INIT_BZERO
-#endif // SANITIZER_INTERCEPT_BZERO
-
#if SANITIZER_INTERCEPT_FTIME
INTERCEPTOR(int, ftime, __sanitizer_timeb *tp) {
void *ctx;
@@ -5849,7 +5853,7 @@ INTERCEPTOR(int, xdr_string, __sanitizer_XDR *xdrs, char **p,
COMMON_INTERCEPTOR_ENTER(ctx, xdr_string, xdrs, p, maxsize);
if (p && xdrs->x_op == __sanitizer_XDR_ENCODE) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, p, sizeof(*p));
- COMMON_INTERCEPTOR_READ_RANGE(ctx, *p, REAL(strlen)(*p) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, *p, internal_strlen(*p) + 1);
}
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
@@ -5858,7 +5862,7 @@ INTERCEPTOR(int, xdr_string, __sanitizer_XDR *xdrs, char **p,
if (p && xdrs->x_op == __sanitizer_XDR_DECODE) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
if (res && *p)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, REAL(strlen)(*p) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, internal_strlen(*p) + 1);
}
return res;
}
@@ -6069,8 +6073,8 @@ INTERCEPTOR(int, __woverflow, __sanitizer_FILE *fp, int ch) {
INTERCEPTOR(__sanitizer_FILE *, fopen, const char *path, const char *mode) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, fopen, path, mode);
- if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, internal_strlen(mode) + 1);
__sanitizer_FILE *res = REAL(fopen)(path, mode);
COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, path);
if (res) unpoison_file(res);
@@ -6079,7 +6083,7 @@ INTERCEPTOR(__sanitizer_FILE *, fopen, const char *path, const char *mode) {
INTERCEPTOR(__sanitizer_FILE *, fdopen, int fd, const char *mode) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, fdopen, fd, mode);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, internal_strlen(mode) + 1);
__sanitizer_FILE *res = REAL(fdopen)(fd, mode);
if (res) unpoison_file(res);
return res;
@@ -6088,8 +6092,8 @@ INTERCEPTOR(__sanitizer_FILE *, freopen, const char *path, const char *mode,
__sanitizer_FILE *fp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, freopen, path, mode, fp);
- if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, internal_strlen(mode) + 1);
COMMON_INTERCEPTOR_FILE_CLOSE(ctx, fp);
__sanitizer_FILE *res = REAL(freopen)(path, mode, fp);
COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, path);
@@ -6113,7 +6117,7 @@ INTERCEPTOR(int, flopen, const char *path, int flags, ...) {
va_end(ap);
COMMON_INTERCEPTOR_ENTER(ctx, flopen, path, flags, mode);
if (path) {
- COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
}
return REAL(flopen)(path, flags, mode);
}
@@ -6126,7 +6130,7 @@ INTERCEPTOR(int, flopenat, int dirfd, const char *path, int flags, ...) {
va_end(ap);
COMMON_INTERCEPTOR_ENTER(ctx, flopen, path, flags, mode);
if (path) {
- COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
}
return REAL(flopenat)(dirfd, path, flags, mode);
}
@@ -6142,8 +6146,8 @@ INTERCEPTOR(int, flopenat, int dirfd, const char *path, int flags, ...) {
INTERCEPTOR(__sanitizer_FILE *, fopen64, const char *path, const char *mode) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, fopen64, path, mode);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, internal_strlen(mode) + 1);
__sanitizer_FILE *res = REAL(fopen64)(path, mode);
COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, path);
if (res) unpoison_file(res);
@@ -6153,8 +6157,8 @@ INTERCEPTOR(__sanitizer_FILE *, freopen64, const char *path, const char *mode,
__sanitizer_FILE *fp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, freopen64, path, mode, fp);
- if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, internal_strlen(mode) + 1);
COMMON_INTERCEPTOR_FILE_CLOSE(ctx, fp);
__sanitizer_FILE *res = REAL(freopen64)(path, mode, fp);
COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, path);
@@ -6304,9 +6308,37 @@ INTERCEPTOR(int, fclose, __sanitizer_FILE *fp) {
INTERCEPTOR(void*, dlopen, const char *filename, int flag) {
void *ctx;
COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, dlopen, filename, flag);
- if (filename) COMMON_INTERCEPTOR_READ_STRING(ctx, filename, 0);
- COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag);
- void *res = REAL(dlopen)(filename, flag);
+
+ if (filename) {
+ COMMON_INTERCEPTOR_READ_STRING(ctx, filename, 0);
+
+# if !SANITIZER_DYNAMIC
+ // We care about a very specific use-case: dladdr on
+ // statically-linked ASan may return <main program>
+ // instead of the library.
+ // We therefore take effect only if the sanitizer is statically
+ // linked, and we don't bother canonicalizing paths because
+ // dladdr should return the same address both times (we assume
+ // the user did not canonicalize the result from dladdr).
+ if (common_flags()->test_only_replace_dlopen_main_program) {
+ VPrintf(1, "dlopen interceptor: filename: %s\n", filename);
+
+ const char *SelfFName = DladdrSelfFName();
+ VPrintf(1, "dlopen interceptor: DladdrSelfFName: %p %s\n",
+ (void *)SelfFName, SelfFName);
+
+ if (SelfFName && internal_strcmp(SelfFName, filename) == 0) {
+ // It's possible they copied the string from dladdr, so
+ // we do a string comparison rather than pointer comparison.
+ VPrintf(1, "dlopen interceptor: replacing %s because it matches %s\n",
+ filename, SelfFName);
+ filename = (char *)0; // dlopen(NULL) returns the main program's handle
+ }
+ }
+# endif // !SANITIZER_DYNAMIC
+ }
+
+ void *res = COMMON_INTERCEPTOR_DLOPEN(filename, flag);
Symbolizer::GetOrInit()->InvalidateModuleList();
COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, res);
return res;
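
DladdrSelfFName presumably recovers the runtime's own file name via dladdr on a symbol inside the runtime; in a static link that is the main executable's path, which is exactly the string compared against 'filename' above. The shape of such a lookup, as a standalone sketch (not the sanitizer's implementation):

    #define _GNU_SOURCE 1
    #include <dlfcn.h>

    // Illustrative: file name of the module containing this function
    // (the main program when statically linked).
    static const char *self_fname() {
      Dl_info info;
      if (dladdr((void *)&self_fname, &info))
        return info.dli_fname;
      return nullptr;
    }
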
@@ -6332,9 +6364,9 @@ INTERCEPTOR(char *, getpass, const char *prompt) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getpass, prompt);
if (prompt)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, prompt, REAL(strlen)(prompt)+1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, prompt, internal_strlen(prompt)+1);
char *res = REAL(getpass)(prompt);
- if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res)+1);
+ if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res)+1);
return res;
}
@@ -6475,7 +6507,7 @@ INTERCEPTOR(int, sem_init, __sanitizer_sem_t *s, int pshared, unsigned value) {
COMMON_INTERCEPTOR_ENTER(ctx, sem_init, s, pshared, value);
// Work around a bug in glibc's "old" semaphore implementation by
// zero-initializing the sem_t contents. This has to be done here because
- // interceptors bind to the lowest symbols version by default, hitting the
+ // interceptors bind to the lowest symbol version before glibc 2.36, hitting the
// buggy code path while the non-sanitized build of the same code works fine.
REAL(memset)(s, 0, sizeof(*s));
int res = REAL(sem_init)(s, pshared, value);
@@ -6538,17 +6570,42 @@ INTERCEPTOR(int, sem_getvalue, __sanitizer_sem_t *s, int *sval) {
}
return res;
}
-#define INIT_SEM \
- COMMON_INTERCEPT_FUNCTION(sem_init); \
- COMMON_INTERCEPT_FUNCTION(sem_destroy); \
- COMMON_INTERCEPT_FUNCTION(sem_wait); \
- COMMON_INTERCEPT_FUNCTION(sem_trywait); \
- COMMON_INTERCEPT_FUNCTION(sem_timedwait); \
- COMMON_INTERCEPT_FUNCTION(sem_post); \
- COMMON_INTERCEPT_FUNCTION(sem_getvalue);
+
+INTERCEPTOR(__sanitizer_sem_t *, sem_open, const char *name, int oflag, ...) {
+ void *ctx;
+ va_list ap;
+ va_start(ap, oflag);
+ u32 mode = va_arg(ap, u32);
+ u32 value = va_arg(ap, u32);
+ COMMON_INTERCEPTOR_ENTER(ctx, sem_open, name, oflag, mode, value);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
+ __sanitizer_sem_t *s = REAL(sem_open)(name, oflag, mode, value);
+ if (s)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, sizeof(*s));
+ va_end(ap);
+ return s;
+}
+
+INTERCEPTOR(int, sem_unlink, const char *name) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sem_unlink, name);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
+ return REAL(sem_unlink)(name);
+}
+
+# define INIT_SEM \
+ COMMON_INTERCEPT_FUNCTION(sem_init); \
+ COMMON_INTERCEPT_FUNCTION(sem_destroy); \
+ COMMON_INTERCEPT_FUNCTION(sem_wait); \
+ COMMON_INTERCEPT_FUNCTION(sem_trywait); \
+ COMMON_INTERCEPT_FUNCTION(sem_timedwait); \
+ COMMON_INTERCEPT_FUNCTION(sem_post); \
+ COMMON_INTERCEPT_FUNCTION(sem_getvalue); \
+ COMMON_INTERCEPT_FUNCTION(sem_open); \
+ COMMON_INTERCEPT_FUNCTION(sem_unlink);
#else
-#define INIT_SEM
-#endif // SANITIZER_INTERCEPT_SEM
+# define INIT_SEM
+#endif // SANITIZER_INTERCEPT_SEM
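
Note that the sem_open interceptor pulls mode and value off the va_list unconditionally, although POSIX defines them only when O_CREAT is set; the supported calling conventions tolerate reading the two extra slots, which keeps the wrapper branch-free. Typical named-semaphore usage, for reference:

    #include <fcntl.h>
    #include <semaphore.h>

    int main() {
      sem_t *s = sem_open("/demo", O_CREAT, 0600, 1);  // mode, value consumed
      if (s != SEM_FAILED) {
        sem_wait(s);
        sem_post(s);
        sem_close(s);
        sem_unlink("/demo");
      }
    }
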
#if SANITIZER_INTERCEPT_PTHREAD_SETCANCEL
INTERCEPTOR(int, pthread_setcancelstate, int state, int *oldstate) {
@@ -6631,7 +6688,7 @@ INTERCEPTOR(char *, ctermid, char *s) {
COMMON_INTERCEPTOR_ENTER(ctx, ctermid, s);
char *res = REAL(ctermid)(s);
if (res) {
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);
}
return res;
}
@@ -6646,7 +6703,7 @@ INTERCEPTOR(char *, ctermid_r, char *s) {
COMMON_INTERCEPTOR_ENTER(ctx, ctermid_r, s);
char *res = REAL(ctermid_r)(s);
if (res) {
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);
}
return res;
}
@@ -6728,7 +6785,7 @@ INTERCEPTOR(SSIZE_T, sendto, int fd, void *buf, SIZE_T len, int flags,
#endif
#if SANITIZER_INTERCEPT_EVENTFD_READ_WRITE
-INTERCEPTOR(int, eventfd_read, int fd, u64 *value) {
+INTERCEPTOR(int, eventfd_read, int fd, __sanitizer_eventfd_t *value) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, eventfd_read, fd, value);
COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
@@ -6739,7 +6796,7 @@ INTERCEPTOR(int, eventfd_read, int fd, u64 *value) {
}
return res;
}
-INTERCEPTOR(int, eventfd_write, int fd, u64 value) {
+INTERCEPTOR(int, eventfd_write, int fd, __sanitizer_eventfd_t value) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, eventfd_write, fd, value);
if (fd >= 0) {
@@ -6772,6 +6829,23 @@ INTERCEPTOR(int, stat, const char *path, void *buf) {
#define INIT_STAT
#endif
+#if SANITIZER_INTERCEPT_STAT64
+INTERCEPTOR(int, stat64, const char *path, void *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, stat64, path, buf);
+ if (common_flags()->intercept_stat)
+ COMMON_INTERCEPTOR_READ_STRING(ctx, path, 0);
+ int res = REAL(stat64)(path, buf);
+ if (!res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer::struct_stat64_sz);
+ return res;
+}
+#define INIT_STAT64 COMMON_INTERCEPT_FUNCTION(stat64)
+#else
+#define INIT_STAT64
+#endif
+
#if SANITIZER_INTERCEPT_LSTAT
INTERCEPTOR(int, lstat, const char *path, void *buf) {
void *ctx;
@@ -6788,6 +6862,22 @@ INTERCEPTOR(int, lstat, const char *path, void *buf) {
#define INIT_LSTAT
#endif
+#if SANITIZER_INTERCEPT_STAT64
+INTERCEPTOR(int, lstat64, const char *path, void *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, lstat64, path, buf);
+ if (common_flags()->intercept_stat)
+ COMMON_INTERCEPTOR_READ_STRING(ctx, path, 0);
+ int res = REAL(lstat64)(path, buf);
+ if (!res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer::struct_stat64_sz);
+ return res;
+}
+#define INIT_LSTAT64 COMMON_INTERCEPT_FUNCTION(lstat64)
+#else
+#define INIT_LSTAT64
+#endif
+
#if SANITIZER_INTERCEPT___XSTAT
INTERCEPTOR(int, __xstat, int version, const char *path, void *buf) {
void *ctx;
@@ -6960,6 +7050,7 @@ INTERCEPTOR(int, mprobe, void *ptr) {
}
#endif
+#if SANITIZER_INTERCEPT_WCSLEN
INTERCEPTOR(SIZE_T, wcslen, const wchar_t *s) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, wcslen, s);
@@ -6978,13 +7069,16 @@ INTERCEPTOR(SIZE_T, wcsnlen, const wchar_t *s, SIZE_T n) {
#define INIT_WCSLEN \
COMMON_INTERCEPT_FUNCTION(wcslen); \
COMMON_INTERCEPT_FUNCTION(wcsnlen);
+#else
+#define INIT_WCSLEN
+#endif
#if SANITIZER_INTERCEPT_WCSCAT
INTERCEPTOR(wchar_t *, wcscat, wchar_t *dst, const wchar_t *src) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, wcscat, dst, src);
- SIZE_T src_size = REAL(wcslen)(src);
- SIZE_T dst_size = REAL(wcslen)(dst);
+ SIZE_T src_size = internal_wcslen(src);
+ SIZE_T dst_size = internal_wcslen(dst);
COMMON_INTERCEPTOR_READ_RANGE(ctx, src, (src_size + 1) * sizeof(wchar_t));
COMMON_INTERCEPTOR_READ_RANGE(ctx, dst, (dst_size + 1) * sizeof(wchar_t));
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst + dst_size,
@@ -6995,8 +7089,8 @@ INTERCEPTOR(wchar_t *, wcscat, wchar_t *dst, const wchar_t *src) {
INTERCEPTOR(wchar_t *, wcsncat, wchar_t *dst, const wchar_t *src, SIZE_T n) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, wcsncat, dst, src, n);
- SIZE_T src_size = REAL(wcsnlen)(src, n);
- SIZE_T dst_size = REAL(wcslen)(dst);
+ SIZE_T src_size = internal_wcsnlen(src, n);
+ SIZE_T dst_size = internal_wcslen(dst);
COMMON_INTERCEPTOR_READ_RANGE(ctx, src,
Min(src_size + 1, n) * sizeof(wchar_t));
COMMON_INTERCEPTOR_READ_RANGE(ctx, dst, (dst_size + 1) * sizeof(wchar_t));
@@ -7015,7 +7109,7 @@ INTERCEPTOR(wchar_t *, wcsncat, wchar_t *dst, const wchar_t *src, SIZE_T n) {
INTERCEPTOR(wchar_t *, wcsdup, wchar_t *s) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, wcsdup, s);
- SIZE_T len = REAL(wcslen)(s);
+ SIZE_T len = internal_wcslen(s);
COMMON_INTERCEPTOR_READ_RANGE(ctx, s, sizeof(wchar_t) * (len + 1));
wchar_t *result = REAL(wcsdup)(s);
if (result)
@@ -7029,9 +7123,9 @@ INTERCEPTOR(wchar_t *, wcsdup, wchar_t *s) {
#endif
#if SANITIZER_INTERCEPT_STRXFRM
-static SIZE_T RealStrLen(const char *str) { return REAL(strlen)(str); }
+static SIZE_T RealStrLen(const char *str) { return internal_strlen(str); }
-static SIZE_T RealStrLen(const wchar_t *str) { return REAL(wcslen)(str); }
+static SIZE_T RealStrLen(const wchar_t *str) { return internal_wcslen(str); }
#define STRXFRM_INTERCEPTOR_IMPL(strxfrm, dest, src, len, ...) \
{ \
@@ -7105,7 +7199,7 @@ INTERCEPTOR(int, acct, const char *file) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, acct, file);
if (file)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, file, REAL(strlen)(file) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, file, internal_strlen(file) + 1);
return REAL(acct)(file);
}
#define INIT_ACCT COMMON_INTERCEPT_FUNCTION(acct)
@@ -7120,7 +7214,7 @@ INTERCEPTOR(const char *, user_from_uid, u32 uid, int nouser) {
COMMON_INTERCEPTOR_ENTER(ctx, user_from_uid, uid, nouser);
user = REAL(user_from_uid)(uid, nouser);
if (user)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, user, REAL(strlen)(user) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, user, internal_strlen(user) + 1);
return user;
}
#define INIT_USER_FROM_UID COMMON_INTERCEPT_FUNCTION(user_from_uid)
@@ -7134,7 +7228,7 @@ INTERCEPTOR(int, uid_from_user, const char *name, u32 *uid) {
int res;
COMMON_INTERCEPTOR_ENTER(ctx, uid_from_user, name, uid);
if (name)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
res = REAL(uid_from_user)(name, uid);
if (uid)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, uid, sizeof(*uid));
@@ -7152,7 +7246,7 @@ INTERCEPTOR(const char *, group_from_gid, u32 gid, int nogroup) {
COMMON_INTERCEPTOR_ENTER(ctx, group_from_gid, gid, nogroup);
group = REAL(group_from_gid)(gid, nogroup);
if (group)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, group, REAL(strlen)(group) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, group, internal_strlen(group) + 1);
return group;
}
#define INIT_GROUP_FROM_GID COMMON_INTERCEPT_FUNCTION(group_from_gid)
@@ -7166,7 +7260,7 @@ INTERCEPTOR(int, gid_from_group, const char *group, u32 *gid) {
int res;
COMMON_INTERCEPTOR_ENTER(ctx, gid_from_group, group, gid);
if (group)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, group, REAL(strlen)(group) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, group, internal_strlen(group) + 1);
res = REAL(gid_from_group)(group, gid);
if (gid)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, gid, sizeof(*gid));
@@ -7182,7 +7276,7 @@ INTERCEPTOR(int, access, const char *path, int mode) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, access, path, mode);
if (path)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
return REAL(access)(path, mode);
}
#define INIT_ACCESS COMMON_INTERCEPT_FUNCTION(access)
@@ -7195,7 +7289,7 @@ INTERCEPTOR(int, faccessat, int fd, const char *path, int mode, int flags) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, faccessat, fd, path, mode, flags);
if (path)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
return REAL(faccessat)(fd, path, mode, flags);
}
#define INIT_FACCESSAT COMMON_INTERCEPT_FUNCTION(faccessat)
@@ -7210,7 +7304,7 @@ INTERCEPTOR(int, getgrouplist, const char *name, u32 basegid, u32 *groups,
int res;
COMMON_INTERCEPTOR_ENTER(ctx, getgrouplist, name, basegid, groups, ngroups);
if (name)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
if (ngroups)
COMMON_INTERCEPTOR_READ_RANGE(ctx, ngroups, sizeof(*ngroups));
res = REAL(getgrouplist)(name, basegid, groups, ngroups);
@@ -7234,7 +7328,7 @@ INTERCEPTOR(int, getgroupmembership, const char *name, u32 basegid, u32 *groups,
COMMON_INTERCEPTOR_ENTER(ctx, getgroupmembership, name, basegid, groups,
maxgrp, ngroups);
if (name)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
res = REAL(getgroupmembership)(name, basegid, groups, maxgrp, ngroups);
if (!res && groups && ngroups) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, groups, sizeof(*groups) * (*ngroups));
@@ -7252,7 +7346,7 @@ INTERCEPTOR(int, getgroupmembership, const char *name, u32 basegid, u32 *groups,
INTERCEPTOR(SSIZE_T, readlink, const char *path, char *buf, SIZE_T bufsiz) {
void* ctx;
COMMON_INTERCEPTOR_ENTER(ctx, readlink, path, buf, bufsiz);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
SSIZE_T res = REAL(readlink)(path, buf, bufsiz);
if (res > 0)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, res);
@@ -7269,7 +7363,7 @@ INTERCEPTOR(SSIZE_T, readlinkat, int dirfd, const char *path, char *buf,
SIZE_T bufsiz) {
void* ctx;
COMMON_INTERCEPTOR_ENTER(ctx, readlinkat, dirfd, path, buf, bufsiz);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
SSIZE_T res = REAL(readlinkat)(dirfd, path, buf, bufsiz);
if (res > 0)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, res);
@@ -7287,7 +7381,7 @@ INTERCEPTOR(int, name_to_handle_at, int dirfd, const char *pathname,
void* ctx;
COMMON_INTERCEPTOR_ENTER(ctx, name_to_handle_at, dirfd, pathname, handle,
mount_id, flags);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, pathname, REAL(strlen)(pathname) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, pathname, internal_strlen(pathname) + 1);
__sanitizer_file_handle *sanitizer_handle =
reinterpret_cast<__sanitizer_file_handle*>(handle);
@@ -7351,7 +7445,7 @@ INTERCEPTOR(SIZE_T, strlcpy, char *dst, char *src, SIZE_T size) {
ctx, src, Min(internal_strnlen(src, size), size - 1) + 1);
}
res = REAL(strlcpy)(dst, src, size);
- COMMON_INTERCEPTOR_COPY_STRING(ctx, dst, src, REAL(strlen)(dst) + 1);
+ COMMON_INTERCEPTOR_COPY_STRING(ctx, dst, src, internal_strlen(dst) + 1);
return res;
}
@@ -7379,17 +7473,25 @@ INTERCEPTOR(void *, mmap, void *addr, SIZE_T sz, int prot, int flags, int fd,
OFF_T off) {
void *ctx;
if (common_flags()->detect_write_exec)
- ReportMmapWriteExec(prot);
+ ReportMmapWriteExec(prot, flags);
if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
return (void *)internal_mmap(addr, sz, prot, flags, fd, off);
COMMON_INTERCEPTOR_ENTER(ctx, mmap, addr, sz, prot, flags, fd, off);
COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, sz, prot, flags, fd, off);
}
+INTERCEPTOR(int, munmap, void *addr, SIZE_T sz) {
+ void *ctx;
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+ return (int)internal_munmap(addr, sz);
+ COMMON_INTERCEPTOR_ENTER(ctx, munmap, addr, sz);
+ COMMON_INTERCEPTOR_MUNMAP_IMPL(ctx, addr, sz);
+}
+
INTERCEPTOR(int, mprotect, void *addr, SIZE_T sz, int prot) {
void *ctx;
if (common_flags()->detect_write_exec)
- ReportMmapWriteExec(prot);
+ ReportMmapWriteExec(prot, 0);
if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
return (int)internal_mprotect(addr, sz, prot);
COMMON_INTERCEPTOR_ENTER(ctx, mprotect, addr, sz, prot);
@@ -7398,6 +7500,7 @@ INTERCEPTOR(int, mprotect, void *addr, SIZE_T sz, int prot) {
}
#define INIT_MMAP \
COMMON_INTERCEPT_FUNCTION(mmap); \
+ COMMON_INTERCEPT_FUNCTION(munmap); \
COMMON_INTERCEPT_FUNCTION(mprotect);
#else
#define INIT_MMAP
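
munmap joins mmap and mprotect here so the runtime can observe address space being released before libc acts on it. As with mmap, calls that arrive before initialization bypass the interceptor machinery and go straight to internal_munmap; on Linux that is essentially the raw syscall, roughly:

    #include <sys/syscall.h>
    #include <unistd.h>

    // Illustrative stand-in for internal_munmap on Linux.
    static int raw_munmap(void *addr, size_t len) {
      return (int)syscall(SYS_munmap, addr, len);
    }
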
@@ -7408,7 +7511,7 @@ INTERCEPTOR(void *, mmap64, void *addr, SIZE_T sz, int prot, int flags, int fd,
OFF64_T off) {
void *ctx;
if (common_flags()->detect_write_exec)
- ReportMmapWriteExec(prot);
+ ReportMmapWriteExec(prot, flags);
if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
return (void *)internal_mmap(addr, sz, prot, flags, fd, off);
COMMON_INTERCEPTOR_ENTER(ctx, mmap64, addr, sz, prot, flags, fd, off);
@@ -7426,7 +7529,7 @@ INTERCEPTOR(char *, devname, u64 dev, u32 type) {
COMMON_INTERCEPTOR_ENTER(ctx, devname, dev, type);
name = REAL(devname)(dev, type);
if (name)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, internal_strlen(name) + 1);
return name;
}
#define INIT_DEVNAME COMMON_INTERCEPT_FUNCTION(devname);
@@ -7448,7 +7551,7 @@ INTERCEPTOR(DEVNAME_R_RETTYPE, devname_r, u64 dev, u32 type, char *path,
COMMON_INTERCEPTOR_ENTER(ctx, devname_r, dev, type, path, len);
DEVNAME_R_RETTYPE res = REAL(devname_r)(dev, type, path, len);
if (DEVNAME_R_SUCCESS(res))
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, path, internal_strlen(path) + 1);
return res;
}
#define INIT_DEVNAME_R COMMON_INTERCEPT_FUNCTION(devname_r);
@@ -7478,7 +7581,7 @@ INTERCEPTOR(void, strmode, u32 mode, char *bp) {
COMMON_INTERCEPTOR_ENTER(ctx, strmode, mode, bp);
REAL(strmode)(mode, bp);
if (bp)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, bp, REAL(strlen)(bp) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, bp, internal_strlen(bp) + 1);
}
#define INIT_STRMODE COMMON_INTERCEPT_FUNCTION(strmode)
#else
@@ -7498,40 +7601,44 @@ INTERCEPTOR(struct __sanitizer_ttyent *, getttynam, char *name) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getttynam, name);
if (name)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
struct __sanitizer_ttyent *ttyent = REAL(getttynam)(name);
if (ttyent)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ttyent, struct_ttyent_sz);
return ttyent;
}
+#define INIT_TTYENT \
+ COMMON_INTERCEPT_FUNCTION(getttyent); \
+ COMMON_INTERCEPT_FUNCTION(getttynam);
+#else
+#define INIT_TTYENT
+#endif
+
+#if SANITIZER_INTERCEPT_TTYENTPATH
INTERCEPTOR(int, setttyentpath, char *path) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, setttyentpath, path);
if (path)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
return REAL(setttyentpath)(path);
}
-#define INIT_TTYENT \
- COMMON_INTERCEPT_FUNCTION(getttyent); \
- COMMON_INTERCEPT_FUNCTION(getttynam); \
- COMMON_INTERCEPT_FUNCTION(setttyentpath)
+#define INIT_TTYENTPATH COMMON_INTERCEPT_FUNCTION(setttyentpath);
#else
-#define INIT_TTYENT
+#define INIT_TTYENTPATH
#endif
#if SANITIZER_INTERCEPT_PROTOENT
static void write_protoent(void *ctx, struct __sanitizer_protoent *p) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_name, REAL(strlen)(p->p_name) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_name, internal_strlen(p->p_name) + 1);
SIZE_T pp_size = 1; // The 1 accounts for the terminating NULL pointer
for (char **pp = p->p_aliases; *pp; ++pp, ++pp_size)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *pp, REAL(strlen)(*pp) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *pp, internal_strlen(*pp) + 1);
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_aliases,
- pp_size * sizeof(char **));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_aliases, pp_size * sizeof(char *));
}
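
The sizeof(char **) to sizeof(char *) change makes the expression match the data: p_aliases is a NULL-terminated array of char *, so the annotated byte count is entries times sizeof(char *); the two sizes happen to coincide on supported targets, which is why the old form never misbehaved. The counting logic, isolated:

    #include <stddef.h>

    // Entries including the terminating NULL pointer, in bytes.
    static size_t alias_bytes(char **aliases) {
      size_t n = 1;  // the terminator
      for (char **pp = aliases; *pp; ++pp)
        ++n;
      return n * sizeof(char *);
    }
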
INTERCEPTOR(struct __sanitizer_protoent *, getprotoent) {
@@ -7547,7 +7654,7 @@ INTERCEPTOR(struct __sanitizer_protoent *, getprotobyname, const char *name) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getprotobyname, name);
if (name)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
struct __sanitizer_protoent *p = REAL(getprotobyname)(name);
if (p)
write_protoent(ctx, p);
@@ -7591,7 +7698,7 @@ INTERCEPTOR(int, getprotobyname_r, const char *name,
COMMON_INTERCEPTOR_ENTER(ctx, getprotobyname_r, name, result_buf, buf,
buflen, result);
if (name)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
int res = REAL(getprotobyname_r)(name, result_buf, buf, buflen, result);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof *result);
@@ -7630,15 +7737,14 @@ INTERCEPTOR(struct __sanitizer_netent *, getnetent) {
if (n) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n, sizeof(*n));
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_name, REAL(strlen)(n->n_name) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_name, internal_strlen(n->n_name) + 1);
SIZE_T nn_size = 1; // The 1 accounts for the terminating NULL pointer
for (char **nn = n->n_aliases; *nn; ++nn, ++nn_size)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *nn, REAL(strlen)(*nn) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *nn, internal_strlen(*nn) + 1);
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_aliases,
- nn_size * sizeof(char **));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_aliases, nn_size * sizeof(char *));
}
return n;
}
@@ -7647,20 +7753,19 @@ INTERCEPTOR(struct __sanitizer_netent *, getnetbyname, const char *name) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getnetbyname, name);
if (name)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
struct __sanitizer_netent *n = REAL(getnetbyname)(name);
if (n) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n, sizeof(*n));
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_name, REAL(strlen)(n->n_name) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_name, internal_strlen(n->n_name) + 1);
SIZE_T nn_size = 1; // The 1 accounts for the terminating NULL pointer
for (char **nn = n->n_aliases; *nn; ++nn, ++nn_size)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *nn, REAL(strlen)(*nn) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *nn, internal_strlen(*nn) + 1);
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_aliases,
- nn_size * sizeof(char **));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_aliases, nn_size * sizeof(char *));
}
return n;
}
@@ -7672,15 +7777,14 @@ INTERCEPTOR(struct __sanitizer_netent *, getnetbyaddr, u32 net, int type) {
if (n) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n, sizeof(*n));
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_name, REAL(strlen)(n->n_name) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_name, internal_strlen(n->n_name) + 1);
SIZE_T nn_size = 1; // The 1 accounts for the terminating NULL pointer
for (char **nn = n->n_aliases; *nn; ++nn, ++nn_size)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *nn, REAL(strlen)(*nn) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *nn, internal_strlen(*nn) + 1);
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_aliases,
- nn_size * sizeof(char **));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_aliases, nn_size * sizeof(char *));
}
return n;
}
@@ -7753,12 +7857,12 @@ INTERCEPTOR(void, setbuf, __sanitizer_FILE *stream, char *buf) {
unpoison_file(stream);
}
-INTERCEPTOR(void, setbuffer, __sanitizer_FILE *stream, char *buf, int mode) {
+INTERCEPTOR(void, setbuffer, __sanitizer_FILE *stream, char *buf, SIZE_T size) {
void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, setbuffer, stream, buf, mode);
- REAL(setbuffer)(stream, buf, mode);
+ COMMON_INTERCEPTOR_ENTER(ctx, setbuffer, stream, buf, size);
+ REAL(setbuffer)(stream, buf, size);
if (buf) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer_bufsiz);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, size);
}
if (stream)
unpoison_file(stream);
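
setbuffer's third parameter is the buffer size, not a mode (BSD signature: void setbuffer(FILE *stream, char *buf, size_t size)), so the interceptor now annotates exactly the caller's size bytes instead of the fixed __sanitizer_bufsiz. For reference:

    #include <stdio.h>

    int main() {
      static char buf[1 << 16];
      setbuffer(stdout, buf, sizeof(buf));  // fully buffered, 64 KiB
      puts("buffered");
      fflush(stdout);
    }
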
@@ -7798,9 +7902,9 @@ INTERCEPTOR(int, regcomp, void *preg, const char *pattern, int cflags) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, regcomp, preg, pattern, cflags);
if (pattern)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, pattern, REAL(strlen)(pattern) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, pattern, internal_strlen(pattern) + 1);
int res = REAL(regcomp)(preg, pattern, cflags);
- if (!res)
+ if (preg)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, preg, struct_regex_sz);
return res;
}
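
Annotating preg whenever it is non-null, not just on success, reflects that regcomp can store into the structure before deciding to fail, so the old success-only annotation under-reported writes. Usage for orientation:

    #include <regex.h>
    #include <stdio.h>

    int main() {
      regex_t re;
      if (regcomp(&re, "^ab+c", REG_EXTENDED) == 0) {
        regmatch_t m[1];
        if (regexec(&re, "abbbc", 1, m, 0) == 0)
          printf("match at [%d, %d)\n", (int)m[0].rm_so, (int)m[0].rm_eo);
        regfree(&re);
      }
    }
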
@@ -7811,7 +7915,7 @@ INTERCEPTOR(int, regexec, const void *preg, const char *string, SIZE_T nmatch,
if (preg)
COMMON_INTERCEPTOR_READ_RANGE(ctx, preg, struct_regex_sz);
if (string)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, string, REAL(strlen)(string) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, string, internal_strlen(string) + 1);
int res = REAL(regexec)(preg, string, nmatch, pmatch, eflags);
if (!res && pmatch)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pmatch, nmatch * struct_regmatch_sz);
@@ -7825,7 +7929,7 @@ INTERCEPTOR(SIZE_T, regerror, int errcode, const void *preg, char *errbuf,
COMMON_INTERCEPTOR_READ_RANGE(ctx, preg, struct_regex_sz);
SIZE_T res = REAL(regerror)(errcode, preg, errbuf, errbuf_size);
if (errbuf)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, errbuf, REAL(strlen)(errbuf) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, errbuf, internal_strlen(errbuf) + 1);
return res;
}
INTERCEPTOR(void, regfree, const void *preg) {
@@ -7850,15 +7954,15 @@ INTERCEPTOR(SSIZE_T, regnsub, char *buf, SIZE_T bufsiz, const char *sub,
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, regnsub, buf, bufsiz, sub, rm, str);
if (sub)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, sub, REAL(strlen)(sub) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, sub, internal_strlen(sub) + 1);
// The implementation requires the rm array to have exactly 10 elements
if (rm)
COMMON_INTERCEPTOR_READ_RANGE(ctx, rm, 10 * struct_regmatch_sz);
if (str)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, str, REAL(strlen)(str) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, str, internal_strlen(str) + 1);
SSIZE_T res = REAL(regnsub)(buf, bufsiz, sub, rm, str);
if (res > 0 && buf)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, REAL(strlen)(buf) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, internal_strlen(buf) + 1);
return res;
}
INTERCEPTOR(SSIZE_T, regasub, char **buf, const char *sub,
@@ -7866,16 +7970,16 @@ INTERCEPTOR(SSIZE_T, regasub, char **buf, const char *sub,
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, regasub, buf, sub, rm, sstr);
if (sub)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, sub, REAL(strlen)(sub) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, sub, internal_strlen(sub) + 1);
// The implementation hardcodes the rm array to 10 elements
if (rm)
COMMON_INTERCEPTOR_READ_RANGE(ctx, rm, 10 * struct_regmatch_sz);
if (sstr)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, sstr, REAL(strlen)(sstr) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, sstr, internal_strlen(sstr) + 1);
SSIZE_T res = REAL(regasub)(buf, sub, rm, sstr);
if (res > 0 && buf) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, sizeof(char *));
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *buf, REAL(strlen)(*buf) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *buf, internal_strlen(*buf) + 1);
}
return res;
}
@@ -7897,7 +8001,7 @@ INTERCEPTOR(void *, fts_open, char *const *path_argv, int options,
COMMON_INTERCEPTOR_READ_RANGE(ctx, pa, sizeof(char **));
if (!*pa)
break;
- COMMON_INTERCEPTOR_READ_RANGE(ctx, *pa, REAL(strlen)(*pa) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, *pa, internal_strlen(*pa) + 1);
}
}
// TODO(kamil): handle compar callback
@@ -7989,7 +8093,7 @@ INTERCEPTOR(int, sysctlbyname, char *sname, void *oldp, SIZE_T *oldlenp,
COMMON_INTERCEPTOR_ENTER(ctx, sysctlbyname, sname, oldp, oldlenp, newp,
newlen);
if (sname)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, sname, REAL(strlen)(sname) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, sname, internal_strlen(sname) + 1);
if (oldlenp)
COMMON_INTERCEPTOR_READ_RANGE(ctx, oldlenp, sizeof(*oldlenp));
if (newp && newlen)
@@ -8010,7 +8114,7 @@ INTERCEPTOR(int, sysctlnametomib, const char *sname, int *name,
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, sysctlnametomib, sname, name, namelenp);
if (sname)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, sname, REAL(strlen)(sname) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, sname, internal_strlen(sname) + 1);
if (namelenp)
COMMON_INTERCEPTOR_READ_RANGE(ctx, namelenp, sizeof(*namelenp));
int res = REAL(sysctlnametomib)(sname, name, namelenp);
@@ -8050,7 +8154,7 @@ INTERCEPTOR(void *, asysctlbyname, const char *sname, SIZE_T *len) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, asysctlbyname, sname, len);
if (sname)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, sname, REAL(strlen)(sname) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, sname, internal_strlen(sname) + 1);
void *res = REAL(asysctlbyname)(sname, len);
if (res && len) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, len, sizeof(*len));
@@ -8073,7 +8177,7 @@ INTERCEPTOR(int, sysctlgetmibinfo, char *sname, int *name,
COMMON_INTERCEPTOR_ENTER(ctx, sysctlgetmibinfo, sname, name, namelenp, cname,
csz, rnode, v);
if (sname)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, sname, REAL(strlen)(sname) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, sname, internal_strlen(sname) + 1);
if (namelenp)
COMMON_INTERCEPTOR_READ_RANGE(ctx, namelenp, sizeof(*namelenp));
if (csz)
@@ -8107,7 +8211,7 @@ INTERCEPTOR(char *, nl_langinfo, long item) {
COMMON_INTERCEPTOR_ENTER(ctx, nl_langinfo, item);
char *ret = REAL(nl_langinfo)(item);
if (ret)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, REAL(strlen)(ret) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, internal_strlen(ret) + 1);
return ret;
}
#define INIT_NL_LANGINFO COMMON_INTERCEPT_FUNCTION(nl_langinfo)
@@ -8127,7 +8231,7 @@ INTERCEPTOR(int, modctl, int operation, void *argp) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, ml, sizeof(*ml));
if (ml->ml_filename)
COMMON_INTERCEPTOR_READ_RANGE(ctx, ml->ml_filename,
- REAL(strlen)(ml->ml_filename) + 1);
+ internal_strlen(ml->ml_filename) + 1);
if (ml->ml_props)
COMMON_INTERCEPTOR_READ_RANGE(ctx, ml->ml_props, ml->ml_propslen);
}
@@ -8135,7 +8239,7 @@ INTERCEPTOR(int, modctl, int operation, void *argp) {
} else if (operation == modctl_unload) {
if (argp) {
const char *name = (const char *)argp;
- COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, internal_strlen(name) + 1);
}
ret = REAL(modctl)(operation, argp);
} else if (operation == modctl_stat) {
@@ -8177,7 +8281,7 @@ INTERCEPTOR(long long, strtonum, const char *nptr, long long minval,
if (errstr) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, errstr, sizeof(const char *));
if (*errstr)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *errstr, REAL(strlen)(*errstr) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *errstr, internal_strlen(*errstr) + 1);
}
return ret;
}
@@ -8197,7 +8301,7 @@ INTERCEPTOR(char *, fparseln, __sanitizer_FILE *stream, SIZE_T *len,
COMMON_INTERCEPTOR_READ_RANGE(ctx, delim, sizeof(delim[0]) * 3);
char *ret = REAL(fparseln)(stream, len, lineno, delim, flags);
if (ret) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, REAL(strlen)(ret) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, internal_strlen(ret) + 1);
if (len)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, len, sizeof(*len));
if (lineno)
@@ -8214,7 +8318,7 @@ INTERCEPTOR(char *, fparseln, __sanitizer_FILE *stream, SIZE_T *len,
INTERCEPTOR(int, statvfs1, const char *path, void *buf, int flags) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, statvfs1, path, buf, flags);
- if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
int res = REAL(statvfs1)(path, buf, flags);
if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs_sz);
return res;
@@ -8495,7 +8599,7 @@ INTERCEPTOR(char *, SHA1File, char *filename, char *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, SHA1File, filename, buf);
if (filename)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, internal_strlen(filename) + 1);
char *ret = REAL(SHA1File)(filename, buf);
if (ret)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, SHA1_return_length);
@@ -8506,7 +8610,7 @@ INTERCEPTOR(char *, SHA1FileChunk, char *filename, char *buf, OFF_T offset,
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, SHA1FileChunk, filename, buf, offset, length);
if (filename)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, internal_strlen(filename) + 1);
char *ret = REAL(SHA1FileChunk)(filename, buf, offset, length);
if (ret)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, SHA1_return_length);
@@ -8582,7 +8686,7 @@ INTERCEPTOR(char *, MD4File, const char *filename, char *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, MD4File, filename, buf);
if (filename)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, internal_strlen(filename) + 1);
char *ret = REAL(MD4File)(filename, buf);
if (ret)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, MD4_return_length);
@@ -8665,7 +8769,7 @@ INTERCEPTOR(char *, RMD160File, char *filename, char *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, RMD160File, filename, buf);
if (filename)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, internal_strlen(filename) + 1);
char *ret = REAL(RMD160File)(filename, buf);
if (ret)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, RMD160_return_length);
@@ -8676,7 +8780,7 @@ INTERCEPTOR(char *, RMD160FileChunk, char *filename, char *buf, OFF_T offset,
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, RMD160FileChunk, filename, buf, offset, length);
if (filename)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, internal_strlen(filename) + 1);
char *ret = REAL(RMD160FileChunk)(filename, buf, offset, length);
if (ret)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, RMD160_return_length);
@@ -8752,7 +8856,7 @@ INTERCEPTOR(char *, MD5File, const char *filename, char *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, MD5File, filename, buf);
if (filename)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, internal_strlen(filename) + 1);
char *ret = REAL(MD5File)(filename, buf);
if (ret)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, MD5_return_length);
@@ -8882,7 +8986,7 @@ INTERCEPTOR(char *, MD2File, const char *filename, char *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, MD2File, filename, buf);
if (filename)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, internal_strlen(filename) + 1);
char *ret = REAL(MD2File)(filename, buf);
if (ret)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, MD2_return_length);
@@ -8960,7 +9064,7 @@ INTERCEPTOR(char *, MD2Data, const unsigned char *data, unsigned int len,
void *ctx; \
COMMON_INTERCEPTOR_ENTER(ctx, SHA##LEN##_File, filename, buf); \
if (filename) \
- COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);\
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, internal_strlen(filename) + 1);\
char *ret = REAL(SHA##LEN##_File)(filename, buf); \
if (ret) \
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, SHA##LEN##_return_length); \
@@ -8972,7 +9076,7 @@ INTERCEPTOR(char *, MD2Data, const unsigned char *data, unsigned int len,
COMMON_INTERCEPTOR_ENTER(ctx, SHA##LEN##_FileChunk, filename, buf, offset, \
length); \
if (filename) \
- COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);\
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, internal_strlen(filename) + 1);\
char *ret = REAL(SHA##LEN##_FileChunk)(filename, buf, offset, length); \
if (ret) \
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, SHA##LEN##_return_length); \
@@ -8989,10 +9093,10 @@ INTERCEPTOR(char *, MD2Data, const unsigned char *data, unsigned int len,
return ret; \
}
-SHA2_INTERCEPTORS(224, u32);
-SHA2_INTERCEPTORS(256, u32);
-SHA2_INTERCEPTORS(384, u64);
-SHA2_INTERCEPTORS(512, u64);
+SHA2_INTERCEPTORS(224, u32)
+SHA2_INTERCEPTORS(256, u32)
+SHA2_INTERCEPTORS(384, u64)
+SHA2_INTERCEPTORS(512, u64)
#define INIT_SHA2_INTECEPTORS(LEN) \
COMMON_INTERCEPT_FUNCTION(SHA##LEN##_Init); \
@@ -9036,7 +9140,7 @@ INTERCEPTOR(int, strvis, char *dst, const char *src, int flag) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strvis, dst, src, flag);
if (src)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, internal_strlen(src) + 1);
int len = REAL(strvis)(dst, src, flag);
if (dst)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, len + 1);
@@ -9046,7 +9150,7 @@ INTERCEPTOR(int, stravis, char **dst, const char *src, int flag) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, stravis, dst, src, flag);
if (src)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, internal_strlen(src) + 1);
int len = REAL(stravis)(dst, src, flag);
if (dst) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, sizeof(char *));
@@ -9059,7 +9163,7 @@ INTERCEPTOR(int, strnvis, char *dst, SIZE_T dlen, const char *src, int flag) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strnvis, dst, dlen, src, flag);
if (src)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, internal_strlen(src) + 1);
int len = REAL(strnvis)(dst, dlen, src, flag);
// The interface remains valid even if there is no space for the NUL terminator
if (dst && len > 0)
@@ -9109,7 +9213,7 @@ INTERCEPTOR(char *, svis, char *dst, int c, int flag, int nextc,
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, svis, dst, c, flag, nextc, extra);
if (extra)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, REAL(strlen)(extra) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, internal_strlen(extra) + 1);
char *end = REAL(svis)(dst, c, flag, nextc, extra);
if (dst && end)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, end - dst + 1);
@@ -9120,7 +9224,7 @@ INTERCEPTOR(char *, snvis, char *dst, SIZE_T dlen, int c, int flag, int nextc,
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, snvis, dst, dlen, c, flag, nextc, extra);
if (extra)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, REAL(strlen)(extra) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, internal_strlen(extra) + 1);
char *end = REAL(snvis)(dst, dlen, c, flag, nextc, extra);
if (dst && end)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst,
@@ -9132,9 +9236,9 @@ INTERCEPTOR(int, strsvis, char *dst, const char *src, int flag,
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strsvis, dst, src, flag, extra);
if (src)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, internal_strlen(src) + 1);
if (extra)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, REAL(strlen)(extra) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, internal_strlen(extra) + 1);
int len = REAL(strsvis)(dst, src, flag, extra);
if (dst)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, len + 1);
@@ -9145,9 +9249,9 @@ INTERCEPTOR(int, strsnvis, char *dst, SIZE_T dlen, const char *src, int flag,
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strsnvis, dst, dlen, src, flag, extra);
if (src)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, internal_strlen(src) + 1);
if (extra)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, REAL(strlen)(extra) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, internal_strlen(extra) + 1);
int len = REAL(strsnvis)(dst, dlen, src, flag, extra);
// The interface remains valid even if there is no space for the NUL terminator
if (dst && len >= 0)
@@ -9161,7 +9265,7 @@ INTERCEPTOR(int, strsvisx, char *dst, const char *src, SIZE_T len, int flag,
if (src)
COMMON_INTERCEPTOR_READ_RANGE(ctx, src, len);
if (extra)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, REAL(strlen)(extra) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, internal_strlen(extra) + 1);
int ret = REAL(strsvisx)(dst, src, len, flag, extra);
if (dst)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);
@@ -9174,7 +9278,7 @@ INTERCEPTOR(int, strsnvisx, char *dst, SIZE_T dlen, const char *src, SIZE_T len,
if (src)
COMMON_INTERCEPTOR_READ_RANGE(ctx, src, len);
if (extra)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, REAL(strlen)(extra) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, internal_strlen(extra) + 1);
int ret = REAL(strsnvisx)(dst, dlen, src, len, flag, extra);
if (dst && ret >= 0)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);
@@ -9188,7 +9292,7 @@ INTERCEPTOR(int, strsenvisx, char *dst, SIZE_T dlen, const char *src,
if (src)
COMMON_INTERCEPTOR_READ_RANGE(ctx, src, len);
if (extra)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, REAL(strlen)(extra) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, internal_strlen(extra) + 1);
// FIXME: this only needs to be checked when "flag & VIS_NOLOCALE" does not
// hold, according to the implementation
if (cerr_ptr)
@@ -9215,7 +9319,7 @@ INTERCEPTOR(int, strunvis, char *dst, const char *src) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strunvis, dst, src);
if (src)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, internal_strlen(src) + 1);
int ret = REAL(strunvis)(dst, src);
if (ret != -1)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);
@@ -9225,7 +9329,7 @@ INTERCEPTOR(int, strnunvis, char *dst, SIZE_T dlen, const char *src) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strnunvis, dst, dlen, src);
if (src)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, internal_strlen(src) + 1);
int ret = REAL(strnunvis)(dst, dlen, src);
if (ret != -1)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);
@@ -9235,7 +9339,7 @@ INTERCEPTOR(int, strunvisx, char *dst, const char *src, int flag) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strunvisx, dst, src, flag);
if (src)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, internal_strlen(src) + 1);
int ret = REAL(strunvisx)(dst, src, flag);
if (ret != -1)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);
@@ -9246,7 +9350,7 @@ INTERCEPTOR(int, strnunvisx, char *dst, SIZE_T dlen, const char *src,
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strnunvisx, dst, dlen, src, flag);
if (src)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, internal_strlen(src) + 1);
int ret = REAL(strnunvisx)(dst, dlen, src, flag);
if (ret != -1)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);
@@ -9282,7 +9386,7 @@ INTERCEPTOR(struct __sanitizer_cdbr *, cdbr_open, const char *path, int flags) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, cdbr_open, path, flags);
if (path)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
struct __sanitizer_cdbr *cdbr = REAL(cdbr_open)(path, flags);
if (cdbr)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cdbr, sizeof(*cdbr));
@@ -9474,7 +9578,7 @@ INTERCEPTOR(void *, getfsspec, const char *spec) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getfsspec, spec);
if (spec)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, spec, REAL(strlen)(spec) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, spec, internal_strlen(spec) + 1);
void *ret = REAL(getfsspec)(spec);
if (ret)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, struct_fstab_sz);
@@ -9485,7 +9589,7 @@ INTERCEPTOR(void *, getfsfile, const char *file) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getfsfile, file);
if (file)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, file, REAL(strlen)(file) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, file, internal_strlen(file) + 1);
void *ret = REAL(getfsfile)(file);
if (ret)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, struct_fstab_sz);
@@ -9529,9 +9633,9 @@ INTERCEPTOR(__sanitizer_FILE *, popen, const char *command, const char *type) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, popen, command, type);
if (command)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, command, REAL(strlen)(command) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, command, internal_strlen(command) + 1);
if (type)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, type, REAL(strlen)(type) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, type, internal_strlen(type) + 1);
__sanitizer_FILE *res = REAL(popen)(command, type);
COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, nullptr);
if (res) unpoison_file(res);
@@ -9548,13 +9652,13 @@ INTERCEPTOR(__sanitizer_FILE *, popenve, const char *path,
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, popenve, path, argv, envp, type);
if (path)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
if (argv) {
for (char *const *pa = argv; ; ++pa) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, pa, sizeof(char **));
if (!*pa)
break;
- COMMON_INTERCEPTOR_READ_RANGE(ctx, *pa, REAL(strlen)(*pa) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, *pa, internal_strlen(*pa) + 1);
}
}
if (envp) {
@@ -9562,11 +9666,11 @@ INTERCEPTOR(__sanitizer_FILE *, popenve, const char *path,
COMMON_INTERCEPTOR_READ_RANGE(ctx, pa, sizeof(char **));
if (!*pa)
break;
- COMMON_INTERCEPTOR_READ_RANGE(ctx, *pa, REAL(strlen)(*pa) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, *pa, internal_strlen(*pa) + 1);
}
}
if (type)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, type, REAL(strlen)(type) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, type, internal_strlen(type) + 1);
__sanitizer_FILE *res = REAL(popenve)(path, argv, envp, type);
COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, nullptr);
if (res) unpoison_file(res);
@@ -9762,7 +9866,7 @@ INTERCEPTOR(char *, fdevname, int fd) {
COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
char *name = REAL(fdevname)(fd);
if (name) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, internal_strlen(name) + 1);
if (fd > 0)
COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
}
@@ -9775,7 +9879,7 @@ INTERCEPTOR(char *, fdevname_r, int fd, char *buf, SIZE_T len) {
COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
char *name = REAL(fdevname_r)(fd, buf, len);
if (name && buf && len > 0) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, REAL(strlen)(buf) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, internal_strlen(buf) + 1);
if (fd > 0)
COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
}
@@ -9795,7 +9899,7 @@ INTERCEPTOR(char *, getusershell) {
COMMON_INTERCEPTOR_ENTER(ctx, getusershell);
char *res = REAL(getusershell)();
if (res)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);
return res;
}
@@ -9820,7 +9924,7 @@ INTERCEPTOR(int, sl_add, void *sl, char *item) {
if (sl)
COMMON_INTERCEPTOR_READ_RANGE(ctx, sl, __sanitizer::struct_StringList_sz);
if (item)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, item, REAL(strlen)(item) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, item, internal_strlen(item) + 1);
int res = REAL(sl_add)(sl, item);
if (!res)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sl, __sanitizer::struct_StringList_sz);
@@ -9833,10 +9937,10 @@ INTERCEPTOR(char *, sl_find, void *sl, const char *item) {
if (sl)
COMMON_INTERCEPTOR_READ_RANGE(ctx, sl, __sanitizer::struct_StringList_sz);
if (item)
- COMMON_INTERCEPTOR_READ_RANGE(ctx, item, REAL(strlen)(item) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, item, internal_strlen(item) + 1);
char *res = REAL(sl_find)(sl, item);
if (res)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, internal_strlen(res) + 1);
return res;
}
@@ -9861,7 +9965,13 @@ INTERCEPTOR(void, sl_free, void *sl, int freeall) {
INTERCEPTOR(SSIZE_T, getrandom, void *buf, SIZE_T buflen, unsigned int flags) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getrandom, buf, buflen, flags);
- SSIZE_T n = REAL(getrandom)(buf, buflen, flags);
+ // If GRND_NONBLOCK is set in flags, the call is non-blocking.
+ static const int grnd_nonblock = 1;
+ SSIZE_T n;
+ if ((flags & grnd_nonblock))
+ n = REAL(getrandom)(buf, buflen, flags);
+ else
+ n = COMMON_INTERCEPTOR_BLOCK_REAL(getrandom)(buf, buflen, flags);
if (n > 0) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, n);
}
@@ -9872,41 +9982,6 @@ INTERCEPTOR(SSIZE_T, getrandom, void *buf, SIZE_T buflen, unsigned int flags) {
#define INIT_GETRANDOM
#endif
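
The rewritten getrandom interceptor above routes potentially blocking calls through COMMON_INTERCEPTOR_BLOCK_REAL and calls REAL directly only when GRND_NONBLOCK is set. A short Linux-specific sketch of the flag's effect, assuming glibc 2.25+ for <sys/random.h>:

#include <cstdio>
#include <sys/types.h>
#include <sys/random.h>  // Linux-specific, glibc >= 2.25

int main() {
  unsigned char buf[16];
  // With GRND_NONBLOCK, getrandom fails with EAGAIN instead of blocking
  // while the entropy pool is uninitialized -- the same bit the
  // interceptor tests to choose the non-blocking path.
  ssize_t n = getrandom(buf, sizeof(buf), GRND_NONBLOCK);
  printf("getrandom returned %zd\n", n);
  return 0;
}
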
-#if SANITIZER_INTERCEPT_CRYPT
-INTERCEPTOR(char *, crypt, char *key, char *salt) {
- void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, crypt, key, salt);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, key, internal_strlen(key) + 1);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, salt, internal_strlen(salt) + 1);
- char *res = REAL(crypt)(key, salt);
- if (res != nullptr)
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);
- return res;
-}
-#define INIT_CRYPT COMMON_INTERCEPT_FUNCTION(crypt);
-#else
-#define INIT_CRYPT
-#endif
-
-#if SANITIZER_INTERCEPT_CRYPT_R
-INTERCEPTOR(char *, crypt_r, char *key, char *salt, void *data) {
- void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, crypt_r, key, salt, data);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, key, internal_strlen(key) + 1);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, salt, internal_strlen(salt) + 1);
- char *res = REAL(crypt_r)(key, salt, data);
- if (res != nullptr) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data,
- __sanitizer::struct_crypt_data_sz);
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, internal_strlen(res) + 1);
- }
- return res;
-}
-#define INIT_CRYPT_R COMMON_INTERCEPT_FUNCTION(crypt_r);
-#else
-#define INIT_CRYPT_R
-#endif
-
#if SANITIZER_INTERCEPT_GETENTROPY
INTERCEPTOR(int, getentropy, void *buf, SIZE_T buflen) {
void *ctx;
@@ -9922,7 +9997,52 @@ INTERCEPTOR(int, getentropy, void *buf, SIZE_T buflen) {
#define INIT_GETENTROPY
#endif
-#if SANITIZER_INTERCEPT_QSORT
+#if SANITIZER_INTERCEPT_QSORT_R
+typedef int (*qsort_r_compar_f)(const void *, const void *, void *);
+struct qsort_r_compar_params {
+ SIZE_T size;
+ qsort_r_compar_f compar;
+ void *arg;
+};
+static int wrapped_qsort_r_compar(const void *a, const void *b, void *arg) {
+ qsort_r_compar_params *params = (qsort_r_compar_params *)arg;
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(3);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(a, params->size);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(b, params->size);
+ return params->compar(a, b, params->arg);
+}
+
+INTERCEPTOR(void, qsort_r, void *base, SIZE_T nmemb, SIZE_T size,
+ qsort_r_compar_f compar, void *arg) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, qsort_r, base, nmemb, size, compar, arg);
+ // Run the comparator over all array elements to detect any memory issues.
+ if (nmemb > 1) {
+ for (SIZE_T i = 0; i < nmemb - 1; ++i) {
+ void *p = (void *)((char *)base + i * size);
+ void *q = (void *)((char *)base + (i + 1) * size);
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(3);
+ compar(p, q, arg);
+ }
+ }
+ qsort_r_compar_params params = {size, compar, arg};
+ REAL(qsort_r)(base, nmemb, size, wrapped_qsort_r_compar, &params);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, base, nmemb * size);
+}
+# define INIT_QSORT_R COMMON_INTERCEPT_FUNCTION(qsort_r)
+#else
+# define INIT_QSORT_R
+#endif
+
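
The new qsort_r interceptor above replaces the old THREADLOCAL comparator state (removed further down) with a params struct threaded through qsort_r's opaque arg pointer. A standalone sketch of that pattern, not part of the diff, assuming the glibc qsort_r signature where the comparator takes the user argument last (g++ defines _GNU_SOURCE by default):

#include <cstdio>
#include <cstdlib>

typedef int (*compar_fn)(const void *, const void *, void *);

struct compar_params {
  compar_fn compar;  // the user's comparator
  void *arg;         // the user's original argument
};

static int wrapped_compar(const void *a, const void *b, void *arg) {
  // A real interceptor would unpoison/check a and b here first.
  compar_params *params = static_cast<compar_params *>(arg);
  return params->compar(a, b, params->arg);
}

static int cmp_int(const void *a, const void *b, void *) {
  return *static_cast<const int *>(a) - *static_cast<const int *>(b);
}

int main() {
  int v[] = {3, 1, 2};
  compar_params params = {cmp_int, nullptr};
  qsort_r(v, 3, sizeof(int), wrapped_compar, &params);
  printf("%d %d %d\n", v[0], v[1], v[2]);  // 1 2 3
  return 0;
}
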
+#if SANITIZER_INTERCEPT_QSORT && SANITIZER_INTERCEPT_QSORT_R
+INTERCEPTOR(void, qsort, void *base, SIZE_T nmemb, SIZE_T size,
+ qsort_r_compar_f compar) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, qsort, base, nmemb, size, compar);
+ WRAP(qsort_r)(base, nmemb, size, compar, nullptr);
+}
+# define INIT_QSORT COMMON_INTERCEPT_FUNCTION(qsort)
+#elif SANITIZER_INTERCEPT_QSORT && !SANITIZER_INTERCEPT_QSORT_R
// Glibc qsort uses a temporary buffer allocated either on stack or on heap.
// Poisoned memory from there may get copied into the comparator arguments,
// where it needs to be dealt with. But even that is not enough - the results of
@@ -9937,7 +10057,7 @@ INTERCEPTOR(int, getentropy, void *buf, SIZE_T buflen) {
typedef int (*qsort_compar_f)(const void *, const void *);
static THREADLOCAL qsort_compar_f qsort_compar;
static THREADLOCAL SIZE_T qsort_size;
-int wrapped_qsort_compar(const void *a, const void *b) {
+static int wrapped_qsort_compar(const void *a, const void *b) {
COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
COMMON_INTERCEPTOR_INITIALIZE_RANGE(a, qsort_size);
COMMON_INTERCEPTOR_INITIALIZE_RANGE(b, qsort_size);
@@ -9979,60 +10099,34 @@ INTERCEPTOR(void, qsort, void *base, SIZE_T nmemb, SIZE_T size,
}
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, base, nmemb * size);
}
-#define INIT_QSORT COMMON_INTERCEPT_FUNCTION(qsort)
+# define INIT_QSORT COMMON_INTERCEPT_FUNCTION(qsort)
#else
-#define INIT_QSORT
+# define INIT_QSORT
#endif
-#if SANITIZER_INTERCEPT_QSORT_R
-typedef int (*qsort_r_compar_f)(const void *, const void *, void *);
-static THREADLOCAL qsort_r_compar_f qsort_r_compar;
-static THREADLOCAL SIZE_T qsort_r_size;
-int wrapped_qsort_r_compar(const void *a, const void *b, void *arg) {
- COMMON_INTERCEPTOR_UNPOISON_PARAM(3);
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(a, qsort_r_size);
- COMMON_INTERCEPTOR_INITIALIZE_RANGE(b, qsort_r_size);
- return qsort_r_compar(a, b, arg);
+#if SANITIZER_INTERCEPT_BSEARCH
+typedef int (*bsearch_compar_f)(const void *, const void *);
+struct bsearch_compar_params {
+ const void *key;
+ bsearch_compar_f compar;
+};
+
+static int wrapped_bsearch_compar(const void *key, const void *b) {
+ const bsearch_compar_params *params = (const bsearch_compar_params *)key;
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
+ return params->compar(params->key, b);
}
-INTERCEPTOR(void, qsort_r, void *base, SIZE_T nmemb, SIZE_T size,
- qsort_r_compar_f compar, void *arg) {
+INTERCEPTOR(void *, bsearch, const void *key, const void *base, SIZE_T nmemb,
+ SIZE_T size, bsearch_compar_f compar) {
void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, qsort_r, base, nmemb, size, compar, arg);
- // Run the comparator over all array elements to detect any memory issues.
- if (nmemb > 1) {
- for (SIZE_T i = 0; i < nmemb - 1; ++i) {
- void *p = (void *)((char *)base + i * size);
- void *q = (void *)((char *)base + (i + 1) * size);
- COMMON_INTERCEPTOR_UNPOISON_PARAM(3);
- compar(p, q, arg);
- }
- }
- qsort_r_compar_f old_compar = qsort_r_compar;
- SIZE_T old_size = qsort_r_size;
- // Handle qsort_r() implementations that recurse using an
- // interposable function call:
- bool already_wrapped = compar == wrapped_qsort_r_compar;
- if (already_wrapped) {
- // This case should only happen if the qsort() implementation calls itself
- // using a preemptible function call (e.g. the FreeBSD libc version).
- // Check that the size and comparator arguments are as expected.
- CHECK_NE(compar, qsort_r_compar);
- CHECK_EQ(qsort_r_size, size);
- } else {
- qsort_r_compar = compar;
- qsort_r_size = size;
- }
- REAL(qsort_r)(base, nmemb, size, wrapped_qsort_r_compar, arg);
- if (!already_wrapped) {
- qsort_r_compar = old_compar;
- qsort_r_size = old_size;
- }
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, base, nmemb * size);
+ COMMON_INTERCEPTOR_ENTER(ctx, bsearch, key, base, nmemb, size, compar);
+ bsearch_compar_params params = {key, compar};
+ return REAL(bsearch)(&params, base, nmemb, size, wrapped_bsearch_compar);
}
-#define INIT_QSORT_R COMMON_INTERCEPT_FUNCTION(qsort_r)
+# define INIT_BSEARCH COMMON_INTERCEPT_FUNCTION(bsearch)
#else
-#define INIT_QSORT_R
+# define INIT_BSEARCH
#endif
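
Unlike qsort_r, bsearch offers no user pointer, so the interceptor above smuggles its state through the key argument itself: REAL(bsearch) receives &params as the key, and the wrapper unpacks it before invoking the real comparator (the C standard guarantees the key is always the comparator's first argument). The same trick in isolation, as a standalone sketch:

#include <cstdio>
#include <cstdlib>

typedef int (*compar_fn)(const void *, const void *);

struct key_params {
  const void *key;   // the user's real key
  compar_fn compar;  // the user's comparator
};

static int wrapped_key_compar(const void *key, const void *elem) {
  // bsearch always passes our fake key as the first argument.
  const key_params *params = static_cast<const key_params *>(key);
  return params->compar(params->key, elem);
}

static int cmp_int(const void *a, const void *b) {
  return *static_cast<const int *>(a) - *static_cast<const int *>(b);
}

int main() {
  int v[] = {1, 2, 3, 5, 8};  // must be sorted for bsearch
  int wanted = 5;
  key_params params = {&wanted, cmp_int};
  void *hit = bsearch(&params, v, 5, sizeof(int), wrapped_key_compar);
  printf("found: %d\n", hit ? *static_cast<int *>(hit) : -1);  // found: 5
  return 0;
}
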
#if SANITIZER_INTERCEPT_SIGALTSTACK
@@ -10050,6 +10144,42 @@ INTERCEPTOR(int, sigaltstack, void *ss, void *oss) {
#define INIT_SIGALTSTACK
#endif
+#if SANITIZER_INTERCEPT_PROCCTL
+INTERCEPTOR(int, procctl, int idtype, u64 id, int cmd, uptr data) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, procctl, idtype, id, cmd, data);
+ static const int PROC_REAP_ACQUIRE = 2;
+ static const int PROC_REAP_RELEASE = 3;
+ static const int PROC_REAP_STATUS = 4;
+ static const int PROC_REAP_GETPIDS = 5;
+ static const int PROC_REAP_KILL = 6;
+ if (cmd < PROC_REAP_ACQUIRE || cmd > PROC_REAP_KILL) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, (void *)data, sizeof(int));
+ } else {
+ // reap_acquire/reap_release take no arguments.
+ if (cmd > PROC_REAP_RELEASE) {
+ unsigned int reapsz;
+ switch (cmd) {
+ case PROC_REAP_STATUS:
+ reapsz = struct_procctl_reaper_status_sz;
+ break;
+ case PROC_REAP_GETPIDS:
+ reapsz = struct_procctl_reaper_pids_sz;
+ break;
+ case PROC_REAP_KILL:
+ reapsz = struct_procctl_reaper_kill_sz;
+ break;
+ }
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, (void *)data, reapsz);
+ }
+ }
+ return REAL(procctl)(idtype, id, cmd, data);
+}
+#define INIT_PROCCTL COMMON_INTERCEPT_FUNCTION(procctl)
+#else
+#define INIT_PROCCTL
+#endif
+
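
PROC_REAP_ACQUIRE and PROC_REAP_RELEASE are the two reaper commands the interceptor above exempts from the data check, since they carry no argument. A FreeBSD-specific sketch of the call shape (procctl(2) is not available on other platforms):

#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/procctl.h>  // FreeBSD-specific
#include <unistd.h>

int main() {
  // PROC_REAP_ACQUIRE takes no data argument; status/kill commands
  // would pass a struct whose size the interceptor checks instead.
  if (procctl(P_PID, getpid(), PROC_REAP_ACQUIRE, NULL) == 0)
    printf("pid %d is now a reaper of its descendants\n", (int)getpid());
  return 0;
}
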
#if SANITIZER_INTERCEPT_UNAME
INTERCEPTOR(int, uname, struct utsname *utsname) {
#if SANITIZER_LINUX
@@ -10088,14 +10218,52 @@ INTERCEPTOR(int, __xuname, int size, void *utsname) {
#define INIT___XUNAME
#endif
+#if SANITIZER_INTERCEPT_ARGP_PARSE
+INTERCEPTOR(int, argp_parse, const struct argp *argp, int argc, char **argv,
+ unsigned flags, int *arg_index, void *input) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, argp_parse, argp, argc, argv, flags, arg_index,
+ input);
+ for (int i = 0; i < argc; i++)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, argv[i], internal_strlen(argv[i]) + 1);
+ int res = REAL(argp_parse)(argp, argc, argv, flags, arg_index, input);
+ if (!res && arg_index)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, arg_index, sizeof(int));
+ return res;
+}
+
+#define INIT_ARGP_PARSE COMMON_INTERCEPT_FUNCTION(argp_parse);
+#else
+#define INIT_ARGP_PARSE
+#endif
+
+#if SANITIZER_INTERCEPT_CPUSET_GETAFFINITY
+INTERCEPTOR(int, cpuset_getaffinity, int level, int which, __int64_t id, SIZE_T cpusetsize, __sanitizer_cpuset_t *mask) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, cpuset_getaffinity, level, which, id, cpusetsize, mask);
+ int res = REAL(cpuset_getaffinity)(level, which, id, cpusetsize, mask);
+ if (mask && !res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mask, cpusetsize);
+ return res;
+}
+#define INIT_CPUSET_GETAFFINITY COMMON_INTERCEPT_FUNCTION(cpuset_getaffinity);
+#else
+#define INIT_CPUSET_GETAFFINITY
+#endif
+
#include "sanitizer_common_interceptors_netbsd_compat.inc"
+namespace __sanitizer {
+void InitializeMemintrinsicInterceptors();
+} // namespace __sanitizer
+
static void InitializeCommonInterceptors() {
#if SI_POSIX
static u64 metadata_mem[sizeof(MetadataHashMap) / sizeof(u64) + 1];
interceptor_metadata_map = new ((void *)&metadata_mem) MetadataHashMap();
#endif
+ __sanitizer::InitializeMemintrinsicInterceptors();
+
INIT_MMAP;
INIT_MMAP64;
INIT_TEXTDOMAIN;
@@ -10117,9 +10285,6 @@ static void InitializeCommonInterceptors() {
INIT_STRPBRK;
INIT_STRXFRM;
INIT___STRXFRM_L;
- INIT_MEMSET;
- INIT_MEMMOVE;
- INIT_MEMCPY;
INIT_MEMCHR;
INIT_MEMCMP;
INIT_BCMP;
@@ -10166,6 +10331,9 @@ static void InitializeCommonInterceptors() {
INIT_TIME;
INIT_GLOB;
INIT_GLOB64;
+ INIT___B64_TO;
+ INIT_DN_COMP_EXPAND;
+ INIT_POSIX_SPAWN;
INIT_WAIT;
INIT_WAIT4;
INIT_INET;
@@ -10200,6 +10368,7 @@ static void InitializeCommonInterceptors() {
INIT_GETCWD;
INIT_GET_CURRENT_DIR_NAME;
INIT_STRTOIMAX;
+ INIT_STRTOIMAX_C23;
INIT_MBSTOWCS;
INIT_MBSNRTOWCS;
INIT_WCSTOMBS;
@@ -10231,12 +10400,6 @@ static void InitializeCommonInterceptors() {
INIT_PTHREAD_SIGMASK;
INIT_BACKTRACE;
INIT__EXIT;
- INIT_PTHREAD_MUTEX_LOCK;
- INIT_PTHREAD_MUTEX_UNLOCK;
- INIT___PTHREAD_MUTEX_LOCK;
- INIT___PTHREAD_MUTEX_UNLOCK;
- INIT___LIBC_MUTEX_LOCK;
- INIT___LIBC_MUTEX_UNLOCK;
INIT___LIBC_THR_SETCANCELSTATE;
INIT_GETMNTENT;
INIT_GETMNTENT_R;
@@ -10254,6 +10417,7 @@ static void InitializeCommonInterceptors() {
INIT_PTHREAD_ATTR_GET_SCHED;
INIT_PTHREAD_ATTR_GETINHERITSCHED;
INIT_PTHREAD_ATTR_GETAFFINITY_NP;
+ INIT_PTHREAD_GETAFFINITY_NP;
INIT_PTHREAD_MUTEXATTR_GETPSHARED;
INIT_PTHREAD_MUTEXATTR_GETTYPE;
INIT_PTHREAD_MUTEXATTR_GETPROTOCOL;
@@ -10293,9 +10457,6 @@ static void InitializeCommonInterceptors() {
INIT_GETIFADDRS;
INIT_IF_INDEXTONAME;
INIT_CAPGET;
- INIT_AEABI_MEM;
- INIT___BZERO;
- INIT_BZERO;
INIT_FTIME;
INIT_XDR;
INIT_XDRREC_LINUX;
@@ -10322,8 +10483,10 @@ static void InitializeCommonInterceptors() {
INIT_RECV_RECVFROM;
INIT_SEND_SENDTO;
INIT_STAT;
+ INIT_STAT64;
INIT_EVENTFD_READ_WRITE;
INIT_LSTAT;
+ INIT_LSTAT64;
INIT___XSTAT;
INIT___XSTAT64;
INIT___LXSTAT;
@@ -10396,14 +10559,16 @@ static void InitializeCommonInterceptors() {
INIT_GETUSERSHELL;
INIT_SL_INIT;
INIT_GETRANDOM;
- INIT_CRYPT;
- INIT_CRYPT_R;
INIT_GETENTROPY;
INIT_QSORT;
INIT_QSORT_R;
+ INIT_BSEARCH;
INIT_SIGALTSTACK;
+ INIT_PROCCTL;
INIT_UNAME;
INIT___XUNAME;
+ INIT_ARGP_PARSE;
+ INIT_CPUSET_GETAFFINITY;
INIT___PRINTF_CHK;
}
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_format.inc b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_format.inc
index 082398ba960a..24e5dc0fb22f 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_format.inc
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_format.inc
@@ -324,8 +324,8 @@ static void scanf_common(void *ctx, int n_inputs, bool allowGnuMalloc,
continue;
int size = scanf_get_value_size(&dir);
if (size == FSS_INVALID) {
- Report("%s: WARNING: unexpected format specifier in scanf interceptor: ",
- SanitizerToolName, "%.*s\n", dir.end - dir.begin, dir.begin);
+ Report("%s: WARNING: unexpected format specifier in scanf interceptor: %.*s\n",
+ SanitizerToolName, static_cast<int>(dir.end - dir.begin), dir.begin);
break;
}
void *argp = va_arg(aq, void *);
@@ -340,11 +340,19 @@ static void scanf_common(void *ctx, int n_inputs, bool allowGnuMalloc,
size = 0;
}
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, argp, size);
- // For %ms/%mc, write the allocated output buffer as well.
+ // For %mc/%mC/%ms/%m[/%mS, write the allocated output buffer as well.
if (dir.allocate) {
- char *buf = *(char **)argp;
- if (buf)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, internal_strlen(buf) + 1);
+ if (char *buf = *(char **)argp) {
+ if (dir.convSpecifier == 'c')
+ size = 1;
+ else if (dir.convSpecifier == 'C')
+ size = sizeof(wchar_t);
+ else if (dir.convSpecifier == 'S')
+ size = (internal_wcslen((wchar_t *)buf) + 1) * sizeof(wchar_t);
+ else // 's' or '['
+ size = internal_strlen(buf) + 1;
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, size);
+ }
}
}
}
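
The %m modifier handled above is the one case where scanf itself allocates the output buffer, so the interceptor must also mark that allocation written, using the per-conversion sizes listed (one char for 'c', one wchar_t for 'C', a wide string for 'S', strlen + 1 for 's' and '['). A standalone sketch, assuming POSIX.1-2008 %ms support in the libc:

#include <cstdio>
#include <cstdlib>

int main() {
  char *word = nullptr;
  // POSIX %ms: sscanf allocates the buffer; the caller frees it.
  if (sscanf("hello world", "%ms", &word) == 1) {
    printf("%s\n", word);  // prints "hello"
    free(word);
  }
  return 0;
}
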
@@ -469,7 +477,7 @@ static int printf_get_value_size(PrintfDirective *dir) {
break; \
default: \
Report("WARNING: unexpected floating-point arg size" \
- " in printf interceptor: %d\n", size); \
+ " in printf interceptor: %zu\n", static_cast<uptr>(size)); \
return; \
} \
} else { \
@@ -484,7 +492,7 @@ static int printf_get_value_size(PrintfDirective *dir) {
break; \
default: \
Report("WARNING: unexpected arg size" \
- " in printf interceptor: %d\n", size); \
+ " in printf interceptor: %zu\n", static_cast<uptr>(size)); \
return; \
} \
} \
@@ -530,7 +538,7 @@ static void printf_common(void *ctx, const char *format, va_list aq) {
Report(
"%s: WARNING: unexpected format specifier in printf "
"interceptor: %.*s (reported once per process)\n",
- SanitizerToolName, dir.end - dir.begin, dir.begin);
+ SanitizerToolName, static_cast<int>(dir.end - dir.begin), dir.begin);
break;
}
if (dir.convSpecifier == 'n') {
@@ -539,24 +547,25 @@ static void printf_common(void *ctx, const char *format, va_list aq) {
continue;
} else if (size == FSS_STRLEN) {
if (void *argp = va_arg(aq, void *)) {
+ uptr len;
if (dir.starredPrecision) {
// FIXME: properly support starred precision for strings.
- size = 0;
+ len = 0;
} else if (dir.fieldPrecision > 0) {
// Won't read more than "precision" symbols.
- size = internal_strnlen((const char *)argp, dir.fieldPrecision);
- if (size < dir.fieldPrecision) size++;
+ len = internal_strnlen((const char *)argp, dir.fieldPrecision);
+ if (len < (uptr)dir.fieldPrecision)
+ len++;
} else {
// Whole string will be accessed.
- size = internal_strlen((const char *)argp) + 1;
+ len = internal_strlen((const char *)argp) + 1;
}
- COMMON_INTERCEPTOR_READ_RANGE(ctx, argp, size);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, argp, len);
}
} else if (size == FSS_WCSLEN) {
if (void *argp = va_arg(aq, void *)) {
// FIXME: Properly support wide-character strings (via wcsrtombs).
- size = 0;
- COMMON_INTERCEPTOR_READ_RANGE(ctx, argp, size);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, argp, 0);
}
} else {
// Skip non-pointer args
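
The field-precision branch above relies on printf reading at most `precision` bytes of a %s argument, counting the terminator only when the string ends earlier; the argument need not be NUL-terminated within that range. A small standalone demonstration:

#include <cstdio>
#include <cstring>

int main() {
  char partial[4] = {'a', 'b', 'c', 'd'};  // deliberately not NUL-terminated
  printf("%.4s\n", partial);  // reads exactly 4 bytes, prints "abcd"
  // strnlen mirrors the interceptor's length computation:
  printf("%zu\n", strnlen("ab", 4) + 1);  // 3: "ab" plus its terminator
  return 0;
}
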
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc
index b7da65987557..49ec4097c900 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc
@@ -115,11 +115,19 @@ static void ioctl_table_fill() {
// _(SOUND_MIXER_WRITE_MUTE, WRITE, sizeof(int)); // same as ...WRITE_ENHANCE
_(BLKFLSBUF, NONE, 0);
_(BLKGETSIZE, WRITE, sizeof(uptr));
- _(BLKRAGET, WRITE, sizeof(int));
+ _(BLKRAGET, WRITE, sizeof(uptr));
_(BLKRASET, NONE, 0);
_(BLKROGET, WRITE, sizeof(int));
_(BLKROSET, READ, sizeof(int));
_(BLKRRPART, NONE, 0);
+ _(BLKFRASET, NONE, 0);
+ _(BLKFRAGET, WRITE, sizeof(uptr));
+ _(BLKSECTSET, READ, sizeof(short));
+ _(BLKSECTGET, WRITE, sizeof(short));
+ _(BLKSSZGET, WRITE, sizeof(int));
+ _(BLKBSZGET, WRITE, sizeof(int));
+ _(BLKBSZSET, READ, sizeof(uptr));
+ _(BLKGETSIZE64, WRITE, sizeof(u64));
_(CDROMEJECT, NONE, 0);
_(CDROMEJECT_SW, NONE, 0);
_(CDROMMULTISESSION, WRITE, struct_cdrom_multisession_sz);
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_memintrinsics.inc b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_memintrinsics.inc
new file mode 100644
index 000000000000..52e489d02cda
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_memintrinsics.inc
@@ -0,0 +1,244 @@
+//===-- sanitizer_common_interceptors_memintrinsics.inc ---------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Memintrinsic function interceptors for tools like AddressSanitizer,
+// ThreadSanitizer, MemorySanitizer, etc.
+//
+// These interceptors are part of the common interceptors, but separated out so
+// that implementations may add them, if necessary, to a separate source file
+// that should define SANITIZER_COMMON_NO_REDEFINE_BUILTINS at the top.
+//
+// This file should be included into the tool's memintrinsic interceptor file,
+// which has to define its own macros:
+// COMMON_INTERCEPTOR_ENTER
+// COMMON_INTERCEPTOR_READ_RANGE
+// COMMON_INTERCEPTOR_WRITE_RANGE
+// COMMON_INTERCEPTOR_MEMSET_IMPL
+// COMMON_INTERCEPTOR_MEMMOVE_IMPL
+// COMMON_INTERCEPTOR_MEMCPY_IMPL
+// COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED
+//===----------------------------------------------------------------------===//
+
+#ifdef SANITIZER_REDEFINE_BUILTINS_H
+#error "Define SANITIZER_COMMON_NO_REDEFINE_BUILTINS in .cpp file"
+#endif
+
+#include "interception/interception.h"
+#include "sanitizer_platform_interceptors.h"
+
+// Platform-specific options.
+#if SANITIZER_APPLE
+#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 0
+#elif SANITIZER_WINDOWS64
+#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 0
+#else
+#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 1
+#endif // SANITIZER_APPLE
+
+#ifndef COMMON_INTERCEPTOR_MEMSET_IMPL
+#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size) \
+ { \
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) \
+ return internal_memset(dst, v, size); \
+ COMMON_INTERCEPTOR_ENTER(ctx, memset, dst, v, size); \
+ if (common_flags()->intercept_intrin) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
+ return REAL(memset)(dst, v, size); \
+ }
+#endif
+
+#ifndef COMMON_INTERCEPTOR_MEMMOVE_IMPL
+#define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size) \
+ { \
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) \
+ return internal_memmove(dst, src, size); \
+ COMMON_INTERCEPTOR_ENTER(ctx, memmove, dst, src, size); \
+ if (common_flags()->intercept_intrin) { \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size); \
+ } \
+ return REAL(memmove)(dst, src, size); \
+ }
+#endif
+
+#ifndef COMMON_INTERCEPTOR_MEMCPY_IMPL
+#define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size) \
+ { \
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) { \
+ return internal_memmove(dst, src, size); \
+ } \
+ COMMON_INTERCEPTOR_ENTER(ctx, memcpy, dst, src, size); \
+ if (common_flags()->intercept_intrin) { \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size); \
+ } \
+ return REAL(memcpy)(dst, src, size); \
+ }
+#endif
+
+#if SANITIZER_INTERCEPT_MEMSET
+INTERCEPTOR(void *, memset, void *dst, int v, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size);
+}
+
+#define INIT_MEMSET COMMON_INTERCEPT_FUNCTION(memset)
+#else
+#define INIT_MEMSET
+#endif
+
+#if SANITIZER_INTERCEPT_MEMMOVE
+INTERCEPTOR(void *, memmove, void *dst, const void *src, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
+}
+
+#define INIT_MEMMOVE COMMON_INTERCEPT_FUNCTION(memmove)
+#else
+#define INIT_MEMMOVE
+#endif
+
+#if SANITIZER_INTERCEPT_MEMCPY
+INTERCEPTOR(void *, memcpy, void *dst, const void *src, uptr size) {
+ // On OS X, memcpy and memmove are aliases of the same implementation, so
+ // calling internal_memcpy here would cause memory corruption;
+ // internal_memmove must be used instead.
+ void *ctx;
+#if PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE
+ COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size);
+#else
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
+#endif
+}
+
+#define INIT_MEMCPY \
+ do { \
+ if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) { \
+ COMMON_INTERCEPT_FUNCTION(memcpy); \
+ } else { \
+ ASSIGN_REAL(memcpy, memmove); \
+ } \
+ CHECK(REAL(memcpy)); \
+ } while (false)
+
+#else
+#define INIT_MEMCPY
+#endif
+
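
INIT_MEMCPY above points REAL(memcpy) at memmove on platforms where the two share one implementation; the distinction matters because only memmove is defined for overlapping ranges. A tiny standalone demonstration of the semantics being preserved:

#include <cstdio>
#include <cstring>

int main() {
  char buf[] = "abcdef";
  // Overlapping copy: well defined for memmove, undefined for memcpy.
  memmove(buf + 2, buf, 4);
  printf("%s\n", buf);  // prints "ababcd"
  return 0;
}
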
+#if SANITIZER_INTERCEPT_AEABI_MEM
+INTERCEPTOR(void *, __aeabi_memmove, void *to, const void *from, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memmove4, void *to, const void *from, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memmove8, void *to, const void *from, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memcpy, void *to, const void *from, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memcpy4, void *to, const void *from, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memcpy8, void *to, const void *from, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
+}
+
+// Note the argument order.
+INTERCEPTOR(void *, __aeabi_memset, void *block, uptr size, int c) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memset4, void *block, uptr size, int c) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memset8, void *block, uptr size, int c) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memclr, void *block, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memclr4, void *block, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memclr8, void *block, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
+}
+
+#define INIT_AEABI_MEM \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memmove); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memmove4); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memmove8); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy4); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy8); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memset); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memset4); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memset8); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memclr); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memclr4); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memclr8);
+#else
+#define INIT_AEABI_MEM
+#endif // SANITIZER_INTERCEPT_AEABI_MEM
+
+#if SANITIZER_INTERCEPT___BZERO
+INTERCEPTOR(void *, __bzero, void *block, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
+}
+#define INIT___BZERO COMMON_INTERCEPT_FUNCTION(__bzero);
+#else
+#define INIT___BZERO
+#endif // SANITIZER_INTERCEPT___BZERO
+
+#if SANITIZER_INTERCEPT_BZERO
+INTERCEPTOR(void *, bzero, void *block, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
+}
+#define INIT_BZERO COMMON_INTERCEPT_FUNCTION(bzero);
+#else
+#define INIT_BZERO
+#endif // SANITIZER_INTERCEPT_BZERO
+
+namespace __sanitizer {
+// This does not need to be called if InitializeCommonInterceptors() is called.
+void InitializeMemintrinsicInterceptors() {
+ INIT_MEMSET;
+ INIT_MEMMOVE;
+ INIT_MEMCPY;
+ INIT_AEABI_MEM;
+ INIT___BZERO;
+ INIT_BZERO;
+}
+} // namespace __sanitizer
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_netbsd_compat.inc b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_netbsd_compat.inc
index 6aa73ec8c6a2..f6ac3fa5af18 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_netbsd_compat.inc
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_netbsd_compat.inc
@@ -33,7 +33,7 @@
INTERCEPTOR(int, statvfs, char *path, void *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, statvfs, path, buf);
- if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
@@ -99,7 +99,7 @@ INTERCEPTOR(int, getvfsstat, void *buf, SIZE_T bufsize, int flags) {
INTERCEPTOR(int, statvfs1, const char *path, void *buf, int flags) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, statvfs1, path, buf, flags);
- if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
int res = REAL(statvfs1)(path, buf, flags);
if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs90_sz);
return res;
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_aarch64.inc.S b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_aarch64.inc.S
index 72e482754b62..cdfa6f1d7f53 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_aarch64.inc.S
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_aarch64.inc.S
@@ -40,8 +40,8 @@ ASM_WRAPPER_NAME(vfork):
ret
ASM_SIZE(vfork)
-.weak vfork
-.set vfork, ASM_WRAPPER_NAME(vfork)
+ASM_INTERCEPTOR_TRAMPOLINE(vfork)
+ASM_TRAMPOLINE_ALIAS(vfork, vfork)
GNU_PROPERTY_BTI_PAC
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_arm.inc.S b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_arm.inc.S
index 780a9d46e26a..87bb48380569 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_arm.inc.S
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_arm.inc.S
@@ -43,7 +43,7 @@ ASM_WRAPPER_NAME(vfork):
ASM_SIZE(vfork)
-.weak vfork
-.set vfork, ASM_WRAPPER_NAME(vfork)
+ASM_INTERCEPTOR_TRAMPOLINE(vfork)
+ASM_TRAMPOLINE_ALIAS(vfork, vfork)
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_i386.inc.S b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_i386.inc.S
index ed693819c6d4..c633014e2daa 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_i386.inc.S
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_i386.inc.S
@@ -6,6 +6,7 @@
.globl ASM_WRAPPER_NAME(vfork)
ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(vfork))
ASM_WRAPPER_NAME(vfork):
+ _CET_ENDBR
// Store return address in the spill area and tear down the stack frame.
sub $12, %esp
call COMMON_INTERCEPTOR_SPILL_AREA
@@ -57,7 +58,7 @@ ASM_WRAPPER_NAME(vfork):
ret
ASM_SIZE(vfork)
-.weak vfork
-.set vfork, ASM_WRAPPER_NAME(vfork)
+ASM_INTERCEPTOR_TRAMPOLINE(vfork)
+ASM_TRAMPOLINE_ALIAS(vfork, vfork)
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_loongarch64.inc.S b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_loongarch64.inc.S
new file mode 100644
index 000000000000..8429d57d669c
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_loongarch64.inc.S
@@ -0,0 +1,57 @@
+#if defined(__loongarch_lp64) && defined(__linux__)
+
+#include "sanitizer_common/sanitizer_asm.h"
+
+ASM_HIDDEN(COMMON_INTERCEPTOR_SPILL_AREA)
+ASM_HIDDEN(_ZN14__interception10real_vforkE)
+
+.text
+.globl ASM_WRAPPER_NAME(vfork)
+ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(vfork))
+ASM_WRAPPER_NAME(vfork):
+ // Save ra in the off-stack spill area.
+ // allocate space on stack
+ addi.d $sp, $sp, -16
+ // store $ra value
+ st.d $ra, $sp, 8
+ bl COMMON_INTERCEPTOR_SPILL_AREA
+ // restore previous values from stack
+ ld.d $ra, $sp, 8
+ // adjust stack
+ addi.d $sp, $sp, 16
+ // store $ra by $a0
+ st.d $ra, $a0, 0
+
+ // Call real vfork. This may return twice. User code that runs between the first and the second return
+ // may clobber the stack frame of the interceptor; that's why it does not have a frame.
+ la.local $a0, _ZN14__interception10real_vforkE
+ ld.d $a0, $a0, 0
+ jirl $ra, $a0, 0
+
+ // adjust stack
+ addi.d $sp, $sp, -16
+ // store $a0 by adjusted stack
+ st.d $a0, $sp, 8
+ // jump to exit label if $a0 is 0
+ beqz $a0, .L_exit
+
+ // $a0 != 0 => parent process. Clear stack shadow.
+ // put old $sp to $a0
+ addi.d $a0, $sp, 16
+ bl %plt(COMMON_INTERCEPTOR_HANDLE_VFORK)
+
+.L_exit:
+ // Restore $ra
+ bl COMMON_INTERCEPTOR_SPILL_AREA
+ ld.d $ra, $a0, 0
+ // load value by stack
+ ld.d $a0, $sp, 8
+ // adjust stack
+ addi.d $sp, $sp, 16
+ jr $ra
+ASM_SIZE(vfork)
+
+ASM_INTERCEPTOR_TRAMPOLINE(vfork)
+ASM_TRAMPOLINE_ALIAS(vfork, vfork)
+
+#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_riscv64.inc.S b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_riscv64.inc.S
index b7ec27859b8a..5b6ea6fe6c7a 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_riscv64.inc.S
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_riscv64.inc.S
@@ -50,7 +50,7 @@ ASM_WRAPPER_NAME(vfork):
ret
ASM_SIZE(vfork)
-.weak vfork
-.set vfork, ASM_WRAPPER_NAME(vfork)
+ASM_INTERCEPTOR_TRAMPOLINE(vfork)
+ASM_TRAMPOLINE_ALIAS(vfork, vfork)
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_x86_64.inc.S b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_x86_64.inc.S
index 8147cdd09247..5500f817aec5 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_x86_64.inc.S
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_x86_64.inc.S
@@ -6,6 +6,7 @@
.globl ASM_WRAPPER_NAME(vfork)
ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(vfork))
ASM_WRAPPER_NAME(vfork):
+ _CET_ENDBR
// Store return address in the spill area and tear down the stack frame.
push %rcx
call COMMON_INTERCEPTOR_SPILL_AREA
@@ -33,9 +34,9 @@ ASM_WRAPPER_NAME(vfork):
.L_exit:
pop %rax
ret
-ASM_SIZE(vfork)
+ASM_SIZE(ASM_WRAPPER_NAME(vfork))
-.weak vfork
-.set vfork, ASM_WRAPPER_NAME(vfork)
+ASM_INTERCEPTOR_TRAMPOLINE(vfork)
+ASM_TRAMPOLINE_ALIAS(vfork, vfork)
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interface.inc b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interface.inc
index 932e5478616d..557207fe62ac 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interface.inc
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interface.inc
@@ -9,12 +9,16 @@
//===----------------------------------------------------------------------===//
INTERFACE_FUNCTION(__sanitizer_acquire_crash_state)
INTERFACE_FUNCTION(__sanitizer_annotate_contiguous_container)
+INTERFACE_FUNCTION(__sanitizer_annotate_double_ended_contiguous_container)
INTERFACE_FUNCTION(__sanitizer_contiguous_container_find_bad_address)
+INTERFACE_FUNCTION(
+ __sanitizer_double_ended_contiguous_container_find_bad_address)
INTERFACE_FUNCTION(__sanitizer_set_death_callback)
INTERFACE_FUNCTION(__sanitizer_set_report_path)
INTERFACE_FUNCTION(__sanitizer_set_report_fd)
INTERFACE_FUNCTION(__sanitizer_get_report_path)
INTERFACE_FUNCTION(__sanitizer_verify_contiguous_container)
+INTERFACE_FUNCTION(__sanitizer_verify_double_ended_contiguous_container)
INTERFACE_WEAK_FUNCTION(__sanitizer_on_print)
INTERFACE_WEAK_FUNCTION(__sanitizer_report_error_summary)
INTERFACE_WEAK_FUNCTION(__sanitizer_sandbox_on_notify)
@@ -28,7 +32,9 @@ INTERFACE_FUNCTION(__sanitizer_get_module_and_offset_for_pc)
INTERFACE_FUNCTION(__sanitizer_symbolize_global)
INTERFACE_FUNCTION(__sanitizer_symbolize_pc)
// Allocator interface.
+INTERFACE_FUNCTION(__sanitizer_get_allocated_begin)
INTERFACE_FUNCTION(__sanitizer_get_allocated_size)
+INTERFACE_FUNCTION(__sanitizer_get_allocated_size_fast)
INTERFACE_FUNCTION(__sanitizer_get_current_allocated_bytes)
INTERFACE_FUNCTION(__sanitizer_get_estimated_allocated_size)
INTERFACE_FUNCTION(__sanitizer_get_free_bytes)
@@ -40,3 +46,7 @@ INTERFACE_FUNCTION(__sanitizer_purge_allocator)
INTERFACE_FUNCTION(__sanitizer_print_memory_profile)
INTERFACE_WEAK_FUNCTION(__sanitizer_free_hook)
INTERFACE_WEAK_FUNCTION(__sanitizer_malloc_hook)
+// Memintrinsic functions.
+INTERFACE_FUNCTION(__sanitizer_internal_memcpy)
+INTERFACE_FUNCTION(__sanitizer_internal_memmove)
+INTERFACE_FUNCTION(__sanitizer_internal_memset)
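The newly exported entry points pair the classic single-ended container annotation with a double-ended variant (for deque-like containers that grow and shrink at both ends), alongside `__sanitizer_get_allocated_begin` and `__sanitizer_get_allocated_size_fast` on the allocator side. A hedged usage sketch of the double-ended annotation, assuming the public declaration in <sanitizer/common_interface_defs.h>; the buffer layout and function name `shrink_front` are illustrative:

    #include <sanitizer/common_interface_defs.h>

    // A deque-like buffer: storage is [buf, buf_end), live elements are
    // [beg, end). After removing elements from the front, tell ASan which
    // storage bytes are no longer addressable.
    void shrink_front(char *buf, char *buf_end,
                      char *old_beg, char *old_end,
                      char *new_beg, char *new_end) {
      __sanitizer_annotate_double_ended_contiguous_container(
          buf, buf_end,        // whole storage range
          old_beg, old_end,    // previously live range
          new_beg, new_end);   // live range after the update
    }

With the annotation in place, ASan reports a container-overflow error for accesses to storage bytes outside the new live range.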
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interface_posix.inc b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interface_posix.inc
index 38f9531148d4..6b567edc97a8 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interface_posix.inc
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_interface_posix.inc
@@ -9,5 +9,8 @@
//===----------------------------------------------------------------------===//
INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_code)
INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_data)
+INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_frame)
INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_demangle)
INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_flush)
+INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_set_demangle)
+INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_set_inline_frames)
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp
index 01ccacc6f320..7b74bb1a7e0f 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp
@@ -10,27 +10,22 @@
// run-time libraries.
//===----------------------------------------------------------------------===//
+#include "sanitizer_allocator.h"
#include "sanitizer_allocator_interface.h"
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
+#include "sanitizer_interface_internal.h"
#include "sanitizer_procmaps.h"
-
+#include "sanitizer_stackdepot.h"
namespace __sanitizer {
-static void (*SoftRssLimitExceededCallback)(bool exceeded);
-void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded)) {
- CHECK_EQ(SoftRssLimitExceededCallback, nullptr);
- SoftRssLimitExceededCallback = Callback;
-}
-
#if (SANITIZER_LINUX || SANITIZER_NETBSD) && !SANITIZER_GO
// Weak default implementation for when sanitizer_stackdepot is not linked in.
-SANITIZER_WEAK_ATTRIBUTE StackDepotStats *StackDepotGetStats() {
- return nullptr;
-}
+SANITIZER_WEAK_ATTRIBUTE StackDepotStats StackDepotGetStats() { return {}; }
void *BackgroundThread(void *arg) {
+ VPrintf(1, "%s: Started BackgroundThread\n", SanitizerToolName);
const uptr hard_rss_limit_mb = common_flags()->hard_rss_limit_mb;
const uptr soft_rss_limit_mb = common_flags()->soft_rss_limit_mb;
const bool heap_profile = common_flags()->heap_profile;
@@ -48,16 +43,12 @@ void *BackgroundThread(void *arg) {
prev_reported_rss = current_rss_mb;
}
// If stack depot has grown 10% since last time, print it too.
- StackDepotStats *stack_depot_stats = StackDepotGetStats();
- if (stack_depot_stats) {
- if (prev_reported_stack_depot_size * 11 / 10 <
- stack_depot_stats->allocated) {
- Printf("%s: StackDepot: %zd ids; %zdM allocated\n",
- SanitizerToolName,
- stack_depot_stats->n_uniq_ids,
- stack_depot_stats->allocated >> 20);
- prev_reported_stack_depot_size = stack_depot_stats->allocated;
- }
+ StackDepotStats stack_depot_stats = StackDepotGetStats();
+ if (prev_reported_stack_depot_size * 11 / 10 <
+ stack_depot_stats.allocated) {
+ Printf("%s: StackDepot: %zd ids; %zdM allocated\n", SanitizerToolName,
+ stack_depot_stats.n_uniq_ids, stack_depot_stats.allocated >> 20);
+ prev_reported_stack_depot_size = stack_depot_stats.allocated;
}
}
// Check RSS against the limit.
@@ -72,13 +63,13 @@ void *BackgroundThread(void *arg) {
reached_soft_rss_limit = true;
Report("%s: soft rss limit exhausted (%zdMb vs %zdMb)\n",
SanitizerToolName, soft_rss_limit_mb, current_rss_mb);
- if (SoftRssLimitExceededCallback)
- SoftRssLimitExceededCallback(true);
+ SetRssLimitExceeded(true);
} else if (soft_rss_limit_mb >= current_rss_mb &&
reached_soft_rss_limit) {
reached_soft_rss_limit = false;
- if (SoftRssLimitExceededCallback)
- SoftRssLimitExceededCallback(false);
+ Report("%s: soft rss limit unexhausted (%zdMb vs %zdMb)\n",
+ SanitizerToolName, soft_rss_limit_mb, current_rss_mb);
+ SetRssLimitExceeded(false);
}
}
if (heap_profile &&
@@ -89,11 +80,49 @@ void *BackgroundThread(void *arg) {
}
}
}
+
+void MaybeStartBackgroudThread() {
+ // Need to implement/test on other platforms.
+ // Start the background thread if one of the rss limits is given.
+ if (!common_flags()->hard_rss_limit_mb &&
+ !common_flags()->soft_rss_limit_mb &&
+ !common_flags()->heap_profile) return;
+ if (!&real_pthread_create) {
+ VPrintf(1, "%s: real_pthread_create undefined\n", SanitizerToolName);
+ return; // Can't spawn the thread anyway.
+ }
+
+ static bool started = false;
+ if (!started) {
+ started = true;
+ internal_start_thread(BackgroundThread, nullptr);
+ }
+}
+
+# if !SANITIZER_START_BACKGROUND_THREAD_IN_ASAN_INTERNAL
+# ifdef __clang__
+# pragma clang diagnostic push
+// We avoid global-constructors to be sure that globals are ready when
+// sanitizers need them. This can happend before global constructors executed.
+// Here we don't mind if thread is started on later stages.
+# pragma clang diagnostic ignored "-Wglobal-constructors"
+# endif
+static struct BackgroudThreadStarted {
+ BackgroudThreadStarted() { MaybeStartBackgroudThread(); }
+} background_thread_strarter UNUSED;
+# ifdef __clang__
+# pragma clang diagnostic pop
+# endif
+# endif
+#else
+void MaybeStartBackgroudThread() {}
#endif
void WriteToSyslog(const char *msg) {
+ if (!msg)
+ return;
InternalScopedString msg_copy;
- msg_copy.append("%s", msg);
+ msg_copy.Append(msg);
const char *p = msg_copy.data();
// Print one line at a time.
@@ -111,18 +140,6 @@ void WriteToSyslog(const char *msg) {
WriteOneLineToSyslog(p);
}
-void MaybeStartBackgroudThread() {
-#if (SANITIZER_LINUX || SANITIZER_NETBSD) && \
- !SANITIZER_GO // Need to implement/test on other platforms.
- // Start the background thread if one of the rss limits is given.
- if (!common_flags()->hard_rss_limit_mb &&
- !common_flags()->soft_rss_limit_mb &&
- !common_flags()->heap_profile) return;
- if (!&real_pthread_create) return; // Can't spawn the thread anyway.
- internal_start_thread(BackgroundThread, nullptr);
-#endif
-}
-
static void (*sandboxing_callback)();
void SetSandboxingCallback(void (*f)()) {
sandboxing_callback = f;
@@ -191,10 +208,22 @@ void ProtectGap(uptr addr, uptr size, uptr zero_base_shadow_start,
#endif // !SANITIZER_FUCHSIA
+#if !SANITIZER_WINDOWS && !SANITIZER_GO
+// Weak default implementation for when sanitizer_stackdepot is not linked in.
+SANITIZER_WEAK_ATTRIBUTE void StackDepotStopBackgroundThread() {}
+static void StopStackDepotBackgroundThread() {
+ StackDepotStopBackgroundThread();
+}
+#else
+// SANITIZER_WEAK_ATTRIBUTE is unsupported.
+static void StopStackDepotBackgroundThread() {}
+#endif
+
} // namespace __sanitizer
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_sandbox_on_notify,
__sanitizer_sandbox_arguments *args) {
+ __sanitizer::StopStackDepotBackgroundThread();
__sanitizer::PlatformPrepareForSandboxing(args);
if (__sanitizer::sandboxing_callback)
__sanitizer::sandboxing_callback();
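Two patterns in the rewritten libcdep code are worth calling out: the RSS-limit callback registration is replaced by a plain flag setter (SetRssLimitExceeded), and MaybeStartBackgroudThread becomes idempotent behind a static guard, so the global-constructor starter and any explicit caller cannot spawn two background threads. A minimal sketch of both, assuming an IsRssLimitExceeded() reader as the flag's consumer (the allocator in the real runtime); names other than SetRssLimitExceeded are illustrative:

    #include <atomic>
    #include <cstddef>
    #include <cstdlib>

    static std::atomic<bool> rss_limit_exceeded{false};

    void SetRssLimitExceeded(bool exceeded) {  // called by BackgroundThread
      rss_limit_exceeded.store(exceeded, std::memory_order_relaxed);
    }

    bool IsRssLimitExceeded() {                // consulted on allocation paths
      return rss_limit_exceeded.load(std::memory_order_relaxed);
    }

    void *MaybeAllocate(std::size_t n) {
      if (IsRssLimitExceeded())
        return nullptr;  // refuse to grow past the soft RSS limit
      return std::malloc(n);
    }

    void StartBackgroundThreadOnce() {
      static bool started = false;  // same idempotence guard as the patch
      if (!started) {
        started = true;
        // internal_start_thread(BackgroundThread, nullptr) in the runtime.
      }
    }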
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_nolibc.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_nolibc.cpp
index 9a4e5388f24d..67e77a877781 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_nolibc.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_nolibc.cpp
@@ -25,9 +25,10 @@ void LogMessageOnPrintf(const char *str) {}
#endif
void WriteToSyslog(const char *buffer) {}
void Abort() { internal__exit(1); }
+bool CreateDir(const char *pathname) { return false; }
#endif // !SANITIZER_WINDOWS
-#if !SANITIZER_WINDOWS && !SANITIZER_MAC
+#if !SANITIZER_WINDOWS && !SANITIZER_APPLE
void ListOfModules::init() {}
void InitializePlatformCommonFlags(CommonFlags *cf) {}
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_syscalls.inc b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_syscalls.inc
index 1b89d6e17684..c10943b3e487 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_syscalls.inc
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_common_syscalls.inc
@@ -43,45 +43,47 @@
#include "sanitizer_platform.h"
#if SANITIZER_LINUX
-#include "sanitizer_libc.h"
+# include "sanitizer_libc.h"
-#define PRE_SYSCALL(name) \
- SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_syscall_pre_impl_##name
-#define PRE_READ(p, s) COMMON_SYSCALL_PRE_READ_RANGE(p, s)
-#define PRE_WRITE(p, s) COMMON_SYSCALL_PRE_WRITE_RANGE(p, s)
+# define PRE_SYSCALL(name) \
+ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_syscall_pre_impl_##name
+# define PRE_READ(p, s) COMMON_SYSCALL_PRE_READ_RANGE(p, s)
+# define PRE_WRITE(p, s) COMMON_SYSCALL_PRE_WRITE_RANGE(p, s)
-#define POST_SYSCALL(name) \
- SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_syscall_post_impl_##name
-#define POST_READ(p, s) COMMON_SYSCALL_POST_READ_RANGE(p, s)
-#define POST_WRITE(p, s) COMMON_SYSCALL_POST_WRITE_RANGE(p, s)
+# define POST_SYSCALL(name) \
+ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_syscall_post_impl_##name
+# define POST_READ(p, s) COMMON_SYSCALL_POST_READ_RANGE(p, s)
+# define POST_WRITE(p, s) COMMON_SYSCALL_POST_WRITE_RANGE(p, s)
-#ifndef COMMON_SYSCALL_ACQUIRE
-# define COMMON_SYSCALL_ACQUIRE(addr) ((void)(addr))
-#endif
+# ifndef COMMON_SYSCALL_ACQUIRE
+# define COMMON_SYSCALL_ACQUIRE(addr) ((void)(addr))
+# endif
-#ifndef COMMON_SYSCALL_RELEASE
-# define COMMON_SYSCALL_RELEASE(addr) ((void)(addr))
-#endif
+# ifndef COMMON_SYSCALL_RELEASE
+# define COMMON_SYSCALL_RELEASE(addr) ((void)(addr))
+# endif
-#ifndef COMMON_SYSCALL_FD_CLOSE
-# define COMMON_SYSCALL_FD_CLOSE(fd) ((void)(fd))
-#endif
+# ifndef COMMON_SYSCALL_FD_CLOSE
+# define COMMON_SYSCALL_FD_CLOSE(fd) ((void)(fd))
+# endif
-#ifndef COMMON_SYSCALL_FD_ACQUIRE
-# define COMMON_SYSCALL_FD_ACQUIRE(fd) ((void)(fd))
-#endif
+# ifndef COMMON_SYSCALL_FD_ACQUIRE
+# define COMMON_SYSCALL_FD_ACQUIRE(fd) ((void)(fd))
+# endif
-#ifndef COMMON_SYSCALL_FD_RELEASE
-# define COMMON_SYSCALL_FD_RELEASE(fd) ((void)(fd))
-#endif
+# ifndef COMMON_SYSCALL_FD_RELEASE
+# define COMMON_SYSCALL_FD_RELEASE(fd) ((void)(fd))
+# endif
-#ifndef COMMON_SYSCALL_PRE_FORK
-# define COMMON_SYSCALL_PRE_FORK() {}
-#endif
+# ifndef COMMON_SYSCALL_PRE_FORK
+# define COMMON_SYSCALL_PRE_FORK() \
+ {}
+# endif
-#ifndef COMMON_SYSCALL_POST_FORK
-# define COMMON_SYSCALL_POST_FORK(res) {}
-#endif
+# ifndef COMMON_SYSCALL_POST_FORK
+# define COMMON_SYSCALL_POST_FORK(res) \
+ {}
+# endif
// FIXME: do some kind of PRE_READ for all syscall arguments (int(s) and such).
@@ -130,8 +132,8 @@ struct sanitizer_kernel_sockaddr {
// Declare it "void" to catch sizeof(kernel_sigset_t).
typedef void kernel_sigset_t;
-static void kernel_write_iovec(const __sanitizer_iovec *iovec,
- SIZE_T iovlen, SIZE_T maxlen) {
+static void kernel_write_iovec(const __sanitizer_iovec *iovec, SIZE_T iovlen,
+ SIZE_T maxlen) {
for (SIZE_T i = 0; i < iovlen && maxlen; ++i) {
SSIZE_T sz = Min(iovec[i].iov_len, maxlen);
POST_WRITE(iovec[i].iov_base, sz);
@@ -141,8 +143,8 @@ static void kernel_write_iovec(const __sanitizer_iovec *iovec,
// This function uses POST_READ, because it needs to run after the syscall to
// know the real read range.
-static void kernel_read_iovec(const __sanitizer_iovec *iovec,
- SIZE_T iovlen, SIZE_T maxlen) {
+static void kernel_read_iovec(const __sanitizer_iovec *iovec, SIZE_T iovlen,
+ SIZE_T maxlen) {
POST_READ(iovec, sizeof(*iovec) * iovlen);
for (SIZE_T i = 0; i < iovlen && maxlen; ++i) {
SSIZE_T sz = Min(iovec[i].iov_len, maxlen);
@@ -155,8 +157,8 @@ PRE_SYSCALL(recvmsg)(long sockfd, sanitizer_kernel_msghdr *msg, long flags) {
PRE_READ(msg, sizeof(*msg));
}
-POST_SYSCALL(recvmsg)(long res, long sockfd, sanitizer_kernel_msghdr *msg,
- long flags) {
+POST_SYSCALL(recvmsg)
+(long res, long sockfd, sanitizer_kernel_msghdr *msg, long flags) {
if (res >= 0) {
if (msg) {
for (unsigned long i = 0; i < msg->msg_iovlen; ++i) {
@@ -167,13 +169,14 @@ POST_SYSCALL(recvmsg)(long res, long sockfd, sanitizer_kernel_msghdr *msg,
}
}
-PRE_SYSCALL(recvmmsg)(long fd, sanitizer_kernel_mmsghdr *msg, long vlen,
- long flags, void *timeout) {
+PRE_SYSCALL(recvmmsg)
+(long fd, sanitizer_kernel_mmsghdr *msg, long vlen, long flags, void *timeout) {
PRE_READ(msg, vlen * sizeof(*msg));
}
-POST_SYSCALL(recvmmsg)(long res, long fd, sanitizer_kernel_mmsghdr *msg,
- long vlen, long flags, void *timeout) {
+POST_SYSCALL(recvmmsg)
+(long res, long fd, sanitizer_kernel_mmsghdr *msg, long vlen, long flags,
+ void *timeout) {
if (res >= 0) {
if (msg) {
for (unsigned long i = 0; i < msg->msg_hdr.msg_iovlen; ++i) {
@@ -183,7 +186,8 @@ POST_SYSCALL(recvmmsg)(long res, long fd, sanitizer_kernel_mmsghdr *msg,
POST_WRITE(msg->msg_hdr.msg_control, msg->msg_hdr.msg_controllen);
POST_WRITE(&msg->msg_len, sizeof(msg->msg_len));
}
- if (timeout) POST_WRITE(timeout, struct_timespec_sz);
+ if (timeout)
+ POST_WRITE(timeout, struct_timespec_sz);
}
}
@@ -203,7 +207,8 @@ PRE_SYSCALL(time)(void *tloc) {}
POST_SYSCALL(time)(long res, void *tloc) {
if (res >= 0) {
- if (tloc) POST_WRITE(tloc, sizeof(long));
+ if (tloc)
+ POST_WRITE(tloc, sizeof(long));
}
}
@@ -211,7 +216,8 @@ PRE_SYSCALL(stime)(void *tptr) {}
POST_SYSCALL(stime)(long res, void *tptr) {
if (res >= 0) {
- if (tptr) POST_WRITE(tptr, sizeof(long));
+ if (tptr)
+ POST_WRITE(tptr, sizeof(long));
}
}
@@ -219,8 +225,10 @@ PRE_SYSCALL(gettimeofday)(void *tv, void *tz) {}
POST_SYSCALL(gettimeofday)(long res, void *tv, void *tz) {
if (res >= 0) {
- if (tv) POST_WRITE(tv, timeval_sz);
- if (tz) POST_WRITE(tz, struct_timezone_sz);
+ if (tv)
+ POST_WRITE(tv, timeval_sz);
+ if (tz)
+ POST_WRITE(tz, struct_timezone_sz);
}
}
@@ -228,26 +236,30 @@ PRE_SYSCALL(settimeofday)(void *tv, void *tz) {}
POST_SYSCALL(settimeofday)(long res, void *tv, void *tz) {
if (res >= 0) {
- if (tv) POST_WRITE(tv, timeval_sz);
- if (tz) POST_WRITE(tz, struct_timezone_sz);
+ if (tv)
+ POST_WRITE(tv, timeval_sz);
+ if (tz)
+ POST_WRITE(tz, struct_timezone_sz);
}
}
-#if !SANITIZER_ANDROID
+# if !SANITIZER_ANDROID
PRE_SYSCALL(adjtimex)(void *txc_p) {}
POST_SYSCALL(adjtimex)(long res, void *txc_p) {
if (res >= 0) {
- if (txc_p) POST_WRITE(txc_p, struct_timex_sz);
+ if (txc_p)
+ POST_WRITE(txc_p, struct_timex_sz);
}
}
-#endif
+# endif
PRE_SYSCALL(times)(void *tbuf) {}
POST_SYSCALL(times)(long res, void *tbuf) {
if (res >= 0) {
- if (tbuf) POST_WRITE(tbuf, struct_tms_sz);
+ if (tbuf)
+ POST_WRITE(tbuf, struct_tms_sz);
}
}
@@ -259,8 +271,10 @@ PRE_SYSCALL(nanosleep)(void *rqtp, void *rmtp) {}
POST_SYSCALL(nanosleep)(long res, void *rqtp, void *rmtp) {
if (res >= 0) {
- if (rqtp) POST_WRITE(rqtp, struct_timespec_sz);
- if (rmtp) POST_WRITE(rmtp, struct_timespec_sz);
+ if (rqtp)
+ POST_WRITE(rqtp, struct_timespec_sz);
+ if (rmtp)
+ POST_WRITE(rmtp, struct_timespec_sz);
}
}
@@ -296,9 +310,12 @@ PRE_SYSCALL(getresuid)(void *ruid, void *euid, void *suid) {}
POST_SYSCALL(getresuid)(long res, void *ruid, void *euid, void *suid) {
if (res >= 0) {
- if (ruid) POST_WRITE(ruid, sizeof(unsigned));
- if (euid) POST_WRITE(euid, sizeof(unsigned));
- if (suid) POST_WRITE(suid, sizeof(unsigned));
+ if (ruid)
+ POST_WRITE(ruid, sizeof(unsigned));
+ if (euid)
+ POST_WRITE(euid, sizeof(unsigned));
+ if (suid)
+ POST_WRITE(suid, sizeof(unsigned));
}
}
@@ -306,9 +323,12 @@ PRE_SYSCALL(getresgid)(void *rgid, void *egid, void *sgid) {}
POST_SYSCALL(getresgid)(long res, void *rgid, void *egid, void *sgid) {
if (res >= 0) {
- if (rgid) POST_WRITE(rgid, sizeof(unsigned));
- if (egid) POST_WRITE(egid, sizeof(unsigned));
- if (sgid) POST_WRITE(sgid, sizeof(unsigned));
+ if (rgid)
+ POST_WRITE(rgid, sizeof(unsigned));
+ if (egid)
+ POST_WRITE(egid, sizeof(unsigned));
+ if (sgid)
+ POST_WRITE(sgid, sizeof(unsigned));
}
}
@@ -326,10 +346,11 @@ POST_SYSCALL(getsid)(long res, long pid) {}
PRE_SYSCALL(getgroups)(long gidsetsize, void *grouplist) {}
-POST_SYSCALL(getgroups)(long res, long gidsetsize,
- __sanitizer___kernel_gid_t *grouplist) {
+POST_SYSCALL(getgroups)
+(long res, long gidsetsize, __sanitizer___kernel_gid_t *grouplist) {
if (res >= 0) {
- if (grouplist) POST_WRITE(grouplist, res * sizeof(*grouplist));
+ if (grouplist)
+ POST_WRITE(grouplist, res * sizeof(*grouplist));
}
}
@@ -374,11 +395,12 @@ PRE_SYSCALL(setsid)() {}
POST_SYSCALL(setsid)(long res) {}
PRE_SYSCALL(setgroups)(long gidsetsize, __sanitizer___kernel_gid_t *grouplist) {
- if (grouplist) POST_WRITE(grouplist, gidsetsize * sizeof(*grouplist));
+ if (grouplist)
+ POST_WRITE(grouplist, gidsetsize * sizeof(*grouplist));
}
-POST_SYSCALL(setgroups)(long res, long gidsetsize,
- __sanitizer___kernel_gid_t *grouplist) {}
+POST_SYSCALL(setgroups)
+(long res, long gidsetsize, __sanitizer___kernel_gid_t *grouplist) {}
PRE_SYSCALL(acct)(const void *name) {
if (name)
@@ -388,17 +410,21 @@ PRE_SYSCALL(acct)(const void *name) {
POST_SYSCALL(acct)(long res, const void *name) {}
PRE_SYSCALL(capget)(void *header, void *dataptr) {
- if (header) PRE_READ(header, __user_cap_header_struct_sz);
+ if (header)
+ PRE_READ(header, __user_cap_header_struct_sz);
}
POST_SYSCALL(capget)(long res, void *header, void *dataptr) {
if (res >= 0)
- if (dataptr) POST_WRITE(dataptr, __user_cap_data_struct_sz);
+ if (dataptr)
+ POST_WRITE(dataptr, __user_cap_data_struct_sz(header));
}
PRE_SYSCALL(capset)(void *header, const void *data) {
- if (header) PRE_READ(header, __user_cap_header_struct_sz);
- if (data) PRE_READ(data, __user_cap_data_struct_sz);
+ if (header)
+ PRE_READ(header, __user_cap_header_struct_sz);
+ if (data)
+ PRE_READ(data, __user_cap_data_struct_sz(header));
}
POST_SYSCALL(capset)(long res, void *header, const void *data) {}
@@ -411,7 +437,8 @@ PRE_SYSCALL(sigpending)(void *set) {}
POST_SYSCALL(sigpending)(long res, void *set) {
if (res >= 0) {
- if (set) POST_WRITE(set, old_sigset_t_sz);
+ if (set)
+ POST_WRITE(set, old_sigset_t_sz);
}
}
@@ -419,8 +446,10 @@ PRE_SYSCALL(sigprocmask)(long how, void *set, void *oset) {}
POST_SYSCALL(sigprocmask)(long res, long how, void *set, void *oset) {
if (res >= 0) {
- if (set) POST_WRITE(set, old_sigset_t_sz);
- if (oset) POST_WRITE(oset, old_sigset_t_sz);
+ if (set)
+ POST_WRITE(set, old_sigset_t_sz);
+ if (oset)
+ POST_WRITE(oset, old_sigset_t_sz);
}
}
@@ -428,7 +457,8 @@ PRE_SYSCALL(getitimer)(long which, void *value) {}
POST_SYSCALL(getitimer)(long res, long which, void *value) {
if (res >= 0) {
- if (value) POST_WRITE(value, struct_itimerval_sz);
+ if (value)
+ POST_WRITE(value, struct_itimerval_sz);
}
}
@@ -436,19 +466,23 @@ PRE_SYSCALL(setitimer)(long which, void *value, void *ovalue) {}
POST_SYSCALL(setitimer)(long res, long which, void *value, void *ovalue) {
if (res >= 0) {
- if (value) POST_WRITE(value, struct_itimerval_sz);
- if (ovalue) POST_WRITE(ovalue, struct_itimerval_sz);
+ if (value)
+ POST_WRITE(value, struct_itimerval_sz);
+ if (ovalue)
+ POST_WRITE(ovalue, struct_itimerval_sz);
}
}
-PRE_SYSCALL(timer_create)(long which_clock, void *timer_event_spec,
- void *created_timer_id) {}
+PRE_SYSCALL(timer_create)
+(long which_clock, void *timer_event_spec, void *created_timer_id) {}
-POST_SYSCALL(timer_create)(long res, long which_clock, void *timer_event_spec,
- void *created_timer_id) {
+POST_SYSCALL(timer_create)
+(long res, long which_clock, void *timer_event_spec, void *created_timer_id) {
if (res >= 0) {
- if (timer_event_spec) POST_WRITE(timer_event_spec, struct_sigevent_sz);
- if (created_timer_id) POST_WRITE(created_timer_id, sizeof(long));
+ if (timer_event_spec)
+ POST_WRITE(timer_event_spec, struct_sigevent_sz);
+ if (created_timer_id)
+ POST_WRITE(created_timer_id, sizeof(long));
}
}
@@ -456,7 +490,8 @@ PRE_SYSCALL(timer_gettime)(long timer_id, void *setting) {}
POST_SYSCALL(timer_gettime)(long res, long timer_id, void *setting) {
if (res >= 0) {
- if (setting) POST_WRITE(setting, struct_itimerspec_sz);
+ if (setting)
+ POST_WRITE(setting, struct_itimerspec_sz);
}
}
@@ -464,15 +499,18 @@ PRE_SYSCALL(timer_getoverrun)(long timer_id) {}
POST_SYSCALL(timer_getoverrun)(long res, long timer_id) {}
-PRE_SYSCALL(timer_settime)(long timer_id, long flags, const void *new_setting,
- void *old_setting) {
- if (new_setting) PRE_READ(new_setting, struct_itimerspec_sz);
+PRE_SYSCALL(timer_settime)
+(long timer_id, long flags, const void *new_setting, void *old_setting) {
+ if (new_setting)
+ PRE_READ(new_setting, struct_itimerspec_sz);
}
-POST_SYSCALL(timer_settime)(long res, long timer_id, long flags,
- const void *new_setting, void *old_setting) {
+POST_SYSCALL(timer_settime)
+(long res, long timer_id, long flags, const void *new_setting,
+ void *old_setting) {
if (res >= 0) {
- if (old_setting) POST_WRITE(old_setting, struct_itimerspec_sz);
+ if (old_setting)
+ POST_WRITE(old_setting, struct_itimerspec_sz);
}
}
@@ -481,7 +519,8 @@ PRE_SYSCALL(timer_delete)(long timer_id) {}
POST_SYSCALL(timer_delete)(long res, long timer_id) {}
PRE_SYSCALL(clock_settime)(long which_clock, const void *tp) {
- if (tp) PRE_READ(tp, struct_timespec_sz);
+ if (tp)
+ PRE_READ(tp, struct_timespec_sz);
}
POST_SYSCALL(clock_settime)(long res, long which_clock, const void *tp) {}
@@ -490,37 +529,42 @@ PRE_SYSCALL(clock_gettime)(long which_clock, void *tp) {}
POST_SYSCALL(clock_gettime)(long res, long which_clock, void *tp) {
if (res >= 0) {
- if (tp) POST_WRITE(tp, struct_timespec_sz);
+ if (tp)
+ POST_WRITE(tp, struct_timespec_sz);
}
}
-#if !SANITIZER_ANDROID
+# if !SANITIZER_ANDROID
PRE_SYSCALL(clock_adjtime)(long which_clock, void *tx) {}
POST_SYSCALL(clock_adjtime)(long res, long which_clock, void *tx) {
if (res >= 0) {
- if (tx) POST_WRITE(tx, struct_timex_sz);
+ if (tx)
+ POST_WRITE(tx, struct_timex_sz);
}
}
-#endif
+# endif
PRE_SYSCALL(clock_getres)(long which_clock, void *tp) {}
POST_SYSCALL(clock_getres)(long res, long which_clock, void *tp) {
if (res >= 0) {
- if (tp) POST_WRITE(tp, struct_timespec_sz);
+ if (tp)
+ POST_WRITE(tp, struct_timespec_sz);
}
}
-PRE_SYSCALL(clock_nanosleep)(long which_clock, long flags, const void *rqtp,
- void *rmtp) {
- if (rqtp) PRE_READ(rqtp, struct_timespec_sz);
+PRE_SYSCALL(clock_nanosleep)
+(long which_clock, long flags, const void *rqtp, void *rmtp) {
+ if (rqtp)
+ PRE_READ(rqtp, struct_timespec_sz);
}
-POST_SYSCALL(clock_nanosleep)(long res, long which_clock, long flags,
- const void *rqtp, void *rmtp) {
+POST_SYSCALL(clock_nanosleep)
+(long res, long which_clock, long flags, const void *rqtp, void *rmtp) {
if (res >= 0) {
- if (rmtp) POST_WRITE(rmtp, struct_timespec_sz);
+ if (rmtp)
+ POST_WRITE(rmtp, struct_timespec_sz);
}
}
@@ -532,12 +576,14 @@ PRE_SYSCALL(sched_setscheduler)(long pid, long policy, void *param) {}
POST_SYSCALL(sched_setscheduler)(long res, long pid, long policy, void *param) {
if (res >= 0) {
- if (param) POST_WRITE(param, struct_sched_param_sz);
+ if (param)
+ POST_WRITE(param, struct_sched_param_sz);
}
}
PRE_SYSCALL(sched_setparam)(long pid, void *param) {
- if (param) PRE_READ(param, struct_sched_param_sz);
+ if (param)
+ PRE_READ(param, struct_sched_param_sz);
}
POST_SYSCALL(sched_setparam)(long res, long pid, void *param) {}
@@ -550,23 +596,26 @@ PRE_SYSCALL(sched_getparam)(long pid, void *param) {}
POST_SYSCALL(sched_getparam)(long res, long pid, void *param) {
if (res >= 0) {
- if (param) POST_WRITE(param, struct_sched_param_sz);
+ if (param)
+ POST_WRITE(param, struct_sched_param_sz);
}
}
PRE_SYSCALL(sched_setaffinity)(long pid, long len, void *user_mask_ptr) {
- if (user_mask_ptr) PRE_READ(user_mask_ptr, len);
+ if (user_mask_ptr)
+ PRE_READ(user_mask_ptr, len);
}
-POST_SYSCALL(sched_setaffinity)(long res, long pid, long len,
- void *user_mask_ptr) {}
+POST_SYSCALL(sched_setaffinity)
+(long res, long pid, long len, void *user_mask_ptr) {}
PRE_SYSCALL(sched_getaffinity)(long pid, long len, void *user_mask_ptr) {}
-POST_SYSCALL(sched_getaffinity)(long res, long pid, long len,
- void *user_mask_ptr) {
+POST_SYSCALL(sched_getaffinity)
+(long res, long pid, long len, void *user_mask_ptr) {
if (res >= 0) {
- if (user_mask_ptr) POST_WRITE(user_mask_ptr, len);
+ if (user_mask_ptr)
+ POST_WRITE(user_mask_ptr, len);
}
}
@@ -586,7 +635,8 @@ PRE_SYSCALL(sched_rr_get_interval)(long pid, void *interval) {}
POST_SYSCALL(sched_rr_get_interval)(long res, long pid, void *interval) {
if (res >= 0) {
- if (interval) POST_WRITE(interval, struct_timespec_sz);
+ if (interval)
+ POST_WRITE(interval, struct_timespec_sz);
}
}
@@ -610,13 +660,14 @@ PRE_SYSCALL(restart_syscall)() {}
POST_SYSCALL(restart_syscall)(long res) {}
-PRE_SYSCALL(kexec_load)(long entry, long nr_segments, void *segments,
- long flags) {}
+PRE_SYSCALL(kexec_load)
+(long entry, long nr_segments, void *segments, long flags) {}
-POST_SYSCALL(kexec_load)(long res, long entry, long nr_segments, void *segments,
- long flags) {
+POST_SYSCALL(kexec_load)
+(long res, long entry, long nr_segments, void *segments, long flags) {
if (res >= 0) {
- if (segments) POST_WRITE(segments, struct_kexec_segment_sz);
+ if (segments)
+ POST_WRITE(segments, struct_kexec_segment_sz);
}
}
@@ -630,22 +681,26 @@ POST_SYSCALL(exit_group)(long res, long error_code) {}
PRE_SYSCALL(wait4)(long pid, void *stat_addr, long options, void *ru) {}
-POST_SYSCALL(wait4)(long res, long pid, void *stat_addr, long options,
- void *ru) {
+POST_SYSCALL(wait4)
+(long res, long pid, void *stat_addr, long options, void *ru) {
if (res >= 0) {
- if (stat_addr) POST_WRITE(stat_addr, sizeof(int));
- if (ru) POST_WRITE(ru, struct_rusage_sz);
+ if (stat_addr)
+ POST_WRITE(stat_addr, sizeof(int));
+ if (ru)
+ POST_WRITE(ru, struct_rusage_sz);
}
}
-PRE_SYSCALL(waitid)(long which, long pid, void *infop, long options, void *ru) {
-}
+PRE_SYSCALL(waitid)
+(long which, long pid, void *infop, long options, void *ru) {}
-POST_SYSCALL(waitid)(long res, long which, long pid, void *infop, long options,
- void *ru) {
+POST_SYSCALL(waitid)
+(long res, long which, long pid, void *infop, long options, void *ru) {
if (res >= 0) {
- if (infop) POST_WRITE(infop, siginfo_t_sz);
- if (ru) POST_WRITE(ru, struct_rusage_sz);
+ if (infop)
+ POST_WRITE(infop, siginfo_t_sz);
+ if (ru)
+ POST_WRITE(ru, struct_rusage_sz);
}
}
@@ -653,7 +708,8 @@ PRE_SYSCALL(waitpid)(long pid, void *stat_addr, long options) {}
POST_SYSCALL(waitpid)(long res, long pid, void *stat_addr, long options) {
if (res >= 0) {
- if (stat_addr) POST_WRITE(stat_addr, sizeof(int));
+ if (stat_addr)
+ POST_WRITE(stat_addr, sizeof(int));
}
}
@@ -661,7 +717,8 @@ PRE_SYSCALL(set_tid_address)(void *tidptr) {}
POST_SYSCALL(set_tid_address)(long res, void *tidptr) {
if (res >= 0) {
- if (tidptr) POST_WRITE(tidptr, sizeof(int));
+ if (tidptr)
+ POST_WRITE(tidptr, sizeof(int));
}
}
@@ -682,11 +739,14 @@ POST_SYSCALL(delete_module)(long res, const void *name_user, long flags) {}
PRE_SYSCALL(rt_sigprocmask)(long how, void *set, void *oset, long sigsetsize) {}
-POST_SYSCALL(rt_sigprocmask)(long res, long how, kernel_sigset_t *set,
- kernel_sigset_t *oset, long sigsetsize) {
+POST_SYSCALL(rt_sigprocmask)
+(long res, long how, kernel_sigset_t *set, kernel_sigset_t *oset,
+ long sigsetsize) {
if (res >= 0) {
- if (set) POST_WRITE(set, sigsetsize);
- if (oset) POST_WRITE(oset, sigsetsize);
+ if (set)
+ POST_WRITE(set, sigsetsize);
+ if (oset)
+ POST_WRITE(oset, sigsetsize);
}
}
@@ -694,29 +754,34 @@ PRE_SYSCALL(rt_sigpending)(void *set, long sigsetsize) {}
POST_SYSCALL(rt_sigpending)(long res, kernel_sigset_t *set, long sigsetsize) {
if (res >= 0) {
- if (set) POST_WRITE(set, sigsetsize);
+ if (set)
+ POST_WRITE(set, sigsetsize);
}
}
-PRE_SYSCALL(rt_sigtimedwait)(const kernel_sigset_t *uthese, void *uinfo,
- const void *uts, long sigsetsize) {
- if (uthese) PRE_READ(uthese, sigsetsize);
- if (uts) PRE_READ(uts, struct_timespec_sz);
+PRE_SYSCALL(rt_sigtimedwait)
+(const kernel_sigset_t *uthese, void *uinfo, const void *uts, long sigsetsize) {
+ if (uthese)
+ PRE_READ(uthese, sigsetsize);
+ if (uts)
+ PRE_READ(uts, struct_timespec_sz);
}
-POST_SYSCALL(rt_sigtimedwait)(long res, const void *uthese, void *uinfo,
- const void *uts, long sigsetsize) {
+POST_SYSCALL(rt_sigtimedwait)
+(long res, const void *uthese, void *uinfo, const void *uts, long sigsetsize) {
if (res >= 0) {
- if (uinfo) POST_WRITE(uinfo, siginfo_t_sz);
+ if (uinfo)
+ POST_WRITE(uinfo, siginfo_t_sz);
}
}
PRE_SYSCALL(rt_tgsigqueueinfo)(long tgid, long pid, long sig, void *uinfo) {}
-POST_SYSCALL(rt_tgsigqueueinfo)(long res, long tgid, long pid, long sig,
- void *uinfo) {
+POST_SYSCALL(rt_tgsigqueueinfo)
+(long res, long tgid, long pid, long sig, void *uinfo) {
if (res >= 0) {
- if (uinfo) POST_WRITE(uinfo, siginfo_t_sz);
+ if (uinfo)
+ POST_WRITE(uinfo, siginfo_t_sz);
}
}
@@ -736,7 +801,8 @@ PRE_SYSCALL(rt_sigqueueinfo)(long pid, long sig, void *uinfo) {}
POST_SYSCALL(rt_sigqueueinfo)(long res, long pid, long sig, void *uinfo) {
if (res >= 0) {
- if (uinfo) POST_WRITE(uinfo, siginfo_t_sz);
+ if (uinfo)
+ POST_WRITE(uinfo, siginfo_t_sz);
}
}
@@ -772,11 +838,11 @@ PRE_SYSCALL(bdflush)(long func, long data) {}
POST_SYSCALL(bdflush)(long res, long func, long data) {}
-PRE_SYSCALL(mount)(void *dev_name, void *dir_name, void *type, long flags,
- void *data) {}
+PRE_SYSCALL(mount)
+(void *dev_name, void *dir_name, void *type, long flags, void *data) {}
-POST_SYSCALL(mount)(long res, void *dev_name, void *dir_name, void *type,
- long flags, void *data) {
+POST_SYSCALL(mount)
+(long res, void *dev_name, void *dir_name, void *type, long flags, void *data) {
if (res >= 0) {
if (dev_name)
POST_WRITE(dev_name,
@@ -826,11 +892,12 @@ PRE_SYSCALL(stat)(const void *filename, void *statbuf) {
POST_SYSCALL(stat)(long res, const void *filename, void *statbuf) {
if (res >= 0) {
- if (statbuf) POST_WRITE(statbuf, struct___old_kernel_stat_sz);
+ if (statbuf)
+ POST_WRITE(statbuf, struct___old_kernel_stat_sz);
}
}
-#if !SANITIZER_ANDROID
+# if !SANITIZER_ANDROID
PRE_SYSCALL(statfs)(const void *path, void *buf) {
if (path)
PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
@@ -838,26 +905,31 @@ PRE_SYSCALL(statfs)(const void *path, void *buf) {
POST_SYSCALL(statfs)(long res, const void *path, void *buf) {
if (res >= 0) {
- if (buf) POST_WRITE(buf, struct_statfs_sz);
+ if (buf)
+ POST_WRITE(buf, struct_statfs_sz);
}
}
-PRE_SYSCALL(statfs64)(const void *path, long sz, void *buf) {
- if (path)
- PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
-}
+PRE_SYSCALL(fstatfs)(long fd, void *buf) {}
-POST_SYSCALL(statfs64)(long res, const void *path, long sz, void *buf) {
+POST_SYSCALL(fstatfs)(long res, long fd, void *buf) {
if (res >= 0) {
- if (buf) POST_WRITE(buf, struct_statfs64_sz);
+ if (buf)
+ POST_WRITE(buf, struct_statfs_sz);
}
}
+# endif // !SANITIZER_ANDROID
-PRE_SYSCALL(fstatfs)(long fd, void *buf) {}
+# if SANITIZER_GLIBC
+PRE_SYSCALL(statfs64)(const void *path, long sz, void *buf) {
+ if (path)
+ PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
+}
-POST_SYSCALL(fstatfs)(long res, long fd, void *buf) {
+POST_SYSCALL(statfs64)(long res, const void *path, long sz, void *buf) {
if (res >= 0) {
- if (buf) POST_WRITE(buf, struct_statfs_sz);
+ if (buf)
+ POST_WRITE(buf, struct_statfs64_sz);
}
}
@@ -865,10 +937,11 @@ PRE_SYSCALL(fstatfs64)(long fd, long sz, void *buf) {}
POST_SYSCALL(fstatfs64)(long res, long fd, long sz, void *buf) {
if (res >= 0) {
- if (buf) POST_WRITE(buf, struct_statfs64_sz);
+ if (buf)
+ POST_WRITE(buf, struct_statfs64_sz);
}
}
-#endif // !SANITIZER_ANDROID
+# endif // SANITIZER_GLIBC
PRE_SYSCALL(lstat)(const void *filename, void *statbuf) {
if (filename)
@@ -878,7 +951,8 @@ PRE_SYSCALL(lstat)(const void *filename, void *statbuf) {
POST_SYSCALL(lstat)(long res, const void *filename, void *statbuf) {
if (res >= 0) {
- if (statbuf) POST_WRITE(statbuf, struct___old_kernel_stat_sz);
+ if (statbuf)
+ POST_WRITE(statbuf, struct___old_kernel_stat_sz);
}
}
@@ -886,7 +960,8 @@ PRE_SYSCALL(fstat)(long fd, void *statbuf) {}
POST_SYSCALL(fstat)(long res, long fd, void *statbuf) {
if (res >= 0) {
- if (statbuf) POST_WRITE(statbuf, struct___old_kernel_stat_sz);
+ if (statbuf)
+ POST_WRITE(statbuf, struct___old_kernel_stat_sz);
}
}
@@ -898,7 +973,8 @@ PRE_SYSCALL(newstat)(const void *filename, void *statbuf) {
POST_SYSCALL(newstat)(long res, const void *filename, void *statbuf) {
if (res >= 0) {
- if (statbuf) POST_WRITE(statbuf, struct_kernel_stat_sz);
+ if (statbuf)
+ POST_WRITE(statbuf, struct_kernel_stat_sz);
}
}
@@ -910,7 +986,8 @@ PRE_SYSCALL(newlstat)(const void *filename, void *statbuf) {
POST_SYSCALL(newlstat)(long res, const void *filename, void *statbuf) {
if (res >= 0) {
- if (statbuf) POST_WRITE(statbuf, struct_kernel_stat_sz);
+ if (statbuf)
+ POST_WRITE(statbuf, struct_kernel_stat_sz);
}
}
@@ -918,19 +995,21 @@ PRE_SYSCALL(newfstat)(long fd, void *statbuf) {}
POST_SYSCALL(newfstat)(long res, long fd, void *statbuf) {
if (res >= 0) {
- if (statbuf) POST_WRITE(statbuf, struct_kernel_stat_sz);
+ if (statbuf)
+ POST_WRITE(statbuf, struct_kernel_stat_sz);
}
}
-#if !SANITIZER_ANDROID
+# if SANITIZER_GLIBC
PRE_SYSCALL(ustat)(long dev, void *ubuf) {}
POST_SYSCALL(ustat)(long res, long dev, void *ubuf) {
if (res >= 0) {
- if (ubuf) POST_WRITE(ubuf, struct_ustat_sz);
+ if (ubuf)
+ POST_WRITE(ubuf, struct_ustat_sz);
}
}
-#endif // !SANITIZER_ANDROID
+# endif // SANITIZER_GLIBC
PRE_SYSCALL(stat64)(const void *filename, void *statbuf) {
if (filename)
@@ -940,7 +1019,8 @@ PRE_SYSCALL(stat64)(const void *filename, void *statbuf) {
POST_SYSCALL(stat64)(long res, const void *filename, void *statbuf) {
if (res >= 0) {
- if (statbuf) POST_WRITE(statbuf, struct_kernel_stat64_sz);
+ if (statbuf)
+ POST_WRITE(statbuf, struct_kernel_stat64_sz);
}
}
@@ -948,7 +1028,8 @@ PRE_SYSCALL(fstat64)(long fd, void *statbuf) {}
POST_SYSCALL(fstat64)(long res, long fd, void *statbuf) {
if (res >= 0) {
- if (statbuf) POST_WRITE(statbuf, struct_kernel_stat64_sz);
+ if (statbuf)
+ POST_WRITE(statbuf, struct_kernel_stat64_sz);
}
}
@@ -960,71 +1041,80 @@ PRE_SYSCALL(lstat64)(const void *filename, void *statbuf) {
POST_SYSCALL(lstat64)(long res, const void *filename, void *statbuf) {
if (res >= 0) {
- if (statbuf) POST_WRITE(statbuf, struct_kernel_stat64_sz);
+ if (statbuf)
+ POST_WRITE(statbuf, struct_kernel_stat64_sz);
}
}
-PRE_SYSCALL(setxattr)(const void *path, const void *name, const void *value,
- long size, long flags) {
+PRE_SYSCALL(setxattr)
+(const void *path, const void *name, const void *value, long size, long flags) {
if (path)
PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
if (name)
PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
- if (value) PRE_READ(value, size);
+ if (value)
+ PRE_READ(value, size);
}
-POST_SYSCALL(setxattr)(long res, const void *path, const void *name,
- const void *value, long size, long flags) {}
+POST_SYSCALL(setxattr)
+(long res, const void *path, const void *name, const void *value, long size,
+ long flags) {}
-PRE_SYSCALL(lsetxattr)(const void *path, const void *name, const void *value,
- long size, long flags) {
+PRE_SYSCALL(lsetxattr)
+(const void *path, const void *name, const void *value, long size, long flags) {
if (path)
PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
if (name)
PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
- if (value) PRE_READ(value, size);
+ if (value)
+ PRE_READ(value, size);
}
-POST_SYSCALL(lsetxattr)(long res, const void *path, const void *name,
- const void *value, long size, long flags) {}
+POST_SYSCALL(lsetxattr)
+(long res, const void *path, const void *name, const void *value, long size,
+ long flags) {}
-PRE_SYSCALL(fsetxattr)(long fd, const void *name, const void *value, long size,
- long flags) {
+PRE_SYSCALL(fsetxattr)
+(long fd, const void *name, const void *value, long size, long flags) {
if (name)
PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
- if (value) PRE_READ(value, size);
+ if (value)
+ PRE_READ(value, size);
}
-POST_SYSCALL(fsetxattr)(long res, long fd, const void *name, const void *value,
- long size, long flags) {}
+POST_SYSCALL(fsetxattr)
+(long res, long fd, const void *name, const void *value, long size,
+ long flags) {}
-PRE_SYSCALL(getxattr)(const void *path, const void *name, void *value,
- long size) {
+PRE_SYSCALL(getxattr)
+(const void *path, const void *name, void *value, long size) {
if (path)
PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
if (name)
PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
}
-POST_SYSCALL(getxattr)(long res, const void *path, const void *name,
- void *value, long size) {
+POST_SYSCALL(getxattr)
+(long res, const void *path, const void *name, void *value, long size) {
if (size && res > 0) {
- if (value) POST_WRITE(value, res);
+ if (value)
+ POST_WRITE(value, res);
}
}
-PRE_SYSCALL(lgetxattr)(const void *path, const void *name, void *value,
- long size) {
+PRE_SYSCALL(lgetxattr)
+(const void *path, const void *name, void *value, long size) {
if (path)
PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
if (name)
PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
}
-POST_SYSCALL(lgetxattr)(long res, const void *path, const void *name,
- void *value, long size) {
+POST_SYSCALL(lgetxattr)
+(long res, const void *path, const void *name, void *value, long size) {
if (size && res > 0) {
- if (value) POST_WRITE(value, res);
+ if (value)
+ POST_WRITE(value, res);
}
}
@@ -1033,10 +1123,11 @@ PRE_SYSCALL(fgetxattr)(long fd, const void *name, void *value, long size) {
PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
}
-POST_SYSCALL(fgetxattr)(long res, long fd, const void *name, void *value,
- long size) {
+POST_SYSCALL(fgetxattr)
+(long res, long fd, const void *name, void *value, long size) {
if (size && res > 0) {
- if (value) POST_WRITE(value, res);
+ if (value)
+ POST_WRITE(value, res);
}
}
@@ -1047,7 +1138,8 @@ PRE_SYSCALL(listxattr)(const void *path, void *list, long size) {
POST_SYSCALL(listxattr)(long res, const void *path, void *list, long size) {
if (size && res > 0) {
- if (list) POST_WRITE(list, res);
+ if (list)
+ POST_WRITE(list, res);
}
}
@@ -1058,7 +1150,8 @@ PRE_SYSCALL(llistxattr)(const void *path, void *list, long size) {
POST_SYSCALL(llistxattr)(long res, const void *path, void *list, long size) {
if (size && res > 0) {
- if (list) POST_WRITE(list, res);
+ if (list)
+ POST_WRITE(list, res);
}
}
@@ -1066,7 +1159,8 @@ PRE_SYSCALL(flistxattr)(long fd, void *list, long size) {}
POST_SYSCALL(flistxattr)(long res, long fd, void *list, long size) {
if (size && res > 0) {
- if (list) POST_WRITE(list, res);
+ if (list)
+ POST_WRITE(list, res);
}
}
@@ -1103,17 +1197,17 @@ PRE_SYSCALL(mprotect)(long start, long len, long prot) {}
POST_SYSCALL(mprotect)(long res, long start, long len, long prot) {}
-PRE_SYSCALL(mremap)(long addr, long old_len, long new_len, long flags,
- long new_addr) {}
+PRE_SYSCALL(mremap)
+(long addr, long old_len, long new_len, long flags, long new_addr) {}
-POST_SYSCALL(mremap)(long res, long addr, long old_len, long new_len,
- long flags, long new_addr) {}
+POST_SYSCALL(mremap)
+(long res, long addr, long old_len, long new_len, long flags, long new_addr) {}
-PRE_SYSCALL(remap_file_pages)(long start, long size, long prot, long pgoff,
- long flags) {}
+PRE_SYSCALL(remap_file_pages)
+(long start, long size, long prot, long pgoff, long flags) {}
-POST_SYSCALL(remap_file_pages)(long res, long start, long size, long prot,
- long pgoff, long flags) {}
+POST_SYSCALL(remap_file_pages)
+(long res, long start, long size, long prot, long pgoff, long flags) {}
PRE_SYSCALL(msync)(long start, long len, long flags) {}
@@ -1189,7 +1283,8 @@ PRE_SYSCALL(link)(const void *oldname, const void *newname) {
POST_SYSCALL(link)(long res, const void *oldname, const void *newname) {}
PRE_SYSCALL(symlink)(const void *old, const void *new_) {
- if (old) PRE_READ(old, __sanitizer::internal_strlen((const char *)old) + 1);
+ if (old)
+ PRE_READ(old, __sanitizer::internal_strlen((const char *)old) + 1);
if (new_)
PRE_READ(new_, __sanitizer::internal_strlen((const char *)new_) + 1);
}
@@ -1237,14 +1332,16 @@ PRE_SYSCALL(pipe)(void *fildes) {}
POST_SYSCALL(pipe)(long res, void *fildes) {
if (res >= 0)
- if (fildes) POST_WRITE(fildes, sizeof(int) * 2);
+ if (fildes)
+ POST_WRITE(fildes, sizeof(int) * 2);
}
PRE_SYSCALL(pipe2)(void *fildes, long flags) {}
POST_SYSCALL(pipe2)(long res, void *fildes, long flags) {
if (res >= 0)
- if (fildes) POST_WRITE(fildes, sizeof(int) * 2);
+ if (fildes)
+ POST_WRITE(fildes, sizeof(int) * 2);
}
PRE_SYSCALL(dup)(long fildes) {}
@@ -1272,16 +1369,18 @@ PRE_SYSCALL(flock)(long fd, long cmd) {}
POST_SYSCALL(flock)(long res, long fd, long cmd) {}
PRE_SYSCALL(io_setup)(long nr_reqs, void **ctx) {
- if (ctx) PRE_WRITE(ctx, sizeof(*ctx));
+ if (ctx)
+ PRE_WRITE(ctx, sizeof(*ctx));
}
POST_SYSCALL(io_setup)(long res, long nr_reqs, void **ctx) {
- if (res >= 0) {
- if (ctx) POST_WRITE(ctx, sizeof(*ctx));
+ if (res >= 0 && ctx) {
+ POST_WRITE(ctx, sizeof(*ctx));
// (*ctx) is actually a pointer to a kernel mapped page, and there are
// people out there who are crazy enough to peek into that page's 32-byte
// header.
- if (*ctx) POST_WRITE(*ctx, 32);
+ if (*ctx)
+ POST_WRITE(*ctx, 32);
}
}
@@ -1289,16 +1388,21 @@ PRE_SYSCALL(io_destroy)(long ctx) {}
POST_SYSCALL(io_destroy)(long res, long ctx) {}
-PRE_SYSCALL(io_getevents)(long ctx_id, long min_nr, long nr,
- __sanitizer_io_event *ioevpp, void *timeout) {
- if (timeout) PRE_READ(timeout, struct_timespec_sz);
+PRE_SYSCALL(io_getevents)
+(long ctx_id, long min_nr, long nr, __sanitizer_io_event *ioevpp,
+ void *timeout) {
+ if (timeout)
+ PRE_READ(timeout, struct_timespec_sz);
}
-POST_SYSCALL(io_getevents)(long res, long ctx_id, long min_nr, long nr,
- __sanitizer_io_event *ioevpp, void *timeout) {
+POST_SYSCALL(io_getevents)
+(long res, long ctx_id, long min_nr, long nr, __sanitizer_io_event *ioevpp,
+ void *timeout) {
if (res >= 0) {
- if (ioevpp) POST_WRITE(ioevpp, res * sizeof(*ioevpp));
- if (timeout) POST_WRITE(timeout, struct_timespec_sz);
+ if (ioevpp)
+ POST_WRITE(ioevpp, res * sizeof(*ioevpp));
+ if (timeout)
+ POST_WRITE(timeout, struct_timespec_sz);
}
for (long i = 0; i < res; i++) {
// We synchronize io_submit -> io_getevents/io_cancel using the
@@ -1308,26 +1412,26 @@ POST_SYSCALL(io_getevents)(long res, long ctx_id, long min_nr, long nr,
// synchronize on 0. But there does not seem to be a better solution
// (except wrapping all operations in their own context, which is unreliable).
// We can not reliably extract fildes in io_getevents.
- COMMON_SYSCALL_ACQUIRE((void*)ioevpp[i].data);
+ COMMON_SYSCALL_ACQUIRE((void *)ioevpp[i].data);
}
}
PRE_SYSCALL(io_submit)(long ctx_id, long nr, __sanitizer_iocb **iocbpp) {
for (long i = 0; i < nr; ++i) {
uptr op = iocbpp[i]->aio_lio_opcode;
- void *data = (void*)iocbpp[i]->aio_data;
- void *buf = (void*)iocbpp[i]->aio_buf;
+ void *data = (void *)iocbpp[i]->aio_data;
+ void *buf = (void *)iocbpp[i]->aio_buf;
uptr len = (uptr)iocbpp[i]->aio_nbytes;
if (op == iocb_cmd_pwrite && buf && len) {
PRE_READ(buf, len);
} else if (op == iocb_cmd_pread && buf && len) {
POST_WRITE(buf, len);
} else if (op == iocb_cmd_pwritev) {
- __sanitizer_iovec *iovec = (__sanitizer_iovec*)buf;
+ __sanitizer_iovec *iovec = (__sanitizer_iovec *)buf;
for (uptr v = 0; v < len; v++)
PRE_READ(iovec[v].iov_base, iovec[v].iov_len);
} else if (op == iocb_cmd_preadv) {
- __sanitizer_iovec *iovec = (__sanitizer_iovec*)buf;
+ __sanitizer_iovec *iovec = (__sanitizer_iovec *)buf;
for (uptr v = 0; v < len; v++)
POST_WRITE(iovec[v].iov_base, iovec[v].iov_len);
}
@@ -1336,19 +1440,18 @@ PRE_SYSCALL(io_submit)(long ctx_id, long nr, __sanitizer_iocb **iocbpp) {
}
}
-POST_SYSCALL(io_submit)(long res, long ctx_id, long nr,
- __sanitizer_iocb **iocbpp) {}
+POST_SYSCALL(io_submit)
+(long res, long ctx_id, long nr, __sanitizer_iocb **iocbpp) {}
-PRE_SYSCALL(io_cancel)(long ctx_id, __sanitizer_iocb *iocb,
- __sanitizer_io_event *result) {
-}
+PRE_SYSCALL(io_cancel)
+(long ctx_id, __sanitizer_iocb *iocb, __sanitizer_io_event *result) {}
-POST_SYSCALL(io_cancel)(long res, long ctx_id, __sanitizer_iocb *iocb,
- __sanitizer_io_event *result) {
+POST_SYSCALL(io_cancel)
+(long res, long ctx_id, __sanitizer_iocb *iocb, __sanitizer_io_event *result) {
if (res == 0) {
if (result) {
// See comment in io_getevents.
- COMMON_SYSCALL_ACQUIRE((void*)result->data);
+ COMMON_SYSCALL_ACQUIRE((void *)result->data);
POST_WRITE(result, sizeof(*result));
}
if (iocb)
@@ -1358,19 +1461,23 @@ POST_SYSCALL(io_cancel)(long res, long ctx_id, __sanitizer_iocb *iocb,
PRE_SYSCALL(sendfile)(long out_fd, long in_fd, void *offset, long count) {}
-POST_SYSCALL(sendfile)(long res, long out_fd, long in_fd,
- __sanitizer___kernel_off_t *offset, long count) {
+POST_SYSCALL(sendfile)
+(long res, long out_fd, long in_fd, __sanitizer___kernel_off_t *offset,
+ long count) {
if (res >= 0) {
- if (offset) POST_WRITE(offset, sizeof(*offset));
+ if (offset)
+ POST_WRITE(offset, sizeof(*offset));
}
}
PRE_SYSCALL(sendfile64)(long out_fd, long in_fd, void *offset, long count) {}
-POST_SYSCALL(sendfile64)(long res, long out_fd, long in_fd,
- __sanitizer___kernel_loff_t *offset, long count) {
+POST_SYSCALL(sendfile64)
+(long res, long out_fd, long in_fd, __sanitizer___kernel_loff_t *offset,
+ long count) {
if (res >= 0) {
- if (offset) POST_WRITE(offset, sizeof(*offset));
+ if (offset)
+ POST_WRITE(offset, sizeof(*offset));
}
}
@@ -1402,9 +1509,7 @@ PRE_SYSCALL(open)(const void *filename, long flags, long mode) {
POST_SYSCALL(open)(long res, const void *filename, long flags, long mode) {}
-PRE_SYSCALL(close)(long fd) {
- COMMON_SYSCALL_FD_CLOSE((int)fd);
-}
+PRE_SYSCALL(close)(long fd) { COMMON_SYSCALL_FD_CLOSE((int)fd); }
POST_SYSCALL(close)(long res, long fd) {}
@@ -1440,7 +1545,7 @@ PRE_SYSCALL(fchown)(long fd, long user, long group) {}
POST_SYSCALL(fchown)(long res, long fd, long user, long group) {}
-#if SANITIZER_USES_UID16_SYSCALLS
+# if SANITIZER_USES_UID16_SYSCALLS
PRE_SYSCALL(chown16)(const void *filename, long user, long group) {
if (filename)
PRE_READ(filename,
@@ -1483,13 +1588,16 @@ POST_SYSCALL(setresuid16)(long res, long ruid, long euid, long suid) {}
PRE_SYSCALL(getresuid16)(void *ruid, void *euid, void *suid) {}
-POST_SYSCALL(getresuid16)(long res, __sanitizer___kernel_old_uid_t *ruid,
- __sanitizer___kernel_old_uid_t *euid,
- __sanitizer___kernel_old_uid_t *suid) {
+POST_SYSCALL(getresuid16)
+(long res, __sanitizer___kernel_old_uid_t *ruid,
+ __sanitizer___kernel_old_uid_t *euid, __sanitizer___kernel_old_uid_t *suid) {
if (res >= 0) {
- if (ruid) POST_WRITE(ruid, sizeof(*ruid));
- if (euid) POST_WRITE(euid, sizeof(*euid));
- if (suid) POST_WRITE(suid, sizeof(*suid));
+ if (ruid)
+ POST_WRITE(ruid, sizeof(*ruid));
+ if (euid)
+ POST_WRITE(euid, sizeof(*euid));
+ if (suid)
+ POST_WRITE(suid, sizeof(*suid));
}
}
@@ -1499,13 +1607,16 @@ POST_SYSCALL(setresgid16)(long res, long rgid, long egid, long sgid) {}
PRE_SYSCALL(getresgid16)(void *rgid, void *egid, void *sgid) {}
-POST_SYSCALL(getresgid16)(long res, __sanitizer___kernel_old_gid_t *rgid,
- __sanitizer___kernel_old_gid_t *egid,
- __sanitizer___kernel_old_gid_t *sgid) {
+POST_SYSCALL(getresgid16)
+(long res, __sanitizer___kernel_old_gid_t *rgid,
+ __sanitizer___kernel_old_gid_t *egid, __sanitizer___kernel_old_gid_t *sgid) {
if (res >= 0) {
- if (rgid) POST_WRITE(rgid, sizeof(*rgid));
- if (egid) POST_WRITE(egid, sizeof(*egid));
- if (sgid) POST_WRITE(sgid, sizeof(*sgid));
+ if (rgid)
+ POST_WRITE(rgid, sizeof(*rgid));
+ if (egid)
+ POST_WRITE(egid, sizeof(*egid));
+ if (sgid)
+ POST_WRITE(sgid, sizeof(*sgid));
}
}
@@ -1517,23 +1628,25 @@ PRE_SYSCALL(setfsgid16)(long gid) {}
POST_SYSCALL(setfsgid16)(long res, long gid) {}
-PRE_SYSCALL(getgroups16)(long gidsetsize,
- __sanitizer___kernel_old_gid_t *grouplist) {}
+PRE_SYSCALL(getgroups16)
+(long gidsetsize, __sanitizer___kernel_old_gid_t *grouplist) {}
-POST_SYSCALL(getgroups16)(long res, long gidsetsize,
- __sanitizer___kernel_old_gid_t *grouplist) {
+POST_SYSCALL(getgroups16)
+(long res, long gidsetsize, __sanitizer___kernel_old_gid_t *grouplist) {
if (res >= 0) {
- if (grouplist) POST_WRITE(grouplist, res * sizeof(*grouplist));
+ if (grouplist)
+ POST_WRITE(grouplist, res * sizeof(*grouplist));
}
}
-PRE_SYSCALL(setgroups16)(long gidsetsize,
- __sanitizer___kernel_old_gid_t *grouplist) {
- if (grouplist) POST_WRITE(grouplist, gidsetsize * sizeof(*grouplist));
+PRE_SYSCALL(setgroups16)
+(long gidsetsize, __sanitizer___kernel_old_gid_t *grouplist) {
+ if (grouplist)
+ POST_WRITE(grouplist, gidsetsize * sizeof(*grouplist));
}
-POST_SYSCALL(setgroups16)(long res, long gidsetsize,
- __sanitizer___kernel_old_gid_t *grouplist) {}
+POST_SYSCALL(setgroups16)
+(long res, long gidsetsize, __sanitizer___kernel_old_gid_t *grouplist) {}
PRE_SYSCALL(getuid16)() {}
@@ -1550,7 +1663,7 @@ POST_SYSCALL(getgid16)(long res) {}
PRE_SYSCALL(getegid16)() {}
POST_SYSCALL(getegid16)(long res) {}
-#endif // SANITIZER_USES_UID16_SYSCALLS
+# endif // SANITIZER_USES_UID16_SYSCALLS
PRE_SYSCALL(utime)(void *filename, void *times) {}
@@ -1559,7 +1672,8 @@ POST_SYSCALL(utime)(long res, void *filename, void *times) {
if (filename)
POST_WRITE(filename,
__sanitizer::internal_strlen((const char *)filename) + 1);
- if (times) POST_WRITE(times, struct_utimbuf_sz);
+ if (times)
+ POST_WRITE(times, struct_utimbuf_sz);
}
}
@@ -1570,7 +1684,8 @@ POST_SYSCALL(utimes)(long res, void *filename, void *utimes) {
if (filename)
POST_WRITE(filename,
__sanitizer::internal_strlen((const char *)filename) + 1);
- if (utimes) POST_WRITE(utimes, timeval_sz);
+ if (utimes)
+ POST_WRITE(utimes, timeval_sz);
}
}
@@ -1578,91 +1693,104 @@ PRE_SYSCALL(lseek)(long fd, long offset, long origin) {}
POST_SYSCALL(lseek)(long res, long fd, long offset, long origin) {}
-PRE_SYSCALL(llseek)(long fd, long offset_high, long offset_low, void *result,
- long origin) {}
+PRE_SYSCALL(llseek)
+(long fd, long offset_high, long offset_low, void *result, long origin) {}
-POST_SYSCALL(llseek)(long res, long fd, long offset_high, long offset_low,
- void *result, long origin) {
+POST_SYSCALL(llseek)
+(long res, long fd, long offset_high, long offset_low, void *result,
+ long origin) {
if (res >= 0) {
- if (result) POST_WRITE(result, sizeof(long long));
+ if (result)
+ POST_WRITE(result, sizeof(long long));
}
}
PRE_SYSCALL(readv)(long fd, const __sanitizer_iovec *vec, long vlen) {}
-POST_SYSCALL(readv)(long res, long fd, const __sanitizer_iovec *vec,
- long vlen) {
+POST_SYSCALL(readv)
+(long res, long fd, const __sanitizer_iovec *vec, long vlen) {
if (res >= 0) {
- if (vec) kernel_write_iovec(vec, vlen, res);
+ if (vec)
+ kernel_write_iovec(vec, vlen, res);
}
}
PRE_SYSCALL(write)(long fd, const void *buf, long count) {
- if (buf) PRE_READ(buf, count);
+ if (buf)
+ PRE_READ(buf, count);
}
POST_SYSCALL(write)(long res, long fd, const void *buf, long count) {}
PRE_SYSCALL(writev)(long fd, const __sanitizer_iovec *vec, long vlen) {}
-POST_SYSCALL(writev)(long res, long fd, const __sanitizer_iovec *vec,
- long vlen) {
+POST_SYSCALL(writev)
+(long res, long fd, const __sanitizer_iovec *vec, long vlen) {
if (res >= 0) {
- if (vec) kernel_read_iovec(vec, vlen, res);
+ if (vec)
+ kernel_read_iovec(vec, vlen, res);
}
}
-#ifdef _LP64
+# ifdef _LP64
PRE_SYSCALL(pread64)(long fd, void *buf, long count, long pos) {}
POST_SYSCALL(pread64)(long res, long fd, void *buf, long count, long pos) {
if (res >= 0) {
- if (buf) POST_WRITE(buf, res);
+ if (buf)
+ POST_WRITE(buf, res);
}
}
PRE_SYSCALL(pwrite64)(long fd, const void *buf, long count, long pos) {
- if (buf) PRE_READ(buf, count);
+ if (buf)
+ PRE_READ(buf, count);
}
-POST_SYSCALL(pwrite64)(long res, long fd, const void *buf, long count,
- long pos) {}
-#else
+POST_SYSCALL(pwrite64)
+(long res, long fd, const void *buf, long count, long pos) {}
+# else
PRE_SYSCALL(pread64)(long fd, void *buf, long count, long pos0, long pos1) {}
-POST_SYSCALL(pread64)(long res, long fd, void *buf, long count, long pos0,
- long pos1) {
+POST_SYSCALL(pread64)
+(long res, long fd, void *buf, long count, long pos0, long pos1) {
if (res >= 0) {
- if (buf) POST_WRITE(buf, res);
+ if (buf)
+ POST_WRITE(buf, res);
}
}
-PRE_SYSCALL(pwrite64)(long fd, const void *buf, long count, long pos0,
- long pos1) {
- if (buf) PRE_READ(buf, count);
+PRE_SYSCALL(pwrite64)
+(long fd, const void *buf, long count, long pos0, long pos1) {
+ if (buf)
+ PRE_READ(buf, count);
}
-POST_SYSCALL(pwrite64)(long res, long fd, const void *buf, long count,
- long pos0, long pos1) {}
-#endif
+POST_SYSCALL(pwrite64)
+(long res, long fd, const void *buf, long count, long pos0, long pos1) {}
+# endif
-PRE_SYSCALL(preadv)(long fd, const __sanitizer_iovec *vec, long vlen,
- long pos_l, long pos_h) {}
+PRE_SYSCALL(preadv)
+(long fd, const __sanitizer_iovec *vec, long vlen, long pos_l, long pos_h) {}
-POST_SYSCALL(preadv)(long res, long fd, const __sanitizer_iovec *vec, long vlen,
- long pos_l, long pos_h) {
+POST_SYSCALL(preadv)
+(long res, long fd, const __sanitizer_iovec *vec, long vlen, long pos_l,
+ long pos_h) {
if (res >= 0) {
- if (vec) kernel_write_iovec(vec, vlen, res);
+ if (vec)
+ kernel_write_iovec(vec, vlen, res);
}
}
-PRE_SYSCALL(pwritev)(long fd, const __sanitizer_iovec *vec, long vlen,
- long pos_l, long pos_h) {}
+PRE_SYSCALL(pwritev)
+(long fd, const __sanitizer_iovec *vec, long vlen, long pos_l, long pos_h) {}
-POST_SYSCALL(pwritev)(long res, long fd, const __sanitizer_iovec *vec,
- long vlen, long pos_l, long pos_h) {
+POST_SYSCALL(pwritev)
+(long res, long fd, const __sanitizer_iovec *vec, long vlen, long pos_l,
+ long pos_h) {
if (res >= 0) {
- if (vec) kernel_read_iovec(vec, vlen, res);
+ if (vec)
+ kernel_read_iovec(vec, vlen, res);
}
}
@@ -1717,14 +1845,15 @@ PRE_SYSCALL(quotactl)(long cmd, const void *special, long id, void *addr) {
PRE_READ(special, __sanitizer::internal_strlen((const char *)special) + 1);
}
-POST_SYSCALL(quotactl)(long res, long cmd, const void *special, long id,
- void *addr) {}
+POST_SYSCALL(quotactl)
+(long res, long cmd, const void *special, long id, void *addr) {}
PRE_SYSCALL(getdents)(long fd, void *dirent, long count) {}
POST_SYSCALL(getdents)(long res, long fd, void *dirent, long count) {
if (res >= 0) {
- if (dirent) POST_WRITE(dirent, res);
+ if (dirent)
+ POST_WRITE(dirent, res);
}
}
@@ -1732,15 +1861,16 @@ PRE_SYSCALL(getdents64)(long fd, void *dirent, long count) {}
POST_SYSCALL(getdents64)(long res, long fd, void *dirent, long count) {
if (res >= 0) {
- if (dirent) POST_WRITE(dirent, res);
+ if (dirent)
+ POST_WRITE(dirent, res);
}
}
-PRE_SYSCALL(setsockopt)(long fd, long level, long optname, void *optval,
- long optlen) {}
+PRE_SYSCALL(setsockopt)
+(long fd, long level, long optname, void *optval, long optlen) {}
-POST_SYSCALL(setsockopt)(long res, long fd, long level, long optname,
- void *optval, long optlen) {
+POST_SYSCALL(setsockopt)
+(long res, long fd, long level, long optname, void *optval, long optlen) {
if (res >= 0) {
if (optval)
POST_WRITE(optval,
@@ -1748,77 +1878,88 @@ POST_SYSCALL(setsockopt)(long res, long fd, long level, long optname,
}
}
-PRE_SYSCALL(getsockopt)(long fd, long level, long optname, void *optval,
- void *optlen) {}
+PRE_SYSCALL(getsockopt)
+(long fd, long level, long optname, void *optval, void *optlen) {}
-POST_SYSCALL(getsockopt)(long res, long fd, long level, long optname,
- void *optval, void *optlen) {
+POST_SYSCALL(getsockopt)
+(long res, long fd, long level, long optname, void *optval, void *optlen) {
if (res >= 0) {
if (optval)
POST_WRITE(optval,
__sanitizer::internal_strlen((const char *)optval) + 1);
- if (optlen) POST_WRITE(optlen, sizeof(int));
+ if (optlen)
+ POST_WRITE(optlen, sizeof(int));
}
}
PRE_SYSCALL(bind)(long arg0, sanitizer_kernel_sockaddr *arg1, long arg2) {}
-POST_SYSCALL(bind)(long res, long arg0, sanitizer_kernel_sockaddr *arg1,
- long arg2) {
+POST_SYSCALL(bind)
+(long res, long arg0, sanitizer_kernel_sockaddr *arg1, long arg2) {
if (res >= 0) {
- if (arg1) POST_WRITE(arg1, sizeof(*arg1));
+ if (arg1)
+ POST_WRITE(arg1, sizeof(*arg1));
}
}
PRE_SYSCALL(connect)(long arg0, sanitizer_kernel_sockaddr *arg1, long arg2) {}
-POST_SYSCALL(connect)(long res, long arg0, sanitizer_kernel_sockaddr *arg1,
- long arg2) {
+POST_SYSCALL(connect)
+(long res, long arg0, sanitizer_kernel_sockaddr *arg1, long arg2) {
if (res >= 0) {
- if (arg1) POST_WRITE(arg1, sizeof(*arg1));
+ if (arg1)
+ POST_WRITE(arg1, sizeof(*arg1));
}
}
PRE_SYSCALL(accept)(long arg0, sanitizer_kernel_sockaddr *arg1, void *arg2) {}
-POST_SYSCALL(accept)(long res, long arg0, sanitizer_kernel_sockaddr *arg1,
- void *arg2) {
+POST_SYSCALL(accept)
+(long res, long arg0, sanitizer_kernel_sockaddr *arg1, void *arg2) {
if (res >= 0) {
- if (arg1) POST_WRITE(arg1, sizeof(*arg1));
- if (arg2) POST_WRITE(arg2, sizeof(unsigned));
+ if (arg1)
+ POST_WRITE(arg1, sizeof(*arg1));
+ if (arg2)
+ POST_WRITE(arg2, sizeof(unsigned));
}
}
-PRE_SYSCALL(accept4)(long arg0, sanitizer_kernel_sockaddr *arg1, void *arg2,
- long arg3) {}
+PRE_SYSCALL(accept4)
+(long arg0, sanitizer_kernel_sockaddr *arg1, void *arg2, long arg3) {}
-POST_SYSCALL(accept4)(long res, long arg0, sanitizer_kernel_sockaddr *arg1,
- void *arg2, long arg3) {
+POST_SYSCALL(accept4)
+(long res, long arg0, sanitizer_kernel_sockaddr *arg1, void *arg2, long arg3) {
if (res >= 0) {
- if (arg1) POST_WRITE(arg1, sizeof(*arg1));
- if (arg2) POST_WRITE(arg2, sizeof(unsigned));
+ if (arg1)
+ POST_WRITE(arg1, sizeof(*arg1));
+ if (arg2)
+ POST_WRITE(arg2, sizeof(unsigned));
}
}
-PRE_SYSCALL(getsockname)(long arg0, sanitizer_kernel_sockaddr *arg1,
- void *arg2) {}
+PRE_SYSCALL(getsockname)
+(long arg0, sanitizer_kernel_sockaddr *arg1, void *arg2) {}
-POST_SYSCALL(getsockname)(long res, long arg0, sanitizer_kernel_sockaddr *arg1,
- void *arg2) {
+POST_SYSCALL(getsockname)
+(long res, long arg0, sanitizer_kernel_sockaddr *arg1, void *arg2) {
if (res >= 0) {
- if (arg1) POST_WRITE(arg1, sizeof(*arg1));
- if (arg2) POST_WRITE(arg2, sizeof(unsigned));
+ if (arg1)
+ POST_WRITE(arg1, sizeof(*arg1));
+ if (arg2)
+ POST_WRITE(arg2, sizeof(unsigned));
}
}
-PRE_SYSCALL(getpeername)(long arg0, sanitizer_kernel_sockaddr *arg1,
- void *arg2) {}
+PRE_SYSCALL(getpeername)
+(long arg0, sanitizer_kernel_sockaddr *arg1, void *arg2) {}
-POST_SYSCALL(getpeername)(long res, long arg0, sanitizer_kernel_sockaddr *arg1,
- void *arg2) {
+POST_SYSCALL(getpeername)
+(long res, long arg0, sanitizer_kernel_sockaddr *arg1, void *arg2) {
if (res >= 0) {
- if (arg1) POST_WRITE(arg1, sizeof(*arg1));
- if (arg2) POST_WRITE(arg2, sizeof(unsigned));
+ if (arg1)
+ POST_WRITE(arg1, sizeof(*arg1));
+ if (arg2)
+ POST_WRITE(arg2, sizeof(unsigned));
}
}
@@ -1826,18 +1967,23 @@ PRE_SYSCALL(send)(long arg0, void *arg1, long arg2, long arg3) {}
POST_SYSCALL(send)(long res, long arg0, void *arg1, long arg2, long arg3) {
if (res) {
- if (arg1) POST_READ(arg1, res);
+ if (arg1)
+ POST_READ(arg1, res);
}
}
-PRE_SYSCALL(sendto)(long arg0, void *arg1, long arg2, long arg3,
- sanitizer_kernel_sockaddr *arg4, long arg5) {}
+PRE_SYSCALL(sendto)
+(long arg0, void *arg1, long arg2, long arg3, sanitizer_kernel_sockaddr *arg4,
+ long arg5) {}
-POST_SYSCALL(sendto)(long res, long arg0, void *arg1, long arg2, long arg3,
- sanitizer_kernel_sockaddr *arg4, long arg5) {
+POST_SYSCALL(sendto)
+(long res, long arg0, void *arg1, long arg2, long arg3,
+ sanitizer_kernel_sockaddr *arg4, long arg5) {
if (res >= 0) {
- if (arg1) POST_READ(arg1, res);
- if (arg4) POST_WRITE(arg4, sizeof(*arg4));
+ if (arg1)
+ POST_READ(arg1, res);
+ if (arg4)
+ POST_WRITE(arg4, sizeof(*arg4));
}
}
@@ -1857,19 +2003,25 @@ PRE_SYSCALL(recv)(long arg0, void *buf, long len, long flags) {}
POST_SYSCALL(recv)(long res, void *buf, long len, long flags) {
if (res >= 0) {
- if (buf) POST_WRITE(buf, res);
+ if (buf)
+ POST_WRITE(buf, res);
}
}
-PRE_SYSCALL(recvfrom)(long arg0, void *buf, long len, long flags,
- sanitizer_kernel_sockaddr *arg4, void *arg5) {}
+PRE_SYSCALL(recvfrom)
+(long arg0, void *buf, long len, long flags, sanitizer_kernel_sockaddr *arg4,
+ void *arg5) {}
-POST_SYSCALL(recvfrom)(long res, long arg0, void *buf, long len, long flags,
- sanitizer_kernel_sockaddr *arg4, void *arg5) {
+POST_SYSCALL(recvfrom)
+(long res, long arg0, void *buf, long len, long flags,
+ sanitizer_kernel_sockaddr *arg4, void *arg5) {
if (res >= 0) {
- if (buf) POST_WRITE(buf, res);
- if (arg4) POST_WRITE(arg4, sizeof(*arg4));
- if (arg5) POST_WRITE(arg5, sizeof(int));
+ if (buf)
+ POST_WRITE(buf, res);
+ if (arg4)
+ POST_WRITE(arg4, sizeof(*arg4));
+ if (arg5)
+ POST_WRITE(arg5, sizeof(int));
}
}
@@ -1881,14 +2033,16 @@ PRE_SYSCALL(socketpair)(long arg0, long arg1, long arg2, int *sv) {}
POST_SYSCALL(socketpair)(long res, long arg0, long arg1, long arg2, int *sv) {
if (res >= 0)
- if (sv) POST_WRITE(sv, sizeof(int) * 2);
+ if (sv)
+ POST_WRITE(sv, sizeof(int) * 2);
}
PRE_SYSCALL(socketcall)(long call, void *args) {}
POST_SYSCALL(socketcall)(long res, long call, void *args) {
if (res >= 0) {
- if (args) POST_WRITE(args, sizeof(long));
+ if (args)
+ POST_WRITE(args, sizeof(long));
}
}
@@ -1898,25 +2052,31 @@ POST_SYSCALL(listen)(long res, long arg0, long arg1) {}
PRE_SYSCALL(poll)(void *ufds, long nfds, long timeout) {}
-POST_SYSCALL(poll)(long res, __sanitizer_pollfd *ufds, long nfds,
- long timeout) {
+POST_SYSCALL(poll)
+(long res, __sanitizer_pollfd *ufds, long nfds, long timeout) {
if (res >= 0) {
- if (ufds) POST_WRITE(ufds, nfds * sizeof(*ufds));
+ if (ufds)
+ POST_WRITE(ufds, nfds * sizeof(*ufds));
}
}
-PRE_SYSCALL(select)(long n, __sanitizer___kernel_fd_set *inp,
- __sanitizer___kernel_fd_set *outp,
- __sanitizer___kernel_fd_set *exp, void *tvp) {}
+PRE_SYSCALL(select)
+(long n, __sanitizer___kernel_fd_set *inp, __sanitizer___kernel_fd_set *outp,
+ __sanitizer___kernel_fd_set *exp, void *tvp) {}
-POST_SYSCALL(select)(long res, long n, __sanitizer___kernel_fd_set *inp,
- __sanitizer___kernel_fd_set *outp,
- __sanitizer___kernel_fd_set *exp, void *tvp) {
+POST_SYSCALL(select)
+(long res, long n, __sanitizer___kernel_fd_set *inp,
+ __sanitizer___kernel_fd_set *outp, __sanitizer___kernel_fd_set *exp,
+ void *tvp) {
if (res >= 0) {
- if (inp) POST_WRITE(inp, sizeof(*inp));
- if (outp) POST_WRITE(outp, sizeof(*outp));
- if (exp) POST_WRITE(exp, sizeof(*exp));
- if (tvp) POST_WRITE(tvp, timeval_sz);
+ if (inp)
+ POST_WRITE(inp, sizeof(*inp));
+ if (outp)
+ POST_WRITE(outp, sizeof(*outp));
+ if (exp)
+ POST_WRITE(exp, sizeof(*exp));
+ if (tvp)
+ POST_WRITE(tvp, timeval_sz);
}
}
@@ -1936,29 +2096,58 @@ PRE_SYSCALL(epoll_ctl)(long epfd, long op, long fd, void *event) {}
POST_SYSCALL(epoll_ctl)(long res, long epfd, long op, long fd, void *event) {
if (res >= 0) {
- if (event) POST_WRITE(event, struct_epoll_event_sz);
+ if (event)
+ POST_WRITE(event, struct_epoll_event_sz);
+ }
+}
+
+PRE_SYSCALL(epoll_wait)
+(long epfd, void *events, long maxevents, long timeout) {}
+
+POST_SYSCALL(epoll_wait)
+(long res, long epfd, void *events, long maxevents, long timeout) {
+ if (res >= 0) {
+ COMMON_SYSCALL_FD_ACQUIRE(epfd);
+ if (events)
+ POST_WRITE(events, res * struct_epoll_event_sz);
}
}
-PRE_SYSCALL(epoll_wait)(long epfd, void *events, long maxevents, long timeout) {
+PRE_SYSCALL(epoll_pwait)
+(long epfd, void *events, long maxevents, long timeout,
+ const kernel_sigset_t *sigmask, long sigsetsize) {
+ if (sigmask)
+ PRE_READ(sigmask, sigsetsize);
}
-POST_SYSCALL(epoll_wait)(long res, long epfd, void *events, long maxevents,
- long timeout) {
+POST_SYSCALL(epoll_pwait)
+(long res, long epfd, void *events, long maxevents, long timeout,
+ const void *sigmask, long sigsetsize) {
if (res >= 0) {
- if (events) POST_WRITE(events, struct_epoll_event_sz);
+ COMMON_SYSCALL_FD_ACQUIRE(epfd);
+ if (events)
+ POST_WRITE(events, res * struct_epoll_event_sz);
}
}
-PRE_SYSCALL(epoll_pwait)(long epfd, void *events, long maxevents, long timeout,
- const kernel_sigset_t *sigmask, long sigsetsize) {
- if (sigmask) PRE_READ(sigmask, sigsetsize);
+PRE_SYSCALL(epoll_pwait2)
+(long epfd, void *events, long maxevents,
+ const sanitizer_kernel_timespec *timeout, const kernel_sigset_t *sigmask,
+ long sigsetsize) {
+ if (timeout)
+ PRE_READ(timeout, sizeof(*timeout));
+ if (sigmask)
+ PRE_READ(sigmask, sigsetsize);
}
-POST_SYSCALL(epoll_pwait)(long res, long epfd, void *events, long maxevents,
- long timeout, const void *sigmask, long sigsetsize) {
+POST_SYSCALL(epoll_pwait2)
+(long res, long epfd, void *events, long maxevents,
+ const sanitizer_kernel_timespec *timeout, const void *sigmask,
+ long sigsetsize) {
if (res >= 0) {
- if (events) POST_WRITE(events, struct_epoll_event_sz);
+ COMMON_SYSCALL_FD_ACQUIRE(epfd);
+ if (events)
+ POST_WRITE(events, res * struct_epoll_event_sz);
}
}
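
Besides the reflow, this hunk changes behavior: the epoll post hooks now register an acquire on the epoll fd and mark res records as written instead of a single struct, and epoll_pwait2 gains hooks of its own. A hedged sketch of how a client brackets the raw syscall with the public hook macros (names follow the sanitizer/linux_syscall_hooks.h convention; the epoll_pwait2 pair is assumed to have been added alongside these handlers):

#include <sanitizer/linux_syscall_hooks.h>
#include <signal.h>
#include <sys/epoll.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

long my_epoll_pwait2(int epfd, struct epoll_event *events, int maxevents,
                     const struct timespec *timeout, const sigset_t *sigmask) {
  long sigsetsize = _NSIG / 8;  // kernel sigset size, not sizeof(sigset_t)
  __sanitizer_syscall_pre_epoll_pwait2(epfd, events, maxevents, timeout,
                                       sigmask, sigsetsize);
  long res = syscall(SYS_epoll_pwait2, epfd, events, maxevents, timeout,
                     sigmask, sigsetsize);
  // On success the post hook marks res * sizeof(struct epoll_event) bytes of
  // `events` as initialized: one record per ready descriptor.
  __sanitizer_syscall_post_epoll_pwait2(res, epfd, events, maxevents, timeout,
                                        sigmask, sigsetsize);
  return res;
}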
@@ -1993,7 +2182,8 @@ PRE_SYSCALL(newuname)(void *name) {}
POST_SYSCALL(newuname)(long res, void *name) {
if (res >= 0) {
- if (name) POST_WRITE(name, struct_new_utsname_sz);
+ if (name)
+ POST_WRITE(name, struct_new_utsname_sz);
}
}
@@ -2001,7 +2191,8 @@ PRE_SYSCALL(uname)(void *arg0) {}
POST_SYSCALL(uname)(long res, void *arg0) {
if (res >= 0) {
- if (arg0) POST_WRITE(arg0, struct_old_utsname_sz);
+ if (arg0)
+ POST_WRITE(arg0, struct_old_utsname_sz);
}
}
@@ -2009,7 +2200,8 @@ PRE_SYSCALL(olduname)(void *arg0) {}
POST_SYSCALL(olduname)(long res, void *arg0) {
if (res >= 0) {
- if (arg0) POST_WRITE(arg0, struct_oldold_utsname_sz);
+ if (arg0)
+ POST_WRITE(arg0, struct_oldold_utsname_sz);
}
}
@@ -2017,7 +2209,8 @@ PRE_SYSCALL(getrlimit)(long resource, void *rlim) {}
POST_SYSCALL(getrlimit)(long res, long resource, void *rlim) {
if (res >= 0) {
- if (rlim) POST_WRITE(rlim, struct_rlimit_sz);
+ if (rlim)
+ POST_WRITE(rlim, struct_rlimit_sz);
}
}
@@ -2025,7 +2218,8 @@ PRE_SYSCALL(old_getrlimit)(long resource, void *rlim) {}
POST_SYSCALL(old_getrlimit)(long res, long resource, void *rlim) {
if (res >= 0) {
- if (rlim) POST_WRITE(rlim, struct_rlimit_sz);
+ if (rlim)
+ POST_WRITE(rlim, struct_rlimit_sz);
}
}
@@ -2033,29 +2227,33 @@ PRE_SYSCALL(setrlimit)(long resource, void *rlim) {}
POST_SYSCALL(setrlimit)(long res, long resource, void *rlim) {
if (res >= 0) {
- if (rlim) POST_WRITE(rlim, struct_rlimit_sz);
+ if (rlim)
+ POST_WRITE(rlim, struct_rlimit_sz);
}
}
-#if !SANITIZER_ANDROID
-PRE_SYSCALL(prlimit64)(long pid, long resource, const void *new_rlim,
- void *old_rlim) {
- if (new_rlim) PRE_READ(new_rlim, struct_rlimit64_sz);
+# if SANITIZER_GLIBC
+PRE_SYSCALL(prlimit64)
+(long pid, long resource, const void *new_rlim, void *old_rlim) {
+ if (new_rlim)
+ PRE_READ(new_rlim, struct_rlimit64_sz);
}
-POST_SYSCALL(prlimit64)(long res, long pid, long resource, const void *new_rlim,
- void *old_rlim) {
+POST_SYSCALL(prlimit64)
+(long res, long pid, long resource, const void *new_rlim, void *old_rlim) {
if (res >= 0) {
- if (old_rlim) POST_WRITE(old_rlim, struct_rlimit64_sz);
+ if (old_rlim)
+ POST_WRITE(old_rlim, struct_rlimit64_sz);
}
}
-#endif
+# endif
PRE_SYSCALL(getrusage)(long who, void *ru) {}
POST_SYSCALL(getrusage)(long res, long who, void *ru) {
if (res >= 0) {
- if (ru) POST_WRITE(ru, struct_rusage_sz);
+ if (ru)
+ POST_WRITE(ru, struct_rusage_sz);
}
}
@@ -2068,31 +2266,34 @@ PRE_SYSCALL(msgget)(long key, long msgflg) {}
POST_SYSCALL(msgget)(long res, long key, long msgflg) {}
PRE_SYSCALL(msgsnd)(long msqid, void *msgp, long msgsz, long msgflg) {
- if (msgp) PRE_READ(msgp, msgsz);
+ if (msgp)
+ PRE_READ(msgp, msgsz);
}
-POST_SYSCALL(msgsnd)(long res, long msqid, void *msgp, long msgsz,
- long msgflg) {}
+POST_SYSCALL(msgsnd)
+(long res, long msqid, void *msgp, long msgsz, long msgflg) {}
-PRE_SYSCALL(msgrcv)(long msqid, void *msgp, long msgsz, long msgtyp,
- long msgflg) {}
+PRE_SYSCALL(msgrcv)
+(long msqid, void *msgp, long msgsz, long msgtyp, long msgflg) {}
-POST_SYSCALL(msgrcv)(long res, long msqid, void *msgp, long msgsz, long msgtyp,
- long msgflg) {
+POST_SYSCALL(msgrcv)
+(long res, long msqid, void *msgp, long msgsz, long msgtyp, long msgflg) {
if (res >= 0) {
- if (msgp) POST_WRITE(msgp, res);
+ if (msgp)
+ POST_WRITE(msgp, res);
}
}
-#if !SANITIZER_ANDROID
+# if !SANITIZER_ANDROID
PRE_SYSCALL(msgctl)(long msqid, long cmd, void *buf) {}
POST_SYSCALL(msgctl)(long res, long msqid, long cmd, void *buf) {
if (res >= 0) {
- if (buf) POST_WRITE(buf, struct_msqid_ds_sz);
+ if (buf)
+ POST_WRITE(buf, struct_msqid_ds_sz);
}
}
-#endif
+# endif
PRE_SYSCALL(semget)(long key, long nsems, long semflg) {}
@@ -2106,13 +2307,14 @@ PRE_SYSCALL(semctl)(long semid, long semnum, long cmd, void *arg) {}
POST_SYSCALL(semctl)(long res, long semid, long semnum, long cmd, void *arg) {}
-PRE_SYSCALL(semtimedop)(long semid, void *sops, long nsops,
- const void *timeout) {
- if (timeout) PRE_READ(timeout, struct_timespec_sz);
+PRE_SYSCALL(semtimedop)
+(long semid, void *sops, long nsops, const void *timeout) {
+ if (timeout)
+ PRE_READ(timeout, struct_timespec_sz);
}
-POST_SYSCALL(semtimedop)(long res, long semid, void *sops, long nsops,
- const void *timeout) {}
+POST_SYSCALL(semtimedop)
+(long res, long semid, void *sops, long nsops, const void *timeout) {}
PRE_SYSCALL(shmat)(long shmid, void *shmaddr, long shmflg) {}
@@ -2138,18 +2340,20 @@ POST_SYSCALL(shmdt)(long res, void *shmaddr) {
}
}
-PRE_SYSCALL(ipc)(long call, long first, long second, long third, void *ptr,
- long fifth) {}
+PRE_SYSCALL(ipc)
+(long call, long first, long second, long third, void *ptr, long fifth) {}
-POST_SYSCALL(ipc)(long res, long call, long first, long second, long third,
- void *ptr, long fifth) {}
+POST_SYSCALL(ipc)
+(long res, long call, long first, long second, long third, void *ptr,
+ long fifth) {}
-#if !SANITIZER_ANDROID
+# if !SANITIZER_ANDROID
PRE_SYSCALL(shmctl)(long shmid, long cmd, void *buf) {}
POST_SYSCALL(shmctl)(long res, long shmid, long cmd, void *buf) {
if (res >= 0) {
- if (buf) POST_WRITE(buf, sizeof(__sanitizer_shmid_ds));
+ if (buf)
+ POST_WRITE(buf, sizeof(__sanitizer_shmid_ds));
}
}
@@ -2158,10 +2362,11 @@ PRE_SYSCALL(mq_open)(const void *name, long oflag, long mode, void *attr) {
PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
}
-POST_SYSCALL(mq_open)(long res, const void *name, long oflag, long mode,
- void *attr) {
+POST_SYSCALL(mq_open)
+(long res, const void *name, long oflag, long mode, void *attr) {
if (res >= 0) {
- if (attr) POST_WRITE(attr, struct_mq_attr_sz);
+ if (attr)
+ POST_WRITE(attr, struct_mq_attr_sz);
}
}
@@ -2172,62 +2377,73 @@ PRE_SYSCALL(mq_unlink)(const void *name) {
POST_SYSCALL(mq_unlink)(long res, const void *name) {}
-PRE_SYSCALL(mq_timedsend)(long mqdes, const void *msg_ptr, long msg_len,
- long msg_prio, const void *abs_timeout) {
- if (msg_ptr) PRE_READ(msg_ptr, msg_len);
- if (abs_timeout) PRE_READ(abs_timeout, struct_timespec_sz);
+PRE_SYSCALL(mq_timedsend)
+(long mqdes, const void *msg_ptr, long msg_len, long msg_prio,
+ const void *abs_timeout) {
+ if (msg_ptr)
+ PRE_READ(msg_ptr, msg_len);
+ if (abs_timeout)
+ PRE_READ(abs_timeout, struct_timespec_sz);
}
-POST_SYSCALL(mq_timedsend)(long res, long mqdes, const void *msg_ptr,
- long msg_len, long msg_prio,
- const void *abs_timeout) {}
+POST_SYSCALL(mq_timedsend)
+(long res, long mqdes, const void *msg_ptr, long msg_len, long msg_prio,
+ const void *abs_timeout) {}
-PRE_SYSCALL(mq_timedreceive)(long mqdes, void *msg_ptr, long msg_len,
- void *msg_prio, const void *abs_timeout) {
- if (abs_timeout) PRE_READ(abs_timeout, struct_timespec_sz);
+PRE_SYSCALL(mq_timedreceive)
+(long mqdes, void *msg_ptr, long msg_len, void *msg_prio,
+ const void *abs_timeout) {
+ if (abs_timeout)
+ PRE_READ(abs_timeout, struct_timespec_sz);
}
-POST_SYSCALL(mq_timedreceive)(long res, long mqdes, void *msg_ptr, long msg_len,
- int *msg_prio, const void *abs_timeout) {
+POST_SYSCALL(mq_timedreceive)
+(long res, long mqdes, void *msg_ptr, long msg_len, int *msg_prio,
+ const void *abs_timeout) {
if (res >= 0) {
- if (msg_ptr) POST_WRITE(msg_ptr, res);
- if (msg_prio) POST_WRITE(msg_prio, sizeof(*msg_prio));
+ if (msg_ptr)
+ POST_WRITE(msg_ptr, res);
+ if (msg_prio)
+ POST_WRITE(msg_prio, sizeof(*msg_prio));
}
}
PRE_SYSCALL(mq_notify)(long mqdes, const void *notification) {
- if (notification) PRE_READ(notification, struct_sigevent_sz);
+ if (notification)
+ PRE_READ(notification, struct_sigevent_sz);
}
POST_SYSCALL(mq_notify)(long res, long mqdes, const void *notification) {}
PRE_SYSCALL(mq_getsetattr)(long mqdes, const void *mqstat, void *omqstat) {
- if (mqstat) PRE_READ(mqstat, struct_mq_attr_sz);
+ if (mqstat)
+ PRE_READ(mqstat, struct_mq_attr_sz);
}
-POST_SYSCALL(mq_getsetattr)(long res, long mqdes, const void *mqstat,
- void *omqstat) {
+POST_SYSCALL(mq_getsetattr)
+(long res, long mqdes, const void *mqstat, void *omqstat) {
if (res >= 0) {
- if (omqstat) POST_WRITE(omqstat, struct_mq_attr_sz);
+ if (omqstat)
+ POST_WRITE(omqstat, struct_mq_attr_sz);
}
}
-#endif // SANITIZER_ANDROID
+# endif // SANITIZER_ANDROID
PRE_SYSCALL(pciconfig_iobase)(long which, long bus, long devfn) {}
POST_SYSCALL(pciconfig_iobase)(long res, long which, long bus, long devfn) {}
-PRE_SYSCALL(pciconfig_read)(long bus, long dfn, long off, long len, void *buf) {
-}
+PRE_SYSCALL(pciconfig_read)
+(long bus, long dfn, long off, long len, void *buf) {}
-POST_SYSCALL(pciconfig_read)(long res, long bus, long dfn, long off, long len,
- void *buf) {}
+POST_SYSCALL(pciconfig_read)
+(long res, long bus, long dfn, long off, long len, void *buf) {}
-PRE_SYSCALL(pciconfig_write)(long bus, long dfn, long off, long len,
- void *buf) {}
+PRE_SYSCALL(pciconfig_write)
+(long bus, long dfn, long off, long len, void *buf) {}
-POST_SYSCALL(pciconfig_write)(long res, long bus, long dfn, long off, long len,
- void *buf) {}
+POST_SYSCALL(pciconfig_write)
+(long res, long bus, long dfn, long off, long len, void *buf) {}
PRE_SYSCALL(swapon)(const void *specialfile, long swap_flags) {
if (specialfile)
@@ -2247,8 +2463,10 @@ POST_SYSCALL(swapoff)(long res, const void *specialfile) {}
PRE_SYSCALL(sysctl)(__sanitizer___sysctl_args *args) {
if (args) {
- if (args->name) PRE_READ(args->name, args->nlen * sizeof(*args->name));
- if (args->newval) PRE_READ(args->name, args->newlen);
+ if (args->name)
+ PRE_READ(args->name, args->nlen * sizeof(*args->name));
+ if (args->newval)
+      PRE_READ(args->newval, args->newlen);
}
}
@@ -2265,7 +2483,8 @@ PRE_SYSCALL(sysinfo)(void *info) {}
POST_SYSCALL(sysinfo)(long res, void *info) {
if (res >= 0) {
- if (info) POST_WRITE(info, struct_sysinfo_sz);
+ if (info)
+ POST_WRITE(info, struct_sysinfo_sz);
}
}
@@ -2294,10 +2513,10 @@ PRE_SYSCALL(ni_syscall)() {}
POST_SYSCALL(ni_syscall)(long res) {}
PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) {
-#if !SANITIZER_ANDROID && \
- (defined(__i386) || defined(__x86_64) || defined(__mips64) || \
- defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__) || \
- SANITIZER_RISCV64)
+# if !SANITIZER_ANDROID && \
+ (defined(__i386) || defined(__x86_64) || defined(__mips64) || \
+ defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__) || \
+ defined(__loongarch__) || SANITIZER_RISCV64)
if (data) {
if (request == ptrace_setregs) {
PRE_READ((void *)data, struct_user_regs_struct_sz);
@@ -2312,14 +2531,14 @@ PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) {
PRE_READ(iov->iov_base, iov->iov_len);
}
}
-#endif
+# endif
}
POST_SYSCALL(ptrace)(long res, long request, long pid, long addr, long data) {
-#if !SANITIZER_ANDROID && \
- (defined(__i386) || defined(__x86_64) || defined(__mips64) || \
- defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__) || \
- SANITIZER_RISCV64)
+# if !SANITIZER_ANDROID && \
+ (defined(__i386) || defined(__x86_64) || defined(__mips64) || \
+ defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__) || \
+ defined(__loongarch__) || SANITIZER_RISCV64)
if (res >= 0 && data) {
// Note that this is different from the interceptor in
// sanitizer_common_interceptors.inc.
@@ -2340,11 +2559,12 @@ POST_SYSCALL(ptrace)(long res, long request, long pid, long addr, long data) {
POST_WRITE((void *)data, sizeof(void *));
}
}
-#endif
+# endif
}
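
The matching post-hook branch for the regset reads is elided by this hunk; it mirrors the visible setregset pre hook. A sketch of that branch, assuming the ptrace_getregset constant defined next to ptrace_setregs in the platform limits:

  } else if (request == ptrace_getregset) {
    __sanitizer_iovec *iov = (__sanitizer_iovec *)data;
    // The kernel fills iov_base and updates iov_len to the amount written.
    POST_WRITE(iov->iov_base, iov->iov_len);
  }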
-PRE_SYSCALL(add_key)(const void *_type, const void *_description,
- const void *_payload, long plen, long destringid) {
+PRE_SYSCALL(add_key)
+(const void *_type, const void *_description, const void *_payload, long plen,
+ long destringid) {
if (_type)
PRE_READ(_type, __sanitizer::internal_strlen((const char *)_type) + 1);
if (_description)
@@ -2352,11 +2572,13 @@ PRE_SYSCALL(add_key)(const void *_type, const void *_description,
__sanitizer::internal_strlen((const char *)_description) + 1);
}
-POST_SYSCALL(add_key)(long res, const void *_type, const void *_description,
- const void *_payload, long plen, long destringid) {}
+POST_SYSCALL(add_key)
+(long res, const void *_type, const void *_description, const void *_payload,
+ long plen, long destringid) {}
-PRE_SYSCALL(request_key)(const void *_type, const void *_description,
- const void *_callout_info, long destringid) {
+PRE_SYSCALL(request_key)
+(const void *_type, const void *_description, const void *_callout_info,
+ long destringid) {
if (_type)
PRE_READ(_type, __sanitizer::internal_strlen((const char *)_type) + 1);
if (_description)
@@ -2367,13 +2589,14 @@ PRE_SYSCALL(request_key)(const void *_type, const void *_description,
__sanitizer::internal_strlen((const char *)_callout_info) + 1);
}
-POST_SYSCALL(request_key)(long res, const void *_type, const void *_description,
- const void *_callout_info, long destringid) {}
+POST_SYSCALL(request_key)
+(long res, const void *_type, const void *_description,
+ const void *_callout_info, long destringid) {}
PRE_SYSCALL(keyctl)(long cmd, long arg2, long arg3, long arg4, long arg5) {}
-POST_SYSCALL(keyctl)(long res, long cmd, long arg2, long arg3, long arg4,
- long arg5) {}
+POST_SYSCALL(keyctl)
+(long res, long cmd, long arg2, long arg3, long arg4, long arg5) {}
PRE_SYSCALL(ioprio_set)(long which, long who, long ioprio) {}
@@ -2387,50 +2610,62 @@ PRE_SYSCALL(set_mempolicy)(long mode, void *nmask, long maxnode) {}
POST_SYSCALL(set_mempolicy)(long res, long mode, void *nmask, long maxnode) {
if (res >= 0) {
- if (nmask) POST_WRITE(nmask, sizeof(long));
+ if (nmask)
+ POST_WRITE(nmask, sizeof(long));
}
}
-PRE_SYSCALL(migrate_pages)(long pid, long maxnode, const void *from,
- const void *to) {
- if (from) PRE_READ(from, sizeof(long));
- if (to) PRE_READ(to, sizeof(long));
+PRE_SYSCALL(migrate_pages)
+(long pid, long maxnode, const void *from, const void *to) {
+ if (from)
+ PRE_READ(from, sizeof(long));
+ if (to)
+ PRE_READ(to, sizeof(long));
}
-POST_SYSCALL(migrate_pages)(long res, long pid, long maxnode, const void *from,
- const void *to) {}
+POST_SYSCALL(migrate_pages)
+(long res, long pid, long maxnode, const void *from, const void *to) {}
-PRE_SYSCALL(move_pages)(long pid, long nr_pages, const void **pages,
- const int *nodes, int *status, long flags) {
- if (pages) PRE_READ(pages, nr_pages * sizeof(*pages));
- if (nodes) PRE_READ(nodes, nr_pages * sizeof(*nodes));
+PRE_SYSCALL(move_pages)
+(long pid, long nr_pages, const void **pages, const int *nodes, int *status,
+ long flags) {
+ if (pages)
+ PRE_READ(pages, nr_pages * sizeof(*pages));
+ if (nodes)
+ PRE_READ(nodes, nr_pages * sizeof(*nodes));
}
-POST_SYSCALL(move_pages)(long res, long pid, long nr_pages, const void **pages,
- const int *nodes, int *status, long flags) {
+POST_SYSCALL(move_pages)
+(long res, long pid, long nr_pages, const void **pages, const int *nodes,
+ int *status, long flags) {
if (res >= 0) {
- if (status) POST_WRITE(status, nr_pages * sizeof(*status));
+ if (status)
+ POST_WRITE(status, nr_pages * sizeof(*status));
}
}
-PRE_SYSCALL(mbind)(long start, long len, long mode, void *nmask, long maxnode,
- long flags) {}
+PRE_SYSCALL(mbind)
+(long start, long len, long mode, void *nmask, long maxnode, long flags) {}
-POST_SYSCALL(mbind)(long res, long start, long len, long mode, void *nmask,
- long maxnode, long flags) {
+POST_SYSCALL(mbind)
+(long res, long start, long len, long mode, void *nmask, long maxnode,
+ long flags) {
if (res >= 0) {
- if (nmask) POST_WRITE(nmask, sizeof(long));
+ if (nmask)
+ POST_WRITE(nmask, sizeof(long));
}
}
-PRE_SYSCALL(get_mempolicy)(void *policy, void *nmask, long maxnode, long addr,
- long flags) {}
+PRE_SYSCALL(get_mempolicy)
+(void *policy, void *nmask, long maxnode, long addr, long flags) {}
-POST_SYSCALL(get_mempolicy)(long res, void *policy, void *nmask, long maxnode,
- long addr, long flags) {
+POST_SYSCALL(get_mempolicy)
+(long res, void *policy, void *nmask, long maxnode, long addr, long flags) {
if (res >= 0) {
- if (policy) POST_WRITE(policy, sizeof(int));
- if (nmask) POST_WRITE(nmask, sizeof(long));
+ if (policy)
+ POST_WRITE(policy, sizeof(int));
+ if (nmask)
+ POST_WRITE(nmask, sizeof(long));
}
}
@@ -2447,8 +2682,8 @@ PRE_SYSCALL(inotify_add_watch)(long fd, const void *path, long mask) {
PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
}
-POST_SYSCALL(inotify_add_watch)(long res, long fd, const void *path,
- long mask) {}
+POST_SYSCALL(inotify_add_watch)
+(long res, long fd, const void *path, long mask) {}
PRE_SYSCALL(inotify_rm_watch)(long fd, long wd) {}
@@ -2458,8 +2693,10 @@ PRE_SYSCALL(spu_run)(long fd, void *unpc, void *ustatus) {}
POST_SYSCALL(spu_run)(long res, long fd, unsigned *unpc, unsigned *ustatus) {
if (res >= 0) {
- if (unpc) POST_WRITE(unpc, sizeof(*unpc));
- if (ustatus) POST_WRITE(ustatus, sizeof(*ustatus));
+ if (unpc)
+ POST_WRITE(unpc, sizeof(*unpc));
+ if (ustatus)
+ POST_WRITE(ustatus, sizeof(*ustatus));
}
}
@@ -2468,8 +2705,8 @@ PRE_SYSCALL(spu_create)(const void *name, long flags, long mode, long fd) {
PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
}
-POST_SYSCALL(spu_create)(long res, const void *name, long flags, long mode,
- long fd) {}
+POST_SYSCALL(spu_create)
+(long res, const void *name, long flags, long mode, long fd) {}
PRE_SYSCALL(mknodat)(long dfd, const void *filename, long mode, long dev) {
if (filename)
@@ -2477,8 +2714,8 @@ PRE_SYSCALL(mknodat)(long dfd, const void *filename, long mode, long dev) {
__sanitizer::internal_strlen((const char *)filename) + 1);
}
-POST_SYSCALL(mknodat)(long res, long dfd, const void *filename, long mode,
- long dev) {}
+POST_SYSCALL(mknodat)
+(long res, long dfd, const void *filename, long mode, long dev) {}
PRE_SYSCALL(mkdirat)(long dfd, const void *pathname, long mode) {
if (pathname)
@@ -2503,30 +2740,33 @@ PRE_SYSCALL(symlinkat)(const void *oldname, long newdfd, const void *newname) {
PRE_READ(newname, __sanitizer::internal_strlen((const char *)newname) + 1);
}
-POST_SYSCALL(symlinkat)(long res, const void *oldname, long newdfd,
- const void *newname) {}
+POST_SYSCALL(symlinkat)
+(long res, const void *oldname, long newdfd, const void *newname) {}
-PRE_SYSCALL(linkat)(long olddfd, const void *oldname, long newdfd,
- const void *newname, long flags) {
+PRE_SYSCALL(linkat)
+(long olddfd, const void *oldname, long newdfd, const void *newname,
+ long flags) {
if (oldname)
PRE_READ(oldname, __sanitizer::internal_strlen((const char *)oldname) + 1);
if (newname)
PRE_READ(newname, __sanitizer::internal_strlen((const char *)newname) + 1);
}
-POST_SYSCALL(linkat)(long res, long olddfd, const void *oldname, long newdfd,
- const void *newname, long flags) {}
+POST_SYSCALL(linkat)
+(long res, long olddfd, const void *oldname, long newdfd, const void *newname,
+ long flags) {}
-PRE_SYSCALL(renameat)(long olddfd, const void *oldname, long newdfd,
- const void *newname) {
+PRE_SYSCALL(renameat)
+(long olddfd, const void *oldname, long newdfd, const void *newname) {
if (oldname)
PRE_READ(oldname, __sanitizer::internal_strlen((const char *)oldname) + 1);
if (newname)
PRE_READ(newname, __sanitizer::internal_strlen((const char *)newname) + 1);
}
-POST_SYSCALL(renameat)(long res, long olddfd, const void *oldname, long newdfd,
- const void *newname) {}
+POST_SYSCALL(renameat)
+(long res, long olddfd, const void *oldname, long newdfd, const void *newname) {
+}
PRE_SYSCALL(futimesat)(long dfd, const void *filename, void *utimes) {
if (filename)
@@ -2534,10 +2774,11 @@ PRE_SYSCALL(futimesat)(long dfd, const void *filename, void *utimes) {
__sanitizer::internal_strlen((const char *)filename) + 1);
}
-POST_SYSCALL(futimesat)(long res, long dfd, const void *filename,
- void *utimes) {
+POST_SYSCALL(futimesat)
+(long res, long dfd, const void *filename, void *utimes) {
if (res >= 0) {
- if (utimes) POST_WRITE(utimes, timeval_sz);
+ if (utimes)
+ POST_WRITE(utimes, timeval_sz);
}
}
@@ -2557,15 +2798,15 @@ PRE_SYSCALL(fchmodat)(long dfd, const void *filename, long mode) {
POST_SYSCALL(fchmodat)(long res, long dfd, const void *filename, long mode) {}
-PRE_SYSCALL(fchownat)(long dfd, const void *filename, long user, long group,
- long flag) {
+PRE_SYSCALL(fchownat)
+(long dfd, const void *filename, long user, long group, long flag) {
if (filename)
PRE_READ(filename,
__sanitizer::internal_strlen((const char *)filename) + 1);
}
-POST_SYSCALL(fchownat)(long res, long dfd, const void *filename, long user,
- long group, long flag) {}
+POST_SYSCALL(fchownat)
+(long res, long dfd, const void *filename, long user, long group, long flag) {}
PRE_SYSCALL(openat)(long dfd, const void *filename, long flags, long mode) {
if (filename)
@@ -2573,34 +2814,36 @@ PRE_SYSCALL(openat)(long dfd, const void *filename, long flags, long mode) {
__sanitizer::internal_strlen((const char *)filename) + 1);
}
-POST_SYSCALL(openat)(long res, long dfd, const void *filename, long flags,
- long mode) {}
+POST_SYSCALL(openat)
+(long res, long dfd, const void *filename, long flags, long mode) {}
-PRE_SYSCALL(newfstatat)(long dfd, const void *filename, void *statbuf,
- long flag) {
+PRE_SYSCALL(newfstatat)
+(long dfd, const void *filename, void *statbuf, long flag) {
if (filename)
PRE_READ(filename,
__sanitizer::internal_strlen((const char *)filename) + 1);
}
-POST_SYSCALL(newfstatat)(long res, long dfd, const void *filename,
- void *statbuf, long flag) {
+POST_SYSCALL(newfstatat)
+(long res, long dfd, const void *filename, void *statbuf, long flag) {
if (res >= 0) {
- if (statbuf) POST_WRITE(statbuf, struct_kernel_stat_sz);
+ if (statbuf)
+ POST_WRITE(statbuf, struct_kernel_stat_sz);
}
}
-PRE_SYSCALL(fstatat64)(long dfd, const void *filename, void *statbuf,
- long flag) {
+PRE_SYSCALL(fstatat64)
+(long dfd, const void *filename, void *statbuf, long flag) {
if (filename)
PRE_READ(filename,
__sanitizer::internal_strlen((const char *)filename) + 1);
}
-POST_SYSCALL(fstatat64)(long res, long dfd, const void *filename, void *statbuf,
- long flag) {
+POST_SYSCALL(fstatat64)
+(long res, long dfd, const void *filename, void *statbuf, long flag) {
if (res >= 0) {
- if (statbuf) POST_WRITE(statbuf, struct_kernel_stat64_sz);
+ if (statbuf)
+ POST_WRITE(statbuf, struct_kernel_stat64_sz);
}
}
@@ -2609,25 +2852,26 @@ PRE_SYSCALL(readlinkat)(long dfd, const void *path, void *buf, long bufsiz) {
PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
}
-POST_SYSCALL(readlinkat)(long res, long dfd, const void *path, void *buf,
- long bufsiz) {
+POST_SYSCALL(readlinkat)
+(long res, long dfd, const void *path, void *buf, long bufsiz) {
if (res >= 0) {
if (buf)
POST_WRITE(buf, __sanitizer::internal_strlen((const char *)buf) + 1);
}
}
-PRE_SYSCALL(utimensat)(long dfd, const void *filename, void *utimes,
- long flags) {
+PRE_SYSCALL(utimensat)
+(long dfd, const void *filename, void *utimes, long flags) {
if (filename)
PRE_READ(filename,
__sanitizer::internal_strlen((const char *)filename) + 1);
}
-POST_SYSCALL(utimensat)(long res, long dfd, const void *filename, void *utimes,
- long flags) {
+POST_SYSCALL(utimensat)
+(long res, long dfd, const void *filename, void *utimes, long flags) {
if (res >= 0) {
- if (utimes) POST_WRITE(utimes, struct_timespec_sz);
+ if (utimes)
+ POST_WRITE(utimes, struct_timespec_sz);
}
}
@@ -2635,24 +2879,28 @@ PRE_SYSCALL(unshare)(long unshare_flags) {}
POST_SYSCALL(unshare)(long res, long unshare_flags) {}
-PRE_SYSCALL(splice)(long fd_in, void *off_in, long fd_out, void *off_out,
- long len, long flags) {}
+PRE_SYSCALL(splice)
+(long fd_in, void *off_in, long fd_out, void *off_out, long len, long flags) {}
-POST_SYSCALL(splice)(long res, long fd_in, void *off_in, long fd_out,
- void *off_out, long len, long flags) {
+POST_SYSCALL(splice)
+(long res, long fd_in, void *off_in, long fd_out, void *off_out, long len,
+ long flags) {
if (res >= 0) {
- if (off_in) POST_WRITE(off_in, sizeof(long long));
- if (off_out) POST_WRITE(off_out, sizeof(long long));
+ if (off_in)
+ POST_WRITE(off_in, sizeof(long long));
+ if (off_out)
+ POST_WRITE(off_out, sizeof(long long));
}
}
-PRE_SYSCALL(vmsplice)(long fd, const __sanitizer_iovec *iov, long nr_segs,
- long flags) {}
+PRE_SYSCALL(vmsplice)
+(long fd, const __sanitizer_iovec *iov, long nr_segs, long flags) {}
-POST_SYSCALL(vmsplice)(long res, long fd, const __sanitizer_iovec *iov,
- long nr_segs, long flags) {
+POST_SYSCALL(vmsplice)
+(long res, long fd, const __sanitizer_iovec *iov, long nr_segs, long flags) {
if (res >= 0) {
- if (iov) kernel_read_iovec(iov, nr_segs, res);
+ if (iov)
+ kernel_read_iovec(iov, nr_segs, res);
}
}
@@ -2662,8 +2910,8 @@ POST_SYSCALL(tee)(long res, long fdin, long fdout, long len, long flags) {}
PRE_SYSCALL(get_robust_list)(long pid, void *head_ptr, void *len_ptr) {}
-POST_SYSCALL(get_robust_list)(long res, long pid, void *head_ptr,
- void *len_ptr) {}
+POST_SYSCALL(get_robust_list)
+(long res, long pid, void *head_ptr, void *len_ptr) {}
PRE_SYSCALL(set_robust_list)(void *head, long len) {}
@@ -2673,27 +2921,31 @@ PRE_SYSCALL(getcpu)(void *cpu, void *node, void *cache) {}
POST_SYSCALL(getcpu)(long res, void *cpu, void *node, void *cache) {
if (res >= 0) {
- if (cpu) POST_WRITE(cpu, sizeof(unsigned));
- if (node) POST_WRITE(node, sizeof(unsigned));
+ if (cpu)
+ POST_WRITE(cpu, sizeof(unsigned));
+ if (node)
+ POST_WRITE(node, sizeof(unsigned));
// The third argument to this system call is nowadays unused.
}
}
PRE_SYSCALL(signalfd)(long ufd, void *user_mask, long sizemask) {}
-POST_SYSCALL(signalfd)(long res, long ufd, kernel_sigset_t *user_mask,
- long sizemask) {
+POST_SYSCALL(signalfd)
+(long res, long ufd, kernel_sigset_t *user_mask, long sizemask) {
if (res >= 0) {
- if (user_mask) POST_WRITE(user_mask, sizemask);
+ if (user_mask)
+ POST_WRITE(user_mask, sizemask);
}
}
PRE_SYSCALL(signalfd4)(long ufd, void *user_mask, long sizemask, long flags) {}
-POST_SYSCALL(signalfd4)(long res, long ufd, kernel_sigset_t *user_mask,
- long sizemask, long flags) {
+POST_SYSCALL(signalfd4)
+(long res, long ufd, kernel_sigset_t *user_mask, long sizemask, long flags) {
if (res >= 0) {
- if (user_mask) POST_WRITE(user_mask, sizemask);
+ if (user_mask)
+ POST_WRITE(user_mask, sizemask);
}
}
@@ -2701,15 +2953,17 @@ PRE_SYSCALL(timerfd_create)(long clockid, long flags) {}
POST_SYSCALL(timerfd_create)(long res, long clockid, long flags) {}
-PRE_SYSCALL(timerfd_settime)(long ufd, long flags, const void *utmr,
- void *otmr) {
- if (utmr) PRE_READ(utmr, struct_itimerspec_sz);
+PRE_SYSCALL(timerfd_settime)
+(long ufd, long flags, const void *utmr, void *otmr) {
+ if (utmr)
+ PRE_READ(utmr, struct_itimerspec_sz);
}
-POST_SYSCALL(timerfd_settime)(long res, long ufd, long flags, const void *utmr,
- void *otmr) {
+POST_SYSCALL(timerfd_settime)
+(long res, long ufd, long flags, const void *utmr, void *otmr) {
if (res >= 0) {
- if (otmr) POST_WRITE(otmr, struct_itimerspec_sz);
+ if (otmr)
+ POST_WRITE(otmr, struct_itimerspec_sz);
}
}
@@ -2717,7 +2971,8 @@ PRE_SYSCALL(timerfd_gettime)(long ufd, void *otmr) {}
POST_SYSCALL(timerfd_gettime)(long res, long ufd, void *otmr) {
if (res >= 0) {
- if (otmr) POST_WRITE(otmr, struct_itimerspec_sz);
+ if (otmr)
+ POST_WRITE(otmr, struct_itimerspec_sz);
}
}
@@ -2735,33 +2990,42 @@ POST_SYSCALL(old_readdir)(long res, long arg0, void *arg1, long arg2) {
// Missing definition of 'struct old_linux_dirent'.
}
-PRE_SYSCALL(pselect6)(long arg0, __sanitizer___kernel_fd_set *arg1,
- __sanitizer___kernel_fd_set *arg2,
- __sanitizer___kernel_fd_set *arg3, void *arg4,
- void *arg5) {}
+PRE_SYSCALL(pselect6)
+(long arg0, __sanitizer___kernel_fd_set *arg1,
+ __sanitizer___kernel_fd_set *arg2, __sanitizer___kernel_fd_set *arg3,
+ void *arg4, void *arg5) {}
-POST_SYSCALL(pselect6)(long res, long arg0, __sanitizer___kernel_fd_set *arg1,
- __sanitizer___kernel_fd_set *arg2,
- __sanitizer___kernel_fd_set *arg3, void *arg4,
- void *arg5) {
+POST_SYSCALL(pselect6)
+(long res, long arg0, __sanitizer___kernel_fd_set *arg1,
+ __sanitizer___kernel_fd_set *arg2, __sanitizer___kernel_fd_set *arg3,
+ void *arg4, void *arg5) {
if (res >= 0) {
- if (arg1) POST_WRITE(arg1, sizeof(*arg1));
- if (arg2) POST_WRITE(arg2, sizeof(*arg2));
- if (arg3) POST_WRITE(arg3, sizeof(*arg3));
- if (arg4) POST_WRITE(arg4, struct_timespec_sz);
+ if (arg1)
+ POST_WRITE(arg1, sizeof(*arg1));
+ if (arg2)
+ POST_WRITE(arg2, sizeof(*arg2));
+ if (arg3)
+ POST_WRITE(arg3, sizeof(*arg3));
+ if (arg4)
+ POST_WRITE(arg4, struct_timespec_sz);
}
}
-PRE_SYSCALL(ppoll)(__sanitizer_pollfd *arg0, long arg1, void *arg2,
- const kernel_sigset_t *arg3, long arg4) {
- if (arg3) PRE_READ(arg3, arg4);
+PRE_SYSCALL(ppoll)
+(__sanitizer_pollfd *arg0, long arg1, void *arg2, const kernel_sigset_t *arg3,
+ long arg4) {
+ if (arg3)
+ PRE_READ(arg3, arg4);
}
-POST_SYSCALL(ppoll)(long res, __sanitizer_pollfd *arg0, long arg1, void *arg2,
- const void *arg3, long arg4) {
+POST_SYSCALL(ppoll)
+(long res, __sanitizer_pollfd *arg0, long arg1, void *arg2, const void *arg3,
+ long arg4) {
if (res >= 0) {
- if (arg0) POST_WRITE(arg0, sizeof(*arg0));
- if (arg2) POST_WRITE(arg2, struct_timespec_sz);
+ if (arg0)
+ POST_WRITE(arg0, sizeof(*arg0));
+ if (arg2)
+ POST_WRITE(arg2, struct_timespec_sz);
}
}
@@ -2769,81 +3033,79 @@ PRE_SYSCALL(syncfs)(long fd) {}
POST_SYSCALL(syncfs)(long res, long fd) {}
-PRE_SYSCALL(perf_event_open)(__sanitizer_perf_event_attr *attr_uptr, long pid,
- long cpu, long group_fd, long flags) {
- if (attr_uptr) PRE_READ(attr_uptr, attr_uptr->size);
+PRE_SYSCALL(perf_event_open)
+(__sanitizer_perf_event_attr *attr_uptr, long pid, long cpu, long group_fd,
+ long flags) {
+ if (attr_uptr)
+ PRE_READ(attr_uptr, attr_uptr->size);
}
-POST_SYSCALL(perf_event_open)(long res, __sanitizer_perf_event_attr *attr_uptr,
- long pid, long cpu, long group_fd, long flags) {}
+POST_SYSCALL(perf_event_open)
+(long res, __sanitizer_perf_event_attr *attr_uptr, long pid, long cpu,
+ long group_fd, long flags) {}
-PRE_SYSCALL(mmap_pgoff)(long addr, long len, long prot, long flags, long fd,
- long pgoff) {}
+PRE_SYSCALL(mmap_pgoff)
+(long addr, long len, long prot, long flags, long fd, long pgoff) {}
-POST_SYSCALL(mmap_pgoff)(long res, long addr, long len, long prot, long flags,
- long fd, long pgoff) {}
+POST_SYSCALL(mmap_pgoff)
+(long res, long addr, long len, long prot, long flags, long fd, long pgoff) {}
PRE_SYSCALL(old_mmap)(void *arg) {}
POST_SYSCALL(old_mmap)(long res, void *arg) {}
-PRE_SYSCALL(name_to_handle_at)(long dfd, const void *name, void *handle,
- void *mnt_id, long flag) {}
+PRE_SYSCALL(name_to_handle_at)
+(long dfd, const void *name, void *handle, void *mnt_id, long flag) {}
-POST_SYSCALL(name_to_handle_at)(long res, long dfd, const void *name,
- void *handle, void *mnt_id, long flag) {}
+POST_SYSCALL(name_to_handle_at)
+(long res, long dfd, const void *name, void *handle, void *mnt_id, long flag) {}
PRE_SYSCALL(open_by_handle_at)(long mountdirfd, void *handle, long flags) {}
-POST_SYSCALL(open_by_handle_at)(long res, long mountdirfd, void *handle,
- long flags) {}
+POST_SYSCALL(open_by_handle_at)
+(long res, long mountdirfd, void *handle, long flags) {}
PRE_SYSCALL(setns)(long fd, long nstype) {}
POST_SYSCALL(setns)(long res, long fd, long nstype) {}
-PRE_SYSCALL(process_vm_readv)(long pid, const __sanitizer_iovec *lvec,
- long liovcnt, const void *rvec, long riovcnt,
- long flags) {}
+PRE_SYSCALL(process_vm_readv)
+(long pid, const __sanitizer_iovec *lvec, long liovcnt, const void *rvec,
+ long riovcnt, long flags) {}
-POST_SYSCALL(process_vm_readv)(long res, long pid,
- const __sanitizer_iovec *lvec, long liovcnt,
- const void *rvec, long riovcnt, long flags) {
+POST_SYSCALL(process_vm_readv)
+(long res, long pid, const __sanitizer_iovec *lvec, long liovcnt,
+ const void *rvec, long riovcnt, long flags) {
if (res >= 0) {
- if (lvec) kernel_write_iovec(lvec, liovcnt, res);
+ if (lvec)
+ kernel_write_iovec(lvec, liovcnt, res);
}
}
-PRE_SYSCALL(process_vm_writev)(long pid, const __sanitizer_iovec *lvec,
- long liovcnt, const void *rvec, long riovcnt,
- long flags) {}
+PRE_SYSCALL(process_vm_writev)
+(long pid, const __sanitizer_iovec *lvec, long liovcnt, const void *rvec,
+ long riovcnt, long flags) {}
-POST_SYSCALL(process_vm_writev)(long res, long pid,
- const __sanitizer_iovec *lvec, long liovcnt,
- const void *rvec, long riovcnt, long flags) {
+POST_SYSCALL(process_vm_writev)
+(long res, long pid, const __sanitizer_iovec *lvec, long liovcnt,
+ const void *rvec, long riovcnt, long flags) {
if (res >= 0) {
- if (lvec) kernel_read_iovec(lvec, liovcnt, res);
+ if (lvec)
+ kernel_read_iovec(lvec, liovcnt, res);
}
}
-PRE_SYSCALL(fork)() {
- COMMON_SYSCALL_PRE_FORK();
-}
+PRE_SYSCALL(fork)() { COMMON_SYSCALL_PRE_FORK(); }
-POST_SYSCALL(fork)(long res) {
- COMMON_SYSCALL_POST_FORK(res);
-}
+POST_SYSCALL(fork)(long res) { COMMON_SYSCALL_POST_FORK(res); }
-PRE_SYSCALL(vfork)() {
- COMMON_SYSCALL_PRE_FORK();
-}
+PRE_SYSCALL(vfork)() { COMMON_SYSCALL_PRE_FORK(); }
-POST_SYSCALL(vfork)(long res) {
- COMMON_SYSCALL_POST_FORK(res);
-}
+POST_SYSCALL(vfork)(long res) { COMMON_SYSCALL_POST_FORK(res); }
-PRE_SYSCALL(sigaction)(long signum, const __sanitizer_kernel_sigaction_t *act,
- __sanitizer_kernel_sigaction_t *oldact) {
+PRE_SYSCALL(sigaction)
+(long signum, const __sanitizer_kernel_sigaction_t *act,
+ __sanitizer_kernel_sigaction_t *oldact) {
if (act) {
PRE_READ(&act->sigaction, sizeof(act->sigaction));
PRE_READ(&act->sa_flags, sizeof(act->sa_flags));
@@ -2851,15 +3113,16 @@ PRE_SYSCALL(sigaction)(long signum, const __sanitizer_kernel_sigaction_t *act,
}
}
-POST_SYSCALL(sigaction)(long res, long signum,
- const __sanitizer_kernel_sigaction_t *act,
- __sanitizer_kernel_sigaction_t *oldact) {
- if (res >= 0 && oldact) POST_WRITE(oldact, sizeof(*oldact));
+POST_SYSCALL(sigaction)
+(long res, long signum, const __sanitizer_kernel_sigaction_t *act,
+ __sanitizer_kernel_sigaction_t *oldact) {
+ if (res >= 0 && oldact)
+ POST_WRITE(oldact, sizeof(*oldact));
}
-PRE_SYSCALL(rt_sigaction)(long signum,
- const __sanitizer_kernel_sigaction_t *act,
- __sanitizer_kernel_sigaction_t *oldact, SIZE_T sz) {
+PRE_SYSCALL(rt_sigaction)
+(long signum, const __sanitizer_kernel_sigaction_t *act,
+ __sanitizer_kernel_sigaction_t *oldact, SIZE_T sz) {
if (act) {
PRE_READ(&act->sigaction, sizeof(act->sigaction));
PRE_READ(&act->sa_flags, sizeof(act->sa_flags));
@@ -2867,9 +3130,9 @@ PRE_SYSCALL(rt_sigaction)(long signum,
}
}
-POST_SYSCALL(rt_sigaction)(long res, long signum,
- const __sanitizer_kernel_sigaction_t *act,
- __sanitizer_kernel_sigaction_t *oldact, SIZE_T sz) {
+POST_SYSCALL(rt_sigaction)
+(long res, long signum, const __sanitizer_kernel_sigaction_t *act,
+ __sanitizer_kernel_sigaction_t *oldact, SIZE_T sz) {
if (res >= 0 && oldact) {
SIZE_T oldact_sz = ((char *)&oldact->sa_mask) - ((char *)oldact) + sz;
POST_WRITE(oldact, oldact_sz);
@@ -2906,11 +3169,11 @@ POST_SYSCALL(sigaltstack)(long res, void *ss, void *oss) {
}
} // extern "C"
-#undef PRE_SYSCALL
-#undef PRE_READ
-#undef PRE_WRITE
-#undef POST_SYSCALL
-#undef POST_READ
-#undef POST_WRITE
+# undef PRE_SYSCALL
+# undef PRE_READ
+# undef PRE_WRITE
+# undef POST_SYSCALL
+# undef POST_READ
+# undef POST_WRITE
#endif // SANITIZER_LINUX
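
Almost every hunk in this file is the same mechanical reflow. The cause is that PRE_SYSCALL and POST_SYSCALL are declarator macros, so newer clang-format breaks the line right after the macro invocation. Simplified from the definitions near the top of sanitizer_common_syscalls.inc (attributes trimmed; a sketch, not a verbatim copy):

#define PRE_SYSCALL(name) \
  SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_syscall_pre_impl_##name
#define POST_SYSCALL(name) \
  SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_syscall_post_impl_##name

// clang-format cannot see the declarator hidden in the macro, so
//   POST_SYSCALL(bind)(long res, ...) { ... }
// becomes
//   POST_SYSCALL(bind)
//   (long res, long arg0, sanitizer_kernel_sockaddr *arg1, long arg2) { ... }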
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_coverage_fuchsia.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_coverage_fuchsia.cpp
index a52db08433e3..73668a56218c 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_coverage_fuchsia.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_coverage_fuchsia.cpp
@@ -33,8 +33,9 @@
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
+#include "sanitizer_interface_internal.h"
#include "sanitizer_internal_defs.h"
-#include "sanitizer_symbolizer_fuchsia.h"
+# include "sanitizer_symbolizer_markup_constants.h"
using namespace __sanitizer;
@@ -51,6 +52,8 @@ constexpr const char kSancovSinkName[] = "sancov";
// This class relies on zero-initialization.
class TracePcGuardController final {
public:
+ constexpr TracePcGuardController() {}
+
// For each PC location being tracked, there is a u32 reserved in global
// data called the "guard". At startup, we assign each guard slot a
// unique index into the big results array. Later during runtime, the
@@ -87,7 +90,7 @@ class TracePcGuardController final {
}
void Dump() {
- BlockingMutexLock locked(&setup_lock_);
+ Lock locked(&setup_lock_);
if (array_) {
CHECK_NE(vmo_, ZX_HANDLE_INVALID);
@@ -114,7 +117,7 @@ class TracePcGuardController final {
// We can always spare the 32G of address space.
static constexpr size_t MappingSize = sizeof(uptr) << 32;
- BlockingMutex setup_lock_ = BlockingMutex(LINKER_INITIALIZED);
+ Mutex setup_lock_;
uptr *array_ = nullptr;
u32 next_index_ = 0;
zx_handle_t vmo_ = {};
@@ -123,7 +126,7 @@ class TracePcGuardController final {
size_t DataSize() const { return next_index_ * sizeof(uintptr_t); }
u32 Setup(u32 num_guards) {
- BlockingMutexLock locked(&setup_lock_);
+ Lock locked(&setup_lock_);
DCHECK(common_flags()->coverage);
if (next_index_ == 0) {
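
These Fuchsia coverage hunks swap the legacy BlockingMutex for the newer Mutex and its RAII guard, and add a constexpr constructor so the zero-initialized controller stays constant-initializable. A minimal sketch of the migrated idiom, assuming the sanitizer_mutex.h types (with Lock being GenericScopedLock<Mutex>):

class Controller {
 public:
  constexpr Controller() {}

  void Dump() {
    Lock locked(&setup_lock_);  // was: BlockingMutexLock locked(&setup_lock_);
    // ... inspect state under the lock ...
  }

 private:
  // was: BlockingMutex setup_lock_ = BlockingMutex(LINKER_INITIALIZED);
  Mutex setup_lock_;
};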
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_coverage_interface.inc b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_coverage_interface.inc
index d7ab0c3d98c1..9d36a40270d5 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_coverage_interface.inc
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_coverage_interface.inc
@@ -27,6 +27,16 @@ INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_gep)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_pc_guard)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_pc_guard_init)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_pc_indir)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_load1)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_load2)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_load4)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_load8)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_load16)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_store1)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_store2)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_store4)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_store8)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_store16)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_switch)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_8bit_counters_init)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_bool_flag_init)
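
The new weak symbols back the -fsanitize-coverage=trace-loads and trace-stores instrumentation. A hedged example of the strong definitions a tool can supply (prototypes as documented for SanitizerCoverage; only two of the ten shown):

#include <cstdint>

extern "C" void __sanitizer_cov_load1(uint8_t *addr) {
  // Called before every 1-byte load of *addr in instrumented code.
}

extern "C" void __sanitizer_cov_store8(uint64_t *addr) {
  // Called before every 8-byte store to *addr in instrumented code.
}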
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cpp
index 73ebeb5fa14a..ce4326967180 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cpp
@@ -10,11 +10,13 @@
#include "sanitizer_platform.h"
#if !SANITIZER_FUCHSIA
-#include "sancov_flags.h"
-#include "sanitizer_allocator_internal.h"
-#include "sanitizer_atomic.h"
-#include "sanitizer_common.h"
-#include "sanitizer_file.h"
+# include "sancov_flags.h"
+# include "sanitizer_allocator_internal.h"
+# include "sanitizer_atomic.h"
+# include "sanitizer_common.h"
+# include "sanitizer_common/sanitizer_stacktrace.h"
+# include "sanitizer_file.h"
+# include "sanitizer_interface_internal.h"
using namespace __sanitizer;
@@ -72,8 +74,8 @@ static void SanitizerDumpCoverage(const uptr* unsorted_pcs, uptr len) {
const uptr pc = pcs[i];
if (!pc) continue;
- if (!__sanitizer_get_module_and_offset_for_pc(pc, nullptr, 0, &pcs[i])) {
- Printf("ERROR: unknown pc 0x%x (may happen if dlclose is used)\n", pc);
+ if (!GetModuleAndOffsetForPc(pc, nullptr, 0, &pcs[i])) {
+ Printf("ERROR: unknown pc 0x%zx (may happen if dlclose is used)\n", pc);
continue;
}
uptr module_base = pc - pcs[i];
@@ -87,8 +89,7 @@ static void SanitizerDumpCoverage(const uptr* unsorted_pcs, uptr len) {
last_base = module_base;
module_start_idx = i;
module_found = true;
- __sanitizer_get_module_and_offset_for_pc(pc, module_name, kMaxPathLength,
- &pcs[i]);
+ GetModuleAndOffsetForPc(pc, module_name, kMaxPathLength, &pcs[i]);
}
}
@@ -151,6 +152,55 @@ class TracePcGuardController {
static TracePcGuardController pc_guard_controller;
+// A basic default implementation of callbacks for
+// -fsanitize-coverage=inline-8bit-counters,pc-table.
+// Use TOOL_OPTIONS (UBSAN_OPTIONS, etc) to dump the coverage data:
+// * cov_8bit_counters_out=PATH to dump the 8bit counters.
+// * cov_pcs_out=PATH to dump the pc table.
+//
+// Most users will still need to define their own callbacks for greater
+// flexibility.
+namespace SingletonCounterCoverage {
+
+static char *counters_beg, *counters_end;
+static const uptr *pcs_beg, *pcs_end;
+
+static void DumpCoverage() {
+ const char* file_path = common_flags()->cov_8bit_counters_out;
+ if (file_path && internal_strlen(file_path)) {
+ fd_t fd = OpenFile(file_path);
+ FileCloser file_closer(fd);
+ uptr size = counters_end - counters_beg;
+ WriteToFile(fd, counters_beg, size);
+ if (common_flags()->verbosity)
+ __sanitizer::Printf("cov_8bit_counters_out: written %zd bytes to %s\n",
+ size, file_path);
+ }
+ file_path = common_flags()->cov_pcs_out;
+ if (file_path && internal_strlen(file_path)) {
+ fd_t fd = OpenFile(file_path);
+ FileCloser file_closer(fd);
+ uptr size = (pcs_end - pcs_beg) * sizeof(uptr);
+ WriteToFile(fd, pcs_beg, size);
+ if (common_flags()->verbosity)
+ __sanitizer::Printf("cov_pcs_out: written %zd bytes to %s\n", size,
+ file_path);
+ }
+}
+
+static void Cov8bitCountersInit(char* beg, char* end) {
+ counters_beg = beg;
+ counters_end = end;
+ Atexit(DumpCoverage);
+}
+
+static void CovPcsInit(const uptr* beg, const uptr* end) {
+ pcs_beg = beg;
+ pcs_end = end;
+}
+
+} // namespace SingletonCounterCoverage
+
} // namespace
} // namespace __sancov
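
A hedged usage sketch for these new defaults, with the flag names taken from the hunk above:

// Build with inline 8-bit counters and a PC table:
//   clang++ -fsanitize=address \
//           -fsanitize-coverage=inline-8bit-counters,pc-table app.cpp
// Dump the raw data at exit through the new runtime flags:
//   ASAN_OPTIONS=cov_8bit_counters_out=counters.bin:cov_pcs_out=pcs.bin ./a.out
// counters.bin holds one byte per instrumented edge; pcs.bin holds the raw
// uptr-sized pc-table entries. Both files are written from the Atexit hook.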
@@ -173,7 +223,8 @@ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_coverage(const uptr* pcs,
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard, u32* guard) {
if (!*guard) return;
- __sancov::pc_guard_controller.TracePcGuard(guard, GET_CALLER_PC() - 1);
+ __sancov::pc_guard_controller.TracePcGuard(
+ guard, StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()));
}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard_init,
@@ -191,7 +242,9 @@ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() {
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_reset() {
__sancov::pc_guard_controller.Reset();
}
-// Default empty implementations (weak). Users should redefine them.
+// Default implementations (weak): either empty or very simple.
+// Most users should redefine them.
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp1, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp2, void) {}
@@ -206,14 +259,37 @@ SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_div4, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_div8, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_gep, void) {}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_indir, void) {}
-SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_8bit_counters_init, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_load1, void){}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_load2, void){}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_load4, void){}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_load8, void){}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_load16, void){}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_store1, void){}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_store2, void){}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_store4, void){}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_store8, void){}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_store16, void){}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_8bit_counters_init,
+ char* start, char* end) {
+ __sancov::SingletonCounterCoverage::Cov8bitCountersInit(start, end);
+}
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_bool_flag_init, void) {}
-SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_pcs_init, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_pcs_init, const uptr* beg,
+ const uptr* end) {
+ __sancov::SingletonCounterCoverage::CovPcsInit(beg, end);
+}
} // extern "C"
// Weak definition for code instrumented with -fsanitize-coverage=stack-depth
// and later linked with code containing a strong definition.
// E.g., -fsanitize=fuzzer-no-link
+// FIXME: Update Apple deployment target so that thread_local is always
+// supported, and remove the #if.
+// FIXME: Figure out how this should work on Windows, exported thread_local
+// symbols are not supported:
+// "data with thread storage duration may not have dll interface"
+#if !SANITIZER_APPLE && !SANITIZER_WINDOWS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-SANITIZER_TLS_INITIAL_EXEC_ATTRIBUTE uptr __sancov_lowest_stack;
+thread_local uptr __sancov_lowest_stack;
+#endif
#endif // !SANITIZER_FUCHSIA
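
For reference, stack-depth instrumentation updates this thread_local at every function entry, and the strong definition normally lives in the tool (for example, libFuzzer). A sketch of the consumer side, with the reset convention assumed:

#include <cstdint>

extern thread_local uintptr_t __sancov_lowest_stack;  // defined by the tool

void StartNewInput() {
  // Instrumented function entries do, roughly:
  //   if ((uintptr_t)__builtin_frame_address(0) < __sancov_lowest_stack)
  //     __sancov_lowest_stack = (uintptr_t)__builtin_frame_address(0);
  // so reset high before each input to measure its deepest stack extent.
  __sancov_lowest_stack = UINTPTR_MAX;
}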
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_deadlock_detector.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_deadlock_detector.h
index b80cff460eda..0749f633b4bc 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_deadlock_detector.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_deadlock_detector.h
@@ -293,7 +293,7 @@ class DeadlockDetector {
}
// Returns true iff dtls is empty (no locks are currently held) and we can
- // add the node to the currently held locks w/o chanding the global state.
+ // add the node to the currently held locks w/o changing the global state.
// This operation is thread-safe as it only touches the dtls.
bool onFirstLock(DeadlockDetectorTLS<BV> *dtls, uptr node, u32 stk = 0) {
if (!dtls->empty()) return false;
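
The corrected comment describes the lock-free fast path; a hedged sketch of how a caller is expected to use it (method name from this header, surrounding scheme assumed):

// if (dd.onFirstLock(&dtls, node))
//   return;  // first lock held by this thread: recorded in TLS only,
//            // the global lock-order graph is left untouched
// // Slow path: take the detector's global mutex and add edges from every
// // currently-held lock to `node` before recording it.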
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_dense_map.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_dense_map.h
new file mode 100644
index 000000000000..046d77dddc9c
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_dense_map.h
@@ -0,0 +1,705 @@
+//===- sanitizer_dense_map.h - Dense probed hash table ----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This is a fork of the llvm/ADT/DenseMap.h class, with the following changes:
+// * Use mmap to allocate.
+// * No iterators.
+// * Does not shrink.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_DENSE_MAP_H
+#define SANITIZER_DENSE_MAP_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_dense_map_info.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_type_traits.h"
+
+namespace __sanitizer {
+
+template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
+ typename BucketT>
+class DenseMapBase {
+ public:
+ using size_type = unsigned;
+ using key_type = KeyT;
+ using mapped_type = ValueT;
+ using value_type = BucketT;
+
+ WARN_UNUSED_RESULT bool empty() const { return getNumEntries() == 0; }
+ unsigned size() const { return getNumEntries(); }
+
+ /// Grow the densemap so that it can contain at least \p NumEntries items
+ /// before resizing again.
+ void reserve(size_type NumEntries) {
+ auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);
+ if (NumBuckets > getNumBuckets())
+ grow(NumBuckets);
+ }
+
+ void clear() {
+ if (getNumEntries() == 0 && getNumTombstones() == 0)
+ return;
+
+ const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
+ if (__sanitizer::is_trivially_destructible<ValueT>::value) {
+ // Use a simpler loop when values don't need destruction.
+ for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P)
+ P->getFirst() = EmptyKey;
+ } else {
+ unsigned NumEntries = getNumEntries();
+ for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
+ if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) {
+ if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
+ P->getSecond().~ValueT();
+ --NumEntries;
+ }
+ P->getFirst() = EmptyKey;
+ }
+ }
+ CHECK_EQ(NumEntries, 0);
+ }
+ setNumEntries(0);
+ setNumTombstones(0);
+ }
+
+ /// Return 1 if the specified key is in the map, 0 otherwise.
+ size_type count(const KeyT &Key) const {
+ const BucketT *TheBucket;
+ return LookupBucketFor(Key, TheBucket) ? 1 : 0;
+ }
+
+ value_type *find(const KeyT &Key) {
+ BucketT *TheBucket;
+ if (LookupBucketFor(Key, TheBucket))
+ return TheBucket;
+ return nullptr;
+ }
+ const value_type *find(const KeyT &Key) const {
+ const BucketT *TheBucket;
+ if (LookupBucketFor(Key, TheBucket))
+ return TheBucket;
+ return nullptr;
+ }
+
+ /// Alternate version of find() which allows a different, and possibly
+ /// less expensive, key type.
+ /// The DenseMapInfo is responsible for supplying methods
+ /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
+ /// type used.
+ template <class LookupKeyT>
+ value_type *find_as(const LookupKeyT &Key) {
+ BucketT *TheBucket;
+ if (LookupBucketFor(Key, TheBucket))
+ return TheBucket;
+ return nullptr;
+ }
+ template <class LookupKeyT>
+ const value_type *find_as(const LookupKeyT &Key) const {
+ const BucketT *TheBucket;
+ if (LookupBucketFor(Key, TheBucket))
+ return TheBucket;
+ return nullptr;
+ }
+
+ /// lookup - Return the entry for the specified key, or a default
+ /// constructed value if no such entry exists.
+ ValueT lookup(const KeyT &Key) const {
+ const BucketT *TheBucket;
+ if (LookupBucketFor(Key, TheBucket))
+ return TheBucket->getSecond();
+ return ValueT();
+ }
+
+ // Inserts key,value pair into the map if the key isn't already in the map.
+ // If the key is already in the map, it returns false and doesn't update the
+ // value.
+ detail::DenseMapPair<value_type *, bool> insert(const value_type &KV) {
+ return try_emplace(KV.first, KV.second);
+ }
+
+ // Inserts key,value pair into the map if the key isn't already in the map.
+ // If the key is already in the map, it returns false and doesn't update the
+ // value.
+ detail::DenseMapPair<value_type *, bool> insert(value_type &&KV) {
+ return try_emplace(__sanitizer::move(KV.first),
+ __sanitizer::move(KV.second));
+ }
+
+ // Inserts key,value pair into the map if the key isn't already in the map.
+ // The value is constructed in-place if the key is not in the map, otherwise
+ // it is not moved.
+ template <typename... Ts>
+ detail::DenseMapPair<value_type *, bool> try_emplace(KeyT &&Key,
+ Ts &&...Args) {
+ BucketT *TheBucket;
+ if (LookupBucketFor(Key, TheBucket))
+ return {TheBucket, false}; // Already in map.
+
+ // Otherwise, insert the new element.
+ TheBucket = InsertIntoBucket(TheBucket, __sanitizer::move(Key),
+ __sanitizer::forward<Ts>(Args)...);
+ return {TheBucket, true};
+ }
+
+ // Inserts key,value pair into the map if the key isn't already in the map.
+ // The value is constructed in-place if the key is not in the map, otherwise
+ // it is not moved.
+ template <typename... Ts>
+ detail::DenseMapPair<value_type *, bool> try_emplace(const KeyT &Key,
+ Ts &&...Args) {
+ BucketT *TheBucket;
+ if (LookupBucketFor(Key, TheBucket))
+ return {TheBucket, false}; // Already in map.
+
+ // Otherwise, insert the new element.
+ TheBucket =
+ InsertIntoBucket(TheBucket, Key, __sanitizer::forward<Ts>(Args)...);
+ return {TheBucket, true};
+ }
+
+ /// Alternate version of insert() which allows a different, and possibly
+ /// less expensive, key type.
+ /// The DenseMapInfo is responsible for supplying methods
+ /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
+ /// type used.
+ template <typename LookupKeyT>
+ detail::DenseMapPair<value_type *, bool> insert_as(value_type &&KV,
+ const LookupKeyT &Val) {
+ BucketT *TheBucket;
+ if (LookupBucketFor(Val, TheBucket))
+ return {TheBucket, false}; // Already in map.
+
+ // Otherwise, insert the new element.
+ TheBucket =
+ InsertIntoBucketWithLookup(TheBucket, __sanitizer::move(KV.first),
+ __sanitizer::move(KV.second), Val);
+ return {TheBucket, true};
+ }
+
+ bool erase(const KeyT &Val) {
+ BucketT *TheBucket;
+ if (!LookupBucketFor(Val, TheBucket))
+ return false; // not in map.
+
+ TheBucket->getSecond().~ValueT();
+ TheBucket->getFirst() = getTombstoneKey();
+ decrementNumEntries();
+ incrementNumTombstones();
+ return true;
+ }
+
+ void erase(value_type *I) {
+ CHECK_NE(I, nullptr);
+ BucketT *TheBucket = &*I;
+ TheBucket->getSecond().~ValueT();
+ TheBucket->getFirst() = getTombstoneKey();
+ decrementNumEntries();
+ incrementNumTombstones();
+ }
+
+ value_type &FindAndConstruct(const KeyT &Key) {
+ BucketT *TheBucket;
+ if (LookupBucketFor(Key, TheBucket))
+ return *TheBucket;
+
+ return *InsertIntoBucket(TheBucket, Key);
+ }
+
+ ValueT &operator[](const KeyT &Key) { return FindAndConstruct(Key).second; }
+
+ value_type &FindAndConstruct(KeyT &&Key) {
+ BucketT *TheBucket;
+ if (LookupBucketFor(Key, TheBucket))
+ return *TheBucket;
+
+ return *InsertIntoBucket(TheBucket, __sanitizer::move(Key));
+ }
+
+ ValueT &operator[](KeyT &&Key) {
+ return FindAndConstruct(__sanitizer::move(Key)).second;
+ }
+
+ /// Iterate over active entries of the container.
+ ///
+  /// The callback can return false to stop the iteration early.
+ template <class Fn>
+ void forEach(Fn fn) {
+ const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
+ for (auto *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
+ const KeyT K = P->getFirst();
+ if (!KeyInfoT::isEqual(K, EmptyKey) &&
+ !KeyInfoT::isEqual(K, TombstoneKey)) {
+ if (!fn(*P))
+ return;
+ }
+ }
+ }
+
+ template <class Fn>
+ void forEach(Fn fn) const {
+ const_cast<DenseMapBase *>(this)->forEach(
+ [&](const value_type &KV) { return fn(KV); });
+ }
+
+ protected:
+ DenseMapBase() = default;
+
+ void destroyAll() {
+ if (getNumBuckets() == 0) // Nothing to do.
+ return;
+
+ const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
+ for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
+ if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
+ !KeyInfoT::isEqual(P->getFirst(), TombstoneKey))
+ P->getSecond().~ValueT();
+ P->getFirst().~KeyT();
+ }
+ }
+
+ void initEmpty() {
+ setNumEntries(0);
+ setNumTombstones(0);
+
+ CHECK_EQ((getNumBuckets() & (getNumBuckets() - 1)), 0);
+ const KeyT EmptyKey = getEmptyKey();
+ for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
+ ::new (&B->getFirst()) KeyT(EmptyKey);
+ }
+
+ /// Returns the number of buckets to allocate to ensure that the DenseMap can
+  /// accommodate \p NumEntries without needing to grow().
+ unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {
+ // Ensure that "NumEntries * 4 < NumBuckets * 3"
+ if (NumEntries == 0)
+ return 0;
+    // +1 is required because of the strict inequality: for example, if
+    // NumEntries is 48, NumEntries * 4 / 3 is exactly 64, and 64 buckets
+    // would make "NumEntries * 4 < NumBuckets * 3" false, so we need 65.
+ return RoundUpToPowerOfTwo((NumEntries * 4 / 3 + 1) + /* NextPowerOf2 */ 1);
+ }
+
+ void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
+ initEmpty();
+
+ // Insert all the old elements.
+ const KeyT EmptyKey = getEmptyKey();
+ const KeyT TombstoneKey = getTombstoneKey();
+ for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
+ if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
+ !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
+ // Insert the key/value into the new table.
+ BucketT *DestBucket;
+ bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
+ (void)FoundVal; // silence warning.
+ CHECK(!FoundVal);
+ DestBucket->getFirst() = __sanitizer::move(B->getFirst());
+ ::new (&DestBucket->getSecond())
+ ValueT(__sanitizer::move(B->getSecond()));
+ incrementNumEntries();
+
+ // Free the value.
+ B->getSecond().~ValueT();
+ }
+ B->getFirst().~KeyT();
+ }
+ }
+
+ template <typename OtherBaseT>
+ void copyFrom(
+ const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT> &other) {
+ CHECK_NE(&other, this);
+ CHECK_EQ(getNumBuckets(), other.getNumBuckets());
+
+ setNumEntries(other.getNumEntries());
+ setNumTombstones(other.getNumTombstones());
+
+ if (__sanitizer::is_trivially_copyable<KeyT>::value &&
+ __sanitizer::is_trivially_copyable<ValueT>::value)
+ internal_memcpy(reinterpret_cast<void *>(getBuckets()),
+ other.getBuckets(), getNumBuckets() * sizeof(BucketT));
+ else
+ for (uptr i = 0; i < getNumBuckets(); ++i) {
+ ::new (&getBuckets()[i].getFirst())
+ KeyT(other.getBuckets()[i].getFirst());
+ if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) &&
+ !KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey()))
+ ::new (&getBuckets()[i].getSecond())
+ ValueT(other.getBuckets()[i].getSecond());
+ }
+ }
+
+ static unsigned getHashValue(const KeyT &Val) {
+ return KeyInfoT::getHashValue(Val);
+ }
+
+ template <typename LookupKeyT>
+ static unsigned getHashValue(const LookupKeyT &Val) {
+ return KeyInfoT::getHashValue(Val);
+ }
+
+ static const KeyT getEmptyKey() { return KeyInfoT::getEmptyKey(); }
+
+ static const KeyT getTombstoneKey() { return KeyInfoT::getTombstoneKey(); }
+
+ private:
+ unsigned getNumEntries() const {
+ return static_cast<const DerivedT *>(this)->getNumEntries();
+ }
+
+ void setNumEntries(unsigned Num) {
+ static_cast<DerivedT *>(this)->setNumEntries(Num);
+ }
+
+ void incrementNumEntries() { setNumEntries(getNumEntries() + 1); }
+
+ void decrementNumEntries() { setNumEntries(getNumEntries() - 1); }
+
+ unsigned getNumTombstones() const {
+ return static_cast<const DerivedT *>(this)->getNumTombstones();
+ }
+
+ void setNumTombstones(unsigned Num) {
+ static_cast<DerivedT *>(this)->setNumTombstones(Num);
+ }
+
+ void incrementNumTombstones() { setNumTombstones(getNumTombstones() + 1); }
+
+ void decrementNumTombstones() { setNumTombstones(getNumTombstones() - 1); }
+
+ const BucketT *getBuckets() const {
+ return static_cast<const DerivedT *>(this)->getBuckets();
+ }
+
+ BucketT *getBuckets() { return static_cast<DerivedT *>(this)->getBuckets(); }
+
+ unsigned getNumBuckets() const {
+ return static_cast<const DerivedT *>(this)->getNumBuckets();
+ }
+
+ BucketT *getBucketsEnd() { return getBuckets() + getNumBuckets(); }
+
+ const BucketT *getBucketsEnd() const {
+ return getBuckets() + getNumBuckets();
+ }
+
+ void grow(unsigned AtLeast) { static_cast<DerivedT *>(this)->grow(AtLeast); }
+
+ template <typename KeyArg, typename... ValueArgs>
+ BucketT *InsertIntoBucket(BucketT *TheBucket, KeyArg &&Key,
+ ValueArgs &&...Values) {
+ TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket);
+
+ TheBucket->getFirst() = __sanitizer::forward<KeyArg>(Key);
+ ::new (&TheBucket->getSecond())
+ ValueT(__sanitizer::forward<ValueArgs>(Values)...);
+ return TheBucket;
+ }
+
+ template <typename LookupKeyT>
+ BucketT *InsertIntoBucketWithLookup(BucketT *TheBucket, KeyT &&Key,
+ ValueT &&Value, LookupKeyT &Lookup) {
+ TheBucket = InsertIntoBucketImpl(Key, Lookup, TheBucket);
+
+ TheBucket->getFirst() = __sanitizer::move(Key);
+ ::new (&TheBucket->getSecond()) ValueT(__sanitizer::move(Value));
+ return TheBucket;
+ }
+
+ template <typename LookupKeyT>
+ BucketT *InsertIntoBucketImpl(const KeyT &Key, const LookupKeyT &Lookup,
+ BucketT *TheBucket) {
+ // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
+ // the buckets are empty (meaning that many are filled with tombstones),
+ // grow the table.
+ //
+ // The latter case is tricky. For example, if we had one empty bucket with
+ // tons of tombstones, failing lookups (e.g. for insertion) would have to
+ // probe almost the entire table until they found the empty bucket. If the
+ // table were completely filled with tombstones, no lookup would ever
+ // succeed, causing infinite loops in lookup.
+ unsigned NewNumEntries = getNumEntries() + 1;
+ unsigned NumBuckets = getNumBuckets();
+ if (UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
+ this->grow(NumBuckets * 2);
+ LookupBucketFor(Lookup, TheBucket);
+ NumBuckets = getNumBuckets();
+ } else if (UNLIKELY(NumBuckets - (NewNumEntries + getNumTombstones()) <=
+ NumBuckets / 8)) {
+ this->grow(NumBuckets);
+ LookupBucketFor(Lookup, TheBucket);
+ }
+ CHECK(TheBucket);
+
+ // Only update the state after we've grown our bucket space appropriately
+ // so that when growing buckets we have self-consistent entry count.
+ incrementNumEntries();
+
+ // If we are writing over a tombstone, remember this.
+ const KeyT EmptyKey = getEmptyKey();
+ if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
+ decrementNumTombstones();
+
+ return TheBucket;
+ }
+
+ /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
+ /// FoundBucket. If the bucket contains the key and a value, this returns
+ /// true, otherwise it returns a bucket with an empty marker or tombstone and
+ /// returns false.
+ template <typename LookupKeyT>
+ bool LookupBucketFor(const LookupKeyT &Val,
+ const BucketT *&FoundBucket) const {
+ const BucketT *BucketsPtr = getBuckets();
+ const unsigned NumBuckets = getNumBuckets();
+
+ if (NumBuckets == 0) {
+ FoundBucket = nullptr;
+ return false;
+ }
+
+ // FoundTombstone - Keep track of whether we find a tombstone while probing.
+ const BucketT *FoundTombstone = nullptr;
+ const KeyT EmptyKey = getEmptyKey();
+ const KeyT TombstoneKey = getTombstoneKey();
+ CHECK(!KeyInfoT::isEqual(Val, EmptyKey));
+ CHECK(!KeyInfoT::isEqual(Val, TombstoneKey));
+
+ unsigned BucketNo = getHashValue(Val) & (NumBuckets - 1);
+ unsigned ProbeAmt = 1;
+ while (true) {
+ const BucketT *ThisBucket = BucketsPtr + BucketNo;
+ // Found Val's bucket? If so, return it.
+ if (LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
+ FoundBucket = ThisBucket;
+ return true;
+ }
+
+ // If we found an empty bucket, the key doesn't exist in the map.
+ // Return this bucket (or an earlier tombstone) for the caller to fill.
+ if (LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
+ // If we've already seen a tombstone while probing, fill it in instead
+ // of the empty bucket we eventually probed to.
+ FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
+ return false;
+ }
+
+ // If this is a tombstone, remember it. If Val ends up not in the map, we
+ // prefer to return it rather than something that would require more
+ // probing.
+ if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
+ !FoundTombstone)
+ FoundTombstone = ThisBucket; // Remember the first tombstone found.
+
+ // Otherwise, it's a hash collision or a tombstone, continue quadratic
+ // probing.
+ BucketNo += ProbeAmt++;
+ BucketNo &= (NumBuckets - 1);
+ }
+ }
+
+ template <typename LookupKeyT>
+ bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
+ const BucketT *ConstFoundBucket;
+ bool Result = const_cast<const DenseMapBase *>(this)->LookupBucketFor(
+ Val, ConstFoundBucket);
+ FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
+ return Result;
+ }
+
+ public:
+ /// Return the approximate size (in bytes) of the actual map.
+ /// This is just the raw memory used by DenseMap.
+/// If entries are pointers to objects, the sizes of the referenced objects
+/// are not included.
+ uptr getMemorySize() const {
+ return RoundUpTo(getNumBuckets() * sizeof(BucketT), GetPageSizeCached());
+ }
+};
+
+/// Equality comparison for DenseMap.
+///
+/// Iterates over elements of LHS confirming that each (key, value) pair in LHS
+/// is also in RHS, and that no additional pairs are in RHS.
+/// Equivalent to N calls to RHS.find and N value comparisons. Amortized
+/// complexity is linear, worst case is O(N^2) (if every hash collides).
+template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
+ typename BucketT>
+bool operator==(
+ const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
+ const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
+ if (LHS.size() != RHS.size())
+ return false;
+
+ bool R = true;
+ LHS.forEach(
+ [&](const typename DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT,
+ BucketT>::value_type &KV) -> bool {
+ const auto *I = RHS.find(KV.first);
+ if (!I || I->second != KV.second) {
+ R = false;
+ return false;
+ }
+ return true;
+ });
+
+ return R;
+}
+
+/// Inequality comparison for DenseMap.
+///
+/// Equivalent to !(LHS == RHS). See operator== for performance notes.
+template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
+ typename BucketT>
+bool operator!=(
+ const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
+ const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
+ return !(LHS == RHS);
+}
+
+template <typename KeyT, typename ValueT,
+ typename KeyInfoT = DenseMapInfo<KeyT>,
+ typename BucketT = detail::DenseMapPair<KeyT, ValueT>>
+class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
+ KeyT, ValueT, KeyInfoT, BucketT> {
+ friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
+
+ // Lift some types from the dependent base class into this class for
+ // simplicity of referring to them.
+ using BaseT = DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
+
+ BucketT *Buckets = nullptr;
+ unsigned NumEntries = 0;
+ unsigned NumTombstones = 0;
+ unsigned NumBuckets = 0;
+
+ public:
+ /// Create a DenseMap with an optional \p InitialReserve that guarantees that
+ /// this number of elements can be inserted into the map without growing.
+ explicit DenseMap(unsigned InitialReserve) { init(InitialReserve); }
+ constexpr DenseMap() = default;
+
+ DenseMap(const DenseMap &other) : BaseT() {
+ init(0);
+ copyFrom(other);
+ }
+
+ DenseMap(DenseMap &&other) : BaseT() {
+ init(0);
+ swap(other);
+ }
+
+ ~DenseMap() {
+ this->destroyAll();
+ deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets);
+ }
+
+ void swap(DenseMap &RHS) {
+ Swap(Buckets, RHS.Buckets);
+ Swap(NumEntries, RHS.NumEntries);
+ Swap(NumTombstones, RHS.NumTombstones);
+ Swap(NumBuckets, RHS.NumBuckets);
+ }
+
+ DenseMap &operator=(const DenseMap &other) {
+ if (&other != this)
+ copyFrom(other);
+ return *this;
+ }
+
+ DenseMap &operator=(DenseMap &&other) {
+ this->destroyAll();
+ deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets);
+ init(0);
+ swap(other);
+ return *this;
+ }
+
+ void copyFrom(const DenseMap &other) {
+ this->destroyAll();
+ deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets);
+ if (allocateBuckets(other.NumBuckets)) {
+ this->BaseT::copyFrom(other);
+ } else {
+ NumEntries = 0;
+ NumTombstones = 0;
+ }
+ }
+
+ void init(unsigned InitNumEntries) {
+ auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
+ if (allocateBuckets(InitBuckets)) {
+ this->BaseT::initEmpty();
+ } else {
+ NumEntries = 0;
+ NumTombstones = 0;
+ }
+ }
+
+ void grow(unsigned AtLeast) {
+ unsigned OldNumBuckets = NumBuckets;
+ BucketT *OldBuckets = Buckets;
+
+ allocateBuckets(RoundUpToPowerOfTwo(Max<unsigned>(64, AtLeast)));
+ CHECK(Buckets);
+ if (!OldBuckets) {
+ this->BaseT::initEmpty();
+ return;
+ }
+
+ this->moveFromOldBuckets(OldBuckets, OldBuckets + OldNumBuckets);
+
+ // Free the old table.
+ deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets);
+ }
+
+ private:
+ unsigned getNumEntries() const { return NumEntries; }
+
+ void setNumEntries(unsigned Num) { NumEntries = Num; }
+
+ unsigned getNumTombstones() const { return NumTombstones; }
+
+ void setNumTombstones(unsigned Num) { NumTombstones = Num; }
+
+ BucketT *getBuckets() const { return Buckets; }
+
+ unsigned getNumBuckets() const { return NumBuckets; }
+
+ bool allocateBuckets(unsigned Num) {
+ NumBuckets = Num;
+ if (NumBuckets == 0) {
+ Buckets = nullptr;
+ return false;
+ }
+
+ uptr Size = sizeof(BucketT) * NumBuckets;
+ if (Size * 2 <= GetPageSizeCached()) {
+ // We always allocate at least a page, so use the entire space.
+ unsigned Log2 = MostSignificantSetBitIndex(GetPageSizeCached() / Size);
+ Size <<= Log2;
+ NumBuckets <<= Log2;
+ CHECK_EQ(Size, sizeof(BucketT) * NumBuckets);
+ CHECK_GT(Size * 2, GetPageSizeCached());
+ }
+ Buckets = static_cast<BucketT *>(allocate_buffer(Size));
+ return true;
+ }
+
+ static void *allocate_buffer(uptr Size) {
+ return MmapOrDie(RoundUpTo(Size, GetPageSizeCached()), "DenseMap");
+ }
+
+ static void deallocate_buffer(void *Ptr, uptr Size) {
+ UnmapOrDie(Ptr, RoundUpTo(Size, GetPageSizeCached()));
+ }
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_DENSE_MAP_H
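As a quick orientation for the API above, a minimal usage sketch follows; the function name and the key/value constants are hypothetical, and the snippet assumes it is compiled inside the sanitizer runtime where CHECK is available:

#include "sanitizer_dense_map.h"

namespace __sanitizer {

void DenseMapUsageSketch() {
  // Reserve room for 16 entries up front to avoid early grow() calls.
  DenseMap<uptr, u32> map(/*InitialReserve=*/16);

  map[0x1000] = 1;  // operator[] default-constructs the value, then assigns.

  // try_emplace returns {bucket, inserted} and never overwrites an entry.
  auto res = map.try_emplace(uptr(0x2000), 2u);
  CHECK(res.second);

  if (auto *kv = map.find(uptr(0x1000)))  // find() yields a bucket or nullptr.
    kv->second++;

  map.erase(uptr(0x2000));  // Leaves a tombstone; the table never shrinks.

  map.forEach([](detail::DenseMapPair<uptr, u32> &kv) {
    return kv.second != 0;  // Returning false stops the iteration early.
  });
}

}  // namespace __sanitizer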
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_dense_map_info.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_dense_map_info.h
new file mode 100644
index 000000000000..f4640369ae58
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_dense_map_info.h
@@ -0,0 +1,282 @@
+//===- sanitizer_dense_map_info.h - Type traits for DenseMap ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_DENSE_MAP_INFO_H
+#define SANITIZER_DENSE_MAP_INFO_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_type_traits.h"
+
+namespace __sanitizer {
+
+namespace detail {
+
+/// Simplistic combination of 32-bit hash values into 32-bit hash values.
+static constexpr unsigned combineHashValue(unsigned a, unsigned b) {
+ u64 key = (u64)a << 32 | (u64)b;
+ key += ~(key << 32);
+ key ^= (key >> 22);
+ key += ~(key << 13);
+ key ^= (key >> 8);
+ key += (key << 3);
+ key ^= (key >> 15);
+ key += ~(key << 27);
+ key ^= (key >> 31);
+ return (unsigned)key;
+}
+
+// We extend a pair to allow users to override the bucket type with their own
+// implementation without requiring two members.
+template <typename KeyT, typename ValueT>
+struct DenseMapPair {
+ KeyT first = {};
+ ValueT second = {};
+ constexpr DenseMapPair() = default;
+ constexpr DenseMapPair(const KeyT &f, const ValueT &s)
+ : first(f), second(s) {}
+
+ template <typename KeyT2, typename ValueT2>
+ constexpr DenseMapPair(KeyT2 &&f, ValueT2 &&s)
+ : first(__sanitizer::forward<KeyT2>(f)),
+ second(__sanitizer::forward<ValueT2>(s)) {}
+
+ constexpr DenseMapPair(const DenseMapPair &other) = default;
+ constexpr DenseMapPair &operator=(const DenseMapPair &other) = default;
+ constexpr DenseMapPair(DenseMapPair &&other) = default;
+ constexpr DenseMapPair &operator=(DenseMapPair &&other) = default;
+
+ KeyT &getFirst() { return first; }
+ const KeyT &getFirst() const { return first; }
+ ValueT &getSecond() { return second; }
+ const ValueT &getSecond() const { return second; }
+};
+
+} // end namespace detail
+
+template <typename T>
+struct DenseMapInfo {
+ // static T getEmptyKey();
+ // static T getTombstoneKey();
+ // static unsigned getHashValue(const T &Val);
+ // static bool isEqual(const T &LHS, const T &RHS);
+};
+
+// Provide DenseMapInfo for all pointers. Come up with sentinel pointer values
+// that are aligned to alignof(T) bytes, but try to avoid requiring T to be
+// complete. This allows clients to instantiate DenseMap<T*, ...> with forward
+// declared key types. Assume that no pointer key type requires more than 4096
+// bytes of alignment.
+template <typename T>
+struct DenseMapInfo<T *> {
+ // The following should hold, but it would require T to be complete:
+ // static_assert(alignof(T) <= (1 << Log2MaxAlign),
+ // "DenseMap does not support pointer keys requiring more than "
+ // "Log2MaxAlign bits of alignment");
+ static constexpr uptr Log2MaxAlign = 12;
+
+ static constexpr T *getEmptyKey() {
+ uptr Val = static_cast<uptr>(-1);
+ Val <<= Log2MaxAlign;
+ return reinterpret_cast<T *>(Val);
+ }
+
+ static constexpr T *getTombstoneKey() {
+ uptr Val = static_cast<uptr>(-2);
+ Val <<= Log2MaxAlign;
+ return reinterpret_cast<T *>(Val);
+ }
+
+ static constexpr unsigned getHashValue(const T *PtrVal) {
+ return (unsigned((uptr)PtrVal) >> 4) ^ (unsigned((uptr)PtrVal) >> 9);
+ }
+
+ static constexpr bool isEqual(const T *LHS, const T *RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for chars.
+template <>
+struct DenseMapInfo<char> {
+ static constexpr char getEmptyKey() { return ~0; }
+ static constexpr char getTombstoneKey() { return ~0 - 1; }
+ static constexpr unsigned getHashValue(const char &Val) { return Val * 37U; }
+
+ static constexpr bool isEqual(const char &LHS, const char &RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for unsigned chars.
+template <>
+struct DenseMapInfo<unsigned char> {
+ static constexpr unsigned char getEmptyKey() { return ~0; }
+ static constexpr unsigned char getTombstoneKey() { return ~0 - 1; }
+ static constexpr unsigned getHashValue(const unsigned char &Val) {
+ return Val * 37U;
+ }
+
+ static constexpr bool isEqual(const unsigned char &LHS,
+ const unsigned char &RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for unsigned shorts.
+template <>
+struct DenseMapInfo<unsigned short> {
+ static constexpr unsigned short getEmptyKey() { return 0xFFFF; }
+ static constexpr unsigned short getTombstoneKey() { return 0xFFFF - 1; }
+ static constexpr unsigned getHashValue(const unsigned short &Val) {
+ return Val * 37U;
+ }
+
+ static constexpr bool isEqual(const unsigned short &LHS,
+ const unsigned short &RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for unsigned ints.
+template <>
+struct DenseMapInfo<unsigned> {
+ static constexpr unsigned getEmptyKey() { return ~0U; }
+ static constexpr unsigned getTombstoneKey() { return ~0U - 1; }
+ static constexpr unsigned getHashValue(const unsigned &Val) {
+ return Val * 37U;
+ }
+
+ static constexpr bool isEqual(const unsigned &LHS, const unsigned &RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for unsigned longs.
+template <>
+struct DenseMapInfo<unsigned long> {
+ static constexpr unsigned long getEmptyKey() { return ~0UL; }
+ static constexpr unsigned long getTombstoneKey() { return ~0UL - 1L; }
+
+ static constexpr unsigned getHashValue(const unsigned long &Val) {
+ return (unsigned)(Val * 37UL);
+ }
+
+ static constexpr bool isEqual(const unsigned long &LHS,
+ const unsigned long &RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for unsigned long longs.
+template <>
+struct DenseMapInfo<unsigned long long> {
+ static constexpr unsigned long long getEmptyKey() { return ~0ULL; }
+ static constexpr unsigned long long getTombstoneKey() { return ~0ULL - 1ULL; }
+
+ static constexpr unsigned getHashValue(const unsigned long long &Val) {
+ return (unsigned)(Val * 37ULL);
+ }
+
+ static constexpr bool isEqual(const unsigned long long &LHS,
+ const unsigned long long &RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for shorts.
+template <>
+struct DenseMapInfo<short> {
+ static constexpr short getEmptyKey() { return 0x7FFF; }
+ static constexpr short getTombstoneKey() { return -0x7FFF - 1; }
+ static constexpr unsigned getHashValue(const short &Val) { return Val * 37U; }
+ static constexpr bool isEqual(const short &LHS, const short &RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for ints.
+template <>
+struct DenseMapInfo<int> {
+ static constexpr int getEmptyKey() { return 0x7fffffff; }
+ static constexpr int getTombstoneKey() { return -0x7fffffff - 1; }
+ static constexpr unsigned getHashValue(const int &Val) {
+ return (unsigned)(Val * 37U);
+ }
+
+ static constexpr bool isEqual(const int &LHS, const int &RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for longs.
+template <>
+struct DenseMapInfo<long> {
+ static constexpr long getEmptyKey() {
+ return (1UL << (sizeof(long) * 8 - 1)) - 1UL;
+ }
+
+ static constexpr long getTombstoneKey() { return getEmptyKey() - 1L; }
+
+ static constexpr unsigned getHashValue(const long &Val) {
+ return (unsigned)(Val * 37UL);
+ }
+
+ static constexpr bool isEqual(const long &LHS, const long &RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for long longs.
+template <>
+struct DenseMapInfo<long long> {
+ static constexpr long long getEmptyKey() { return 0x7fffffffffffffffLL; }
+ static constexpr long long getTombstoneKey() {
+ return -0x7fffffffffffffffLL - 1;
+ }
+
+ static constexpr unsigned getHashValue(const long long &Val) {
+ return (unsigned)(Val * 37ULL);
+ }
+
+ static constexpr bool isEqual(const long long &LHS, const long long &RHS) {
+ return LHS == RHS;
+ }
+};
+
+// Provide DenseMapInfo for all pairs whose members have info.
+template <typename T, typename U>
+struct DenseMapInfo<detail::DenseMapPair<T, U>> {
+ using Pair = detail::DenseMapPair<T, U>;
+ using FirstInfo = DenseMapInfo<T>;
+ using SecondInfo = DenseMapInfo<U>;
+
+ static constexpr Pair getEmptyKey() {
+ return detail::DenseMapPair<T, U>(FirstInfo::getEmptyKey(),
+ SecondInfo::getEmptyKey());
+ }
+
+ static constexpr Pair getTombstoneKey() {
+ return detail::DenseMapPair<T, U>(FirstInfo::getTombstoneKey(),
+ SecondInfo::getTombstoneKey());
+ }
+
+ static constexpr unsigned getHashValue(const Pair &PairVal) {
+ return detail::combineHashValue(FirstInfo::getHashValue(PairVal.first),
+ SecondInfo::getHashValue(PairVal.second));
+ }
+
+ static constexpr bool isEqual(const Pair &LHS, const Pair &RHS) {
+ return FirstInfo::isEqual(LHS.first, RHS.first) &&
+ SecondInfo::isEqual(LHS.second, RHS.second);
+ }
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_DENSE_MAP_INFO_H
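The commented-out members of the primary template spell out the contract a specialization must meet. A sketch for a hypothetical user-defined key type (the ThreadId wrapper is invented for illustration; the 37U multiplier mirrors the integral specializations above):

namespace __sanitizer {

struct ThreadId {
  u32 raw;
};

template <>
struct DenseMapInfo<ThreadId> {
  // Two distinct sentinels that must never collide with real keys.
  static constexpr ThreadId getEmptyKey() { return {~0u}; }
  static constexpr ThreadId getTombstoneKey() { return {~0u - 1}; }

  static constexpr unsigned getHashValue(const ThreadId &Val) {
    return Val.raw * 37U;
  }

  static constexpr bool isEqual(const ThreadId &LHS, const ThreadId &RHS) {
    return LHS.raw == RHS.raw;
  }
};

}  // namespace __sanitizer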
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_dl.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_dl.cpp
new file mode 100644
index 000000000000..e957d529c2fe
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_dl.cpp
@@ -0,0 +1,37 @@
+//===-- sanitizer_dl.cpp --------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file has helper functions that depend on libc's dynamic loading
+// introspection.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_dl.h"
+
+#include "sanitizer_common/sanitizer_platform.h"
+
+#if SANITIZER_GLIBC
+# include <dlfcn.h>
+#endif
+
+namespace __sanitizer {
+extern const char *SanitizerToolName;
+
+const char *DladdrSelfFName(void) {
+#if SANITIZER_GLIBC
+ Dl_info info;
+ int ret = dladdr((void *)&SanitizerToolName, &info);
+ if (ret) {
+ return info.dli_fname;
+ }
+#endif
+
+ return nullptr;
+}
+
+} // namespace __sanitizer
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_dl.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_dl.h
new file mode 100644
index 000000000000..ecde0664eb04
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_dl.h
@@ -0,0 +1,26 @@
+//===-- sanitizer_dl.h ----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file has helper functions that depend on libc's dynamic loading
+// introspection.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_DL_H
+#define SANITIZER_DL_H
+
+namespace __sanitizer {
+
+// Returns the path to the shared object that contains the sanitizer, or, in
+// the case of statically linked sanitizers, the path to the main program
+// itself.
+const char* DladdrSelfFName(void);
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_DL_H
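A sketch of the intended call pattern, assuming the usual runtime Report helper; the wrapper function is hypothetical:

#include "sanitizer_common.h"  // For Report().
#include "sanitizer_dl.h"

namespace __sanitizer {

void ReportRuntimeLocation() {
  // Null on platforms where dladdr-based lookup is not wired up.
  const char *fname = DladdrSelfFName();
  Report("Sanitizer runtime located in: %s\n", fname ? fname : "<unknown>");
}

}  // namespace __sanitizer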
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_errno.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_errno.h
index 70a6e88dbaad..46c85364cef5 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_errno.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_errno.h
@@ -21,7 +21,7 @@
#include "sanitizer_errno_codes.h"
#include "sanitizer_platform.h"
-#if SANITIZER_FREEBSD || SANITIZER_MAC
+#if SANITIZER_FREEBSD || SANITIZER_APPLE
# define __errno_location __error
#elif SANITIZER_ANDROID || SANITIZER_NETBSD
# define __errno_location __errno
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_errno_codes.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_errno_codes.h
index 192e9392d494..3917b2817f2e 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_errno_codes.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_errno_codes.h
@@ -25,6 +25,7 @@ namespace __sanitizer {
#define errno_EBUSY 16
#define errno_EINVAL 22
#define errno_ENAMETOOLONG 36
+#define errno_ENOSYS 38
// Those might not be present, or their values may differ, on different platforms.
extern const int errno_EOWNERDEAD;
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_file.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_file.cpp
index 0b92dccde4a1..7ef499ce07b1 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_file.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_file.cpp
@@ -19,6 +19,7 @@
#include "sanitizer_common.h"
#include "sanitizer_file.h"
+# include "sanitizer_interface_internal.h"
namespace __sanitizer {
@@ -75,6 +76,24 @@ void ReportFile::ReopenIfNecessary() {
fd_pid = pid;
}
+static void RecursiveCreateParentDirs(char *path) {
+ if (path[0] == '\0')
+ return;
+ for (int i = 1; path[i] != '\0'; ++i) {
+ char save = path[i];
+ if (!IsPathSeparator(path[i]))
+ continue;
+ path[i] = '\0';
+ if (!DirExists(path) && !CreateDir(path)) {
+ const char *ErrorMsgPrefix = "ERROR: Can't create directory: ";
+ WriteToFile(kStderrFd, ErrorMsgPrefix, internal_strlen(ErrorMsgPrefix));
+ WriteToFile(kStderrFd, path, internal_strlen(path));
+ Die();
+ }
+ path[i] = save;
+ }
+}
+
void ReportFile::SetReportPath(const char *path) {
if (path) {
uptr len = internal_strlen(path);
@@ -95,6 +114,7 @@ void ReportFile::SetReportPath(const char *path) {
fd = kStdoutFd;
} else {
internal_snprintf(path_prefix, kMaxPathLength, "%s", path);
+ RecursiveCreateParentDirs(path_prefix);
}
}
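RecursiveCreateParentDirs works by temporarily NUL-terminating the path at each separator, creating that prefix, and restoring the character. A standalone sketch of the same walk, using plain POSIX mkdir in place of the internal DirExists/CreateDir wrappers:

#include <stddef.h>
#include <sys/stat.h>

// Sketch only: mkdir failures (other than the directory already existing)
// are silently ignored here, whereas the runtime version reports and dies.
static void CreateParentDirsSketch(char *path) {
  for (size_t i = 1; path[i] != '\0'; ++i) {
    if (path[i] != '/')
      continue;
    path[i] = '\0';     // Temporarily truncate at the separator...
    mkdir(path, 0755);  // ...create the prefix directory...
    path[i] = '/';      // ...then restore the full path.
  }
}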
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_file.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_file.h
index 08671ab67d0f..bef2c842d9f2 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_file.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_file.h
@@ -15,7 +15,7 @@
#ifndef SANITIZER_FILE_H
#define SANITIZER_FILE_H
-#include "sanitizer_interface_internal.h"
+#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"
@@ -78,10 +78,13 @@ bool SupportsColoredOutput(fd_t fd);
// OS
const char *GetPwd();
bool FileExists(const char *filename);
+bool DirExists(const char *path);
char *FindPathToBinary(const char *name);
bool IsPathSeparator(const char c);
bool IsAbsolutePath(const char *path);
-// Starts a subprocess and returs its pid.
+// Returns true on success, false on failure.
+bool CreateDir(const char *pathname);
+// Starts a subprocess and returns its pid.
// If *_fd parameters are not kInvalidFd their corresponding input/output
// streams will be redirect to the file. The files will always be closed
// in parent process even in case of an error.
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.cpp
index 9e274268bf2a..ca37df348580 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.cpp
@@ -13,14 +13,12 @@
#include "sanitizer_flag_parser.h"
#include "sanitizer_common.h"
-#include "sanitizer_libc.h"
-#include "sanitizer_flags.h"
#include "sanitizer_flag_parser.h"
+#include "sanitizer_flags.h"
+#include "sanitizer_libc.h"
namespace __sanitizer {
-LowLevelAllocator FlagParser::Alloc;
-
class UnknownFlags {
static const int kMaxUnknownFlags = 20;
const char *unknown_flags_[kMaxUnknownFlags];
@@ -49,7 +47,7 @@ void ReportUnrecognizedFlags() {
char *FlagParser::ll_strndup(const char *s, uptr n) {
uptr len = internal_strnlen(s, n);
- char *s2 = (char*)Alloc.Allocate(len + 1);
+ char *s2 = (char *)GetGlobalLowLevelAllocator().Allocate(len + 1);
internal_memcpy(s2, s, len);
s2[len] = 0;
return s2;
@@ -185,7 +183,8 @@ void FlagParser::RegisterHandler(const char *name, FlagHandlerBase *handler,
}
FlagParser::FlagParser() : n_flags_(0), buf_(nullptr), pos_(0) {
- flags_ = (Flag *)Alloc.Allocate(sizeof(Flag) * kMaxFlags);
+ flags_ =
+ (Flag *)GetGlobalLowLevelAllocator().Allocate(sizeof(Flag) * kMaxFlags);
}
} // namespace __sanitizer
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.h
index acc71ccd89ee..dccdee4da2bd 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flag_parser.h
@@ -13,9 +13,9 @@
#ifndef SANITIZER_FLAG_REGISTRY_H
#define SANITIZER_FLAG_REGISTRY_H
+#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
-#include "sanitizer_common.h"
namespace __sanitizer {
@@ -138,7 +138,7 @@ inline bool FlagHandler<uptr>::Parse(const char *value) {
template <>
inline bool FlagHandler<uptr>::Format(char *buffer, uptr size) {
- uptr num_symbols_should_write = internal_snprintf(buffer, size, "%p", *t_);
+ uptr num_symbols_should_write = internal_snprintf(buffer, size, "0x%zx", *t_);
return num_symbols_should_write < size;
}
@@ -178,8 +178,6 @@ class FlagParser {
bool ParseFile(const char *path, bool ignore_missing);
void PrintFlagDescriptions();
- static LowLevelAllocator Alloc;
-
private:
void fatal_error(const char *err);
bool is_space(char c);
@@ -193,7 +191,7 @@ class FlagParser {
template <typename T>
static void RegisterFlag(FlagParser *parser, const char *name, const char *desc,
T *var) {
- FlagHandler<T> *fh = new (FlagParser::Alloc) FlagHandler<T>(var);
+ FlagHandler<T> *fh = new (GetGlobalLowLevelAllocator()) FlagHandler<T>(var);
parser->RegisterHandler(name, fh, desc);
}
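With FlagParser::Alloc gone, registration placement-news handlers into the global low-level allocator. A sketch of tool-side registration (the flag name and backing variable are hypothetical):

#include "sanitizer_flag_parser.h"

namespace __sanitizer {

static bool my_tool_flag = false;  // Hypothetical backing store.

void RegisterToolFlags(FlagParser *parser) {
  // FlagHandler<bool> is placement-new'd into the global low-level allocator.
  RegisterFlag(parser, "my_tool_flag", "Demo flag for this sketch.",
               &my_tool_flag);
}

}  // namespace __sanitizer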
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flags.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flags.cpp
index d52e96a7c381..849a122386a4 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flags.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flags.cpp
@@ -108,11 +108,11 @@ class FlagHandlerInclude final : public FlagHandlerBase {
};
void RegisterIncludeFlags(FlagParser *parser, CommonFlags *cf) {
- FlagHandlerInclude *fh_include = new (FlagParser::Alloc)
+ FlagHandlerInclude *fh_include = new (GetGlobalLowLevelAllocator())
FlagHandlerInclude(parser, /*ignore_missing*/ false);
parser->RegisterHandler("include", fh_include,
"read more options from the given file");
- FlagHandlerInclude *fh_include_if_exists = new (FlagParser::Alloc)
+ FlagHandlerInclude *fh_include_if_exists = new (GetGlobalLowLevelAllocator())
FlagHandlerInclude(parser, /*ignore_missing*/ true);
parser->RegisterHandler(
"include_if_exists", fh_include_if_exists,
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flags.inc b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flags.inc
index 3bc44c6b1eb1..c1e3530618c2 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flags.inc
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flags.inc
@@ -62,16 +62,19 @@ COMMON_FLAG(
COMMON_FLAG(const char *, log_suffix, nullptr,
"String to append to log file name, e.g. \".txt\".")
COMMON_FLAG(
- bool, log_to_syslog, (bool)SANITIZER_ANDROID || (bool)SANITIZER_MAC,
+ bool, log_to_syslog, (bool)SANITIZER_ANDROID || (bool)SANITIZER_APPLE,
"Write all sanitizer output to syslog in addition to other means of "
"logging.")
COMMON_FLAG(
int, verbosity, 0,
"Verbosity level (0 - silent, 1 - a bit of output, 2+ - more output).")
-COMMON_FLAG(bool, strip_env, 1,
+COMMON_FLAG(bool, strip_env, true,
"Whether to remove the sanitizer from DYLD_INSERT_LIBRARIES to "
- "avoid passing it to children. Default is true.")
-COMMON_FLAG(bool, detect_leaks, !SANITIZER_MAC, "Enable memory leak detection.")
+ "avoid passing it to children on Apple platforms. Default is true.")
+COMMON_FLAG(bool, verify_interceptors, true,
+ "Verify that interceptors are working on Apple platforms. Default "
+ "is true.")
+COMMON_FLAG(bool, detect_leaks, !SANITIZER_APPLE, "Enable memory leak detection.")
COMMON_FLAG(
bool, leak_check_at_exit, true,
"Invoke leak checking in an atexit handler. Has no effect if "
@@ -160,6 +163,10 @@ COMMON_FLAG(
COMMON_FLAG(const char *, coverage_dir, ".",
"Target directory for coverage dumps. Defaults to the current "
"directory.")
+COMMON_FLAG(const char *, cov_8bit_counters_out, "",
+            "If non-empty, write 8-bit counters to this file.")
+COMMON_FLAG(const char *, cov_pcs_out, "",
+            "If non-empty, write the coverage PC table to this file.")
COMMON_FLAG(bool, full_address_space, false,
"Sanitize complete address space; "
"by default kernel area on 32-bit platforms will not be sanitized")
@@ -175,6 +182,7 @@ COMMON_FLAG(bool, use_madv_dontdump, true,
"in core file.")
COMMON_FLAG(bool, symbolize_inline_frames, true,
"Print inlined frames in stacktraces. Defaults to true.")
+COMMON_FLAG(bool, demangle, true, "Print demangled symbols.")
COMMON_FLAG(bool, symbolize_vs_style, false,
"Print file locations in Visual Studio style (e.g: "
" file(10,42): ...")
@@ -187,6 +195,8 @@ COMMON_FLAG(const char *, stack_trace_format, "DEFAULT",
"Format string used to render stack frames. "
"See sanitizer_stacktrace_printer.h for the format description. "
"Use DEFAULT to get default format.")
+COMMON_FLAG(int, compress_stack_depot, 0,
+ "Compress stack depot to save memory.")
COMMON_FLAG(bool, no_huge_pages_for_shadow, true,
"If true, the shadow is not allowed to use huge pages. ")
COMMON_FLAG(bool, strict_string_checks, false,
@@ -238,7 +248,7 @@ COMMON_FLAG(bool, decorate_proc_maps, (bool)SANITIZER_ANDROID,
COMMON_FLAG(int, exitcode, 1, "Override the program exit status if the tool "
"found an error")
COMMON_FLAG(
- bool, abort_on_error, (bool)SANITIZER_ANDROID || (bool)SANITIZER_MAC,
+ bool, abort_on_error, (bool)SANITIZER_ANDROID || (bool)SANITIZER_APPLE,
"If set, the tool calls abort() instead of _exit() after printing the "
"error report.")
COMMON_FLAG(bool, suppress_equal_pcs, true,
@@ -259,3 +269,16 @@ COMMON_FLAG(bool, detect_write_exec, false,
COMMON_FLAG(bool, test_only_emulate_no_memorymap, false,
"TEST ONLY fail to read memory mappings to emulate sanitized "
"\"init\"")
+// With static linking, dladdr((void*)pthread_join) or similar will return the
+// path to the main program. This flag will replace dlopen(<main program>,...)
+// with dlopen(NULL,...), which is the correct way to get a handle to the main
+// program.
+COMMON_FLAG(bool, test_only_replace_dlopen_main_program, false,
+ "TEST ONLY replace dlopen(<main program>,...) with dlopen(NULL)")
+
+COMMON_FLAG(bool, enable_symbolizer_markup, SANITIZER_FUCHSIA,
+ "Use sanitizer symbolizer markup, available on Linux "
+ "and always set true for Fuchsia.")
+
+COMMON_FLAG(bool, detect_invalid_join, true,
+ "If set, check invalid joins of threads.")
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flat_map.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flat_map.h
new file mode 100644
index 000000000000..d246781fe1df
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_flat_map.h
@@ -0,0 +1,166 @@
+//===-- sanitizer_flat_map.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_FLAT_MAP_H
+#define SANITIZER_FLAT_MAP_H
+
+#include "sanitizer_atomic.h"
+#include "sanitizer_common.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_local_address_space_view.h"
+#include "sanitizer_mutex.h"
+
+namespace __sanitizer {
+
+// Maps integers in range [0, kSize) to values.
+template <typename T, u64 kSize,
+ typename AddressSpaceViewTy = LocalAddressSpaceView>
+class FlatMap {
+ public:
+ using AddressSpaceView = AddressSpaceViewTy;
+ void Init() { internal_memset(map_, 0, sizeof(map_)); }
+
+ constexpr uptr size() const { return kSize; }
+
+ bool contains(uptr idx) const {
+ CHECK_LT(idx, kSize);
+ return true;
+ }
+
+ T &operator[](uptr idx) {
+ DCHECK_LT(idx, kSize);
+ return map_[idx];
+ }
+
+ const T &operator[](uptr idx) const {
+ DCHECK_LT(idx, kSize);
+ return map_[idx];
+ }
+
+ private:
+ T map_[kSize];
+};
+
+// TwoLevelMap maps integers in range [0, kSize1*kSize2) to values.
+// It is implemented as a two-dimensional array: an array of kSize1 pointers
+// to kSize2-element arrays of T. The secondary arrays are mmapped on demand.
+// Each value is initially zero and can be set to something else only once.
+// Setting and getting values from multiple threads is safe w/o extra locking.
+template <typename T, u64 kSize1, u64 kSize2,
+ typename AddressSpaceViewTy = LocalAddressSpaceView>
+class TwoLevelMap {
+ static_assert(IsPowerOfTwo(kSize2), "Use a power of two for performance.");
+
+ public:
+ using AddressSpaceView = AddressSpaceViewTy;
+ void Init() {
+ mu_.Init();
+ internal_memset(map1_, 0, sizeof(map1_));
+ }
+
+ void TestOnlyUnmap() {
+ for (uptr i = 0; i < kSize1; i++) {
+ T *p = Get(i);
+ if (!p)
+ continue;
+ UnmapOrDie(p, kSize2);
+ }
+ Init();
+ }
+
+ uptr MemoryUsage() const {
+ uptr res = 0;
+ for (uptr i = 0; i < kSize1; i++) {
+ T *p = Get(i);
+ if (!p)
+ continue;
+ res += MmapSize();
+ }
+ return res;
+ }
+
+ constexpr uptr size() const { return kSize1 * kSize2; }
+ constexpr uptr size1() const { return kSize1; }
+ constexpr uptr size2() const { return kSize2; }
+
+ bool contains(uptr idx) const {
+ CHECK_LT(idx, kSize1 * kSize2);
+ return Get(idx / kSize2);
+ }
+
+ const T &operator[](uptr idx) const {
+ DCHECK_LT(idx, kSize1 * kSize2);
+ T *map2 = GetOrCreate(idx / kSize2);
+ return *AddressSpaceView::Load(&map2[idx % kSize2]);
+ }
+
+ T &operator[](uptr idx) {
+ DCHECK_LT(idx, kSize1 * kSize2);
+ T *map2 = GetOrCreate(idx / kSize2);
+ return *AddressSpaceView::LoadWritable(&map2[idx % kSize2]);
+ }
+
+ void Lock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { mu_.Lock(); }
+
+ void Unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { mu_.Unlock(); }
+
+ private:
+ constexpr uptr MmapSize() const {
+ return RoundUpTo(kSize2 * sizeof(T), GetPageSizeCached());
+ }
+
+ T *Get(uptr idx) const {
+ DCHECK_LT(idx, kSize1);
+ return reinterpret_cast<T *>(
+ atomic_load(&map1_[idx], memory_order_acquire));
+ }
+
+ T *GetOrCreate(uptr idx) const {
+ DCHECK_LT(idx, kSize1);
+ // This code needs to use memory_order_acquire/consume, but we use
+ // memory_order_relaxed for performance reasons (matters for arm64). We
+ // expect memory_order_relaxed to be effectively equivalent to
+ // memory_order_consume in this case for all relevant architectures: all
+ // dependent data is reachable only by dereferencing the resulting pointer.
+ // If relaxed load fails to see stored ptr, the code will fall back to
+ // Create() and reload the value again with locked mutex as a memory
+ // barrier.
+ T *res = reinterpret_cast<T *>(atomic_load_relaxed(&map1_[idx]));
+ if (LIKELY(res))
+ return res;
+ return Create(idx);
+ }
+
+ NOINLINE T *Create(uptr idx) const {
+ SpinMutexLock l(&mu_);
+ T *res = Get(idx);
+ if (!res) {
+ res = reinterpret_cast<T *>(MmapOrDie(MmapSize(), "TwoLevelMap"));
+ atomic_store(&map1_[idx], reinterpret_cast<uptr>(res),
+ memory_order_release);
+ }
+ return res;
+ }
+
+ mutable StaticSpinMutex mu_;
+ mutable atomic_uintptr_t map1_[kSize1];
+};
+
+template <u64 kSize, typename AddressSpaceViewTy = LocalAddressSpaceView>
+using FlatByteMap = FlatMap<u8, kSize, AddressSpaceViewTy>;
+
+template <u64 kSize1, u64 kSize2,
+ typename AddressSpaceViewTy = LocalAddressSpaceView>
+using TwoLevelByteMap = TwoLevelMap<u8, kSize1, kSize2, AddressSpaceViewTy>;
+} // namespace __sanitizer
+
+#endif
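A usage sketch for the two-level variant; the sizes and the global are chosen arbitrarily for illustration:

#include "sanitizer_flat_map.h"

namespace __sanitizer {

// 2^10 first-level pointers, each covering 2^10 bytes: 2^20 indices total.
static TwoLevelByteMap<1 << 10, 1 << 10> byte_map;

void ByteMapSketch() {
  byte_map.Init();
  byte_map[12345] = 1;  // First touch mmaps the covering second-level array.
  u8 value = byte_map[12345];
  (void)value;
}

}  // namespace __sanitizer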
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_freebsd.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_freebsd.h
deleted file mode 100644
index 82b227eab6da..000000000000
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_freebsd.h
+++ /dev/null
@@ -1,137 +0,0 @@
-//===-- sanitizer_freebsd.h -------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of Sanitizer runtime. It contains FreeBSD-specific
-// definitions.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef SANITIZER_FREEBSD_H
-#define SANITIZER_FREEBSD_H
-
-#include "sanitizer_internal_defs.h"
-
-// x86-64 FreeBSD 9.2 and older define 'ucontext_t' incorrectly in
-// 32-bit mode.
-#if SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32)
-#include <osreldate.h>
-#if __FreeBSD_version <= 902001 // v9.2
-#include <link.h>
-#include <sys/param.h>
-#include <ucontext.h>
-
-namespace __sanitizer {
-
-typedef unsigned long long __xuint64_t;
-
-typedef __int32_t __xregister_t;
-
-typedef struct __xmcontext {
- __xregister_t mc_onstack;
- __xregister_t mc_gs;
- __xregister_t mc_fs;
- __xregister_t mc_es;
- __xregister_t mc_ds;
- __xregister_t mc_edi;
- __xregister_t mc_esi;
- __xregister_t mc_ebp;
- __xregister_t mc_isp;
- __xregister_t mc_ebx;
- __xregister_t mc_edx;
- __xregister_t mc_ecx;
- __xregister_t mc_eax;
- __xregister_t mc_trapno;
- __xregister_t mc_err;
- __xregister_t mc_eip;
- __xregister_t mc_cs;
- __xregister_t mc_eflags;
- __xregister_t mc_esp;
- __xregister_t mc_ss;
-
- int mc_len;
- int mc_fpformat;
- int mc_ownedfp;
- __xregister_t mc_flags;
-
- int mc_fpstate[128] __aligned(16);
- __xregister_t mc_fsbase;
- __xregister_t mc_gsbase;
- __xregister_t mc_xfpustate;
- __xregister_t mc_xfpustate_len;
-
- int mc_spare2[4];
-} xmcontext_t;
-
-typedef struct __xucontext {
- sigset_t uc_sigmask;
- xmcontext_t uc_mcontext;
-
- struct __ucontext *uc_link;
- stack_t uc_stack;
- int uc_flags;
- int __spare__[4];
-} xucontext_t;
-
-struct xkinfo_vmentry {
- int kve_structsize;
- int kve_type;
- __xuint64_t kve_start;
- __xuint64_t kve_end;
- __xuint64_t kve_offset;
- __xuint64_t kve_vn_fileid;
- __uint32_t kve_vn_fsid;
- int kve_flags;
- int kve_resident;
- int kve_private_resident;
- int kve_protection;
- int kve_ref_count;
- int kve_shadow_count;
- int kve_vn_type;
- __xuint64_t kve_vn_size;
- __uint32_t kve_vn_rdev;
- __uint16_t kve_vn_mode;
- __uint16_t kve_status;
- int _kve_ispare[12];
- char kve_path[PATH_MAX];
-};
-
-typedef struct {
- __uint32_t p_type;
- __uint32_t p_offset;
- __uint32_t p_vaddr;
- __uint32_t p_paddr;
- __uint32_t p_filesz;
- __uint32_t p_memsz;
- __uint32_t p_flags;
- __uint32_t p_align;
-} XElf32_Phdr;
-
-struct xdl_phdr_info {
- Elf_Addr dlpi_addr;
- const char *dlpi_name;
- const XElf32_Phdr *dlpi_phdr;
- Elf_Half dlpi_phnum;
- unsigned long long int dlpi_adds;
- unsigned long long int dlpi_subs;
- size_t dlpi_tls_modid;
- void *dlpi_tls_data;
-};
-
-typedef int (*__xdl_iterate_hdr_callback)(struct xdl_phdr_info *, size_t,
- void *);
-typedef int xdl_iterate_phdr_t(__xdl_iterate_hdr_callback, void *);
-
-#define xdl_iterate_phdr(callback, param) \
- (((xdl_iterate_phdr_t *)dl_iterate_phdr)((callback), (param)))
-
-} // namespace __sanitizer
-
-#endif // __FreeBSD_version <= 902001
-#endif // SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32)
-
-#endif // SANITIZER_FREEBSD_H
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp
index 65bc398656c9..5f4f8c8c0078 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp
@@ -14,24 +14,25 @@
#include "sanitizer_fuchsia.h"
#if SANITIZER_FUCHSIA
-#include <pthread.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <zircon/errors.h>
-#include <zircon/process.h>
-#include <zircon/syscalls.h>
-#include <zircon/utc.h>
-
-#include "sanitizer_common.h"
-#include "sanitizer_libc.h"
-#include "sanitizer_mutex.h"
+# include <pthread.h>
+# include <stdlib.h>
+# include <unistd.h>
+# include <zircon/errors.h>
+# include <zircon/process.h>
+# include <zircon/syscalls.h>
+# include <zircon/utc.h>
+
+# include "sanitizer_common.h"
+# include "sanitizer_interface_internal.h"
+# include "sanitizer_libc.h"
+# include "sanitizer_mutex.h"
namespace __sanitizer {
void NORETURN internal__exit(int exitcode) { _zx_process_exit(exitcode); }
uptr internal_sched_yield() {
- zx_status_t status = _zx_nanosleep(0);
+ zx_status_t status = _zx_thread_legacy_yield(0u);
CHECK_EQ(status, ZX_OK);
return 0; // Why doesn't this return void?
}
@@ -86,10 +87,9 @@ void GetThreadStackTopAndBottom(bool, uptr *stack_top, uptr *stack_bottom) {
}
void InitializePlatformEarly() {}
-void MaybeReexec() {}
void CheckASLR() {}
void CheckMPROTECT() {}
-void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {}
+void PlatformPrepareForSandboxing(void *args) {}
void DisableCoreDumperIfNecessary() {}
void InstallDeadlySignalHandlers(SignalHandlerType handler) {}
void SetAlternateSignalStack() {}
@@ -112,47 +112,6 @@ void FutexWake(atomic_uint32_t *p, u32 count) {
CHECK_EQ(status, ZX_OK);
}
-enum MutexState : int { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };
-
-BlockingMutex::BlockingMutex() {
- // NOTE! It's important that this use internal_memset, because plain
- // memset might be intercepted (e.g., actually be __asan_memset).
- // Defining this so the compiler initializes each field, e.g.:
- // BlockingMutex::BlockingMutex() : BlockingMutex(LINKER_INITIALIZED) {}
- // might result in the compiler generating a call to memset, which would
- // have the same problem.
- internal_memset(this, 0, sizeof(*this));
-}
-
-void BlockingMutex::Lock() {
- CHECK_EQ(owner_, 0);
- atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
- if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
- return;
- while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) {
- zx_status_t status =
- _zx_futex_wait(reinterpret_cast<zx_futex_t *>(m), MtxSleeping,
- ZX_HANDLE_INVALID, ZX_TIME_INFINITE);
- if (status != ZX_ERR_BAD_STATE) // Normal race.
- CHECK_EQ(status, ZX_OK);
- }
-}
-
-void BlockingMutex::Unlock() {
- atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
- u32 v = atomic_exchange(m, MtxUnlocked, memory_order_release);
- CHECK_NE(v, MtxUnlocked);
- if (v == MtxSleeping) {
- zx_status_t status = _zx_futex_wake(reinterpret_cast<zx_futex_t *>(m), 1);
- CHECK_EQ(status, ZX_OK);
- }
-}
-
-void BlockingMutex::CheckLocked() const {
- auto m = reinterpret_cast<atomic_uint32_t const *>(&opaque_storage_);
- CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
-}
-
uptr GetPageSize() { return _zx_system_get_page_size(); }
uptr GetMmapGranularity() { return _zx_system_get_page_size(); }
@@ -168,6 +127,62 @@ uptr GetMaxUserVirtualAddress() {
uptr GetMaxVirtualAddress() { return GetMaxUserVirtualAddress(); }
+bool ErrorIsOOM(error_t err) { return err == ZX_ERR_NO_MEMORY; }
+
+// For any sanitizer internal that needs to map something which can be unmapped
+// later, first attempt to map to a pre-allocated VMAR. This helps reduce
+// fragmentation from many small anonymous mmap calls. A good value for this
+// VMAR size would be the total size of your typical sanitizer internal objects
+// allocated in an "average" process lifetime. Examples of this include:
+// FakeStack, LowLevelAllocator mappings, TwoLevelMap, InternalMmapVector,
+// StackStore, CreateAsanThread, etc.
+//
+// This is roughly equal to the total sum of sanitizer internal mappings for a
+// large test case.
+constexpr size_t kSanitizerHeapVmarSize = 13ULL << 20;
+static zx_handle_t gSanitizerHeapVmar = ZX_HANDLE_INVALID;
+
+static zx_status_t GetSanitizerHeapVmar(zx_handle_t *vmar) {
+ zx_status_t status = ZX_OK;
+ if (gSanitizerHeapVmar == ZX_HANDLE_INVALID) {
+ CHECK_EQ(kSanitizerHeapVmarSize % GetPageSizeCached(), 0);
+ uintptr_t base;
+ status = _zx_vmar_allocate(
+ _zx_vmar_root_self(),
+ ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC, 0,
+ kSanitizerHeapVmarSize, &gSanitizerHeapVmar, &base);
+ }
+ *vmar = gSanitizerHeapVmar;
+ if (status == ZX_OK)
+ CHECK_NE(gSanitizerHeapVmar, ZX_HANDLE_INVALID);
+ return status;
+}
+
+static zx_status_t TryVmoMapSanitizerVmar(zx_vm_option_t options,
+ size_t vmar_offset, zx_handle_t vmo,
+ size_t size, uintptr_t *addr,
+ zx_handle_t *vmar_used = nullptr) {
+ zx_handle_t vmar;
+ zx_status_t status = GetSanitizerHeapVmar(&vmar);
+ if (status != ZX_OK)
+ return status;
+
+ status = _zx_vmar_map(gSanitizerHeapVmar, options, vmar_offset, vmo,
+ /*vmo_offset=*/0, size, addr);
+ if (vmar_used)
+ *vmar_used = gSanitizerHeapVmar;
+ if (status == ZX_ERR_NO_RESOURCES || status == ZX_ERR_INVALID_ARGS) {
+    // This means there's no space in the heap VMAR, so fall back to the
+    // root VMAR.
+ status = _zx_vmar_map(_zx_vmar_root_self(), options, vmar_offset, vmo,
+ /*vmo_offset=*/0, size, addr);
+ if (vmar_used)
+ *vmar_used = _zx_vmar_root_self();
+ }
+
+ return status;
+}
+
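A hedged sketch of the call pattern this helper enables (the MapInternalBuffer wrapper below is hypothetical and not part of the patch; TryVmoMapSanitizerVmar and UnmapOrDieVmar are the functions from this hunk):

    // Map a VMO read/write, recording which VMAR actually served the
    // mapping so the eventual unmap can target the same region.
    static void *MapInternalBuffer(zx_handle_t vmo, size_t size,
                                   zx_handle_t *vmar_used) {
      uintptr_t addr = 0;
      zx_status_t status =
          TryVmoMapSanitizerVmar(ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                                 /*vmar_offset=*/0, vmo, size, &addr, vmar_used);
      return status == ZX_OK ? reinterpret_cast<void *>(addr) : nullptr;
    }

The matching release goes through UnmapOrDieVmar(addr, size, *vmar_used), which, as a later hunk shows, retries against the root VMAR when the heap VMAR does not own the address.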
static void *DoAnonymousMmapOrDie(uptr size, const char *mem_type,
bool raw_report, bool die_for_nomem) {
size = RoundUpTo(size, GetPageSize());
@@ -183,11 +198,9 @@ static void *DoAnonymousMmapOrDie(uptr size, const char *mem_type,
_zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
internal_strlen(mem_type));
- // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?
uintptr_t addr;
- status =
- _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0,
- vmo, 0, size, &addr);
+ status = TryVmoMapSanitizerVmar(ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
+ /*vmar_offset=*/0, vmo, size, &addr);
_zx_handle_close(vmo);
if (status != ZX_OK) {
@@ -265,13 +278,14 @@ static uptr DoMmapFixedOrDie(zx_handle_t vmar, uptr fixed_addr, uptr map_size,
uptr ReservedAddressRange::Map(uptr fixed_addr, uptr map_size,
const char *name) {
- return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_, name_,
- false);
+ return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_,
+ name ? name : name_, false);
}
uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr map_size,
const char *name) {
- return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_, name_, true);
+ return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_,
+ name ? name : name_, true);
}
void UnmapOrDieVmar(void *addr, uptr size, zx_handle_t target_vmar) {
@@ -281,6 +295,12 @@ void UnmapOrDieVmar(void *addr, uptr size, zx_handle_t target_vmar) {
zx_status_t status =
_zx_vmar_unmap(target_vmar, reinterpret_cast<uintptr_t>(addr), size);
+ if (status == ZX_ERR_INVALID_ARGS && target_vmar == gSanitizerHeapVmar) {
+    // If there wasn't any space in the heap VMAR, the mapping fell back to
+    // the root VMAR.
+ status = _zx_vmar_unmap(_zx_vmar_root_self(),
+ reinterpret_cast<uintptr_t>(addr), size);
+ }
if (status != ZX_OK) {
Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
SanitizerToolName, size, size, addr);
@@ -315,6 +335,21 @@ void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
UNIMPLEMENTED();
}
+bool MprotectNoAccess(uptr addr, uptr size) {
+ return _zx_vmar_protect(_zx_vmar_root_self(), 0, addr, size) == ZX_OK;
+}
+
+bool MprotectReadOnly(uptr addr, uptr size) {
+ return _zx_vmar_protect(_zx_vmar_root_self(), ZX_VM_PERM_READ, addr, size) ==
+ ZX_OK;
+}
+
+bool MprotectReadWrite(uptr addr, uptr size) {
+ return _zx_vmar_protect(_zx_vmar_root_self(),
+ ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, addr,
+ size) == ZX_OK;
+}
+
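Each wrapper above is a thin shim over _zx_vmar_protect on the root VMAR. A minimal sketch of a guard-page use (the ToggleGuardPage helper is hypothetical; GetPageSizeCached is the existing sanitizer_common accessor):

    // Make the first page of a region inaccessible so stray accesses
    // fault, or restore read/write access when the region is recycled.
    static bool ToggleGuardPage(uptr region_beg, bool enable) {
      const uptr page = GetPageSizeCached();
      return enable ? MprotectNoAccess(region_beg, page)
                    : MprotectReadWrite(region_beg, page);
    }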
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
const char *mem_type) {
CHECK_GE(size, GetPageSize());
@@ -331,17 +366,16 @@ void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
_zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
internal_strlen(mem_type));
- // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?
-
// Map a larger size to get a chunk of address space big enough that
// it surely contains an aligned region of the requested size. Then
// overwrite the aligned middle portion with a mapping from the
// beginning of the VMO, and unmap the excess before and after.
size_t map_size = size + alignment;
uintptr_t addr;
- status =
- _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0,
- vmo, 0, map_size, &addr);
+ zx_handle_t vmar_used;
+ status = TryVmoMapSanitizerVmar(ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
+ /*vmar_offset=*/0, vmo, map_size, &addr,
+ &vmar_used);
if (status == ZX_OK) {
uintptr_t map_addr = addr;
uintptr_t map_end = map_addr + map_size;
@@ -349,12 +383,12 @@ void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
uintptr_t end = addr + size;
if (addr != map_addr) {
zx_info_vmar_t info;
- status = _zx_object_get_info(_zx_vmar_root_self(), ZX_INFO_VMAR, &info,
- sizeof(info), NULL, NULL);
+ status = _zx_object_get_info(vmar_used, ZX_INFO_VMAR, &info, sizeof(info),
+ NULL, NULL);
if (status == ZX_OK) {
uintptr_t new_addr;
status = _zx_vmar_map(
- _zx_vmar_root_self(),
+ vmar_used,
ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC_OVERWRITE,
addr - info.base, vmo, 0, size, &new_addr);
if (status == ZX_OK)
@@ -362,9 +396,9 @@ void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
}
}
if (status == ZX_OK && addr != map_addr)
- status = _zx_vmar_unmap(_zx_vmar_root_self(), map_addr, addr - map_addr);
+ status = _zx_vmar_unmap(vmar_used, map_addr, addr - map_addr);
if (status == ZX_OK && end != map_end)
- status = _zx_vmar_unmap(_zx_vmar_root_self(), end, map_end - end);
+ status = _zx_vmar_unmap(vmar_used, end, map_end - end);
}
_zx_handle_close(vmo);
@@ -380,7 +414,7 @@ void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
}
void UnmapOrDie(void *addr, uptr size) {
- UnmapOrDieVmar(addr, size, _zx_vmar_root_self());
+ UnmapOrDieVmar(addr, size, gSanitizerHeapVmar);
}
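The over-map-and-trim scheme in MmapAlignedOrDieOnFatalError is easier to follow with the arithmetic pulled out. A standalone sketch (ComputeTrim is hypothetical; RoundUpTo is the existing sanitizer_common helper; the numbers in the comment are illustrative):

    // Mapping size + alignment bytes guarantees that an aligned sub-range
    // of `size` bytes exists. E.g. with size = alignment = 0x10000 and a
    // map placed at 0x10007000: addr = 0x10010000, so 0x9000 bytes are
    // trimmed before the aligned region and 0x7000 bytes after it.
    struct AlignedTrim {
      uptr before;       // bytes to unmap before the aligned region
      uptr aligned_beg;  // start of the aligned region
      uptr after;        // bytes to unmap after the aligned region
    };
    static AlignedTrim ComputeTrim(uptr map_addr, uptr size, uptr alignment) {
      uptr addr = RoundUpTo(map_addr, alignment);
      uptr map_end = map_addr + size + alignment;
      uptr end = addr + size;
      return {addr - map_addr, addr, map_end - end};
    }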
void ReleaseMemoryPagesToOS(uptr beg, uptr end) {
@@ -413,33 +447,12 @@ bool IsAccessibleMemoryRange(uptr beg, uptr size) {
}
// FIXME implement on this platform.
-void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) {}
+void GetMemoryProfile(fill_profile_f cb, uptr *stats) {}
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
uptr *read_len, uptr max_len, error_t *errno_p) {
- zx_handle_t vmo;
- zx_status_t status = __sanitizer_get_configuration(file_name, &vmo);
- if (status == ZX_OK) {
- uint64_t vmo_size;
- status = _zx_vmo_get_size(vmo, &vmo_size);
- if (status == ZX_OK) {
- if (vmo_size < max_len)
- max_len = vmo_size;
- size_t map_size = RoundUpTo(max_len, GetPageSize());
- uintptr_t addr;
- status = _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ, 0, vmo, 0,
- map_size, &addr);
- if (status == ZX_OK) {
- *buff = reinterpret_cast<char *>(addr);
- *buff_size = map_size;
- *read_len = max_len;
- }
- }
- _zx_handle_close(vmo);
- }
- if (status != ZX_OK && errno_p)
- *errno_p = status;
- return status == ZX_OK;
+  if (errno_p)
+    *errno_p = ZX_ERR_NOT_SUPPORTED;
+ return false;
}
void RawWrite(const char *buffer) {
@@ -516,6 +529,9 @@ u32 GetNumberOfCPUs() { return zx_system_get_num_cpus(); }
uptr GetRSS() { UNIMPLEMENTED(); }
+void *internal_start_thread(void *(*func)(void *arg), void *arg) { return 0; }
+void internal_join_thread(void *th) {}
+
void InitializePlatformCommonFlags(CommonFlags *cf) {}
} // namespace __sanitizer
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_hash.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_hash.h
index 3d97dcc5d280..f7cf9f234e6f 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_hash.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_hash.h
@@ -38,6 +38,30 @@ class MurMur2HashBuilder {
return x;
}
};
+
+class MurMur2Hash64Builder {
+ static const u64 m = 0xc6a4a7935bd1e995ull;
+ static const u64 seed = 0x9747b28c9747b28cull;
+ static const u64 r = 47;
+ u64 h;
+
+ public:
+ explicit MurMur2Hash64Builder(u64 init = 0) { h = seed ^ (init * m); }
+ void add(u64 k) {
+ k *= m;
+ k ^= k >> r;
+ k *= m;
+ h ^= k;
+ h *= m;
+ }
+ u64 get() {
+ u64 x = h;
+ x ^= x >> r;
+ x *= m;
+ x ^= x >> r;
+ return x;
+ }
+};
 }  // namespace __sanitizer
#endif // SANITIZER_HASH_H
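A minimal usage sketch for the new 64-bit builder (the HashPair helper is hypothetical; u64 comes from sanitizer_internal_defs.h):

    #include "sanitizer_hash.h"

    namespace __sanitizer {
    // Fold two 64-bit values into one fingerprint: add() mixes one input
    // word per call, get() applies the final avalanche steps.
    static u64 HashPair(u64 a, u64 b) {
      MurMur2Hash64Builder builder(a);
      builder.add(b);
      return builder.get();
    }
    }  // namespace __sanitizer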
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_interceptors_ioctl_netbsd.inc b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_interceptors_ioctl_netbsd.inc
index 576807ea3a6a..16b2a10d8b06 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_interceptors_ioctl_netbsd.inc
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_interceptors_ioctl_netbsd.inc
@@ -1267,8 +1267,6 @@ static void ioctl_table_fill() {
_(TIOCGFLAGS, WRITE, sizeof(int));
_(TIOCSFLAGS, READ, sizeof(int));
_(TIOCDCDTIMESTAMP, WRITE, struct_timeval_sz);
- _(TIOCRCVFRAME, READ, sizeof(uptr));
- _(TIOCXMTFRAME, READ, sizeof(uptr));
_(TIOCPTMGET, WRITE, struct_ptmget_sz);
_(TIOCGRANTPT, NONE, 0);
_(TIOCPTSNAME, WRITE, struct_ptmget_sz);
@@ -1406,7 +1404,7 @@ static void ioctl_table_fill() {
_(URIO_SEND_COMMAND, READWRITE, struct_urio_command_sz);
_(URIO_RECV_COMMAND, READWRITE, struct_urio_command_sz);
#undef _
-} // NOLINT
+}
static bool ioctl_initialized = false;
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_interface_internal.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_interface_internal.h
index 0b001c1c4830..cd0d45e2f3fa 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_interface_internal.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_interface_internal.h
@@ -20,103 +20,134 @@
#include "sanitizer_internal_defs.h"
extern "C" {
- // Tell the tools to write their reports to "path.<pid>" instead of stderr.
- // The special values are "stdout" and "stderr".
- SANITIZER_INTERFACE_ATTRIBUTE
- void __sanitizer_set_report_path(const char *path);
- // Tell the tools to write their reports to the provided file descriptor
- // (casted to void *).
- SANITIZER_INTERFACE_ATTRIBUTE
- void __sanitizer_set_report_fd(void *fd);
- // Get the current full report file path, if a path was specified by
- // an earlier call to __sanitizer_set_report_path. Returns null otherwise.
- SANITIZER_INTERFACE_ATTRIBUTE
- const char *__sanitizer_get_report_path();
+// Tell the tools to write their reports to "path.<pid>" instead of stderr.
+// The special values are "stdout" and "stderr".
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_set_report_path(const char *path);
+// Tell the tools to write their reports to the provided file descriptor
+// (casted to void *).
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_set_report_fd(void *fd);
+// Get the current full report file path, if a path was specified by
+// an earlier call to __sanitizer_set_report_path. Returns null otherwise.
+SANITIZER_INTERFACE_ATTRIBUTE
+const char *__sanitizer_get_report_path();
- typedef struct {
- int coverage_sandboxed;
- __sanitizer::sptr coverage_fd;
- unsigned int coverage_max_block_size;
- } __sanitizer_sandbox_arguments;
+typedef struct {
+ int coverage_sandboxed;
+ __sanitizer::sptr coverage_fd;
+ unsigned int coverage_max_block_size;
+} __sanitizer_sandbox_arguments;
- // Notify the tools that the sandbox is going to be turned on.
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
- __sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args);
+// Notify the tools that the sandbox is going to be turned on.
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args);
- // This function is called by the tool when it has just finished reporting
- // an error. 'error_summary' is a one-line string that summarizes
- // the error message. This function can be overridden by the client.
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- void __sanitizer_report_error_summary(const char *error_summary);
+// This function is called by the tool when it has just finished reporting
+// an error. 'error_summary' is a one-line string that summarizes
+// the error message. This function can be overridden by the client.
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_report_error_summary(const char *error_summary);
- SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump();
- SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_coverage(
- const __sanitizer::uptr *pcs, const __sanitizer::uptr len);
- SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_trace_pc_guard_coverage();
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump();
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_coverage(
+ const __sanitizer::uptr *pcs, const __sanitizer::uptr len);
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_trace_pc_guard_coverage();
- SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(__sanitizer::u32 *guard);
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(__sanitizer::u32 *guard);
- // Returns 1 on the first call, then returns 0 thereafter. Called by the tool
- // to ensure only one report is printed when multiple errors occur
- // simultaneously.
- SANITIZER_INTERFACE_ATTRIBUTE int __sanitizer_acquire_crash_state();
+// Returns 1 on the first call, then returns 0 thereafter. Called by the tool
+// to ensure only one report is printed when multiple errors occur
+// simultaneously.
+SANITIZER_INTERFACE_ATTRIBUTE int __sanitizer_acquire_crash_state();
- SANITIZER_INTERFACE_ATTRIBUTE
- void __sanitizer_annotate_contiguous_container(const void *beg,
- const void *end,
- const void *old_mid,
- const void *new_mid);
- SANITIZER_INTERFACE_ATTRIBUTE
- int __sanitizer_verify_contiguous_container(const void *beg, const void *mid,
- const void *end);
- SANITIZER_INTERFACE_ATTRIBUTE
- const void *__sanitizer_contiguous_container_find_bad_address(
- const void *beg, const void *mid, const void *end);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_annotate_contiguous_container(const void *beg, const void *end,
+ const void *old_mid,
+ const void *new_mid);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_annotate_double_ended_contiguous_container(
+ const void *storage_beg, const void *storage_end,
+ const void *old_container_beg, const void *old_container_end,
+ const void *new_container_beg, const void *new_container_end);
+SANITIZER_INTERFACE_ATTRIBUTE
+int __sanitizer_verify_contiguous_container(const void *beg, const void *mid,
+ const void *end);
+SANITIZER_INTERFACE_ATTRIBUTE
+int __sanitizer_verify_double_ended_contiguous_container(
+ const void *storage_beg, const void *container_beg,
+ const void *container_end, const void *storage_end);
+SANITIZER_INTERFACE_ATTRIBUTE
+const void *__sanitizer_contiguous_container_find_bad_address(const void *beg,
+ const void *mid,
+ const void *end);
+SANITIZER_INTERFACE_ATTRIBUTE
+const void *__sanitizer_double_ended_contiguous_container_find_bad_address(
+ const void *storage_beg, const void *container_beg,
+ const void *container_end, const void *storage_end);
- SANITIZER_INTERFACE_ATTRIBUTE
- int __sanitizer_get_module_and_offset_for_pc(
- __sanitizer::uptr pc, char *module_path,
- __sanitizer::uptr module_path_len, __sanitizer::uptr *pc_offset);
-
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- void __sanitizer_cov_trace_cmp();
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- void __sanitizer_cov_trace_cmp1();
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- void __sanitizer_cov_trace_cmp2();
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- void __sanitizer_cov_trace_cmp4();
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- void __sanitizer_cov_trace_cmp8();
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- void __sanitizer_cov_trace_const_cmp1();
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- void __sanitizer_cov_trace_const_cmp2();
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- void __sanitizer_cov_trace_const_cmp4();
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- void __sanitizer_cov_trace_const_cmp8();
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- void __sanitizer_cov_trace_switch();
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- void __sanitizer_cov_trace_div4();
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- void __sanitizer_cov_trace_div8();
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- void __sanitizer_cov_trace_gep();
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- void __sanitizer_cov_trace_pc_indir();
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- void __sanitizer_cov_trace_pc_guard(__sanitizer::u32*);
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- void __sanitizer_cov_trace_pc_guard_init(__sanitizer::u32*,
- __sanitizer::u32*);
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- void __sanitizer_cov_8bit_counters_init();
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
- __sanitizer_cov_bool_flag_init();
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
- __sanitizer_cov_pcs_init();
-} // extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+int __sanitizer_get_module_and_offset_for_pc(void *pc, char *module_path,
+ __sanitizer::uptr module_path_len,
+ void **pc_offset);
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_trace_cmp();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_trace_cmp1();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_trace_cmp2();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_trace_cmp4();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_trace_cmp8();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_trace_const_cmp1();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_trace_const_cmp2();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_trace_const_cmp4();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_trace_const_cmp8();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_trace_switch();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_trace_div4();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_trace_div8();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_trace_gep();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_trace_pc_indir();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_load1();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_load2();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_load4();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_load8();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_load16();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_store1();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_store2();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_store4();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_store8();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_store16();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_trace_pc_guard(__sanitizer::u32 *);
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_trace_pc_guard_init(__sanitizer::u32 *, __sanitizer::u32 *);
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_8bit_counters_init(char *, char *);
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_bool_flag_init();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_cov_pcs_init(const __sanitizer::uptr *, const __sanitizer::uptr *);
+} // extern "C"
#endif // SANITIZER_INTERFACE_INTERNAL_H
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_internal_defs.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_internal_defs.h
index 84053fec2649..992721757e88 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_internal_defs.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_internal_defs.h
@@ -13,6 +13,12 @@
#define SANITIZER_DEFS_H
#include "sanitizer_platform.h"
+#include "sanitizer_redefine_builtins.h"
+
+// GCC does not understand __has_feature.
+#if !defined(__has_feature)
+#define __has_feature(x) 0
+#endif
#ifndef SANITIZER_DEBUG
# define SANITIZER_DEBUG 0
@@ -29,22 +35,20 @@
# define SANITIZER_INTERFACE_ATTRIBUTE __declspec(dllexport)
#endif
# define SANITIZER_WEAK_ATTRIBUTE
+# define SANITIZER_WEAK_IMPORT
#elif SANITIZER_GO
# define SANITIZER_INTERFACE_ATTRIBUTE
# define SANITIZER_WEAK_ATTRIBUTE
+# define SANITIZER_WEAK_IMPORT
#else
# define SANITIZER_INTERFACE_ATTRIBUTE __attribute__((visibility("default")))
# define SANITIZER_WEAK_ATTRIBUTE __attribute__((weak))
-#endif
-
-// TLS is handled differently on different platforms
-#if SANITIZER_LINUX || SANITIZER_NETBSD || \
- SANITIZER_FREEBSD
-# define SANITIZER_TLS_INITIAL_EXEC_ATTRIBUTE \
- __attribute__((tls_model("initial-exec"))) thread_local
-#else
-# define SANITIZER_TLS_INITIAL_EXEC_ATTRIBUTE
-#endif
+# if SANITIZER_APPLE
+# define SANITIZER_WEAK_IMPORT extern "C" __attribute((weak_import))
+# else
+# define SANITIZER_WEAK_IMPORT extern "C" SANITIZER_WEAK_ATTRIBUTE
+# endif // SANITIZER_APPLE
+#endif // SANITIZER_WINDOWS
//--------------------------- WEAK FUNCTIONS ---------------------------------//
// When working with weak functions, to simplify the code and make it more
@@ -73,7 +77,7 @@
// Before Xcode 4.5, the Darwin linker doesn't reliably support undefined
// weak symbols. Mac OS X 10.9/Darwin 13 is the first release only supported
// by Xcode >= 4.5.
-#elif SANITIZER_MAC && \
+#elif SANITIZER_APPLE && \
__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= 1090 && !SANITIZER_GO
# define SANITIZER_SUPPORTS_WEAK_HOOKS 1
#else
@@ -125,6 +129,10 @@
# define __has_attribute(x) 0
#endif
+#if !defined(__has_cpp_attribute)
+# define __has_cpp_attribute(x) 0
+#endif
+
// For portability reasons we do not include stddef.h, stdint.h or any other
// system header, but we do need some basic types that are not defined
// in a portable way by the language itself.
@@ -135,8 +143,13 @@ namespace __sanitizer {
typedef unsigned long long uptr;
typedef signed long long sptr;
#else
+# if (SANITIZER_WORDSIZE == 64) || SANITIZER_APPLE || SANITIZER_WINDOWS
typedef unsigned long uptr;
typedef signed long sptr;
+# else
+typedef unsigned int uptr;
+typedef signed int sptr;
+# endif
#endif // defined(_WIN64)
#if defined(__x86_64__)
// Since x32 uses ILP32 data model in 64-bit hardware mode, we must use
@@ -168,17 +181,17 @@ typedef long pid_t;
typedef int pid_t;
#endif
-#if SANITIZER_FREEBSD || SANITIZER_NETBSD || \
- SANITIZER_MAC || \
+#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_APPLE || \
(SANITIZER_SOLARIS && (defined(_LP64) || _FILE_OFFSET_BITS == 64)) || \
- (SANITIZER_LINUX && defined(__x86_64__))
+ (SANITIZER_LINUX && !SANITIZER_GLIBC && !SANITIZER_ANDROID) || \
+ (SANITIZER_LINUX && (defined(__x86_64__) || defined(__hexagon__)))
typedef u64 OFF_T;
#else
typedef uptr OFF_T;
#endif
typedef u64 OFF64_T;
-#if (SANITIZER_WORDSIZE == 64) || SANITIZER_MAC
+#if (SANITIZER_WORDSIZE == 64) || SANITIZER_APPLE
typedef uptr operator_new_size_type;
#else
# if defined(__s390__) && !defined(__s390x__)
@@ -217,7 +230,7 @@ typedef u64 tid_t;
# define WARN_UNUSED_RESULT
#else // _MSC_VER
# define ALWAYS_INLINE inline __attribute__((always_inline))
-# define ALIAS(x) __attribute__((alias(x)))
+# define ALIAS(x) __attribute__((alias(SANITIZER_STRINGIFY(x))))
// Please only use the ALIGNED macro before the type.
// Using ALIGNED after the variable declaration is not portable!
# define ALIGNED(x) __attribute__((aligned(x)))
@@ -250,6 +263,20 @@ typedef u64 tid_t;
# define NOEXCEPT throw()
#endif
+#if __has_cpp_attribute(clang::fallthrough)
+# define FALLTHROUGH [[clang::fallthrough]]
+#elif __has_cpp_attribute(fallthrough)
+# define FALLTHROUGH [[fallthrough]]
+#else
+# define FALLTHROUGH
+#endif
+
+#if __has_attribute(uninitialized)
+# define UNINITIALIZED __attribute__((uninitialized))
+#else
+# define UNINITIALIZED
+#endif
+
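A short sketch of the new FALLTHROUGH macro in use (ParseVerbosity is hypothetical; UNINITIALIZED attaches to declarations in the same spirit):

    // FALLTHROUGH expands to [[clang::fallthrough]] or [[fallthrough]] when
    // available, silencing -Wimplicit-fallthrough on deliberate fall-through.
    static int ParseVerbosity(char c) {
      switch (c) {
        case 'v':       // "-v" means the same as "-1" ...
          FALLTHROUGH;  // ... so let it fall through to the next label.
        case '1':
          return 1;
        case '2':
          return 2;
        default:
          return 0;
      }
    }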
// Unaligned versions of basic types.
typedef ALIGNED(1) u16 uu16;
typedef ALIGNED(1) u32 uu32;
@@ -277,14 +304,17 @@ void NORETURN CheckFailed(const char *file, int line, const char *cond,
u64 v1, u64 v2);
// Check macro
-#define RAW_CHECK_MSG(expr, msg) do { \
- if (UNLIKELY(!(expr))) { \
- RawWrite(msg); \
- Die(); \
- } \
-} while (0)
+#define RAW_CHECK_MSG(expr, msg, ...) \
+ do { \
+ if (UNLIKELY(!(expr))) { \
+ const char* msgs[] = {msg, __VA_ARGS__}; \
+ for (const char* m : msgs) RawWrite(m); \
+ Die(); \
+ } \
+ } while (0)
-#define RAW_CHECK(expr) RAW_CHECK_MSG(expr, #expr)
+#define RAW_CHECK(expr) RAW_CHECK_MSG(expr, #expr "\n", )
+#define RAW_CHECK_VA(expr, ...) RAW_CHECK_MSG(expr, #expr "\n", __VA_ARGS__)
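The variadic form appends extra C strings after the stringified condition; each is written with RawWrite before Die(). A hypothetical call site:

    static void CheckMapped(void *ptr, const char *what) {
      // On failure prints "ptr", then "while mapping: <what>", then dies.
      RAW_CHECK_VA(ptr, "while mapping: ", what, "\n");
    }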
#define CHECK_IMPL(c1, op, c2) \
do { \
@@ -366,13 +396,10 @@ void NORETURN CheckFailed(const char *file, int line, const char *cond,
enum LinkerInitialized { LINKER_INITIALIZED = 0 };
#if !defined(_MSC_VER) || defined(__clang__)
-#if SANITIZER_S390_31
-#define GET_CALLER_PC() \
- (__sanitizer::uptr) __builtin_extract_return_addr(__builtin_return_address(0))
-#else
-#define GET_CALLER_PC() (__sanitizer::uptr) __builtin_return_address(0)
-#endif
-#define GET_CURRENT_FRAME() (__sanitizer::uptr) __builtin_frame_address(0)
+# define GET_CALLER_PC() \
+ ((__sanitizer::uptr)__builtin_extract_return_addr( \
+ __builtin_return_address(0)))
+# define GET_CURRENT_FRAME() ((__sanitizer::uptr)__builtin_frame_address(0))
inline void Trap() {
__builtin_trap();
}
@@ -381,13 +408,13 @@ extern "C" void* _ReturnAddress(void);
extern "C" void* _AddressOfReturnAddress(void);
# pragma intrinsic(_ReturnAddress)
# pragma intrinsic(_AddressOfReturnAddress)
-#define GET_CALLER_PC() (__sanitizer::uptr) _ReturnAddress()
+# define GET_CALLER_PC() ((__sanitizer::uptr)_ReturnAddress())
// CaptureStackBackTrace doesn't need to know BP on Windows.
-#define GET_CURRENT_FRAME() \
- (((__sanitizer::uptr)_AddressOfReturnAddress()) + sizeof(__sanitizer::uptr))
+# define GET_CURRENT_FRAME() \
+ (((__sanitizer::uptr)_AddressOfReturnAddress()) + sizeof(__sanitizer::uptr))
extern "C" void __ud2(void);
-# pragma intrinsic(__ud2)
+# pragma intrinsic(__ud2)
inline void Trap() {
__ud2();
}
@@ -409,8 +436,14 @@ inline void Trap() {
(void)enable_fp; \
} while (0)
-constexpr u32 kInvalidTid = -1;
-constexpr u32 kMainTid = 0;
+// Internal thread identifier allocated by ThreadRegistry.
+typedef u32 Tid;
+constexpr Tid kInvalidTid = -1;
+constexpr Tid kMainTid = 0;
+
+// Stack depot stack identifier.
+typedef u32 StackID;
+const StackID kInvalidStackID = 0;
} // namespace __sanitizer
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_leb128.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_leb128.h
new file mode 100644
index 000000000000..553550d29552
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_leb128.h
@@ -0,0 +1,87 @@
+//===-- sanitizer_leb128.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_LEB128_H
+#define SANITIZER_LEB128_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+
+template <typename T, typename It>
+It EncodeSLEB128(T value, It begin, It end) {
+ bool more;
+ do {
+ u8 byte = value & 0x7f;
+    // NOTE: this assumes that the signed shift below is an arithmetic
+    // right shift.
+ value >>= 7;
+ more = !((((value == 0) && ((byte & 0x40) == 0)) ||
+ ((value == -1) && ((byte & 0x40) != 0))));
+ if (more)
+ byte |= 0x80;
+ if (UNLIKELY(begin == end))
+ break;
+ *(begin++) = byte;
+ } while (more);
+ return begin;
+}
+
+template <typename T, typename It>
+It DecodeSLEB128(It begin, It end, T* v) {
+ T value = 0;
+ unsigned shift = 0;
+ u8 byte;
+ do {
+ if (UNLIKELY(begin == end))
+ return begin;
+ byte = *(begin++);
+ T slice = byte & 0x7f;
+ value |= slice << shift;
+ shift += 7;
+ } while (byte >= 128);
+ if (shift < 64 && (byte & 0x40))
+ value |= (-1ULL) << shift;
+ *v = value;
+ return begin;
+}
+
+template <typename T, typename It>
+It EncodeULEB128(T value, It begin, It end) {
+ do {
+ u8 byte = value & 0x7f;
+ value >>= 7;
+ if (value)
+ byte |= 0x80;
+ if (UNLIKELY(begin == end))
+ break;
+ *(begin++) = byte;
+ } while (value);
+ return begin;
+}
+
+template <typename T, typename It>
+It DecodeULEB128(It begin, It end, T* v) {
+ T value = 0;
+ unsigned shift = 0;
+ u8 byte;
+ do {
+ if (UNLIKELY(begin == end))
+ return begin;
+ byte = *(begin++);
+ T slice = byte & 0x7f;
+ value += slice << shift;
+ shift += 7;
+ } while (byte >= 128);
+ *v = value;
+ return begin;
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_LEB128_H
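A round-trip sketch for the new encoders (RoundTripULEB128 is a hypothetical test helper; ten bytes is the worst case for a 64-bit value at seven payload bits per byte):

    #include "sanitizer_leb128.h"

    namespace __sanitizer {
    static bool RoundTripULEB128(u64 value) {
      u8 buf[10];  // ceil(64 / 7) = 10 bytes worst case.
      u8 *enc_end = EncodeULEB128(value, buf, buf + sizeof(buf));
      u64 decoded = 0;
      u8 *dec_end = DecodeULEB128(buf, enc_end, &decoded);
      return dec_end == enc_end && decoded == value;
    }
    }  // namespace __sanitizer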
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_libc.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_libc.cpp
index 4bc04b486870..9318066afed2 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_libc.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_libc.cpp
@@ -10,6 +10,9 @@
// run-time libraries. See sanitizer_libc.h for details.
//===----------------------------------------------------------------------===//
+// Do not redefine builtins; this file is defining the builtin replacements.
+#define SANITIZER_COMMON_NO_REDEFINE_BUILTINS
+
#include "sanitizer_allocator_internal.h"
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
@@ -46,7 +49,10 @@ int internal_memcmp(const void* s1, const void* s2, uptr n) {
return 0;
}
-void *internal_memcpy(void *dest, const void *src, uptr n) {
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE void *__sanitizer_internal_memcpy(void *dest,
+ const void *src,
+ uptr n) {
char *d = (char*)dest;
const char *s = (const char *)src;
for (uptr i = 0; i < n; ++i)
@@ -54,7 +60,8 @@ void *internal_memcpy(void *dest, const void *src, uptr n) {
return dest;
}
-void *internal_memmove(void *dest, const void *src, uptr n) {
+SANITIZER_INTERFACE_ATTRIBUTE void *__sanitizer_internal_memmove(
+ void *dest, const void *src, uptr n) {
char *d = (char*)dest;
const char *s = (const char *)src;
sptr i, signed_n = (sptr)n;
@@ -72,7 +79,8 @@ void *internal_memmove(void *dest, const void *src, uptr n) {
return dest;
}
-void *internal_memset(void* s, int c, uptr n) {
+SANITIZER_INTERFACE_ATTRIBUTE void *__sanitizer_internal_memset(void *s, int c,
+ uptr n) {
// Optimize for the most performance-critical case:
if ((reinterpret_cast<uptr>(s) % 16) == 0 && (n % 16) == 0) {
u64 *p = reinterpret_cast<u64*>(s);
@@ -95,6 +103,7 @@ void *internal_memset(void* s, int c, uptr n) {
}
return s;
}
+} // extern "C"
uptr internal_strcspn(const char *s, const char *reject) {
uptr i;
@@ -190,6 +199,14 @@ char *internal_strncat(char *dst, const char *src, uptr n) {
return dst;
}
+wchar_t *internal_wcscpy(wchar_t *dst, const wchar_t *src) {
+  wchar_t *dst_it = dst;
+  // Copy up to and including the terminating null wide character.
+  while ((*dst_it++ = *src++)) {
+  }
+  return dst;
+}
+
uptr internal_strlcpy(char *dst, const char *src, uptr maxlen) {
const uptr srclen = internal_strlen(src);
if (srclen < maxlen) {
@@ -209,6 +226,14 @@ char *internal_strncpy(char *dst, const char *src, uptr n) {
return dst;
}
+wchar_t *internal_wcsncpy(wchar_t *dst, const wchar_t *src, uptr n) {
+ uptr i;
+ for (i = 0; i < n && src[i]; ++i)
+ dst[i] = src[i];
+ internal_memset(dst + i, 0, (n - i) * sizeof(wchar_t));
+ return dst;
+}
+
uptr internal_strnlen(const char *s, uptr maxlen) {
uptr i = 0;
while (i < maxlen && s[i]) i++;
@@ -258,6 +283,18 @@ s64 internal_simple_strtoll(const char *nptr, const char **endptr, int base) {
}
}
+uptr internal_wcslen(const wchar_t *s) {
+ uptr i = 0;
+ while (s[i]) i++;
+ return i;
+}
+
+uptr internal_wcsnlen(const wchar_t *s, uptr maxlen) {
+ uptr i = 0;
+ while (i < maxlen && s[i]) i++;
+ return i;
+}
+
bool mem_is_zero(const char *beg, uptr size) {
CHECK_LE(size, 1ULL << FIRST_32_SECOND_64(30, 40)); // Sanity check.
const char *end = beg + size;
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_libc.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_libc.h
index bcb81ebbc803..1906569e2a5f 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_libc.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_libc.h
@@ -24,15 +24,33 @@ namespace __sanitizer {
// internal_X() is a custom implementation of X() for use in RTL.
+extern "C" {
+// These are used as builtin replacements; see sanitizer_redefine_builtins.h.
+// In normal runtime code, use the __sanitizer::internal_X() aliases instead.
+SANITIZER_INTERFACE_ATTRIBUTE void *__sanitizer_internal_memcpy(void *dest,
+ const void *src,
+ uptr n);
+SANITIZER_INTERFACE_ATTRIBUTE void *__sanitizer_internal_memmove(
+ void *dest, const void *src, uptr n);
+SANITIZER_INTERFACE_ATTRIBUTE void *__sanitizer_internal_memset(void *s, int c,
+ uptr n);
+} // extern "C"
+
// String functions
s64 internal_atoll(const char *nptr);
void *internal_memchr(const void *s, int c, uptr n);
void *internal_memrchr(const void *s, int c, uptr n);
int internal_memcmp(const void* s1, const void* s2, uptr n);
-void *internal_memcpy(void *dest, const void *src, uptr n);
-void *internal_memmove(void *dest, const void *src, uptr n);
+ALWAYS_INLINE void *internal_memcpy(void *dest, const void *src, uptr n) {
+ return __sanitizer_internal_memcpy(dest, src, n);
+}
+ALWAYS_INLINE void *internal_memmove(void *dest, const void *src, uptr n) {
+ return __sanitizer_internal_memmove(dest, src, n);
+}
// Should not be used in performance-critical places.
-void *internal_memset(void *s, int c, uptr n);
+ALWAYS_INLINE void *internal_memset(void *s, int c, uptr n) {
+ return __sanitizer_internal_memset(s, c, n);
+}
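Splitting each internal_mem* into an extern "C" out-of-line symbol plus an ALWAYS_INLINE wrapper lets other translation units redirect compiler-generated mem* calls at the symbol level. A rough sketch of the redirection, assuming sanitizer_redefine_builtins.h (not shown in this diff) aliases the libc names:

    // Assumed shape: alias the libc symbol names to the interceptor-proof
    // replacements in every TU that opts in.
    asm("memcpy = __sanitizer_internal_memcpy");
    asm("memmove = __sanitizer_internal_memmove");
    asm("memset = __sanitizer_internal_memset");
    // sanitizer_libc.cpp defines SANITIZER_COMMON_NO_REDEFINE_BUILTINS first,
    // since it is the file that provides those definitions.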
char* internal_strchr(const char *s, int c);
char *internal_strchrnul(const char *s, int c);
int internal_strcmp(const char *s1, const char *s2);
@@ -49,8 +67,12 @@ char *internal_strrchr(const char *s, int c);
char *internal_strstr(const char *haystack, const char *needle);
// Works only for base=10 and doesn't set errno.
s64 internal_simple_strtoll(const char *nptr, const char **endptr, int base);
-int internal_snprintf(char *buffer, uptr length, const char *format, ...);
-
+int internal_snprintf(char *buffer, uptr length, const char *format, ...)
+ FORMAT(3, 4);
+uptr internal_wcslen(const wchar_t *s);
+uptr internal_wcsnlen(const wchar_t *s, uptr maxlen);
+wchar_t *internal_wcscpy(wchar_t *dst, const wchar_t *src);
+wchar_t *internal_wcsncpy(wchar_t *dst, const wchar_t *src, uptr maxlen);
// Return true if all bytes in [mem, mem+size) are zero.
// Optimized for the case when the result is true.
bool mem_is_zero(const char *mem, uptr size);
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_libignore.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_libignore.cpp
index a65d3d896e33..b7fc9444cc66 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_libignore.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_libignore.cpp
@@ -8,7 +8,7 @@
#include "sanitizer_platform.h"
-#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC || \
+#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_APPLE || \
SANITIZER_NETBSD
#include "sanitizer_libignore.h"
@@ -22,9 +22,9 @@ LibIgnore::LibIgnore(LinkerInitialized) {
}
void LibIgnore::AddIgnoredLibrary(const char *name_templ) {
- BlockingMutexLock lock(&mutex_);
+ Lock lock(&mutex_);
if (count_ >= kMaxLibs) {
- Report("%s: too many ignored libraries (max: %d)\n", SanitizerToolName,
+ Report("%s: too many ignored libraries (max: %zu)\n", SanitizerToolName,
kMaxLibs);
Die();
}
@@ -36,7 +36,7 @@ void LibIgnore::AddIgnoredLibrary(const char *name_templ) {
}
void LibIgnore::OnLibraryLoaded(const char *name) {
- BlockingMutexLock lock(&mutex_);
+ Lock lock(&mutex_);
// Try to match suppressions with symlink target.
InternalMmapVector<char> buf(kMaxPathLength);
if (name && internal_readlink(name, buf.data(), buf.size() - 1) > 0 &&
@@ -105,7 +105,7 @@ void LibIgnore::OnLibraryLoaded(const char *name) {
continue;
if (IsPcInstrumented(range.beg) && IsPcInstrumented(range.end - 1))
continue;
- VReport(1, "Adding instrumented range %p-%p from library '%s'\n",
+ VReport(1, "Adding instrumented range 0x%zx-0x%zx from library '%s'\n",
range.beg, range.end, mod.full_name());
const uptr idx =
atomic_load(&instrumented_ranges_count_, memory_order_relaxed);
@@ -125,5 +125,5 @@ void LibIgnore::OnLibraryUnloaded() {
} // namespace __sanitizer
-#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC ||
+#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_APPLE ||
// SANITIZER_NETBSD
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_libignore.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_libignore.h
index 256f685979f4..18e4d83ed77f 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_libignore.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_libignore.h
@@ -77,7 +77,7 @@ class LibIgnore {
LibCodeRange instrumented_code_ranges_[kMaxInstrumentedRanges];
// Cold part:
- BlockingMutex mutex_;
+ Mutex mutex_;
uptr count_;
Lib libs_[kMaxLibs];
bool track_instrumented_libs_;
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
index 09b3f31831df..5d2dd3a7a658 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
@@ -16,97 +16,100 @@
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
SANITIZER_SOLARIS
-#include "sanitizer_common.h"
-#include "sanitizer_flags.h"
-#include "sanitizer_getauxval.h"
-#include "sanitizer_internal_defs.h"
-#include "sanitizer_libc.h"
-#include "sanitizer_linux.h"
-#include "sanitizer_mutex.h"
-#include "sanitizer_placement_new.h"
-#include "sanitizer_procmaps.h"
-
-#if SANITIZER_LINUX && !SANITIZER_GO
-#include <asm/param.h>
-#endif
+# include "sanitizer_common.h"
+# include "sanitizer_flags.h"
+# include "sanitizer_getauxval.h"
+# include "sanitizer_internal_defs.h"
+# include "sanitizer_libc.h"
+# include "sanitizer_linux.h"
+# include "sanitizer_mutex.h"
+# include "sanitizer_placement_new.h"
+# include "sanitizer_procmaps.h"
+
+# if SANITIZER_LINUX && !SANITIZER_GO
+# include <asm/param.h>
+# endif
// For mips64, syscall(__NR_stat) fills the buffer in the 'struct kernel_stat'
// format. Struct kernel_stat is defined as 'struct stat' in asm/stat.h. To
// access stat from asm/stat.h, without conflicting with definition in
// sys/stat.h, we use this trick.
-#if defined(__mips64)
-#include <asm/unistd.h>
-#include <sys/types.h>
-#define stat kernel_stat
-#if SANITIZER_GO
-#undef st_atime
-#undef st_mtime
-#undef st_ctime
-#define st_atime st_atim
-#define st_mtime st_mtim
-#define st_ctime st_ctim
-#endif
-#include <asm/stat.h>
-#undef stat
-#endif
-
-#include <dlfcn.h>
-#include <errno.h>
-#include <fcntl.h>
-#include <link.h>
-#include <pthread.h>
-#include <sched.h>
-#include <signal.h>
-#include <sys/mman.h>
-#include <sys/param.h>
-#if !SANITIZER_SOLARIS
-#include <sys/ptrace.h>
-#endif
-#include <sys/resource.h>
-#include <sys/stat.h>
-#include <sys/syscall.h>
-#include <sys/time.h>
-#include <sys/types.h>
-#include <ucontext.h>
-#include <unistd.h>
-
-#if SANITIZER_LINUX
-#include <sys/utsname.h>
-#endif
-
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
-#include <sys/personality.h>
-#endif
-
-#if SANITIZER_FREEBSD
-#include <sys/exec.h>
-#include <sys/procctl.h>
-#include <sys/sysctl.h>
-#include <machine/atomic.h>
+# if SANITIZER_MIPS64
+# include <asm/unistd.h>
+# include <sys/types.h>
+# define stat kernel_stat
+# if SANITIZER_GO
+# undef st_atime
+# undef st_mtime
+# undef st_ctime
+# define st_atime st_atim
+# define st_mtime st_mtim
+# define st_ctime st_ctim
+# endif
+# include <asm/stat.h>
+# undef stat
+# endif
+
+# include <dlfcn.h>
+# include <errno.h>
+# include <fcntl.h>
+# include <link.h>
+# include <pthread.h>
+# include <sched.h>
+# include <signal.h>
+# include <sys/mman.h>
+# if !SANITIZER_SOLARIS
+# include <sys/ptrace.h>
+# endif
+# include <sys/resource.h>
+# include <sys/stat.h>
+# include <sys/syscall.h>
+# include <sys/time.h>
+# include <sys/types.h>
+# include <ucontext.h>
+# include <unistd.h>
+
+# if SANITIZER_LINUX
+# include <sys/utsname.h>
+# endif
+
+# if SANITIZER_LINUX && !SANITIZER_ANDROID
+# include <sys/personality.h>
+# endif
+
+# if SANITIZER_LINUX && defined(__loongarch__)
+# include <sys/sysmacros.h>
+# endif
+
+# if SANITIZER_FREEBSD
+# include <machine/atomic.h>
+# include <sys/exec.h>
+# include <sys/procctl.h>
+# include <sys/sysctl.h>
extern "C" {
// <sys/umtx.h> must be included after <errno.h> and <sys/types.h> on
// FreeBSD 9.2 and 10.0.
-#include <sys/umtx.h>
+# include <sys/umtx.h>
}
-#include <sys/thr.h>
-#endif // SANITIZER_FREEBSD
+# include <sys/thr.h>
+# endif // SANITIZER_FREEBSD
-#if SANITIZER_NETBSD
-#include <limits.h> // For NAME_MAX
-#include <sys/sysctl.h>
-#include <sys/exec.h>
+# if SANITIZER_NETBSD
+# include <limits.h> // For NAME_MAX
+# include <sys/exec.h>
+# include <sys/sysctl.h>
extern struct ps_strings *__ps_strings;
-#endif // SANITIZER_NETBSD
+# endif // SANITIZER_NETBSD
-#if SANITIZER_SOLARIS
-#include <stdlib.h>
-#include <thread.h>
-#define environ _environ
-#endif
+# if SANITIZER_SOLARIS
+# include <stdlib.h>
+# include <thread.h>
+# define environ _environ
+# endif
extern char **environ;
-#if SANITIZER_LINUX
+# if SANITIZER_LINUX
// <linux/time.h>
struct kernel_timeval {
long tv_sec;
@@ -119,78 +122,109 @@ const int FUTEX_WAKE = 1;
const int FUTEX_PRIVATE_FLAG = 128;
const int FUTEX_WAIT_PRIVATE = FUTEX_WAIT | FUTEX_PRIVATE_FLAG;
const int FUTEX_WAKE_PRIVATE = FUTEX_WAKE | FUTEX_PRIVATE_FLAG;
-#endif // SANITIZER_LINUX
+# endif // SANITIZER_LINUX
// Are we using 32-bit or 64-bit Linux syscalls?
// x32 (which defines __x86_64__) has SANITIZER_WORDSIZE == 32
// but it still needs to use 64-bit syscalls.
-#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__powerpc64__) || \
- SANITIZER_WORDSIZE == 64)
-# define SANITIZER_LINUX_USES_64BIT_SYSCALLS 1
-#else
-# define SANITIZER_LINUX_USES_64BIT_SYSCALLS 0
-#endif
+# if SANITIZER_LINUX && (defined(__x86_64__) || defined(__powerpc64__) || \
+ SANITIZER_WORDSIZE == 64 || \
+ (defined(__mips__) && _MIPS_SIM == _ABIN32))
+# define SANITIZER_LINUX_USES_64BIT_SYSCALLS 1
+# else
+# define SANITIZER_LINUX_USES_64BIT_SYSCALLS 0
+# endif
+
+// Note: FreeBSD implements both the Linux and OpenBSD APIs.
+# if SANITIZER_LINUX && defined(__NR_getrandom)
+# if !defined(GRND_NONBLOCK)
+# define GRND_NONBLOCK 1
+# endif
+# define SANITIZER_USE_GETRANDOM 1
+# else
+# define SANITIZER_USE_GETRANDOM 0
+# endif // SANITIZER_LINUX && defined(__NR_getrandom)
-// Note : FreeBSD had implemented both
-// Linux apis, available from
-// future 12.x version most likely
-#if SANITIZER_LINUX && defined(__NR_getrandom)
-# if !defined(GRND_NONBLOCK)
-# define GRND_NONBLOCK 1
-# endif
-# define SANITIZER_USE_GETRANDOM 1
-#else
-# define SANITIZER_USE_GETRANDOM 0
-#endif // SANITIZER_LINUX && defined(__NR_getrandom)
-
-#if SANITIZER_FREEBSD && __FreeBSD_version >= 1200000
-# define SANITIZER_USE_GETENTROPY 1
-#else
-# define SANITIZER_USE_GETENTROPY 0
-#endif
+# if SANITIZER_FREEBSD
+# define SANITIZER_USE_GETENTROPY 1
+# endif
namespace __sanitizer {
-#if SANITIZER_LINUX && defined(__x86_64__)
-#include "sanitizer_syscall_linux_x86_64.inc"
-#elif SANITIZER_LINUX && SANITIZER_RISCV64
-#include "sanitizer_syscall_linux_riscv64.inc"
-#elif SANITIZER_LINUX && defined(__aarch64__)
-#include "sanitizer_syscall_linux_aarch64.inc"
-#elif SANITIZER_LINUX && defined(__arm__)
-#include "sanitizer_syscall_linux_arm.inc"
-#else
-#include "sanitizer_syscall_generic.inc"
-#endif
+void SetSigProcMask(__sanitizer_sigset_t *set, __sanitizer_sigset_t *oldset) {
+ CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, set, oldset));
+}
+
+void BlockSignals(__sanitizer_sigset_t *oldset) {
+ __sanitizer_sigset_t set;
+ internal_sigfillset(&set);
+# if SANITIZER_LINUX && !SANITIZER_ANDROID
+  // Glibc uses the SIGSETXID signal during setuid calls. If this signal is
+  // blocked on any thread, the setuid call hangs.
+ // See test/sanitizer_common/TestCases/Linux/setuid.c.
+ internal_sigdelset(&set, 33);
+# endif
+# if SANITIZER_LINUX
+ // Seccomp-BPF-sandboxed processes rely on SIGSYS to handle trapped syscalls.
+ // If this signal is blocked, such calls cannot be handled and the process may
+ // hang.
+ internal_sigdelset(&set, 31);
+# endif
+ SetSigProcMask(&set, oldset);
+}
+
+ScopedBlockSignals::ScopedBlockSignals(__sanitizer_sigset_t *copy) {
+ BlockSignals(&saved_);
+ if (copy)
+ internal_memcpy(copy, &saved_, sizeof(saved_));
+}
+
+ScopedBlockSignals::~ScopedBlockSignals() { SetSigProcMask(&saved_, nullptr); }
+
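A usage sketch for the new RAII helper (CreateThreadWithSignalsBlocked is hypothetical; the copied mask can be handed to a child thread so it can restore the caller's signal state):

    static void CreateThreadWithSignalsBlocked() {
      __sanitizer_sigset_t saved;
      ScopedBlockSignals block(&saved);  // blocks all but SIGSYS/SIGSETXID
      // ... thread creation runs here with async signals held off ...
    }  // ~ScopedBlockSignals restores the previous mask via SetSigProcMask.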
+# if SANITIZER_LINUX && defined(__x86_64__)
+# include "sanitizer_syscall_linux_x86_64.inc"
+# elif SANITIZER_LINUX && SANITIZER_RISCV64
+# include "sanitizer_syscall_linux_riscv64.inc"
+# elif SANITIZER_LINUX && defined(__aarch64__)
+# include "sanitizer_syscall_linux_aarch64.inc"
+# elif SANITIZER_LINUX && defined(__arm__)
+# include "sanitizer_syscall_linux_arm.inc"
+# elif SANITIZER_LINUX && defined(__hexagon__)
+# include "sanitizer_syscall_linux_hexagon.inc"
+# elif SANITIZER_LINUX && SANITIZER_LOONGARCH64
+# include "sanitizer_syscall_linux_loongarch64.inc"
+# else
+# include "sanitizer_syscall_generic.inc"
+# endif
// --------------- sanitizer_libc.h
-#if !SANITIZER_SOLARIS && !SANITIZER_NETBSD
-#if !SANITIZER_S390
+# if !SANITIZER_SOLARIS && !SANITIZER_NETBSD
+# if !SANITIZER_S390
uptr internal_mmap(void *addr, uptr length, int prot, int flags, int fd,
u64 offset) {
-#if SANITIZER_FREEBSD || SANITIZER_LINUX_USES_64BIT_SYSCALLS
+# if SANITIZER_FREEBSD || SANITIZER_LINUX_USES_64BIT_SYSCALLS
return internal_syscall(SYSCALL(mmap), (uptr)addr, length, prot, flags, fd,
offset);
-#else
+# else
// mmap2 specifies file offset in 4096-byte units.
CHECK(IsAligned(offset, 4096));
return internal_syscall(SYSCALL(mmap2), addr, length, prot, flags, fd,
offset / 4096);
-#endif
+# endif
}
-#endif // !SANITIZER_S390
+# endif // !SANITIZER_S390
uptr internal_munmap(void *addr, uptr length) {
return internal_syscall(SYSCALL(munmap), (uptr)addr, length);
}
-#if SANITIZER_LINUX
+# if SANITIZER_LINUX
uptr internal_mremap(void *old_address, uptr old_size, uptr new_size, int flags,
void *new_address) {
return internal_syscall(SYSCALL(mremap), (uptr)old_address, old_size,
new_size, flags, (uptr)new_address);
}
-#endif
+# endif
int internal_mprotect(void *addr, uptr length, int prot) {
return internal_syscall(SYSCALL(mprotect), (uptr)addr, length, prot);
@@ -200,25 +234,23 @@ int internal_madvise(uptr addr, uptr length, int advice) {
return internal_syscall(SYSCALL(madvise), addr, length, advice);
}
-uptr internal_close(fd_t fd) {
- return internal_syscall(SYSCALL(close), fd);
-}
+uptr internal_close(fd_t fd) { return internal_syscall(SYSCALL(close), fd); }
uptr internal_open(const char *filename, int flags) {
-#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+# if SANITIZER_LINUX
return internal_syscall(SYSCALL(openat), AT_FDCWD, (uptr)filename, flags);
-#else
+# else
return internal_syscall(SYSCALL(open), (uptr)filename, flags);
-#endif
+# endif
}
uptr internal_open(const char *filename, int flags, u32 mode) {
-#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+# if SANITIZER_LINUX
return internal_syscall(SYSCALL(openat), AT_FDCWD, (uptr)filename, flags,
mode);
-#else
+# else
return internal_syscall(SYSCALL(open), (uptr)filename, flags, mode);
-#endif
+# endif
}
uptr internal_read(fd_t fd, void *buf, uptr count) {
@@ -237,12 +269,13 @@ uptr internal_write(fd_t fd, const void *buf, uptr count) {
uptr internal_ftruncate(fd_t fd, uptr size) {
sptr res;
- HANDLE_EINTR(res, (sptr)internal_syscall(SYSCALL(ftruncate), fd,
- (OFF_T)size));
+ HANDLE_EINTR(res,
+ (sptr)internal_syscall(SYSCALL(ftruncate), fd, (OFF_T)size));
return res;
}
-#if !SANITIZER_LINUX_USES_64BIT_SYSCALLS && SANITIZER_LINUX
+# if (!SANITIZER_LINUX_USES_64BIT_SYSCALLS || SANITIZER_SPARC) && \
+ SANITIZER_LINUX
static void stat64_to_stat(struct stat64 *in, struct stat *out) {
internal_memset(out, 0, sizeof(*out));
out->st_dev = in->st_dev;
@@ -259,26 +292,48 @@ static void stat64_to_stat(struct stat64 *in, struct stat *out) {
out->st_mtime = in->st_mtime;
out->st_ctime = in->st_ctime;
}
-#endif
+# endif
+
+# if SANITIZER_LINUX && defined(__loongarch__)
+static void statx_to_stat(struct statx *in, struct stat *out) {
+ internal_memset(out, 0, sizeof(*out));
+ out->st_dev = makedev(in->stx_dev_major, in->stx_dev_minor);
+ out->st_ino = in->stx_ino;
+ out->st_mode = in->stx_mode;
+ out->st_nlink = in->stx_nlink;
+ out->st_uid = in->stx_uid;
+ out->st_gid = in->stx_gid;
+ out->st_rdev = makedev(in->stx_rdev_major, in->stx_rdev_minor);
+ out->st_size = in->stx_size;
+ out->st_blksize = in->stx_blksize;
+ out->st_blocks = in->stx_blocks;
+ out->st_atime = in->stx_atime.tv_sec;
+ out->st_atim.tv_nsec = in->stx_atime.tv_nsec;
+ out->st_mtime = in->stx_mtime.tv_sec;
+ out->st_mtim.tv_nsec = in->stx_mtime.tv_nsec;
+ out->st_ctime = in->stx_ctime.tv_sec;
+ out->st_ctim.tv_nsec = in->stx_ctime.tv_nsec;
+}
+# endif
-#if defined(__mips64)
+# if SANITIZER_MIPS64
// Undefine compatibility macros from <sys/stat.h>
// so that they would not clash with the kernel_stat
// st_[a|m|c]time fields
-#if !SANITIZER_GO
-#undef st_atime
-#undef st_mtime
-#undef st_ctime
-#endif
-#if defined(SANITIZER_ANDROID)
+# if !SANITIZER_GO
+# undef st_atime
+# undef st_mtime
+# undef st_ctime
+# endif
+# if defined(SANITIZER_ANDROID)
// Bionic sys/stat.h defines additional macros
// for compatibility with the old NDKs and
// they clash with the kernel_stat structure
// st_[a|m|c]time_nsec fields.
-#undef st_atime_nsec
-#undef st_mtime_nsec
-#undef st_ctime_nsec
-#endif
+# undef st_atime_nsec
+# undef st_mtime_nsec
+# undef st_ctime_nsec
+# endif
static void kernel_stat_to_stat(struct kernel_stat *in, struct stat *out) {
internal_memset(out, 0, sizeof(*out));
out->st_dev = in->st_dev;
@@ -291,92 +346,110 @@ static void kernel_stat_to_stat(struct kernel_stat *in, struct stat *out) {
out->st_size = in->st_size;
out->st_blksize = in->st_blksize;
out->st_blocks = in->st_blocks;
-#if defined(__USE_MISC) || \
- defined(__USE_XOPEN2K8) || \
- defined(SANITIZER_ANDROID)
+# if defined(__USE_MISC) || defined(__USE_XOPEN2K8) || \
+ defined(SANITIZER_ANDROID)
out->st_atim.tv_sec = in->st_atime;
out->st_atim.tv_nsec = in->st_atime_nsec;
out->st_mtim.tv_sec = in->st_mtime;
out->st_mtim.tv_nsec = in->st_mtime_nsec;
out->st_ctim.tv_sec = in->st_ctime;
out->st_ctim.tv_nsec = in->st_ctime_nsec;
-#else
+# else
out->st_atime = in->st_atime;
out->st_atimensec = in->st_atime_nsec;
out->st_mtime = in->st_mtime;
out->st_mtimensec = in->st_mtime_nsec;
out->st_ctime = in->st_ctime;
   out->st_ctimensec = in->st_ctime_nsec;
-#endif
+# endif
}
-#endif
+# endif
uptr internal_stat(const char *path, void *buf) {
-#if SANITIZER_FREEBSD
+# if SANITIZER_FREEBSD
return internal_syscall(SYSCALL(fstatat), AT_FDCWD, (uptr)path, (uptr)buf, 0);
-#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+# elif SANITIZER_LINUX
+# if defined(__loongarch__)
+ struct statx bufx;
+ int res = internal_syscall(SYSCALL(statx), AT_FDCWD, (uptr)path,
+ AT_NO_AUTOMOUNT, STATX_BASIC_STATS, (uptr)&bufx);
+ statx_to_stat(&bufx, (struct stat *)buf);
+ return res;
+# elif (SANITIZER_WORDSIZE == 64 || SANITIZER_X32 || \
+ (defined(__mips__) && _MIPS_SIM == _ABIN32)) && \
+ !SANITIZER_SPARC
return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path, (uptr)buf,
0);
-#elif SANITIZER_LINUX_USES_64BIT_SYSCALLS
-# if defined(__mips64)
- // For mips64, stat syscall fills buffer in the format of kernel_stat
- struct kernel_stat kbuf;
- int res = internal_syscall(SYSCALL(stat), path, &kbuf);
- kernel_stat_to_stat(&kbuf, (struct stat *)buf);
+# else
+ struct stat64 buf64;
+ int res = internal_syscall(SYSCALL(fstatat64), AT_FDCWD, (uptr)path,
+ (uptr)&buf64, 0);
+ stat64_to_stat(&buf64, (struct stat *)buf);
return res;
-# else
- return internal_syscall(SYSCALL(stat), (uptr)path, (uptr)buf);
-# endif
-#else
+# endif
+# else
struct stat64 buf64;
int res = internal_syscall(SYSCALL(stat64), path, &buf64);
stat64_to_stat(&buf64, (struct stat *)buf);
return res;
-#endif
+# endif
}
uptr internal_lstat(const char *path, void *buf) {
-#if SANITIZER_FREEBSD
+# if SANITIZER_FREEBSD
return internal_syscall(SYSCALL(fstatat), AT_FDCWD, (uptr)path, (uptr)buf,
AT_SYMLINK_NOFOLLOW);
-#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+# elif SANITIZER_LINUX
+# if defined(__loongarch__)
+ struct statx bufx;
+ int res = internal_syscall(SYSCALL(statx), AT_FDCWD, (uptr)path,
+ AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT,
+ STATX_BASIC_STATS, (uptr)&bufx);
+ statx_to_stat(&bufx, (struct stat *)buf);
+ return res;
+# elif (defined(_LP64) || SANITIZER_X32 || \
+ (defined(__mips__) && _MIPS_SIM == _ABIN32)) && \
+ !SANITIZER_SPARC
return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path, (uptr)buf,
AT_SYMLINK_NOFOLLOW);
-#elif SANITIZER_LINUX_USES_64BIT_SYSCALLS
-# if SANITIZER_MIPS64
- // For mips64, lstat syscall fills buffer in the format of kernel_stat
- struct kernel_stat kbuf;
- int res = internal_syscall(SYSCALL(lstat), path, &kbuf);
- kernel_stat_to_stat(&kbuf, (struct stat *)buf);
+# else
+ struct stat64 buf64;
+ int res = internal_syscall(SYSCALL(fstatat64), AT_FDCWD, (uptr)path,
+ (uptr)&buf64, AT_SYMLINK_NOFOLLOW);
+ stat64_to_stat(&buf64, (struct stat *)buf);
return res;
-# else
- return internal_syscall(SYSCALL(lstat), (uptr)path, (uptr)buf);
-# endif
-#else
+# endif
+# else
struct stat64 buf64;
int res = internal_syscall(SYSCALL(lstat64), path, &buf64);
stat64_to_stat(&buf64, (struct stat *)buf);
return res;
-#endif
+# endif
}
uptr internal_fstat(fd_t fd, void *buf) {
-#if SANITIZER_FREEBSD || SANITIZER_LINUX_USES_64BIT_SYSCALLS
-#if SANITIZER_MIPS64
+# if SANITIZER_FREEBSD || SANITIZER_LINUX_USES_64BIT_SYSCALLS
+# if SANITIZER_MIPS64
// For mips64, fstat syscall fills buffer in the format of kernel_stat
struct kernel_stat kbuf;
int res = internal_syscall(SYSCALL(fstat), fd, &kbuf);
kernel_stat_to_stat(&kbuf, (struct stat *)buf);
return res;
-# else
+# elif SANITIZER_LINUX && defined(__loongarch__)
+ struct statx bufx;
+ int res = internal_syscall(SYSCALL(statx), fd, "", AT_EMPTY_PATH,
+ STATX_BASIC_STATS, (uptr)&bufx);
+ statx_to_stat(&bufx, (struct stat *)buf);
+ return res;
+# else
return internal_syscall(SYSCALL(fstat), fd, (uptr)buf);
-# endif
-#else
+# endif
+# else
struct stat64 buf64;
int res = internal_syscall(SYSCALL(fstat64), fd, &buf64);
stat64_to_stat(&buf64, (struct stat *)buf);
return res;
-#endif
+# endif
}
uptr internal_filesize(fd_t fd) {
@@ -386,50 +459,46 @@ uptr internal_filesize(fd_t fd) {
return (uptr)st.st_size;
}
-uptr internal_dup(int oldfd) {
- return internal_syscall(SYSCALL(dup), oldfd);
-}
+uptr internal_dup(int oldfd) { return internal_syscall(SYSCALL(dup), oldfd); }
uptr internal_dup2(int oldfd, int newfd) {
-#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+# if SANITIZER_LINUX
return internal_syscall(SYSCALL(dup3), oldfd, newfd, 0);
-#else
+# else
return internal_syscall(SYSCALL(dup2), oldfd, newfd);
-#endif
+# endif
}
uptr internal_readlink(const char *path, char *buf, uptr bufsize) {
-#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+# if SANITIZER_LINUX
return internal_syscall(SYSCALL(readlinkat), AT_FDCWD, (uptr)path, (uptr)buf,
bufsize);
-#else
+# else
return internal_syscall(SYSCALL(readlink), (uptr)path, (uptr)buf, bufsize);
-#endif
+# endif
}
uptr internal_unlink(const char *path) {
-#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+# if SANITIZER_LINUX
return internal_syscall(SYSCALL(unlinkat), AT_FDCWD, (uptr)path, 0);
-#else
+# else
return internal_syscall(SYSCALL(unlink), (uptr)path);
-#endif
+# endif
}
uptr internal_rename(const char *oldpath, const char *newpath) {
-#if defined(__riscv)
+# if (defined(__riscv) || defined(__loongarch__)) && defined(__linux__)
return internal_syscall(SYSCALL(renameat2), AT_FDCWD, (uptr)oldpath, AT_FDCWD,
(uptr)newpath, 0);
-#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+# elif SANITIZER_LINUX
return internal_syscall(SYSCALL(renameat), AT_FDCWD, (uptr)oldpath, AT_FDCWD,
(uptr)newpath);
-#else
+# else
return internal_syscall(SYSCALL(rename), (uptr)oldpath, (uptr)newpath);
-#endif
+# endif
}
-uptr internal_sched_yield() {
- return internal_syscall(SYSCALL(sched_yield));
-}
+uptr internal_sched_yield() { return internal_syscall(SYSCALL(sched_yield)); }
void internal_usleep(u64 useconds) {
struct timespec ts;
@@ -443,60 +512,63 @@ uptr internal_execve(const char *filename, char *const argv[],
return internal_syscall(SYSCALL(execve), (uptr)filename, (uptr)argv,
(uptr)envp);
}
-#endif // !SANITIZER_SOLARIS && !SANITIZER_NETBSD
+# endif // !SANITIZER_SOLARIS && !SANITIZER_NETBSD
-#if !SANITIZER_NETBSD
+# if !SANITIZER_NETBSD
void internal__exit(int exitcode) {
-#if SANITIZER_FREEBSD || SANITIZER_SOLARIS
+# if SANITIZER_FREEBSD || SANITIZER_SOLARIS
internal_syscall(SYSCALL(exit), exitcode);
-#else
+# else
internal_syscall(SYSCALL(exit_group), exitcode);
-#endif
+# endif
Die(); // Unreachable.
}
-#endif // !SANITIZER_NETBSD
+# endif // !SANITIZER_NETBSD
// ----------------- sanitizer_common.h
bool FileExists(const char *filename) {
if (ShouldMockFailureToOpen(filename))
return false;
struct stat st;
-#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
- if (internal_syscall(SYSCALL(newfstatat), AT_FDCWD, filename, &st, 0))
-#else
if (internal_stat(filename, &st))
-#endif
return false;
// Sanity check: filename is a regular file.
return S_ISREG(st.st_mode);
}
-#if !SANITIZER_NETBSD
+bool DirExists(const char *path) {
+ struct stat st;
+ if (internal_stat(path, &st))
+ return false;
+ return S_ISDIR(st.st_mode);
+}
+
+# if !SANITIZER_NETBSD
tid_t GetTid() {
-#if SANITIZER_FREEBSD
+# if SANITIZER_FREEBSD
long Tid;
thr_self(&Tid);
return Tid;
-#elif SANITIZER_SOLARIS
+# elif SANITIZER_SOLARIS
return thr_self();
-#else
+# else
return internal_syscall(SYSCALL(gettid));
-#endif
+# endif
}
int TgKill(pid_t pid, tid_t tid, int sig) {
-#if SANITIZER_LINUX
+# if SANITIZER_LINUX
return internal_syscall(SYSCALL(tgkill), pid, tid, sig);
-#elif SANITIZER_FREEBSD
+# elif SANITIZER_FREEBSD
return internal_syscall(SYSCALL(thr_kill2), pid, tid, sig);
-#elif SANITIZER_SOLARIS
+# elif SANITIZER_SOLARIS
(void)pid;
return thr_kill(tid, sig);
-#endif
+# endif
}
-#endif
+# endif
-#if SANITIZER_GLIBC
+# if SANITIZER_GLIBC
u64 NanoTime() {
kernel_timeval tv;
internal_memset(&tv, 0, sizeof(tv));
@@ -507,19 +579,19 @@ u64 NanoTime() {
uptr internal_clock_gettime(__sanitizer_clockid_t clk_id, void *tp) {
return internal_syscall(SYSCALL(clock_gettime), clk_id, tp);
}
-#elif !SANITIZER_SOLARIS && !SANITIZER_NETBSD
+# elif !SANITIZER_SOLARIS && !SANITIZER_NETBSD
u64 NanoTime() {
struct timespec ts;
clock_gettime(CLOCK_REALTIME, &ts);
return (u64)ts.tv_sec * 1000 * 1000 * 1000 + ts.tv_nsec;
}
-#endif
+# endif
// Like getenv, but reads env directly from /proc (on Linux) or parses the
// 'environ' array (on some others) and does not use libc. This function
// should be called first inside __asan_init.
const char *GetEnv(const char *name) {
-#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_SOLARIS
+# if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_SOLARIS
if (::environ != 0) {
uptr NameLen = internal_strlen(name);
for (char **Env = ::environ; *Env != 0; Env++) {
@@ -528,7 +600,7 @@ const char *GetEnv(const char *name) {
}
}
return 0; // Not found.
-#elif SANITIZER_LINUX
+# elif SANITIZER_LINUX
static char *environ;
static uptr len;
static bool inited;
@@ -538,13 +610,13 @@ const char *GetEnv(const char *name) {
if (!ReadFileToBuffer("/proc/self/environ", &environ, &environ_size, &len))
environ = nullptr;
}
- if (!environ || len == 0) return nullptr;
+ if (!environ || len == 0)
+ return nullptr;
uptr namelen = internal_strlen(name);
const char *p = environ;
while (*p != '\0') { // will happen at the \0\0 that terminates the buffer
// proc file has the format NAME=value\0NAME=value\0NAME=value\0...
- const char* endp =
- (char*)internal_memchr(p, '\0', len - (p - environ));
+ const char *endp = (char *)internal_memchr(p, '\0', len - (p - environ));
if (!endp) // this entry isn't NUL terminated
return nullptr;
else if (!internal_memcmp(p, name, namelen) && p[namelen] == '=') // Match.
@@ -552,18 +624,18 @@ const char *GetEnv(const char *name) {
p = endp + 1;
}
return nullptr; // Not found.
-#else
-#error "Unsupported platform"
-#endif
+# else
+# error "Unsupported platform"
+# endif
}
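
The Linux branch of GetEnv() above hand-parses the NAME=value\0NAME=value\0
...\0\0 format of /proc/self/environ. As a standalone illustration of the same
scan, using plain libc instead of the internal_* wrappers (find_env is a
hypothetical helper):

    #include <string.h>

    // Find `name` in a buffer holding NAME=value\0NAME=value\0...\0\0 and
    // return a pointer to its value, or nullptr if absent.
    static const char *find_env(const char *buf, size_t len, const char *name) {
      size_t namelen = strlen(name);
      for (const char *p = buf; p < buf + len && *p != '\0';) {
        const char *endp = (const char *)memchr(p, '\0', len - (p - buf));
        if (!endp)
          return nullptr;  // entry is not NUL-terminated
        if (!memcmp(p, name, namelen) && p[namelen] == '=')
          return p + namelen + 1;  // point past "NAME="
        p = endp + 1;  // step over the NUL to the next entry
      }
      return nullptr;
    }
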
-#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD && !SANITIZER_GO
+# if !SANITIZER_FREEBSD && !SANITIZER_NETBSD && !SANITIZER_GO
extern "C" {
SANITIZER_WEAK_ATTRIBUTE extern void *__libc_stack_end;
}
-#endif
+# endif
-#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
+# if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
static void ReadNullSepFileToArray(const char *path, char ***arr,
int arr_size) {
char *buff;
@@ -576,20 +648,21 @@ static void ReadNullSepFileToArray(const char *path, char ***arr,
}
(*arr)[0] = buff;
int count, i;
- for (count = 1, i = 1; ; i++) {
+ for (count = 1, i = 1;; i++) {
if (buff[i] == 0) {
- if (buff[i+1] == 0) break;
- (*arr)[count] = &buff[i+1];
+ if (buff[i + 1] == 0)
+ break;
+ (*arr)[count] = &buff[i + 1];
CHECK_LE(count, arr_size - 1); // FIXME: make this more flexible.
count++;
}
}
(*arr)[count] = nullptr;
}
-#endif
+# endif
static void GetArgsAndEnv(char ***argv, char ***envp) {
-#if SANITIZER_FREEBSD
+# if SANITIZER_FREEBSD
// On FreeBSD, retrieving the argument and environment arrays is done via the
// kern.ps_strings sysctl, which returns a pointer to a structure containing
// this information. See also <sys/exec.h>.
@@ -601,30 +674,30 @@ static void GetArgsAndEnv(char ***argv, char ***envp) {
}
*argv = pss->ps_argvstr;
*envp = pss->ps_envstr;
-#elif SANITIZER_NETBSD
+# elif SANITIZER_NETBSD
*argv = __ps_strings->ps_argvstr;
*envp = __ps_strings->ps_envstr;
-#else // SANITIZER_FREEBSD
-#if !SANITIZER_GO
+# else // SANITIZER_FREEBSD
+# if !SANITIZER_GO
if (&__libc_stack_end) {
- uptr* stack_end = (uptr*)__libc_stack_end;
+ uptr *stack_end = (uptr *)__libc_stack_end;
// Normally argc can be obtained from *stack_end, however, on ARM glibc's
// _start clobbers it:
// https://sourceware.org/git/?p=glibc.git;a=blob;f=sysdeps/arm/start.S;hb=refs/heads/release/2.31/master#l75
// Do not special-case ARM and infer argc from argv everywhere.
int argc = 0;
while (stack_end[argc + 1]) argc++;
- *argv = (char**)(stack_end + 1);
- *envp = (char**)(stack_end + argc + 2);
+ *argv = (char **)(stack_end + 1);
+ *envp = (char **)(stack_end + argc + 2);
} else {
-#endif // !SANITIZER_GO
+# endif // !SANITIZER_GO
static const int kMaxArgv = 2000, kMaxEnvp = 2000;
ReadNullSepFileToArray("/proc/self/cmdline", argv, kMaxArgv);
ReadNullSepFileToArray("/proc/self/environ", envp, kMaxEnvp);
-#if !SANITIZER_GO
+# if !SANITIZER_GO
}
-#endif // !SANITIZER_GO
-#endif // SANITIZER_FREEBSD
+# endif // !SANITIZER_GO
+# endif // SANITIZER_FREEBSD
}
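
The __libc_stack_end branch above depends on the System V startup stack
layout, where argc, argv[], and envp[] sit contiguously above the stack end.
A sketch of the walk it performs (standalone illustration, not part of the
patch):

    // Initial process stack, starting at __libc_stack_end:
    //   [argc][argv[0]]...[argv[argc-1]][NULL][envp[0]]...[NULL][auxv...]
    // ARM glibc clobbers *stack_end (argc), so argv is counted instead:
    uptr *stack_end = (uptr *)__libc_stack_end;
    int argc = 0;
    while (stack_end[argc + 1])  // stack_end[1] is argv[0]
      argc++;
    char **argv = (char **)(stack_end + 1);         // skip the argc slot
    char **envp = (char **)(stack_end + argc + 2);  // skip argv[] and its NULL
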
char **GetArgv() {
@@ -639,12 +712,12 @@ char **GetEnviron() {
return envp;
}
-#if !SANITIZER_SOLARIS
+# if !SANITIZER_SOLARIS
void FutexWait(atomic_uint32_t *p, u32 cmp) {
# if SANITIZER_FREEBSD
_umtx_op(p, UMTX_OP_WAIT_UINT, cmp, 0, 0);
# elif SANITIZER_NETBSD
- sched_yield(); /* No userspace futex-like synchronization */
+ sched_yield(); /* No userspace futex-like synchronization */
# else
internal_syscall(SYSCALL(futex), (uptr)p, FUTEX_WAIT_PRIVATE, cmp, 0, 0, 0);
# endif
@@ -654,80 +727,38 @@ void FutexWake(atomic_uint32_t *p, u32 count) {
# if SANITIZER_FREEBSD
_umtx_op(p, UMTX_OP_WAKE, count, 0, 0);
# elif SANITIZER_NETBSD
- /* No userspace futex-like synchronization */
+ /* No userspace futex-like synchronization */
# else
internal_syscall(SYSCALL(futex), (uptr)p, FUTEX_WAKE_PRIVATE, count, 0, 0, 0);
# endif
}
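
FutexWait()/FutexWake() above are the low-level primitives the runtime's
blocking synchronization is built from (they also subsume the hand-rolled
BlockingMutex deleted below). A sketch of how they compose into a one-shot
event (OneShotEvent is a hypothetical helper, not part of the patch):

    // Wait() blocks until Signal() has been called at least once.
    struct OneShotEvent {
      atomic_uint32_t state;  // 0 = not signaled, 1 = signaled

      void Signal() {
        atomic_store(&state, 1, memory_order_release);
        FutexWake(&state, (u32)-1);  // wake every waiter
      }

      void Wait() {
        // Loop: FutexWait returns immediately once state no longer equals 0,
        // and may also wake spuriously.
        while (atomic_load(&state, memory_order_acquire) == 0)
          FutexWait(&state, 0);
      }
    };
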
-enum { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };
-
-BlockingMutex::BlockingMutex() {
- internal_memset(this, 0, sizeof(*this));
-}
-
-void BlockingMutex::Lock() {
- CHECK_EQ(owner_, 0);
- atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
- if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
- return;
- while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) {
-#if SANITIZER_FREEBSD
- _umtx_op(m, UMTX_OP_WAIT_UINT, MtxSleeping, 0, 0);
-#elif SANITIZER_NETBSD
- sched_yield(); /* No userspace futex-like synchronization */
-#else
- internal_syscall(SYSCALL(futex), (uptr)m, FUTEX_WAIT_PRIVATE, MtxSleeping,
- 0, 0, 0);
-#endif
- }
-}
-
-void BlockingMutex::Unlock() {
- atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
- u32 v = atomic_exchange(m, MtxUnlocked, memory_order_release);
- CHECK_NE(v, MtxUnlocked);
- if (v == MtxSleeping) {
-#if SANITIZER_FREEBSD
- _umtx_op(m, UMTX_OP_WAKE, 1, 0, 0);
-#elif SANITIZER_NETBSD
- /* No userspace futex-like synchronization */
-#else
- internal_syscall(SYSCALL(futex), (uptr)m, FUTEX_WAKE_PRIVATE, 1, 0, 0, 0);
-#endif
- }
-}
-
-void BlockingMutex::CheckLocked() const {
- auto m = reinterpret_cast<atomic_uint32_t const *>(&opaque_storage_);
- CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
-}
# endif // !SANITIZER_SOLARIS
// ----------------- sanitizer_linux.h
 // The actual size of this structure is specified by d_reclen.
 // Note that getdents64 uses a different structure format; on Linux the
 // struct below now matches that layout, since internal_getdents() switched
 // to getdents64.
-#if SANITIZER_NETBSD
+# if SANITIZER_NETBSD
// Not used
-#else
+# else
struct linux_dirent {
-#if SANITIZER_X32 || defined(__aarch64__) || SANITIZER_RISCV64
+# if SANITIZER_X32 || SANITIZER_LINUX
u64 d_ino;
u64 d_off;
-#else
- unsigned long d_ino;
- unsigned long d_off;
-#endif
- unsigned short d_reclen;
-#if defined(__aarch64__) || SANITIZER_RISCV64
- unsigned char d_type;
-#endif
- char d_name[256];
+# else
+ unsigned long d_ino;
+ unsigned long d_off;
+# endif
+ unsigned short d_reclen;
+# if SANITIZER_LINUX
+ unsigned char d_type;
+# endif
+ char d_name[256];
};
-#endif
+# endif
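
With internal_getdents() below issuing getdents64 on Linux, the struct above
describes the kernel's variable-length record format: entries are chained by
d_reclen rather than having a fixed size. A sketch of walking a filled buffer
(walk_dirents is a hypothetical helper):

    // `nread` is the byte count internal_getdents() returned for `buf`.
    static void walk_dirents(char *buf, uptr nread) {
      for (uptr off = 0; off < nread;) {
        struct linux_dirent *d = (struct linux_dirent *)(buf + off);
        // d_name is NUL-terminated; on Linux d_type classifies the entry.
        Printf("%s (type %d)\n", d->d_name, (int)d->d_type);
        off += d->d_reclen;  // advance by this record's actual size
      }
    }
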
-#if !SANITIZER_SOLARIS && !SANITIZER_NETBSD
+# if !SANITIZER_SOLARIS && !SANITIZER_NETBSD
// Syscall wrappers.
uptr internal_ptrace(int request, int pid, void *addr, void *data) {
return internal_syscall(SYSCALL(ptrace), request, pid, (uptr)addr,
@@ -739,55 +770,62 @@ uptr internal_waitpid(int pid, int *status, int options) {
0 /* rusage */);
}
-uptr internal_getpid() {
- return internal_syscall(SYSCALL(getpid));
-}
+uptr internal_getpid() { return internal_syscall(SYSCALL(getpid)); }
-uptr internal_getppid() {
- return internal_syscall(SYSCALL(getppid));
-}
+uptr internal_getppid() { return internal_syscall(SYSCALL(getppid)); }
int internal_dlinfo(void *handle, int request, void *p) {
-#if SANITIZER_FREEBSD
+# if SANITIZER_FREEBSD
return dlinfo(handle, request, p);
-#else
+# else
UNIMPLEMENTED();
-#endif
+# endif
}
uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count) {
-#if SANITIZER_FREEBSD
+# if SANITIZER_FREEBSD
return internal_syscall(SYSCALL(getdirentries), fd, (uptr)dirp, count, NULL);
-#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+# elif SANITIZER_LINUX
return internal_syscall(SYSCALL(getdents64), fd, (uptr)dirp, count);
-#else
+# else
return internal_syscall(SYSCALL(getdents), fd, (uptr)dirp, count);
-#endif
+# endif
}
uptr internal_lseek(fd_t fd, OFF_T offset, int whence) {
return internal_syscall(SYSCALL(lseek), fd, offset, whence);
}
-#if SANITIZER_LINUX
+# if SANITIZER_LINUX
uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5) {
return internal_syscall(SYSCALL(prctl), option, arg2, arg3, arg4, arg5);
}
-#endif
+# if defined(__x86_64__)
+# include <asm/unistd_64.h>
+// Currently internal_arch_prctl() is only needed on x86_64.
+uptr internal_arch_prctl(int option, uptr arg2) {
+ return internal_syscall(__NR_arch_prctl, option, arg2);
+}
+# endif
+# endif
uptr internal_sigaltstack(const void *ss, void *oss) {
return internal_syscall(SYSCALL(sigaltstack), (uptr)ss, (uptr)oss);
}
int internal_fork() {
-#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+# if SANITIZER_LINUX
+# if SANITIZER_S390
+ return internal_syscall(SYSCALL(clone), 0, SIGCHLD);
+# else
return internal_syscall(SYSCALL(clone), SIGCHLD, 0);
-#else
+# endif
+# else
return internal_syscall(SYSCALL(fork));
-#endif
+# endif
}
-#if SANITIZER_FREEBSD
+# if SANITIZER_FREEBSD
int internal_sysctl(const int *name, unsigned int namelen, void *oldp,
uptr *oldlenp, const void *newp, uptr newlen) {
return internal_syscall(SYSCALL(__sysctl), name, namelen, oldp,
@@ -802,11 +840,11 @@ int internal_sysctlbyname(const char *sname, void *oldp, uptr *oldlenp,
// followed by sysctl(). To avoid calling the intercepted version and
// asserting if this happens during startup, call the real sysctlnametomib()
// followed by internal_sysctl() if the syscall is not available.
-#ifdef SYS___sysctlbyname
+# ifdef SYS___sysctlbyname
return internal_syscall(SYSCALL(__sysctlbyname), sname,
internal_strlen(sname), oldp, (size_t *)oldlenp, newp,
(size_t)newlen);
-#else
+# else
static decltype(sysctlnametomib) *real_sysctlnametomib = nullptr;
if (!real_sysctlnametomib)
real_sysctlnametomib =
@@ -818,12 +856,12 @@ int internal_sysctlbyname(const char *sname, void *oldp, uptr *oldlenp,
if (real_sysctlnametomib(sname, oid, &len) == -1)
return (-1);
return internal_sysctl(oid, len, oldp, oldlenp, newp, newlen);
-#endif
+# endif
}
-#endif
+# endif
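
The fallback path above mirrors the public FreeBSD idiom of resolving a name
to a MIB vector once and then querying it. For reference, the equivalent with
plain libc calls (query_pagesize is a hypothetical example):

    #include <sys/types.h>
    #include <sys/sysctl.h>

    static int query_pagesize(void) {
      int mib[CTL_MAXNAME];
      size_t miblen = CTL_MAXNAME;
      if (sysctlnametomib("hw.pagesize", mib, &miblen) == -1)
        return -1;
      int pagesize = 0;
      size_t len = sizeof(pagesize);
      if (sysctl(mib, (u_int)miblen, &pagesize, &len, nullptr, 0) == -1)
        return -1;
      return pagesize;
    }
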
-#if SANITIZER_LINUX
-#define SA_RESTORER 0x04000000
+# if SANITIZER_LINUX
+# define SA_RESTORER 0x04000000
// Doesn't set sa_restorer if the caller did not set it, so use with caution
 // (see below).
int internal_sigaction_norestorer(int signum, const void *act, void *oldact) {
@@ -847,15 +885,15 @@ int internal_sigaction_norestorer(int signum, const void *act, void *oldact) {
// rt_sigaction, so we need to do the same (we'll need to reimplement the
// restorers; for x86_64 the restorer address can be obtained from
// oldact->sa_restorer upon a call to sigaction(xxx, NULL, oldact).
-#if !SANITIZER_ANDROID || !SANITIZER_MIPS32
+# if !SANITIZER_ANDROID || !SANITIZER_MIPS32
k_act.sa_restorer = u_act->sa_restorer;
-#endif
+# endif
}
uptr result = internal_syscall(SYSCALL(rt_sigaction), (uptr)signum,
- (uptr)(u_act ? &k_act : nullptr),
- (uptr)(u_oldact ? &k_oldact : nullptr),
- (uptr)sizeof(__sanitizer_kernel_sigset_t));
+ (uptr)(u_act ? &k_act : nullptr),
+ (uptr)(u_oldact ? &k_oldact : nullptr),
+ (uptr)sizeof(__sanitizer_kernel_sigset_t));
if ((result == 0) && u_oldact) {
u_oldact->handler = k_oldact.handler;
@@ -863,24 +901,24 @@ int internal_sigaction_norestorer(int signum, const void *act, void *oldact) {
internal_memcpy(&u_oldact->sa_mask, &k_oldact.sa_mask,
sizeof(__sanitizer_kernel_sigset_t));
u_oldact->sa_flags = k_oldact.sa_flags;
-#if !SANITIZER_ANDROID || !SANITIZER_MIPS32
+# if !SANITIZER_ANDROID || !SANITIZER_MIPS32
u_oldact->sa_restorer = k_oldact.sa_restorer;
-#endif
+# endif
}
return result;
}
-#endif // SANITIZER_LINUX
+# endif // SANITIZER_LINUX
uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
__sanitizer_sigset_t *oldset) {
-#if SANITIZER_FREEBSD
+# if SANITIZER_FREEBSD
return internal_syscall(SYSCALL(sigprocmask), how, set, oldset);
-#else
+# else
__sanitizer_kernel_sigset_t *k_set = (__sanitizer_kernel_sigset_t *)set;
__sanitizer_kernel_sigset_t *k_oldset = (__sanitizer_kernel_sigset_t *)oldset;
return internal_syscall(SYSCALL(rt_sigprocmask), (uptr)how, (uptr)k_set,
(uptr)k_oldset, sizeof(__sanitizer_kernel_sigset_t));
-#endif
+# endif
}
void internal_sigfillset(__sanitizer_sigset_t *set) {
@@ -891,7 +929,7 @@ void internal_sigemptyset(__sanitizer_sigset_t *set) {
internal_memset(set, 0, sizeof(*set));
}
-#if SANITIZER_LINUX
+# if SANITIZER_LINUX
void internal_sigdelset(__sanitizer_sigset_t *set, int signum) {
signum -= 1;
CHECK_GE(signum, 0);
@@ -911,7 +949,11 @@ bool internal_sigismember(__sanitizer_sigset_t *set, int signum) {
const uptr bit = signum % (sizeof(k_set->sig[0]) * 8);
return k_set->sig[idx] & ((uptr)1 << bit);
}
-#elif SANITIZER_FREEBSD
+# elif SANITIZER_FREEBSD
+uptr internal_procctl(int type, int id, int cmd, void *data) {
+ return internal_syscall(SYSCALL(procctl), type, id, cmd, data);
+}
+
void internal_sigdelset(__sanitizer_sigset_t *set, int signum) {
sigset_t *rset = reinterpret_cast<sigset_t *>(set);
sigdelset(rset, signum);
@@ -921,10 +963,10 @@ bool internal_sigismember(__sanitizer_sigset_t *set, int signum) {
sigset_t *rset = reinterpret_cast<sigset_t *>(set);
return sigismember(rset, signum);
}
-#endif
-#endif // !SANITIZER_SOLARIS
+# endif
+# endif // !SANITIZER_SOLARIS
-#if !SANITIZER_NETBSD
+# if !SANITIZER_NETBSD
// ThreadLister implementation.
ThreadLister::ThreadLister(pid_t pid) : pid_(pid), buffer_(4096) {
char task_directory_path[80];
@@ -1011,25 +1053,26 @@ ThreadLister::~ThreadLister() {
if (!internal_iserror(descriptor_))
internal_close(descriptor_);
}
-#endif
+# endif
-#if SANITIZER_WORDSIZE == 32
+# if SANITIZER_WORDSIZE == 32
// Take care of unusable kernel area in top gigabyte.
static uptr GetKernelAreaSize() {
-#if SANITIZER_LINUX && !SANITIZER_X32
+# if SANITIZER_LINUX && !SANITIZER_X32
const uptr gbyte = 1UL << 30;
  // First, check whether any writable segments are
  // mapped into the top gigabyte (e.g. the stack).
- MemoryMappingLayout proc_maps(/*cache_enabled*/true);
+ MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
if (proc_maps.Error())
return 0;
MemoryMappedSegment segment;
while (proc_maps.Next(&segment)) {
- if ((segment.end >= 3 * gbyte) && segment.IsWritable()) return 0;
+ if ((segment.end >= 3 * gbyte) && segment.IsWritable())
+ return 0;
}
-#if !SANITIZER_ANDROID
+# if !SANITIZER_ANDROID
  // Even if nothing is mapped, the top gigabyte may still be accessible
  // if we are running on a 64-bit kernel.
// Uname may report misleading results if personality type
@@ -1039,21 +1082,21 @@ static uptr GetKernelAreaSize() {
if (!(pers & PER_MASK) && internal_uname(&uname_info) == 0 &&
internal_strstr(uname_info.machine, "64"))
return 0;
-#endif // SANITIZER_ANDROID
+# endif // SANITIZER_ANDROID
// Top gigabyte is reserved for kernel.
return gbyte;
-#else
+# else
return 0;
-#endif // SANITIZER_LINUX && !SANITIZER_X32
+# endif // SANITIZER_LINUX && !SANITIZER_X32
}
-#endif // SANITIZER_WORDSIZE == 32
+# endif // SANITIZER_WORDSIZE == 32
uptr GetMaxVirtualAddress() {
-#if SANITIZER_NETBSD && defined(__x86_64__)
+# if SANITIZER_NETBSD && defined(__x86_64__)
return 0x7f7ffffff000ULL; // (0x00007f8000000000 - PAGE_SIZE)
-#elif SANITIZER_WORDSIZE == 64
-# if defined(__powerpc64__) || defined(__aarch64__)
+# elif SANITIZER_WORDSIZE == 64
+# if defined(__powerpc64__) || defined(__aarch64__) || defined(__loongarch__)
// On PowerPC64 we have two different address space layouts: 44- and 46-bit.
// We somehow need to figure out which one we are using now and choose
// one of 0x00000fffffffffffUL and 0x00003fffffffffffUL.
@@ -1061,97 +1104,99 @@ uptr GetMaxVirtualAddress() {
// of the address space, so simply checking the stack address is not enough.
// This should (does) work for both PowerPC64 Endian modes.
// Similarly, aarch64 has multiple address space layouts: 39, 42 and 47-bit.
+  // loongarch64 also has multiple address space layouts; the default is 47-bit.
return (1ULL << (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1)) - 1;
-#elif SANITIZER_RISCV64
+# elif SANITIZER_RISCV64
return (1ULL << 38) - 1;
-# elif defined(__mips64)
+# elif SANITIZER_MIPS64
return (1ULL << 40) - 1; // 0x000000ffffffffffUL;
-# elif defined(__s390x__)
+# elif defined(__s390x__)
return (1ULL << 53) - 1; // 0x001fffffffffffffUL;
-#elif defined(__sparc__)
+# elif defined(__sparc__)
return ~(uptr)0;
-# else
+# else
return (1ULL << 47) - 1; // 0x00007fffffffffffUL;
-# endif
-#else // SANITIZER_WORDSIZE == 32
-# if defined(__s390__)
+# endif
+# else // SANITIZER_WORDSIZE == 32
+# if defined(__s390__)
return (1ULL << 31) - 1; // 0x7fffffff;
-# else
+# else
return (1ULL << 32) - 1; // 0xffffffff;
-# endif
-#endif // SANITIZER_WORDSIZE
+# endif
+# endif // SANITIZER_WORDSIZE
}
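
The PowerPC64/aarch64/loongarch64 branch above infers the address-space size
from the position of the current stack frame. A worked example of the trick,
assuming a stack address near the top of a 47-bit layout:

    // A frame address like 0x7ffdxxxxxxxx has its most significant set bit
    // at index 46, so:
    //   (1ULL << (46 + 1)) - 1 == 0x00007fffffffffff  // 47-bit VA maximum
    uptr GuessMaxVA() {  // hypothetical standalone version
      return (1ULL << (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1)) - 1;
    }
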
uptr GetMaxUserVirtualAddress() {
uptr addr = GetMaxVirtualAddress();
-#if SANITIZER_WORDSIZE == 32 && !defined(__s390__)
+# if SANITIZER_WORDSIZE == 32 && !defined(__s390__)
if (!common_flags()->full_address_space)
addr -= GetKernelAreaSize();
CHECK_LT(reinterpret_cast<uptr>(&addr), addr);
-#endif
+# endif
return addr;
}
-#if !SANITIZER_ANDROID
+# if !SANITIZER_ANDROID
uptr GetPageSize() {
-#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__i386__)) && \
- defined(EXEC_PAGESIZE)
+# if SANITIZER_LINUX && (defined(__x86_64__) || defined(__i386__)) && \
+ defined(EXEC_PAGESIZE)
return EXEC_PAGESIZE;
-#elif SANITIZER_FREEBSD || SANITIZER_NETBSD
-// Use sysctl as sysconf can trigger interceptors internally.
+# elif SANITIZER_FREEBSD || SANITIZER_NETBSD
+ // Use sysctl as sysconf can trigger interceptors internally.
int pz = 0;
uptr pzl = sizeof(pz);
int mib[2] = {CTL_HW, HW_PAGESIZE};
int rv = internal_sysctl(mib, 2, &pz, &pzl, nullptr, 0);
CHECK_EQ(rv, 0);
return (uptr)pz;
-#elif SANITIZER_USE_GETAUXVAL
+# elif SANITIZER_USE_GETAUXVAL
return getauxval(AT_PAGESZ);
-#else
+# else
return sysconf(_SC_PAGESIZE); // EXEC_PAGESIZE may not be trustworthy.
-#endif
+# endif
}
-#endif // !SANITIZER_ANDROID
+# endif // !SANITIZER_ANDROID
-uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
-#if SANITIZER_SOLARIS
+uptr ReadBinaryName(/*out*/ char *buf, uptr buf_len) {
+# if SANITIZER_SOLARIS
const char *default_module_name = getexecname();
CHECK_NE(default_module_name, NULL);
return internal_snprintf(buf, buf_len, "%s", default_module_name);
-#else
-#if SANITIZER_FREEBSD || SANITIZER_NETBSD
-#if SANITIZER_FREEBSD
+# else
+# if SANITIZER_FREEBSD || SANITIZER_NETBSD
+# if SANITIZER_FREEBSD
const int Mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1};
-#else
+# else
const int Mib[4] = {CTL_KERN, KERN_PROC_ARGS, -1, KERN_PROC_PATHNAME};
-#endif
+# endif
const char *default_module_name = "kern.proc.pathname";
uptr Size = buf_len;
bool IsErr =
(internal_sysctl(Mib, ARRAY_SIZE(Mib), buf, &Size, NULL, 0) != 0);
int readlink_error = IsErr ? errno : 0;
uptr module_name_len = Size;
-#else
+# else
const char *default_module_name = "/proc/self/exe";
- uptr module_name_len = internal_readlink(
- default_module_name, buf, buf_len);
+ uptr module_name_len = internal_readlink(default_module_name, buf, buf_len);
int readlink_error;
bool IsErr = internal_iserror(module_name_len, &readlink_error);
-#endif // SANITIZER_SOLARIS
+# endif  // SANITIZER_FREEBSD || SANITIZER_NETBSD
if (IsErr) {
// We can't read binary name for some reason, assume it's unknown.
- Report("WARNING: reading executable name failed with errno %d, "
- "some stack frames may not be symbolized\n", readlink_error);
- module_name_len = internal_snprintf(buf, buf_len, "%s",
- default_module_name);
+ Report(
+ "WARNING: reading executable name failed with errno %d, "
+ "some stack frames may not be symbolized\n",
+ readlink_error);
+ module_name_len =
+ internal_snprintf(buf, buf_len, "%s", default_module_name);
CHECK_LT(module_name_len, buf_len);
}
return module_name_len;
-#endif
+# endif
}
uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len) {
-#if SANITIZER_LINUX
+# if SANITIZER_LINUX
char *tmpbuf;
uptr tmpsize;
uptr tmplen;
@@ -1161,7 +1206,7 @@ uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len) {
UnmapOrDie(tmpbuf, tmpsize);
return internal_strlen(buf);
}
-#endif
+# endif
return ReadBinaryName(buf, buf_len);
}
@@ -1171,20 +1216,22 @@ bool LibraryNameIs(const char *full_name, const char *base_name) {
// Strip path.
while (*name != '\0') name++;
while (name > full_name && *name != '/') name--;
- if (*name == '/') name++;
+ if (*name == '/')
+ name++;
uptr base_name_length = internal_strlen(base_name);
- if (internal_strncmp(name, base_name, base_name_length)) return false;
+ if (internal_strncmp(name, base_name, base_name_length))
+ return false;
return (name[base_name_length] == '-' || name[base_name_length] == '.');
}
-#if !SANITIZER_ANDROID
+# if !SANITIZER_ANDROID
// Call cb for each region mapped by map.
void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr)) {
CHECK_NE(map, nullptr);
-#if !SANITIZER_FREEBSD
+# if !SANITIZER_FREEBSD
typedef ElfW(Phdr) Elf_Phdr;
typedef ElfW(Ehdr) Elf_Ehdr;
-#endif // !SANITIZER_FREEBSD
+# endif // !SANITIZER_FREEBSD
char *base = (char *)map->l_addr;
Elf_Ehdr *ehdr = (Elf_Ehdr *)base;
char *phdrs = base + ehdr->e_phoff;
@@ -1216,9 +1263,10 @@ void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr)) {
}
}
}
-#endif
+# endif
-#if defined(__x86_64__) && SANITIZER_LINUX
+# if SANITIZER_LINUX
+# if defined(__x86_64__)
// We cannot use glibc's clone wrapper, because it messes with the child
// task's TLS. It writes the PID and TID of the child task to its thread
// descriptor, but in our case the child task shares the thread descriptor with
@@ -1237,50 +1285,46 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
register void *r8 __asm__("r8") = newtls;
register int *r10 __asm__("r10") = child_tidptr;
__asm__ __volatile__(
- /* %rax = syscall(%rax = SYSCALL(clone),
- * %rdi = flags,
- * %rsi = child_stack,
- * %rdx = parent_tidptr,
- * %r8 = new_tls,
- * %r10 = child_tidptr)
- */
- "syscall\n"
-
- /* if (%rax != 0)
- * return;
- */
- "testq %%rax,%%rax\n"
- "jnz 1f\n"
-
- /* In the child. Terminate unwind chain. */
- // XXX: We should also terminate the CFI unwind chain
- // here. Unfortunately clang 3.2 doesn't support the
- // necessary CFI directives, so we skip that part.
- "xorq %%rbp,%%rbp\n"
-
- /* Call "fn(arg)". */
- "popq %%rax\n"
- "popq %%rdi\n"
- "call *%%rax\n"
-
- /* Call _exit(%rax). */
- "movq %%rax,%%rdi\n"
- "movq %2,%%rax\n"
- "syscall\n"
-
- /* Return to parent. */
- "1:\n"
- : "=a" (res)
- : "a"(SYSCALL(clone)), "i"(SYSCALL(exit)),
- "S"(child_stack),
- "D"(flags),
- "d"(parent_tidptr),
- "r"(r8),
- "r"(r10)
- : "memory", "r11", "rcx");
+ /* %rax = syscall(%rax = SYSCALL(clone),
+ * %rdi = flags,
+ * %rsi = child_stack,
+ * %rdx = parent_tidptr,
+ * %r8 = new_tls,
+ * %r10 = child_tidptr)
+ */
+ "syscall\n"
+
+ /* if (%rax != 0)
+ * return;
+ */
+ "testq %%rax,%%rax\n"
+ "jnz 1f\n"
+
+ /* In the child. Terminate unwind chain. */
+ // XXX: We should also terminate the CFI unwind chain
+ // here. Unfortunately clang 3.2 doesn't support the
+ // necessary CFI directives, so we skip that part.
+ "xorq %%rbp,%%rbp\n"
+
+ /* Call "fn(arg)". */
+ "popq %%rax\n"
+ "popq %%rdi\n"
+ "call *%%rax\n"
+
+ /* Call _exit(%rax). */
+ "movq %%rax,%%rdi\n"
+ "movq %2,%%rax\n"
+ "syscall\n"
+
+ /* Return to parent. */
+ "1:\n"
+ : "=a"(res)
+ : "a"(SYSCALL(clone)), "i"(SYSCALL(exit)), "S"(child_stack), "D"(flags),
+ "d"(parent_tidptr), "r"(r8), "r"(r10)
+ : "memory", "r11", "rcx");
return res;
}
-#elif defined(__mips__)
+# elif defined(__mips__)
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr) {
long long res;
@@ -1295,68 +1339,63 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
  // We don't have proper CFI directives here because it requires a lot of code
// for very marginal benefits.
__asm__ __volatile__(
- /* $v0 = syscall($v0 = __NR_clone,
- * $a0 = flags,
- * $a1 = child_stack,
- * $a2 = parent_tidptr,
- * $a3 = new_tls,
- * $a4 = child_tidptr)
- */
- ".cprestore 16;\n"
- "move $4,%1;\n"
- "move $5,%2;\n"
- "move $6,%3;\n"
- "move $7,%4;\n"
- /* Store the fifth argument on stack
- * if we are using 32-bit abi.
- */
-#if SANITIZER_WORDSIZE == 32
- "lw %5,16($29);\n"
-#else
- "move $8,%5;\n"
-#endif
- "li $2,%6;\n"
- "syscall;\n"
-
- /* if ($v0 != 0)
- * return;
- */
- "bnez $2,1f;\n"
-
- /* Call "fn(arg)". */
-#if SANITIZER_WORDSIZE == 32
-#ifdef __BIG_ENDIAN__
- "lw $25,4($29);\n"
- "lw $4,12($29);\n"
-#else
- "lw $25,0($29);\n"
- "lw $4,8($29);\n"
-#endif
-#else
- "ld $25,0($29);\n"
- "ld $4,8($29);\n"
-#endif
- "jal $25;\n"
-
- /* Call _exit($v0). */
- "move $4,$2;\n"
- "li $2,%7;\n"
- "syscall;\n"
-
- /* Return to parent. */
- "1:\n"
- : "=r" (res)
- : "r"(flags),
- "r"(child_stack),
- "r"(parent_tidptr),
- "r"(a3),
- "r"(a4),
- "i"(__NR_clone),
- "i"(__NR_exit)
- : "memory", "$29" );
+ /* $v0 = syscall($v0 = __NR_clone,
+ * $a0 = flags,
+ * $a1 = child_stack,
+ * $a2 = parent_tidptr,
+ * $a3 = new_tls,
+ * $a4 = child_tidptr)
+ */
+ ".cprestore 16;\n"
+ "move $4,%1;\n"
+ "move $5,%2;\n"
+ "move $6,%3;\n"
+ "move $7,%4;\n"
+ /* Store the fifth argument on stack
+ * if we are using 32-bit abi.
+ */
+# if SANITIZER_WORDSIZE == 32
+ "lw %5,16($29);\n"
+# else
+ "move $8,%5;\n"
+# endif
+ "li $2,%6;\n"
+ "syscall;\n"
+
+ /* if ($v0 != 0)
+ * return;
+ */
+ "bnez $2,1f;\n"
+
+ /* Call "fn(arg)". */
+# if SANITIZER_WORDSIZE == 32
+# ifdef __BIG_ENDIAN__
+ "lw $25,4($29);\n"
+ "lw $4,12($29);\n"
+# else
+ "lw $25,0($29);\n"
+ "lw $4,8($29);\n"
+# endif
+# else
+ "ld $25,0($29);\n"
+ "ld $4,8($29);\n"
+# endif
+ "jal $25;\n"
+
+ /* Call _exit($v0). */
+ "move $4,$2;\n"
+ "li $2,%7;\n"
+ "syscall;\n"
+
+ /* Return to parent. */
+ "1:\n"
+ : "=r"(res)
+ : "r"(flags), "r"(child_stack), "r"(parent_tidptr), "r"(a3), "r"(a4),
+ "i"(__NR_clone), "i"(__NR_exit)
+ : "memory", "$29");
return res;
}
-#elif SANITIZER_RISCV64
+# elif SANITIZER_RISCV64
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr) {
if (!fn || !child_stack)
@@ -1397,10 +1436,10 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
: "memory");
return res;
}
-#elif defined(__aarch64__)
+# elif defined(__aarch64__)
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr) {
- long long res;
+ register long long res __asm__("x0");
if (!fn || !child_stack)
return -EINVAL;
CHECK_EQ(0, (uptr)child_stack % 16);
@@ -1408,156 +1447,186 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
((unsigned long long *)child_stack)[0] = (uptr)fn;
((unsigned long long *)child_stack)[1] = (uptr)arg;
- register int (*__fn)(void *) __asm__("x0") = fn;
+ register int (*__fn)(void *) __asm__("x0") = fn;
register void *__stack __asm__("x1") = child_stack;
- register int __flags __asm__("x2") = flags;
- register void *__arg __asm__("x3") = arg;
- register int *__ptid __asm__("x4") = parent_tidptr;
- register void *__tls __asm__("x5") = newtls;
- register int *__ctid __asm__("x6") = child_tidptr;
+ register int __flags __asm__("x2") = flags;
+ register void *__arg __asm__("x3") = arg;
+ register int *__ptid __asm__("x4") = parent_tidptr;
+ register void *__tls __asm__("x5") = newtls;
+ register int *__ctid __asm__("x6") = child_tidptr;
+
+ __asm__ __volatile__(
+ "mov x0,x2\n" /* flags */
+ "mov x2,x4\n" /* ptid */
+ "mov x3,x5\n" /* tls */
+ "mov x4,x6\n" /* ctid */
+ "mov x8,%9\n" /* clone */
+
+ "svc 0x0\n"
+
+ /* if (%r0 != 0)
+ * return %r0;
+ */
+ "cmp x0, #0\n"
+ "bne 1f\n"
+
+ /* In the child, now. Call "fn(arg)". */
+ "ldp x1, x0, [sp], #16\n"
+ "blr x1\n"
+
+ /* Call _exit(%r0). */
+ "mov x8, %10\n"
+ "svc 0x0\n"
+ "1:\n"
+
+ : "=r"(res)
+ : "i"(-EINVAL), "r"(__fn), "r"(__stack), "r"(__flags), "r"(__arg),
+ "r"(__ptid), "r"(__tls), "r"(__ctid), "i"(__NR_clone), "i"(__NR_exit)
+ : "x30", "memory");
+ return res;
+}
+# elif SANITIZER_LOONGARCH64
+uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
+ int *parent_tidptr, void *newtls, int *child_tidptr) {
+ if (!fn || !child_stack)
+ return -EINVAL;
+
+ CHECK_EQ(0, (uptr)child_stack % 16);
+
+ register int res __asm__("$a0");
+ register int __flags __asm__("$a0") = flags;
+ register void *__stack __asm__("$a1") = child_stack;
+ register int *__ptid __asm__("$a2") = parent_tidptr;
+ register int *__ctid __asm__("$a3") = child_tidptr;
+ register void *__tls __asm__("$a4") = newtls;
+ register int (*__fn)(void *) __asm__("$a5") = fn;
+ register void *__arg __asm__("$a6") = arg;
+ register int nr_clone __asm__("$a7") = __NR_clone;
__asm__ __volatile__(
- "mov x0,x2\n" /* flags */
- "mov x2,x4\n" /* ptid */
- "mov x3,x5\n" /* tls */
- "mov x4,x6\n" /* ctid */
- "mov x8,%9\n" /* clone */
-
- "svc 0x0\n"
-
- /* if (%r0 != 0)
- * return %r0;
- */
- "cmp x0, #0\n"
- "bne 1f\n"
-
- /* In the child, now. Call "fn(arg)". */
- "ldp x1, x0, [sp], #16\n"
- "blr x1\n"
-
- /* Call _exit(%r0). */
- "mov x8, %10\n"
- "svc 0x0\n"
- "1:\n"
-
- : "=r" (res)
- : "i"(-EINVAL),
- "r"(__fn), "r"(__stack), "r"(__flags), "r"(__arg),
- "r"(__ptid), "r"(__tls), "r"(__ctid),
- "i"(__NR_clone), "i"(__NR_exit)
- : "x30", "memory");
+ "syscall 0\n"
+
+ // if ($a0 != 0)
+ // return $a0;
+ "bnez $a0, 1f\n"
+
+ // In the child, now. Call "fn(arg)".
+ "move $a0, $a6\n"
+ "jirl $ra, $a5, 0\n"
+
+ // Call _exit($a0).
+ "addi.d $a7, $zero, %9\n"
+ "syscall 0\n"
+
+ "1:\n"
+
+ : "=r"(res)
+ : "0"(__flags), "r"(__stack), "r"(__ptid), "r"(__ctid), "r"(__tls),
+ "r"(__fn), "r"(__arg), "r"(nr_clone), "i"(__NR_exit)
+ : "memory", "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7",
+ "$t8");
return res;
}
-#elif defined(__powerpc64__)
+# elif defined(__powerpc64__)
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
- int *parent_tidptr, void *newtls, int *child_tidptr) {
+ int *parent_tidptr, void *newtls, int *child_tidptr) {
long long res;
// Stack frame structure.
-#if SANITIZER_PPC64V1
-// Back chain == 0 (SP + 112)
-// Frame (112 bytes):
-// Parameter save area (SP + 48), 8 doublewords
-// TOC save area (SP + 40)
-// Link editor doubleword (SP + 32)
-// Compiler doubleword (SP + 24)
-// LR save area (SP + 16)
-// CR save area (SP + 8)
-// Back chain (SP + 0)
-# define FRAME_SIZE 112
-# define FRAME_TOC_SAVE_OFFSET 40
-#elif SANITIZER_PPC64V2
-// Back chain == 0 (SP + 32)
-// Frame (32 bytes):
-// TOC save area (SP + 24)
-// LR save area (SP + 16)
-// CR save area (SP + 8)
-// Back chain (SP + 0)
-# define FRAME_SIZE 32
-# define FRAME_TOC_SAVE_OFFSET 24
-#else
-# error "Unsupported PPC64 ABI"
-#endif
+# if SANITIZER_PPC64V1
+ // Back chain == 0 (SP + 112)
+ // Frame (112 bytes):
+ // Parameter save area (SP + 48), 8 doublewords
+ // TOC save area (SP + 40)
+ // Link editor doubleword (SP + 32)
+ // Compiler doubleword (SP + 24)
+ // LR save area (SP + 16)
+ // CR save area (SP + 8)
+ // Back chain (SP + 0)
+# define FRAME_SIZE 112
+# define FRAME_TOC_SAVE_OFFSET 40
+# elif SANITIZER_PPC64V2
+ // Back chain == 0 (SP + 32)
+ // Frame (32 bytes):
+ // TOC save area (SP + 24)
+ // LR save area (SP + 16)
+ // CR save area (SP + 8)
+ // Back chain (SP + 0)
+# define FRAME_SIZE 32
+# define FRAME_TOC_SAVE_OFFSET 24
+# else
+# error "Unsupported PPC64 ABI"
+# endif
if (!fn || !child_stack)
return -EINVAL;
CHECK_EQ(0, (uptr)child_stack % 16);
register int (*__fn)(void *) __asm__("r3") = fn;
- register void *__cstack __asm__("r4") = child_stack;
- register int __flags __asm__("r5") = flags;
- register void *__arg __asm__("r6") = arg;
- register int *__ptidptr __asm__("r7") = parent_tidptr;
- register void *__newtls __asm__("r8") = newtls;
- register int *__ctidptr __asm__("r9") = child_tidptr;
-
- __asm__ __volatile__(
- /* fn and arg are saved across the syscall */
- "mr 28, %5\n\t"
- "mr 27, %8\n\t"
-
- /* syscall
- r0 == __NR_clone
- r3 == flags
- r4 == child_stack
- r5 == parent_tidptr
- r6 == newtls
- r7 == child_tidptr */
- "mr 3, %7\n\t"
- "mr 5, %9\n\t"
- "mr 6, %10\n\t"
- "mr 7, %11\n\t"
- "li 0, %3\n\t"
- "sc\n\t"
-
- /* Test if syscall was successful */
- "cmpdi cr1, 3, 0\n\t"
- "crandc cr1*4+eq, cr1*4+eq, cr0*4+so\n\t"
- "bne- cr1, 1f\n\t"
-
- /* Set up stack frame */
- "li 29, 0\n\t"
- "stdu 29, -8(1)\n\t"
- "stdu 1, -%12(1)\n\t"
- /* Do the function call */
- "std 2, %13(1)\n\t"
-#if SANITIZER_PPC64V1
- "ld 0, 0(28)\n\t"
- "ld 2, 8(28)\n\t"
- "mtctr 0\n\t"
-#elif SANITIZER_PPC64V2
- "mr 12, 28\n\t"
- "mtctr 12\n\t"
-#else
-# error "Unsupported PPC64 ABI"
-#endif
- "mr 3, 27\n\t"
- "bctrl\n\t"
- "ld 2, %13(1)\n\t"
-
- /* Call _exit(r3) */
- "li 0, %4\n\t"
- "sc\n\t"
-
- /* Return to parent */
- "1:\n\t"
- "mr %0, 3\n\t"
- : "=r" (res)
- : "0" (-1),
- "i" (EINVAL),
- "i" (__NR_clone),
- "i" (__NR_exit),
- "r" (__fn),
- "r" (__cstack),
- "r" (__flags),
- "r" (__arg),
- "r" (__ptidptr),
- "r" (__newtls),
- "r" (__ctidptr),
- "i" (FRAME_SIZE),
- "i" (FRAME_TOC_SAVE_OFFSET)
- : "cr0", "cr1", "memory", "ctr", "r0", "r27", "r28", "r29");
+ register void *__cstack __asm__("r4") = child_stack;
+ register int __flags __asm__("r5") = flags;
+ register void *__arg __asm__("r6") = arg;
+ register int *__ptidptr __asm__("r7") = parent_tidptr;
+ register void *__newtls __asm__("r8") = newtls;
+ register int *__ctidptr __asm__("r9") = child_tidptr;
+
+ __asm__ __volatile__(
+ /* fn and arg are saved across the syscall */
+ "mr 28, %5\n\t"
+ "mr 27, %8\n\t"
+
+ /* syscall
+ r0 == __NR_clone
+ r3 == flags
+ r4 == child_stack
+ r5 == parent_tidptr
+ r6 == newtls
+ r7 == child_tidptr */
+ "mr 3, %7\n\t"
+ "mr 5, %9\n\t"
+ "mr 6, %10\n\t"
+ "mr 7, %11\n\t"
+ "li 0, %3\n\t"
+ "sc\n\t"
+
+ /* Test if syscall was successful */
+ "cmpdi cr1, 3, 0\n\t"
+ "crandc cr1*4+eq, cr1*4+eq, cr0*4+so\n\t"
+ "bne- cr1, 1f\n\t"
+
+ /* Set up stack frame */
+ "li 29, 0\n\t"
+ "stdu 29, -8(1)\n\t"
+ "stdu 1, -%12(1)\n\t"
+ /* Do the function call */
+ "std 2, %13(1)\n\t"
+# if SANITIZER_PPC64V1
+ "ld 0, 0(28)\n\t"
+ "ld 2, 8(28)\n\t"
+ "mtctr 0\n\t"
+# elif SANITIZER_PPC64V2
+ "mr 12, 28\n\t"
+ "mtctr 12\n\t"
+# else
+# error "Unsupported PPC64 ABI"
+# endif
+ "mr 3, 27\n\t"
+ "bctrl\n\t"
+ "ld 2, %13(1)\n\t"
+
+ /* Call _exit(r3) */
+ "li 0, %4\n\t"
+ "sc\n\t"
+
+ /* Return to parent */
+ "1:\n\t"
+ "mr %0, 3\n\t"
+ : "=r"(res)
+ : "0"(-1), "i"(EINVAL), "i"(__NR_clone), "i"(__NR_exit), "r"(__fn),
+ "r"(__cstack), "r"(__flags), "r"(__arg), "r"(__ptidptr), "r"(__newtls),
+ "r"(__ctidptr), "i"(FRAME_SIZE), "i"(FRAME_TOC_SAVE_OFFSET)
+ : "cr0", "cr1", "memory", "ctr", "r0", "r27", "r28", "r29");
return res;
}
-#elif defined(__i386__) && SANITIZER_LINUX
+# elif defined(__i386__)
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr) {
int res;
@@ -1570,59 +1639,56 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
((unsigned int *)child_stack)[2] = (uptr)fn;
((unsigned int *)child_stack)[3] = (uptr)arg;
__asm__ __volatile__(
- /* %eax = syscall(%eax = SYSCALL(clone),
- * %ebx = flags,
- * %ecx = child_stack,
- * %edx = parent_tidptr,
- * %esi = new_tls,
- * %edi = child_tidptr)
- */
-
- /* Obtain flags */
- "movl (%%ecx), %%ebx\n"
- /* Do the system call */
- "pushl %%ebx\n"
- "pushl %%esi\n"
- "pushl %%edi\n"
- /* Remember the flag value. */
- "movl %%ebx, (%%ecx)\n"
- "int $0x80\n"
- "popl %%edi\n"
- "popl %%esi\n"
- "popl %%ebx\n"
-
- /* if (%eax != 0)
- * return;
- */
-
- "test %%eax,%%eax\n"
- "jnz 1f\n"
-
- /* terminate the stack frame */
- "xorl %%ebp,%%ebp\n"
- /* Call FN. */
- "call *%%ebx\n"
-#ifdef PIC
- "call here\n"
- "here:\n"
- "popl %%ebx\n"
- "addl $_GLOBAL_OFFSET_TABLE_+[.-here], %%ebx\n"
-#endif
- /* Call exit */
- "movl %%eax, %%ebx\n"
- "movl %2, %%eax\n"
- "int $0x80\n"
- "1:\n"
- : "=a" (res)
- : "a"(SYSCALL(clone)), "i"(SYSCALL(exit)),
- "c"(child_stack),
- "d"(parent_tidptr),
- "S"(newtls),
- "D"(child_tidptr)
- : "memory");
+ /* %eax = syscall(%eax = SYSCALL(clone),
+ * %ebx = flags,
+ * %ecx = child_stack,
+ * %edx = parent_tidptr,
+ * %esi = new_tls,
+ * %edi = child_tidptr)
+ */
+
+ /* Obtain flags */
+ "movl (%%ecx), %%ebx\n"
+ /* Do the system call */
+ "pushl %%ebx\n"
+ "pushl %%esi\n"
+ "pushl %%edi\n"
+ /* Remember the flag value. */
+ "movl %%ebx, (%%ecx)\n"
+ "int $0x80\n"
+ "popl %%edi\n"
+ "popl %%esi\n"
+ "popl %%ebx\n"
+
+ /* if (%eax != 0)
+ * return;
+ */
+
+ "test %%eax,%%eax\n"
+ "jnz 1f\n"
+
+ /* terminate the stack frame */
+ "xorl %%ebp,%%ebp\n"
+ /* Call FN. */
+ "call *%%ebx\n"
+# ifdef PIC
+ "call here\n"
+ "here:\n"
+ "popl %%ebx\n"
+ "addl $_GLOBAL_OFFSET_TABLE_+[.-here], %%ebx\n"
+# endif
+ /* Call exit */
+ "movl %%eax, %%ebx\n"
+ "movl %2, %%eax\n"
+ "int $0x80\n"
+ "1:\n"
+ : "=a"(res)
+ : "a"(SYSCALL(clone)), "i"(SYSCALL(exit)), "c"(child_stack),
+ "d"(parent_tidptr), "S"(newtls), "D"(child_tidptr)
+ : "memory");
return res;
}
-#elif defined(__arm__) && SANITIZER_LINUX
+# elif defined(__arm__)
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr) {
unsigned int res;
@@ -1638,69 +1704,68 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
register int *r4 __asm__("r4") = child_tidptr;
register int r7 __asm__("r7") = __NR_clone;
-#if __ARM_ARCH > 4 || defined (__ARM_ARCH_4T__)
-# define ARCH_HAS_BX
-#endif
-#if __ARM_ARCH > 4
-# define ARCH_HAS_BLX
-#endif
-
-#ifdef ARCH_HAS_BX
-# ifdef ARCH_HAS_BLX
-# define BLX(R) "blx " #R "\n"
-# else
-# define BLX(R) "mov lr, pc; bx " #R "\n"
-# endif
-#else
-# define BLX(R) "mov lr, pc; mov pc," #R "\n"
-#endif
+# if __ARM_ARCH > 4 || defined(__ARM_ARCH_4T__)
+# define ARCH_HAS_BX
+# endif
+# if __ARM_ARCH > 4
+# define ARCH_HAS_BLX
+# endif
+
+# ifdef ARCH_HAS_BX
+# ifdef ARCH_HAS_BLX
+# define BLX(R) "blx " #R "\n"
+# else
+# define BLX(R) "mov lr, pc; bx " #R "\n"
+# endif
+# else
+# define BLX(R) "mov lr, pc; mov pc," #R "\n"
+# endif
__asm__ __volatile__(
- /* %r0 = syscall(%r7 = SYSCALL(clone),
- * %r0 = flags,
- * %r1 = child_stack,
- * %r2 = parent_tidptr,
- * %r3 = new_tls,
- * %r4 = child_tidptr)
- */
-
- /* Do the system call */
- "swi 0x0\n"
-
- /* if (%r0 != 0)
- * return %r0;
- */
- "cmp r0, #0\n"
- "bne 1f\n"
-
- /* In the child, now. Call "fn(arg)". */
- "ldr r0, [sp, #4]\n"
- "ldr ip, [sp], #8\n"
- BLX(ip)
- /* Call _exit(%r0). */
- "mov r7, %7\n"
- "swi 0x0\n"
- "1:\n"
- "mov %0, r0\n"
- : "=r"(res)
- : "r"(r0), "r"(r1), "r"(r2), "r"(r3), "r"(r4), "r"(r7),
- "i"(__NR_exit)
- : "memory");
+ /* %r0 = syscall(%r7 = SYSCALL(clone),
+ * %r0 = flags,
+ * %r1 = child_stack,
+ * %r2 = parent_tidptr,
+ * %r3 = new_tls,
+ * %r4 = child_tidptr)
+ */
+
+ /* Do the system call */
+ "swi 0x0\n"
+
+ /* if (%r0 != 0)
+ * return %r0;
+ */
+ "cmp r0, #0\n"
+ "bne 1f\n"
+
+ /* In the child, now. Call "fn(arg)". */
+ "ldr r0, [sp, #4]\n"
+ "ldr ip, [sp], #8\n" BLX(ip)
+ /* Call _exit(%r0). */
+ "mov r7, %7\n"
+ "swi 0x0\n"
+ "1:\n"
+ "mov %0, r0\n"
+ : "=r"(res)
+ : "r"(r0), "r"(r1), "r"(r2), "r"(r3), "r"(r4), "r"(r7), "i"(__NR_exit)
+ : "memory");
return res;
}
-#endif // defined(__x86_64__) && SANITIZER_LINUX
+# endif
+# endif // SANITIZER_LINUX
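
All internal_clone() variants above share one contract: the caller passes a
16-byte-aligned top-of-stack, the implementation spills fn/arg onto that stack
around the raw clone syscall, and the child runs fn(arg) and then exits via
the raw exit syscall. A hedged usage sketch (RunOnNewStack and the flag set
are illustrative, not taken from this file):

    static uptr RunOnNewStack(int (*fn)(void *), void *arg) {
      const uptr kStackSize = 64 << 10;
      uptr stack = (uptr)MmapOrDie(kStackSize, "clone stack");
      // Stacks grow down, and every variant requires 16-byte alignment.
      uptr stack_top = RoundDownTo(stack + kStackSize, 16);
      // The parent gets the child tid back (or -errno; see
      // internal_iserror()); the child never returns here.
      return internal_clone(fn, (void *)stack_top,
                            CLONE_VM | CLONE_FS | CLONE_FILES | SIGCHLD, arg,
                            /*parent_tidptr=*/nullptr, /*newtls=*/nullptr,
                            /*child_tidptr=*/nullptr);
    }
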
-#if SANITIZER_LINUX
+# if SANITIZER_LINUX
int internal_uname(struct utsname *buf) {
return internal_syscall(SYSCALL(uname), buf);
}
-#endif
+# endif
-#if SANITIZER_ANDROID
-#if __ANDROID_API__ < 21
+# if SANITIZER_ANDROID
+# if __ANDROID_API__ < 21
extern "C" __attribute__((weak)) int dl_iterate_phdr(
int (*)(struct dl_phdr_info *, size_t, void *), void *);
-#endif
+# endif
static int dl_iterate_phdr_test_cb(struct dl_phdr_info *info, size_t size,
void *data) {
@@ -1717,40 +1782,41 @@ static int dl_iterate_phdr_test_cb(struct dl_phdr_info *info, size_t size,
static atomic_uint32_t android_api_level;
static AndroidApiLevel AndroidDetectApiLevelStatic() {
-#if __ANDROID_API__ <= 19
+# if __ANDROID_API__ <= 19
return ANDROID_KITKAT;
-#elif __ANDROID_API__ <= 22
+# elif __ANDROID_API__ <= 22
return ANDROID_LOLLIPOP_MR1;
-#else
+# else
return ANDROID_POST_LOLLIPOP;
-#endif
+# endif
}
static AndroidApiLevel AndroidDetectApiLevel() {
if (!&dl_iterate_phdr)
- return ANDROID_KITKAT; // K or lower
+ return ANDROID_KITKAT; // K or lower
bool base_name_seen = false;
dl_iterate_phdr(dl_iterate_phdr_test_cb, &base_name_seen);
if (base_name_seen)
- return ANDROID_LOLLIPOP_MR1; // L MR1
+ return ANDROID_LOLLIPOP_MR1; // L MR1
return ANDROID_POST_LOLLIPOP; // post-L
// Plain L (API level 21) is completely broken wrt ASan and not very
// interesting to detect.
}
-extern "C" __attribute__((weak)) void* _DYNAMIC;
+extern "C" __attribute__((weak)) void *_DYNAMIC;
AndroidApiLevel AndroidGetApiLevel() {
AndroidApiLevel level =
(AndroidApiLevel)atomic_load(&android_api_level, memory_order_relaxed);
- if (level) return level;
+ if (level)
+ return level;
level = &_DYNAMIC == nullptr ? AndroidDetectApiLevelStatic()
: AndroidDetectApiLevel();
atomic_store(&android_api_level, level, memory_order_relaxed);
return level;
}
-#endif
+# endif
static HandleSignalMode GetHandleSignalModeImpl(int signum) {
switch (signum) {
@@ -1777,33 +1843,28 @@ HandleSignalMode GetHandleSignalMode(int signum) {
return result;
}
-#if !SANITIZER_GO
+# if !SANITIZER_GO
void *internal_start_thread(void *(*func)(void *arg), void *arg) {
+ if (&real_pthread_create == 0)
+ return nullptr;
// Start the thread with signals blocked, otherwise it can steal user signals.
- __sanitizer_sigset_t set, old;
- internal_sigfillset(&set);
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
- // Glibc uses SIGSETXID signal during setuid call. If this signal is blocked
- // on any thread, setuid call hangs (see test/tsan/setuid.c).
- internal_sigdelset(&set, 33);
-#endif
- internal_sigprocmask(SIG_SETMASK, &set, &old);
+ ScopedBlockSignals block(nullptr);
void *th;
real_pthread_create(&th, nullptr, func, arg);
- internal_sigprocmask(SIG_SETMASK, &old, nullptr);
return th;
}
void internal_join_thread(void *th) {
- real_pthread_join(th, nullptr);
+ if (&real_pthread_join)
+ real_pthread_join(th, nullptr);
}
-#else
+# else
void *internal_start_thread(void *(*func)(void *), void *arg) { return 0; }
void internal_join_thread(void *th) {}
-#endif
+# endif
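
The internal_start_thread() rewrite above folds the manual sigprocmask
bracketing (visible in the deleted lines) into an RAII guard. The real
ScopedBlockSignals is declared elsewhere in sanitizer_common; a sketch of what
such a guard does, reconstructed from the code it replaces (details may
differ):

    struct ScopedBlockSignalsSketch {  // hypothetical stand-in
      explicit ScopedBlockSignalsSketch(__sanitizer_sigset_t *copy) {
        __sanitizer_sigset_t set;
        internal_sigfillset(&set);
    #  if SANITIZER_LINUX && !SANITIZER_ANDROID
        // Glibc uses SIGSETXID (33) during setuid; leaving it blocked on any
        // thread makes setuid hang (see test/tsan/setuid.c).
        internal_sigdelset(&set, 33);
    #  endif
        internal_sigprocmask(SIG_SETMASK, &set, &saved_);
        if (copy)
          internal_memcpy(copy, &saved_, sizeof(saved_));
      }
      ~ScopedBlockSignalsSketch() {
        internal_sigprocmask(SIG_SETMASK, &saved_, nullptr);
      }
      __sanitizer_sigset_t saved_;
    };
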
-#if defined(__aarch64__)
+# if SANITIZER_LINUX && defined(__aarch64__)
 // Android headers in the older NDK releases lack this definition.
struct __sanitizer_esr_context {
struct _aarch64_ctx head;
@@ -1812,10 +1873,11 @@ struct __sanitizer_esr_context {
static bool Aarch64GetESR(ucontext_t *ucontext, u64 *esr) {
static const u32 kEsrMagic = 0x45535201;
- u8 *aux = ucontext->uc_mcontext.__reserved;
+ u8 *aux = reinterpret_cast<u8 *>(ucontext->uc_mcontext.__reserved);
while (true) {
_aarch64_ctx *ctx = (_aarch64_ctx *)aux;
- if (ctx->size == 0) break;
+ if (ctx->size == 0)
+ break;
if (ctx->magic == kEsrMagic) {
*esr = ((__sanitizer_esr_context *)ctx)->esr;
return true;
@@ -1824,26 +1886,29 @@ static bool Aarch64GetESR(ucontext_t *ucontext, u64 *esr) {
}
return false;
}
-#endif
+# elif SANITIZER_FREEBSD && defined(__aarch64__)
+// FreeBSD doesn't provide ESR in the ucontext.
+static bool Aarch64GetESR(ucontext_t *ucontext, u64 *esr) { return false; }
+# endif
using Context = ucontext_t;
SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
Context *ucontext = (Context *)context;
-#if defined(__x86_64__) || defined(__i386__)
+# if defined(__x86_64__) || defined(__i386__)
static const uptr PF_WRITE = 1U << 1;
-#if SANITIZER_FREEBSD
+# if SANITIZER_FREEBSD
uptr err = ucontext->uc_mcontext.mc_err;
-#elif SANITIZER_NETBSD
+# elif SANITIZER_NETBSD
uptr err = ucontext->uc_mcontext.__gregs[_REG_ERR];
-#elif SANITIZER_SOLARIS && defined(__i386__)
+# elif SANITIZER_SOLARIS && defined(__i386__)
const int Err = 13;
uptr err = ucontext->uc_mcontext.gregs[Err];
-#else
+# else
uptr err = ucontext->uc_mcontext.gregs[REG_ERR];
-#endif // SANITIZER_FREEBSD
- return err & PF_WRITE ? WRITE : READ;
-#elif defined(__mips__)
+# endif // SANITIZER_FREEBSD
+ return err & PF_WRITE ? Write : Read;
+# elif defined(__mips__)
uint32_t *exception_source;
uint32_t faulty_instruction;
uint32_t op_code;
@@ -1859,13 +1924,13 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
case 0x29: // sh
case 0x2b: // sw
case 0x3f: // sd
-#if __mips_isa_rev < 6
+# if __mips_isa_rev < 6
case 0x2c: // sdl
case 0x2d: // sdr
case 0x2a: // swl
case 0x2e: // swr
-#endif
- return SignalContext::WRITE;
+# endif
+ return SignalContext::Write;
case 0x20: // lb
case 0x24: // lbu
@@ -1874,54 +1939,66 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
case 0x23: // lw
case 0x27: // lwu
case 0x37: // ld
-#if __mips_isa_rev < 6
+# if __mips_isa_rev < 6
case 0x1a: // ldl
case 0x1b: // ldr
case 0x22: // lwl
case 0x26: // lwr
-#endif
- return SignalContext::READ;
-#if __mips_isa_rev == 6
+# endif
+ return SignalContext::Read;
+# if __mips_isa_rev == 6
case 0x3b: // pcrel
op_code = (faulty_instruction >> 19) & 0x3;
switch (op_code) {
case 0x1: // lwpc
case 0x2: // lwupc
- return SignalContext::READ;
+ return SignalContext::Read;
}
-#endif
+# endif
}
- return SignalContext::UNKNOWN;
-#elif defined(__arm__)
+ return SignalContext::Unknown;
+# elif defined(__arm__)
static const uptr FSR_WRITE = 1U << 11;
uptr fsr = ucontext->uc_mcontext.error_code;
- return fsr & FSR_WRITE ? WRITE : READ;
-#elif defined(__aarch64__)
+ return fsr & FSR_WRITE ? Write : Read;
+# elif defined(__aarch64__)
static const u64 ESR_ELx_WNR = 1U << 6;
u64 esr;
- if (!Aarch64GetESR(ucontext, &esr)) return UNKNOWN;
- return esr & ESR_ELx_WNR ? WRITE : READ;
-#elif defined(__sparc__)
+ if (!Aarch64GetESR(ucontext, &esr))
+ return Unknown;
+ return esr & ESR_ELx_WNR ? Write : Read;
+# elif defined(__loongarch__)
+ u32 flags = ucontext->uc_mcontext.__flags;
+ if (flags & SC_ADDRERR_RD)
+ return SignalContext::Read;
+ if (flags & SC_ADDRERR_WR)
+ return SignalContext::Write;
+ return SignalContext::Unknown;
+# elif defined(__sparc__)
// Decode the instruction to determine the access type.
// From OpenSolaris $SRC/uts/sun4/os/trap.c (get_accesstype).
-#if SANITIZER_SOLARIS
+# if SANITIZER_SOLARIS
uptr pc = ucontext->uc_mcontext.gregs[REG_PC];
-#else
+# else
// Historical BSDism here.
struct sigcontext *scontext = (struct sigcontext *)context;
-#if defined(__arch64__)
+# if defined(__arch64__)
uptr pc = scontext->sigc_regs.tpc;
-#else
+# else
uptr pc = scontext->si_regs.pc;
-#endif
-#endif
+# endif
+# endif
u32 instr = *(u32 *)pc;
- return (instr >> 21) & 1 ? WRITE: READ;
-#elif defined(__riscv)
+ return (instr >> 21) & 1 ? Write : Read;
+# elif defined(__riscv)
+# if SANITIZER_FREEBSD
+ unsigned long pc = ucontext->uc_mcontext.mc_gpregs.gp_sepc;
+# else
unsigned long pc = ucontext->uc_mcontext.__gregs[REG_PC];
+# endif
unsigned faulty_instruction = *(uint16_t *)pc;
-#if defined(__riscv_compressed)
+# if defined(__riscv_compressed)
if ((faulty_instruction & 0x3) != 0x3) { // it's a compressed instruction
// set op_bits to the instruction bits [1, 0, 15, 14, 13]
unsigned op_bits =
@@ -1929,38 +2006,38 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
unsigned rd = faulty_instruction & 0xF80; // bits 7-11, inclusive
switch (op_bits) {
case 0b10'010: // c.lwsp (rd != x0)
-#if __riscv_xlen == 64
+# if __riscv_xlen == 64
case 0b10'011: // c.ldsp (rd != x0)
-#endif
- return rd ? SignalContext::READ : SignalContext::UNKNOWN;
+# endif
+ return rd ? SignalContext::Read : SignalContext::Unknown;
case 0b00'010: // c.lw
-#if __riscv_flen >= 32 && __riscv_xlen == 32
+# if __riscv_flen >= 32 && __riscv_xlen == 32
case 0b10'011: // c.flwsp
-#endif
-#if __riscv_flen >= 32 || __riscv_xlen == 64
+# endif
+# if __riscv_flen >= 32 || __riscv_xlen == 64
case 0b00'011: // c.flw / c.ld
-#endif
-#if __riscv_flen == 64
+# endif
+# if __riscv_flen == 64
case 0b00'001: // c.fld
case 0b10'001: // c.fldsp
-#endif
- return SignalContext::READ;
+# endif
+ return SignalContext::Read;
case 0b00'110: // c.sw
case 0b10'110: // c.swsp
-#if __riscv_flen >= 32 || __riscv_xlen == 64
+# if __riscv_flen >= 32 || __riscv_xlen == 64
case 0b00'111: // c.fsw / c.sd
case 0b10'111: // c.fswsp / c.sdsp
-#endif
-#if __riscv_flen == 64
+# endif
+# if __riscv_flen == 64
case 0b00'101: // c.fsd
case 0b10'101: // c.fsdsp
-#endif
- return SignalContext::WRITE;
+# endif
+ return SignalContext::Write;
default:
- return SignalContext::UNKNOWN;
+ return SignalContext::Unknown;
}
}
-#endif
+# endif
unsigned opcode = faulty_instruction & 0x7f; // lower 7 bits
unsigned funct3 = (faulty_instruction >> 12) & 0x7; // bits 12-14, inclusive
@@ -1970,56 +2047,56 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
case 0b000: // lb
case 0b001: // lh
case 0b010: // lw
-#if __riscv_xlen == 64
+# if __riscv_xlen == 64
case 0b011: // ld
-#endif
+# endif
case 0b100: // lbu
case 0b101: // lhu
- return SignalContext::READ;
+ return SignalContext::Read;
default:
- return SignalContext::UNKNOWN;
+ return SignalContext::Unknown;
}
case 0b0100011: // stores
switch (funct3) {
case 0b000: // sb
case 0b001: // sh
case 0b010: // sw
-#if __riscv_xlen == 64
+# if __riscv_xlen == 64
case 0b011: // sd
-#endif
- return SignalContext::WRITE;
+# endif
+ return SignalContext::Write;
default:
- return SignalContext::UNKNOWN;
+ return SignalContext::Unknown;
}
-#if __riscv_flen >= 32
+# if __riscv_flen >= 32
case 0b0000111: // floating-point loads
switch (funct3) {
case 0b010: // flw
-#if __riscv_flen == 64
+# if __riscv_flen == 64
case 0b011: // fld
-#endif
- return SignalContext::READ;
+# endif
+ return SignalContext::Read;
default:
- return SignalContext::UNKNOWN;
+ return SignalContext::Unknown;
}
case 0b0100111: // floating-point stores
switch (funct3) {
case 0b010: // fsw
-#if __riscv_flen == 64
+# if __riscv_flen == 64
case 0b011: // fsd
-#endif
- return SignalContext::WRITE;
+# endif
+ return SignalContext::Write;
default:
- return SignalContext::UNKNOWN;
+ return SignalContext::Unknown;
}
-#endif
+# endif
default:
- return SignalContext::UNKNOWN;
+ return SignalContext::Unknown;
}
-#else
+# else
(void)ucontext;
- return UNKNOWN; // FIXME: Implement.
-#endif
+ return Unknown; // FIXME: Implement.
+# endif
}
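
As a worked example of the RISC-V decode above, take a fault on
"sw a1, 0(a0)", which assembles to 0x00B52023 in the standard RV32I encoding:

    unsigned insn = 0x00B52023;            // sw a1, 0(a0)
    unsigned opcode = insn & 0x7f;         // 0b0100011 -> the "stores" case
    unsigned funct3 = (insn >> 12) & 0x7;  // 0b010 -> sw
    // GetWriteFlag() therefore classifies this fault as SignalContext::Write.
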
bool SignalContext::IsTrueFaultingAddress() const {
@@ -2033,116 +2110,146 @@ void SignalContext::DumpAllRegisters(void *context) {
}
static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
-#if SANITIZER_NETBSD
+# if SANITIZER_NETBSD
// This covers all NetBSD architectures
ucontext_t *ucontext = (ucontext_t *)context;
*pc = _UC_MACHINE_PC(ucontext);
*bp = _UC_MACHINE_FP(ucontext);
*sp = _UC_MACHINE_SP(ucontext);
-#elif defined(__arm__)
- ucontext_t *ucontext = (ucontext_t*)context;
+# elif defined(__arm__)
+ ucontext_t *ucontext = (ucontext_t *)context;
*pc = ucontext->uc_mcontext.arm_pc;
*bp = ucontext->uc_mcontext.arm_fp;
*sp = ucontext->uc_mcontext.arm_sp;
-#elif defined(__aarch64__)
- ucontext_t *ucontext = (ucontext_t*)context;
+# elif defined(__aarch64__)
+# if SANITIZER_FREEBSD
+ ucontext_t *ucontext = (ucontext_t *)context;
+ *pc = ucontext->uc_mcontext.mc_gpregs.gp_elr;
+ *bp = ucontext->uc_mcontext.mc_gpregs.gp_x[29];
+ *sp = ucontext->uc_mcontext.mc_gpregs.gp_sp;
+# else
+ ucontext_t *ucontext = (ucontext_t *)context;
*pc = ucontext->uc_mcontext.pc;
*bp = ucontext->uc_mcontext.regs[29];
*sp = ucontext->uc_mcontext.sp;
-#elif defined(__hppa__)
- ucontext_t *ucontext = (ucontext_t*)context;
+# endif
+# elif defined(__hppa__)
+ ucontext_t *ucontext = (ucontext_t *)context;
*pc = ucontext->uc_mcontext.sc_iaoq[0];
/* GCC uses %r3 whenever a frame pointer is needed. */
*bp = ucontext->uc_mcontext.sc_gr[3];
*sp = ucontext->uc_mcontext.sc_gr[30];
-#elif defined(__x86_64__)
-# if SANITIZER_FREEBSD
- ucontext_t *ucontext = (ucontext_t*)context;
+# elif defined(__x86_64__)
+# if SANITIZER_FREEBSD
+ ucontext_t *ucontext = (ucontext_t *)context;
*pc = ucontext->uc_mcontext.mc_rip;
*bp = ucontext->uc_mcontext.mc_rbp;
*sp = ucontext->uc_mcontext.mc_rsp;
-# else
- ucontext_t *ucontext = (ucontext_t*)context;
+# else
+ ucontext_t *ucontext = (ucontext_t *)context;
*pc = ucontext->uc_mcontext.gregs[REG_RIP];
*bp = ucontext->uc_mcontext.gregs[REG_RBP];
*sp = ucontext->uc_mcontext.gregs[REG_RSP];
-# endif
-#elif defined(__i386__)
-# if SANITIZER_FREEBSD
- ucontext_t *ucontext = (ucontext_t*)context;
+# endif
+# elif defined(__i386__)
+# if SANITIZER_FREEBSD
+ ucontext_t *ucontext = (ucontext_t *)context;
*pc = ucontext->uc_mcontext.mc_eip;
*bp = ucontext->uc_mcontext.mc_ebp;
*sp = ucontext->uc_mcontext.mc_esp;
-# else
- ucontext_t *ucontext = (ucontext_t*)context;
-# if SANITIZER_SOLARIS
+# else
+ ucontext_t *ucontext = (ucontext_t *)context;
+# if SANITIZER_SOLARIS
/* Use the numeric values: the symbolic ones are undefined by llvm
include/llvm/Support/Solaris.h. */
-# ifndef REG_EIP
-# define REG_EIP 14 // REG_PC
-# endif
-# ifndef REG_EBP
-# define REG_EBP 6 // REG_FP
-# endif
-# ifndef REG_UESP
-# define REG_UESP 17 // REG_SP
-# endif
-# endif
+# ifndef REG_EIP
+# define REG_EIP 14 // REG_PC
+# endif
+# ifndef REG_EBP
+# define REG_EBP 6 // REG_FP
+# endif
+# ifndef REG_UESP
+# define REG_UESP 17 // REG_SP
+# endif
+# endif
*pc = ucontext->uc_mcontext.gregs[REG_EIP];
*bp = ucontext->uc_mcontext.gregs[REG_EBP];
*sp = ucontext->uc_mcontext.gregs[REG_UESP];
-# endif
-#elif defined(__powerpc__) || defined(__powerpc64__)
- ucontext_t *ucontext = (ucontext_t*)context;
+# endif
+# elif defined(__powerpc__) || defined(__powerpc64__)
+# if SANITIZER_FREEBSD
+ ucontext_t *ucontext = (ucontext_t *)context;
+ *pc = ucontext->uc_mcontext.mc_srr0;
+ *sp = ucontext->uc_mcontext.mc_frame[1];
+ *bp = ucontext->uc_mcontext.mc_frame[31];
+# else
+ ucontext_t *ucontext = (ucontext_t *)context;
*pc = ucontext->uc_mcontext.regs->nip;
*sp = ucontext->uc_mcontext.regs->gpr[PT_R1];
// The powerpc{,64}-linux ABIs do not specify r31 as the frame
// pointer, but GCC always uses r31 when we need a frame pointer.
*bp = ucontext->uc_mcontext.regs->gpr[PT_R31];
-#elif defined(__sparc__)
-#if defined(__arch64__) || defined(__sparcv9)
-#define STACK_BIAS 2047
-#else
-#define STACK_BIAS 0
-# endif
-# if SANITIZER_SOLARIS
+# endif
+# elif defined(__sparc__)
+# if defined(__arch64__) || defined(__sparcv9)
+# define STACK_BIAS 2047
+# else
+# define STACK_BIAS 0
+# endif
+# if SANITIZER_SOLARIS
ucontext_t *ucontext = (ucontext_t *)context;
*pc = ucontext->uc_mcontext.gregs[REG_PC];
*sp = ucontext->uc_mcontext.gregs[REG_O6] + STACK_BIAS;
-#else
+# else
// Historical BSDism here.
struct sigcontext *scontext = (struct sigcontext *)context;
-#if defined(__arch64__)
+# if defined(__arch64__)
*pc = scontext->sigc_regs.tpc;
*sp = scontext->sigc_regs.u_regs[14] + STACK_BIAS;
-#else
+# else
*pc = scontext->si_regs.pc;
*sp = scontext->si_regs.u_regs[14];
-#endif
-# endif
+# endif
+# endif
*bp = (uptr)((uhwptr *)*sp)[14] + STACK_BIAS;
-#elif defined(__mips__)
- ucontext_t *ucontext = (ucontext_t*)context;
+# elif defined(__mips__)
+ ucontext_t *ucontext = (ucontext_t *)context;
*pc = ucontext->uc_mcontext.pc;
*bp = ucontext->uc_mcontext.gregs[30];
*sp = ucontext->uc_mcontext.gregs[29];
-#elif defined(__s390__)
- ucontext_t *ucontext = (ucontext_t*)context;
-# if defined(__s390x__)
+# elif defined(__s390__)
+ ucontext_t *ucontext = (ucontext_t *)context;
+# if defined(__s390x__)
*pc = ucontext->uc_mcontext.psw.addr;
-# else
+# else
*pc = ucontext->uc_mcontext.psw.addr & 0x7fffffff;
-# endif
+# endif
*bp = ucontext->uc_mcontext.gregs[11];
*sp = ucontext->uc_mcontext.gregs[15];
-#elif defined(__riscv)
- ucontext_t *ucontext = (ucontext_t*)context;
+# elif defined(__riscv)
+ ucontext_t *ucontext = (ucontext_t *)context;
+# if SANITIZER_FREEBSD
+ *pc = ucontext->uc_mcontext.mc_gpregs.gp_sepc;
+ *bp = ucontext->uc_mcontext.mc_gpregs.gp_s[0];
+ *sp = ucontext->uc_mcontext.mc_gpregs.gp_sp;
+# else
*pc = ucontext->uc_mcontext.__gregs[REG_PC];
*bp = ucontext->uc_mcontext.__gregs[REG_S0];
*sp = ucontext->uc_mcontext.__gregs[REG_SP];
-#else
-# error "Unsupported arch"
-#endif
+# endif
+# elif defined(__hexagon__)
+ ucontext_t *ucontext = (ucontext_t *)context;
+ *pc = ucontext->uc_mcontext.pc;
+ *bp = ucontext->uc_mcontext.r30;
+ *sp = ucontext->uc_mcontext.r29;
+# elif defined(__loongarch__)
+ ucontext_t *ucontext = (ucontext_t *)context;
+ *pc = ucontext->uc_mcontext.__pc;
+ *bp = ucontext->uc_mcontext.__gregs[22];
+ *sp = ucontext->uc_mcontext.__gregs[3];
+# else
+# error "Unsupported arch"
+# endif
}
void SignalContext::InitPcSpBp() { GetPcSpBp(context, &pc, &sp, &bp); }
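
As a standalone illustration of what GetPcSpBp extracts, here is a hedged x86_64 Linux sketch that pulls pc/bp/sp out of the ucontext handed to a SA_SIGINFO handler (fprintf is for demonstration only and is not async-signal-safe):

#define _GNU_SOURCE  // for REG_RIP/REG_RBP/REG_RSP
#include <signal.h>
#include <stdio.h>
#include <ucontext.h>
#include <unistd.h>

static void handler(int sig, siginfo_t *info, void *ctx) {
  ucontext_t *uc = (ucontext_t *)ctx;
  unsigned long pc = uc->uc_mcontext.gregs[REG_RIP];
  unsigned long bp = uc->uc_mcontext.gregs[REG_RBP];
  unsigned long sp = uc->uc_mcontext.gregs[REG_RSP];
  fprintf(stderr, "fault: pc=%lx bp=%lx sp=%lx\n", pc, bp, sp);
  _exit(1);
}

int main() {
  struct sigaction sa = {};
  sa.sa_sigaction = handler;
  sa.sa_flags = SA_SIGINFO;
  sigaction(SIGSEGV, &sa, 0);
  *(volatile int *)0 = 0;  // deliberate fault to enter the handler
}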
@@ -2151,12 +2258,8 @@ void InitializePlatformEarly() {
// Do nothing.
}
-void MaybeReexec() {
- // No need to re-exec on Linux.
-}
-
void CheckASLR() {
-#if SANITIZER_NETBSD
+# if SANITIZER_NETBSD
int mib[3];
int paxflags;
uptr len = sizeof(paxflags);
@@ -2171,42 +2274,49 @@ void CheckASLR() {
}
if (UNLIKELY(paxflags & CTL_PROC_PAXFLAGS_ASLR)) {
- Printf("This sanitizer is not compatible with enabled ASLR.\n"
- "To disable ASLR, please run \"paxctl +a %s\" and try again.\n",
- GetArgv()[0]);
+ Printf(
+ "This sanitizer is not compatible with enabled ASLR.\n"
+ "To disable ASLR, please run \"paxctl +a %s\" and try again.\n",
+ GetArgv()[0]);
Die();
}
-#elif SANITIZER_PPC64V2
- // Disable ASLR for Linux PPC64LE.
- int old_personality = personality(0xffffffff);
- if (old_personality != -1 && (old_personality & ADDR_NO_RANDOMIZE) == 0) {
- VReport(1, "WARNING: Program is being run with address space layout "
- "randomization (ASLR) enabled which prevents the thread and "
- "memory sanitizers from working on powerpc64le.\n"
- "ASLR will be disabled and the program re-executed.\n");
- CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1);
- ReExec();
- }
-#elif SANITIZER_FREEBSD
+# elif SANITIZER_FREEBSD
int aslr_status;
- if (UNLIKELY(procctl(P_PID, 0, PROC_ASLR_STATUS, &aslr_status) == -1)) {
+ int r = internal_procctl(P_PID, 0, PROC_ASLR_STATUS, &aslr_status);
+ if (UNLIKELY(r == -1)) {
// Be less dramatic here: the PROC_ASLR_STATUS command is not
// guaranteed to be available on older FreeBSD releases, so fail
// quietly.
return;
}
if ((aslr_status & PROC_ASLR_ACTIVE) != 0) {
- Printf("This sanitizer is not compatible with enabled ASLR "
- "and binaries compiled with PIE\n");
- Die();
+ VReport(1,
+ "This sanitizer is not compatible with enabled ASLR "
+ "and binaries compiled with PIE\n"
+ "ASLR will be disabled and the program re-executed.\n");
+ int aslr_ctl = PROC_ASLR_FORCE_DISABLE;
+ CHECK_NE(internal_procctl(P_PID, 0, PROC_ASLR_CTL, &aslr_ctl), -1);
+ ReExec();
+ }
+# elif SANITIZER_PPC64V2
+ // Disable ASLR for Linux PPC64LE.
+ int old_personality = personality(0xffffffff);
+ if (old_personality != -1 && (old_personality & ADDR_NO_RANDOMIZE) == 0) {
+ VReport(1,
+ "WARNING: Program is being run with address space layout "
+ "randomization (ASLR) enabled which prevents the thread and "
+ "memory sanitizers from working on powerpc64le.\n"
+ "ASLR will be disabled and the program re-executed.\n");
+ CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1);
+ ReExec();
}
-#else
+# else
// Do nothing
-#endif
+# endif
}
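
The PPC64LE branch relies on the Linux personality(2) query/set idiom; a hedged standalone sketch of the same disable-and-re-exec dance:

#include <sys/personality.h>
#include <unistd.h>

int main(int argc, char **argv, char **envp) {
  int old = personality(0xffffffff);  // 0xffffffff queries without changing
  if (old != -1 && !(old & ADDR_NO_RANDOMIZE)) {
    personality(old | ADDR_NO_RANDOMIZE);
    execve("/proc/self/exe", argv, envp);  // restart without ASLR
  }
  // Reached after the re-exec (or if the persona could not be changed).
  return 0;
}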
void CheckMPROTECT() {
-#if SANITIZER_NETBSD
+# if SANITIZER_NETBSD
int mib[3];
int paxflags;
uptr len = sizeof(paxflags);
@@ -2224,13 +2334,13 @@ void CheckMPROTECT() {
Printf("This sanitizer is not compatible with enabled MPROTECT\n");
Die();
}
-#else
+# else
// Do nothing
-#endif
+# endif
}
void CheckNoDeepBind(const char *filename, int flag) {
-#ifdef RTLD_DEEPBIND
+# ifdef RTLD_DEEPBIND
if (flag & RTLD_DEEPBIND) {
Report(
"You are trying to dlopen a %s shared library with RTLD_DEEPBIND flag"
@@ -2241,7 +2351,7 @@ void CheckNoDeepBind(const char *filename, int flag) {
filename, filename);
Die();
}
-#endif
+# endif
}
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
@@ -2254,16 +2364,16 @@ uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
bool GetRandom(void *buffer, uptr length, bool blocking) {
if (!buffer || !length || length > 256)
return false;
-#if SANITIZER_USE_GETENTROPY
+# if SANITIZER_USE_GETENTROPY
uptr rnd = getentropy(buffer, length);
int rverrno = 0;
if (internal_iserror(rnd, &rverrno) && rverrno == EFAULT)
return false;
else if (rnd == 0)
return true;
-#endif // SANITIZER_USE_GETENTROPY
+# endif // SANITIZER_USE_GETENTROPY
-#if SANITIZER_USE_GETRANDOM
+# if SANITIZER_USE_GETRANDOM
static atomic_uint8_t skip_getrandom_syscall;
if (!atomic_load_relaxed(&skip_getrandom_syscall)) {
// Up to 256 bytes, getrandom will not be interrupted.
@@ -2275,7 +2385,7 @@ bool GetRandom(void *buffer, uptr length, bool blocking) {
else if (res == length)
return true;
}
-#endif // SANITIZER_USE_GETRANDOM
+# endif // SANITIZER_USE_GETRANDOM
// Up to 256 bytes, a read off /dev/urandom will not be interrupted.
// blocking is moot here, O_NONBLOCK has no effect when opening /dev/urandom.
uptr fd = internal_open("/dev/urandom", O_RDONLY);
@@ -2288,6 +2398,6 @@ bool GetRandom(void *buffer, uptr length, bool blocking) {
return true;
}
-} // namespace __sanitizer
+} // namespace __sanitizer
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux.h
index 9a23fcfb3b93..c30f0326793d 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux.h
@@ -13,15 +13,15 @@
#define SANITIZER_LINUX_H
#include "sanitizer_platform.h"
-#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
+#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
SANITIZER_SOLARIS
-#include "sanitizer_common.h"
-#include "sanitizer_internal_defs.h"
-#include "sanitizer_platform_limits_freebsd.h"
-#include "sanitizer_platform_limits_netbsd.h"
-#include "sanitizer_platform_limits_posix.h"
-#include "sanitizer_platform_limits_solaris.h"
-#include "sanitizer_posix.h"
+# include "sanitizer_common.h"
+# include "sanitizer_internal_defs.h"
+# include "sanitizer_platform_limits_freebsd.h"
+# include "sanitizer_platform_limits_netbsd.h"
+# include "sanitizer_platform_limits_posix.h"
+# include "sanitizer_platform_limits_solaris.h"
+# include "sanitizer_posix.h"
struct link_map; // Opaque type returned by dlopen().
struct utsname;
@@ -46,34 +46,52 @@ void ReadProcMaps(ProcSelfMapsBuff *proc_maps);
// Syscall wrappers.
uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count);
-uptr internal_sigaltstack(const void* ss, void* oss);
+uptr internal_sigaltstack(const void *ss, void *oss);
uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
- __sanitizer_sigset_t *oldset);
-#if SANITIZER_GLIBC
+ __sanitizer_sigset_t *oldset);
+
+void SetSigProcMask(__sanitizer_sigset_t *set, __sanitizer_sigset_t *oldset);
+void BlockSignals(__sanitizer_sigset_t *oldset = nullptr);
+struct ScopedBlockSignals {
+ explicit ScopedBlockSignals(__sanitizer_sigset_t *copy);
+ ~ScopedBlockSignals();
+
+ ScopedBlockSignals &operator=(const ScopedBlockSignals &) = delete;
+ ScopedBlockSignals(const ScopedBlockSignals &) = delete;
+
+ private:
+ __sanitizer_sigset_t saved_;
+};
+
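
A likely call-site shape for the new RAII helper, as a sketch (the real users live in the sanitizer runtimes):

// Block signals for a scope, e.g. around an internal clone().
{
  __sanitizer_sigset_t saved;
  ScopedBlockSignals block(&saved);  // constructor blocks and saves the mask
  // ... work that must not be interrupted by signal handlers ...
}  // destructor restores the saved mask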
+# if SANITIZER_GLIBC
uptr internal_clock_gettime(__sanitizer_clockid_t clk_id, void *tp);
-#endif
+# endif
// Linux-only syscalls.
-#if SANITIZER_LINUX
+# if SANITIZER_LINUX
uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5);
+# if defined(__x86_64__)
+uptr internal_arch_prctl(int option, uptr arg2);
+# endif
// Used only by sanitizer_stoptheworld. Signal handlers that are actually used
// (like the process-wide error reporting SEGV handler) must use
// internal_sigaction instead.
int internal_sigaction_norestorer(int signum, const void *act, void *oldact);
void internal_sigdelset(__sanitizer_sigset_t *set, int signum);
-#if defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) || \
- defined(__powerpc64__) || defined(__s390__) || defined(__i386__) || \
- defined(__arm__) || SANITIZER_RISCV64
+# if defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) || \
+ defined(__powerpc64__) || defined(__s390__) || defined(__i386__) || \
+ defined(__arm__) || SANITIZER_RISCV64 || SANITIZER_LOONGARCH64
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr);
-#endif
+# endif
int internal_uname(struct utsname *buf);
-#elif SANITIZER_FREEBSD
+# elif SANITIZER_FREEBSD
+uptr internal_procctl(int type, int id, int cmd, void *data);
void internal_sigdelset(__sanitizer_sigset_t *set, int signum);
-#elif SANITIZER_NETBSD
+# elif SANITIZER_NETBSD
void internal_sigdelset(__sanitizer_sigset_t *set, int signum);
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg);
-#endif // SANITIZER_LINUX
+# endif // SANITIZER_LINUX
// This class reads thread IDs from /proc/<pid>/task using only syscalls.
class ThreadLister {
@@ -117,33 +135,60 @@ inline void ReleaseMemoryPagesToOSAndZeroFill(uptr beg, uptr end) {
ReleaseMemoryPagesToOS(beg, end);
}
-#if SANITIZER_ANDROID
-
-#if defined(__aarch64__)
-# define __get_tls() \
- ({ void** __v; __asm__("mrs %0, tpidr_el0" : "=r"(__v)); __v; })
-#elif defined(__arm__)
-# define __get_tls() \
- ({ void** __v; __asm__("mrc p15, 0, %0, c13, c0, 3" : "=r"(__v)); __v; })
-#elif defined(__mips__)
+# if SANITIZER_ANDROID
+
+# if defined(__aarch64__)
+# define __get_tls() \
+ ({ \
+ void **__v; \
+ __asm__("mrs %0, tpidr_el0" : "=r"(__v)); \
+ __v; \
+ })
+# elif defined(__arm__)
+# define __get_tls() \
+ ({ \
+ void **__v; \
+ __asm__("mrc p15, 0, %0, c13, c0, 3" : "=r"(__v)); \
+ __v; \
+ })
+# elif defined(__mips__)
// On mips32r1, this goes via a kernel illegal instruction trap that's
// optimized for v1.
-# define __get_tls() \
- ({ register void** __v asm("v1"); \
- __asm__(".set push\n" \
- ".set mips32r2\n" \
- "rdhwr %0,$29\n" \
- ".set pop\n" : "=r"(__v)); \
- __v; })
-#elif defined(__i386__)
-# define __get_tls() \
- ({ void** __v; __asm__("movl %%gs:0, %0" : "=r"(__v)); __v; })
-#elif defined(__x86_64__)
-# define __get_tls() \
- ({ void** __v; __asm__("mov %%fs:0, %0" : "=r"(__v)); __v; })
-#else
-#error "Unsupported architecture."
-#endif
+# define __get_tls() \
+ ({ \
+ register void **__v asm("v1"); \
+ __asm__( \
+ ".set push\n" \
+ ".set mips32r2\n" \
+ "rdhwr %0,$29\n" \
+ ".set pop\n" \
+ : "=r"(__v)); \
+ __v; \
+ })
+# elif defined(__riscv)
+# define __get_tls() \
+ ({ \
+ void **__v; \
+ __asm__("mv %0, tp" : "=r"(__v)); \
+ __v; \
+ })
+# elif defined(__i386__)
+# define __get_tls() \
+ ({ \
+ void **__v; \
+ __asm__("movl %%gs:0, %0" : "=r"(__v)); \
+ __v; \
+ })
+# elif defined(__x86_64__)
+# define __get_tls() \
+ ({ \
+ void **__v; \
+ __asm__("mov %%fs:0, %0" : "=r"(__v)); \
+ __v; \
+ })
+# else
+# error "Unsupported architecture."
+# endif
// The Android Bionic team has allocated a TLS slot for sanitizers starting
// with Q, given that Android currently doesn't support ELF TLS. It is used to
@@ -154,7 +199,7 @@ ALWAYS_INLINE uptr *get_android_tls_ptr() {
return reinterpret_cast<uptr *>(&__get_tls()[TLS_SLOT_SANITIZER]);
}
-#endif // SANITIZER_ANDROID
+# endif // SANITIZER_ANDROID
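
A hedged sketch of how the slot accessor above is typically used; MyState is a hypothetical per-thread payload, not a real runtime type:

// Stash a per-thread pointer in Bionic's sanitizer TLS slot.
uptr *slot = get_android_tls_ptr();  // &__get_tls()[TLS_SLOT_SANITIZER]
*slot = reinterpret_cast<uptr>(state);            // store (state: MyState *)
MyState *s = reinterpret_cast<MyState *>(*slot);  // load it back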
} // namespace __sanitizer
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp
index 7ce9e25da342..962fff53e447 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp
@@ -16,85 +16,87 @@
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
SANITIZER_SOLARIS
-#include "sanitizer_allocator_internal.h"
-#include "sanitizer_atomic.h"
-#include "sanitizer_common.h"
-#include "sanitizer_file.h"
-#include "sanitizer_flags.h"
-#include "sanitizer_freebsd.h"
-#include "sanitizer_getauxval.h"
-#include "sanitizer_glibc_version.h"
-#include "sanitizer_linux.h"
-#include "sanitizer_placement_new.h"
-#include "sanitizer_procmaps.h"
-
-#if SANITIZER_NETBSD
-#define _RTLD_SOURCE // for __lwp_gettcb_fast() / __lwp_getprivate_fast()
-#endif
-
-#include <dlfcn.h> // for dlsym()
-#include <link.h>
-#include <pthread.h>
-#include <signal.h>
-#include <sys/mman.h>
-#include <sys/resource.h>
-#include <syslog.h>
-
-#if !defined(ElfW)
-#define ElfW(type) Elf_##type
-#endif
-
-#if SANITIZER_FREEBSD
-#include <pthread_np.h>
-#include <osreldate.h>
-#include <sys/sysctl.h>
-#define pthread_getattr_np pthread_attr_get_np
+# include "sanitizer_allocator_internal.h"
+# include "sanitizer_atomic.h"
+# include "sanitizer_common.h"
+# include "sanitizer_file.h"
+# include "sanitizer_flags.h"
+# include "sanitizer_getauxval.h"
+# include "sanitizer_glibc_version.h"
+# include "sanitizer_linux.h"
+# include "sanitizer_placement_new.h"
+# include "sanitizer_procmaps.h"
+# include "sanitizer_solaris.h"
+
+# if SANITIZER_NETBSD
+# define _RTLD_SOURCE // for __lwp_gettcb_fast() / __lwp_getprivate_fast()
+# endif
+
+# include <dlfcn.h> // for dlsym()
+# include <link.h>
+# include <pthread.h>
+# include <signal.h>
+# include <sys/mman.h>
+# include <sys/resource.h>
+# include <syslog.h>
+
+# if !defined(ElfW)
+# define ElfW(type) Elf_##type
+# endif
+
+# if SANITIZER_FREEBSD
+# include <pthread_np.h>
+# include <sys/auxv.h>
+# include <sys/sysctl.h>
+# define pthread_getattr_np pthread_attr_get_np
// The MAP_NORESERVE define has been removed in FreeBSD 11.x, and even before
// that, it was never implemented. So just define it to zero.
-#undef MAP_NORESERVE
-#define MAP_NORESERVE 0
-#endif
-
-#if SANITIZER_NETBSD
-#include <sys/sysctl.h>
-#include <sys/tls.h>
-#include <lwp.h>
-#endif
-
-#if SANITIZER_SOLARIS
-#include <stdlib.h>
-#include <thread.h>
-#endif
-
-#if SANITIZER_ANDROID
-#include <android/api-level.h>
-#if !defined(CPU_COUNT) && !defined(__aarch64__)
-#include <dirent.h>
-#include <fcntl.h>
+# undef MAP_NORESERVE
+# define MAP_NORESERVE 0
+extern const Elf_Auxinfo *__elf_aux_vector __attribute__ ((weak));
+# endif
+
+# if SANITIZER_NETBSD
+# include <lwp.h>
+# include <sys/sysctl.h>
+# include <sys/tls.h>
+# endif
+
+# if SANITIZER_SOLARIS
+# include <stddef.h>
+# include <stdlib.h>
+# include <thread.h>
+# endif
+
+# if SANITIZER_ANDROID
+# include <android/api-level.h>
+# if !defined(CPU_COUNT) && !defined(__aarch64__)
+# include <dirent.h>
+# include <fcntl.h>
struct __sanitizer::linux_dirent {
- long d_ino;
- off_t d_off;
+ long d_ino;
+ off_t d_off;
unsigned short d_reclen;
- char d_name[];
+ char d_name[];
};
-#endif
-#endif
+# endif
+# endif
-#if !SANITIZER_ANDROID
-#include <elf.h>
-#include <unistd.h>
-#endif
+# if !SANITIZER_ANDROID
+# include <elf.h>
+# include <unistd.h>
+# endif
namespace __sanitizer {
-SANITIZER_WEAK_ATTRIBUTE int
-real_sigaction(int signum, const void *act, void *oldact);
+SANITIZER_WEAK_ATTRIBUTE int real_sigaction(int signum, const void *act,
+ void *oldact);
int internal_sigaction(int signum, const void *act, void *oldact) {
-#if !SANITIZER_GO
+# if !SANITIZER_GO
if (&real_sigaction)
return real_sigaction(signum, act, oldact);
-#endif
+# endif
return sigaction(signum, (const struct sigaction *)act,
(struct sigaction *)oldact);
}
@@ -109,7 +111,7 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0);
// Find the mapping that contains a stack variable.
- MemoryMappingLayout proc_maps(/*cache_enabled*/true);
+ MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
if (proc_maps.Error()) {
*stack_top = *stack_bottom = 0;
return;
@@ -117,7 +119,8 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
MemoryMappedSegment segment;
uptr prev_end = 0;
while (proc_maps.Next(&segment)) {
- if ((uptr)&rl < segment.end) break;
+ if ((uptr)&rl < segment.end)
+ break;
prev_end = segment.end;
}
CHECK((uptr)&rl >= segment.start && (uptr)&rl < segment.end);
@@ -125,7 +128,8 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
// Get stacksize from rlimit, but clip it so that it does not overlap
// with other mappings.
uptr stacksize = rl.rlim_cur;
- if (stacksize > segment.end - prev_end) stacksize = segment.end - prev_end;
+ if (stacksize > segment.end - prev_end)
+ stacksize = segment.end - prev_end;
// When running with unlimited stack size, we still want to set some limit.
// The unlimited stack size is caused by 'ulimit -s unlimited'.
// Also, for some reason, GNU make spawns subprocesses with unlimited stack.
@@ -137,39 +141,39 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
}
uptr stacksize = 0;
void *stackaddr = nullptr;
-#if SANITIZER_SOLARIS
+# if SANITIZER_SOLARIS
stack_t ss;
CHECK_EQ(thr_stksegment(&ss), 0);
stacksize = ss.ss_size;
stackaddr = (char *)ss.ss_sp - stacksize;
-#else // !SANITIZER_SOLARIS
+# else // !SANITIZER_SOLARIS
pthread_attr_t attr;
pthread_attr_init(&attr);
CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
- my_pthread_attr_getstack(&attr, &stackaddr, &stacksize);
+ internal_pthread_attr_getstack(&attr, &stackaddr, &stacksize);
pthread_attr_destroy(&attr);
-#endif // SANITIZER_SOLARIS
+# endif // SANITIZER_SOLARIS
*stack_top = (uptr)stackaddr + stacksize;
*stack_bottom = (uptr)stackaddr;
}
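
The non-Solaris branch boils down to the standard pthread attribute query; a minimal sketch (pthread_getattr_np is the glibc spelling, mapped to FreeBSD's pthread_attr_get_np elsewhere in this file):

#include <pthread.h>
#include <stddef.h>

// Recover the current thread's stack bounds from its attributes.
static void QueryStack(void **bottom, size_t *size) {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_getattr_np(pthread_self(), &attr);
  pthread_attr_getstack(&attr, bottom, size);  // bottom = lowest address
  pthread_attr_destroy(&attr);
}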
-#if !SANITIZER_GO
+# if !SANITIZER_GO
bool SetEnv(const char *name, const char *value) {
void *f = dlsym(RTLD_NEXT, "setenv");
if (!f)
return false;
- typedef int(*setenv_ft)(const char *name, const char *value, int overwrite);
+ typedef int (*setenv_ft)(const char *name, const char *value, int overwrite);
setenv_ft setenv_f;
CHECK_EQ(sizeof(setenv_f), sizeof(f));
internal_memcpy(&setenv_f, &f, sizeof(f));
return setenv_f(name, value, 1) == 0;
}
-#endif
+# endif
__attribute__((unused)) static bool GetLibcVersion(int *major, int *minor,
int *patch) {
-#ifdef _CS_GNU_LIBC_VERSION
+# ifdef _CS_GNU_LIBC_VERSION
char buf[64];
uptr len = confstr(_CS_GNU_LIBC_VERSION, buf, sizeof(buf));
if (len >= sizeof(buf))
@@ -183,9 +187,9 @@ __attribute__((unused)) static bool GetLibcVersion(int *major, int *minor,
*minor = (*p == '.') ? internal_simple_strtoll(p + 1, &p, 10) : 0;
*patch = (*p == '.') ? internal_simple_strtoll(p + 1, &p, 10) : 0;
return true;
-#else
+# else
return false;
-#endif
+# endif
}
// True if we can use dlpi_tls_data. glibc before 2.25 may leave NULL (BZ
@@ -196,42 +200,42 @@ __attribute__((unused)) static bool GetLibcVersion(int *major, int *minor,
// https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=254774
__attribute__((unused)) static int g_use_dlpi_tls_data;
-#if SANITIZER_GLIBC && !SANITIZER_GO
+# if SANITIZER_GLIBC && !SANITIZER_GO
__attribute__((unused)) static size_t g_tls_size;
void InitTlsSize() {
int major, minor, patch;
g_use_dlpi_tls_data =
GetLibcVersion(&major, &minor, &patch) && major == 2 && minor >= 25;
-#if defined(__aarch64__) || defined(__x86_64__) || defined(__powerpc64__)
+# if defined(__aarch64__) || defined(__x86_64__) || \
+ defined(__powerpc64__) || defined(__loongarch__)
void *get_tls_static_info = dlsym(RTLD_NEXT, "_dl_get_tls_static_info");
size_t tls_align;
((void (*)(size_t *, size_t *))get_tls_static_info)(&g_tls_size, &tls_align);
-#endif
+# endif
}
-#else
-void InitTlsSize() { }
-#endif // SANITIZER_GLIBC && !SANITIZER_GO
+# else
+void InitTlsSize() {}
+# endif // SANITIZER_GLIBC && !SANITIZER_GO
// On glibc x86_64, ThreadDescriptorSize() needs to be precise due to the usage
// of g_tls_size. On other targets, ThreadDescriptorSize() is only used by lsan
// to get the pointer to thread-specific data keys in the thread control block.
-#if (SANITIZER_FREEBSD || SANITIZER_LINUX) && !SANITIZER_ANDROID
+# if (SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_SOLARIS) && \
+ !SANITIZER_ANDROID && !SANITIZER_GO
// sizeof(struct pthread) from glibc.
static atomic_uintptr_t thread_descriptor_size;
-uptr ThreadDescriptorSize() {
- uptr val = atomic_load_relaxed(&thread_descriptor_size);
- if (val)
- return val;
-#if defined(__x86_64__) || defined(__i386__) || defined(__arm__)
+static uptr ThreadDescriptorSizeFallback() {
+ uptr val = 0;
+# if defined(__x86_64__) || defined(__i386__) || defined(__arm__)
int major;
int minor;
int patch;
if (GetLibcVersion(&major, &minor, &patch) && major == 2) {
/* sizeof(struct pthread) values from various glibc versions. */
if (SANITIZER_X32)
- val = 1728; // Assume only one particular version for x32.
+ val = 1728; // Assume only one particular version for x32.
// For ARM sizeof(struct pthread) changed in Glibc 2.23.
else if (SANITIZER_ARM)
val = minor <= 22 ? 1120 : 1216;
@@ -254,17 +258,19 @@ uptr ThreadDescriptorSize() {
else // minor == 32
val = FIRST_32_SECOND_64(1344, 2496);
}
-#elif defined(__s390__) || defined(__sparc__)
+# elif defined(__s390__) || defined(__sparc__)
// The size of a prefix of TCB including pthread::{specific_1stblock,specific}
// suffices. Just return offsetof(struct pthread, specific_used), which hasn't
// changed since 2007-05. Technically this applies to i386/x86_64 as well but
// we call _dl_get_tls_static_info and need the precise size of struct
// pthread.
return FIRST_32_SECOND_64(524, 1552);
-#elif defined(__mips__)
+# elif defined(__mips__)
// TODO(sagarthakur): add more values as per different glibc versions.
val = FIRST_32_SECOND_64(1152, 1776);
-#elif SANITIZER_RISCV64
+# elif SANITIZER_LOONGARCH64
+ val = 1856; // from glibc 2.36
+# elif SANITIZER_RISCV64
int major;
int minor;
int patch;
@@ -279,36 +285,51 @@ uptr ThreadDescriptorSize() {
val = 1936; // tested against glibc 2.32
}
-#elif defined(__aarch64__)
+# elif defined(__aarch64__)
// The sizeof (struct pthread) is the same from GLIBC 2.17 to 2.22.
val = 1776;
-#elif defined(__powerpc64__)
- val = 1776; // from glibc.ppc64le 2.20-8.fc21
-#endif
+# elif defined(__powerpc64__)
+ val = 1776; // from glibc.ppc64le 2.20-8.fc21
+# endif
+ return val;
+}
+
+uptr ThreadDescriptorSize() {
+ uptr val = atomic_load_relaxed(&thread_descriptor_size);
if (val)
- atomic_store_relaxed(&thread_descriptor_size, val);
+ return val;
+ // _thread_db_sizeof_pthread is a GLIBC_PRIVATE symbol that is exported in
+ // glibc 2.34 and later.
+ if (unsigned *psizeof = static_cast<unsigned *>(
+ dlsym(RTLD_DEFAULT, "_thread_db_sizeof_pthread")))
+ val = *psizeof;
+ if (!val)
+ val = ThreadDescriptorSizeFallback();
+ atomic_store_relaxed(&thread_descriptor_size, val);
return val;
}
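
The glibc >= 2.34 fast path added above is just a dlsym lookup; a hedged standalone equivalent:

#include <dlfcn.h>

// glibc 2.34+ exports sizeof(struct pthread) as a GLIBC_PRIVATE unsigned.
static unsigned QueryPthreadSize() {
  void *sym = dlsym(RTLD_DEFAULT, "_thread_db_sizeof_pthread");
  return sym ? *static_cast<unsigned *>(sym) : 0;  // 0: use fallback table
}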
-#if defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64
+# if defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64 || \
+ SANITIZER_LOONGARCH64
// TlsPreTcbSize includes size of struct pthread_descr and size of tcb
// head structure. It lies before the static tls blocks.
static uptr TlsPreTcbSize() {
-#if defined(__mips__)
- const uptr kTcbHead = 16; // sizeof (tcbhead_t)
-#elif defined(__powerpc64__)
- const uptr kTcbHead = 88; // sizeof (tcbhead_t)
-#elif SANITIZER_RISCV64
+# if defined(__mips__)
const uptr kTcbHead = 16; // sizeof (tcbhead_t)
-#endif
+# elif defined(__powerpc64__)
+ const uptr kTcbHead = 88; // sizeof (tcbhead_t)
+# elif SANITIZER_RISCV64
+ const uptr kTcbHead = 16; // sizeof (tcbhead_t)
+# elif SANITIZER_LOONGARCH64
+ const uptr kTcbHead = 16; // sizeof (tcbhead_t)
+# endif
const uptr kTlsAlign = 16;
const uptr kTlsPreTcbSize =
RoundUpTo(ThreadDescriptorSize() + kTcbHead, kTlsAlign);
return kTlsPreTcbSize;
}
-#endif
+# endif
-#if !SANITIZER_GO
namespace {
struct TlsBlock {
uptr begin, end, align;
@@ -317,7 +338,7 @@ struct TlsBlock {
};
} // namespace
-#ifdef __s390__
+# ifdef __s390__
extern "C" uptr __tls_get_offset(void *arg);
static uptr TlsGetOffset(uptr ti_module, uptr ti_offset) {
@@ -335,31 +356,54 @@ static uptr TlsGetOffset(uptr ti_module, uptr ti_offset) {
: "memory", "cc", "0", "1", "3", "4", "5", "14");
return r2;
}
-#else
+# else
extern "C" void *__tls_get_addr(size_t *);
-#endif
+# endif
+
+static size_t main_tls_modid;
static int CollectStaticTlsBlocks(struct dl_phdr_info *info, size_t size,
void *data) {
- if (!info->dlpi_tls_modid)
+ size_t tls_modid;
+# if SANITIZER_SOLARIS
+ // dlpi_tls_modid is only available since Solaris 11.4 SRU 10. Use
+  // dlinfo(RTLD_DI_LINKMAP) instead, which works on all of Solaris 11.3,
+ // 11.4, and Illumos. The tlsmodid of the executable was changed to 1 in
+ // 11.4 to match other implementations.
+ if (size >= offsetof(dl_phdr_info_test, dlpi_tls_modid))
+ main_tls_modid = 1;
+ else
+ main_tls_modid = 0;
+ g_use_dlpi_tls_data = 0;
+ Rt_map *map;
+ dlinfo(RTLD_SELF, RTLD_DI_LINKMAP, &map);
+ tls_modid = map->rt_tlsmodid;
+# else
+ main_tls_modid = 1;
+ tls_modid = info->dlpi_tls_modid;
+# endif
+
+ if (tls_modid < main_tls_modid)
return 0;
- uptr begin = (uptr)info->dlpi_tls_data;
+ uptr begin;
+# if !SANITIZER_SOLARIS
+ begin = (uptr)info->dlpi_tls_data;
+# endif
if (!g_use_dlpi_tls_data) {
// Call __tls_get_addr as a fallback. This forces TLS allocation on glibc
// and FreeBSD.
-#ifdef __s390__
- begin = (uptr)__builtin_thread_pointer() +
- TlsGetOffset(info->dlpi_tls_modid, 0);
-#else
- size_t mod_and_off[2] = {info->dlpi_tls_modid, 0};
+# ifdef __s390__
+ begin = (uptr)__builtin_thread_pointer() + TlsGetOffset(tls_modid, 0);
+# else
+ size_t mod_and_off[2] = {tls_modid, 0};
begin = (uptr)__tls_get_addr(mod_and_off);
-#endif
+# endif
}
for (unsigned i = 0; i != info->dlpi_phnum; ++i)
if (info->dlpi_phdr[i].p_type == PT_TLS) {
static_cast<InternalMmapVector<TlsBlock> *>(data)->push_back(
TlsBlock{begin, begin + info->dlpi_phdr[i].p_memsz,
- info->dlpi_phdr[i].p_align, info->dlpi_tls_modid});
+ info->dlpi_phdr[i].p_align, tls_modid});
break;
}
return 0;
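
Outside the runtime, the same PT_TLS walk can be sketched with plain libc and the STL (the in-tree code uses InternalMmapVector plus the Solaris/modid handling above):

#include <link.h>

#include <cstdint>
#include <vector>

struct TlsRange { uintptr_t begin, end; size_t modid; };

// Record every loaded module's static TLS block, if it has one.
static int Collect(struct dl_phdr_info *info, size_t, void *data) {
  auto *out = static_cast<std::vector<TlsRange> *>(data);
  for (unsigned i = 0; i != info->dlpi_phnum; ++i)
    if (info->dlpi_phdr[i].p_type == PT_TLS)
      out->push_back({(uintptr_t)info->dlpi_tls_data,
                      (uintptr_t)info->dlpi_tls_data +
                          info->dlpi_phdr[i].p_memsz,
                      (size_t)info->dlpi_tls_modid});
  return 0;  // keep iterating over modules
}
// Usage: std::vector<TlsRange> v; dl_iterate_phdr(Collect, &v);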
@@ -371,11 +415,11 @@ __attribute__((unused)) static void GetStaticTlsBoundary(uptr *addr, uptr *size,
dl_iterate_phdr(CollectStaticTlsBlocks, &ranges);
uptr len = ranges.size();
Sort(ranges.begin(), len);
- // Find the range with tls_modid=1. For glibc, because libc.so uses PT_TLS,
- // this module is guaranteed to exist and is one of the initially loaded
- // modules.
+ // Find the range with tls_modid == main_tls_modid. For glibc, because
+ // libc.so uses PT_TLS, this module is guaranteed to exist and is one of
+ // the initially loaded modules.
uptr one = 0;
- while (one != len && ranges[one].tls_modid != 1) ++one;
+ while (one != len && ranges[one].tls_modid != main_tls_modid) ++one;
if (one == len) {
// This may happen with musl if no module uses PT_TLS.
*addr = 0;
@@ -384,36 +428,33 @@ __attribute__((unused)) static void GetStaticTlsBoundary(uptr *addr, uptr *size,
return;
}
// Find the maximum consecutive ranges. We consider two modules consecutive if
- // the gap is smaller than the alignment. The dynamic loader places static TLS
- // blocks this way not to waste space.
+ // the gap is smaller than the alignment of the latter range. The dynamic
+ // loader places static TLS blocks this way not to waste space.
uptr l = one;
*align = ranges[l].align;
- while (l != 0 && ranges[l].begin < ranges[l - 1].end + ranges[l - 1].align)
+ while (l != 0 && ranges[l].begin < ranges[l - 1].end + ranges[l].align)
*align = Max(*align, ranges[--l].align);
uptr r = one + 1;
- while (r != len && ranges[r].begin < ranges[r - 1].end + ranges[r - 1].align)
+ while (r != len && ranges[r].begin < ranges[r - 1].end + ranges[r].align)
*align = Max(*align, ranges[r++].align);
*addr = ranges[l].begin;
*size = ranges[r - 1].end - ranges[l].begin;
}
-#endif // !SANITIZER_GO
-#endif // (x86_64 || i386 || mips || ...) && (SANITIZER_FREEBSD ||
- // SANITIZER_LINUX) && !SANITIZER_ANDROID
+# endif // (x86_64 || i386 || mips || ...) && (SANITIZER_FREEBSD ||
+ // SANITIZER_LINUX) && !SANITIZER_ANDROID && !SANITIZER_GO
-#if SANITIZER_NETBSD
-static struct tls_tcb * ThreadSelfTlsTcb() {
+# if SANITIZER_NETBSD
+static struct tls_tcb *ThreadSelfTlsTcb() {
struct tls_tcb *tcb = nullptr;
-#ifdef __HAVE___LWP_GETTCB_FAST
+# ifdef __HAVE___LWP_GETTCB_FAST
tcb = (struct tls_tcb *)__lwp_gettcb_fast();
-#elif defined(__HAVE___LWP_GETPRIVATE_FAST)
+# elif defined(__HAVE___LWP_GETPRIVATE_FAST)
tcb = (struct tls_tcb *)__lwp_getprivate_fast();
-#endif
+# endif
return tcb;
}
-uptr ThreadSelf() {
- return (uptr)ThreadSelfTlsTcb()->tcb_pthread;
-}
+uptr ThreadSelf() { return (uptr)ThreadSelfTlsTcb()->tcb_pthread; }
int GetSizeFromHdr(struct dl_phdr_info *info, size_t size, void *data) {
const Elf_Phdr *hdr = info->dlpi_phdr;
@@ -421,23 +462,23 @@ int GetSizeFromHdr(struct dl_phdr_info *info, size_t size, void *data) {
for (; hdr != last_hdr; ++hdr) {
if (hdr->p_type == PT_TLS && info->dlpi_tls_modid == 1) {
- *(uptr*)data = hdr->p_memsz;
+ *(uptr *)data = hdr->p_memsz;
break;
}
}
return 0;
}
-#endif // SANITIZER_NETBSD
+# endif // SANITIZER_NETBSD
-#if SANITIZER_ANDROID
+# if SANITIZER_ANDROID
// Bionic provides this API since S.
extern "C" SANITIZER_WEAK_ATTRIBUTE void __libc_get_static_tls_bounds(void **,
void **);
-#endif
+# endif
-#if !SANITIZER_GO
+# if !SANITIZER_GO
static void GetTls(uptr *addr, uptr *size) {
-#if SANITIZER_ANDROID
+# if SANITIZER_ANDROID
if (&__libc_get_static_tls_bounds) {
void *start_addr;
void *end_addr;
@@ -449,35 +490,48 @@ static void GetTls(uptr *addr, uptr *size) {
*addr = 0;
*size = 0;
}
-#elif SANITIZER_GLIBC && defined(__x86_64__)
+# elif SANITIZER_GLIBC && defined(__x86_64__)
// For aarch64 and x86-64, use an O(1) approach which requires relatively
// precise ThreadDescriptorSize. g_tls_size was initialized in InitTlsSize.
+# if SANITIZER_X32
+ asm("mov %%fs:8,%0" : "=r"(*addr));
+# else
asm("mov %%fs:16,%0" : "=r"(*addr));
+# endif
*size = g_tls_size;
*addr -= *size;
*addr += ThreadDescriptorSize();
-#elif SANITIZER_GLIBC && defined(__aarch64__)
+# elif SANITIZER_GLIBC && defined(__aarch64__)
*addr = reinterpret_cast<uptr>(__builtin_thread_pointer()) -
ThreadDescriptorSize();
*size = g_tls_size + ThreadDescriptorSize();
-#elif SANITIZER_GLIBC && defined(__powerpc64__)
+# elif SANITIZER_GLIBC && defined(__loongarch__)
+# ifdef __clang__
+ *addr = reinterpret_cast<uptr>(__builtin_thread_pointer()) -
+ ThreadDescriptorSize();
+# else
+ asm("or %0,$tp,$zero" : "=r"(*addr));
+ *addr -= ThreadDescriptorSize();
+# endif
+ *size = g_tls_size + ThreadDescriptorSize();
+# elif SANITIZER_GLIBC && defined(__powerpc64__)
// Workaround for glibc<2.25(?). 2.27 is known to not need this.
uptr tp;
asm("addi %0,13,-0x7000" : "=r"(tp));
const uptr pre_tcb_size = TlsPreTcbSize();
*addr = tp - pre_tcb_size;
*size = g_tls_size + pre_tcb_size;
-#elif SANITIZER_FREEBSD || SANITIZER_LINUX
+# elif SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_SOLARIS
uptr align;
GetStaticTlsBoundary(addr, size, &align);
-#if defined(__x86_64__) || defined(__i386__) || defined(__s390__) || \
- defined(__sparc__)
+# if defined(__x86_64__) || defined(__i386__) || defined(__s390__) || \
+ defined(__sparc__)
if (SANITIZER_GLIBC) {
-#if defined(__x86_64__) || defined(__i386__)
+# if defined(__x86_64__) || defined(__i386__)
align = Max<uptr>(align, 64);
-#else
+# else
align = Max<uptr>(align, 16);
-#endif
+# endif
}
const uptr tp = RoundUpTo(*addr + *size, align);
@@ -496,26 +550,26 @@ static void GetTls(uptr *addr, uptr *size) {
// because the number of bytes after pthread::specific is larger.
*addr = tp - RoundUpTo(*size, align);
*size = tp - *addr + ThreadDescriptorSize();
-#else
+# else
if (SANITIZER_GLIBC)
*size += 1664;
else if (SANITIZER_FREEBSD)
*size += 128; // RTLD_STATIC_TLS_EXTRA
-#if defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64
+# if defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64
const uptr pre_tcb_size = TlsPreTcbSize();
*addr -= pre_tcb_size;
*size += pre_tcb_size;
-#else
+# else
// arm and aarch64 reserve two words at TP, so this underestimates the range.
// However, this is sufficient for the purpose of finding the pointers to
// thread-specific data keys.
const uptr tcb_size = ThreadDescriptorSize();
*addr -= tcb_size;
*size += tcb_size;
-#endif
-#endif
-#elif SANITIZER_NETBSD
- struct tls_tcb * const tcb = ThreadSelfTlsTcb();
+# endif
+# endif
+# elif SANITIZER_NETBSD
+ struct tls_tcb *const tcb = ThreadSelfTlsTcb();
*addr = 0;
*size = 0;
if (tcb != 0) {
@@ -528,35 +582,31 @@ static void GetTls(uptr *addr, uptr *size) {
*addr = (uptr)tcb->tcb_dtv[1];
}
}
-#elif SANITIZER_SOLARIS
- // FIXME
- *addr = 0;
- *size = 0;
-#else
-#error "Unknown OS"
-#endif
+# else
+# error "Unknown OS"
+# endif
}
-#endif
+# endif
-#if !SANITIZER_GO
+# if !SANITIZER_GO
uptr GetTlsSize() {
-#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
- SANITIZER_SOLARIS
+# if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
+ SANITIZER_SOLARIS
uptr addr, size;
GetTls(&addr, &size);
return size;
-#else
+# else
return 0;
-#endif
+# endif
}
-#endif
+# endif
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
uptr *tls_addr, uptr *tls_size) {
-#if SANITIZER_GO
+# if SANITIZER_GO
// Stub implementation for Go.
*stk_addr = *stk_size = *tls_addr = *tls_size = 0;
-#else
+# else
GetTls(tls_addr, tls_size);
uptr stack_top, stack_bottom;
@@ -572,16 +622,12 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
*stk_size = *tls_addr - *stk_addr;
}
}
-#endif
+# endif
}
-#if !SANITIZER_FREEBSD
+# if !SANITIZER_FREEBSD
typedef ElfW(Phdr) Elf_Phdr;
-#elif SANITIZER_WORDSIZE == 32 && __FreeBSD_version <= 902001 // v9.2
-#define Elf_Phdr XElf32_Phdr
-#define dl_phdr_info xdl_phdr_info
-#define dl_iterate_phdr(c, b) xdl_iterate_phdr((c), (b))
-#endif // !SANITIZER_FREEBSD
+# endif
struct DlIteratePhdrData {
InternalMmapVectorNoCtor<LoadedModule> *modules;
@@ -601,8 +647,35 @@ static int AddModuleSegments(const char *module_name, dl_phdr_info *info,
uptr cur_end = cur_beg + phdr->p_memsz;
bool executable = phdr->p_flags & PF_X;
bool writable = phdr->p_flags & PF_W;
- cur_module.addAddressRange(cur_beg, cur_end, executable,
- writable);
+ cur_module.addAddressRange(cur_beg, cur_end, executable, writable);
+ } else if (phdr->p_type == PT_NOTE) {
+# ifdef NT_GNU_BUILD_ID
+ uptr off = 0;
+ while (off + sizeof(ElfW(Nhdr)) < phdr->p_memsz) {
+ auto *nhdr = reinterpret_cast<const ElfW(Nhdr) *>(info->dlpi_addr +
+ phdr->p_vaddr + off);
+ constexpr auto kGnuNamesz = 4; // "GNU" with NUL-byte.
+ static_assert(kGnuNamesz % 4 == 0, "kGnuNameSize is aligned to 4.");
+ if (nhdr->n_type == NT_GNU_BUILD_ID && nhdr->n_namesz == kGnuNamesz) {
+ if (off + sizeof(ElfW(Nhdr)) + nhdr->n_namesz + nhdr->n_descsz >
+ phdr->p_memsz) {
+ // Something is very wrong, bail out instead of reading potentially
+ // arbitrary memory.
+ break;
+ }
+ const char *name =
+ reinterpret_cast<const char *>(nhdr) + sizeof(*nhdr);
+ if (internal_memcmp(name, "GNU", 3) == 0) {
+ const char *value = reinterpret_cast<const char *>(nhdr) +
+ sizeof(*nhdr) + kGnuNamesz;
+ cur_module.setUuid(value, nhdr->n_descsz);
+ break;
+ }
+ }
+ off += sizeof(*nhdr) + RoundUpTo(nhdr->n_namesz, 4) +
+ RoundUpTo(nhdr->n_descsz, 4);
+ }
+# endif
}
}
modules->push_back(cur_module);
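
For orientation, the GNU build-id note parsed above has this fixed in-memory layout (all fields 4-byte aligned):

// ElfW(Nhdr) { n_namesz = 4;               // "GNU" plus NUL
//              n_descsz = N;               // build-id length (20 for SHA-1)
//              n_type   = NT_GNU_BUILD_ID; }
// char name[4] = "GNU\0";
// unsigned char desc[N];                   // the build-id bytes themselves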
@@ -619,33 +692,30 @@ static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
return AddModuleSegments(module_name.data(), info, data->modules);
}
- if (info->dlpi_name) {
- InternalScopedString module_name;
- module_name.append("%s", info->dlpi_name);
- return AddModuleSegments(module_name.data(), info, data->modules);
- }
+ if (info->dlpi_name)
+ return AddModuleSegments(info->dlpi_name, info, data->modules);
return 0;
}
-#if SANITIZER_ANDROID && __ANDROID_API__ < 21
+# if SANITIZER_ANDROID && __ANDROID_API__ < 21
extern "C" __attribute__((weak)) int dl_iterate_phdr(
int (*)(struct dl_phdr_info *, size_t, void *), void *);
-#endif
+# endif
static bool requiresProcmaps() {
-#if SANITIZER_ANDROID && __ANDROID_API__ <= 22
+# if SANITIZER_ANDROID && __ANDROID_API__ <= 22
// Fall back to /proc/maps if dl_iterate_phdr is unavailable or broken.
// The runtime check allows the same library to work with
// both K and L (and future) Android releases.
return AndroidGetApiLevel() <= ANDROID_LOLLIPOP_MR1;
-#else
+# else
return false;
-#endif
+# endif
}
static void procmapsInit(InternalMmapVectorNoCtor<LoadedModule> *modules) {
- MemoryMappingLayout memory_mapping(/*cache_enabled*/true);
+ MemoryMappingLayout memory_mapping(/*cache_enabled*/ true);
memory_mapping.DumpListOfModules(modules);
}
@@ -697,22 +767,19 @@ uptr GetRSS() {
// We need the second number which is RSS in pages.
char *pos = buf;
// Skip the first number.
- while (*pos >= '0' && *pos <= '9')
- pos++;
+ while (*pos >= '0' && *pos <= '9') pos++;
// Skip whitespaces.
- while (!(*pos >= '0' && *pos <= '9') && *pos != 0)
- pos++;
+ while (!(*pos >= '0' && *pos <= '9') && *pos != 0) pos++;
// Read the number.
uptr rss = 0;
- while (*pos >= '0' && *pos <= '9')
- rss = rss * 10 + *pos++ - '0';
+ while (*pos >= '0' && *pos <= '9') rss = rss * 10 + *pos++ - '0';
return rss * GetPageSizeCached();
}
// sysconf(_SC_NPROCESSORS_{CONF,ONLN}) cannot be used on most platforms as
// they allocate memory.
u32 GetNumberOfCPUs() {
-#if SANITIZER_FREEBSD || SANITIZER_NETBSD
+# if SANITIZER_FREEBSD || SANITIZER_NETBSD
u32 ncpu;
int req[2];
uptr len = sizeof(ncpu);
@@ -720,7 +787,7 @@ u32 GetNumberOfCPUs() {
req[1] = HW_NCPU;
CHECK_EQ(internal_sysctl(req, 2, &ncpu, &len, NULL, 0), 0);
return ncpu;
-#elif SANITIZER_ANDROID && !defined(CPU_COUNT) && !defined(__aarch64__)
+# elif SANITIZER_ANDROID && !defined(CPU_COUNT) && !defined(__aarch64__)
// Fall back to /sys/devices/system/cpu on Android when cpu_set_t doesn't
// exist in sched.h. That is the case for toolchains generated with older
// NDKs.
@@ -748,26 +815,26 @@ u32 GetNumberOfCPUs() {
break;
if (entry->d_ino != 0 && *d_type == DT_DIR) {
if (entry->d_name[0] == 'c' && entry->d_name[1] == 'p' &&
- entry->d_name[2] == 'u' &&
- entry->d_name[3] >= '0' && entry->d_name[3] <= '9')
+ entry->d_name[2] == 'u' && entry->d_name[3] >= '0' &&
+ entry->d_name[3] <= '9')
n_cpus++;
}
entry = (struct linux_dirent *)(((u8 *)entry) + entry->d_reclen);
}
internal_close(fd);
return n_cpus;
-#elif SANITIZER_SOLARIS
+# elif SANITIZER_SOLARIS
return sysconf(_SC_NPROCESSORS_ONLN);
-#else
+# else
cpu_set_t CPUs;
CHECK_EQ(sched_getaffinity(0, sizeof(cpu_set_t), &CPUs), 0);
return CPU_COUNT(&CPUs);
-#endif
+# endif
}
-#if SANITIZER_LINUX
+# if SANITIZER_LINUX
-#if SANITIZER_ANDROID
+# if SANITIZER_ANDROID
static atomic_uint8_t android_log_initialized;
void AndroidLogInit() {
@@ -779,13 +846,15 @@ static bool ShouldLogAfterPrintf() {
return atomic_load(&android_log_initialized, memory_order_acquire);
}
-extern "C" SANITIZER_WEAK_ATTRIBUTE
-int async_safe_write_log(int pri, const char* tag, const char* msg);
-extern "C" SANITIZER_WEAK_ATTRIBUTE
-int __android_log_write(int prio, const char* tag, const char* msg);
+extern "C" SANITIZER_WEAK_ATTRIBUTE int async_safe_write_log(int pri,
+ const char *tag,
+ const char *msg);
+extern "C" SANITIZER_WEAK_ATTRIBUTE int __android_log_write(int prio,
+ const char *tag,
+ const char *msg);
// ANDROID_LOG_INFO is 4, but can't be resolved at runtime.
-#define SANITIZER_ANDROID_LOG_INFO 4
+# define SANITIZER_ANDROID_LOG_INFO 4
// async_safe_write_log is a new public version of __libc_write_log that is
// used behind syslog. It is preferable to syslog as it will not do any dynamic
@@ -804,14 +873,14 @@ void WriteOneLineToSyslog(const char *s) {
}
}
-extern "C" SANITIZER_WEAK_ATTRIBUTE
-void android_set_abort_message(const char *);
+extern "C" SANITIZER_WEAK_ATTRIBUTE void android_set_abort_message(
+ const char *);
void SetAbortMessage(const char *str) {
if (&android_set_abort_message)
android_set_abort_message(str);
}
-#else
+# else
void AndroidLogInit() {}
static bool ShouldLogAfterPrintf() { return true; }
@@ -819,16 +888,16 @@ static bool ShouldLogAfterPrintf() { return true; }
void WriteOneLineToSyslog(const char *s) { syslog(LOG_INFO, "%s", s); }
void SetAbortMessage(const char *str) {}
-#endif // SANITIZER_ANDROID
+# endif // SANITIZER_ANDROID
void LogMessageOnPrintf(const char *str) {
if (common_flags()->log_to_syslog && ShouldLogAfterPrintf())
WriteToSyslog(str);
}
-#endif // SANITIZER_LINUX
+# endif // SANITIZER_LINUX
-#if SANITIZER_GLIBC && !SANITIZER_GO
+# if SANITIZER_GLIBC && !SANITIZER_GO
// glibc crashes when using clock_gettime from a preinit_array function as the
// vDSO function pointers haven't been initialized yet. __progname is
// initialized after the vDSO function pointers, so if it exists, is not null
@@ -839,8 +908,8 @@ inline bool CanUseVDSO() { return &__progname && __progname && *__progname; }
// MonotonicNanoTime is a timing function that can leverage the vDSO by calling
// clock_gettime. real_clock_gettime only exists if clock_gettime is
// intercepted, so define it weakly and use it if available.
-extern "C" SANITIZER_WEAK_ATTRIBUTE
-int real_clock_gettime(u32 clk_id, void *tp);
+extern "C" SANITIZER_WEAK_ATTRIBUTE int real_clock_gettime(u32 clk_id,
+ void *tp);
u64 MonotonicNanoTime() {
timespec ts;
if (CanUseVDSO()) {
@@ -853,19 +922,26 @@ u64 MonotonicNanoTime() {
}
return (u64)ts.tv_sec * (1000ULL * 1000 * 1000) + ts.tv_nsec;
}
-#else
+# else
// Non-glibc & Go always use the regular function.
u64 MonotonicNanoTime() {
timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
return (u64)ts.tv_sec * (1000ULL * 1000 * 1000) + ts.tv_nsec;
}
-#endif // SANITIZER_GLIBC && !SANITIZER_GO
+# endif // SANITIZER_GLIBC && !SANITIZER_GO
void ReExec() {
const char *pathname = "/proc/self/exe";
-#if SANITIZER_NETBSD
+# if SANITIZER_FREEBSD
+ for (const auto *aux = __elf_aux_vector; aux->a_type != AT_NULL; aux++) {
+ if (aux->a_type == AT_EXECPATH) {
+ pathname = static_cast<const char *>(aux->a_un.a_ptr);
+ break;
+ }
+ }
+# elif SANITIZER_NETBSD
static const int name[] = {
CTL_KERN,
KERN_PROC_ARGS,
@@ -878,14 +954,14 @@ void ReExec() {
len = sizeof(path);
if (internal_sysctl(name, ARRAY_SIZE(name), path, &len, NULL, 0) != -1)
pathname = path;
-#elif SANITIZER_SOLARIS
+# elif SANITIZER_SOLARIS
pathname = getexecname();
CHECK_NE(pathname, NULL);
-#elif SANITIZER_USE_GETAUXVAL
+# elif SANITIZER_USE_GETAUXVAL
// Calling execve with /proc/self/exe sets that as $EXEC_ORIGIN. Binaries that
// rely on that will fail to load shared libraries. Query AT_EXECFN instead.
pathname = reinterpret_cast<const char *>(getauxval(AT_EXECFN));
-#endif
+# endif
uptr rv = internal_execve(pathname, GetArgv(), GetEnviron());
int rverrno;
@@ -937,14 +1013,14 @@ static uptr MmapSharedNoReserve(uptr addr, uptr size) {
static uptr MremapCreateAlias(uptr base_addr, uptr alias_addr,
uptr alias_size) {
-#if SANITIZER_LINUX
+# if SANITIZER_LINUX
return internal_mremap(reinterpret_cast<void *>(base_addr), 0, alias_size,
MREMAP_MAYMOVE | MREMAP_FIXED,
reinterpret_cast<void *>(alias_addr));
-#else
+# else
CHECK(false && "mremap is not supported outside of Linux");
return 0;
-#endif
+# endif
}
static void CreateAliases(uptr start_addr, uptr alias_size, uptr num_aliases) {
@@ -989,12 +1065,12 @@ uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,
}
void InitializePlatformCommonFlags(CommonFlags *cf) {
-#if SANITIZER_ANDROID
+# if SANITIZER_ANDROID
if (&__libc_get_static_tls_bounds == nullptr)
cf->detect_leaks = false;
-#endif
+# endif
}
-} // namespace __sanitizer
+} // namespace __sanitizer
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux_s390.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux_s390.cpp
index bb2f5b5f9f7d..8523b540f2e5 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux_s390.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_linux_s390.cpp
@@ -15,14 +15,14 @@
#if SANITIZER_LINUX && SANITIZER_S390
-#include <dlfcn.h>
-#include <errno.h>
-#include <sys/syscall.h>
-#include <sys/utsname.h>
-#include <unistd.h>
+# include <dlfcn.h>
+# include <errno.h>
+# include <sys/syscall.h>
+# include <sys/utsname.h>
+# include <unistd.h>
-#include "sanitizer_libc.h"
-#include "sanitizer_linux.h"
+# include "sanitizer_libc.h"
+# include "sanitizer_linux.h"
namespace __sanitizer {
@@ -37,86 +37,85 @@ uptr internal_mmap(void *addr, uptr length, int prot, int flags, int fd,
unsigned long fd;
unsigned long offset;
} params = {
- (unsigned long)addr,
- (unsigned long)length,
- (unsigned long)prot,
- (unsigned long)flags,
- (unsigned long)fd,
-# ifdef __s390x__
- (unsigned long)offset,
-# else
+ (unsigned long)addr, (unsigned long)length, (unsigned long)prot,
+ (unsigned long)flags, (unsigned long)fd,
+# ifdef __s390x__
+ (unsigned long)offset,
+# else
(unsigned long)(offset / 4096),
-# endif
+# endif
};
-# ifdef __s390x__
+# ifdef __s390x__
return syscall(__NR_mmap, &params);
-# else
+# else
return syscall(__NR_mmap2, &params);
-# endif
+# endif
}
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr) {
- if (!fn || !child_stack)
- return -EINVAL;
+ if (!fn || !child_stack) {
+ errno = EINVAL;
+ return -1;
+ }
CHECK_EQ(0, (uptr)child_stack % 16);
// Minimum frame size.
-#ifdef __s390x__
+# ifdef __s390x__
child_stack = (char *)child_stack - 160;
-#else
+# else
child_stack = (char *)child_stack - 96;
-#endif
+# endif
// Terminate unwind chain.
((unsigned long *)child_stack)[0] = 0;
// And pass parameters.
((unsigned long *)child_stack)[1] = (uptr)fn;
((unsigned long *)child_stack)[2] = (uptr)arg;
- register long res __asm__("r2");
- register void *__cstack __asm__("r2") = child_stack;
- register int __flags __asm__("r3") = flags;
- register int * __ptidptr __asm__("r4") = parent_tidptr;
- register int * __ctidptr __asm__("r5") = child_tidptr;
- register void * __newtls __asm__("r6") = newtls;
+ register uptr res __asm__("r2");
+ register void *__cstack __asm__("r2") = child_stack;
+ register long __flags __asm__("r3") = flags;
+ register int *__ptidptr __asm__("r4") = parent_tidptr;
+ register int *__ctidptr __asm__("r5") = child_tidptr;
+ register void *__newtls __asm__("r6") = newtls;
__asm__ __volatile__(
- /* Clone. */
- "svc %1\n"
-
- /* if (%r2 != 0)
- * return;
- */
-#ifdef __s390x__
- "cghi %%r2, 0\n"
-#else
- "chi %%r2, 0\n"
-#endif
- "jne 1f\n"
-
- /* Call "fn(arg)". */
-#ifdef __s390x__
- "lmg %%r1, %%r2, 8(%%r15)\n"
-#else
- "lm %%r1, %%r2, 4(%%r15)\n"
-#endif
- "basr %%r14, %%r1\n"
-
- /* Call _exit(%r2). */
- "svc %2\n"
-
- /* Return to parent. */
- "1:\n"
- : "=r" (res)
- : "i"(__NR_clone), "i"(__NR_exit),
- "r"(__cstack),
- "r"(__flags),
- "r"(__ptidptr),
- "r"(__ctidptr),
- "r"(__newtls)
- : "memory", "cc");
+ /* Clone. */
+ "svc %1\n"
+
+ /* if (%r2 != 0)
+ * return;
+ */
+# ifdef __s390x__
+ "cghi %%r2, 0\n"
+# else
+ "chi %%r2, 0\n"
+# endif
+ "jne 1f\n"
+
+ /* Call "fn(arg)". */
+# ifdef __s390x__
+ "lmg %%r1, %%r2, 8(%%r15)\n"
+# else
+ "lm %%r1, %%r2, 4(%%r15)\n"
+# endif
+ "basr %%r14, %%r1\n"
+
+ /* Call _exit(%r2). */
+ "svc %2\n"
+
+ /* Return to parent. */
+ "1:\n"
+ : "=r"(res)
+ : "i"(__NR_clone), "i"(__NR_exit), "r"(__cstack), "r"(__flags),
+ "r"(__ptidptr), "r"(__ctidptr), "r"(__newtls)
+ : "memory", "cc");
+ if (res >= (uptr)-4095) {
+ errno = -res;
+ return -1;
+ }
return res;
}
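
The new epilogue encodes the usual raw-syscall convention; a generic sketch of the conversion it performs:

#include <errno.h>

// Linux syscalls return -errno in the result register, so unsigned values
// >= (unsigned long)-4095 denote errors.
static long RawToLibc(unsigned long res) {
  if (res >= (unsigned long)-4095) {
    errno = -(long)res;
    return -1;
  }
  return (long)res;
}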
-#if SANITIZER_S390_64
+# if SANITIZER_S390_64
static bool FixedCVE_2016_2143() {
// Try to determine if the running kernel has a fix for CVE-2016-2143,
// return false if in doubt (better safe than sorry). Distros may want to
@@ -131,20 +130,20 @@ static bool FixedCVE_2016_2143() {
// At least first 2 should be matched.
if (ptr[0] != '.')
return false;
- minor = internal_simple_strtoll(ptr+1, &ptr, 10);
+ minor = internal_simple_strtoll(ptr + 1, &ptr, 10);
// Third is optional.
if (ptr[0] == '.')
- patch = internal_simple_strtoll(ptr+1, &ptr, 10);
+ patch = internal_simple_strtoll(ptr + 1, &ptr, 10);
if (major < 3) {
if (major == 2 && minor == 6 && patch == 32 && ptr[0] == '-' &&
internal_strstr(ptr, ".el6")) {
// Check RHEL6
- int r1 = internal_simple_strtoll(ptr+1, &ptr, 10);
- if (r1 >= 657) // 2.6.32-657.el6 or later
+ int r1 = internal_simple_strtoll(ptr + 1, &ptr, 10);
+ if (r1 >= 657) // 2.6.32-657.el6 or later
return true;
if (r1 == 642 && ptr[0] == '.') {
- int r2 = internal_simple_strtoll(ptr+1, &ptr, 10);
- if (r2 >= 9) // 2.6.32-642.9.1.el6 or later
+ int r2 = internal_simple_strtoll(ptr + 1, &ptr, 10);
+ if (r2 >= 9) // 2.6.32-642.9.1.el6 or later
return true;
}
}
@@ -160,12 +159,12 @@ static bool FixedCVE_2016_2143() {
if (minor == 10 && patch == 0 && ptr[0] == '-' &&
internal_strstr(ptr, ".el7")) {
// Check RHEL7
- int r1 = internal_simple_strtoll(ptr+1, &ptr, 10);
- if (r1 >= 426) // 3.10.0-426.el7 or later
+ int r1 = internal_simple_strtoll(ptr + 1, &ptr, 10);
+ if (r1 >= 426) // 3.10.0-426.el7 or later
return true;
if (r1 == 327 && ptr[0] == '.') {
- int r2 = internal_simple_strtoll(ptr+1, &ptr, 10);
- if (r2 >= 27) // 3.10.0-327.27.1.el7 or later
+ int r2 = internal_simple_strtoll(ptr + 1, &ptr, 10);
+ if (r2 >= 27) // 3.10.0-327.27.1.el7 or later
return true;
}
}
@@ -181,8 +180,8 @@ static bool FixedCVE_2016_2143() {
if (minor == 4 && patch == 0 && ptr[0] == '-' &&
internal_strstr(buf.version, "Ubuntu")) {
// Check Ubuntu 16.04
- int r1 = internal_simple_strtoll(ptr+1, &ptr, 10);
- if (r1 >= 13) // 4.4.0-13 or later
+ int r1 = internal_simple_strtoll(ptr + 1, &ptr, 10);
+ if (r1 >= 13) // 4.4.0-13 or later
return true;
}
// Otherwise, OK if 4.5+.
@@ -205,18 +204,19 @@ void AvoidCVE_2016_2143() {
if (GetEnv("SANITIZER_IGNORE_CVE_2016_2143"))
return;
Report(
- "ERROR: Your kernel seems to be vulnerable to CVE-2016-2143. Using ASan,\n"
- "MSan, TSan, DFSan or LSan with such kernel can and will crash your\n"
- "machine, or worse.\n"
- "\n"
- "If you are certain your kernel is not vulnerable (you have compiled it\n"
- "yourself, or are using an unrecognized distribution kernel), you can\n"
- "override this safety check by exporting SANITIZER_IGNORE_CVE_2016_2143\n"
- "with any value.\n");
+ "ERROR: Your kernel seems to be vulnerable to CVE-2016-2143. Using "
+ "ASan,\n"
+ "MSan, TSan, DFSan or LSan with such kernel can and will crash your\n"
+ "machine, or worse.\n"
+ "\n"
+ "If you are certain your kernel is not vulnerable (you have compiled it\n"
+ "yourself, or are using an unrecognized distribution kernel), you can\n"
+ "override this safety check by exporting SANITIZER_IGNORE_CVE_2016_2143\n"
+ "with any value.\n");
Die();
}
-#endif
+# endif
-} // namespace __sanitizer
+} // namespace __sanitizer
-#endif // SANITIZER_LINUX && SANITIZER_S390
+#endif // SANITIZER_LINUX && SANITIZER_S390
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_local_address_space_view.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_local_address_space_view.h
index 0e19c4d4a801..a47cfc945cd8 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_local_address_space_view.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_local_address_space_view.h
@@ -17,7 +17,7 @@
// instantiated with the `LocalAddressSpaceView` type. This type is used to
// load any pointers in instance methods. This implementation is effectively
// a no-op. When an object is to be used in an out-of-process manner it is
-// instansiated with the `RemoteAddressSpaceView` type.
+// instantiated with the `RemoteAddressSpaceView` type.
//
// By making `AddressSpaceView` a template parameter of an object, it can
// change its implementation at compile time which has no run time overhead.
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_lzw.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_lzw.h
new file mode 100644
index 000000000000..42acfbdcea09
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_lzw.h
@@ -0,0 +1,159 @@
+//===-- sanitizer_lzw.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Lempel–Ziv–Welch encoding/decoding
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_LZW_H
+#define SANITIZER_LZW_H
+
+#include "sanitizer_dense_map.h"
+
+namespace __sanitizer {
+
+using LzwCodeType = u32;
+
+template <class T, class ItIn, class ItOut>
+ItOut LzwEncode(ItIn begin, ItIn end, ItOut out) {
+ using Substring =
+ detail::DenseMapPair<LzwCodeType /* Prefix */, T /* Next input */>;
+
+ // Sentinel value for substrings of len 1.
+ static constexpr LzwCodeType kNoPrefix =
+ Min(DenseMapInfo<Substring>::getEmptyKey().first,
+ DenseMapInfo<Substring>::getTombstoneKey().first) -
+ 1;
+ DenseMap<Substring, LzwCodeType> prefix_to_code;
+ {
+ // Add all substring of len 1 as initial dictionary.
+ InternalMmapVector<T> dict_len1;
+ for (auto it = begin; it != end; ++it)
+ if (prefix_to_code.try_emplace({kNoPrefix, *it}, 0).second)
+ dict_len1.push_back(*it);
+
+ // Slightly helps with later delta encoding.
+ Sort(dict_len1.data(), dict_len1.size());
+
+ // For large sizeof(T) we have to store dict_len1. Smaller types like u8 can
+ // just generate them.
+ *out = dict_len1.size();
+ ++out;
+
+ for (uptr i = 0; i != dict_len1.size(); ++i) {
+ // Remap after the Sort.
+ prefix_to_code[{kNoPrefix, dict_len1[i]}] = i;
+ *out = dict_len1[i];
+ ++out;
+ }
+ CHECK_EQ(prefix_to_code.size(), dict_len1.size());
+ }
+
+ if (begin == end)
+ return out;
+
+ // Main LZW encoding loop.
+ LzwCodeType match = prefix_to_code.find({kNoPrefix, *begin})->second;
+ ++begin;
+ for (auto it = begin; it != end; ++it) {
+ // Extend match with the new item.
+ auto ins = prefix_to_code.try_emplace({match, *it}, prefix_to_code.size());
+ if (ins.second) {
+      // This is a new substring, so emit the code for the current match
+      // (before the extension). This lets the LZW decoder recover the
+      // dictionary.
+ *out = match;
+ ++out;
+      // Reset the match to a single item, which must already be in the map.
+ match = prefix_to_code.find({kNoPrefix, *it})->second;
+ } else {
+ // Already known, use as the current match.
+ match = ins.first->second;
+ }
+ }
+
+ *out = match;
+ ++out;
+
+ return out;
+}
+
+template <class T, class ItIn, class ItOut>
+ItOut LzwDecode(ItIn begin, ItIn end, ItOut out) {
+ if (begin == end)
+ return out;
+
+  // Load the dictionary of len 1 substrings. These correspond to the lowest codes.
+ InternalMmapVector<T> dict_len1(*begin);
+ ++begin;
+
+ if (begin == end)
+ return out;
+
+ for (auto& v : dict_len1) {
+ v = *begin;
+ ++begin;
+ }
+
+  // Substrings of len 2 and up. Indexes are shifted because [0,
+  // dict_len1.size()) is stored in dict_len1. Substrings get here after being
+  // emitted to the output, so we can refer to them by output position.
+ InternalMmapVector<detail::DenseMapPair<ItOut /* begin. */, ItOut /* end */>>
+ code_to_substr;
+
+ // Copies already emitted substrings into the output again.
+ auto copy = [&code_to_substr, &dict_len1](LzwCodeType code, ItOut out) {
+ if (code < dict_len1.size()) {
+ *out = dict_len1[code];
+ ++out;
+ return out;
+ }
+ const auto& s = code_to_substr[code - dict_len1.size()];
+
+ for (ItOut it = s.first; it != s.second; ++it, ++out) *out = *it;
+ return out;
+ };
+
+  // Returns the length of the substring with the given code.
+ auto code_to_len = [&code_to_substr, &dict_len1](LzwCodeType code) -> uptr {
+ if (code < dict_len1.size())
+ return 1;
+ const auto& s = code_to_substr[code - dict_len1.size()];
+ return s.second - s.first;
+ };
+
+ // Main LZW decoding loop.
+ LzwCodeType prev_code = *begin;
+ ++begin;
+ out = copy(prev_code, out);
+ for (auto it = begin; it != end; ++it) {
+ LzwCodeType code = *it;
+ auto start = out;
+ if (code == dict_len1.size() + code_to_substr.size()) {
+      // Special LZW case. The code is not in the dictionary yet. This is
+      // possible only when the new substring is the same as the previous one
+      // plus the first item of the previous substring. We can emit that in
+      // two steps.
+ out = copy(prev_code, out);
+ *out = *start;
+ ++out;
+ } else {
+ out = copy(code, out);
+ }
+
+    // Every time the encoder emits a code, it also creates a substring of
+    // len + 1 including the first item of the just-emitted substring. Do the
+    // same here.
+ uptr len = code_to_len(prev_code);
+ code_to_substr.push_back({start - len, start + 1});
+
+ prev_code = code;
+ }
+ return out;
+}
+
+} // namespace __sanitizer
+#endif
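
For readers who want to experiment with the scheme above, here is a minimal, self-contained sketch of the same LZW stream layout (a header holding the count of distinct symbols and the sorted len-1 dictionary, followed by the codes), written against standard containers instead of the sanitizer-internal DenseMap/InternalMmapVector. The names (Code, LzwEncode, LzwDecode, subs) are illustrative, not part of the runtime.

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <map>
#include <set>
#include <utility>
#include <vector>

using Code = uint32_t;

std::vector<Code> LzwEncode(const std::vector<uint8_t> &in) {
  const Code kNoPrefix = ~Code(0);  // sentinel prefix for len-1 substrings
  std::map<std::pair<Code, uint8_t>, Code> dict;
  std::set<uint8_t> symbols(in.begin(), in.end());  // distinct, sorted

  // Header: count of distinct symbols, then the symbols themselves.
  std::vector<Code> out;
  out.push_back((Code)symbols.size());
  Code next = 0;
  for (uint8_t s : symbols) {
    dict.emplace(std::make_pair(kNoPrefix, s), next++);
    out.push_back(s);
  }
  if (in.empty()) return out;

  Code match = dict.at({kNoPrefix, in[0]});
  for (size_t i = 1; i < in.size(); ++i) {
    auto ins = dict.emplace(std::make_pair(match, in[i]), (Code)dict.size());
    if (ins.second) {
      out.push_back(match);                 // emit so the decoder can follow
      match = dict.at({kNoPrefix, in[i]});  // restart from a single symbol
    } else {
      match = ins.first->second;            // known substring: extend match
    }
  }
  out.push_back(match);
  return out;
}

std::vector<uint8_t> LzwDecode(const std::vector<Code> &in) {
  std::vector<uint8_t> out;
  size_t pos = 0;
  if (pos == in.size()) return out;
  std::vector<uint8_t> len1(in[pos++]);  // symbols with the lowest codes
  for (uint8_t &v : len1) v = (uint8_t)in[pos++];  // assumes well-formed input

  // Substrings of len 2 and up as [begin, end) index ranges into `out`.
  std::vector<std::pair<size_t, size_t>> subs;
  auto emit = [&](Code code) {
    if (code < len1.size()) { out.push_back(len1[code]); return; }
    auto s = subs[code - len1.size()];
    for (size_t i = s.first; i < s.second; ++i) {
      uint8_t v = out[i];  // copy first: push_back may reallocate
      out.push_back(v);
    }
  };
  auto len_of = [&](Code code) -> size_t {
    if (code < len1.size()) return 1;
    auto s = subs[code - len1.size()];
    return s.second - s.first;
  };

  if (pos == in.size()) return out;
  Code prev = in[pos++];
  emit(prev);
  for (; pos < in.size(); ++pos) {
    Code code = in[pos];
    size_t start = out.size();
    if (code == len1.size() + subs.size()) {
      // Special LZW case: the code is not in the dictionary yet; it must be
      // the previous substring plus that substring's first item.
      emit(prev);
      uint8_t v = out[start];
      out.push_back(v);
    } else {
      emit(code);
    }
    // Mirror the encoder: register previous substring + first emitted item.
    subs.push_back({start - len_of(prev), start + 1});
    prev = code;
  }
  return out;
}

int main() {
  std::vector<uint8_t> data = {'a', 'b', 'a', 'b', 'a', 'a', 'a', 'b'};
  assert(LzwDecode(LzwEncode(data)) == data);
  puts("round-trip ok");
}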
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp
index 083595d1505f..24e3d1112520 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp
@@ -11,80 +11,82 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
-#if SANITIZER_MAC
-#include "sanitizer_mac.h"
-#include "interception/interception.h"
+#if SANITIZER_APPLE
+# include "interception/interception.h"
+# include "sanitizer_mac.h"
// Use 64-bit inodes in file operations. ASan does not support OS X 10.5, so
// the clients will most certainly use 64-bit ones as well.
-#ifndef _DARWIN_USE_64_BIT_INODE
-#define _DARWIN_USE_64_BIT_INODE 1
-#endif
-#include <stdio.h>
-
-#include "sanitizer_common.h"
-#include "sanitizer_file.h"
-#include "sanitizer_flags.h"
-#include "sanitizer_internal_defs.h"
-#include "sanitizer_libc.h"
-#include "sanitizer_platform_limits_posix.h"
-#include "sanitizer_procmaps.h"
-#include "sanitizer_ptrauth.h"
-
-#if !SANITIZER_IOS
-#include <crt_externs.h> // for _NSGetEnviron
-#else
+# ifndef _DARWIN_USE_64_BIT_INODE
+# define _DARWIN_USE_64_BIT_INODE 1
+# endif
+# include <stdio.h>
+
+# include "sanitizer_common.h"
+# include "sanitizer_file.h"
+# include "sanitizer_flags.h"
+# include "sanitizer_interface_internal.h"
+# include "sanitizer_internal_defs.h"
+# include "sanitizer_libc.h"
+# include "sanitizer_platform_limits_posix.h"
+# include "sanitizer_procmaps.h"
+# include "sanitizer_ptrauth.h"
+
+# if !SANITIZER_IOS
+# include <crt_externs.h> // for _NSGetEnviron
+# else
extern char **environ;
-#endif
+# endif
-#if defined(__has_include) && __has_include(<os/trace.h>)
-#define SANITIZER_OS_TRACE 1
-#include <os/trace.h>
-#else
-#define SANITIZER_OS_TRACE 0
-#endif
+# if defined(__has_include) && __has_include(<os/trace.h>)
+# define SANITIZER_OS_TRACE 1
+# include <os/trace.h>
+# else
+# define SANITIZER_OS_TRACE 0
+# endif
// import new crash reporting api
-#if defined(__has_include) && __has_include(<CrashReporterClient.h>)
-#define HAVE_CRASHREPORTERCLIENT_H 1
-#include <CrashReporterClient.h>
-#else
-#define HAVE_CRASHREPORTERCLIENT_H 0
-#endif
-
-#if !SANITIZER_IOS
-#include <crt_externs.h> // for _NSGetArgv and _NSGetEnviron
-#else
+# if defined(__has_include) && __has_include(<CrashReporterClient.h>)
+# define HAVE_CRASHREPORTERCLIENT_H 1
+# include <CrashReporterClient.h>
+# else
+# define HAVE_CRASHREPORTERCLIENT_H 0
+# endif
+
+# if !SANITIZER_IOS
+# include <crt_externs.h> // for _NSGetArgv and _NSGetEnviron
+# else
extern "C" {
- extern char ***_NSGetArgv(void);
-}
-#endif
-
-#include <asl.h>
-#include <dlfcn.h> // for dladdr()
-#include <errno.h>
-#include <fcntl.h>
-#include <libkern/OSAtomic.h>
-#include <mach-o/dyld.h>
-#include <mach/mach.h>
-#include <mach/mach_time.h>
-#include <mach/vm_statistics.h>
-#include <malloc/malloc.h>
-#include <os/log.h>
-#include <pthread.h>
-#include <sched.h>
-#include <signal.h>
-#include <spawn.h>
-#include <stdlib.h>
-#include <sys/ioctl.h>
-#include <sys/mman.h>
-#include <sys/resource.h>
-#include <sys/stat.h>
-#include <sys/sysctl.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-#include <unistd.h>
-#include <util.h>
+extern char ***_NSGetArgv(void);
+}
+# endif
+
+# include <asl.h>
+# include <dlfcn.h> // for dladdr()
+# include <errno.h>
+# include <fcntl.h>
+# include <libkern/OSAtomic.h>
+# include <mach-o/dyld.h>
+# include <mach/mach.h>
+# include <mach/mach_time.h>
+# include <mach/vm_statistics.h>
+# include <malloc/malloc.h>
+# include <os/log.h>
+# include <pthread.h>
+# include <pthread/introspection.h>
+# include <sched.h>
+# include <signal.h>
+# include <spawn.h>
+# include <stdlib.h>
+# include <sys/ioctl.h>
+# include <sys/mman.h>
+# include <sys/resource.h>
+# include <sys/stat.h>
+# include <sys/sysctl.h>
+# include <sys/types.h>
+# include <sys/wait.h>
+# include <unistd.h>
+# include <util.h>
// From <crt_externs.h>, but we don't have that file on iOS.
extern "C" {
@@ -265,30 +267,32 @@ int internal_sysctlbyname(const char *sname, void *oldp, uptr *oldlenp,
static fd_t internal_spawn_impl(const char *argv[], const char *envp[],
pid_t *pid) {
- fd_t master_fd = kInvalidFd;
- fd_t slave_fd = kInvalidFd;
+ fd_t primary_fd = kInvalidFd;
+ fd_t secondary_fd = kInvalidFd;
auto fd_closer = at_scope_exit([&] {
- internal_close(master_fd);
- internal_close(slave_fd);
+ internal_close(primary_fd);
+ internal_close(secondary_fd);
});
// We need a new pseudoterminal to avoid buffering problems. The 'atos' tool
// in particular detects when it's talking to a pipe and forgets to flush the
// output stream after sending a response.
- master_fd = posix_openpt(O_RDWR);
- if (master_fd == kInvalidFd) return kInvalidFd;
+ primary_fd = posix_openpt(O_RDWR);
+ if (primary_fd == kInvalidFd)
+ return kInvalidFd;
- int res = grantpt(master_fd) || unlockpt(master_fd);
+ int res = grantpt(primary_fd) || unlockpt(primary_fd);
if (res != 0) return kInvalidFd;
// Use TIOCPTYGNAME instead of ptsname() to avoid threading problems.
- char slave_pty_name[128];
- res = ioctl(master_fd, TIOCPTYGNAME, slave_pty_name);
+ char secondary_pty_name[128];
+ res = ioctl(primary_fd, TIOCPTYGNAME, secondary_pty_name);
if (res == -1) return kInvalidFd;
- slave_fd = internal_open(slave_pty_name, O_RDWR);
- if (slave_fd == kInvalidFd) return kInvalidFd;
+ secondary_fd = internal_open(secondary_pty_name, O_RDWR);
+ if (secondary_fd == kInvalidFd)
+ return kInvalidFd;
// File descriptor actions
posix_spawn_file_actions_t acts;
@@ -299,9 +303,9 @@ static fd_t internal_spawn_impl(const char *argv[], const char *envp[],
posix_spawn_file_actions_destroy(&acts);
});
- res = posix_spawn_file_actions_adddup2(&acts, slave_fd, STDIN_FILENO) ||
- posix_spawn_file_actions_adddup2(&acts, slave_fd, STDOUT_FILENO) ||
- posix_spawn_file_actions_addclose(&acts, slave_fd);
+ res = posix_spawn_file_actions_adddup2(&acts, secondary_fd, STDIN_FILENO) ||
+ posix_spawn_file_actions_adddup2(&acts, secondary_fd, STDOUT_FILENO) ||
+ posix_spawn_file_actions_addclose(&acts, secondary_fd);
if (res != 0) return kInvalidFd;
// Spawn attributes
@@ -326,14 +330,14 @@ static fd_t internal_spawn_impl(const char *argv[], const char *envp[],
// Disable echo in the new terminal, disable CR.
struct termios termflags;
- tcgetattr(master_fd, &termflags);
+ tcgetattr(primary_fd, &termflags);
termflags.c_oflag &= ~ONLCR;
termflags.c_lflag &= ~ECHO;
- tcsetattr(master_fd, TCSANOW, &termflags);
+ tcsetattr(primary_fd, TCSANOW, &termflags);
- // On success, do not close master_fd on scope exit.
- fd_t fd = master_fd;
- master_fd = kInvalidFd;
+ // On success, do not close primary_fd on scope exit.
+ fd_t fd = primary_fd;
+ primary_fd = kInvalidFd;
return fd;
}
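
The primary/secondary sequence above is the standard POSIX pseudoterminal handshake. A minimal standalone sketch of the same steps follows; the function name open_pty_pair is hypothetical, error cleanup is abbreviated, and plain ptsname() stands in for the TIOCPTYGNAME ioctl that the patch uses to stay thread-safe.

#include <fcntl.h>
#include <stdlib.h>
#include <termios.h>
#include <unistd.h>

// Returns 0 on success, -1 on failure (fds are leaked on error for brevity).
int open_pty_pair(int *primary, int *secondary) {
  *primary = posix_openpt(O_RDWR);
  if (*primary < 0) return -1;
  if (grantpt(*primary) != 0 || unlockpt(*primary) != 0) return -1;
  const char *name = ptsname(*primary);  // patch uses TIOCPTYGNAME instead
  if (!name) return -1;
  *secondary = open(name, O_RDWR);
  if (*secondary < 0) return -1;

  struct termios t;
  if (tcgetattr(*primary, &t) != 0) return -1;
  t.c_oflag &= ~ONLCR;  // do not translate NL to CR-NL on output
  t.c_lflag &= ~ECHO;   // do not echo input back
  return tcsetattr(*primary, TCSANOW, &t);
}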
@@ -390,6 +394,13 @@ bool FileExists(const char *filename) {
return S_ISREG(st.st_mode);
}
+bool DirExists(const char *path) {
+ struct stat st;
+ if (stat(path, &st))
+ return false;
+ return S_ISDIR(st.st_mode);
+}
+
tid_t GetTid() {
tid_t tid;
pthread_threadid_np(nullptr, &tid);
@@ -516,25 +527,6 @@ void FutexWait(atomic_uint32_t *p, u32 cmp) {
void FutexWake(atomic_uint32_t *p, u32 count) {}
-BlockingMutex::BlockingMutex() {
- internal_memset(this, 0, sizeof(*this));
-}
-
-void BlockingMutex::Lock() {
- CHECK(sizeof(OSSpinLock) <= sizeof(opaque_storage_));
- CHECK_EQ(OS_SPINLOCK_INIT, 0);
- CHECK_EQ(owner_, 0);
- OSSpinLockLock((OSSpinLock*)&opaque_storage_);
-}
-
-void BlockingMutex::Unlock() {
- OSSpinLockUnlock((OSSpinLock*)&opaque_storage_);
-}
-
-void BlockingMutex::CheckLocked() const {
- CHECK_NE(*(const OSSpinLock*)&opaque_storage_, 0);
-}
-
u64 NanoTime() {
timeval tv;
internal_memset(&tv, 0, sizeof(tv));
@@ -562,6 +554,9 @@ uptr TlsBaseAddr() {
asm("movq %%gs:0,%0" : "=r"(segbase));
#elif defined(__i386__)
asm("movl %%gs:0,%0" : "=r"(segbase));
+#elif defined(__aarch64__)
+ asm("mrs %x0, tpidrro_el0" : "=r"(segbase));
+  segbase &= ~0x07ul; // clearing lower bits, cpu id stored there
#endif
return segbase;
}
@@ -784,8 +779,8 @@ void *internal_start_thread(void *(*func)(void *arg), void *arg) {
void internal_join_thread(void *th) { pthread_join((pthread_t)th, 0); }
#if !SANITIZER_GO
-static BlockingMutex syslog_lock(LINKER_INITIALIZED);
-#endif
+static Mutex syslog_lock;
+# endif
void WriteOneLineToSyslog(const char *s) {
#if !SANITIZER_GO
@@ -800,7 +795,7 @@ void WriteOneLineToSyslog(const char *s) {
// buffer to store crash report application information
static char crashreporter_info_buff[__sanitizer::kErrorMessageBufferSize] = {};
-static BlockingMutex crashreporter_info_mutex(LINKER_INITIALIZED);
+static Mutex crashreporter_info_mutex;
extern "C" {
// Integrate with crash reporter libraries.
@@ -830,7 +825,7 @@ asm(".desc ___crashreporter_info__, 0x10");
} // extern "C"
static void CRAppendCrashLogMessage(const char *msg) {
- BlockingMutexLock l(&crashreporter_info_mutex);
+ Lock l(&crashreporter_info_mutex);
internal_strlcat(crashreporter_info_buff, msg,
sizeof(crashreporter_info_buff));
#if HAVE_CRASHREPORTERCLIENT_H
@@ -874,7 +869,7 @@ void LogFullErrorReport(const char *buffer) {
// the reporting thread holds the thread registry mutex, and asl_log waits
// for GCD to dispatch a new thread, the process will deadlock, because the
// pthread_create wrapper needs to acquire the lock as well.
- BlockingMutexLock l(&syslog_lock);
+ Lock l(&syslog_lock);
if (common_flags()->log_to_syslog)
WriteToSyslog(buffer);
@@ -885,9 +880,12 @@ void LogFullErrorReport(const char *buffer) {
SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
#if defined(__x86_64__) || defined(__i386__)
ucontext_t *ucontext = static_cast<ucontext_t*>(context);
- return ucontext->uc_mcontext->__es.__err & 2 /*T_PF_WRITE*/ ? WRITE : READ;
+ return ucontext->uc_mcontext->__es.__err & 2 /*T_PF_WRITE*/ ? Write : Read;
+#elif defined(__arm64__)
+ ucontext_t *ucontext = static_cast<ucontext_t*>(context);
+ return ucontext->uc_mcontext->__es.__esr & 0x40 /*ISS_DA_WNR*/ ? Write : Read;
#else
- return UNKNOWN;
+ return Unknown;
#endif
}
@@ -902,18 +900,14 @@ bool SignalContext::IsTrueFaultingAddress() const {
(uptr)ptrauth_strip( \
(void *)arm_thread_state64_get_##r(ucontext->uc_mcontext->__ss), 0)
#else
- #define AARCH64_GET_REG(r) ucontext->uc_mcontext->__ss.__##r
+ #define AARCH64_GET_REG(r) (uptr)ucontext->uc_mcontext->__ss.__##r
#endif
static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
ucontext_t *ucontext = (ucontext_t*)context;
# if defined(__aarch64__)
*pc = AARCH64_GET_REG(pc);
-# if defined(__IPHONE_8_0) && __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_8_0
*bp = AARCH64_GET_REG(fp);
-# else
- *bp = AARCH64_GET_REG(lr);
-# endif
*sp = AARCH64_GET_REG(sp);
# elif defined(__x86_64__)
*pc = ucontext->uc_mcontext->__ss.__rip;
@@ -950,6 +944,9 @@ static void DisableMmapExcGuardExceptions() {
set_behavior(mach_task_self(), task_exc_guard_none);
}
+static void VerifyInterceptorsWorking();
+static void StripEnv();
+
void InitializePlatformEarly() {
// Only use xnu_fast_mmap when on x86_64 and the kernel supports it.
use_xnu_fast_mmap =
@@ -960,17 +957,54 @@ void InitializePlatformEarly() {
#endif
if (GetDarwinKernelVersion() >= DarwinKernelVersion(19, 0))
DisableMmapExcGuardExceptions();
+
+# if !SANITIZER_GO
+ MonotonicNanoTime(); // Call to initialize mach_timebase_info
+ VerifyInterceptorsWorking();
+ StripEnv();
+# endif
}
#if !SANITIZER_GO
static const char kDyldInsertLibraries[] = "DYLD_INSERT_LIBRARIES";
LowLevelAllocator allocator_for_env;
+static bool ShouldCheckInterceptors() {
+ // Restrict "interceptors working?" check to ASan and TSan.
+ const char *sanitizer_names[] = {"AddressSanitizer", "ThreadSanitizer"};
+ size_t count = sizeof(sanitizer_names) / sizeof(sanitizer_names[0]);
+ for (size_t i = 0; i < count; i++) {
+ if (internal_strcmp(sanitizer_names[i], SanitizerToolName) == 0)
+ return true;
+ }
+ return false;
+}
+
+static void VerifyInterceptorsWorking() {
+ if (!common_flags()->verify_interceptors || !ShouldCheckInterceptors())
+ return;
+
+ // Verify that interceptors really work. We'll use dlsym to locate
+ // "puts", if interceptors are working, it should really point to
+ // "wrap_puts" within our own dylib.
+ Dl_info info_puts, info_runtime;
+ RAW_CHECK(dladdr(dlsym(RTLD_DEFAULT, "puts"), &info_puts));
+ RAW_CHECK(dladdr((void *)&VerifyInterceptorsWorking, &info_runtime));
+ if (internal_strcmp(info_puts.dli_fname, info_runtime.dli_fname) != 0) {
+ Report(
+ "ERROR: Interceptors are not working. This may be because %s is "
+ "loaded too late (e.g. via dlopen). Please launch the executable "
+ "with:\n%s=%s\n",
+ SanitizerToolName, kDyldInsertLibraries, info_runtime.dli_fname);
+ RAW_CHECK("interceptors not installed" && 0);
+ }
+}
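
The dlsym/dladdr probe above is easy to try outside the runtime. A small sketch that prints which loaded image provides puts; when a sanitizer dylib interposes it, dli_fname names the runtime instead of the system C library:

#include <dlfcn.h>
#include <stdio.h>

int main() {
  Dl_info info;
  void *puts_addr = dlsym(RTLD_DEFAULT, "puts");
  if (puts_addr && dladdr(puts_addr, &info))
    printf("puts resolves into image: %s\n", info.dli_fname);
  return 0;
}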
+
// Change the value of the env var |name|, leaking the original value.
// If |name_value| is NULL, the variable is deleted from the environment,
// otherwise the corresponding "NAME=value" string is replaced with
// |name_value|.
-void LeakyResetEnv(const char *name, const char *name_value) {
+static void LeakyResetEnv(const char *name, const char *name_value) {
char **env = GetEnviron();
uptr name_len = internal_strlen(name);
while (*env != 0) {
@@ -995,100 +1029,28 @@ void LeakyResetEnv(const char *name, const char *name_value) {
}
}
-SANITIZER_WEAK_CXX_DEFAULT_IMPL
-bool ReexecDisabled() {
- return false;
-}
-
-static bool DyldNeedsEnvVariable() {
- // If running on OS X 10.11+ or iOS 9.0+, dyld will interpose even if
- // DYLD_INSERT_LIBRARIES is not set.
- return GetMacosAlignedVersion() < MacosVersion(10, 11);
-}
-
-void MaybeReexec() {
- // FIXME: This should really live in some "InitializePlatform" method.
- MonotonicNanoTime();
+static void StripEnv() {
+ if (!common_flags()->strip_env)
+ return;
- if (ReexecDisabled()) return;
+ char *dyld_insert_libraries =
+ const_cast<char *>(GetEnv(kDyldInsertLibraries));
+ if (!dyld_insert_libraries)
+ return;
- // Make sure the dynamic runtime library is preloaded so that the
- // wrappers work. If it is not, set DYLD_INSERT_LIBRARIES and re-exec
- // ourselves.
Dl_info info;
- RAW_CHECK(dladdr((void*)((uptr)&__sanitizer_report_error_summary), &info));
- char *dyld_insert_libraries =
- const_cast<char*>(GetEnv(kDyldInsertLibraries));
- uptr old_env_len = dyld_insert_libraries ?
- internal_strlen(dyld_insert_libraries) : 0;
- uptr fname_len = internal_strlen(info.dli_fname);
+ RAW_CHECK(dladdr((void *)&StripEnv, &info));
const char *dylib_name = StripModuleName(info.dli_fname);
- uptr dylib_name_len = internal_strlen(dylib_name);
-
- bool lib_is_in_env = dyld_insert_libraries &&
- internal_strstr(dyld_insert_libraries, dylib_name);
- if (DyldNeedsEnvVariable() && !lib_is_in_env) {
- // DYLD_INSERT_LIBRARIES is not set or does not contain the runtime
- // library.
- InternalMmapVector<char> program_name(1024);
- uint32_t buf_size = program_name.size();
- _NSGetExecutablePath(program_name.data(), &buf_size);
- char *new_env = const_cast<char*>(info.dli_fname);
- if (dyld_insert_libraries) {
- // Append the runtime dylib name to the existing value of
- // DYLD_INSERT_LIBRARIES.
- new_env = (char*)allocator_for_env.Allocate(old_env_len + fname_len + 2);
- internal_strncpy(new_env, dyld_insert_libraries, old_env_len);
- new_env[old_env_len] = ':';
- // Copy fname_len and add a trailing zero.
- internal_strncpy(new_env + old_env_len + 1, info.dli_fname,
- fname_len + 1);
- // Ok to use setenv() since the wrappers don't depend on the value of
- // asan_inited.
- setenv(kDyldInsertLibraries, new_env, /*overwrite*/1);
- } else {
- // Set DYLD_INSERT_LIBRARIES equal to the runtime dylib name.
- setenv(kDyldInsertLibraries, info.dli_fname, /*overwrite*/0);
- }
- VReport(1, "exec()-ing the program with\n");
- VReport(1, "%s=%s\n", kDyldInsertLibraries, new_env);
- VReport(1, "to enable wrappers.\n");
- execv(program_name.data(), *_NSGetArgv());
-
- // We get here only if execv() failed.
- Report("ERROR: The process is launched without DYLD_INSERT_LIBRARIES, "
- "which is required for the sanitizer to work. We tried to set the "
- "environment variable and re-execute itself, but execv() failed, "
- "possibly because of sandbox restrictions. Make sure to launch the "
- "executable with:\n%s=%s\n", kDyldInsertLibraries, new_env);
- RAW_CHECK("execv failed" && 0);
- }
-
- // Verify that interceptors really work. We'll use dlsym to locate
- // "pthread_create", if interceptors are working, it should really point to
- // "wrap_pthread_create" within our own dylib.
- Dl_info info_pthread_create;
- void *dlopen_addr = dlsym(RTLD_DEFAULT, "pthread_create");
- RAW_CHECK(dladdr(dlopen_addr, &info_pthread_create));
- if (internal_strcmp(info.dli_fname, info_pthread_create.dli_fname) != 0) {
- Report(
- "ERROR: Interceptors are not working. This may be because %s is "
- "loaded too late (e.g. via dlopen). Please launch the executable "
- "with:\n%s=%s\n",
- SanitizerToolName, kDyldInsertLibraries, info.dli_fname);
- RAW_CHECK("interceptors not installed" && 0);
- }
-
+ bool lib_is_in_env = internal_strstr(dyld_insert_libraries, dylib_name);
if (!lib_is_in_env)
return;
- if (!common_flags()->strip_env)
- return;
-
// DYLD_INSERT_LIBRARIES is set and contains the runtime library. Let's remove
// the dylib from the environment variable, because interceptors are installed
// and we don't want our children to inherit the variable.
+ uptr old_env_len = internal_strlen(dyld_insert_libraries);
+ uptr dylib_name_len = internal_strlen(dylib_name);
uptr env_name_len = internal_strlen(kDyldInsertLibraries);
// Allocate memory to hold the previous env var name, its value, the '='
// sign and the '\0' char.
@@ -1237,7 +1199,7 @@ uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
uptr largest_gap_found = 0;
uptr max_occupied_addr = 0;
- VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size);
+ VReport(2, "FindDynamicShadowStart, space_size = %p\n", (void *)space_size);
uptr shadow_start =
FindAvailableMemoryRange(space_size, alignment, granularity,
&largest_gap_found, &max_occupied_addr);
@@ -1246,20 +1208,21 @@ uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
VReport(
2,
"Shadow doesn't fit, largest_gap_found = %p, max_occupied_addr = %p\n",
- largest_gap_found, max_occupied_addr);
+ (void *)largest_gap_found, (void *)max_occupied_addr);
uptr new_max_vm = RoundDownTo(largest_gap_found << shadow_scale, alignment);
if (new_max_vm < max_occupied_addr) {
Report("Unable to find a memory range for dynamic shadow.\n");
Report(
"space_size = %p, largest_gap_found = %p, max_occupied_addr = %p, "
"new_max_vm = %p\n",
- space_size, largest_gap_found, max_occupied_addr, new_max_vm);
+ (void *)space_size, (void *)largest_gap_found,
+ (void *)max_occupied_addr, (void *)new_max_vm);
CHECK(0 && "cannot place shadow");
}
RestrictMemoryToMaxAddress(new_max_vm);
high_mem_end = new_max_vm - 1;
space_size = (high_mem_end >> shadow_scale) + left_padding;
- VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size);
+ VReport(2, "FindDynamicShadowStart, space_size = %p\n", (void *)space_size);
shadow_start = FindAvailableMemoryRange(space_size, alignment, granularity,
nullptr, nullptr);
if (shadow_start == 0) {
@@ -1288,6 +1251,7 @@ uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
mach_vm_address_t start_address =
(SANITIZER_WORDSIZE == 32) ? 0x000000001000 : 0x000100000000;
+ const mach_vm_address_t max_vm_address = GetMaxVirtualAddress() + 1;
mach_vm_address_t address = start_address;
mach_vm_address_t free_begin = start_address;
kern_return_t kr = KERN_SUCCESS;
@@ -1302,7 +1266,7 @@ uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
(vm_region_info_t)&vminfo, &count);
if (kr == KERN_INVALID_ADDRESS) {
// No more regions beyond "address", consider the gap at the end of VM.
- address = GetMaxVirtualAddress() + 1;
+ address = max_vm_address;
vmsize = 0;
} else {
if (max_occupied_addr) *max_occupied_addr = address + vmsize;
@@ -1310,7 +1274,7 @@ uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
if (free_begin != address) {
// We found a free region [free_begin..address-1].
uptr gap_start = RoundUpTo((uptr)free_begin + left_padding, alignment);
- uptr gap_end = RoundDownTo((uptr)address, alignment);
+ uptr gap_end = RoundDownTo((uptr)Min(address, max_vm_address), alignment);
uptr gap_size = gap_end > gap_start ? gap_end - gap_start : 0;
if (size < gap_size) {
return gap_start;
@@ -1330,7 +1294,7 @@ uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
}
// FIXME implement on this platform.
-void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) { }
+void GetMemoryProfile(fill_profile_f cb, uptr *stats) {}
void SignalContext::DumpAllRegisters(void *context) {
Report("Register values:\n");
@@ -1339,7 +1303,7 @@ void SignalContext::DumpAllRegisters(void *context) {
# define DUMPREG64(r) \
Printf("%s = 0x%016llx ", #r, ucontext->uc_mcontext->__ss.__ ## r);
# define DUMPREGA64(r) \
- Printf(" %s = 0x%016llx ", #r, AARCH64_GET_REG(r));
+ Printf(" %s = 0x%016lx ", #r, AARCH64_GET_REG(r));
# define DUMPREG32(r) \
Printf("%s = 0x%08x ", #r, ucontext->uc_mcontext->__ss.__ ## r);
# define DUMPREG_(r) Printf(" "); DUMPREG(r);
@@ -1409,7 +1373,7 @@ void DumpProcessMap() {
char uuid_str[128];
FormatUUID(uuid_str, sizeof(uuid_str), modules[i].uuid());
Printf("0x%zx-0x%zx %s (%s) %s\n", modules[i].base_address(),
- modules[i].max_executable_address(), modules[i].full_name(),
+ modules[i].max_address(), modules[i].full_name(),
ModuleArchToString(modules[i].arch()), uuid_str);
}
Printf("End of module map.\n");
@@ -1433,6 +1397,61 @@ u32 GetNumberOfCPUs() {
void InitializePlatformCommonFlags(CommonFlags *cf) {}
+// Pthread introspection hook
+//
+// * GCD worker threads are created without a call to pthread_create(), but we
+// still need to register these threads (with ThreadCreate/Start()).
+// * We use the "pthread introspection hook" below to observe the creation of
+// such threads.
+// * GCD worker threads don't have parent threads and the CREATE event is
+// delivered in the context of the thread itself. CREATE events for regular
+//   threads are delivered on the parent. We use this to tell apart which
+// threads are GCD workers with `thread == pthread_self()`.
+//
+static pthread_introspection_hook_t prev_pthread_introspection_hook;
+static ThreadEventCallbacks thread_event_callbacks;
+
+static void sanitizer_pthread_introspection_hook(unsigned int event,
+ pthread_t thread, void *addr,
+ size_t size) {
+ // create -> start -> terminate -> destroy
+  // * create/destroy are usually (but not guaranteed to be) delivered on the
+  //   parent and track resource allocation/reclamation
+ // * start/terminate are guaranteed to be delivered in the context of the
+ // thread and give hooks into "just after (before) thread starts (stops)
+ // executing"
+ DCHECK(event >= PTHREAD_INTROSPECTION_THREAD_CREATE &&
+ event <= PTHREAD_INTROSPECTION_THREAD_DESTROY);
+
+ if (event == PTHREAD_INTROSPECTION_THREAD_CREATE) {
+ bool gcd_worker = (thread == pthread_self());
+ if (thread_event_callbacks.create)
+ thread_event_callbacks.create((uptr)thread, gcd_worker);
+ } else if (event == PTHREAD_INTROSPECTION_THREAD_START) {
+ CHECK_EQ(thread, pthread_self());
+ if (thread_event_callbacks.start)
+ thread_event_callbacks.start((uptr)thread);
+ }
+
+ if (prev_pthread_introspection_hook)
+ prev_pthread_introspection_hook(event, thread, addr, size);
+
+ if (event == PTHREAD_INTROSPECTION_THREAD_TERMINATE) {
+ CHECK_EQ(thread, pthread_self());
+ if (thread_event_callbacks.terminate)
+ thread_event_callbacks.terminate((uptr)thread);
+ } else if (event == PTHREAD_INTROSPECTION_THREAD_DESTROY) {
+ if (thread_event_callbacks.destroy)
+ thread_event_callbacks.destroy((uptr)thread);
+ }
+}
+
+void InstallPthreadIntrospectionHook(const ThreadEventCallbacks &callbacks) {
+ thread_event_callbacks = callbacks;
+ prev_pthread_introspection_hook =
+ pthread_introspection_hook_install(&sanitizer_pthread_introspection_hook);
+}
+
} // namespace __sanitizer
-#endif // SANITIZER_MAC
+#endif // SANITIZER_APPLE
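
A standalone sketch of the same hook installation against the Apple-private API in <pthread/introspection.h>; my_hook and prev_hook are illustrative names, and chaining to the previous hook mirrors the patch:

#include <pthread.h>
#include <pthread/introspection.h>
#include <stdio.h>

static pthread_introspection_hook_t prev_hook;

static void my_hook(unsigned int event, pthread_t thread, void *addr,
                    size_t size) {
  if (event == PTHREAD_INTROSPECTION_THREAD_CREATE) {
    // GCD workers deliver CREATE on the thread itself; regular threads
    // deliver it on the parent.
    int gcd_worker = (thread == pthread_self());
    fprintf(stderr, "created %p (gcd worker: %d)\n", (void *)thread,
            gcd_worker);
  }
  if (prev_hook) prev_hook(event, thread, addr, size);
}

int main() {
  prev_hook = pthread_introspection_hook_install(&my_hook);
  // ... spawn pthreads or dispatch GCD work here ...
  return 0;
}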
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mac.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mac.h
index 0b6af5a3c0ed..f0a97d098eea 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mac.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mac.h
@@ -9,12 +9,12 @@
// This file is shared between various sanitizers' runtime libraries and
// provides definitions for OSX-specific functions.
//===----------------------------------------------------------------------===//
-#ifndef SANITIZER_MAC_H
-#define SANITIZER_MAC_H
+#ifndef SANITIZER_APPLE_H
+#define SANITIZER_APPLE_H
#include "sanitizer_common.h"
#include "sanitizer_platform.h"
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
#include "sanitizer_posix.h"
namespace __sanitizer {
@@ -62,7 +62,18 @@ char **GetEnviron();
void RestrictMemoryToMaxAddress(uptr max_address);
+using ThreadEventCallback = void (*)(uptr thread);
+using ThreadCreateEventCallback = void (*)(uptr thread, bool gcd_worker);
+struct ThreadEventCallbacks {
+ ThreadCreateEventCallback create;
+ ThreadEventCallback start;
+ ThreadEventCallback terminate;
+ ThreadEventCallback destroy;
+};
+
+void InstallPthreadIntrospectionHook(const ThreadEventCallbacks &callbacks);
+
} // namespace __sanitizer
-#endif // SANITIZER_MAC
-#endif // SANITIZER_MAC_H
+#endif // SANITIZER_APPLE
+#endif // SANITIZER_APPLE_H
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mac_libcdep.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mac_libcdep.cpp
index ac7e328946bf..b452dc4a49e2 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mac_libcdep.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mac_libcdep.cpp
@@ -11,7 +11,7 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
#include "sanitizer_mac.h"
#include <sys/mman.h>
@@ -26,4 +26,4 @@ void RestrictMemoryToMaxAddress(uptr max_address) {
} // namespace __sanitizer
-#endif // SANITIZER_MAC
+#endif // SANITIZER_APPLE
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mallinfo.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mallinfo.h
new file mode 100644
index 000000000000..1c07e68e55a7
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mallinfo.h
@@ -0,0 +1,42 @@
+//===-- sanitizer_mallinfo.h ----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer common code.
+//
+// Definition for mallinfo on different platforms.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_MALLINFO_H
+#define SANITIZER_MALLINFO_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform.h"
+
+namespace __sanitizer {
+
+#if SANITIZER_ANDROID
+
+struct __sanitizer_struct_mallinfo {
+ uptr v[10];
+};
+
+#elif SANITIZER_LINUX || SANITIZER_APPLE || SANITIZER_FUCHSIA
+
+struct __sanitizer_struct_mallinfo {
+ int v[10];
+};
+
+struct __sanitizer_struct_mallinfo2 {
+ uptr v[10];
+};
+
+#endif
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_MALLINFO_H
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_malloc_mac.inc b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_malloc_mac.inc
index e3b664f68b61..6343eb284afb 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_malloc_mac.inc
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_malloc_mac.inc
@@ -12,7 +12,7 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
#error "This file should only be compiled on Darwin."
#endif
@@ -23,6 +23,7 @@
#include <sys/mman.h>
#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_allocator_dlsym.h"
#include "sanitizer_common/sanitizer_mac.h"
// Similar code is used in Google Perftools,
@@ -122,7 +123,7 @@ INTERCEPTOR(void, malloc_set_zone_name, malloc_zone_t *zone, const char *name) {
COMMON_MALLOC_ENTER();
InternalScopedString new_name;
if (name && zone->introspect == sanitizer_zone.introspect) {
- new_name.append(COMMON_MALLOC_ZONE_NAME "-%s", name);
+ new_name.AppendF(COMMON_MALLOC_ZONE_NAME "-%s", name);
name = new_name.data();
}
@@ -192,20 +193,15 @@ void *__sanitizer_mz_malloc(malloc_zone_t *zone, uptr size) {
return p;
}
+struct DlsymAlloc : public DlSymAllocator<DlsymAlloc> {
+ static bool UseImpl() { return !COMMON_MALLOC_SANITIZER_INITIALIZED; }
+};
+
extern "C"
SANITIZER_INTERFACE_ATTRIBUTE
void *__sanitizer_mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) {
- if (UNLIKELY(!COMMON_MALLOC_SANITIZER_INITIALIZED)) {
- // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
- const size_t kCallocPoolSize = 1024;
- static uptr calloc_memory_for_dlsym[kCallocPoolSize];
- static size_t allocated;
- size_t size_in_words = ((nmemb * size) + kWordSize - 1) / kWordSize;
- void *mem = (void*)&calloc_memory_for_dlsym[allocated];
- allocated += size_in_words;
- CHECK(allocated < kCallocPoolSize);
- return mem;
- }
+ if (DlsymAlloc::Use())
+ return DlsymAlloc::Callocate(nmemb, size);
COMMON_MALLOC_CALLOC(nmemb, size);
return p;
}
@@ -223,6 +219,8 @@ extern "C"
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_mz_free(malloc_zone_t *zone, void *ptr) {
if (!ptr) return;
+ if (DlsymAlloc::PointerIsMine(ptr))
+ return DlsymAlloc::Free(ptr);
COMMON_MALLOC_FREE(ptr);
}
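
DlsymAlloc (defined in sanitizer_allocator_dlsym.h) generalizes the static-pool hack that the old code inlined. A sketch of the underlying pattern, with bootstrap_calloc/bootstrap_pointer_is_mine as illustrative names rather than the real API: before the real allocator is up, serve allocations from a static buffer, and recognize those pointers in free() so they are never handed to the real deallocator.

#include <cstddef>
#include <cstring>

static char pool[1 << 16];  // small static arena for pre-init allocations
static size_t pool_used;

static void *bootstrap_calloc(size_t nmemb, size_t size) {
  if (size && nmemb > (size_t)-1 / size) return nullptr;  // overflow
  size_t n = (nmemb * size + 15) & ~(size_t)15;           // 16-byte align
  if (pool_used + n > sizeof(pool)) return nullptr;
  void *p = pool + pool_used;
  pool_used += n;
  return memset(p, 0, n);
}

static bool bootstrap_pointer_is_mine(const void *p) {
  return p >= (const void *)pool && p < (const void *)(pool + sizeof(pool));
}

// In free(): if (bootstrap_pointer_is_mine(ptr)) return;  // leak, by design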
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mutex.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mutex.cpp
index 46f1d0279ca1..40fe56661250 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mutex.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mutex.cpp
@@ -73,7 +73,7 @@ void DebugMutexInit() {
// Build adjacency matrix.
bool leaf[kMutexTypeMax];
internal_memset(&leaf, 0, sizeof(leaf));
- int cnt[kMutexTypeMax] = {};
+ int cnt[kMutexTypeMax];
internal_memset(&cnt, 0, sizeof(cnt));
for (int t = 0; t < kMutexTypeMax; t++) {
mutex_type_count = t;
@@ -174,7 +174,7 @@ struct InternalDeadlockDetector {
if (max_idx != MutexInvalid && !mutex_can_lock[max_idx][type]) {
Printf("%s: internal deadlock: can't lock %s under %s mutex\n", SanitizerToolName,
mutex_meta[type].name, mutex_meta[max_idx].name);
- PrintMutexPC(pc);
+ PrintMutexPC(locked[max_idx].pc);
CHECK(0);
}
locked[type].seq = ++sequence;
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h
index cbd1c25eb69f..b1a58e421d81 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h
@@ -20,25 +20,27 @@
namespace __sanitizer {
-class MUTEX StaticSpinMutex {
+class SANITIZER_MUTEX StaticSpinMutex {
public:
void Init() {
atomic_store(&state_, 0, memory_order_relaxed);
}
- void Lock() ACQUIRE() {
+ void Lock() SANITIZER_ACQUIRE() {
if (LIKELY(TryLock()))
return;
LockSlow();
}
- bool TryLock() TRY_ACQUIRE(true) {
+ bool TryLock() SANITIZER_TRY_ACQUIRE(true) {
return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
}
- void Unlock() RELEASE() { atomic_store(&state_, 0, memory_order_release); }
+ void Unlock() SANITIZER_RELEASE() {
+ atomic_store(&state_, 0, memory_order_release);
+ }
- void CheckLocked() const CHECK_LOCKED() {
+ void CheckLocked() const SANITIZER_CHECK_LOCKED() {
CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
}
@@ -48,7 +50,7 @@ class MUTEX StaticSpinMutex {
void LockSlow();
};
-class MUTEX SpinMutex : public StaticSpinMutex {
+class SANITIZER_MUTEX SpinMutex : public StaticSpinMutex {
public:
SpinMutex() {
Init();
@@ -95,7 +97,11 @@ enum {
// Go linker does not support THREADLOCAL variables,
// so we can't use per-thread state.
-#define SANITIZER_CHECK_DEADLOCKS (SANITIZER_DEBUG && !SANITIZER_GO)
+// Disable checked locks on Darwin. Although Darwin platforms support
+// THREADLOCAL variables, they are not usable early on during process init when
+// `__sanitizer::Mutex` is used.
+#define SANITIZER_CHECK_DEADLOCKS \
+ (SANITIZER_DEBUG && !SANITIZER_GO && SANITIZER_SUPPORTS_THREADLOCAL && !SANITIZER_APPLE)
#if SANITIZER_CHECK_DEADLOCKS
struct MutexMeta {
@@ -111,7 +117,7 @@ struct MutexMeta {
class CheckedMutex {
public:
- constexpr CheckedMutex(MutexType type)
+ explicit constexpr CheckedMutex(MutexType type)
#if SANITIZER_CHECK_DEADLOCKS
: type_(type)
#endif
@@ -152,15 +158,15 @@ class CheckedMutex {
// Derive from CheckedMutex for the purposes of EBO.
// We could make it a field marked with [[no_unique_address]],
// but this attribute is not supported by some older compilers.
-class MUTEX Mutex : CheckedMutex {
+class SANITIZER_MUTEX Mutex : CheckedMutex {
public:
- constexpr Mutex(MutexType type = MutexUnchecked) : CheckedMutex(type) {}
+ explicit constexpr Mutex(MutexType type = MutexUnchecked)
+ : CheckedMutex(type) {}
- void Lock() ACQUIRE() {
+ void Lock() SANITIZER_ACQUIRE() {
CheckedMutex::Lock();
u64 reset_mask = ~0ull;
u64 state = atomic_load_relaxed(&state_);
- const uptr kMaxSpinIters = 1500;
for (uptr spin_iters = 0;; spin_iters++) {
u64 new_state;
bool locked = (state & (kWriterLock | kReaderLockMask)) != 0;
@@ -189,8 +195,6 @@ class MUTEX Mutex : CheckedMutex {
// We've incremented waiting writers, so now block.
writers_.Wait();
spin_iters = 0;
- state = atomic_load(&state_, memory_order_relaxed);
- DCHECK_NE(state & kWriterSpinWait, 0);
} else {
// We've set kWriterSpinWait, but we are still in active spinning.
}
@@ -199,10 +203,26 @@ class MUTEX Mutex : CheckedMutex {
// Either way we need to reset kWriterSpinWait
// next time we take the lock or block again.
reset_mask = ~kWriterSpinWait;
+ state = atomic_load(&state_, memory_order_relaxed);
+ DCHECK_NE(state & kWriterSpinWait, 0);
+ }
+ }
+
+ bool TryLock() SANITIZER_TRY_ACQUIRE(true) {
+ u64 state = atomic_load_relaxed(&state_);
+ for (;;) {
+ if (UNLIKELY(state & (kWriterLock | kReaderLockMask)))
+ return false;
+ // The mutex is not read-/write-locked, try to lock.
+ if (LIKELY(atomic_compare_exchange_weak(
+ &state_, &state, state | kWriterLock, memory_order_acquire))) {
+ CheckedMutex::Lock();
+ return true;
+ }
}
}
- void Unlock() RELEASE() {
+ void Unlock() SANITIZER_RELEASE() {
CheckedMutex::Unlock();
bool wake_writer;
u64 wake_readers;
@@ -212,17 +232,16 @@ class MUTEX Mutex : CheckedMutex {
DCHECK_NE(state & kWriterLock, 0);
DCHECK_EQ(state & kReaderLockMask, 0);
new_state = state & ~kWriterLock;
- wake_writer =
- (state & kWriterSpinWait) == 0 && (state & kWaitingWriterMask) != 0;
+ wake_writer = (state & (kWriterSpinWait | kReaderSpinWait)) == 0 &&
+ (state & kWaitingWriterMask) != 0;
if (wake_writer)
new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
wake_readers =
- (state & (kWriterSpinWait | kWaitingWriterMask)) != 0
+ wake_writer || (state & kWriterSpinWait) != 0
? 0
: ((state & kWaitingReaderMask) >> kWaitingReaderShift);
if (wake_readers)
- new_state = (new_state & ~kWaitingReaderMask) +
- (wake_readers << kReaderLockShift);
+ new_state = (new_state & ~kWaitingReaderMask) | kReaderSpinWait;
} while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
memory_order_release)));
if (UNLIKELY(wake_writer))
@@ -231,37 +250,54 @@ class MUTEX Mutex : CheckedMutex {
readers_.Post(wake_readers);
}
- void ReadLock() ACQUIRE_SHARED() {
+ void ReadLock() SANITIZER_ACQUIRE_SHARED() {
CheckedMutex::Lock();
- bool locked;
- u64 new_state;
+ u64 reset_mask = ~0ull;
u64 state = atomic_load_relaxed(&state_);
- do {
- locked =
- (state & kReaderLockMask) == 0 &&
- (state & (kWriterLock | kWriterSpinWait | kWaitingWriterMask)) != 0;
+ for (uptr spin_iters = 0;; spin_iters++) {
+ bool locked = (state & kWriterLock) != 0;
+ u64 new_state;
+ if (LIKELY(!locked)) {
+ new_state = (state + kReaderLockInc) & reset_mask;
+ } else if (spin_iters > kMaxSpinIters) {
+ new_state = (state + kWaitingReaderInc) & reset_mask;
+ } else if ((state & kReaderSpinWait) == 0) {
+        // Active spinning, but denote our presence so that the unlocking
+ // thread does not wake up other threads.
+ new_state = state | kReaderSpinWait;
+ } else {
+ // Active spinning.
+ state = atomic_load(&state_, memory_order_relaxed);
+ continue;
+ }
+ if (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
+ memory_order_acquire)))
+ continue;
if (LIKELY(!locked))
- new_state = state + kReaderLockInc;
- else
- new_state = state + kWaitingReaderInc;
- } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
- memory_order_acquire)));
- if (UNLIKELY(locked))
- readers_.Wait();
- DCHECK_EQ(atomic_load_relaxed(&state_) & kWriterLock, 0);
- DCHECK_NE(atomic_load_relaxed(&state_) & kReaderLockMask, 0);
+ return; // We've locked the mutex.
+ if (spin_iters > kMaxSpinIters) {
+ // We've incremented waiting readers, so now block.
+ readers_.Wait();
+ spin_iters = 0;
+ } else {
+ // We've set kReaderSpinWait, but we are still in active spinning.
+ }
+ reset_mask = ~kReaderSpinWait;
+ state = atomic_load(&state_, memory_order_relaxed);
+ }
}
- void ReadUnlock() RELEASE_SHARED() {
+ void ReadUnlock() SANITIZER_RELEASE_SHARED() {
CheckedMutex::Unlock();
bool wake;
u64 new_state;
u64 state = atomic_load_relaxed(&state_);
do {
DCHECK_NE(state & kReaderLockMask, 0);
- DCHECK_EQ(state & (kWaitingReaderMask | kWriterLock), 0);
+ DCHECK_EQ(state & kWriterLock, 0);
new_state = state - kReaderLockInc;
- wake = (new_state & (kReaderLockMask | kWriterSpinWait)) == 0 &&
+ wake = (new_state &
+ (kReaderLockMask | kWriterSpinWait | kReaderSpinWait)) == 0 &&
(new_state & kWaitingWriterMask) != 0;
if (wake)
new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
@@ -277,13 +313,13 @@ class MUTEX Mutex : CheckedMutex {
// owns the mutex but a child checks that it is locked. Rather than
// maintaining complex state to work around those situations, the check only
// checks that the mutex is owned.
- void CheckWriteLocked() const CHECK_LOCKED() {
+ void CheckWriteLocked() const SANITIZER_CHECK_LOCKED() {
CHECK(atomic_load(&state_, memory_order_relaxed) & kWriterLock);
}
- void CheckLocked() const CHECK_LOCKED() { CheckWriteLocked(); }
+ void CheckLocked() const SANITIZER_CHECK_LOCKED() { CheckWriteLocked(); }
- void CheckReadLocked() const CHECK_LOCKED() {
+ void CheckReadLocked() const SANITIZER_CHECK_LOCKED() {
CHECK(atomic_load(&state_, memory_order_relaxed) & kReaderLockMask);
}
@@ -305,16 +341,14 @@ class MUTEX Mutex : CheckedMutex {
// - a writer is awake and spin-waiting
// the flag is used to prevent thundering herd problem
// (new writers are not woken if this flag is set)
+ // - a reader is awake and spin-waiting
//
- // Writer support active spinning, readers does not.
+ // Both writers and readers use active spinning before blocking.
// But readers are more aggressive and always take the mutex
// if there are any other readers.
- // Writers hand off the mutex to readers: after wake up readers
- // already assume ownership of the mutex (don't need to do any
- // state updates). But the mutex is not handed off to writers,
- // after wake up writers compete to lock the mutex again.
- // This is needed to allow repeated write locks even in presence
- // of other blocked writers.
+ // After wake up both writers and readers compete to lock the
+ // mutex again. This is needed to allow repeated locks even in presence
+ // of other blocked threads.
static constexpr u64 kCounterWidth = 20;
static constexpr u64 kReaderLockShift = 0;
static constexpr u64 kReaderLockInc = 1ull << kReaderLockShift;
@@ -330,7 +364,11 @@ class MUTEX Mutex : CheckedMutex {
<< kWaitingWriterShift;
static constexpr u64 kWriterLock = 1ull << (3 * kCounterWidth);
static constexpr u64 kWriterSpinWait = 1ull << (3 * kCounterWidth + 1);
+ static constexpr u64 kReaderSpinWait = 1ull << (3 * kCounterWidth + 2);
+
+ static constexpr uptr kMaxSpinIters = 1500;
+ Mutex(LinkerInitialized) = delete;
Mutex(const Mutex &) = delete;
void operator=(const Mutex &) = delete;
};
@@ -338,149 +376,70 @@ class MUTEX Mutex : CheckedMutex {
void FutexWait(atomic_uint32_t *p, u32 cmp);
void FutexWake(atomic_uint32_t *p, u32 count);
-class MUTEX BlockingMutex {
- public:
- explicit constexpr BlockingMutex(LinkerInitialized)
- : opaque_storage_ {0, }, owner_ {0} {}
- BlockingMutex();
- void Lock() ACQUIRE();
- void Unlock() RELEASE();
-
- // This function does not guarantee an explicit check that the calling thread
- // is the thread which owns the mutex. This behavior, while more strictly
- // correct, causes problems in cases like StopTheWorld, where a parent thread
- // owns the mutex but a child checks that it is locked. Rather than
- // maintaining complex state to work around those situations, the check only
- // checks that the mutex is owned, and assumes callers to be generally
- // well-behaved.
- void CheckLocked() const CHECK_LOCKED();
-
- private:
- // Solaris mutex_t has a member that requires 64-bit alignment.
- ALIGNED(8) uptr opaque_storage_[10];
- uptr owner_; // for debugging
-};
-
-// Reader-writer spin mutex.
-class MUTEX RWMutex {
+template <typename MutexType>
+class SANITIZER_SCOPED_LOCK GenericScopedLock {
public:
- RWMutex() {
- atomic_store(&state_, kUnlocked, memory_order_relaxed);
- }
-
- ~RWMutex() {
- CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
- }
-
- void Lock() ACQUIRE() {
- u32 cmp = kUnlocked;
- if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
- memory_order_acquire))
- return;
- LockSlow();
- }
-
- void Unlock() RELEASE() {
- u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
- DCHECK_NE(prev & kWriteLock, 0);
- (void)prev;
- }
-
- void ReadLock() ACQUIRE_SHARED() {
- u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
- if ((prev & kWriteLock) == 0)
- return;
- ReadLockSlow();
- }
-
- void ReadUnlock() RELEASE_SHARED() {
- u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
- DCHECK_EQ(prev & kWriteLock, 0);
- DCHECK_GT(prev & ~kWriteLock, 0);
- (void)prev;
+ explicit GenericScopedLock(MutexType *mu) SANITIZER_ACQUIRE(mu) : mu_(mu) {
+ mu_->Lock();
}
- void CheckLocked() const CHECK_LOCKED() {
- CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
- }
+ ~GenericScopedLock() SANITIZER_RELEASE() { mu_->Unlock(); }
private:
- atomic_uint32_t state_;
-
- enum {
- kUnlocked = 0,
- kWriteLock = 1,
- kReadLock = 2
- };
-
- void NOINLINE LockSlow() {
- for (int i = 0;; i++) {
- if (i < 10)
- proc_yield(10);
- else
- internal_sched_yield();
- u32 cmp = atomic_load(&state_, memory_order_relaxed);
- if (cmp == kUnlocked &&
- atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
- memory_order_acquire))
- return;
- }
- }
-
- void NOINLINE ReadLockSlow() {
- for (int i = 0;; i++) {
- if (i < 10)
- proc_yield(10);
- else
- internal_sched_yield();
- u32 prev = atomic_load(&state_, memory_order_acquire);
- if ((prev & kWriteLock) == 0)
- return;
- }
- }
+ MutexType *mu_;
- RWMutex(const RWMutex &) = delete;
- void operator=(const RWMutex &) = delete;
+ GenericScopedLock(const GenericScopedLock &) = delete;
+ void operator=(const GenericScopedLock &) = delete;
};
template <typename MutexType>
-class SCOPED_LOCK GenericScopedLock {
+class SANITIZER_SCOPED_LOCK GenericScopedReadLock {
public:
- explicit GenericScopedLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
- mu_->Lock();
+ explicit GenericScopedReadLock(MutexType *mu) SANITIZER_ACQUIRE(mu)
+ : mu_(mu) {
+ mu_->ReadLock();
}
- ~GenericScopedLock() RELEASE() { mu_->Unlock(); }
+ ~GenericScopedReadLock() SANITIZER_RELEASE() { mu_->ReadUnlock(); }
private:
MutexType *mu_;
- GenericScopedLock(const GenericScopedLock &) = delete;
- void operator=(const GenericScopedLock &) = delete;
+ GenericScopedReadLock(const GenericScopedReadLock &) = delete;
+ void operator=(const GenericScopedReadLock &) = delete;
};
template <typename MutexType>
-class SCOPED_LOCK GenericScopedReadLock {
+class SANITIZER_SCOPED_LOCK GenericScopedRWLock {
public:
- explicit GenericScopedReadLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
- mu_->ReadLock();
+ ALWAYS_INLINE explicit GenericScopedRWLock(MutexType *mu, bool write)
+ SANITIZER_ACQUIRE(mu)
+ : mu_(mu), write_(write) {
+ if (write_)
+ mu_->Lock();
+ else
+ mu_->ReadLock();
}
- ~GenericScopedReadLock() RELEASE() { mu_->ReadUnlock(); }
+ ALWAYS_INLINE ~GenericScopedRWLock() SANITIZER_RELEASE() {
+ if (write_)
+ mu_->Unlock();
+ else
+ mu_->ReadUnlock();
+ }
private:
MutexType *mu_;
+ bool write_;
- GenericScopedReadLock(const GenericScopedReadLock &) = delete;
- void operator=(const GenericScopedReadLock &) = delete;
+ GenericScopedRWLock(const GenericScopedRWLock &) = delete;
+ void operator=(const GenericScopedRWLock &) = delete;
};
typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
-typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
-typedef GenericScopedLock<RWMutex> RWMutexLock;
-typedef GenericScopedReadLock<RWMutex> RWMutexReadLock;
typedef GenericScopedLock<Mutex> Lock;
typedef GenericScopedReadLock<Mutex> ReadLock;
+typedef GenericScopedRWLock<Mutex> RWLock;
} // namespace __sanitizer
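
The new Mutex::TryLock() above is a textbook CAS loop over the packed state word. A standalone illustration with std::atomic rather than the sanitizer atomics; TryLockSketch is a hypothetical name, only the writer path is kept, and Unlock() omits the patch's waiter wake-up logic. The constants mirror the patch's bit layout.

#include <atomic>
#include <cstdint>

class TryLockSketch {
  static constexpr uint64_t kCounterWidth = 20;
  static constexpr uint64_t kReaderLockMask = (1ull << kCounterWidth) - 1;
  static constexpr uint64_t kWriterLock = 1ull << (3 * kCounterWidth);
  std::atomic<uint64_t> state_{0};

 public:
  bool TryLock() {
    uint64_t state = state_.load(std::memory_order_relaxed);
    for (;;) {
      if (state & (kWriterLock | kReaderLockMask))
        return false;  // somebody holds the mutex (read or write)
      // Not locked: try to grab the writer bit. On failure the CAS reloads
      // `state` and the loop re-checks.
      if (state_.compare_exchange_weak(state, state | kWriterLock,
                                       std::memory_order_acquire))
        return true;
    }
  }
  void Unlock() {
    state_.fetch_and(~kWriterLock, std::memory_order_release);
  }
};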
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_openbsd.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_openbsd.cpp
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_openbsd.cpp
+++ /dev/null
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_persistent_allocator.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_persistent_allocator.h
deleted file mode 100644
index de4fb6ebc3cf..000000000000
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_persistent_allocator.h
+++ /dev/null
@@ -1,71 +0,0 @@
-//===-- sanitizer_persistent_allocator.h ------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// A fast memory allocator that does not support free() nor realloc().
-// All allocations are forever.
-//===----------------------------------------------------------------------===//
-
-#ifndef SANITIZER_PERSISTENT_ALLOCATOR_H
-#define SANITIZER_PERSISTENT_ALLOCATOR_H
-
-#include "sanitizer_internal_defs.h"
-#include "sanitizer_mutex.h"
-#include "sanitizer_atomic.h"
-#include "sanitizer_common.h"
-
-namespace __sanitizer {
-
-class PersistentAllocator {
- public:
- void *alloc(uptr size);
-
- private:
- void *tryAlloc(uptr size);
- StaticSpinMutex mtx; // Protects alloc of new blocks for region allocator.
- atomic_uintptr_t region_pos; // Region allocator for Node's.
- atomic_uintptr_t region_end;
-};
-
-inline void *PersistentAllocator::tryAlloc(uptr size) {
- // Optimisic lock-free allocation, essentially try to bump the region ptr.
- for (;;) {
- uptr cmp = atomic_load(&region_pos, memory_order_acquire);
- uptr end = atomic_load(&region_end, memory_order_acquire);
- if (cmp == 0 || cmp + size > end) return nullptr;
- if (atomic_compare_exchange_weak(&region_pos, &cmp, cmp + size,
- memory_order_acquire))
- return (void *)cmp;
- }
-}
-
-inline void *PersistentAllocator::alloc(uptr size) {
- // First, try to allocate optimisitically.
- void *s = tryAlloc(size);
- if (s) return s;
- // If failed, lock, retry and alloc new superblock.
- SpinMutexLock l(&mtx);
- for (;;) {
- s = tryAlloc(size);
- if (s) return s;
- atomic_store(&region_pos, 0, memory_order_relaxed);
- uptr allocsz = 64 * 1024;
- if (allocsz < size) allocsz = size;
- uptr mem = (uptr)MmapOrDie(allocsz, "stack depot");
- atomic_store(&region_end, mem + allocsz, memory_order_release);
- atomic_store(&region_pos, mem, memory_order_release);
- }
-}
-
-extern PersistentAllocator thePersistentAllocator;
-inline void *PersistentAlloc(uptr sz) {
- return thePersistentAllocator.alloc(sz);
-}
-
-} // namespace __sanitizer
-
-#endif // SANITIZER_PERSISTENT_ALLOCATOR_H
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform.h
index 4d3c08893c11..596528155505 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform.h
@@ -22,103 +22,123 @@
// function declarations into a .S file which doesn't compile.
// https://crbug.com/1162741
#if __has_include(<features.h>) && !defined(__ANDROID__)
-#include <features.h>
+# include <features.h>
#endif
#if defined(__linux__)
-# define SANITIZER_LINUX 1
+# define SANITIZER_LINUX 1
#else
-# define SANITIZER_LINUX 0
+# define SANITIZER_LINUX 0
#endif
#if defined(__GLIBC__)
-# define SANITIZER_GLIBC 1
+# define SANITIZER_GLIBC 1
#else
-# define SANITIZER_GLIBC 0
+# define SANITIZER_GLIBC 0
#endif
#if defined(__FreeBSD__)
-# define SANITIZER_FREEBSD 1
+# define SANITIZER_FREEBSD 1
#else
-# define SANITIZER_FREEBSD 0
+# define SANITIZER_FREEBSD 0
#endif
#if defined(__NetBSD__)
-# define SANITIZER_NETBSD 1
+# define SANITIZER_NETBSD 1
#else
-# define SANITIZER_NETBSD 0
+# define SANITIZER_NETBSD 0
#endif
#if defined(__sun__) && defined(__svr4__)
-# define SANITIZER_SOLARIS 1
+# define SANITIZER_SOLARIS 1
#else
-# define SANITIZER_SOLARIS 0
+# define SANITIZER_SOLARIS 0
#endif
+// - SANITIZER_APPLE: all Apple code
+// - TARGET_OS_OSX: macOS
+// - SANITIZER_IOS: devices (iOS and iOS-like)
+// - SANITIZER_WATCHOS
+// - SANITIZER_TVOS
+// - SANITIZER_IOSSIM: simulators (iOS and iOS-like)
+// - SANITIZER_DRIVERKIT
#if defined(__APPLE__)
-# define SANITIZER_MAC 1
-# include <TargetConditionals.h>
-# if TARGET_OS_OSX
-# define SANITIZER_OSX 1
-# else
-# define SANITIZER_OSX 0
-# endif
-# if TARGET_OS_IPHONE
-# define SANITIZER_IOS 1
-# else
-# define SANITIZER_IOS 0
-# endif
-# if TARGET_OS_SIMULATOR
-# define SANITIZER_IOSSIM 1
-# else
-# define SANITIZER_IOSSIM 0
-# endif
-#else
-# define SANITIZER_MAC 0
-# define SANITIZER_IOS 0
-# define SANITIZER_IOSSIM 0
-# define SANITIZER_OSX 0
-#endif
-
-#if defined(__APPLE__) && TARGET_OS_IPHONE && TARGET_OS_WATCH
-# define SANITIZER_WATCHOS 1
-#else
-# define SANITIZER_WATCHOS 0
-#endif
-
-#if defined(__APPLE__) && TARGET_OS_IPHONE && TARGET_OS_TV
-# define SANITIZER_TVOS 1
+# define SANITIZER_APPLE 1
+# include <TargetConditionals.h>
+# if TARGET_OS_OSX
+# define SANITIZER_OSX 1
+# else
+# define SANITIZER_OSX 0
+# endif
+# if TARGET_OS_IPHONE
+# define SANITIZER_IOS 1
+# else
+# define SANITIZER_IOS 0
+# endif
+# if TARGET_OS_WATCH
+# define SANITIZER_WATCHOS 1
+# else
+# define SANITIZER_WATCHOS 0
+# endif
+# if TARGET_OS_TV
+# define SANITIZER_TVOS 1
+# else
+# define SANITIZER_TVOS 0
+# endif
+# if TARGET_OS_SIMULATOR
+# define SANITIZER_IOSSIM 1
+# else
+# define SANITIZER_IOSSIM 0
+# endif
+# if defined(TARGET_OS_DRIVERKIT) && TARGET_OS_DRIVERKIT
+# define SANITIZER_DRIVERKIT 1
+# else
+# define SANITIZER_DRIVERKIT 0
+# endif
#else
-# define SANITIZER_TVOS 0
+# define SANITIZER_APPLE 0
+# define SANITIZER_OSX 0
+# define SANITIZER_IOS 0
+# define SANITIZER_WATCHOS 0
+# define SANITIZER_TVOS 0
+# define SANITIZER_IOSSIM 0
+# define SANITIZER_DRIVERKIT 0
#endif
#if defined(_WIN32)
-# define SANITIZER_WINDOWS 1
+# define SANITIZER_WINDOWS 1
#else
-# define SANITIZER_WINDOWS 0
+# define SANITIZER_WINDOWS 0
#endif
#if defined(_WIN64)
-# define SANITIZER_WINDOWS64 1
+# define SANITIZER_WINDOWS64 1
#else
-# define SANITIZER_WINDOWS64 0
+# define SANITIZER_WINDOWS64 0
#endif
#if defined(__ANDROID__)
-# define SANITIZER_ANDROID 1
+# define SANITIZER_ANDROID 1
#else
-# define SANITIZER_ANDROID 0
+# define SANITIZER_ANDROID 0
#endif
#if defined(__Fuchsia__)
-# define SANITIZER_FUCHSIA 1
+# define SANITIZER_FUCHSIA 1
+#else
+# define SANITIZER_FUCHSIA 0
+#endif
+
+// Assume that Linux which is neither glibc nor Android is musl libc.
+#if SANITIZER_LINUX && !SANITIZER_GLIBC && !SANITIZER_ANDROID
+# define SANITIZER_MUSL 1
#else
-# define SANITIZER_FUCHSIA 0
+# define SANITIZER_MUSL 0
#endif
-#define SANITIZER_POSIX \
- (SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC || \
- SANITIZER_NETBSD || SANITIZER_SOLARIS)
+#define SANITIZER_POSIX \
+ (SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_APPLE || \
+ SANITIZER_NETBSD || SANITIZER_SOLARIS)
#if __LP64__ || defined(_WIN64)
# define SANITIZER_WORDSIZE 64
@@ -127,58 +147,79 @@
#endif
#if SANITIZER_WORDSIZE == 64
-# define FIRST_32_SECOND_64(a, b) (b)
+# define FIRST_32_SECOND_64(a, b) (b)
#else
-# define FIRST_32_SECOND_64(a, b) (a)
+# define FIRST_32_SECOND_64(a, b) (a)
#endif
#if defined(__x86_64__) && !defined(_LP64)
-# define SANITIZER_X32 1
+# define SANITIZER_X32 1
+#else
+# define SANITIZER_X32 0
+#endif
+
+#if defined(__x86_64__) || defined(_M_X64)
+# define SANITIZER_X64 1
#else
-# define SANITIZER_X32 0
+# define SANITIZER_X64 0
#endif
#if defined(__i386__) || defined(_M_IX86)
-# define SANITIZER_I386 1
+# define SANITIZER_I386 1
#else
-# define SANITIZER_I386 0
+# define SANITIZER_I386 0
#endif
#if defined(__mips__)
-# define SANITIZER_MIPS 1
-# if defined(__mips64)
+# define SANITIZER_MIPS 1
+# if defined(__mips64) && _MIPS_SIM == _ABI64
+# define SANITIZER_MIPS32 0
+# define SANITIZER_MIPS64 1
+# else
+# define SANITIZER_MIPS32 1
+# define SANITIZER_MIPS64 0
+# endif
+#else
+# define SANITIZER_MIPS 0
# define SANITIZER_MIPS32 0
-# define SANITIZER_MIPS64 1
-# else
-# define SANITIZER_MIPS32 1
# define SANITIZER_MIPS64 0
-# endif
-#else
-# define SANITIZER_MIPS 0
-# define SANITIZER_MIPS32 0
-# define SANITIZER_MIPS64 0
#endif
#if defined(__s390__)
-# define SANITIZER_S390 1
-# if defined(__s390x__)
+# define SANITIZER_S390 1
+# if defined(__s390x__)
+# define SANITIZER_S390_31 0
+# define SANITIZER_S390_64 1
+# else
+# define SANITIZER_S390_31 1
+# define SANITIZER_S390_64 0
+# endif
+#else
+# define SANITIZER_S390 0
# define SANITIZER_S390_31 0
-# define SANITIZER_S390_64 1
-# else
-# define SANITIZER_S390_31 1
# define SANITIZER_S390_64 0
-# endif
+#endif
+
+#if defined(__sparc__)
+# define SANITIZER_SPARC 1
+# if defined(__arch64__)
+# define SANITIZER_SPARC32 0
+# define SANITIZER_SPARC64 1
+# else
+# define SANITIZER_SPARC32 1
+# define SANITIZER_SPARC64 0
+# endif
#else
-# define SANITIZER_S390 0
-# define SANITIZER_S390_31 0
-# define SANITIZER_S390_64 0
+# define SANITIZER_SPARC 0
+# define SANITIZER_SPARC32 0
+# define SANITIZER_SPARC64 0
#endif
#if defined(__powerpc__)
-# define SANITIZER_PPC 1
-# if defined(__powerpc64__)
-# define SANITIZER_PPC32 0
-# define SANITIZER_PPC64 1
+# define SANITIZER_PPC 1
+# if defined(__powerpc64__)
+# define SANITIZER_PPC32 0
+# define SANITIZER_PPC64 1
// 64-bit PPC has two ABIs (v1 and v2). The old powerpc64 target is
// big-endian, and uses v1 ABI (known for its function descriptors),
// while the new powerpc64le target is little-endian and uses v2.
@@ -186,106 +227,129 @@
-// (e.g. big-endian v2), but you won't find such combinations in the wild
// (it'd require bootstrapping a whole system, which would be quite painful
// - there's no target triple for that). LLVM doesn't support them either.
-# if _CALL_ELF == 2
-# define SANITIZER_PPC64V1 0
-# define SANITIZER_PPC64V2 1
+# if _CALL_ELF == 2
+# define SANITIZER_PPC64V1 0
+# define SANITIZER_PPC64V2 1
+# else
+# define SANITIZER_PPC64V1 1
+# define SANITIZER_PPC64V2 0
+# endif
# else
-# define SANITIZER_PPC64V1 1
-# define SANITIZER_PPC64V2 0
+# define SANITIZER_PPC32 1
+# define SANITIZER_PPC64 0
+# define SANITIZER_PPC64V1 0
+# define SANITIZER_PPC64V2 0
# endif
-# else
-# define SANITIZER_PPC32 1
+#else
+# define SANITIZER_PPC 0
+# define SANITIZER_PPC32 0
# define SANITIZER_PPC64 0
# define SANITIZER_PPC64V1 0
# define SANITIZER_PPC64V2 0
-# endif
+#endif
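The ELFv1/ELFv2 note above reduces to a single compile-time probe; a hedged sketch of the same test in standalone code (the macro name PPC64_ABI_V2 is illustrative):

    #if defined(__powerpc64__)
    #  if defined(_CALL_ELF) && _CALL_ELF == 2
    // ELFv2: the little-endian powerpc64le world, direct function entry points.
    #    define PPC64_ABI_V2 1
    #  else
    // ELFv1: classic big-endian powerpc64 with function descriptors.
    #    define PPC64_ABI_V2 0
    #  endif
    #endif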
+
+#if defined(__arm__) || defined(_M_ARM)
+# define SANITIZER_ARM 1
#else
-# define SANITIZER_PPC 0
-# define SANITIZER_PPC32 0
-# define SANITIZER_PPC64 0
-# define SANITIZER_PPC64V1 0
-# define SANITIZER_PPC64V2 0
+# define SANITIZER_ARM 0
#endif
-#if defined(__arm__)
-# define SANITIZER_ARM 1
+#if defined(__aarch64__) || defined(_M_ARM64)
+# define SANITIZER_ARM64 1
#else
-# define SANITIZER_ARM 0
+# define SANITIZER_ARM64 0
+#endif
+
+#if SANITIZER_WINDOWS64 && SANITIZER_ARM64
+# define SANITIZER_WINDOWS_ARM64 1
+# define SANITIZER_WINDOWS_x64 0
+#elif SANITIZER_WINDOWS64 && !SANITIZER_ARM64
+# define SANITIZER_WINDOWS_ARM64 0
+# define SANITIZER_WINDOWS_x64 1
+#else
+# define SANITIZER_WINDOWS_ARM64 0
+# define SANITIZER_WINDOWS_x64 0
#endif
#if SANITIZER_SOLARIS && SANITIZER_WORDSIZE == 32
-# define SANITIZER_SOLARIS32 1
+# define SANITIZER_SOLARIS32 1
#else
-# define SANITIZER_SOLARIS32 0
+# define SANITIZER_SOLARIS32 0
#endif
#if defined(__riscv) && (__riscv_xlen == 64)
-#define SANITIZER_RISCV64 1
+# define SANITIZER_RISCV64 1
+#else
+# define SANITIZER_RISCV64 0
+#endif
+
+#if defined(__loongarch_lp64)
+# define SANITIZER_LOONGARCH64 1
#else
-#define SANITIZER_RISCV64 0
+# define SANITIZER_LOONGARCH64 0
#endif
// By default we allow the use of SizeClassAllocator64 on 64-bit platforms.
-// But in some cases (e.g. AArch64's 39-bit address space) SizeClassAllocator64
-// does not work well and we need to fallback to SizeClassAllocator32.
+// But in some cases SizeClassAllocator64 does not work well and we need to
+// fall back to SizeClassAllocator32.
// For such platforms build this code with -DSANITIZER_CAN_USE_ALLOCATOR64=0 or
// change the definition of SANITIZER_CAN_USE_ALLOCATOR64 here.
#ifndef SANITIZER_CAN_USE_ALLOCATOR64
-# if (SANITIZER_ANDROID && defined(__aarch64__)) || SANITIZER_FUCHSIA
-# define SANITIZER_CAN_USE_ALLOCATOR64 1
-# elif defined(__mips64) || defined(__aarch64__)
-# define SANITIZER_CAN_USE_ALLOCATOR64 0
-# else
-# define SANITIZER_CAN_USE_ALLOCATOR64 (SANITIZER_WORDSIZE == 64)
-# endif
+# if (SANITIZER_RISCV64 && !SANITIZER_FUCHSIA) || SANITIZER_IOS || \
+ SANITIZER_DRIVERKIT
+# define SANITIZER_CAN_USE_ALLOCATOR64 0
+# elif defined(__mips64) || defined(__hexagon__)
+# define SANITIZER_CAN_USE_ALLOCATOR64 0
+# else
+# define SANITIZER_CAN_USE_ALLOCATOR64 (SANITIZER_WORDSIZE == 64)
+# endif
#endif
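Because the whole block is wrapped in #ifndef, a build can pin the choice before the header is seen; a minimal sketch equivalent to the -D flag mentioned in the comment above:

    // Force the 32-bit size-class allocator on a platform where the default
    // heuristic would pick the 64-bit one.
    #define SANITIZER_CAN_USE_ALLOCATOR64 0
    #include "sanitizer_platform.h"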
// The range of addresses which can be returned by mmap.
// FIXME: this value should be different on different platforms. Larger values
// will still work but will consume more memory for TwoLevelByteMap.
#if defined(__mips__)
-#if SANITIZER_GO && defined(__mips64)
-#define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
-#else
-# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 40)
-#endif
+# if SANITIZER_GO && defined(__mips64)
+# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
+# else
+# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 40)
+# endif
#elif SANITIZER_RISCV64
-#define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 38)
+// FIXME: Rather than hardcoding the VMA here, we should rely on
+// GetMaxUserVirtualAddress(). This will require some refactoring, though,
+// since many places either hardcode a value or assume that
+// SANITIZER_MMAP_RANGE_SIZE is a constant integer.
+# if SANITIZER_FUCHSIA
+# define SANITIZER_MMAP_RANGE_SIZE (1ULL << 38)
+# else
+# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
+# endif
#elif defined(__aarch64__)
-# if SANITIZER_MAC
-# if SANITIZER_OSX || SANITIZER_IOSSIM
-# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
+# if SANITIZER_APPLE
+# if SANITIZER_OSX || SANITIZER_IOSSIM
+# define SANITIZER_MMAP_RANGE_SIZE \
+ FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
+# else
+// Darwin iOS/ARM64 has a 36-bit VMA, 64GiB VM
+# define SANITIZER_MMAP_RANGE_SIZE \
+ FIRST_32_SECOND_64(1ULL << 32, 1ULL << 36)
+# endif
# else
- // Darwin iOS/ARM64 has a 36-bit VMA, 64GiB VM
-# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 36)
+# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 48)
# endif
-# else
-# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 48)
-# endif
#elif defined(__sparc__)
-#define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 52)
+# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 52)
#else
-# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
+# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
#endif
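Since the constant is an upper bound on what mmap may return, it supports a cheap plausibility check on addresses; a minimal sketch (the helper name is hypothetical):

    #include <cstdint>
    // Reject addresses the platform's mmap could never have handed out. Note
    // the sign-extension caveat immediately below: on sparc64 the mirrored
    // top half of the address space needs its own check.
    static inline bool AddrIsInMmapRange(uint64_t a) {
      return a < SANITIZER_MMAP_RANGE_SIZE;
    }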
// Whether the addresses are sign-extended from the VMA range to the word.
// The SPARC64 Linux port implements this to split the VMA space into two
// non-contiguous halves with a huge hole in the middle.
#if defined(__sparc__) && SANITIZER_WORDSIZE == 64
-#define SANITIZER_SIGN_EXTENDED_ADDRESSES 1
+# define SANITIZER_SIGN_EXTENDED_ADDRESSES 1
#else
-#define SANITIZER_SIGN_EXTENDED_ADDRESSES 0
-#endif
-
-// The AArch64 and RISC-V linux ports use the canonical syscall set as
-// mandated by the upstream linux community for all new ports. Other ports
-// may still use legacy syscalls.
-#ifndef SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
-# if (defined(__aarch64__) || defined(__riscv)) && SANITIZER_LINUX
-# define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 1
-# else
-# define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 0
-# endif
+# define SANITIZER_SIGN_EXTENDED_ADDRESSES 0
#endif
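A hedged sketch of what the sign-extension split means in practice, assuming the 52-bit sparc64 VMA selected above (names illustrative):

    #include <cstdint>
    // An address is canonical if it lies in the low half of the VMA or in
    // its sign-extended mirror at the top of the 64-bit word.
    constexpr unsigned kVmaBits = 52;
    static inline bool IsCanonicalSparc64(uint64_t a) {
      uint64_t top = a >> kVmaBits;
      return top == 0 || top == (UINT64_MAX >> kVmaBits);
    }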
// uid16 syscalls can only be used when the following conditions are
@@ -296,15 +360,15 @@
// Since we don't want to include libc headers here, we check the
// target only.
#if defined(__arm__) || SANITIZER_X32 || defined(__sparc__)
-#define SANITIZER_USES_UID16_SYSCALLS 1
+# define SANITIZER_USES_UID16_SYSCALLS 1
#else
-#define SANITIZER_USES_UID16_SYSCALLS 0
+# define SANITIZER_USES_UID16_SYSCALLS 0
#endif
#if defined(__mips__)
-# define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 10)
+# define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 10)
#else
-# define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 12)
+# define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 12)
#endif
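The format length sizes the zero-padded hex field so report columns line up; a minimal sketch with plain stdio (the runtime uses its own Printf):

    #include <cstdint>
    #include <cstdio>
    // 12 hex digits on typical 64-bit targets, 10 on 64-bit MIPS with its
    // 40-bit mmap range, 8 on 32-bit targets.
    static void PrintAddr(uint64_t addr) {
      std::printf("0x%0*llx\n", SANITIZER_POINTER_FORMAT_LENGTH,
                  (unsigned long long)addr);
    }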
/// \macro MSC_PREREQ
@@ -313,15 +377,15 @@
/// * 1800: Microsoft Visual Studio 2013 / 12.0
/// * 1900: Microsoft Visual Studio 2015 / 14.0
#ifdef _MSC_VER
-# define MSC_PREREQ(version) (_MSC_VER >= (version))
+# define MSC_PREREQ(version) (_MSC_VER >= (version))
#else
-# define MSC_PREREQ(version) 0
+# define MSC_PREREQ(version) 0
#endif
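Usage sketch, grounded in the version table from the doc comment above:

    #if MSC_PREREQ(1900)
    // Visual Studio 2015 (14.0) or newer toolset.
    #else
    // Older MSVC, or a non-MSVC compiler where MSC_PREREQ(x) is 0 by design.
    #endif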
-#if SANITIZER_MAC && !(defined(__arm64__) && SANITIZER_IOS)
-# define SANITIZER_NON_UNIQUE_TYPEINFO 0
+#if SANITIZER_APPLE && defined(__x86_64__)
+# define SANITIZER_NON_UNIQUE_TYPEINFO 0
#else
-# define SANITIZER_NON_UNIQUE_TYPEINFO 1
+# define SANITIZER_NON_UNIQUE_TYPEINFO 1
#endif
// On linux, some architectures had an ABI transition from 64-bit long double
@@ -329,11 +393,11 @@
// involving long doubles come in two versions, and we need to pass the
// correct one to dlvsym when intercepting them.
#if SANITIZER_LINUX && (SANITIZER_S390 || SANITIZER_PPC32 || SANITIZER_PPC64V1)
-#define SANITIZER_NLDBL_VERSION "GLIBC_2.4"
+# define SANITIZER_NLDBL_VERSION "GLIBC_2.4"
#endif
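A hedged sketch of the dual-version lookup the comment describes; the symbol chosen (printf) is only illustrative, and dlvsym is a GNU extension that requires building with -D_GNU_SOURCE:

    #include <dlfcn.h>
    #ifdef SANITIZER_NLDBL_VERSION
    // Ask the dynamic linker for the long-double-as-64-bit variant.
    static void *real_nldbl_printf =
        dlvsym(RTLD_NEXT, "printf", SANITIZER_NLDBL_VERSION);
    #endif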
#if SANITIZER_GO == 0
-# define SANITIZER_GO 0
+# define SANITIZER_GO 0
#endif
// On PowerPC and ARM Thumb, calling pthread_exit() causes LSan to detect leaks.
@@ -341,40 +405,64 @@
// dlopen mallocs a "libgcc_s.so" string, which confuses LSan; it fails to
// realize that this allocation happens in the dynamic linker and should be
// ignored.
#if SANITIZER_PPC || defined(__thumb__)
-# define SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT 1
+# define SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT 1
#else
-# define SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT 0
+# define SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT 0
#endif
-#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD || \
- SANITIZER_SOLARIS
-# define SANITIZER_MADVISE_DONTNEED MADV_FREE
+#if SANITIZER_FREEBSD || SANITIZER_APPLE || SANITIZER_NETBSD || \
+    SANITIZER_SOLARIS
+# define SANITIZER_MADVISE_DONTNEED MADV_FREE
#else
-# define SANITIZER_MADVISE_DONTNEED MADV_DONTNEED
+# define SANITIZER_MADVISE_DONTNEED MADV_DONTNEED
#endif
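Minimal sketch of the intended use: return pages to the OS with whichever advice the platform favors (MADV_FREE is lazier but only exists on the systems listed above):

    #include <sys/mman.h>
    static void ReleasePages(void *p, size_t size) {
      madvise(p, size, SANITIZER_MADVISE_DONTNEED);
    }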
// Older gcc has issues aligning to a constexpr, and requires an integer.
// See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=56859 among others.
#if defined(__powerpc__) || defined(__powerpc64__)
-# define SANITIZER_CACHE_LINE_SIZE 128
+# define SANITIZER_CACHE_LINE_SIZE 128
#else
-# define SANITIZER_CACHE_LINE_SIZE 64
+# define SANITIZER_CACHE_LINE_SIZE 64
#endif
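The constant stays an integer literal precisely so older gcc can consume it in an alignment specifier; a minimal sketch (the struct name is hypothetical):

    struct alignas(SANITIZER_CACHE_LINE_SIZE) PaddedCounter {
      long value;
      char pad[SANITIZER_CACHE_LINE_SIZE - sizeof(long)];
    };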
// Enable offline markup symbolizer for Fuchsia.
#if SANITIZER_FUCHSIA
# define SANITIZER_SYMBOLIZER_MARKUP 1
#else
-#define SANITIZER_SYMBOLIZER_MARKUP 0
+# define SANITIZER_SYMBOLIZER_MARKUP 0
#endif
// Enable support for sanitizer initialization that is compatible with the
// sanitizer library being loaded via `dlopen()`.
-#if SANITIZER_MAC
-#define SANITIZER_SUPPORTS_INIT_FOR_DLOPEN 1
+#if SANITIZER_APPLE
+# define SANITIZER_SUPPORTS_INIT_FOR_DLOPEN 1
+#else
+# define SANITIZER_SUPPORTS_INIT_FOR_DLOPEN 0
+#endif
+
+// SANITIZER_SUPPORTS_THREADLOCAL
+// 1 - THREADLOCAL macro is supported by target
+// 0 - THREADLOCAL macro is not supported by target
+#ifndef __has_feature
+// TODO: Support other compilers here
+# define SANITIZER_SUPPORTS_THREADLOCAL 1
+#else
+# if __has_feature(tls)
+# define SANITIZER_SUPPORTS_THREADLOCAL 1
+# else
+# define SANITIZER_SUPPORTS_THREADLOCAL 0
+# endif
+#endif
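Sketch of how the capability macro gates per-thread state; THREADLOCAL is the runtime's spelling of __thread, and the variable here is hypothetical:

    #if SANITIZER_SUPPORTS_THREADLOCAL
    static THREADLOCAL int per_thread_scratch;
    #else
    static int per_thread_scratch;  // degrade to one process-wide slot
    #endif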
+
+#if defined(__thumb__) && defined(__linux__)
+// Workaround for
+// https://lab.llvm.org/buildbot/#/builders/clang-thumbv7-full-2stage
+// or
+// https://lab.llvm.org/staging/#/builders/clang-thumbv7-full-2stage
+// It fails *rss_limit_mb_test* without meaningful errors.
+# define SANITIZER_START_BACKGROUND_THREAD_IN_ASAN_INTERNAL 1
#else
-#define SANITIZER_SUPPORTS_INIT_FOR_DLOPEN 0
+# define SANITIZER_START_BACKGROUND_THREAD_IN_ASAN_INTERNAL 0
#endif
-#endif // SANITIZER_PLATFORM_H
+#endif // SANITIZER_PLATFORM_H
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h
index 5b710c23fd00..de55c736d0e1 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h
@@ -76,7 +76,7 @@
#define SI_LINUX 0
#endif
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
#define SI_MAC 1
#define SI_NOT_MAC 0
#else
@@ -126,7 +126,7 @@
#define SI_SOLARIS32 0
#endif
-#if SANITIZER_POSIX && !SANITIZER_MAC
+#if SANITIZER_POSIX && !SANITIZER_APPLE
#define SI_POSIX_NOT_MAC 1
#else
#define SI_POSIX_NOT_MAC 0
@@ -191,7 +191,8 @@
#define SANITIZER_INTERCEPT_PREADV \
(SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID)
-#define SANITIZER_INTERCEPT_PWRITEV SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PWRITEV \
+ (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_PREADV64 SI_GLIBC
#define SANITIZER_INTERCEPT_PWRITEV64 SI_GLIBC
@@ -229,11 +230,15 @@
(SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_CLOCK_GETTIME \
(SI_FREEBSD || SI_NETBSD || SI_LINUX || SI_SOLARIS)
-#define SANITIZER_INTERCEPT_CLOCK_GETCPUCLOCKID SI_LINUX
+#define SANITIZER_INTERCEPT_CLOCK_GETCPUCLOCKID \
+ (SI_LINUX || SI_FREEBSD || SI_NETBSD)
#define SANITIZER_INTERCEPT_GETITIMER SI_POSIX
#define SANITIZER_INTERCEPT_TIME SI_POSIX
#define SANITIZER_INTERCEPT_GLOB (SI_GLIBC || SI_SOLARIS)
#define SANITIZER_INTERCEPT_GLOB64 SI_GLIBC
+#define SANITIZER_INTERCEPT___B64_TO SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_DN_COMP_EXPAND SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_POSIX_SPAWN SI_POSIX
#define SANITIZER_INTERCEPT_WAIT SI_POSIX
#define SANITIZER_INTERCEPT_INET SI_POSIX
#define SANITIZER_INTERCEPT_PTHREAD_GETSCHEDPARAM SI_POSIX
@@ -251,7 +256,8 @@
#define SANITIZER_INTERCEPT_GETHOSTENT_R (SI_FREEBSD || SI_GLIBC || SI_SOLARIS)
#define SANITIZER_INTERCEPT_GETSOCKOPT SI_POSIX
#define SANITIZER_INTERCEPT_ACCEPT SI_POSIX
-#define SANITIZER_INTERCEPT_ACCEPT4 (SI_LINUX_NOT_ANDROID || SI_NETBSD)
+#define SANITIZER_INTERCEPT_ACCEPT4 \
+ (SI_LINUX_NOT_ANDROID || SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_PACCEPT SI_NETBSD
#define SANITIZER_INTERCEPT_MODF SI_POSIX
#define SANITIZER_INTERCEPT_RECVMSG SI_POSIX
@@ -264,11 +270,11 @@
#define SANITIZER_INTERCEPT_INET_ATON SI_POSIX
#define SANITIZER_INTERCEPT_SYSINFO SI_LINUX
#define SANITIZER_INTERCEPT_READDIR SI_POSIX
-#define SANITIZER_INTERCEPT_READDIR64 SI_LINUX_NOT_ANDROID || SI_SOLARIS32
+#define SANITIZER_INTERCEPT_READDIR64 SI_GLIBC || SI_SOLARIS32
#if SI_LINUX_NOT_ANDROID && \
(defined(__i386) || defined(__x86_64) || defined(__mips64) || \
defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \
- defined(__s390__) || SANITIZER_RISCV64)
+ defined(__s390__) || defined(__loongarch__) || SANITIZER_RISCV64)
#define SANITIZER_INTERCEPT_PTRACE 1
#else
#define SANITIZER_INTERCEPT_PTRACE 0
@@ -296,20 +302,21 @@
#define SANITIZER_INTERCEPT_CANONICALIZE_FILE_NAME (SI_GLIBC || SI_SOLARIS)
#define SANITIZER_INTERCEPT_CONFSTR \
(SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
-#define SANITIZER_INTERCEPT_SCHED_GETAFFINITY SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_SCHED_GETAFFINITY \
+ (SI_LINUX_NOT_ANDROID || SI_FREEBSD)
#define SANITIZER_INTERCEPT_SCHED_GETPARAM SI_LINUX_NOT_ANDROID || SI_SOLARIS
#define SANITIZER_INTERCEPT_STRERROR SI_POSIX
#define SANITIZER_INTERCEPT_STRERROR_R SI_POSIX
#define SANITIZER_INTERCEPT_XPG_STRERROR_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SCANDIR \
(SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
-#define SANITIZER_INTERCEPT_SCANDIR64 SI_LINUX_NOT_ANDROID || SI_SOLARIS32
+#define SANITIZER_INTERCEPT_SCANDIR64 SI_GLIBC || SI_SOLARIS32
#define SANITIZER_INTERCEPT_GETGROUPS SI_POSIX
#define SANITIZER_INTERCEPT_POLL SI_POSIX
#define SANITIZER_INTERCEPT_PPOLL SI_LINUX_NOT_ANDROID || SI_SOLARIS
#define SANITIZER_INTERCEPT_WORDEXP \
(SI_FREEBSD || SI_NETBSD || (SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID || \
- SI_SOLARIS) // NOLINT
+ SI_SOLARIS)
#define SANITIZER_INTERCEPT_SIGWAIT SI_POSIX
#define SANITIZER_INTERCEPT_SIGWAITINFO SI_LINUX_NOT_ANDROID || SI_SOLARIS
#define SANITIZER_INTERCEPT_SIGTIMEDWAIT SI_LINUX_NOT_ANDROID || SI_SOLARIS
@@ -325,11 +332,10 @@
#define SANITIZER_INTERCEPT_GETMNTENT_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_STATFS \
(SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
-#define SANITIZER_INTERCEPT_STATFS64 \
- (((SI_MAC && !TARGET_CPU_ARM64) && !SI_IOS) || SI_LINUX_NOT_ANDROID)
+#define SANITIZER_INTERCEPT_STATFS64 SI_GLIBC && SANITIZER_HAS_STATFS64
#define SANITIZER_INTERCEPT_STATVFS \
(SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID)
-#define SANITIZER_INTERCEPT_STATVFS64 SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_STATVFS64 SI_GLIBC
#define SANITIZER_INTERCEPT_INITGROUPS SI_POSIX
#define SANITIZER_INTERCEPT_ETHER_NTOA_ATON SI_POSIX
#define SANITIZER_INTERCEPT_ETHER_HOST \
@@ -337,12 +343,14 @@
#define SANITIZER_INTERCEPT_ETHER_R (SI_FREEBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_SHMCTL \
(((SI_FREEBSD || SI_LINUX_NOT_ANDROID) && SANITIZER_WORDSIZE == 64) || \
- SI_NETBSD || SI_SOLARIS) // NOLINT
+ SI_NETBSD || SI_SOLARIS)
#define SANITIZER_INTERCEPT_RANDOM_R SI_GLIBC
#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET SI_POSIX
#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED \
(SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETAFFINITY_NP SI_GLIBC
+#define SANITIZER_INTERCEPT_PTHREAD_GETAFFINITY_NP \
+ (SI_LINUX_NOT_ANDROID || SI_FREEBSD)
#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET_SCHED SI_POSIX
#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPSHARED \
(SI_POSIX && !SI_NETBSD)
@@ -362,6 +370,8 @@
(SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_PTHREAD_BARRIERATTR_GETPSHARED \
(SI_LINUX_NOT_ANDROID && !SI_NETBSD)
+#define SANITIZER_INTERCEPT_TRYJOIN SI_GLIBC
+#define SANITIZER_INTERCEPT_TIMEDJOIN SI_GLIBC
#define SANITIZER_INTERCEPT_THR_EXIT SI_FREEBSD
#define SANITIZER_INTERCEPT_TMPNAM SI_POSIX
#define SANITIZER_INTERCEPT_TMPNAM_R (SI_GLIBC || SI_SOLARIS)
@@ -391,8 +401,6 @@
#define SANITIZER_INTERCEPT__EXIT \
(SI_LINUX || SI_FREEBSD || SI_NETBSD || SI_MAC || SI_SOLARIS)
-#define SANITIZER_INTERCEPT_PTHREAD_MUTEX SI_POSIX
-#define SANITIZER_INTERCEPT___PTHREAD_MUTEX SI_GLIBC
#define SANITIZER_INTERCEPT___LIBC_MUTEX SI_NETBSD
#define SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP \
(SI_FREEBSD || SI_NETBSD || SI_GLIBC || SI_SOLARIS)
@@ -400,7 +408,7 @@
(SI_FREEBSD || SI_NETBSD || SI_GLIBC || SI_SOLARIS)
#define SANITIZER_INTERCEPT_TLS_GET_ADDR \
- (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+ (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_LISTXATTR SI_LINUX
#define SANITIZER_INTERCEPT_GETXATTR SI_LINUX
@@ -445,7 +453,8 @@
#define SANITIZER_INTERCEPT_SEM \
(SI_LINUX || SI_FREEBSD || SI_NETBSD || SI_SOLARIS)
#define SANITIZER_INTERCEPT_PTHREAD_SETCANCEL SI_POSIX
-#define SANITIZER_INTERCEPT_MINCORE (SI_LINUX || SI_NETBSD || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_MINCORE \
+ (SI_LINUX || SI_NETBSD || SI_FREEBSD || SI_SOLARIS)
#define SANITIZER_INTERCEPT_PROCESS_VM_READV SI_LINUX
#define SANITIZER_INTERCEPT_CTERMID \
(SI_LINUX || SI_MAC || SI_FREEBSD || SI_NETBSD || SI_SOLARIS)
@@ -455,15 +464,19 @@
(SI_LINUX || SI_MAC || SI_WINDOWS || SI_FREEBSD || SI_NETBSD || SI_SOLARIS)
#define SANITIZER_INTERCEPT_RECV_RECVFROM SI_POSIX
#define SANITIZER_INTERCEPT_SEND_SENDTO SI_POSIX
-#define SANITIZER_INTERCEPT_EVENTFD_READ_WRITE SI_LINUX
-
-#define SANITIZER_INTERCEPT_STAT \
- (SI_FREEBSD || SI_MAC || SI_ANDROID || SI_NETBSD || SI_SOLARIS)
-#define SANITIZER_INTERCEPT_LSTAT (SI_NETBSD || SI_FREEBSD)
-#define SANITIZER_INTERCEPT___XSTAT (!SANITIZER_INTERCEPT_STAT && SI_POSIX)
-#define SANITIZER_INTERCEPT___XSTAT64 SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_EVENTFD_READ_WRITE (SI_LINUX || SI_FREEBSD)
+
+#define SI_STAT_LINUX (SI_LINUX && __GLIBC_PREREQ(2, 33))
+#define SANITIZER_INTERCEPT_STAT \
+ (SI_FREEBSD || SI_MAC || SI_ANDROID || SI_NETBSD || SI_SOLARIS || \
+ SI_STAT_LINUX)
+#define SANITIZER_INTERCEPT_STAT64 SI_STAT_LINUX && SANITIZER_HAS_STAT64
+#define SANITIZER_INTERCEPT_LSTAT (SI_NETBSD || SI_FREEBSD || SI_STAT_LINUX)
+#define SANITIZER_INTERCEPT___XSTAT \
+ ((!SANITIZER_INTERCEPT_STAT && SI_POSIX) || SI_STAT_LINUX)
+#define SANITIZER_INTERCEPT___XSTAT64 SI_GLIBC
#define SANITIZER_INTERCEPT___LXSTAT SANITIZER_INTERCEPT___XSTAT
-#define SANITIZER_INTERCEPT___LXSTAT64 SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT___LXSTAT64 SI_GLIBC
#define SANITIZER_INTERCEPT_UTMP \
(SI_POSIX && !SI_MAC && !SI_FREEBSD && !SI_NETBSD)
@@ -474,7 +487,7 @@
(SI_LINUX_NOT_ANDROID || SI_MAC || SI_FREEBSD || SI_NETBSD)
#define SANITIZER_INTERCEPT_MMAP SI_POSIX
-#define SANITIZER_INTERCEPT_MMAP64 SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_MMAP64 SI_GLIBC || SI_SOLARIS
#define SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO (SI_GLIBC || SI_ANDROID)
#define SANITIZER_INTERCEPT_MEMALIGN (!SI_FREEBSD && !SI_MAC && !SI_NETBSD)
#define SANITIZER_INTERCEPT___LIBC_MEMALIGN SI_GLIBC
@@ -484,6 +497,7 @@
#define SANITIZER_INTERCEPT_ALIGNED_ALLOC (!SI_MAC)
#define SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE (!SI_MAC && !SI_NETBSD)
#define SANITIZER_INTERCEPT_MCHECK_MPROBE SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_WCSLEN 1
#define SANITIZER_INTERCEPT_WCSCAT SI_POSIX
#define SANITIZER_INTERCEPT_WCSDUP SI_POSIX
#define SANITIZER_INTERCEPT_SIGNAL_AND_SIGACTION (!SI_WINDOWS && SI_NOT_FUCHSIA)
@@ -496,7 +510,8 @@
#define SANITIZER_INTERCEPT_GID_FROM_GROUP SI_NETBSD
#define SANITIZER_INTERCEPT_ACCESS (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_FACCESSAT (SI_NETBSD || SI_FREEBSD)
-#define SANITIZER_INTERCEPT_GETGROUPLIST SI_NETBSD
+#define SANITIZER_INTERCEPT_GETGROUPLIST \
+ (SI_NETBSD || SI_FREEBSD || SI_LINUX)
#define SANITIZER_INTERCEPT_STRLCPY \
(SI_NETBSD || SI_FREEBSD || SI_MAC || SI_ANDROID)
@@ -517,10 +532,11 @@
#define SANITIZER_INTERCEPT_DEVNAME_R (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_FGETLN (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_STRMODE (SI_NETBSD || SI_FREEBSD)
-#define SANITIZER_INTERCEPT_TTYENT SI_NETBSD
-#define SANITIZER_INTERCEPT_PROTOENT (SI_NETBSD || SI_LINUX)
+#define SANITIZER_INTERCEPT_TTYENT (SI_NETBSD || SI_FREEBSD)
+#define SANITIZER_INTERCEPT_TTYENTPATH SI_NETBSD
+#define SANITIZER_INTERCEPT_PROTOENT (SI_LINUX || SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_PROTOENT_R SI_GLIBC
-#define SANITIZER_INTERCEPT_NETENT SI_NETBSD
+#define SANITIZER_INTERCEPT_NETENT (SI_LINUX || SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_SETVBUF \
(SI_NETBSD || SI_FREEBSD || SI_LINUX || SI_MAC)
#define SANITIZER_INTERCEPT_GETMNTINFO (SI_NETBSD || SI_FREEBSD || SI_MAC)
@@ -536,17 +552,17 @@
#define SANITIZER_INTERCEPT_MODCTL SI_NETBSD
#define SANITIZER_INTERCEPT_CAPSICUM SI_FREEBSD
#define SANITIZER_INTERCEPT_STRTONUM (SI_NETBSD || SI_FREEBSD)
-#define SANITIZER_INTERCEPT_FPARSELN SI_NETBSD
+#define SANITIZER_INTERCEPT_FPARSELN (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_STATVFS1 SI_NETBSD
#define SANITIZER_INTERCEPT_STRTOI SI_NETBSD
#define SANITIZER_INTERCEPT_CAPSICUM SI_FREEBSD
#define SANITIZER_INTERCEPT_SHA1 SI_NETBSD
#define SANITIZER_INTERCEPT_MD4 SI_NETBSD
#define SANITIZER_INTERCEPT_RMD160 SI_NETBSD
-#define SANITIZER_INTERCEPT_MD5 SI_NETBSD
+#define SANITIZER_INTERCEPT_MD5 (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_FSEEK (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_MD2 SI_NETBSD
-#define SANITIZER_INTERCEPT_SHA2 SI_NETBSD
+#define SANITIZER_INTERCEPT_SHA2 (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_CDB SI_NETBSD
#define SANITIZER_INTERCEPT_VIS (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_POPEN SI_POSIX
@@ -559,25 +575,29 @@
#define SANITIZER_INTERCEPT_FDEVNAME SI_FREEBSD
#define SANITIZER_INTERCEPT_GETUSERSHELL (SI_POSIX && !SI_ANDROID)
#define SANITIZER_INTERCEPT_SL_INIT (SI_FREEBSD || SI_NETBSD)
-#define SANITIZER_INTERCEPT_CRYPT (SI_POSIX && !SI_ANDROID)
-#define SANITIZER_INTERCEPT_CRYPT_R (SI_LINUX && !SI_ANDROID)
#define SANITIZER_INTERCEPT_GETRANDOM \
- ((SI_LINUX && __GLIBC_PREREQ(2, 25)) || SI_FREEBSD)
+ ((SI_LINUX && __GLIBC_PREREQ(2, 25)) || SI_FREEBSD || SI_SOLARIS)
#define SANITIZER_INTERCEPT___CXA_ATEXIT SI_NETBSD
#define SANITIZER_INTERCEPT_ATEXIT SI_NETBSD
#define SANITIZER_INTERCEPT_PTHREAD_ATFORK SI_NETBSD
-#define SANITIZER_INTERCEPT_GETENTROPY SI_FREEBSD
+#define SANITIZER_INTERCEPT_GETENTROPY \
+ ((SI_LINUX && __GLIBC_PREREQ(2, 25)) || SI_FREEBSD || SI_SOLARIS)
#define SANITIZER_INTERCEPT_QSORT \
(SI_POSIX && !SI_IOSSIM && !SI_WATCHOS && !SI_TVOS && !SI_ANDROID)
#define SANITIZER_INTERCEPT_QSORT_R SI_GLIBC
+#define SANITIZER_INTERCEPT_BSEARCH \
+ (SI_POSIX && !SI_IOSSIM && !SI_WATCHOS && !SI_TVOS && !SI_ANDROID)
// sigaltstack on i386 macOS cannot be intercepted due to setjmp()
// calling it and assuming that it does not clobber registers.
#define SANITIZER_INTERCEPT_SIGALTSTACK \
- (SI_POSIX && !(SANITIZER_MAC && SANITIZER_I386))
+ (SI_POSIX && !(SANITIZER_APPLE && SANITIZER_I386))
#define SANITIZER_INTERCEPT_UNAME (SI_POSIX && !SI_FREEBSD)
#define SANITIZER_INTERCEPT___XUNAME SI_FREEBSD
#define SANITIZER_INTERCEPT_FLOPEN SI_FREEBSD
+#define SANITIZER_INTERCEPT_PROCCTL SI_FREEBSD
+#define SANITIZER_INTERCEPT_ARGP_PARSE SI_GLIBC
+#define SANITIZER_INTERCEPT_CPUSET_GETAFFINITY SI_FREEBSD
// This macro gives a way for downstream users to override the above
// interceptor macros irrespective of the platform they are on. They have
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cpp
index b5a45ae72cd9..38f968d533b1 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cpp
@@ -17,6 +17,7 @@
#include <sys/capsicum.h>
#include <sys/consio.h>
+#include <sys/cpuset.h>
#include <sys/filio.h>
#include <sys/ipc.h>
#include <sys/kbio.h>
@@ -69,11 +70,17 @@
#include <semaphore.h>
#include <signal.h>
#include <stddef.h>
+#include <md5.h>
+#include <sha224.h>
+#include <sha256.h>
+#include <sha384.h>
+#include <sha512.h>
#include <stdio.h>
#include <stringlist.h>
#include <term.h>
#include <termios.h>
#include <time.h>
+#include <ttyent.h>
#include <utime.h>
#include <utmpx.h>
#include <vis.h>
@@ -97,6 +104,7 @@ void *__sanitizer_get_link_map_by_dlopen_handle(void *handle) {
return internal_dlinfo(handle, RTLD_DI_LINKMAP, &p) == 0 ? p : nullptr;
}
+unsigned struct_cpuset_sz = sizeof(cpuset_t);
unsigned struct_cap_rights_sz = sizeof(cap_rights_t);
unsigned struct_utsname_sz = sizeof(struct utsname);
unsigned struct_stat_sz = sizeof(struct stat);
@@ -124,7 +132,7 @@ unsigned struct_sigevent_sz = sizeof(struct sigevent);
unsigned struct_sched_param_sz = sizeof(struct sched_param);
unsigned struct_statfs_sz = sizeof(struct statfs);
unsigned struct_sockaddr_sz = sizeof(struct sockaddr);
-unsigned ucontext_t_sz = sizeof(ucontext_t);
+unsigned ucontext_t_sz(void *ctx) { return sizeof(ucontext_t); }
unsigned struct_rlimit_sz = sizeof(struct rlimit);
unsigned struct_timespec_sz = sizeof(struct timespec);
unsigned struct_utimbuf_sz = sizeof(struct utimbuf);
@@ -167,12 +175,21 @@ uptr __sanitizer_in_addr_sz(int af) {
return 0;
}
+// For FreeBSD the actual size of a directory entry is not always in d_reclen.
+// Use the appropriate macro to get the correct size for all cases (e.g. NFS).
+u16 __sanitizer_dirsiz(const __sanitizer_dirent *dp) {
+ return _GENERIC_DIRSIZ(dp);
+}
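Usage sketch for the helper (the caller is hypothetical): size each entry with the _GENERIC_DIRSIZ-backed helper instead of trusting d_reclen, which the comment notes can be wrong, e.g. on NFS:

    static void CheckEntry(const __sanitizer_dirent *dp) {
      u16 n = __sanitizer_dirsiz(dp);  // correct size even for NFS entries
      // ... tell the tool that exactly n bytes of *dp are meaningful ...
    }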
+
unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
int glob_nomatch = GLOB_NOMATCH;
int glob_altdirfunc = GLOB_ALTDIRFUNC;
+const int wordexp_wrde_dooffs = WRDE_DOOFFS;
unsigned path_max = PATH_MAX;
+int struct_ttyent_sz = sizeof(struct ttyent);
+
// ioctl arguments
unsigned struct_ifreq_sz = sizeof(struct ifreq);
unsigned struct_termios_sz = sizeof(struct termios);
@@ -196,6 +213,10 @@ unsigned struct_audio_buf_info_sz = sizeof(struct audio_buf_info);
unsigned struct_ppp_stats_sz = sizeof(struct ppp_stats);
unsigned struct_sioc_sg_req_sz = sizeof(struct sioc_sg_req);
unsigned struct_sioc_vif_req_sz = sizeof(struct sioc_vif_req);
+unsigned struct_procctl_reaper_status_sz =
+    sizeof(struct __sanitizer_procctl_reaper_status);
+unsigned struct_procctl_reaper_pidinfo_sz =
+    sizeof(struct __sanitizer_procctl_reaper_pidinfo);
+unsigned struct_procctl_reaper_pids_sz =
+    sizeof(struct __sanitizer_procctl_reaper_pids);
+unsigned struct_procctl_reaper_kill_sz =
+    sizeof(struct __sanitizer_procctl_reaper_kill);
const unsigned long __sanitizer_bufsiz = BUFSIZ;
const unsigned IOCTL_NOT_PRESENT = 0;
@@ -357,6 +378,22 @@ const int si_SEGV_MAPERR = SEGV_MAPERR;
const int si_SEGV_ACCERR = SEGV_ACCERR;
const int unvis_valid = UNVIS_VALID;
const int unvis_validpush = UNVIS_VALIDPUSH;
+
+const unsigned MD5_CTX_sz = sizeof(MD5_CTX);
+const unsigned MD5_return_length = MD5_DIGEST_STRING_LENGTH;
+
+#define SHA2_CONST(LEN) \
+ const unsigned SHA##LEN##_CTX_sz = sizeof(SHA##LEN##_CTX); \
+ const unsigned SHA##LEN##_return_length = SHA##LEN##_DIGEST_STRING_LENGTH; \
+ const unsigned SHA##LEN##_block_length = SHA##LEN##_BLOCK_LENGTH; \
+ const unsigned SHA##LEN##_digest_length = SHA##LEN##_DIGEST_LENGTH
+
+SHA2_CONST(224);
+SHA2_CONST(256);
+SHA2_CONST(384);
+SHA2_CONST(512);
+
+#undef SHA2_CONST
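The token-pasting macro stamps four constants per digest width; for reference, SHA2_CONST(256) expands to exactly:

    const unsigned SHA256_CTX_sz = sizeof(SHA256_CTX);
    const unsigned SHA256_return_length = SHA256_DIGEST_STRING_LENGTH;
    const unsigned SHA256_block_length = SHA256_BLOCK_LENGTH;
    const unsigned SHA256_digest_length = SHA256_DIGEST_LENGTH;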
} // namespace __sanitizer
using namespace __sanitizer;
@@ -529,4 +566,5 @@ COMPILER_CHECK(__sanitizer_XDR_FREE == XDR_FREE);
CHECK_TYPE_SIZE(sem_t);
COMPILER_CHECK(sizeof(__sanitizer_cap_rights_t) >= sizeof(cap_rights_t));
+COMPILER_CHECK(sizeof(__sanitizer_cpuset_t) >= sizeof(cpuset_t));
#endif // SANITIZER_FREEBSD
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.h
index 5e0ca9c7d782..43b8a38f39be 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.h
@@ -16,26 +16,26 @@
#if SANITIZER_FREEBSD
-#include "sanitizer_internal_defs.h"
-#include "sanitizer_platform.h"
-#include "sanitizer_platform_limits_posix.h"
+# include "sanitizer_internal_defs.h"
+# include "sanitizer_platform.h"
+# include "sanitizer_platform_limits_posix.h"
// Get sys/_types.h, because that tells us whether 64-bit inodes are
// used in struct dirent below.
-#include <sys/_types.h>
+# include <sys/_types.h>
namespace __sanitizer {
void *__sanitizer_get_link_map_by_dlopen_handle(void *handle);
-#define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \
- (link_map *)__sanitizer_get_link_map_by_dlopen_handle(handle)
+# define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \
+ (link_map *)__sanitizer_get_link_map_by_dlopen_handle(handle)
extern unsigned struct_utsname_sz;
extern unsigned struct_stat_sz;
-#if defined(__powerpc64__)
+# if defined(__powerpc64__)
const unsigned struct___old_kernel_stat_sz = 0;
-#else
+# else
const unsigned struct___old_kernel_stat_sz = 32;
-#endif
+# endif
extern unsigned struct_rusage_sz;
extern unsigned siginfo_t_sz;
extern unsigned struct_itimerval_sz;
@@ -57,7 +57,7 @@ extern unsigned struct_sched_param_sz;
extern unsigned struct_statfs64_sz;
extern unsigned struct_statfs_sz;
extern unsigned struct_sockaddr_sz;
-extern unsigned ucontext_t_sz;
+unsigned ucontext_t_sz(void *ctx);
extern unsigned struct_rlimit_sz;
extern unsigned struct_utimbuf_sz;
extern unsigned struct_timespec_sz;
@@ -114,11 +114,24 @@ struct __sanitizer_ipc_perm {
long key;
};
-#if !defined(__i386__)
+struct __sanitizer_protoent {
+ char *p_name;
+ char **p_aliases;
+ int p_proto;
+};
+
+struct __sanitizer_netent {
+ char *n_name;
+ char **n_aliases;
+ int n_addrtype;
+ u32 n_net;
+};
+
+# if !defined(__i386__)
typedef long long __sanitizer_time_t;
-#else
+# else
typedef long __sanitizer_time_t;
-#endif
+# endif
struct __sanitizer_shmid_ds {
__sanitizer_ipc_perm shm_perm;
@@ -147,7 +160,7 @@ struct __sanitizer_ifaddrs {
unsigned int ifa_flags;
void *ifa_addr; // (struct sockaddr *)
void *ifa_netmask; // (struct sockaddr *)
-#undef ifa_dstaddr
+# undef ifa_dstaddr
void *ifa_dstaddr; // (struct sockaddr *)
void *ifa_data;
};
@@ -229,37 +242,43 @@ struct __sanitizer_cmsghdr {
};
struct __sanitizer_dirent {
-#if defined(__INO64)
+# if defined(__INO64)
unsigned long long d_fileno;
unsigned long long d_off;
-#else
+# else
unsigned int d_fileno;
-#endif
+# endif
unsigned short d_reclen;
- // more fields that we don't care about
+ u8 d_type;
+ u8 d_pad0;
+ u16 d_namlen;
+ u16 d_pad1;
+ char d_name[256];
};
+u16 __sanitizer_dirsiz(const __sanitizer_dirent *dp);
+
// 'clock_t' is 32 bits wide on x64 FreeBSD
typedef int __sanitizer_clock_t;
typedef int __sanitizer_clockid_t;
-#if defined(_LP64) || defined(__x86_64__) || defined(__powerpc__) || \
- defined(__mips__)
+# if defined(_LP64) || defined(__x86_64__) || defined(__powerpc__) || \
+ defined(__mips__)
typedef unsigned __sanitizer___kernel_uid_t;
typedef unsigned __sanitizer___kernel_gid_t;
-#else
+# else
typedef unsigned short __sanitizer___kernel_uid_t;
typedef unsigned short __sanitizer___kernel_gid_t;
-#endif
+# endif
typedef long long __sanitizer___kernel_off_t;
-#if defined(__powerpc__) || defined(__mips__)
+# if defined(__powerpc__) || defined(__mips__)
typedef unsigned int __sanitizer___kernel_old_uid_t;
typedef unsigned int __sanitizer___kernel_old_gid_t;
-#else
+# else
typedef unsigned short __sanitizer___kernel_old_uid_t;
typedef unsigned short __sanitizer___kernel_old_gid_t;
-#endif
+# endif
typedef long long __sanitizer___kernel_loff_t;
typedef struct {
@@ -366,9 +385,12 @@ struct __sanitizer_glob_t {
extern int glob_nomatch;
extern int glob_altdirfunc;
+extern const int wordexp_wrde_dooffs;
extern unsigned path_max;
+extern int struct_ttyent_sz;
+
struct __sanitizer_wordexp_t {
uptr we_wordc;
char **we_wordv;
@@ -398,39 +420,81 @@ struct __sanitizer_ifconf {
} ifc_ifcu;
};
-#define IOC_NRBITS 8
-#define IOC_TYPEBITS 8
-#if defined(__powerpc__) || defined(__powerpc64__) || defined(__mips__)
-#define IOC_SIZEBITS 13
-#define IOC_DIRBITS 3
-#define IOC_NONE 1U
-#define IOC_WRITE 4U
-#define IOC_READ 2U
-#else
-#define IOC_SIZEBITS 14
-#define IOC_DIRBITS 2
-#define IOC_NONE 0U
-#define IOC_WRITE 1U
-#define IOC_READ 2U
-#endif
-#define IOC_NRMASK ((1 << IOC_NRBITS) - 1)
-#define IOC_TYPEMASK ((1 << IOC_TYPEBITS) - 1)
-#define IOC_SIZEMASK ((1 << IOC_SIZEBITS) - 1)
-#if defined(IOC_DIRMASK)
-#undef IOC_DIRMASK
-#endif
-#define IOC_DIRMASK ((1 << IOC_DIRBITS) - 1)
-#define IOC_NRSHIFT 0
-#define IOC_TYPESHIFT (IOC_NRSHIFT + IOC_NRBITS)
-#define IOC_SIZESHIFT (IOC_TYPESHIFT + IOC_TYPEBITS)
-#define IOC_DIRSHIFT (IOC_SIZESHIFT + IOC_SIZEBITS)
-#define EVIOC_EV_MAX 0x1f
-#define EVIOC_ABS_MAX 0x3f
-
-#define IOC_DIR(nr) (((nr) >> IOC_DIRSHIFT) & IOC_DIRMASK)
-#define IOC_TYPE(nr) (((nr) >> IOC_TYPESHIFT) & IOC_TYPEMASK)
-#define IOC_NR(nr) (((nr) >> IOC_NRSHIFT) & IOC_NRMASK)
-#define IOC_SIZE(nr) (((nr) >> IOC_SIZESHIFT) & IOC_SIZEMASK)
+struct __sanitizer__ttyent {
+ char *ty_name;
+ char *ty_getty;
+ char *ty_type;
+ int ty_status;
+ char *ty_window;
+ char *ty_comment;
+ char *ty_group;
+};
+
+// procctl reaper data for PROCCTL_REAPER flags
+struct __sanitizer_procctl_reaper_status {
+ unsigned int rs_flags;
+ unsigned int rs_children;
+ unsigned int rs_descendants;
+ pid_t rs_reaper;
+ pid_t rs_pid;
+ unsigned int rs_pad0[15];
+};
+
+struct __sanitizer_procctl_reaper_pidinfo {
+ pid_t pi_pid;
+ pid_t pi_subtree;
+ unsigned int pi_flags;
+ unsigned int pi_pad0[15];
+};
+
+struct __sanitizer_procctl_reaper_pids {
+ unsigned int rp_count;
+ unsigned int rp_pad0[15];
+  struct __sanitizer_procctl_reaper_pidinfo *rp_pids;
+};
+
+struct __sanitizer_procctl_reaper_kill {
+ int rk_sig;
+ unsigned int rk_flags;
+ pid_t rk_subtree;
+ unsigned int rk_killed;
+ pid_t rk_fpid;
+ unsigned int rk_pad[15];
+};
+
+# define IOC_NRBITS 8
+# define IOC_TYPEBITS 8
+# if defined(__powerpc__) || defined(__powerpc64__) || defined(__mips__)
+# define IOC_SIZEBITS 13
+# define IOC_DIRBITS 3
+# define IOC_NONE 1U
+# define IOC_WRITE 4U
+# define IOC_READ 2U
+# else
+# define IOC_SIZEBITS 14
+# define IOC_DIRBITS 2
+# define IOC_NONE 0U
+# define IOC_WRITE 1U
+# define IOC_READ 2U
+# endif
+# define IOC_NRMASK ((1 << IOC_NRBITS) - 1)
+# define IOC_TYPEMASK ((1 << IOC_TYPEBITS) - 1)
+# define IOC_SIZEMASK ((1 << IOC_SIZEBITS) - 1)
+# if defined(IOC_DIRMASK)
+# undef IOC_DIRMASK
+# endif
+# define IOC_DIRMASK ((1 << IOC_DIRBITS) - 1)
+# define IOC_NRSHIFT 0
+# define IOC_TYPESHIFT (IOC_NRSHIFT + IOC_NRBITS)
+# define IOC_SIZESHIFT (IOC_TYPESHIFT + IOC_TYPEBITS)
+# define IOC_DIRSHIFT (IOC_SIZESHIFT + IOC_SIZEBITS)
+# define EVIOC_EV_MAX 0x1f
+# define EVIOC_ABS_MAX 0x3f
+
+# define IOC_DIR(nr) (((nr) >> IOC_DIRSHIFT) & IOC_DIRMASK)
+# define IOC_TYPE(nr) (((nr) >> IOC_TYPESHIFT) & IOC_TYPEMASK)
+# define IOC_NR(nr) (((nr) >> IOC_NRSHIFT) & IOC_NRMASK)
+# define IOC_SIZE(nr) (((nr) >> IOC_SIZESHIFT) & IOC_SIZEMASK)
extern unsigned struct_ifreq_sz;
extern unsigned struct_termios_sz;
@@ -454,6 +518,11 @@ extern unsigned struct_ppp_stats_sz;
extern unsigned struct_sioc_sg_req_sz;
extern unsigned struct_sioc_vif_req_sz;
+extern unsigned struct_procctl_reaper_status_sz;
+extern unsigned struct_procctl_reaper_pidinfo_sz;
+extern unsigned struct_procctl_reaper_pids_sz;
+extern unsigned struct_procctl_reaper_kill_sz;
+
// ioctl request identifiers
// A special value to mark ioctls that are not present on the target platform,
@@ -621,6 +690,22 @@ extern unsigned IOCTL_KDSKBMODE;
extern const int si_SEGV_MAPERR;
extern const int si_SEGV_ACCERR;
+extern const unsigned MD5_CTX_sz;
+extern const unsigned MD5_return_length;
+
+#define SHA2_EXTERN(LEN) \
+ extern const unsigned SHA##LEN##_CTX_sz; \
+ extern const unsigned SHA##LEN##_return_length; \
+ extern const unsigned SHA##LEN##_block_length; \
+ extern const unsigned SHA##LEN##_digest_length
+
+SHA2_EXTERN(224);
+SHA2_EXTERN(256);
+SHA2_EXTERN(384);
+SHA2_EXTERN(512);
+
+#undef SHA2_EXTERN
+
struct __sanitizer_cap_rights {
u64 cr_rights[2];
};
@@ -630,26 +715,39 @@ extern unsigned struct_cap_rights_sz;
extern unsigned struct_fstab_sz;
extern unsigned struct_StringList_sz;
+
+struct __sanitizer_cpuset {
+#if __FreeBSD_version >= 1400090
+ long __bits[(1024 + (sizeof(long) * 8) - 1) / (sizeof(long) * 8)];
+#else
+ long __bits[(256 + (sizeof(long) * 8) - 1) / (sizeof(long) * 8)];
+#endif
+};
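Worked sizes, assuming an 8-byte long: (1024 + 63) / 64 = 16 longs (128 bytes) on FreeBSD >= 1400090, and (256 + 63) / 64 = 4 longs (32 bytes) before it, tracking the kernel's growth of the cpuset bitmap.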
+
+typedef struct __sanitizer_cpuset __sanitizer_cpuset_t;
+extern unsigned struct_cpuset_sz;
+
+typedef unsigned long long __sanitizer_eventfd_t;
} // namespace __sanitizer
-#define CHECK_TYPE_SIZE(TYPE) \
- COMPILER_CHECK(sizeof(__sanitizer_##TYPE) == sizeof(TYPE))
+# define CHECK_TYPE_SIZE(TYPE) \
+ COMPILER_CHECK(sizeof(__sanitizer_##TYPE) == sizeof(TYPE))
-#define CHECK_SIZE_AND_OFFSET(CLASS, MEMBER) \
- COMPILER_CHECK(sizeof(((__sanitizer_##CLASS *)NULL)->MEMBER) == \
- sizeof(((CLASS *)NULL)->MEMBER)); \
- COMPILER_CHECK(offsetof(__sanitizer_##CLASS, MEMBER) == \
- offsetof(CLASS, MEMBER))
+# define CHECK_SIZE_AND_OFFSET(CLASS, MEMBER) \
+ COMPILER_CHECK(sizeof(((__sanitizer_##CLASS *)NULL)->MEMBER) == \
+ sizeof(((CLASS *)NULL)->MEMBER)); \
+ COMPILER_CHECK(offsetof(__sanitizer_##CLASS, MEMBER) == \
+ offsetof(CLASS, MEMBER))
// For sigaction, which is a function and struct at the same time,
// and thus requires explicit "struct" in sizeof() expression.
-#define CHECK_STRUCT_SIZE_AND_OFFSET(CLASS, MEMBER) \
- COMPILER_CHECK(sizeof(((struct __sanitizer_##CLASS *)NULL)->MEMBER) == \
- sizeof(((struct CLASS *)NULL)->MEMBER)); \
- COMPILER_CHECK(offsetof(struct __sanitizer_##CLASS, MEMBER) == \
- offsetof(struct CLASS, MEMBER))
+# define CHECK_STRUCT_SIZE_AND_OFFSET(CLASS, MEMBER) \
+ COMPILER_CHECK(sizeof(((struct __sanitizer_##CLASS *)NULL)->MEMBER) == \
+ sizeof(((struct CLASS *)NULL)->MEMBER)); \
+ COMPILER_CHECK(offsetof(struct __sanitizer_##CLASS, MEMBER) == \
+ offsetof(struct CLASS, MEMBER))
-#define SIGACTION_SYMNAME sigaction
+# define SIGACTION_SYMNAME sigaction
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_linux.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_linux.cpp
index c51327e1269e..bf0f355847cb 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_linux.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_linux.cpp
@@ -28,44 +28,39 @@
// are not defined anywhere in userspace headers. Fake them. This seems to work
// fine with newer headers, too.
#include <linux/posix_types.h>
-#if defined(__x86_64__) || defined(__mips__)
-#include <sys/stat.h>
-#else
-#define ino_t __kernel_ino_t
-#define mode_t __kernel_mode_t
-#define nlink_t __kernel_nlink_t
-#define uid_t __kernel_uid_t
-#define gid_t __kernel_gid_t
-#define off_t __kernel_off_t
-#define time_t __kernel_time_t
+# if defined(__x86_64__) || defined(__mips__) || defined(__hexagon__)
+# include <sys/stat.h>
+# else
+# define ino_t __kernel_ino_t
+# define mode_t __kernel_mode_t
+# define nlink_t __kernel_nlink_t
+# define uid_t __kernel_uid_t
+# define gid_t __kernel_gid_t
+# define off_t __kernel_off_t
+# define time_t __kernel_time_t
// This header seems to contain the definitions of _kernel_ stat* structs.
-#include <asm/stat.h>
-#undef ino_t
-#undef mode_t
-#undef nlink_t
-#undef uid_t
-#undef gid_t
-#undef off_t
-#endif
-
-#include <linux/aio_abi.h>
-
-#if !SANITIZER_ANDROID
-#include <sys/statfs.h>
-#include <linux/perf_event.h>
-#endif
+# include <asm/stat.h>
+# undef ino_t
+# undef mode_t
+# undef nlink_t
+# undef uid_t
+# undef gid_t
+# undef off_t
+# endif
+
+# include <linux/aio_abi.h>
+
+# if !SANITIZER_ANDROID
+# include <sys/statfs.h>
+# include <linux/perf_event.h>
+# endif
using namespace __sanitizer;
-namespace __sanitizer {
-#if !SANITIZER_ANDROID
- unsigned struct_statfs64_sz = sizeof(struct statfs64);
-#endif
-} // namespace __sanitizer
-
-#if !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__aarch64__)\
- && !defined(__mips__) && !defined(__s390__)\
- && !defined(__sparc__) && !defined(__riscv)
+# if !defined(__powerpc64__) && !defined(__x86_64__) && \
+ !defined(__aarch64__) && !defined(__mips__) && !defined(__s390__) && \
+ !defined(__sparc__) && !defined(__riscv) && !defined(__hexagon__) && \
+ !defined(__loongarch__)
COMPILER_CHECK(struct___old_kernel_stat_sz == sizeof(struct __old_kernel_stat));
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.cpp
index c8f2aa5dba4a..c40877ba48d0 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.cpp
@@ -554,7 +554,7 @@ unsigned struct_tms_sz = sizeof(struct tms);
unsigned struct_sigevent_sz = sizeof(struct sigevent);
unsigned struct_sched_param_sz = sizeof(struct sched_param);
unsigned struct_sockaddr_sz = sizeof(struct sockaddr);
-unsigned ucontext_t_sz = sizeof(ucontext_t);
+unsigned ucontext_t_sz(void *ctx) { return sizeof(ucontext_t); }
unsigned struct_rlimit_sz = sizeof(struct rlimit);
unsigned struct_timespec_sz = sizeof(struct timespec);
unsigned struct_sembuf_sz = sizeof(struct sembuf);
@@ -666,6 +666,7 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
int glob_nomatch = GLOB_NOMATCH;
int glob_altdirfunc = GLOB_ALTDIRFUNC;
+const int wordexp_wrde_dooffs = WRDE_DOOFFS;
unsigned path_max = PATH_MAX;
@@ -2341,8 +2342,6 @@ unsigned IOCTL_TIOCDRAIN = TIOCDRAIN;
unsigned IOCTL_TIOCGFLAGS = TIOCGFLAGS;
unsigned IOCTL_TIOCSFLAGS = TIOCSFLAGS;
unsigned IOCTL_TIOCDCDTIMESTAMP = TIOCDCDTIMESTAMP;
-unsigned IOCTL_TIOCRCVFRAME = TIOCRCVFRAME;
-unsigned IOCTL_TIOCXMTFRAME = TIOCXMTFRAME;
unsigned IOCTL_TIOCPTMGET = TIOCPTMGET;
unsigned IOCTL_TIOCGRANTPT = TIOCGRANTPT;
unsigned IOCTL_TIOCPTSNAME = TIOCPTSNAME;
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.h
index 9e28dcfef041..4c697b4d107d 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_netbsd.h
@@ -45,7 +45,7 @@ extern unsigned struct_stack_t_sz;
extern unsigned struct_sched_param_sz;
extern unsigned struct_statfs_sz;
extern unsigned struct_sockaddr_sz;
-extern unsigned ucontext_t_sz;
+unsigned ucontext_t_sz(void *ctx);
extern unsigned struct_rlimit_sz;
extern unsigned struct_utimbuf_sz;
@@ -394,6 +394,7 @@ struct __sanitizer_glob_t {
extern int glob_nomatch;
extern int glob_altdirfunc;
+extern const int wordexp_wrde_dooffs;
extern unsigned path_max;
@@ -2194,8 +2195,6 @@ extern unsigned IOCTL_TIOCDRAIN;
extern unsigned IOCTL_TIOCGFLAGS;
extern unsigned IOCTL_TIOCSFLAGS;
extern unsigned IOCTL_TIOCDCDTIMESTAMP;
-extern unsigned IOCTL_TIOCRCVFRAME;
-extern unsigned IOCTL_TIOCXMTFRAME;
extern unsigned IOCTL_TIOCPTMGET;
extern unsigned IOCTL_TIOCGRANTPT;
extern unsigned IOCTL_TIOCPTSNAME;
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_openbsd.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_openbsd.cpp
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_openbsd.cpp
+++ /dev/null
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_openbsd.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_openbsd.h
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_openbsd.h
+++ /dev/null
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp
index 6e5c330b98ef..6d61d276d77e 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp
@@ -18,12 +18,13 @@
// depends on _FILE_OFFSET_BITS setting.
// To get this "true" dirent definition, we undefine _FILE_OFFSET_BITS below.
#undef _FILE_OFFSET_BITS
+#undef _TIME_BITS
#endif
// Must go after undef _FILE_OFFSET_BITS.
#include "sanitizer_platform.h"
-#if SANITIZER_LINUX || SANITIZER_MAC
+#if SANITIZER_LINUX || SANITIZER_APPLE
// Must go after undef _FILE_OFFSET_BITS.
#include "sanitizer_glibc_version.h"
@@ -51,7 +52,7 @@
#include <time.h>
#include <wchar.h>
#include <regex.h>
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
#include <utmp.h>
#endif
@@ -73,7 +74,9 @@
#include <sys/vt.h>
#include <linux/cdrom.h>
#include <linux/fd.h>
+#if SANITIZER_ANDROID
#include <linux/fs.h>
+#endif
#include <linux/hdreg.h>
#include <linux/input.h>
#include <linux/ioctl.h>
@@ -91,10 +94,10 @@
#if SANITIZER_LINUX
# include <utime.h>
# include <sys/ptrace.h>
-#if defined(__mips64) || defined(__aarch64__) || defined(__arm__) || \
- SANITIZER_RISCV64
-# include <asm/ptrace.h>
-# ifdef __arm__
+# if defined(__mips64) || defined(__aarch64__) || defined(__arm__) || \
+      defined(__hexagon__) || defined(__loongarch__) || SANITIZER_RISCV64
+# include <asm/ptrace.h>
+# ifdef __arm__
typedef struct user_fpregs elf_fpregset_t;
# define ARM_VFPREGS_SIZE_ASAN (32 * 8 /*fpregs*/ + 4 /*fpscr*/)
# if !defined(ARM_VFPREGS_SIZE)
@@ -152,7 +155,6 @@ typedef struct user_fpregs elf_fpregset_t;
#include <linux/serial.h>
#include <sys/msg.h>
#include <sys/ipc.h>
-#include <crypt.h>
#endif // SANITIZER_ANDROID
#include <link.h>
@@ -163,22 +165,24 @@ typedef struct user_fpregs elf_fpregset_t;
#include <fstab.h>
#endif // SANITIZER_LINUX
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
#include <net/ethernet.h>
#include <sys/filio.h>
#include <sys/sockio.h>
#endif
// Include these after system headers to avoid name clashes and ambiguities.
-#include "sanitizer_internal_defs.h"
-#include "sanitizer_platform_limits_posix.h"
+# include "sanitizer_common.h"
+# include "sanitizer_internal_defs.h"
+# include "sanitizer_platform_interceptors.h"
+# include "sanitizer_platform_limits_posix.h"
namespace __sanitizer {
unsigned struct_utsname_sz = sizeof(struct utsname);
unsigned struct_stat_sz = sizeof(struct stat);
-#if !SANITIZER_IOS && !(SANITIZER_MAC && TARGET_CPU_ARM64)
+#if SANITIZER_HAS_STAT64
unsigned struct_stat64_sz = sizeof(struct stat64);
-#endif // !SANITIZER_IOS && !(SANITIZER_MAC && TARGET_CPU_ARM64)
+#endif // SANITIZER_HAS_STAT64
unsigned struct_rusage_sz = sizeof(struct rusage);
unsigned struct_tm_sz = sizeof(struct tm);
unsigned struct_passwd_sz = sizeof(struct passwd);
@@ -203,26 +207,60 @@ namespace __sanitizer {
unsigned struct_regex_sz = sizeof(regex_t);
unsigned struct_regmatch_sz = sizeof(regmatch_t);
-#if (SANITIZER_MAC && !TARGET_CPU_ARM64) && !SANITIZER_IOS
+#if SANITIZER_HAS_STATFS64
unsigned struct_statfs64_sz = sizeof(struct statfs64);
-#endif // (SANITIZER_MAC && !TARGET_CPU_ARM64) && !SANITIZER_IOS
+#endif // SANITIZER_HAS_STATFS64
-#if SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_MAC
+#if SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_APPLE
unsigned struct_fstab_sz = sizeof(struct fstab);
#endif // SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD ||
- // SANITIZER_MAC
+ // SANITIZER_APPLE
#if !SANITIZER_ANDROID
unsigned struct_statfs_sz = sizeof(struct statfs);
unsigned struct_sockaddr_sz = sizeof(struct sockaddr);
- unsigned ucontext_t_sz = sizeof(ucontext_t);
-#endif // !SANITIZER_ANDROID
-#if SANITIZER_LINUX
+ unsigned ucontext_t_sz(void *ctx) {
+# if SANITIZER_GLIBC && SANITIZER_X64
+ // Added in Linux kernel 3.4.0, merged to glibc in 2.16
+# ifndef FP_XSTATE_MAGIC1
+# define FP_XSTATE_MAGIC1 0x46505853U
+# endif
+ // See kernel arch/x86/kernel/fpu/signal.c for details.
+ const auto *fpregs = static_cast<ucontext_t *>(ctx)->uc_mcontext.fpregs;
+ // The member names differ across header versions, but the actual layout
+ // is always the same. So avoid using members, just use arithmetic.
+ const uint32_t *after_xmm =
+ reinterpret_cast<const uint32_t *>(fpregs + 1) - 24;
+ if (after_xmm[12] == FP_XSTATE_MAGIC1)
+ return reinterpret_cast<const char *>(fpregs) + after_xmm[13] -
+ static_cast<const char *>(ctx);
+# endif
+ return sizeof(ucontext_t);
+ }
+# endif // !SANITIZER_ANDROID
+
+# if SANITIZER_LINUX
unsigned struct_epoll_event_sz = sizeof(struct epoll_event);
unsigned struct_sysinfo_sz = sizeof(struct sysinfo);
unsigned __user_cap_header_struct_sz =
sizeof(struct __user_cap_header_struct);
- unsigned __user_cap_data_struct_sz = sizeof(struct __user_cap_data_struct);
+ unsigned __user_cap_data_struct_sz(void *hdrp) {
+ int u32s = 0;
+ if (hdrp) {
+ switch (((struct __user_cap_header_struct *)hdrp)->version) {
+ case _LINUX_CAPABILITY_VERSION_1:
+ u32s = _LINUX_CAPABILITY_U32S_1;
+ break;
+ case _LINUX_CAPABILITY_VERSION_2:
+ u32s = _LINUX_CAPABILITY_U32S_2;
+ break;
+ case _LINUX_CAPABILITY_VERSION_3:
+ u32s = _LINUX_CAPABILITY_U32S_3;
+ break;
+ }
+ }
+ return sizeof(struct __user_cap_data_struct) * u32s;
+ }
unsigned struct_new_utsname_sz = sizeof(struct new_utsname);
unsigned struct_old_utsname_sz = sizeof(struct old_utsname);
unsigned struct_oldold_utsname_sz = sizeof(struct oldold_utsname);
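Usage sketch for the version-aware helper above (the caller is hypothetical):

    struct __user_cap_header_struct hdr = {_LINUX_CAPABILITY_VERSION_3, 0};
    unsigned bytes = __user_cap_data_struct_sz(&hdr);
    // == sizeof(struct __user_cap_data_struct) * _LINUX_CAPABILITY_U32S_3,
    // i.e. exactly what capget(2) writes for a v3 header.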
@@ -235,24 +273,28 @@ namespace __sanitizer {
unsigned struct_itimerspec_sz = sizeof(struct itimerspec);
#endif // SANITIZER_LINUX
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#if SANITIZER_GLIBC
// Use pre-computed size of struct ustat to avoid <sys/ustat.h> which
// has been removed from glibc 2.28.
#if defined(__aarch64__) || defined(__s390x__) || defined(__mips64) || \
defined(__powerpc64__) || defined(__arch64__) || defined(__sparcv9) || \
defined(__x86_64__) || SANITIZER_RISCV64
#define SIZEOF_STRUCT_USTAT 32
-#elif defined(__arm__) || defined(__i386__) || defined(__mips__) \
- || defined(__powerpc__) || defined(__s390__) || defined(__sparc__)
-#define SIZEOF_STRUCT_USTAT 20
-#else
-#error Unknown size of struct ustat
-#endif
+# elif defined(__arm__) || defined(__i386__) || defined(__mips__) || \
+ defined(__powerpc__) || defined(__s390__) || defined(__sparc__) || \
+ defined(__hexagon__)
+# define SIZEOF_STRUCT_USTAT 20
+# elif defined(__loongarch__)
+  // Not used. The minimum glibc version available for LoongArch is 2.36,
+  // so the ustat() wrapper is already gone.
+# define SIZEOF_STRUCT_USTAT 0
+# else
+# error Unknown size of struct ustat
+# endif
unsigned struct_ustat_sz = SIZEOF_STRUCT_USTAT;
unsigned struct_rlimit64_sz = sizeof(struct rlimit64);
unsigned struct_statvfs64_sz = sizeof(struct statvfs64);
- unsigned struct_crypt_data_sz = sizeof(struct crypt_data);
-#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+#endif // SANITIZER_GLIBC
#if SANITIZER_LINUX && !SANITIZER_ANDROID
unsigned struct_timex_sz = sizeof(struct timex);
@@ -280,7 +322,7 @@ namespace __sanitizer {
int shmctl_shm_stat = (int)SHM_STAT;
#endif
-#if !SANITIZER_MAC && !SANITIZER_FREEBSD
+#if !SANITIZER_APPLE && !SANITIZER_FREEBSD
unsigned struct_utmp_sz = sizeof(struct utmp);
#endif
#if !SANITIZER_ANDROID
@@ -312,10 +354,14 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
int glob_altdirfunc = GLOB_ALTDIRFUNC;
#endif
+# if !SANITIZER_ANDROID
+ const int wordexp_wrde_dooffs = WRDE_DOOFFS;
+# endif // !SANITIZER_ANDROID
+
#if SANITIZER_LINUX && !SANITIZER_ANDROID && \
(defined(__i386) || defined(__x86_64) || defined(__mips64) || \
defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \
- defined(__s390__) || SANITIZER_RISCV64)
+     defined(__s390__) || defined(__loongarch__) || SANITIZER_RISCV64)
#if defined(__mips64) || defined(__powerpc64__) || defined(__arm__)
unsigned struct_user_regs_struct_sz = sizeof(struct pt_regs);
unsigned struct_user_fpregs_struct_sz = sizeof(elf_fpregset_t);
@@ -325,21 +371,24 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
#elif defined(__aarch64__)
unsigned struct_user_regs_struct_sz = sizeof(struct user_pt_regs);
unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fpsimd_state);
+#elif defined(__loongarch__)
+ unsigned struct_user_regs_struct_sz = sizeof(struct user_pt_regs);
+ unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fp_state);
#elif defined(__s390__)
unsigned struct_user_regs_struct_sz = sizeof(struct _user_regs_struct);
unsigned struct_user_fpregs_struct_sz = sizeof(struct _user_fpregs_struct);
#else
unsigned struct_user_regs_struct_sz = sizeof(struct user_regs_struct);
unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fpregs_struct);
-#endif // __mips64 || __powerpc64__ || __aarch64__
+#endif // __mips64 || __powerpc64__ || __aarch64__ || __loongarch__
#if defined(__x86_64) || defined(__mips64) || defined(__powerpc64__) || \
defined(__aarch64__) || defined(__arm__) || defined(__s390__) || \
- SANITIZER_RISCV64
+ defined(__loongarch__) || SANITIZER_RISCV64
unsigned struct_user_fpxregs_struct_sz = 0;
#else
unsigned struct_user_fpxregs_struct_sz = sizeof(struct user_fpxregs_struct);
#endif // __x86_64 || __mips64 || __powerpc64__ || __aarch64__ || __arm__
-// || __s390__
+// || __s390__ || __loongarch__
#ifdef __arm__
unsigned struct_user_vfpregs_struct_sz = ARM_VFPREGS_SIZE;
#else
@@ -484,7 +533,7 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
unsigned struct_ppp_stats_sz = sizeof(struct ppp_stats);
#endif // SANITIZER_GLIBC
-#if !SANITIZER_ANDROID && !SANITIZER_MAC
+#if !SANITIZER_ANDROID && !SANITIZER_APPLE
unsigned struct_sioc_sg_req_sz = sizeof(struct sioc_sg_req);
unsigned struct_sioc_vif_req_sz = sizeof(struct sioc_vif_req);
#endif
@@ -570,6 +619,14 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
unsigned IOCTL_BLKROGET = BLKROGET;
unsigned IOCTL_BLKROSET = BLKROSET;
unsigned IOCTL_BLKRRPART = BLKRRPART;
+ unsigned IOCTL_BLKFRASET = BLKFRASET;
+ unsigned IOCTL_BLKFRAGET = BLKFRAGET;
+ unsigned IOCTL_BLKSECTSET = BLKSECTSET;
+ unsigned IOCTL_BLKSECTGET = BLKSECTGET;
+ unsigned IOCTL_BLKSSZGET = BLKSSZGET;
+ unsigned IOCTL_BLKBSZGET = BLKBSZGET;
+ unsigned IOCTL_BLKBSZSET = BLKBSZSET;
+ unsigned IOCTL_BLKGETSIZE64 = BLKGETSIZE64;
unsigned IOCTL_CDROMAUDIOBUFSIZ = CDROMAUDIOBUFSIZ;
unsigned IOCTL_CDROMEJECT = CDROMEJECT;
unsigned IOCTL_CDROMEJECT_SW = CDROMEJECT_SW;
@@ -837,10 +894,10 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
unsigned IOCTL_EVIOCGPROP = IOCTL_NOT_PRESENT;
unsigned IOCTL_EVIOCSKEYCODE_V2 = IOCTL_NOT_PRESENT;
#endif
- unsigned IOCTL_FS_IOC_GETFLAGS = FS_IOC_GETFLAGS;
- unsigned IOCTL_FS_IOC_GETVERSION = FS_IOC_GETVERSION;
- unsigned IOCTL_FS_IOC_SETFLAGS = FS_IOC_SETFLAGS;
- unsigned IOCTL_FS_IOC_SETVERSION = FS_IOC_SETVERSION;
+ unsigned IOCTL_FS_IOC_GETFLAGS = _IOR('f', 1, long);
+ unsigned IOCTL_FS_IOC_GETVERSION = _IOR('v', 1, long);
+ unsigned IOCTL_FS_IOC_SETFLAGS = _IOW('f', 2, long);
+ unsigned IOCTL_FS_IOC_SETVERSION = _IOW('v', 2, long);
unsigned IOCTL_GIO_CMAP = GIO_CMAP;
unsigned IOCTL_GIO_FONT = GIO_FONT;
unsigned IOCTL_GIO_UNIMAP = GIO_UNIMAP;
@@ -1035,7 +1092,7 @@ CHECK_SIZE_AND_OFFSET(mmsghdr, msg_len);
COMPILER_CHECK(sizeof(__sanitizer_dirent) <= sizeof(dirent));
CHECK_SIZE_AND_OFFSET(dirent, d_ino);
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
CHECK_SIZE_AND_OFFSET(dirent, d_seekoff);
#elif SANITIZER_FREEBSD
// There is no 'd_off' field on FreeBSD.
@@ -1044,7 +1101,7 @@ CHECK_SIZE_AND_OFFSET(dirent, d_off);
#endif
CHECK_SIZE_AND_OFFSET(dirent, d_reclen);
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#if SANITIZER_GLIBC
COMPILER_CHECK(sizeof(__sanitizer_dirent64) <= sizeof(dirent64));
CHECK_SIZE_AND_OFFSET(dirent64, d_ino);
CHECK_SIZE_AND_OFFSET(dirent64, d_off);
@@ -1077,6 +1134,15 @@ CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_flags);
CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_restorer);
#endif
+#if SANITIZER_HAS_SIGINFO
+COMPILER_CHECK(alignof(siginfo_t) == alignof(__sanitizer_siginfo));
+using __sanitizer_siginfo_t = __sanitizer_siginfo;
+CHECK_TYPE_SIZE(siginfo_t);
+CHECK_SIZE_AND_OFFSET(siginfo_t, si_signo);
+CHECK_SIZE_AND_OFFSET(siginfo_t, si_errno);
+CHECK_SIZE_AND_OFFSET(siginfo_t, si_code);
+#endif
+
#if SANITIZER_LINUX
CHECK_TYPE_SIZE(__sysctl_args);
CHECK_SIZE_AND_OFFSET(__sysctl_args, name);
@@ -1217,7 +1283,7 @@ CHECK_SIZE_AND_OFFSET(passwd, pw_shell);
CHECK_SIZE_AND_OFFSET(passwd, pw_gecos);
#endif
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
CHECK_SIZE_AND_OFFSET(passwd, pw_change);
CHECK_SIZE_AND_OFFSET(passwd, pw_expire);
CHECK_SIZE_AND_OFFSET(passwd, pw_class);
@@ -1230,7 +1296,7 @@ CHECK_SIZE_AND_OFFSET(group, gr_passwd);
CHECK_SIZE_AND_OFFSET(group, gr_gid);
CHECK_SIZE_AND_OFFSET(group, gr_mem);
-#if HAVE_RPC_XDR_H
+#if HAVE_RPC_XDR_H && !SANITIZER_APPLE
CHECK_TYPE_SIZE(XDR);
CHECK_SIZE_AND_OFFSET(XDR, x_op);
CHECK_SIZE_AND_OFFSET(XDR, x_ops);
@@ -1285,4 +1351,4 @@ CHECK_TYPE_SIZE(sem_t);
COMPILER_CHECK(ARM_VFPREGS_SIZE == ARM_VFPREGS_SIZE_ASAN);
#endif
-#endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_MAC
+#endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_APPLE
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h
index 15bf7359fa6e..08371a108bb6 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h
@@ -14,10 +14,25 @@
#ifndef SANITIZER_PLATFORM_LIMITS_POSIX_H
#define SANITIZER_PLATFORM_LIMITS_POSIX_H
-#if SANITIZER_LINUX || SANITIZER_MAC
+#if SANITIZER_LINUX || SANITIZER_APPLE
#include "sanitizer_internal_defs.h"
#include "sanitizer_platform.h"
+#include "sanitizer_mallinfo.h"
+
+#if SANITIZER_APPLE
+#include <sys/cdefs.h>
+#if !__DARWIN_ONLY_64_BIT_INO_T
+#define SANITIZER_HAS_STAT64 1
+#define SANITIZER_HAS_STATFS64 1
+#else
+#define SANITIZER_HAS_STAT64 0
+#define SANITIZER_HAS_STATFS64 0
+#endif
+#elif SANITIZER_GLIBC || SANITIZER_ANDROID
+#define SANITIZER_HAS_STAT64 1
+#define SANITIZER_HAS_STATFS64 1
+#endif
#if defined(__sparc__)
// FIXME: This can't be included from tsan which does not support sparc yet.
@@ -29,7 +44,7 @@
namespace __sanitizer {
extern unsigned struct_utsname_sz;
extern unsigned struct_stat_sz;
-#if !SANITIZER_IOS
+#if SANITIZER_HAS_STAT64
extern unsigned struct_stat64_sz;
#endif
extern unsigned struct_rusage_sz;
@@ -49,7 +64,9 @@ extern unsigned struct_itimerspec_sz;
extern unsigned struct_sigevent_sz;
extern unsigned struct_stack_t_sz;
extern unsigned struct_sched_param_sz;
+#if SANITIZER_HAS_STATFS64
extern unsigned struct_statfs64_sz;
+#endif
extern unsigned struct_regex_sz;
extern unsigned struct_regmatch_sz;
@@ -57,12 +74,12 @@ extern unsigned struct_regmatch_sz;
extern unsigned struct_fstab_sz;
extern unsigned struct_statfs_sz;
extern unsigned struct_sockaddr_sz;
-extern unsigned ucontext_t_sz;
-#endif // !SANITIZER_ANDROID
+unsigned ucontext_t_sz(void *uctx);
+# endif // !SANITIZER_ANDROID
-#if SANITIZER_LINUX
+# if SANITIZER_LINUX
-#if defined(__x86_64__)
+# if defined(__x86_64__)
const unsigned struct_kernel_stat_sz = 144;
const unsigned struct_kernel_stat64_sz = 0;
#elif defined(__i386__)
@@ -81,9 +98,10 @@ const unsigned struct_kernel_stat64_sz = 104;
const unsigned struct_kernel_stat_sz = 144;
const unsigned struct_kernel_stat64_sz = 104;
#elif defined(__mips__)
-const unsigned struct_kernel_stat_sz = SANITIZER_ANDROID
- ? FIRST_32_SECOND_64(104, 128)
- : FIRST_32_SECOND_64(160, 216);
+const unsigned struct_kernel_stat_sz =
+ SANITIZER_ANDROID
+ ? FIRST_32_SECOND_64(104, 128)
+ : FIRST_32_SECOND_64((_MIPS_SIM == _ABIN32) ? 176 : 160, 216);
const unsigned struct_kernel_stat64_sz = 104;
#elif defined(__s390__) && !defined(__s390x__)
const unsigned struct_kernel_stat_sz = 64;
@@ -102,7 +120,13 @@ const unsigned struct_kernel_stat64_sz = 104;
#elif SANITIZER_RISCV64
const unsigned struct_kernel_stat_sz = 128;
const unsigned struct_kernel_stat64_sz = 0; // RISCV64 does not use stat64
-#endif
+# elif defined(__hexagon__)
+const unsigned struct_kernel_stat_sz = 128;
+const unsigned struct_kernel_stat64_sz = 0;
+# elif defined(__loongarch__)
+const unsigned struct_kernel_stat_sz = 128;
+const unsigned struct_kernel_stat64_sz = 0;
+# endif
struct __sanitizer_perf_event_attr {
unsigned type;
unsigned size;
@@ -112,7 +136,7 @@ struct __sanitizer_perf_event_attr {
extern unsigned struct_epoll_event_sz;
extern unsigned struct_sysinfo_sz;
extern unsigned __user_cap_header_struct_sz;
-extern unsigned __user_cap_data_struct_sz;
+extern unsigned __user_cap_data_struct_sz(void *hdrp);
extern unsigned struct_new_utsname_sz;
extern unsigned struct_old_utsname_sz;
extern unsigned struct_oldold_utsname_sz;
@@ -122,7 +146,8 @@ const unsigned struct_kexec_segment_sz = 4 * sizeof(unsigned long);
#if SANITIZER_LINUX
-#if defined(__powerpc64__) || defined(__riscv) || defined(__s390__)
+#if defined(__powerpc64__) || defined(__s390__) || defined(__loongarch__) || \
+ defined(__riscv)
const unsigned struct___old_kernel_stat_sz = 0;
#elif !defined(__sparc__)
const unsigned struct___old_kernel_stat_sz = 32;
@@ -181,17 +206,7 @@ struct __sanitizer_sem_t {
};
#endif // SANITIZER_LINUX
-#if SANITIZER_ANDROID
-struct __sanitizer_struct_mallinfo {
- uptr v[10];
-};
-#endif
-
#if SANITIZER_LINUX && !SANITIZER_ANDROID
-struct __sanitizer_struct_mallinfo {
- int v[10];
-};
-
extern unsigned struct_ustat_sz;
extern unsigned struct_rlimit64_sz;
extern unsigned struct_statvfs64_sz;
@@ -295,7 +310,6 @@ extern unsigned struct_msqid_ds_sz;
extern unsigned struct_mq_attr_sz;
extern unsigned struct_timex_sz;
extern unsigned struct_statvfs_sz;
-extern unsigned struct_crypt_data_sz;
#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
struct __sanitizer_iovec {
@@ -319,7 +333,7 @@ struct __sanitizer_ifaddrs {
};
#endif // !SANITIZER_ANDROID
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
typedef unsigned long __sanitizer_pthread_key_t;
#else
typedef unsigned __sanitizer_pthread_key_t;
@@ -346,7 +360,7 @@ struct __sanitizer_passwd {
char *pw_passwd;
int pw_uid;
int pw_gid;
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
long pw_change;
char *pw_class;
#endif
@@ -355,7 +369,7 @@ struct __sanitizer_passwd {
#endif
char *pw_dir;
char *pw_shell;
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
long pw_expire;
#endif
};
@@ -367,7 +381,8 @@ struct __sanitizer_group {
char **gr_mem;
};
-#if defined(__x86_64__) && !defined(_LP64)
+# if (SANITIZER_LINUX && !SANITIZER_GLIBC && !SANITIZER_ANDROID) || \
+ (defined(__x86_64__) && !defined(_LP64)) || defined(__hexagon__)
typedef long long __sanitizer_time_t;
#else
typedef long __sanitizer_time_t;
@@ -427,7 +442,7 @@ struct __sanitizer_file_handle {
};
#endif
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
struct __sanitizer_msghdr {
void *msg_name;
unsigned msg_namelen;
@@ -468,30 +483,31 @@ struct __sanitizer_mmsghdr {
};
#endif
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
struct __sanitizer_dirent {
unsigned long long d_ino;
unsigned long long d_seekoff;
unsigned short d_reclen;
// more fields that we don't care about
};
-#elif SANITIZER_ANDROID || defined(__x86_64__)
+# elif (SANITIZER_LINUX && !SANITIZER_GLIBC) || defined(__x86_64__) || \
+ defined(__hexagon__)
struct __sanitizer_dirent {
unsigned long long d_ino;
unsigned long long d_off;
unsigned short d_reclen;
// more fields that we don't care about
};
-#else
+# else
struct __sanitizer_dirent {
uptr d_ino;
uptr d_off;
unsigned short d_reclen;
// more fields that we don't care about
};
-#endif
+# endif
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+# if SANITIZER_GLIBC
struct __sanitizer_dirent64 {
unsigned long long d_ino;
unsigned long long d_off;
@@ -508,11 +524,12 @@ typedef long __sanitizer_clock_t;
#if SANITIZER_LINUX
typedef int __sanitizer_clockid_t;
+typedef unsigned long long __sanitizer_eventfd_t;
#endif
#if SANITIZER_LINUX
-#if defined(_LP64) || defined(__x86_64__) || defined(__powerpc__) || \
- defined(__mips__)
+# if defined(_LP64) || defined(__x86_64__) || defined(__powerpc__) || \
+ defined(__mips__) || defined(__hexagon__)
typedef unsigned __sanitizer___kernel_uid_t;
typedef unsigned __sanitizer___kernel_gid_t;
#else
@@ -552,7 +569,7 @@ typedef unsigned long __sanitizer_sigset_t[16 / sizeof(unsigned long)];
# else
typedef unsigned long __sanitizer_sigset_t;
# endif
-#elif SANITIZER_MAC
+#elif SANITIZER_APPLE
typedef unsigned __sanitizer_sigset_t;
#elif SANITIZER_LINUX
struct __sanitizer_sigset_t {
@@ -561,10 +578,35 @@ struct __sanitizer_sigset_t {
};
#endif
-struct __sanitizer_siginfo {
- // The size is determined by looking at sizeof of real siginfo_t on linux.
- u64 opaque[128 / sizeof(u64)];
+struct __sanitizer_siginfo_pad {
+#if SANITIZER_X32
+ // x32 siginfo_t is aligned to 8 bytes.
+ u64 pad[128 / sizeof(u64)];
+#else
+ // Require uptr, because siginfo_t is always pointer-size aligned on Linux.
+ uptr pad[128 / sizeof(uptr)];
+#endif
+};
+
+#if SANITIZER_LINUX
+# define SANITIZER_HAS_SIGINFO 1
+union __sanitizer_siginfo {
+ struct {
+ int si_signo;
+# if SANITIZER_MIPS
+ int si_code;
+ int si_errno;
+# else
+ int si_errno;
+ int si_code;
+# endif
+ };
+ __sanitizer_siginfo_pad pad;
};
+#else
+# define SANITIZER_HAS_SIGINFO 0
+typedef __sanitizer_siginfo_pad __sanitizer_siginfo;
+#endif
using __sanitizer_sighandler_ptr = void (*)(int sig);
using __sanitizer_sigactionhandler_ptr = void (*)(int sig,
@@ -712,12 +754,19 @@ struct __sanitizer_protoent {
int p_proto;
};
+struct __sanitizer_netent {
+ char *n_name;
+ char **n_aliases;
+ int n_addrtype;
+ u32 n_net;
+};
+
struct __sanitizer_addrinfo {
int ai_flags;
int ai_family;
int ai_socktype;
int ai_protocol;
-#if SANITIZER_ANDROID || SANITIZER_MAC
+#if SANITIZER_ANDROID || SANITIZER_APPLE
unsigned ai_addrlen;
char *ai_canonname;
void *ai_addr;
@@ -743,7 +792,7 @@ struct __sanitizer_pollfd {
short revents;
};
-#if SANITIZER_ANDROID || SANITIZER_MAC
+#if SANITIZER_ANDROID || SANITIZER_APPLE
typedef unsigned __sanitizer_nfds_t;
#else
typedef unsigned long __sanitizer_nfds_t;
@@ -773,6 +822,10 @@ extern int glob_altdirfunc;
extern unsigned path_max;
+# if !SANITIZER_ANDROID
+extern const int wordexp_wrde_dooffs;
+# endif // !SANITIZER_ANDROID
+
struct __sanitizer_wordexp_t {
uptr we_wordc;
char **we_wordv;
@@ -806,7 +859,7 @@ typedef void __sanitizer_FILE;
#if SANITIZER_LINUX && !SANITIZER_ANDROID && \
(defined(__i386) || defined(__x86_64) || defined(__mips64) || \
defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \
- defined(__s390__) || SANITIZER_RISCV64)
+ defined(__s390__) || defined(__loongarch__) || SANITIZER_RISCV64)
extern unsigned struct_user_regs_struct_sz;
extern unsigned struct_user_fpregs_struct_sz;
extern unsigned struct_user_fpxregs_struct_sz;
@@ -839,7 +892,7 @@ extern int shmctl_shm_info;
extern int shmctl_shm_stat;
#endif
-#if !SANITIZER_MAC && !SANITIZER_FREEBSD
+#if !SANITIZER_APPLE && !SANITIZER_FREEBSD
extern unsigned struct_utmp_sz;
#endif
#if !SANITIZER_ANDROID
@@ -854,7 +907,7 @@ struct __sanitizer_ifconf {
union {
void *ifcu_req;
} ifc_ifcu;
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
} __attribute__((packed));
#else
};
@@ -1007,7 +1060,7 @@ extern unsigned struct_audio_buf_info_sz;
extern unsigned struct_ppp_stats_sz;
#endif // (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
-#if !SANITIZER_ANDROID && !SANITIZER_MAC
+#if !SANITIZER_ANDROID && !SANITIZER_APPLE
extern unsigned struct_sioc_sg_req_sz;
extern unsigned struct_sioc_vif_req_sz;
#endif
@@ -1094,6 +1147,14 @@ extern unsigned IOCTL_BLKRASET;
extern unsigned IOCTL_BLKROGET;
extern unsigned IOCTL_BLKROSET;
extern unsigned IOCTL_BLKRRPART;
+extern unsigned IOCTL_BLKFRASET;
+extern unsigned IOCTL_BLKFRAGET;
+extern unsigned IOCTL_BLKSECTSET;
+extern unsigned IOCTL_BLKSECTGET;
+extern unsigned IOCTL_BLKSSZGET;
+extern unsigned IOCTL_BLKBSZGET;
+extern unsigned IOCTL_BLKBSZSET;
+extern unsigned IOCTL_BLKGETSIZE64;
extern unsigned IOCTL_CDROMAUDIOBUFSIZ;
extern unsigned IOCTL_CDROMEJECT;
extern unsigned IOCTL_CDROMEJECT_SW;
@@ -1440,6 +1501,6 @@ extern const int si_SEGV_ACCERR;
#define SIGACTION_SYMNAME sigaction
-#endif // SANITIZER_LINUX || SANITIZER_MAC
+#endif // SANITIZER_LINUX || SANITIZER_APPLE
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_solaris.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_solaris.cpp
index 565b31f68aae..dad7bde1498a 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_solaris.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_solaris.cpp
@@ -89,7 +89,7 @@ namespace __sanitizer {
unsigned struct_sched_param_sz = sizeof(struct sched_param);
unsigned struct_statfs_sz = sizeof(struct statfs);
unsigned struct_sockaddr_sz = sizeof(struct sockaddr);
- unsigned ucontext_t_sz = sizeof(ucontext_t);
+ unsigned ucontext_t_sz(void *ctx) { return sizeof(ucontext_t); }
unsigned struct_timespec_sz = sizeof(struct timespec);
#if SANITIZER_SOLARIS32
unsigned struct_statvfs64_sz = sizeof(struct statvfs64);
@@ -123,6 +123,7 @@ namespace __sanitizer {
unsigned struct_ElfW_Phdr_sz = sizeof(ElfW(Phdr));
int glob_nomatch = GLOB_NOMATCH;
+ const int wordexp_wrde_dooffs = WRDE_DOOFFS;
unsigned path_max = PATH_MAX;
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_solaris.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_solaris.h
index 85995e79792d..84a81265162c 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_solaris.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_solaris.h
@@ -43,7 +43,7 @@ extern unsigned struct_sched_param_sz;
extern unsigned struct_statfs64_sz;
extern unsigned struct_statfs_sz;
extern unsigned struct_sockaddr_sz;
-extern unsigned ucontext_t_sz;
+unsigned ucontext_t_sz(void *ctx);
extern unsigned struct_timespec_sz;
extern unsigned struct_rlimit_sz;
@@ -341,6 +341,7 @@ struct __sanitizer_glob_t {
extern int glob_nomatch;
extern int glob_altdirfunc;
+extern const int wordexp_wrde_dooffs;
extern unsigned path_max;
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix.cpp
index f8457a6aac41..8d2c5b2cefbe 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix.cpp
@@ -41,6 +41,8 @@ uptr GetMmapGranularity() {
return GetPageSize();
}
+bool ErrorIsOOM(error_t err) { return err == ENOMEM; }
+
void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
size = RoundUpTo(size, GetPageSizeCached());
uptr res = MmapNamed(nullptr, size, PROT_READ | PROT_WRITE,
@@ -55,11 +57,9 @@ void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
void UnmapOrDie(void *addr, uptr size) {
if (!addr || !size) return;
uptr res = internal_munmap(addr, size);
- if (UNLIKELY(internal_iserror(res))) {
- Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
- SanitizerToolName, size, size, addr);
- CHECK("unable to unmap" && 0);
- }
+ int reserrno;
+ if (UNLIKELY(internal_iserror(res, &reserrno)))
+ ReportMunmapFailureAndDie(addr, size, reserrno);
DecreaseTotalMmap(size);
}
@@ -85,18 +85,26 @@ void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
CHECK(IsPowerOfTwo(size));
CHECK(IsPowerOfTwo(alignment));
uptr map_size = size + alignment;
+  // mmap maps entire pages, so map_size needs to be rounded up to an
+  // integral number of pages. We need to know this size when calculating
+  // end and when unmapping the fragments before and after the aligned
+  // region.
+ map_size = RoundUpTo(map_size, GetPageSizeCached());
uptr map_res = (uptr)MmapOrDieOnFatalError(map_size, mem_type);
if (UNLIKELY(!map_res))
return nullptr;
- uptr map_end = map_res + map_size;
uptr res = map_res;
if (!IsAligned(res, alignment)) {
res = (map_res + alignment - 1) & ~(alignment - 1);
UnmapOrDie((void*)map_res, res - map_res);
}
+ uptr map_end = map_res + map_size;
uptr end = res + size;
- if (end != map_end)
+ end = RoundUpTo(end, GetPageSizeCached());
+ if (end != map_end) {
+ CHECK_LT(end, map_end);
UnmapOrDie((void*)end, map_end - end);
+ }
return (void*)res;
}
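The hunk above over-allocates by size + alignment, rounds the request up to whole pages, and then unmaps the unaligned head and the page-rounded tail. A minimal standalone sketch of the same arithmetic, with an illustrative page size, alignment, and mmap address (none of these values come from the diff):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t kPageSize = 4096;      // assumed page size
      const uint64_t alignment = 1 << 16;   // assumed requested alignment
      const uint64_t size = 4 * kPageSize;  // power of two, as the real code requires

      // Over-allocate and round up to whole pages, as the patch does.
      uint64_t map_size = size + alignment;
      map_size = (map_size + kPageSize - 1) & ~(kPageSize - 1);

      uint64_t map_res = 0x12345000;  // pretend mmap() result (page-aligned)
      uint64_t res = (map_res + alignment - 1) & ~(alignment - 1);
      // The real code unmaps the head fragment [map_res, res) here.

      uint64_t map_end = map_res + map_size;
      uint64_t end = (res + size + kPageSize - 1) & ~(kPageSize - 1);
      // The tail fragment [end, map_end) is unmapped only when non-empty,
      // and the new CHECK_LT guarantees it never extends past the mapping.
      assert(end <= map_end);
      return 0;
    }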
@@ -146,7 +154,11 @@ bool MprotectReadOnly(uptr addr, uptr size) {
return 0 == internal_mprotect((void *)addr, size, PROT_READ);
}
-#if !SANITIZER_MAC
+bool MprotectReadWrite(uptr addr, uptr size) {
+ return 0 == internal_mprotect((void *)addr, size, PROT_READ | PROT_WRITE);
+}
+
+#if !SANITIZER_APPLE
void MprotectMallocZones(void *addr, int prot) {}
#endif
@@ -239,7 +251,7 @@ bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
return true;
}
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
void DumpProcessMap() {
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
const sptr kBufSize = 4095;
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix.h
index b65dae644767..c5811dffea94 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix.h
@@ -20,10 +20,7 @@
#include "sanitizer_platform_limits_posix.h"
#include "sanitizer_platform_limits_solaris.h"
-#if !SANITIZER_POSIX
-// Make it hard to accidentally use any of functions declared in this file:
-#error This file should only be included on POSIX
-#endif
+#if SANITIZER_POSIX
namespace __sanitizer {
@@ -93,7 +90,7 @@ int real_pthread_join(void *th, void **ret);
} \
} // namespace __sanitizer
-int my_pthread_attr_getstack(void *attr, void **addr, uptr *size);
+int internal_pthread_attr_getstack(void *attr, void **addr, uptr *size);
// A routine named real_sigaction() must be implemented by each sanitizer in
// order for internal_sigaction() to bypass interceptors.
@@ -123,7 +120,12 @@ int GetNamedMappingFd(const char *name, uptr size, int *flags);
// alive at least as long as the mapping exists.
void DecorateMapping(uptr addr, uptr size, const char *name);
+# if !SANITIZER_FREEBSD
+# define __sanitizer_dirsiz(dp) ((dp)->d_reclen)
+# endif
} // namespace __sanitizer
+#endif // SANITIZER_POSIX
+
#endif // SANITIZER_POSIX_H
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix_libcdep.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix_libcdep.cpp
index ddf6844bed13..e88e654eec5a 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix_libcdep.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_posix_libcdep.cpp
@@ -151,6 +151,8 @@ int Atexit(void (*function)(void)) {
#endif
}
+bool CreateDir(const char *pathname) { return mkdir(pathname, 0755) == 0; }
+
bool SupportsColoredOutput(fd_t fd) {
return isatty(fd) != 0;
}
@@ -288,7 +290,7 @@ bool IsAccessibleMemoryRange(uptr beg, uptr size) {
return result;
}
-void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
+void PlatformPrepareForSandboxing(void *args) {
// Some kinds of sandboxes may forbid filesystem access, so we won't be able
// to read the file mappings from /proc/self/maps. Luckily, neither the
// process will be able to load additional libraries, so it's fine to use the
@@ -381,8 +383,8 @@ SANITIZER_WEAK_ATTRIBUTE int
real_pthread_attr_getstack(void *attr, void **addr, size_t *size);
} // extern "C"
-int my_pthread_attr_getstack(void *attr, void **addr, uptr *size) {
-#if !SANITIZER_GO && !SANITIZER_MAC
+int internal_pthread_attr_getstack(void *attr, void **addr, uptr *size) {
+#if !SANITIZER_GO && !SANITIZER_APPLE
if (&real_pthread_attr_getstack)
return real_pthread_attr_getstack((pthread_attr_t *)attr, addr,
(size_t *)size);
@@ -395,7 +397,7 @@ void AdjustStackSize(void *attr_) {
pthread_attr_t *attr = (pthread_attr_t *)attr_;
uptr stackaddr = 0;
uptr stacksize = 0;
- my_pthread_attr_getstack(attr, (void**)&stackaddr, &stacksize);
+ internal_pthread_attr_getstack(attr, (void **)&stackaddr, &stacksize);
// GLibC will return (0 - stacksize) as the stack address in the case when
// stacksize is set, but stackaddr is not.
bool stack_set = (stackaddr != 0) && (stackaddr + stacksize != 0);
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_printf.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_printf.cpp
index b913c92e16f1..62c1cf4abe42 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_printf.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_printf.cpp
@@ -20,10 +20,6 @@
#include <stdio.h>
#include <stdarg.h>
-#if defined(__x86_64__)
-# include <emmintrin.h>
-#endif
-
#if SANITIZER_WINDOWS && defined(_MSC_VER) && _MSC_VER < 1800 && \
!defined(va_copy)
# define va_copy(dst, src) ((dst) = (src))
@@ -132,8 +128,8 @@ static int AppendPointer(char **buff, const char *buff_end, u64 ptr_value) {
int VSNPrintf(char *buff, int buff_length,
const char *format, va_list args) {
static const char *kPrintfFormatsHelp =
- "Supported Printf formats: %([0-9]*)?(z|ll)?{d,u,x,X,V}; %p; "
- "%[-]([0-9]*)?(\\.\\*)?s; %c\n";
+ "Supported Printf formats: %([0-9]*)?(z|l|ll)?{d,u,x,X}; %p; "
+ "%[-]([0-9]*)?(\\.\\*)?s; %c\nProvided format: ";
RAW_CHECK(format);
RAW_CHECK(buff_length > 0);
const char *buff_end = &buff[buff_length - 1];
@@ -164,9 +160,11 @@ int VSNPrintf(char *buff, int buff_length,
}
bool have_z = (*cur == 'z');
cur += have_z;
- bool have_ll = !have_z && (cur[0] == 'l' && cur[1] == 'l');
+ bool have_l = cur[0] == 'l' && cur[1] != 'l';
+ cur += have_l;
+ bool have_ll = cur[0] == 'l' && cur[1] == 'l';
cur += have_ll * 2;
- const bool have_length = have_z || have_ll;
+ const bool have_length = have_z || have_l || have_ll;
const bool have_flags = have_width || have_length;
// At the moment only %s supports precision and left-justification.
CHECK(!((precision >= 0 || left_justified) && *cur != 's'));
@@ -174,6 +172,7 @@ int VSNPrintf(char *buff, int buff_length,
case 'd': {
s64 dval = have_ll ? va_arg(args, s64)
: have_z ? va_arg(args, sptr)
+ : have_l ? va_arg(args, long)
: va_arg(args, int);
result += AppendSignedDecimal(&buff, buff_end, dval, width,
pad_with_zero);
@@ -184,26 +183,20 @@ int VSNPrintf(char *buff, int buff_length,
case 'X': {
u64 uval = have_ll ? va_arg(args, u64)
: have_z ? va_arg(args, uptr)
+ : have_l ? va_arg(args, unsigned long)
: va_arg(args, unsigned);
bool uppercase = (*cur == 'X');
result += AppendUnsigned(&buff, buff_end, uval, (*cur == 'u') ? 10 : 16,
width, pad_with_zero, uppercase);
break;
}
- case 'V': {
- for (uptr i = 0; i < 16; i++) {
- unsigned x = va_arg(args, unsigned);
- result += AppendUnsigned(&buff, buff_end, x, 16, 2, true, false);
- }
- break;
- }
case 'p': {
- RAW_CHECK_MSG(!have_flags, kPrintfFormatsHelp);
+ RAW_CHECK_VA(!have_flags, kPrintfFormatsHelp, format);
result += AppendPointer(&buff, buff_end, va_arg(args, uptr));
break;
}
case 's': {
- RAW_CHECK_MSG(!have_length, kPrintfFormatsHelp);
+ RAW_CHECK_VA(!have_length, kPrintfFormatsHelp, format);
// Only left-justified width is supported.
CHECK(!have_width || left_justified);
result += AppendString(&buff, buff_end, left_justified ? -width : width,
@@ -211,17 +204,17 @@ int VSNPrintf(char *buff, int buff_length,
break;
}
case 'c': {
- RAW_CHECK_MSG(!have_flags, kPrintfFormatsHelp);
+ RAW_CHECK_VA(!have_flags, kPrintfFormatsHelp, format);
result += AppendChar(&buff, buff_end, va_arg(args, int));
break;
}
case '%' : {
- RAW_CHECK_MSG(!have_flags, kPrintfFormatsHelp);
+ RAW_CHECK_VA(!have_flags, kPrintfFormatsHelp, format);
result += AppendChar(&buff, buff_end, '%');
break;
}
default: {
- RAW_CHECK_MSG(false, kPrintfFormatsHelp);
+ RAW_CHECK_VA(false, kPrintfFormatsHelp, format);
}
}
}
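With the new have_l branch, plain long and unsigned long arguments are consumed at their correct width instead of needing a cast to a z- or ll-qualified type. A hedged sketch of the call sites this enables (hypothetical, not from the diff):

    // Inside sanitizer code, after this change:
    long rss = -42;
    unsigned long pages = 42;
    Printf("rss=%ld pages=%lu\n", rss, pages);  // new have_l path
    Printf("count=%d\n", 7);                    // plain int path unchanged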
@@ -317,7 +310,6 @@ static void NOINLINE SharedPrintfCode(bool append_pid, const char *format,
format, args);
}
-FORMAT(1, 2)
void Printf(const char *format, ...) {
va_list args;
va_start(args, format);
@@ -326,7 +318,6 @@ void Printf(const char *format, ...) {
}
// Like Printf, but prints the current PID before the output string.
-FORMAT(1, 2)
void Report(const char *format, ...) {
va_list args;
va_start(args, format);
@@ -338,7 +329,6 @@ void Report(const char *format, ...) {
// Returns the number of symbols that should have been written to buffer
// (not including trailing '\0'). Thus, the string is truncated
// iff return value is not less than "length".
-FORMAT(3, 4)
int internal_snprintf(char *buffer, uptr length, const char *format, ...) {
va_list args;
va_start(args, format);
@@ -347,8 +337,14 @@ int internal_snprintf(char *buffer, uptr length, const char *format, ...) {
return needed_length;
}
-FORMAT(2, 3)
-void InternalScopedString::append(const char *format, ...) {
+void InternalScopedString::Append(const char *str) {
+ uptr prev_len = length();
+ uptr str_len = internal_strlen(str);
+ buffer_.resize(prev_len + str_len + 1);
+ internal_memcpy(buffer_.data() + prev_len, str, str_len + 1);
+}
+
+void InternalScopedString::AppendF(const char *format, ...) {
uptr prev_len = length();
while (true) {
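append() is split here into a plain Append(), which copies a string verbatim, and AppendF(), which keeps the printf-style formatting path. A short hypothetical usage sketch:

    InternalScopedString s;
    s.Append("quarantine: ");  // raw copy, no % parsing
    s.AppendF("%zd bytes in %zd chunks\n", (uptr)16384, (uptr)4);
    Printf("%s", s.data());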
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps.h
index a56640db43e8..bf3c2c28e32e 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps.h
@@ -16,7 +16,7 @@
#include "sanitizer_platform.h"
#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \
- SANITIZER_MAC || SANITIZER_SOLARIS || \
+ SANITIZER_APPLE || SANITIZER_SOLARIS || \
SANITIZER_FUCHSIA
#include "sanitizer_common.h"
@@ -65,13 +65,37 @@ class MemoryMappedSegment {
MemoryMappedSegmentData *data_;
};
-class MemoryMappingLayout {
+struct ImageHeader;
+
+class MemoryMappingLayoutBase {
+ public:
+ virtual bool Next(MemoryMappedSegment *segment) { UNIMPLEMENTED(); }
+  virtual bool Error() const { UNIMPLEMENTED(); }
+ virtual void Reset() { UNIMPLEMENTED(); }
+
+ protected:
+ ~MemoryMappingLayoutBase() {}
+};
+
+class MemoryMappingLayout : public MemoryMappingLayoutBase {
public:
explicit MemoryMappingLayout(bool cache_enabled);
+
+// This destructor cannot be virtual, as it would cause operator new() linking
+// failures in hwasan test cases. However, non-virtual destructors emit
+// warnings in the macOS build, hence we disable those warnings here.
+#ifdef __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wnon-virtual-dtor"
+#endif
~MemoryMappingLayout();
- bool Next(MemoryMappedSegment *segment);
- bool Error() const;
- void Reset();
+#ifdef __clang__
+#pragma clang diagnostic pop
+#endif
+
+ virtual bool Next(MemoryMappedSegment *segment) override;
+ virtual bool Error() const override;
+ virtual void Reset() override;
// In some cases, e.g. when running under a sandbox on Linux, ASan is unable
// to obtain the memory mappings. It should fall back to pre-cached data
// instead of aborting.
@@ -80,10 +104,14 @@ class MemoryMappingLayout {
// Adds all mapped objects into a vector.
void DumpListOfModules(InternalMmapVectorNoCtor<LoadedModule> *modules);
+ protected:
+#if SANITIZER_APPLE
+ virtual const ImageHeader *CurrentImageHeader();
+#endif
+ MemoryMappingLayoutData data_;
+
private:
void LoadFromCache();
-
- MemoryMappingLayoutData data_;
};
// Returns code range for the specified module.
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_bsd.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_bsd.cpp
index 1f489b71ad99..dcfd94fe3225 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_bsd.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_bsd.cpp
@@ -13,9 +13,6 @@
#include "sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_NETBSD
#include "sanitizer_common.h"
-#if SANITIZER_FREEBSD
-#include "sanitizer_freebsd.h"
-#endif
#include "sanitizer_procmaps.h"
// clang-format off
@@ -29,16 +26,24 @@
#include <limits.h>
-// Fix 'kinfo_vmentry' definition on FreeBSD prior v9.2 in 32-bit mode.
-#if SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32)
-#include <osreldate.h>
-#if __FreeBSD_version <= 902001 // v9.2
-#define kinfo_vmentry xkinfo_vmentry
-#endif
-#endif
-
namespace __sanitizer {
+#if SANITIZER_FREEBSD
+void GetMemoryProfile(fill_profile_f cb, uptr *stats) {
+ const int Mib[] = {
+ CTL_KERN,
+ KERN_PROC,
+ KERN_PROC_PID,
+ getpid()
+ };
+
+ struct kinfo_proc InfoProc;
+ uptr Len = sizeof(InfoProc);
+ CHECK_EQ(internal_sysctl(Mib, ARRAY_SIZE(Mib), nullptr, (uptr *)&InfoProc, &Len, 0), 0);
+ cb(0, InfoProc.ki_rssize * GetPageSizeCached(), false, stats);
+}
+#endif
+
void ReadProcMaps(ProcSelfMapsBuff *proc_maps) {
const int Mib[] = {
#if SANITIZER_FREEBSD
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_common.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_common.cpp
index 1b7dd46d8de4..a7805ad1b083 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_common.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_common.cpp
@@ -145,29 +145,47 @@ void MemoryMappingLayout::DumpListOfModules(
}
}
-void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) {
+#if SANITIZER_LINUX || SANITIZER_ANDROID || SANITIZER_SOLARIS || SANITIZER_NETBSD
+void GetMemoryProfile(fill_profile_f cb, uptr *stats) {
char *smaps = nullptr;
uptr smaps_cap = 0;
uptr smaps_len = 0;
if (!ReadFileToBuffer("/proc/self/smaps", &smaps, &smaps_cap, &smaps_len))
return;
+ ParseUnixMemoryProfile(cb, stats, smaps, smaps_len);
+ UnmapOrDie(smaps, smaps_cap);
+}
+
+void ParseUnixMemoryProfile(fill_profile_f cb, uptr *stats, char *smaps,
+ uptr smaps_len) {
uptr start = 0;
bool file = false;
const char *pos = smaps;
- while (pos < smaps + smaps_len) {
+ char *end = smaps + smaps_len;
+ if (smaps_len < 2)
+ return;
+ // The following parsing can crash on almost every line
+ // in the case of malformed/truncated input.
+  // Fixing that is hard because e.g. ParseDecimal does not
+  // even accept an end-of-buffer pointer and assumes well-formed input.
+  // So instead we patch the end of the input a bit;
+  // this does not affect well-formed, complete inputs.
+ *--end = 0;
+ *--end = '\n';
+ while (pos < end) {
if (IsHex(pos[0])) {
start = ParseHex(&pos);
for (; *pos != '/' && *pos > '\n'; pos++) {}
file = *pos == '/';
} else if (internal_strncmp(pos, "Rss:", 4) == 0) {
- while (!IsDecimal(*pos)) pos++;
+ while (pos < end && !IsDecimal(*pos)) pos++;
uptr rss = ParseDecimal(&pos) * 1024;
- cb(start, rss, file, stats, stats_size);
+ cb(start, rss, file, stats);
}
while (*pos++ != '\n') {}
}
- UnmapOrDie(smaps, smaps_cap);
}
+#endif
} // namespace __sanitizer
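Factoring the parser out of GetMemoryProfile() makes it testable against an in-memory buffer. A hedged sketch of such a driver, assuming it lives inside namespace __sanitizer (the smaps text and the CountRss helper are illustrative; the callback signature matches the hunk above):

    static void CountRss(uptr start, uptr rss, bool file, uptr *stats) {
      (void)start;
      stats[file ? 1 : 0] += rss;  // split anonymous vs. file-backed RSS
    }

    void ExampleParse() {
      // The parser overwrites the final two bytes, so the buffer must be
      // writable; clobbering the trailing "B\n" does not affect the value.
      char smaps[] =
          "7f0000000000-7f0000001000 r-xp 00000000 fd:01 1 /bin/true\n"
          "Rss:                 4 kB\n";
      uptr stats[2] = {};
      ParseUnixMemoryProfile(CountRss, stats, smaps, sizeof(smaps) - 1);
      // stats[1] == 4096 afterwards (4 kB, file-backed).
    }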
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_mac.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_mac.cpp
index 1f53e3e46d8f..b44e016a0e5b 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_mac.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_mac.cpp
@@ -10,7 +10,7 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
#include "sanitizer_common.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_procmaps.h"
@@ -136,29 +136,34 @@ void MemoryMappingLayout::LoadFromCache() {
// No-op on Mac for now.
}
+static bool IsDyldHdr(const mach_header *hdr) {
+ return (hdr->magic == MH_MAGIC || hdr->magic == MH_MAGIC_64) &&
+ hdr->filetype == MH_DYLINKER;
+}
+
// _dyld_get_image_header() and related APIs don't report dyld itself.
// We work around this by manually recursing through the memory map
// until we hit a Mach header matching dyld instead. These recurse
// calls are expensive, but the first memory map generation occurs
// early in the process, when dyld is one of the only images loaded,
-// so it will be hit after only a few iterations.
-static mach_header *get_dyld_image_header() {
- unsigned depth = 1;
- vm_size_t size = 0;
+// so it will be hit after only a few iterations. These assumptions don't hold
+// on macOS 13+ anymore (dyld itself has moved into the shared cache).
+static mach_header *GetDyldImageHeaderViaVMRegion() {
vm_address_t address = 0;
- kern_return_t err = KERN_SUCCESS;
- mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
while (true) {
+ vm_size_t size = 0;
+ unsigned depth = 1;
struct vm_region_submap_info_64 info;
- err = vm_region_recurse_64(mach_task_self(), &address, &size, &depth,
- (vm_region_info_t)&info, &count);
+ mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
+ kern_return_t err =
+ vm_region_recurse_64(mach_task_self(), &address, &size, &depth,
+ (vm_region_info_t)&info, &count);
if (err != KERN_SUCCESS) return nullptr;
if (size >= sizeof(mach_header) && info.protection & kProtectionRead) {
mach_header *hdr = (mach_header *)address;
- if ((hdr->magic == MH_MAGIC || hdr->magic == MH_MAGIC_64) &&
- hdr->filetype == MH_DYLINKER) {
+ if (IsDyldHdr(hdr)) {
return hdr;
}
}
@@ -166,8 +171,69 @@ static mach_header *get_dyld_image_header() {
}
}
+extern "C" {
+struct dyld_shared_cache_dylib_text_info {
+ uint64_t version; // current version 2
+ // following fields all exist in version 1
+ uint64_t loadAddressUnslid;
+ uint64_t textSegmentSize;
+ uuid_t dylibUuid;
+ const char *path; // pointer invalid at end of iterations
+ // following fields all exist in version 2
+ uint64_t textSegmentOffset; // offset from start of cache
+};
+typedef struct dyld_shared_cache_dylib_text_info
+ dyld_shared_cache_dylib_text_info;
+
+extern bool _dyld_get_shared_cache_uuid(uuid_t uuid);
+extern const void *_dyld_get_shared_cache_range(size_t *length);
+extern int dyld_shared_cache_iterate_text(
+ const uuid_t cacheUuid,
+ void (^callback)(const dyld_shared_cache_dylib_text_info *info));
+} // extern "C"
+
+static mach_header *GetDyldImageHeaderViaSharedCache() {
+ uuid_t uuid;
+ bool hasCache = _dyld_get_shared_cache_uuid(uuid);
+ if (!hasCache)
+ return nullptr;
+
+ size_t cacheLength;
+ __block uptr cacheStart = (uptr)_dyld_get_shared_cache_range(&cacheLength);
+ CHECK(cacheStart && cacheLength);
+
+ __block mach_header *dyldHdr = nullptr;
+ int res = dyld_shared_cache_iterate_text(
+ uuid, ^(const dyld_shared_cache_dylib_text_info *info) {
+ CHECK_GE(info->version, 2);
+ mach_header *hdr =
+ (mach_header *)(cacheStart + info->textSegmentOffset);
+ if (IsDyldHdr(hdr))
+ dyldHdr = hdr;
+ });
+ CHECK_EQ(res, 0);
+
+ return dyldHdr;
+}
+
const mach_header *get_dyld_hdr() {
- if (!dyld_hdr) dyld_hdr = get_dyld_image_header();
+ if (!dyld_hdr) {
+ // On macOS 13+, dyld itself has moved into the shared cache. Looking it up
+ // via vm_region_recurse_64() causes spins/hangs/crashes.
+ if (GetMacosAlignedVersion() >= MacosVersion(13, 0)) {
+ dyld_hdr = GetDyldImageHeaderViaSharedCache();
+ if (!dyld_hdr) {
+ VReport(1,
+ "Failed to lookup the dyld image header in the shared cache on "
+ "macOS 13+ (or no shared cache in use). Falling back to "
+ "lookup via vm_region_recurse_64().\n");
+ dyld_hdr = GetDyldImageHeaderViaVMRegion();
+ }
+ } else {
+ dyld_hdr = GetDyldImageHeaderViaVMRegion();
+ }
+ CHECK(dyld_hdr);
+ }
return dyld_hdr;
}
@@ -184,7 +250,9 @@ static bool NextSegmentLoad(MemoryMappedSegment *segment,
MemoryMappedSegmentData *seg_data,
MemoryMappingLayoutData *layout_data) {
const char *lc = layout_data->current_load_cmd_addr;
+
layout_data->current_load_cmd_addr += ((const load_command *)lc)->cmdsize;
+ layout_data->current_load_cmd_count--;
if (((const load_command *)lc)->cmd == kLCSegment) {
const SegmentCommand* sc = (const SegmentCommand *)lc;
uptr base_virt_addr, addr_mask;
@@ -292,11 +360,16 @@ static bool IsModuleInstrumented(const load_command *first_lc) {
return false;
}
+const ImageHeader *MemoryMappingLayout::CurrentImageHeader() {
+ const mach_header *hdr = (data_.current_image == kDyldImageIdx)
+ ? get_dyld_hdr()
+ : _dyld_get_image_header(data_.current_image);
+ return (const ImageHeader *)hdr;
+}
+
bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
for (; data_.current_image >= kDyldImageIdx; data_.current_image--) {
- const mach_header *hdr = (data_.current_image == kDyldImageIdx)
- ? get_dyld_hdr()
- : _dyld_get_image_header(data_.current_image);
+ const mach_header *hdr = (const mach_header *)CurrentImageHeader();
if (!hdr) continue;
if (data_.current_load_cmd_count < 0) {
// Set up for this image;
@@ -326,7 +399,7 @@ bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
(const load_command *)data_.current_load_cmd_addr);
}
- for (; data_.current_load_cmd_count >= 0; data_.current_load_cmd_count--) {
+ while (data_.current_load_cmd_count > 0) {
switch (data_.current_magic) {
// data_.current_magic may be only one of MH_MAGIC, MH_MAGIC_64.
#ifdef MH_MAGIC_64
@@ -347,6 +420,7 @@ bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
}
// If we get here, no more load_cmd's in this image talk about
// segments. Go on to the next image.
+      data_.current_load_cmd_count = -1;  // This will trigger loading the next image.
}
return false;
}
@@ -376,4 +450,4 @@ void MemoryMappingLayout::DumpListOfModules(
} // namespace __sanitizer
-#endif // SANITIZER_MAC
+#endif // SANITIZER_APPLE
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_solaris.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_solaris.cpp
index bf813f235bb7..eeb49e2afe34 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_solaris.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_procmaps_solaris.cpp
@@ -13,21 +13,30 @@
#undef _FILE_OFFSET_BITS
#include "sanitizer_platform.h"
#if SANITIZER_SOLARIS
-#include "sanitizer_common.h"
-#include "sanitizer_procmaps.h"
+# include <fcntl.h>
+# include <limits.h>
+# include <procfs.h>
-#include <procfs.h>
-#include <limits.h>
+# include "sanitizer_common.h"
+# include "sanitizer_procmaps.h"
namespace __sanitizer {
void ReadProcMaps(ProcSelfMapsBuff *proc_maps) {
- if (!ReadFileToBuffer("/proc/self/xmap", &proc_maps->data,
- &proc_maps->mmaped_size, &proc_maps->len)) {
- proc_maps->data = nullptr;
- proc_maps->mmaped_size = 0;
- proc_maps->len = 0;
- }
+ uptr fd = internal_open("/proc/self/xmap", O_RDONLY);
+ CHECK_NE(fd, -1);
+ uptr Size = internal_filesize(fd);
+ CHECK_GT(Size, 0);
+
+  // Over-allocate in the following mmap to allow for additional entries.
+ size_t MmapedSize = Size * 4 / 3;
+ void *VmMap = MmapOrDie(MmapedSize, "ReadProcMaps()");
+ Size = internal_read(fd, VmMap, MmapedSize);
+ CHECK_NE(Size, -1);
+ internal_close(fd);
+ proc_maps->data = (char *)VmMap;
+ proc_maps->mmaped_size = MmapedSize;
+ proc_maps->len = Size;
}
bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
@@ -49,13 +58,28 @@ bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
segment->protection |= kProtectionWrite;
if ((xmapentry->pr_mflags & MA_EXEC) != 0)
segment->protection |= kProtectionExecute;
+ if ((xmapentry->pr_mflags & MA_SHARED) != 0)
+ segment->protection |= kProtectionShared;
if (segment->filename != NULL && segment->filename_size > 0) {
char proc_path[PATH_MAX + 1];
- internal_snprintf(proc_path, sizeof(proc_path), "/proc/self/path/%s",
- xmapentry->pr_mapname);
- internal_readlink(proc_path, segment->filename, segment->filename_size);
+      // Avoid unnecessary readlink on unnamed entries.
+ if (xmapentry->pr_mapname[0] == '\0')
+ segment->filename[0] = '\0';
+ else {
+ internal_snprintf(proc_path, sizeof(proc_path), "/proc/self/path/%s",
+ xmapentry->pr_mapname);
+ ssize_t sz = internal_readlink(proc_path, segment->filename,
+ segment->filename_size - 1);
+
+ // If readlink failed, the map is anonymous.
+ if (sz == -1)
+ segment->filename[0] = '\0';
+ else if ((size_t)sz < segment->filename_size)
+ // readlink doesn't NUL-terminate.
+ segment->filename[sz] = '\0';
+ }
}
data_.current += sizeof(prxmap_t);
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_quarantine.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_quarantine.h
index 1a074d2bb700..460d96ea681b 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_quarantine.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_quarantine.h
@@ -68,10 +68,6 @@ struct QuarantineBatch {
COMPILER_CHECK(sizeof(QuarantineBatch) <= (1 << 13)); // 8Kb.
-// The callback interface is:
-// void Callback::Recycle(Node *ptr);
-// void *cb.Allocate(uptr size);
-// void cb.Deallocate(void *ptr);
template<typename Callback, typename Node>
class Quarantine {
public:
@@ -94,21 +90,20 @@ class Quarantine {
recycle_mutex_.Init();
}
- uptr GetSize() const { return atomic_load_relaxed(&max_size_); }
- uptr GetCacheSize() const {
- return atomic_load_relaxed(&max_cache_size_);
- }
+ uptr GetMaxSize() const { return atomic_load_relaxed(&max_size_); }
+ uptr GetMaxCacheSize() const { return atomic_load_relaxed(&max_cache_size_); }
void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
- uptr cache_size = GetCacheSize();
- if (cache_size) {
+ uptr max_cache_size = GetMaxCacheSize();
+ if (max_cache_size && size <= GetMaxSize()) {
+ cb.PreQuarantine(ptr);
c->Enqueue(cb, ptr, size);
} else {
- // GetCacheSize() == 0 only when GetSize() == 0 (see Init).
- cb.Recycle(ptr);
+ // GetMaxCacheSize() == 0 only when GetMaxSize() == 0 (see Init).
+ cb.RecyclePassThrough(ptr);
}
// Check cache size anyway to accommodate for runtime cache_size change.
- if (c->Size() > cache_size)
+ if (c->Size() > max_cache_size)
Drain(c, cb);
}
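Put() now invokes two extra hooks on the callback object: PreQuarantine() before a chunk is enqueued and RecyclePassThrough() when the quarantine is bypassed. A minimal conforming callback, sketched under the assumption that the allocator-facing parts keep their historical shape (the hook names come from the hunk; the bodies are placeholders):

    struct ExampleQuarantineCallback {
      void PreQuarantine(void *ptr) const { /* e.g. poison the chunk */ }
      void Recycle(void *ptr) const { /* hand the chunk back to the allocator */ }
      // Called instead of Recycle() when quarantining is disabled or the
      // chunk is larger than the whole quarantine.
      void RecyclePassThrough(void *ptr) const { Recycle(ptr); }
      void *Allocate(uptr size) const { return InternalAlloc(size); }
      void Deallocate(void *ptr) const { InternalFree(ptr); }
    };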
@@ -117,7 +112,7 @@ class Quarantine {
SpinMutexLock l(&cache_mutex_);
cache_.Transfer(c);
}
- if (cache_.Size() > GetSize() && recycle_mutex_.TryLock())
+ if (cache_.Size() > GetMaxSize() && recycle_mutex_.TryLock())
Recycle(atomic_load_relaxed(&min_size_), cb);
}
@@ -133,7 +128,7 @@ class Quarantine {
void PrintStats() const {
// It assumes that the world is stopped, just as the allocator's PrintStats.
Printf("Quarantine limits: global: %zdMb; thread local: %zdKb\n",
- GetSize() >> 20, GetCacheSize() >> 10);
+ GetMaxSize() >> 20, GetMaxCacheSize() >> 10);
cache_.PrintStats();
}
@@ -149,8 +144,8 @@ class Quarantine {
Cache cache_;
char pad2_[kCacheLineSize];
- void NOINLINE Recycle(uptr min_size, Callback cb) REQUIRES(recycle_mutex_)
- RELEASE(recycle_mutex_) {
+ void NOINLINE Recycle(uptr min_size, Callback cb)
+ SANITIZER_REQUIRES(recycle_mutex_) SANITIZER_RELEASE(recycle_mutex_) {
Cache tmp;
{
SpinMutexLock l(&cache_mutex_);
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_range.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_range.cpp
new file mode 100644
index 000000000000..68d79f18ac8d
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_range.cpp
@@ -0,0 +1,62 @@
+//===-- sanitizer_range.cpp -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_range.h"
+
+#include "sanitizer_common/sanitizer_array_ref.h"
+
+namespace __sanitizer {
+
+void Intersect(ArrayRef<Range> a, ArrayRef<Range> b,
+ InternalMmapVectorNoCtor<Range> &output) {
+ output.clear();
+
+ struct Event {
+ uptr val;
+ s8 diff1;
+ s8 diff2;
+ };
+
+ InternalMmapVector<Event> events;
+ for (const Range &r : a) {
+ CHECK_LE(r.begin, r.end);
+ events.push_back({r.begin, 1, 0});
+ events.push_back({r.end, -1, 0});
+ }
+
+ for (const Range &r : b) {
+ CHECK_LE(r.begin, r.end);
+ events.push_back({r.begin, 0, 1});
+ events.push_back({r.end, 0, -1});
+ }
+
+ Sort(events.data(), events.size(),
+ [](const Event &lh, const Event &rh) { return lh.val < rh.val; });
+
+ uptr start = 0;
+ sptr state1 = 0;
+ sptr state2 = 0;
+ for (const auto &e : events) {
+ if (e.val != start) {
+ DCHECK_GE(state1, 0);
+ DCHECK_GE(state2, 0);
+ if (state1 && state2) {
+ if (!output.empty() && start == output.back().end)
+ output.back().end = e.val;
+ else
+ output.push_back({start, e.val});
+ }
+ start = e.val;
+ }
+
+ state1 += e.diff1;
+ state2 += e.diff2;
+ }
+}
+
+} // namespace __sanitizer
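A usage sketch for the new Intersect(), with illustrative ranges; the half-open [begin, end) semantics follow from the sweep above, and the pointer-pair ArrayRef constructor is assumed to mirror llvm::ArrayRef:

    void ExampleIntersect() {
      using namespace __sanitizer;
      Range a[] = {{0, 10}, {20, 30}};
      Range b[] = {{5, 25}};
      InternalMmapVector<Range> out;
      Intersect(ArrayRef<Range>(a, a + 2), ArrayRef<Range>(b, b + 1), out);
      // out now holds {5, 10} and {20, 25}.
      CHECK_EQ(out.size(), 2);
      Range first = {5, 10};
      CHECK(out[0] == first);
    }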
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_range.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_range.h
new file mode 100644
index 000000000000..7c593e171ba2
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_range.h
@@ -0,0 +1,40 @@
+//===-- sanitizer_range.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Contains Range and related utilities.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_RANGE_H
+#define SANITIZER_RANGE_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_common/sanitizer_array_ref.h"
+
+namespace __sanitizer {
+
+struct Range {
+ uptr begin;
+ uptr end;
+};
+
+inline bool operator==(const Range &lhs, const Range &rhs) {
+ return lhs.begin == rhs.begin && lhs.end == rhs.end;
+}
+
+inline bool operator!=(const Range &lhs, const Range &rhs) {
+ return !(lhs == rhs);
+}
+
+// Calculates intersection of two sets of regions in O(N log N) time.
+void Intersect(ArrayRef<Range> a, ArrayRef<Range> b,
+ InternalMmapVectorNoCtor<Range> &output);
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_RANGE_H
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_redefine_builtins.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_redefine_builtins.h
new file mode 100644
index 000000000000..d24b179ef320
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_redefine_builtins.h
@@ -0,0 +1,56 @@
+//===-- sanitizer_redefine_builtins.h ---------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Redefine builtin functions to use internal versions. This is needed where
+// compiler optimizations end up producing unwanted libcalls!
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_COMMON_NO_REDEFINE_BUILTINS
+# ifndef SANITIZER_REDEFINE_BUILTINS_H
+# define SANITIZER_REDEFINE_BUILTINS_H
+
+// The asm hack only works with GCC and Clang.
+# if !defined(_WIN32)
+
+asm("memcpy = __sanitizer_internal_memcpy");
+asm("memmove = __sanitizer_internal_memmove");
+asm("memset = __sanitizer_internal_memset");
+
+# if defined(__cplusplus) && \
+ !defined(SANITIZER_COMMON_REDEFINE_BUILTINS_IN_STD)
+
+// The builtins should not be redefined in source files that make use of C++
+// standard libraries, in particular where C++STL headers with inline functions
+// are used. The redefinition in such cases would lead to ODR violations.
+//
+// Try to break the build in common cases where builtins shouldn't be redefined.
+namespace std {
+class Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file {
+ Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file(
+ const Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file&) = delete;
+ Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file& operator=(
+ const Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file&) = delete;
+};
+using array = Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file;
+using atomic = Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file;
+using function = Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file;
+using map = Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file;
+using set = Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file;
+using shared_ptr = Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file;
+using string = Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file;
+using unique_ptr = Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file;
+using unordered_map = Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file;
+using unordered_set = Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file;
+using vector = Define_SANITIZER_COMMON_NO_REDEFINE_BUILTINS_in_cpp_file;
+} // namespace std
+
+# endif // __cplusplus
+# endif // !_WIN32
+
+# endif // SANITIZER_REDEFINE_BUILTINS_H
+#endif // SANITIZER_COMMON_NO_REDEFINE_BUILTINS
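The std:: poison types above are there to break the build early; a translation unit that genuinely needs C++ standard headers opts out before including anything. A hypothetical example:

    // A sanitizer TU that uses the C++ standard library must opt out,
    // otherwise the poison typedefs above collide with the real classes:
    #define SANITIZER_COMMON_NO_REDEFINE_BUILTINS
    #include "sanitizer_common/sanitizer_redefine_builtins.h"
    #include <string>  // safe now: no asm aliases, no poison types

    std::string Describe() { return "ok"; }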
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_ring_buffer.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_ring_buffer.h
index 2a46e933b75d..6222a958b116 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_ring_buffer.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_ring_buffer.h
@@ -47,7 +47,9 @@ class RingBuffer {
void push(T t) {
*next_ = t;
next_--;
- // The condition below works only if sizeof(T) is divisible by sizeof(T*).
+ static_assert((sizeof(T) % sizeof(T *)) == 0,
+ "The condition below works only if sizeof(T) is divisible by "
+ "sizeof(T*).");
if (next_ <= reinterpret_cast<T*>(&next_))
next_ = last_;
}
@@ -86,10 +88,13 @@ class CompactRingBuffer {
// Lower bytes store the address of the next buffer element.
static constexpr int kPageSizeBits = 12;
static constexpr int kSizeShift = 56;
+ static constexpr int kSizeBits = 64 - kSizeShift;
static constexpr uptr kNextMask = (1ULL << kSizeShift) - 1;
uptr GetStorageSize() const { return (long_ >> kSizeShift) << kPageSizeBits; }
+ static uptr SignExtend(uptr x) { return ((sptr)x) << kSizeBits >> kSizeBits; }
+
void Init(void *storage, uptr size) {
CHECK_EQ(sizeof(CompactRingBuffer<T>), sizeof(void *));
CHECK(IsPowerOfTwo(size));
@@ -97,12 +102,14 @@ class CompactRingBuffer {
CHECK_LE(size, 128 << kPageSizeBits);
CHECK_EQ(size % 4096, 0);
CHECK_EQ(size % sizeof(T), 0);
- CHECK_EQ((uptr)storage % (size * 2), 0);
- long_ = (uptr)storage | ((size >> kPageSizeBits) << kSizeShift);
+ uptr st = (uptr)storage;
+ CHECK_EQ(st % (size * 2), 0);
+ CHECK_EQ(st, SignExtend(st & kNextMask));
+ long_ = (st & kNextMask) | ((size >> kPageSizeBits) << kSizeShift);
}
void SetNext(const T *next) {
- long_ = (long_ & ~kNextMask) | (uptr)next;
+ long_ = (long_ & ~kNextMask) | ((uptr)next & kNextMask);
}
public:
@@ -119,7 +126,7 @@ class CompactRingBuffer {
SetNext((const T *)storage + Idx);
}
- T *Next() const { return (T *)(long_ & kNextMask); }
+ T *Next() const { return (T *)(SignExtend(long_ & kNextMask)); }
void *StartOfStorage() const {
return (void *)((uptr)Next() & ~(GetStorageSize() - 1));
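Next() now sign-extends the 56-bit pointer field, so the packed address survives on targets whose canonical user addresses have the top bits set. A standalone sketch of the arithmetic with the constants from this hunk (the example address is illustrative):

    #include <cassert>
    #include <cstdint>

    int main() {
      const int kSizeShift = 56;
      const int kSizeBits = 64 - kSizeShift;  // 8 bits reserved for the size
      auto SignExtend = [=](uint64_t x) {
        // Shift the sign of bit 55 into place, then arithmetic-shift back.
        return (uint64_t)((int64_t)(x << kSizeBits) >> kSizeBits);
      };
      // A high address with its top byte stripped by kNextMask...
      uint64_t stripped = 0x00ffff8000001000ull;
      // ...is restored to its canonical form:
      assert(SignExtend(stripped) == 0xffffff8000001000ull);
      return 0;
    }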
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_signal_interceptors.inc b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_signal_interceptors.inc
index cefb870f7e25..94e4e2954a3b 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_signal_interceptors.inc
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_signal_interceptors.inc
@@ -29,12 +29,21 @@ using namespace __sanitizer;
#endif
#ifndef SIGNAL_INTERCEPTOR_SIGACTION_IMPL
-#define SIGNAL_INTERCEPTOR_SIGACTION_IMPL(signum, act, oldact) \
- { return REAL(sigaction_symname)(signum, act, oldact); }
+# define SIGNAL_INTERCEPTOR_SIGACTION_IMPL(signum, act, oldact) \
+ { \
+ if (!REAL(sigaction_symname)) { \
+ Printf( \
+ "Warning: REAL(sigaction_symname) == nullptr. This may happen " \
+ "if you link with ubsan statically. Sigaction will not work.\n"); \
+ return -1; \
+ } \
+ return REAL(sigaction_symname)(signum, act, oldact); \
+ }
#endif
#if SANITIZER_INTERCEPT_BSD_SIGNAL
INTERCEPTOR(uptr, bsd_signal, int signum, uptr handler) {
+ SIGNAL_INTERCEPTOR_ENTER();
if (GetHandleSignalMode(signum) == kHandleSignalExclusive) return 0;
SIGNAL_INTERCEPTOR_SIGNAL_IMPL(bsd_signal, signum, handler);
}
@@ -45,6 +54,7 @@ INTERCEPTOR(uptr, bsd_signal, int signum, uptr handler) {
#if SANITIZER_INTERCEPT_SIGNAL_AND_SIGACTION
INTERCEPTOR(uptr, signal, int signum, uptr handler) {
+ SIGNAL_INTERCEPTOR_ENTER();
if (GetHandleSignalMode(signum) == kHandleSignalExclusive)
return (uptr) nullptr;
SIGNAL_INTERCEPTOR_SIGNAL_IMPL(signal, signum, handler);
@@ -53,6 +63,7 @@ INTERCEPTOR(uptr, signal, int signum, uptr handler) {
INTERCEPTOR(int, sigaction_symname, int signum,
const __sanitizer_sigaction *act, __sanitizer_sigaction *oldact) {
+ SIGNAL_INTERCEPTOR_ENTER();
if (GetHandleSignalMode(signum) == kHandleSignalExclusive) {
if (!oldact) return 0;
act = nullptr;
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_solaris.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_solaris.cpp
index cb53eab8da15..62c40affc9ac 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_solaris.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_solaris.cpp
@@ -225,28 +225,6 @@ void FutexWait(atomic_uint32_t *p, u32 cmp) {
void FutexWake(atomic_uint32_t *p, u32 count) {}
-BlockingMutex::BlockingMutex() {
- CHECK(sizeof(mutex_t) <= sizeof(opaque_storage_));
- internal_memset(this, 0, sizeof(*this));
- CHECK_EQ(mutex_init((mutex_t *)&opaque_storage_, USYNC_THREAD, NULL), 0);
-}
-
-void BlockingMutex::Lock() {
- CHECK(sizeof(mutex_t) <= sizeof(opaque_storage_));
- CHECK_NE(owner_, (uptr)thr_self());
- CHECK_EQ(mutex_lock((mutex_t *)&opaque_storage_), 0);
- CHECK(!owner_);
- owner_ = (uptr)thr_self();
-}
-
-void BlockingMutex::Unlock() {
- CHECK(owner_ == (uptr)thr_self());
- owner_ = 0;
- CHECK_EQ(mutex_unlock((mutex_t *)&opaque_storage_), 0);
-}
-
-void BlockingMutex::CheckLocked() const { CHECK_EQ((uptr)thr_self(), owner_); }
-
} // namespace __sanitizer
#endif // SANITIZER_SOLARIS
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_solaris.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_solaris.h
new file mode 100644
index 000000000000..2a21693efbf1
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_solaris.h
@@ -0,0 +1,56 @@
+//===-- sanitizer_solaris.h -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer runtime. It contains Solaris-specific
+// definitions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_SOLARIS_H
+#define SANITIZER_SOLARIS_H
+
+#include "sanitizer_internal_defs.h"
+
+#if SANITIZER_SOLARIS
+
+#include <link.h>
+
+namespace __sanitizer {
+
+// Beginning of declaration from OpenSolaris/Illumos
+// $SRC/cmd/sgs/include/rtld.h.
+struct Rt_map {
+ Link_map rt_public;
+ const char *rt_pathname;
+ ulong_t rt_padstart;
+ ulong_t rt_padimlen;
+ ulong_t rt_msize;
+ uint_t rt_flags;
+ uint_t rt_flags1;
+ ulong_t rt_tlsmodid;
+};
+
+// Structure matching the Solaris 11.4 struct dl_phdr_info used to determine
+// presence of dlpi_tls_modid field at runtime. Cf. Solaris 11.4
+// dl_iterate_phdr(3C), Example 2.
+struct dl_phdr_info_test {
+ ElfW(Addr) dlpi_addr;
+ const char *dlpi_name;
+ const ElfW(Phdr) * dlpi_phdr;
+ ElfW(Half) dlpi_phnum;
+ u_longlong_t dlpi_adds;
+ u_longlong_t dlpi_subs;
+ size_t dlpi_tls_modid;
+ void *dlpi_tls_data;
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_SOLARIS
+
+#endif // SANITIZER_SOLARIS_H
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp
new file mode 100644
index 000000000000..c11df0ddfde4
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp
@@ -0,0 +1,384 @@
+//===-- sanitizer_stack_store.cpp -------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_stack_store.h"
+
+#include "sanitizer_atomic.h"
+#include "sanitizer_common.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_leb128.h"
+#include "sanitizer_lzw.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_stacktrace.h"
+
+namespace __sanitizer {
+
+namespace {
+struct StackTraceHeader {
+ static constexpr u32 kStackSizeBits = 8;
+
+ u8 size;
+ u8 tag;
+ explicit StackTraceHeader(const StackTrace &trace)
+ : size(Min<uptr>(trace.size, (1u << 8) - 1)), tag(trace.tag) {
+ CHECK_EQ(trace.tag, static_cast<uptr>(tag));
+ }
+ explicit StackTraceHeader(uptr h)
+ : size(h & ((1 << kStackSizeBits) - 1)), tag(h >> kStackSizeBits) {}
+
+ uptr ToUptr() const {
+ return static_cast<uptr>(size) | (static_cast<uptr>(tag) << kStackSizeBits);
+ }
+};
+} // namespace
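// A quick standalone check of the header encoding above, assuming the same
// layout: the low kStackSizeBits bits carry the (possibly truncated) frame
// count, the remaining bits carry the tag, and the two constructors
// round-trip through ToUptr().
#include <cassert>
#include <cstdint>

int main() {
  const unsigned kStackSizeBits = 8;
  std::uintptr_t size = 200, tag = 5;
  std::uintptr_t h = size | (tag << kStackSizeBits);   // ToUptr()
  assert((h & ((1u << kStackSizeBits) - 1)) == size);  // decoded size
  assert((h >> kStackSizeBits) == tag);                // decoded tag
}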
+
+StackStore::Id StackStore::Store(const StackTrace &trace, uptr *pack) {
+ if (!trace.size && !trace.tag)
+ return 0;
+ StackTraceHeader h(trace);
+ uptr idx = 0;
+ *pack = 0;
+ uptr *stack_trace = Alloc(h.size + 1, &idx, pack);
+ // No more space.
+ if (stack_trace == nullptr)
+ return 0;
+ *stack_trace = h.ToUptr();
+ internal_memcpy(stack_trace + 1, trace.trace, h.size * sizeof(uptr));
+ *pack += blocks_[GetBlockIdx(idx)].Stored(h.size + 1);
+ return OffsetToId(idx);
+}
+
+StackTrace StackStore::Load(Id id) {
+ if (!id)
+ return {};
+ uptr idx = IdToOffset(id);
+ uptr block_idx = GetBlockIdx(idx);
+ CHECK_LT(block_idx, ARRAY_SIZE(blocks_));
+ const uptr *stack_trace = blocks_[block_idx].GetOrUnpack(this);
+ if (!stack_trace)
+ return {};
+ stack_trace += GetInBlockIdx(idx);
+ StackTraceHeader h(*stack_trace);
+ return StackTrace(stack_trace + 1, h.size, h.tag);
+}
+
+uptr StackStore::Allocated() const {
+ return atomic_load_relaxed(&allocated_) + sizeof(*this);
+}
+
+uptr *StackStore::Alloc(uptr count, uptr *idx, uptr *pack) {
+ for (;;) {
+ // Optimistic lock-free allocation, essentially try to bump the
+ // total_frames_.
+ uptr start = atomic_fetch_add(&total_frames_, count, memory_order_relaxed);
+ uptr block_idx = GetBlockIdx(start);
+ uptr last_idx = GetBlockIdx(start + count - 1);
+ if (LIKELY(block_idx == last_idx)) {
+ // Fits into a single block.
+ // No more available blocks. Indicate inability to allocate more memory.
+ if (block_idx >= ARRAY_SIZE(blocks_))
+ return nullptr;
+ *idx = start;
+ return blocks_[block_idx].GetOrCreate(this) + GetInBlockIdx(start);
+ }
+
+ // Retry. We can't use a range that spans two different blocks.
+ CHECK_LE(count, kBlockSizeFrames);
+ uptr in_first = kBlockSizeFrames - GetInBlockIdx(start);
+ // Mark the tail/head of these blocks as "stored" to avoid waiting before we
+ // can Pack().
+ *pack += blocks_[block_idx].Stored(in_first);
+ *pack += blocks_[last_idx].Stored(count - in_first);
+ }
+}
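// A minimal sketch of the lock-free bump allocation above: atomically advance
// a global frame counter, and retry whenever the claimed range would straddle
// a block boundary (in the patch the skipped tail and head are merely
// accounted for as "stored"). Constants and names here are illustrative.
#include <atomic>
#include <cstddef>

constexpr std::size_t kFramesPerBlock = 1 << 20;
std::atomic<std::size_t> total_frames{0};

// Returns the start of a count-frame range guaranteed to lie in one block.
std::size_t BumpAlloc(std::size_t count) {
  for (;;) {
    std::size_t start =
        total_frames.fetch_add(count, std::memory_order_relaxed);
    if (start / kFramesPerBlock == (start + count - 1) / kFramesPerBlock)
      return start;  // fits into a single block
    // Otherwise the tail of one block and head of the next are wasted; retry.
  }
}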
+
+void *StackStore::Map(uptr size, const char *mem_type) {
+ atomic_fetch_add(&allocated_, size, memory_order_relaxed);
+ return MmapNoReserveOrDie(size, mem_type);
+}
+
+void StackStore::Unmap(void *addr, uptr size) {
+ atomic_fetch_sub(&allocated_, size, memory_order_relaxed);
+ UnmapOrDie(addr, size);
+}
+
+uptr StackStore::Pack(Compression type) {
+ uptr res = 0;
+ for (BlockInfo &b : blocks_) res += b.Pack(type, this);
+ return res;
+}
+
+void StackStore::LockAll() {
+ for (BlockInfo &b : blocks_) b.Lock();
+}
+
+void StackStore::UnlockAll() {
+ for (BlockInfo &b : blocks_) b.Unlock();
+}
+
+void StackStore::TestOnlyUnmap() {
+ for (BlockInfo &b : blocks_) b.TestOnlyUnmap(this);
+ internal_memset(this, 0, sizeof(*this));
+}
+
+uptr *StackStore::BlockInfo::Get() const {
+ // Idiomatic double-checked locking uses memory_order_acquire here. But
+ // relaxed is fine for us, justification is similar to
+ // TwoLevelMap::GetOrCreate.
+ return reinterpret_cast<uptr *>(atomic_load_relaxed(&data_));
+}
+
+uptr *StackStore::BlockInfo::Create(StackStore *store) {
+ SpinMutexLock l(&mtx_);
+ uptr *ptr = Get();
+ if (!ptr) {
+ ptr = reinterpret_cast<uptr *>(store->Map(kBlockSizeBytes, "StackStore"));
+ atomic_store(&data_, reinterpret_cast<uptr>(ptr), memory_order_release);
+ }
+ return ptr;
+}
+
+uptr *StackStore::BlockInfo::GetOrCreate(StackStore *store) {
+ uptr *ptr = Get();
+ if (LIKELY(ptr))
+ return ptr;
+ return Create(store);
+}
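// The Create/GetOrCreate pair above is double-checked locking: a cheap
// lock-free fast path, then a re-check under the mutex before mapping. A
// generic textbook sketch (note the patch deliberately relaxes the fast-path
// load, as its comment explains; the classic form uses acquire):
#include <atomic>
#include <mutex>

template <class T, class Make>
T *GetOrCreate(std::atomic<T *> &slot, std::mutex &m, Make make) {
  if (T *p = slot.load(std::memory_order_acquire))  // fast path, no lock
    return p;
  std::lock_guard<std::mutex> l(m);
  T *p = slot.load(std::memory_order_relaxed);  // re-check under the lock
  if (!p) {
    p = make();
    slot.store(p, std::memory_order_release);
  }
  return p;
}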
+
+class SLeb128Encoder {
+ public:
+ SLeb128Encoder(u8 *begin, u8 *end) : begin(begin), end(end) {}
+
+ bool operator==(const SLeb128Encoder &other) const {
+ return begin == other.begin;
+ }
+
+ bool operator!=(const SLeb128Encoder &other) const {
+ return begin != other.begin;
+ }
+
+ SLeb128Encoder &operator=(uptr v) {
+ sptr diff = v - previous;
+ begin = EncodeSLEB128(diff, begin, end);
+ previous = v;
+ return *this;
+ }
+ SLeb128Encoder &operator*() { return *this; }
+ SLeb128Encoder &operator++() { return *this; }
+
+ u8 *base() const { return begin; }
+
+ private:
+ u8 *begin;
+ u8 *end;
+ uptr previous = 0;
+};
+
+class SLeb128Decoder {
+ public:
+ SLeb128Decoder(const u8 *begin, const u8 *end) : begin(begin), end(end) {}
+
+ bool operator==(const SLeb128Decoder &other) const {
+ return begin == other.begin;
+ }
+
+ bool operator!=(const SLeb128Decoder &other) const {
+ return begin != other.begin;
+ }
+
+ uptr operator*() {
+ sptr diff;
+ begin = DecodeSLEB128(begin, end, &diff);
+ previous += diff;
+ return previous;
+ }
+ SLeb128Decoder &operator++() { return *this; }
+
+ SLeb128Decoder operator++(int) { return *this; }
+
+ private:
+ const u8 *begin;
+ const u8 *end;
+ uptr previous = 0;
+};
+
+static u8 *CompressDelta(const uptr *from, const uptr *from_end, u8 *to,
+ u8 *to_end) {
+ SLeb128Encoder encoder(to, to_end);
+ for (; from != from_end; ++from, ++encoder) *encoder = *from;
+ return encoder.base();
+}
+
+static uptr *UncompressDelta(const u8 *from, const u8 *from_end, uptr *to,
+ uptr *to_end) {
+ SLeb128Decoder decoder(from, from_end);
+ SLeb128Decoder end(from_end, from_end);
+ for (; decoder != end; ++to, ++decoder) *to = *decoder;
+ CHECK_EQ(to, to_end);
+ return to;
+}
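// Why delta-encoding pays off here: return addresses within one trace are
// numerically close, so the signed differences after the first frame are
// small and fit in one or two SLEB128 bytes instead of eight. A worked
// example with illustrative addresses:
#include <cstdint>
#include <cstdio>

int main() {
  const std::uint64_t frames[] = {0x7f0000001000, 0x7f0000001040,
                                  0x7f0000000fe0};
  std::uint64_t prev = 0;
  for (std::uint64_t f : frames) {
    // Output: one large first delta, then +64, then -96.
    std::printf("%+lld\n", (long long)(f - prev));
    prev = f;
  }
}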
+
+static u8 *CompressLzw(const uptr *from, const uptr *from_end, u8 *to,
+ u8 *to_end) {
+ SLeb128Encoder encoder(to, to_end);
+ encoder = LzwEncode<uptr>(from, from_end, encoder);
+ return encoder.base();
+}
+
+static uptr *UncompressLzw(const u8 *from, const u8 *from_end, uptr *to,
+ uptr *to_end) {
+ SLeb128Decoder decoder(from, from_end);
+ SLeb128Decoder end(from_end, from_end);
+ to = LzwDecode<uptr>(decoder, end, to);
+ CHECK_EQ(to, to_end);
+ return to;
+}
+
+#if defined(_MSC_VER) && !defined(__clang__)
+# pragma warning(push)
+// Disable 'nonstandard extension used: zero-sized array in struct/union'.
+# pragma warning(disable : 4200)
+#endif
+namespace {
+struct PackedHeader {
+ uptr size;
+ StackStore::Compression type;
+ u8 data[];
+};
+} // namespace
+#if defined(_MSC_VER) && !defined(__clang__)
+# pragma warning(pop)
+#endif
+
+uptr *StackStore::BlockInfo::GetOrUnpack(StackStore *store) {
+ SpinMutexLock l(&mtx_);
+ switch (state) {
+ case State::Storing:
+ state = State::Unpacked;
+ FALLTHROUGH;
+ case State::Unpacked:
+ return Get();
+ case State::Packed:
+ break;
+ }
+
+ u8 *ptr = reinterpret_cast<u8 *>(Get());
+ CHECK_NE(nullptr, ptr);
+ const PackedHeader *header = reinterpret_cast<const PackedHeader *>(ptr);
+ CHECK_LE(header->size, kBlockSizeBytes);
+ CHECK_GE(header->size, sizeof(PackedHeader));
+
+ uptr packed_size_aligned = RoundUpTo(header->size, GetPageSizeCached());
+
+ uptr *unpacked =
+ reinterpret_cast<uptr *>(store->Map(kBlockSizeBytes, "StackStoreUnpack"));
+
+ uptr *unpacked_end;
+ switch (header->type) {
+ case Compression::Delta:
+ unpacked_end = UncompressDelta(header->data, ptr + header->size, unpacked,
+ unpacked + kBlockSizeFrames);
+ break;
+ case Compression::LZW:
+ unpacked_end = UncompressLzw(header->data, ptr + header->size, unpacked,
+ unpacked + kBlockSizeFrames);
+ break;
+ default:
+ UNREACHABLE("Unexpected type");
+ break;
+ }
+
+ CHECK_EQ(kBlockSizeFrames, unpacked_end - unpacked);
+
+ MprotectReadOnly(reinterpret_cast<uptr>(unpacked), kBlockSizeBytes);
+ atomic_store(&data_, reinterpret_cast<uptr>(unpacked), memory_order_release);
+ store->Unmap(ptr, packed_size_aligned);
+
+ state = State::Unpacked;
+ return Get();
+}
+
+uptr StackStore::BlockInfo::Pack(Compression type, StackStore *store) {
+ if (type == Compression::None)
+ return 0;
+
+ SpinMutexLock l(&mtx_);
+ switch (state) {
+ case State::Unpacked:
+ case State::Packed:
+ return 0;
+ case State::Storing:
+ break;
+ }
+
+ uptr *ptr = Get();
+ if (!ptr || !Stored(0))
+ return 0;
+
+ u8 *packed =
+ reinterpret_cast<u8 *>(store->Map(kBlockSizeBytes, "StackStorePack"));
+ PackedHeader *header = reinterpret_cast<PackedHeader *>(packed);
+ u8 *alloc_end = packed + kBlockSizeBytes;
+
+ u8 *packed_end = nullptr;
+ switch (type) {
+ case Compression::Delta:
+ packed_end =
+ CompressDelta(ptr, ptr + kBlockSizeFrames, header->data, alloc_end);
+ break;
+ case Compression::LZW:
+ packed_end =
+ CompressLzw(ptr, ptr + kBlockSizeFrames, header->data, alloc_end);
+ break;
+ default:
+ UNREACHABLE("Unexpected type");
+ break;
+ }
+
+ header->type = type;
+ header->size = packed_end - packed;
+
+ VPrintf(1, "Packed block of %zu KiB to %zu KiB\n", kBlockSizeBytes >> 10,
+ header->size >> 10);
+
+ if (kBlockSizeBytes - header->size < kBlockSizeBytes / 8) {
+ VPrintf(1, "Undo and keep block unpacked\n");
+ MprotectReadOnly(reinterpret_cast<uptr>(ptr), kBlockSizeBytes);
+ store->Unmap(packed, kBlockSizeBytes);
+ state = State::Unpacked;
+ return 0;
+ }
+
+ uptr packed_size_aligned = RoundUpTo(header->size, GetPageSizeCached());
+ store->Unmap(packed + packed_size_aligned,
+ kBlockSizeBytes - packed_size_aligned);
+ MprotectReadOnly(reinterpret_cast<uptr>(packed), packed_size_aligned);
+
+ atomic_store(&data_, reinterpret_cast<uptr>(packed), memory_order_release);
+ store->Unmap(ptr, kBlockSizeBytes);
+
+ state = State::Packed;
+ return kBlockSizeBytes - packed_size_aligned;
+}
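// The arithmetic behind the keep-or-undo decision above, as a standalone
// helper: packing is kept only if it frees at least 1/8 of the block, and the
// actual saving is counted in whole pages because only the page-aligned tail
// is unmapped.
#include <cstddef>

constexpr std::size_t RoundUpTo(std::size_t n, std::size_t a) {
  return (n + a - 1) / a * a;
}

// Returns bytes released, or 0 when packing is undone as not worth keeping.
std::size_t PackedSavings(std::size_t block_bytes, std::size_t packed_bytes,
                          std::size_t page_size) {
  if (block_bytes - packed_bytes < block_bytes / 8)
    return 0;  // too little gain; the patch reverts to the unpacked block
  return block_bytes - RoundUpTo(packed_bytes, page_size);
}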
+
+void StackStore::BlockInfo::TestOnlyUnmap(StackStore *store) {
+ if (uptr *ptr = Get())
+ store->Unmap(ptr, kBlockSizeBytes);
+}
+
+bool StackStore::BlockInfo::Stored(uptr n) {
+ return n + atomic_fetch_add(&stored_, n, memory_order_release) ==
+ kBlockSizeFrames;
+}
+
+bool StackStore::BlockInfo::IsPacked() const {
+ SpinMutexLock l(&mtx_);
+ return state == State::Packed;
+}
+
+} // namespace __sanitizer
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
new file mode 100644
index 000000000000..4f1a8caac6ed
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
@@ -0,0 +1,121 @@
+//===-- sanitizer_stack_store.h ---------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_STACK_STORE_H
+#define SANITIZER_STACK_STORE_H
+
+#include "sanitizer_atomic.h"
+#include "sanitizer_common.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_stacktrace.h"
+
+namespace __sanitizer {
+
+class StackStore {
+ static constexpr uptr kBlockSizeFrames = 0x100000;
+ static constexpr uptr kBlockCount = 0x1000;
+ static constexpr uptr kBlockSizeBytes = kBlockSizeFrames * sizeof(uptr);
+
+ public:
+ enum class Compression : u8 {
+ None = 0,
+ Delta,
+ LZW,
+ };
+
+ constexpr StackStore() = default;
+
+ using Id = u32; // Enough for 2^32 * sizeof(uptr) bytes of traces.
+ static_assert(u64(kBlockCount) * kBlockSizeFrames == 1ull << (sizeof(Id) * 8),
+ "");
+
+ Id Store(const StackTrace &trace,
+ uptr *pack /* number of blocks completed by this call */);
+ StackTrace Load(Id id);
+ uptr Allocated() const;
+
+ // Packs all blocks which don't expect any more writes. A block is packed
+ // only once. As soon as a trace from that block is requested, it is unpacked
+ // and stays unpacked after that.
+ // Returns the number of released bytes.
+ uptr Pack(Compression type);
+
+ void LockAll();
+ void UnlockAll();
+
+ void TestOnlyUnmap();
+
+ private:
+ friend class StackStoreTest;
+ static constexpr uptr GetBlockIdx(uptr frame_idx) {
+ return frame_idx / kBlockSizeFrames;
+ }
+
+ static constexpr uptr GetInBlockIdx(uptr frame_idx) {
+ return frame_idx % kBlockSizeFrames;
+ }
+
+ static constexpr uptr IdToOffset(Id id) {
+ CHECK_NE(id, 0);
+ return id - 1; // Avoid zero as id.
+ }
+
+ static constexpr uptr OffsetToId(Id id) {
+ // This maps UINT32_MAX to 0, and it will be retrieved as an empty stack.
+ // But this is not a problem as we will not be able to store anything after
+ // that anyway.
+ return id + 1; // Avoid zero as id.
+ }
+
+ uptr *Alloc(uptr count, uptr *idx, uptr *pack);
+
+ void *Map(uptr size, const char *mem_type);
+ void Unmap(void *addr, uptr size);
+
+ // Total number of allocated frames.
+ atomic_uintptr_t total_frames_ = {};
+
+ // Tracks total allocated memory in bytes.
+ atomic_uintptr_t allocated_ = {};
+
+ // Each block holds a pointer to exactly kBlockSizeFrames frames.
+ class BlockInfo {
+ atomic_uintptr_t data_;
+ // Counter to track store progress to know when we can Pack() the block.
+ atomic_uint32_t stored_;
+ // Protects alloc of new blocks.
+ mutable StaticSpinMutex mtx_;
+
+ enum class State : u8 {
+ Storing = 0,
+ Packed,
+ Unpacked,
+ };
+ State state SANITIZER_GUARDED_BY(mtx_);
+
+ uptr *Create(StackStore *store);
+
+ public:
+ uptr *Get() const;
+ uptr *GetOrCreate(StackStore *store);
+ uptr *GetOrUnpack(StackStore *store);
+ uptr Pack(Compression type, StackStore *store);
+ void TestOnlyUnmap(StackStore *store);
+ bool Stored(uptr n);
+ bool IsPacked() const;
+ void Lock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { mtx_.Lock(); }
+ void Unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { mtx_.Unlock(); }
+ };
+
+ BlockInfo blocks_[kBlockCount] = {};
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_STACK_STORE_H
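// A standalone round-trip of the id scheme declared above: ids are frame
// offsets shifted by one so that 0 stays reserved for "no trace", and an
// offset splits into a block index and an in-block index. The constant
// mirrors the header; the sample offset is illustrative.
#include <cassert>
#include <cstdint>

constexpr std::uint64_t kBlockSizeFrames = 0x100000;

int main() {
  std::uint64_t offset = 3 * kBlockSizeFrames + 42;  // some frame offset
  std::uint32_t id = static_cast<std::uint32_t>(offset + 1);  // OffsetToId
  std::uint64_t back = std::uint64_t{id} - 1;                 // IdToOffset
  assert(back == offset);
  assert(back / kBlockSizeFrames == 3);   // GetBlockIdx
  assert(back % kBlockSizeFrames == 42);  // GetInBlockIdx
}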
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
index 44a95214e38b..3776e8c97057 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
@@ -12,107 +12,219 @@
#include "sanitizer_stackdepot.h"
+#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
#include "sanitizer_hash.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_stack_store.h"
#include "sanitizer_stackdepotbase.h"
namespace __sanitizer {
struct StackDepotNode {
- StackDepotNode *link;
- u32 id;
- atomic_uint32_t hash_and_use_count; // hash_bits : 12; use_count : 20;
- u32 size;
- u32 tag;
- uptr stack[1]; // [size]
+ using hash_type = u64;
+ hash_type stack_hash;
+ u32 link;
+ StackStore::Id store_id;
static const u32 kTabSizeLog = SANITIZER_ANDROID ? 16 : 20;
- // Lower kTabSizeLog bits are equal for all items in one bucket.
- // We use these bits to store the per-stack use counter.
- static const u32 kUseCountBits = kTabSizeLog;
- static const u32 kMaxUseCount = 1 << kUseCountBits;
- static const u32 kUseCountMask = (1 << kUseCountBits) - 1;
- static const u32 kHashMask = ~kUseCountMask;
typedef StackTrace args_type;
- bool eq(u32 hash, const args_type &args) const {
- u32 hash_bits =
- atomic_load(&hash_and_use_count, memory_order_relaxed) & kHashMask;
- if ((hash & kHashMask) != hash_bits || args.size != size || args.tag != tag)
- return false;
- uptr i = 0;
- for (; i < size; i++) {
- if (stack[i] != args.trace[i]) return false;
- }
- return true;
- }
- static uptr storage_size(const args_type &args) {
- return sizeof(StackDepotNode) + (args.size - 1) * sizeof(uptr);
+ bool eq(hash_type hash, const args_type &args) const {
+ return hash == stack_hash;
}
- static u32 hash(const args_type &args) {
- MurMur2HashBuilder H(args.size * sizeof(uptr));
+ static uptr allocated();
+ static hash_type hash(const args_type &args) {
+ MurMur2Hash64Builder H(args.size * sizeof(uptr));
for (uptr i = 0; i < args.size; i++) H.add(args.trace[i]);
+ H.add(args.tag);
return H.get();
}
static bool is_valid(const args_type &args) {
return args.size > 0 && args.trace;
}
- void store(const args_type &args, u32 hash) {
- atomic_store(&hash_and_use_count, hash & kHashMask, memory_order_relaxed);
- size = args.size;
- tag = args.tag;
- internal_memcpy(stack, args.trace, size * sizeof(uptr));
- }
- args_type load() const {
- return args_type(&stack[0], size, tag);
- }
- StackDepotHandle get_handle() { return StackDepotHandle(this); }
+ void store(u32 id, const args_type &args, hash_type hash);
+ args_type load(u32 id) const;
+ static StackDepotHandle get_handle(u32 id);
typedef StackDepotHandle handle_type;
};
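// Design note on the eq() above: nodes are now considered equal whenever
// their 64-bit hashes match, so the frames themselves (which may be packed)
// never have to be re-read. The accepted risk is a hash collision; by the
// birthday bound, for n distinct traces the probability is roughly
// n^2 / 2^65 - e.g. about 2^-13 for n = 2^26 (~67 million traces).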
-COMPILER_CHECK(StackDepotNode::kMaxUseCount == (u32)kStackDepotMaxUseCount);
-
-u32 StackDepotHandle::id() { return node_->id; }
-int StackDepotHandle::use_count() {
- return atomic_load(&node_->hash_and_use_count, memory_order_relaxed) &
- StackDepotNode::kUseCountMask;
-}
-void StackDepotHandle::inc_use_count_unsafe() {
- u32 prev =
- atomic_fetch_add(&node_->hash_and_use_count, 1, memory_order_relaxed) &
- StackDepotNode::kUseCountMask;
- CHECK_LT(prev + 1, StackDepotNode::kMaxUseCount);
-}
+static StackStore stackStore;
// FIXME(dvyukov): this single reserved bit is used in TSan.
typedef StackDepotBase<StackDepotNode, 1, StackDepotNode::kTabSizeLog>
StackDepot;
static StackDepot theDepot;
+// Keep mutable data out of frequently accessed nodes to improve caching
+// efficiency.
+static TwoLevelMap<atomic_uint32_t, StackDepot::kNodesSize1,
+ StackDepot::kNodesSize2>
+ useCounts;
+
+int StackDepotHandle::use_count() const {
+ return atomic_load_relaxed(&useCounts[id_]);
+}
+
+void StackDepotHandle::inc_use_count_unsafe() {
+ atomic_fetch_add(&useCounts[id_], 1, memory_order_relaxed);
+}
+
+uptr StackDepotNode::allocated() {
+ return stackStore.Allocated() + useCounts.MemoryUsage();
+}
+
+static void CompressStackStore() {
+ u64 start = Verbosity() >= 1 ? MonotonicNanoTime() : 0;
+ uptr diff = stackStore.Pack(static_cast<StackStore::Compression>(
+ Abs(common_flags()->compress_stack_depot)));
+ if (!diff)
+ return;
+ if (Verbosity() >= 1) {
+ u64 finish = MonotonicNanoTime();
+ uptr total_before = theDepot.GetStats().allocated + diff;
+ VPrintf(1, "%s: StackDepot released %zu KiB out of %zu KiB in %llu ms\n",
+ SanitizerToolName, diff >> 10, total_before >> 10,
+ (finish - start) / 1000000);
+ }
+}
+
+namespace {
+
+class CompressThread {
+ public:
+ constexpr CompressThread() = default;
+ void NewWorkNotify();
+ void Stop();
+ void LockAndStop() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
+ void Unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
+
+ private:
+ enum class State {
+ NotStarted = 0,
+ Started,
+ Failed,
+ Stopped,
+ };
+
+ void Run();
+
+ bool WaitForWork() {
+ semaphore_.Wait();
+ return atomic_load(&run_, memory_order_acquire);
+ }
+
+ Semaphore semaphore_ = {};
+ StaticSpinMutex mutex_ = {};
+ State state_ SANITIZER_GUARDED_BY(mutex_) = State::NotStarted;
+ void *thread_ SANITIZER_GUARDED_BY(mutex_) = nullptr;
+ atomic_uint8_t run_ = {};
+};
+
+static CompressThread compress_thread;
+
+void CompressThread::NewWorkNotify() {
+ int compress = common_flags()->compress_stack_depot;
+ if (!compress)
+ return;
+ if (compress > 0 /* for testing or debugging */) {
+ SpinMutexLock l(&mutex_);
+ if (state_ == State::NotStarted) {
+ atomic_store(&run_, 1, memory_order_release);
+ CHECK_EQ(nullptr, thread_);
+ thread_ = internal_start_thread(
+ [](void *arg) -> void * {
+ reinterpret_cast<CompressThread *>(arg)->Run();
+ return nullptr;
+ },
+ this);
+ state_ = thread_ ? State::Started : State::Failed;
+ }
+ if (state_ == State::Started) {
+ semaphore_.Post();
+ return;
+ }
+ }
+ CompressStackStore();
+}
+
+void CompressThread::Run() {
+ VPrintf(1, "%s: StackDepot compression thread started\n", SanitizerToolName);
+ while (WaitForWork()) CompressStackStore();
+ VPrintf(1, "%s: StackDepot compression thread stopped\n", SanitizerToolName);
+}
+
+void CompressThread::Stop() {
+ void *t = nullptr;
+ {
+ SpinMutexLock l(&mutex_);
+ if (state_ != State::Started)
+ return;
+ state_ = State::Stopped;
+ CHECK_NE(nullptr, thread_);
+ t = thread_;
+ thread_ = nullptr;
+ }
+ atomic_store(&run_, 0, memory_order_release);
+ semaphore_.Post();
+ internal_join_thread(t);
+}
+
+void CompressThread::LockAndStop() {
+ mutex_.Lock();
+ if (state_ != State::Started)
+ return;
+ CHECK_NE(nullptr, thread_);
+
+ atomic_store(&run_, 0, memory_order_release);
+ semaphore_.Post();
+ internal_join_thread(thread_);
+ // Allow restarting after Unlock() if needed.
+ state_ = State::NotStarted;
+ thread_ = nullptr;
+}
+
+void CompressThread::Unlock() { mutex_.Unlock(); }
+
+} // namespace
-StackDepotStats *StackDepotGetStats() {
- return theDepot.GetStats();
+void StackDepotNode::store(u32 id, const args_type &args, hash_type hash) {
+ stack_hash = hash;
+ uptr pack = 0;
+ store_id = stackStore.Store(args, &pack);
+ if (LIKELY(!pack))
+ return;
+ compress_thread.NewWorkNotify();
}
-u32 StackDepotPut(StackTrace stack) {
- StackDepotHandle h = theDepot.Put(stack);
- return h.valid() ? h.id() : 0;
+StackDepotNode::args_type StackDepotNode::load(u32 id) const {
+ if (!store_id)
+ return {};
+ return stackStore.Load(store_id);
}
+StackDepotStats StackDepotGetStats() { return theDepot.GetStats(); }
+
+u32 StackDepotPut(StackTrace stack) { return theDepot.Put(stack); }
+
StackDepotHandle StackDepotPut_WithHandle(StackTrace stack) {
- return theDepot.Put(stack);
+ return StackDepotNode::get_handle(theDepot.Put(stack));
}
StackTrace StackDepotGet(u32 id) {
return theDepot.Get(id);
}
-void StackDepotLockAll() {
- theDepot.LockAll();
+void StackDepotLockBeforeFork() {
+ theDepot.LockBeforeFork();
+ compress_thread.LockAndStop();
+ stackStore.LockAll();
}
-void StackDepotUnlockAll() {
- theDepot.UnlockAll();
+void StackDepotUnlockAfterFork(bool fork_child) {
+ stackStore.UnlockAll();
+ compress_thread.Unlock();
+ theDepot.UnlockAfterFork(fork_child);
}
void StackDepotPrintAll() {
@@ -121,34 +233,15 @@ void StackDepotPrintAll() {
#endif
}
-bool StackDepotReverseMap::IdDescPair::IdComparator(
- const StackDepotReverseMap::IdDescPair &a,
- const StackDepotReverseMap::IdDescPair &b) {
- return a.id < b.id;
-}
+void StackDepotStopBackgroundThread() { compress_thread.Stop(); }
-StackDepotReverseMap::StackDepotReverseMap() {
- map_.reserve(StackDepotGetStats()->n_uniq_ids + 100);
- for (int idx = 0; idx < StackDepot::kTabSize; idx++) {
- atomic_uintptr_t *p = &theDepot.tab[idx];
- uptr v = atomic_load(p, memory_order_consume);
- StackDepotNode *s = (StackDepotNode*)(v & ~1);
- for (; s; s = s->link) {
- IdDescPair pair = {s->id, s};
- map_.push_back(pair);
- }
- }
- Sort(map_.data(), map_.size(), &IdDescPair::IdComparator);
+StackDepotHandle StackDepotNode::get_handle(u32 id) {
+ return StackDepotHandle(&theDepot.nodes[id], id);
}
-StackTrace StackDepotReverseMap::Get(u32 id) {
- if (!map_.size())
- return StackTrace();
- IdDescPair pair = {id, nullptr};
- uptr idx = InternalLowerBound(map_, pair, IdDescPair::IdComparator);
- if (idx > map_.size() || map_[idx].id != id)
- return StackTrace();
- return map_[idx].desc->load();
+void StackDepotTestOnlyUnmap() {
+ theDepot.TestOnlyUnmap();
+ stackStore.TestOnlyUnmap();
}
} // namespace __sanitizer
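// A compact sketch of the worker pattern CompressThread implements, using
// portable primitives instead of the runtime's internal thread/semaphore: the
// thread starts lazily on the first notification, sleeps until work arrives,
// and stops once the run flag is cleared and it is woken one last time.
#include <condition_variable>
#include <mutex>
#include <thread>

class Worker {
 public:
  void Notify() {
    {
      std::lock_guard<std::mutex> l(m_);
      if (!t_.joinable())  // lazy start on first work item
        t_ = std::thread([this] { Run(); });
      ++pending_;
    }
    cv_.notify_one();
  }
  void Stop() {
    {
      std::lock_guard<std::mutex> l(m_);
      if (!t_.joinable())
        return;
      run_ = false;
    }
    cv_.notify_one();
    t_.join();
  }

 private:
  void Run() {
    std::unique_lock<std::mutex> l(m_);
    for (;;) {
      cv_.wait(l, [this] { return pending_ > 0 || !run_; });
      if (!run_)
        return;
      --pending_;
      // ... one unit of work goes here (CompressStackStore() in the patch) ...
    }
  }
  std::mutex m_;
  std::condition_variable cv_;
  std::thread t_;
  int pending_ = 0;
  bool run_ = true;
};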
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.h
index 0e26c1fc37c4..82cf7578d0fb 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.h
@@ -22,50 +22,29 @@ namespace __sanitizer {
// StackDepot efficiently stores huge amounts of stack traces.
struct StackDepotNode;
struct StackDepotHandle {
- StackDepotNode *node_;
- StackDepotHandle() : node_(nullptr) {}
- explicit StackDepotHandle(StackDepotNode *node) : node_(node) {}
- bool valid() { return node_; }
- u32 id();
- int use_count();
+ StackDepotNode *node_ = nullptr;
+ u32 id_ = 0;
+ StackDepotHandle(StackDepotNode *node, u32 id) : node_(node), id_(id) {}
+ bool valid() const { return node_; }
+ u32 id() const { return id_; }
+ int use_count() const;
void inc_use_count_unsafe();
};
const int kStackDepotMaxUseCount = 1U << (SANITIZER_ANDROID ? 16 : 20);
-StackDepotStats *StackDepotGetStats();
+StackDepotStats StackDepotGetStats();
u32 StackDepotPut(StackTrace stack);
StackDepotHandle StackDepotPut_WithHandle(StackTrace stack);
// Retrieves a stored stack trace by the id.
StackTrace StackDepotGet(u32 id);
-void StackDepotLockAll();
-void StackDepotUnlockAll();
+void StackDepotLockBeforeFork();
+void StackDepotUnlockAfterFork(bool fork_child);
void StackDepotPrintAll();
+void StackDepotStopBackgroundThread();
-// Instantiating this class creates a snapshot of StackDepot which can be
-// efficiently queried with StackDepotGet(). You can use it concurrently with
-// StackDepot, but the snapshot is only guaranteed to contain those stack traces
-// which were stored before it was instantiated.
-class StackDepotReverseMap {
- public:
- StackDepotReverseMap();
- StackTrace Get(u32 id);
-
- private:
- struct IdDescPair {
- u32 id;
- StackDepotNode *desc;
-
- static bool IdComparator(const IdDescPair &a, const IdDescPair &b);
- };
-
- InternalMmapVector<IdDescPair> map_;
-
- // Disallow evil constructors.
- StackDepotReverseMap(const StackDepotReverseMap&);
- void operator=(const StackDepotReverseMap&);
-};
+void StackDepotTestOnlyUnmap();
} // namespace __sanitizer
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stackdepotbase.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stackdepotbase.h
index 1af2c1892eff..279bc5de3bb9 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stackdepotbase.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stackdepotbase.h
@@ -16,71 +16,87 @@
#include <stdio.h>
#include "sanitizer_atomic.h"
+#include "sanitizer_flat_map.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
-#include "sanitizer_persistent_allocator.h"
namespace __sanitizer {
template <class Node, int kReservedBits, int kTabSizeLog>
class StackDepotBase {
+ static constexpr u32 kIdSizeLog =
+ sizeof(u32) * 8 - Max(kReservedBits, 1 /* At least 1 bit for locking. */);
+ static constexpr u32 kNodesSize1Log = kIdSizeLog / 2;
+ static constexpr u32 kNodesSize2Log = kIdSizeLog - kNodesSize1Log;
+ static constexpr int kTabSize = 1 << kTabSizeLog; // Hash table size.
+ static constexpr u32 kUnlockMask = (1ull << kIdSizeLog) - 1;
+ static constexpr u32 kLockMask = ~kUnlockMask;
+
public:
typedef typename Node::args_type args_type;
typedef typename Node::handle_type handle_type;
+ typedef typename Node::hash_type hash_type;
+
+ static constexpr u64 kNodesSize1 = 1ull << kNodesSize1Log;
+ static constexpr u64 kNodesSize2 = 1ull << kNodesSize2Log;
+
// Maps a stack trace to a unique id.
- handle_type Put(args_type args, bool *inserted = nullptr);
+ u32 Put(args_type args, bool *inserted = nullptr);
// Retrieves a stored stack trace by the id.
args_type Get(u32 id);
- StackDepotStats *GetStats() { return &stats; }
+ StackDepotStats GetStats() const {
+ return {
+ atomic_load_relaxed(&n_uniq_ids),
+ nodes.MemoryUsage() + Node::allocated(),
+ };
+ }
- void LockAll();
- void UnlockAll();
+ void LockBeforeFork();
+ void UnlockAfterFork(bool fork_child);
void PrintAll();
- private:
- static Node *find(Node *s, args_type args, u32 hash);
- static Node *lock(atomic_uintptr_t *p);
- static void unlock(atomic_uintptr_t *p, Node *s);
+ void TestOnlyUnmap() {
+ nodes.TestOnlyUnmap();
+ internal_memset(this, 0, sizeof(*this));
+ }
- static const int kTabSize = 1 << kTabSizeLog; // Hash table size.
- static const int kPartBits = 8;
- static const int kPartShift = sizeof(u32) * 8 - kPartBits - kReservedBits;
- static const int kPartCount =
- 1 << kPartBits; // Number of subparts in the table.
- static const int kPartSize = kTabSize / kPartCount;
- static const int kMaxId = 1 << kPartShift;
+ private:
+ friend Node;
+ u32 find(u32 s, args_type args, hash_type hash) const;
+ static u32 lock(atomic_uint32_t *p);
+ static void unlock(atomic_uint32_t *p, u32 s);
+ atomic_uint32_t tab[kTabSize]; // Hash table of Node's.
- atomic_uintptr_t tab[kTabSize]; // Hash table of Node's.
- atomic_uint32_t seq[kPartCount]; // Unique id generators.
+ atomic_uint32_t n_uniq_ids;
- StackDepotStats stats;
+ TwoLevelMap<Node, kNodesSize1, kNodesSize2> nodes;
friend class StackDepotReverseMap;
};
template <class Node, int kReservedBits, int kTabSizeLog>
-Node *StackDepotBase<Node, kReservedBits, kTabSizeLog>::find(Node *s,
- args_type args,
- u32 hash) {
+u32 StackDepotBase<Node, kReservedBits, kTabSizeLog>::find(
+ u32 s, args_type args, hash_type hash) const {
// Searches linked list s for the stack, returns its id.
- for (; s; s = s->link) {
- if (s->eq(hash, args)) {
+ for (; s;) {
+ const Node &node = nodes[s];
+ if (node.eq(hash, args))
return s;
- }
+ s = node.link;
}
- return nullptr;
+ return 0;
}
template <class Node, int kReservedBits, int kTabSizeLog>
-Node *StackDepotBase<Node, kReservedBits, kTabSizeLog>::lock(
- atomic_uintptr_t *p) {
+u32 StackDepotBase<Node, kReservedBits, kTabSizeLog>::lock(atomic_uint32_t *p) {
// Uses a reserved high bit of the value as a mutex.
for (int i = 0;; i++) {
- uptr cmp = atomic_load(p, memory_order_relaxed);
- if ((cmp & 1) == 0 &&
- atomic_compare_exchange_weak(p, &cmp, cmp | 1, memory_order_acquire))
- return (Node *)cmp;
+ u32 cmp = atomic_load(p, memory_order_relaxed);
+ if ((cmp & kLockMask) == 0 &&
+ atomic_compare_exchange_weak(p, &cmp, cmp | kLockMask,
+ memory_order_acquire))
+ return cmp;
if (i < 10)
proc_yield(10);
else
@@ -90,103 +106,101 @@ Node *StackDepotBase<Node, kReservedBits, kTabSizeLog>::lock(
template <class Node, int kReservedBits, int kTabSizeLog>
void StackDepotBase<Node, kReservedBits, kTabSizeLog>::unlock(
- atomic_uintptr_t *p, Node *s) {
- DCHECK_EQ((uptr)s & 1, 0);
- atomic_store(p, (uptr)s, memory_order_release);
+ atomic_uint32_t *p, u32 s) {
+ DCHECK_EQ(s & kLockMask, 0);
+ atomic_store(p, s, memory_order_release);
}
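// A minimal version of the bucket locking scheme above: the atomic holds a
// small id whose reserved top bit is always zero, so setting that bit doubles
// as a per-bucket spinlock, and the unlock store publishes the new value.
// Illustrative, not the runtime's exact code.
#include <atomic>
#include <cstdint>

constexpr std::uint32_t kLockMask = 1u << 31;  // one reserved bit

std::uint32_t Lock(std::atomic<std::uint32_t> *p) {
  for (;;) {
    std::uint32_t v = p->load(std::memory_order_relaxed);
    if ((v & kLockMask) == 0 &&
        p->compare_exchange_weak(v, v | kLockMask, std::memory_order_acquire))
      return v;  // caller owns the bucket; v is the stored id
  }
}

void Unlock(std::atomic<std::uint32_t> *p, std::uint32_t v) {
  p->store(v, std::memory_order_release);  // also clears the lock bit
}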
template <class Node, int kReservedBits, int kTabSizeLog>
-typename StackDepotBase<Node, kReservedBits, kTabSizeLog>::handle_type
-StackDepotBase<Node, kReservedBits, kTabSizeLog>::Put(args_type args,
- bool *inserted) {
- if (inserted) *inserted = false;
- if (!Node::is_valid(args)) return handle_type();
- uptr h = Node::hash(args);
- atomic_uintptr_t *p = &tab[h % kTabSize];
- uptr v = atomic_load(p, memory_order_consume);
- Node *s = (Node *)(v & ~1);
+u32 StackDepotBase<Node, kReservedBits, kTabSizeLog>::Put(args_type args,
+ bool *inserted) {
+ if (inserted)
+ *inserted = false;
+ if (!LIKELY(Node::is_valid(args)))
+ return 0;
+ hash_type h = Node::hash(args);
+ atomic_uint32_t *p = &tab[h % kTabSize];
+ u32 v = atomic_load(p, memory_order_consume);
+ u32 s = v & kUnlockMask;
// First, try to find the existing stack.
- Node *node = find(s, args, h);
- if (node) return node->get_handle();
+ u32 node = find(s, args, h);
+ if (LIKELY(node))
+ return node;
+
// If failed, lock, retry and insert new.
- Node *s2 = lock(p);
+ u32 s2 = lock(p);
if (s2 != s) {
node = find(s2, args, h);
if (node) {
unlock(p, s2);
- return node->get_handle();
+ return node;
}
}
- uptr part = (h % kTabSize) / kPartSize;
- u32 id = atomic_fetch_add(&seq[part], 1, memory_order_relaxed) + 1;
- stats.n_uniq_ids++;
- CHECK_LT(id, kMaxId);
- id |= part << kPartShift;
- CHECK_NE(id, 0);
- CHECK_EQ(id & (((u32)-1) >> kReservedBits), id);
- uptr memsz = Node::storage_size(args);
- s = (Node *)PersistentAlloc(memsz);
- stats.allocated += memsz;
- s->id = id;
- s->store(args, h);
- s->link = s2;
+ s = atomic_fetch_add(&n_uniq_ids, 1, memory_order_relaxed) + 1;
+ CHECK_EQ(s & kUnlockMask, s);
+ CHECK_EQ(s & (((u32)-1) >> kReservedBits), s);
+ Node &new_node = nodes[s];
+ new_node.store(s, args, h);
+ new_node.link = s2;
unlock(p, s);
if (inserted) *inserted = true;
- return s->get_handle();
+ return s;
}
template <class Node, int kReservedBits, int kTabSizeLog>
typename StackDepotBase<Node, kReservedBits, kTabSizeLog>::args_type
StackDepotBase<Node, kReservedBits, kTabSizeLog>::Get(u32 id) {
- if (id == 0) {
+ if (id == 0)
return args_type();
- }
CHECK_EQ(id & (((u32)-1) >> kReservedBits), id);
- // High kPartBits contain part id, so we need to scan at most kPartSize lists.
- uptr part = id >> kPartShift;
- for (int i = 0; i != kPartSize; i++) {
- uptr idx = part * kPartSize + i;
- CHECK_LT(idx, kTabSize);
- atomic_uintptr_t *p = &tab[idx];
- uptr v = atomic_load(p, memory_order_consume);
- Node *s = (Node *)(v & ~1);
- for (; s; s = s->link) {
- if (s->id == id) {
- return s->load();
- }
- }
- }
- return args_type();
+ if (!nodes.contains(id))
+ return args_type();
+ const Node &node = nodes[id];
+ return node.load(id);
}
template <class Node, int kReservedBits, int kTabSizeLog>
-void StackDepotBase<Node, kReservedBits, kTabSizeLog>::LockAll() {
- for (int i = 0; i < kTabSize; ++i) {
- lock(&tab[i]);
- }
+void StackDepotBase<Node, kReservedBits, kTabSizeLog>::LockBeforeFork() {
+ // Do not lock the hash table. Locking it is very expensive and not really
+ // needed: the parent process will neither lock nor unlock. The child process
+ // risks deadlocking on buckets that were locked at fork time, so to avoid
+ // deadlock we unlock every locked bucket in `UnlockAfterFork`. This may
+ // affect the consistency of the hash table, but the only issue is that a few
+ // items inserted by the parent process may not be found by the child, which
+ // may then insert them again, wasting some space in `stackStore`.
+
+ // We still need to lock nodes.
+ nodes.Lock();
}
template <class Node, int kReservedBits, int kTabSizeLog>
-void StackDepotBase<Node, kReservedBits, kTabSizeLog>::UnlockAll() {
+void StackDepotBase<Node, kReservedBits, kTabSizeLog>::UnlockAfterFork(
+ bool fork_child) {
+ nodes.Unlock();
+
+ // Only unlock in child process to avoid deadlock. See `LockBeforeFork`.
+ if (!fork_child)
+ return;
+
for (int i = 0; i < kTabSize; ++i) {
- atomic_uintptr_t *p = &tab[i];
+ atomic_uint32_t *p = &tab[i];
uptr s = atomic_load(p, memory_order_relaxed);
- unlock(p, (Node *)(s & ~1UL));
+ if (s & kLockMask)
+ unlock(p, s & kUnlockMask);
}
}
template <class Node, int kReservedBits, int kTabSizeLog>
void StackDepotBase<Node, kReservedBits, kTabSizeLog>::PrintAll() {
for (int i = 0; i < kTabSize; ++i) {
- atomic_uintptr_t *p = &tab[i];
- lock(p);
- uptr v = atomic_load(p, memory_order_relaxed);
- Node *s = (Node *)(v & ~1UL);
- for (; s; s = s->link) {
- Printf("Stack for id %u:\n", s->id);
- s->load().Print();
+ atomic_uint32_t *p = &tab[i];
+ u32 s = atomic_load(p, memory_order_consume) & kUnlockMask;
+ for (; s;) {
+ const Node &node = nodes[s];
+ Printf("Stack for id %u:\n", s);
+ node.load(s).Print();
+ s = node.link;
}
- unlock(p, s);
}
}
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.cpp
index 07e4409f4a5d..d24fae98213a 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.cpp
@@ -20,10 +20,10 @@
namespace __sanitizer {
uptr StackTrace::GetNextInstructionPc(uptr pc) {
-#if defined(__sparc__) || defined(__mips__)
+#if defined(__aarch64__)
+ return STRIP_PAC_PC((void *)pc) + 4;
+#elif defined(__sparc__) || defined(__mips__)
return pc + 8;
-#elif defined(__powerpc__) || defined(__arm__) || defined(__aarch64__)
- return pc + 4;
#elif SANITIZER_RISCV64
// Current check order is 4 -> 2 -> 6 -> 8
u8 InsnByte = *(u8 *)(pc);
@@ -46,8 +46,10 @@ uptr StackTrace::GetNextInstructionPc(uptr pc) {
}
// bail-out if could not figure out the instruction size
return 0;
-#else
+#elif SANITIZER_S390 || SANITIZER_I386 || SANITIZER_X32 || SANITIZER_X64
return pc + 1;
+#else
+ return pc + 4;
#endif
}
@@ -64,7 +66,7 @@ void BufferedStackTrace::Init(const uptr *pcs, uptr cnt, uptr extra_top_pc) {
top_frame_bp = 0;
}
-// Sparc implemention is in its own file.
+// Sparc implementation is in its own file.
#if !defined(__sparc__)
// In GCC on ARM bp points to saved lr, not fp, so we should check the next
@@ -119,7 +121,7 @@ void BufferedStackTrace::UnwindFast(uptr pc, uptr bp, uptr stack_top,
uhwptr pc1 = caller_frame[2];
#elif defined(__s390__)
uhwptr pc1 = frame[14];
-#elif defined(__riscv)
+#elif defined(__loongarch__) || defined(__riscv)
// frame[-1] contains the return address
uhwptr pc1 = frame[-1];
#else
@@ -134,7 +136,7 @@ void BufferedStackTrace::UnwindFast(uptr pc, uptr bp, uptr stack_top,
trace_buffer[size++] = (uptr) pc1;
}
bottom = (uptr)frame;
-#if defined(__riscv)
+#if defined(__loongarch__) || defined(__riscv)
// frame[-2] contains the fp of the previous frame
uptr new_bp = (uptr)frame[-2];
#else
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h
index ea330f36f7d7..47aed488c71a 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h
@@ -20,7 +20,7 @@ namespace __sanitizer {
struct BufferedStackTrace;
-static const u32 kStackTraceMax = 256;
+static const u32 kStackTraceMax = 255;
#if SANITIZER_LINUX && defined(__mips__)
# define SANITIZER_CAN_FAST_UNWIND 0
@@ -33,7 +33,7 @@ static const u32 kStackTraceMax = 256;
// Fast unwind is the only option on Mac for now; we will need to
// revisit this macro when slow unwind works on Mac, see
// https://github.com/google/sanitizers/issues/137
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
# define SANITIZER_CAN_SLOW_UNWIND 0
#else
# define SANITIZER_CAN_SLOW_UNWIND 1
@@ -88,21 +88,20 @@ uptr StackTrace::GetPreviousInstructionPc(uptr pc) {
// so we return (pc-2) in that case in order to be safe.
// For A32 mode we return (pc-4) because all instructions are 32 bit long.
return (pc - 3) & (~1);
-#elif defined(__powerpc__) || defined(__powerpc64__) || defined(__aarch64__)
- // PCs are always 4 byte aligned.
- return pc - 4;
#elif defined(__sparc__) || defined(__mips__)
return pc - 8;
#elif SANITIZER_RISCV64
- // RV-64 has variable instruciton length...
+ // RV-64 has variable instruction length...
// The C extension gives us 2-byte instructions
// RV-64 has 4-byte instructions
- // + RISCV architecture allows instructions up to 8 bytes
+ // + RISC-V architecture allows instructions up to 8 bytes
// It seems difficult to figure out the exact instruction length -
// pc - 2 seems like a safe option for the purposes of stack tracing
return pc - 2;
-#else
+#elif SANITIZER_S390 || SANITIZER_I386 || SANITIZER_X32 || SANITIZER_X64
return pc - 1;
+#else
+ return pc - 4;
#endif
}
@@ -209,11 +208,11 @@ static inline bool IsValidFrame(uptr frame, uptr stack_top, uptr stack_bottom) {
// StackTrace::GetCurrentPc() faster.
#if defined(__x86_64__)
# define GET_CURRENT_PC() \
- ({ \
+ (__extension__({ \
uptr pc; \
asm("lea 0(%%rip), %0" : "=r"(pc)); \
pc; \
- })
+ }))
#else
# define GET_CURRENT_PC() StackTrace::GetCurrentPc()
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_libcdep.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_libcdep.cpp
index f60ea7731748..561eae9ab780 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_libcdep.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_libcdep.cpp
@@ -29,42 +29,43 @@ class StackTraceTextPrinter {
frame_delimiter_(frame_delimiter),
output_(output),
dedup_token_(dedup_token),
- symbolize_(RenderNeedsSymbolization(stack_trace_fmt)) {}
+ symbolize_(StackTracePrinter::GetOrInit()->RenderNeedsSymbolization(
+ stack_trace_fmt)) {}
bool ProcessAddressFrames(uptr pc) {
- SymbolizedStack *frames = symbolize_
- ? Symbolizer::GetOrInit()->SymbolizePC(pc)
- : SymbolizedStack::New(pc);
+ SymbolizedStackHolder symbolized_stack(
+ symbolize_ ? Symbolizer::GetOrInit()->SymbolizePC(pc)
+ : SymbolizedStack::New(pc));
+ const SymbolizedStack *frames = symbolized_stack.get();
if (!frames)
return false;
- for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
+ for (const SymbolizedStack *cur = frames; cur; cur = cur->next) {
uptr prev_len = output_->length();
- RenderFrame(output_, stack_trace_fmt_, frame_num_++, cur->info.address,
- symbolize_ ? &cur->info : nullptr,
- common_flags()->symbolize_vs_style,
- common_flags()->strip_path_prefix);
+ StackTracePrinter::GetOrInit()->RenderFrame(
+ output_, stack_trace_fmt_, frame_num_++, cur->info.address,
+ symbolize_ ? &cur->info : nullptr, common_flags()->symbolize_vs_style,
+ common_flags()->strip_path_prefix);
if (prev_len != output_->length())
- output_->append("%c", frame_delimiter_);
+ output_->AppendF("%c", frame_delimiter_);
ExtendDedupToken(cur);
}
- frames->ClearAll();
return true;
}
private:
// Extend the dedup token by appending a new frame.
- void ExtendDedupToken(SymbolizedStack *stack) {
+ void ExtendDedupToken(const SymbolizedStack *stack) {
if (!dedup_token_)
return;
if (dedup_frames_-- > 0) {
if (dedup_token_->length())
- dedup_token_->append("--");
- if (stack->info.function != nullptr)
- dedup_token_->append(stack->info.function);
+ dedup_token_->AppendF("--");
+ if (stack->info.function)
+ dedup_token_->Append(stack->info.function);
}
}
@@ -98,7 +99,7 @@ void StackTrace::PrintTo(InternalScopedString *output) const {
output, &dedup_token);
if (trace == nullptr || size == 0) {
- output->append(" <empty stack>\n\n");
+ output->AppendF(" <empty stack>\n\n");
return;
}
@@ -110,11 +111,11 @@ void StackTrace::PrintTo(InternalScopedString *output) const {
}
// Always add a trailing empty line after stack trace.
- output->append("\n");
+ output->AppendF("\n");
// Append deduplication token, if non-empty.
if (dedup_token.length())
- output->append("DEDUP_TOKEN: %s\n", dedup_token.data());
+ output->AppendF("DEDUP_TOKEN: %s\n", dedup_token.data());
}
uptr StackTrace::PrintTo(char *out_buf, uptr out_buf_size) const {
@@ -166,8 +167,8 @@ void BufferedStackTrace::Unwind(u32 max_depth, uptr pc, uptr bp, void *context,
UnwindFast(pc, bp, stack_top, stack_bottom, max_depth);
}
-static int GetModuleAndOffsetForPc(uptr pc, char *module_name,
- uptr module_name_len, uptr *pc_offset) {
+int GetModuleAndOffsetForPc(uptr pc, char *module_name, uptr module_name_len,
+ uptr *pc_offset) {
const char *found_module_name = nullptr;
bool ok = Symbolizer::GetOrInit()->GetModuleNameAndOffsetForPC(
pc, &found_module_name, pc_offset);
@@ -197,7 +198,7 @@ void __sanitizer_symbolize_pc(uptr pc, const char *fmt, char *out_buf,
StackTraceTextPrinter printer(fmt, '\0', &output, nullptr);
if (!printer.ProcessAddressFrames(pc)) {
output.clear();
- output.append("<can't symbolize>");
+ output.AppendF("<can't symbolize>");
}
CopyStringToBuffer(output, out_buf, out_buf_size);
}
@@ -210,16 +211,18 @@ void __sanitizer_symbolize_global(uptr data_addr, const char *fmt,
DataInfo DI;
if (!Symbolizer::GetOrInit()->SymbolizeData(data_addr, &DI)) return;
InternalScopedString data_desc;
- RenderData(&data_desc, fmt, &DI, common_flags()->strip_path_prefix);
+ StackTracePrinter::GetOrInit()->RenderData(&data_desc, fmt, &DI,
+ common_flags()->strip_path_prefix);
internal_strncpy(out_buf, data_desc.data(), out_buf_size);
out_buf[out_buf_size - 1] = 0;
}
SANITIZER_INTERFACE_ATTRIBUTE
-int __sanitizer_get_module_and_offset_for_pc(uptr pc, char *module_name,
+int __sanitizer_get_module_and_offset_for_pc(void *pc, char *module_name,
uptr module_name_len,
- uptr *pc_offset) {
- return __sanitizer::GetModuleAndOffsetForPc(pc, module_name, module_name_len,
- pc_offset);
+ void **pc_offset) {
+ return __sanitizer::GetModuleAndOffsetForPc(
+ reinterpret_cast<uptr>(pc), module_name, module_name_len,
+ reinterpret_cast<uptr *>(pc_offset));
}
} // extern "C"
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.cpp
index c998322d3944..748d832ccc21 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.cpp
@@ -11,25 +11,69 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_stacktrace_printer.h"
+
+#include "sanitizer_common.h"
#include "sanitizer_file.h"
+#include "sanitizer_flags.h"
#include "sanitizer_fuchsia.h"
+#include "sanitizer_symbolizer_markup.h"
namespace __sanitizer {
+StackTracePrinter *StackTracePrinter::GetOrInit() {
+ static StackTracePrinter *stacktrace_printer;
+ static StaticSpinMutex init_mu;
+ SpinMutexLock l(&init_mu);
+ if (stacktrace_printer)
+ return stacktrace_printer;
+
+ stacktrace_printer = StackTracePrinter::NewStackTracePrinter();
+
+ CHECK(stacktrace_printer);
+ return stacktrace_printer;
+}
+
+const char *StackTracePrinter::StripFunctionName(const char *function) {
+ if (!common_flags()->demangle)
+ return function;
+ if (!function)
+ return nullptr;
+ auto try_strip = [function](const char *prefix) -> const char * {
+ const uptr prefix_len = internal_strlen(prefix);
+ if (!internal_strncmp(function, prefix, prefix_len))
+ return function + prefix_len;
+ return nullptr;
+ };
+ if (SANITIZER_APPLE) {
+ if (const char *s = try_strip("wrap_"))
+ return s;
+ } else if (SANITIZER_WINDOWS) {
+ if (const char *s = try_strip("__asan_wrap_"))
+ return s;
+ } else {
+ if (const char *s = try_strip("___interceptor_"))
+ return s;
+ if (const char *s = try_strip("__interceptor_"))
+ return s;
+ }
+ return function;
+}
+
// sanitizer_symbolizer_markup.cpp implements these differently.
#if !SANITIZER_SYMBOLIZER_MARKUP
-static const char *StripFunctionName(const char *function, const char *prefix) {
- if (!function) return nullptr;
- if (!prefix) return function;
- uptr prefix_len = internal_strlen(prefix);
- if (0 == internal_strncmp(function, prefix, prefix_len))
- return function + prefix_len;
- return function;
+StackTracePrinter *StackTracePrinter::NewStackTracePrinter() {
+ if (common_flags()->enable_symbolizer_markup)
+ return new (GetGlobalLowLevelAllocator()) MarkupStackTracePrinter();
+
+ return new (GetGlobalLowLevelAllocator()) FormattedStackTracePrinter();
}
static const char *DemangleFunctionName(const char *function) {
- if (!function) return nullptr;
+ if (!common_flags()->demangle)
+ return function;
+ if (!function)
+ return nullptr;
// NetBSD uses indirection for old threading functions for historical reasons
// The mangled names are internal implementation detail and should not be
@@ -104,11 +148,27 @@ static const char *DemangleFunctionName(const char *function) {
return function;
}
+static void MaybeBuildIdToBuffer(const AddressInfo &info, bool PrefixSpace,
+ InternalScopedString *buffer) {
+ if (info.uuid_size) {
+ if (PrefixSpace)
+ buffer->AppendF(" ");
+ buffer->AppendF("(BuildId: ");
+ for (uptr i = 0; i < info.uuid_size; ++i) {
+ buffer->AppendF("%02x", info.uuid[i]);
+ }
+ buffer->AppendF(")");
+ }
+}
+
static const char kDefaultFormat[] = " #%n %p %F %L";
-void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
- uptr address, const AddressInfo *info, bool vs_style,
- const char *strip_path_prefix, const char *strip_func_prefix) {
+void FormattedStackTracePrinter::RenderFrame(InternalScopedString *buffer,
+ const char *format, int frame_no,
+ uptr address,
+ const AddressInfo *info,
+ bool vs_style,
+ const char *strip_path_prefix) {
// info will be null in the case where symbolization is not needed for the
// given format. This ensures that the code below will get a hard failure
// rather than print incorrect information in case RenderNeedsSymbolization
@@ -119,53 +179,56 @@ void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
format = kDefaultFormat;
for (const char *p = format; *p != '\0'; p++) {
if (*p != '%') {
- buffer->append("%c", *p);
+ buffer->AppendF("%c", *p);
continue;
}
p++;
switch (*p) {
case '%':
- buffer->append("%%");
+ buffer->Append("%");
break;
// Frame number and all fields of AddressInfo structure.
case 'n':
- buffer->append("%zu", frame_no);
+ buffer->AppendF("%u", frame_no);
break;
case 'p':
- buffer->append("0x%zx", address);
+ buffer->AppendF("0x%zx", address);
break;
case 'm':
- buffer->append("%s", StripPathPrefix(info->module, strip_path_prefix));
+ buffer->AppendF("%s", StripPathPrefix(info->module, strip_path_prefix));
break;
case 'o':
- buffer->append("0x%zx", info->module_offset);
+ buffer->AppendF("0x%zx", info->module_offset);
+ break;
+ case 'b':
+ MaybeBuildIdToBuffer(*info, /*PrefixSpace=*/false, buffer);
break;
case 'f':
- buffer->append("%s", DemangleFunctionName(StripFunctionName(
- info->function, strip_func_prefix)));
+ buffer->AppendF("%s",
+ DemangleFunctionName(StripFunctionName(info->function)));
break;
case 'q':
- buffer->append("0x%zx", info->function_offset != AddressInfo::kUnknown
- ? info->function_offset
- : 0x0);
+ buffer->AppendF("0x%zx", info->function_offset != AddressInfo::kUnknown
+ ? info->function_offset
+ : 0x0);
break;
case 's':
- buffer->append("%s", StripPathPrefix(info->file, strip_path_prefix));
+ buffer->AppendF("%s", StripPathPrefix(info->file, strip_path_prefix));
break;
case 'l':
- buffer->append("%d", info->line);
+ buffer->AppendF("%d", info->line);
break;
case 'c':
- buffer->append("%d", info->column);
+ buffer->AppendF("%d", info->column);
break;
// Smarter special cases.
case 'F':
// Function name and offset, if file is unknown.
if (info->function) {
- buffer->append("in %s", DemangleFunctionName(StripFunctionName(
- info->function, strip_func_prefix)));
+ buffer->AppendF(
+ "in %s", DemangleFunctionName(StripFunctionName(info->function)));
if (!info->file && info->function_offset != AddressInfo::kUnknown)
- buffer->append("+0x%zx", info->function_offset);
+ buffer->AppendF("+0x%zx", info->function_offset);
}
break;
case 'S':
@@ -181,8 +244,12 @@ void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
} else if (info->module) {
RenderModuleLocation(buffer, info->module, info->module_offset,
info->module_arch, strip_path_prefix);
+
+#if !SANITIZER_APPLE
+ MaybeBuildIdToBuffer(*info, /*PrefixSpace=*/true, buffer);
+#endif
} else {
- buffer->append("(<unknown module>)");
+ buffer->AppendF("(<unknown module>)");
}
break;
case 'M':
@@ -193,19 +260,22 @@ void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
// Always strip the module name for %M.
RenderModuleLocation(buffer, StripModuleName(info->module),
info->module_offset, info->module_arch, "");
+#if !SANITIZER_APPLE
+ MaybeBuildIdToBuffer(*info, /*PrefixSpace=*/true, buffer);
+#endif
} else {
- buffer->append("(%p)", (void *)address);
+ buffer->AppendF("(%p)", (void *)address);
}
break;
default:
- Report("Unsupported specifier in stack frame format: %c (0x%zx)!\n", *p,
- *p);
+ Report("Unsupported specifier in stack frame format: %c (%p)!\n", *p,
+ (void *)p);
Die();
}
}
}
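// For orientation, the default format " #%n %p %F %L" renders a frame roughly
// as (illustrative output; exact fields depend on symbolization):
//
//    #3 0x55e2a1b0c2f4 in my::Func() /src/foo.cc:42:7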
-bool RenderNeedsSymbolization(const char *format) {
+bool FormattedStackTracePrinter::RenderNeedsSymbolization(const char *format) {
if (0 == internal_strcmp(format, "DEFAULT"))
format = kDefaultFormat;
for (const char *p = format; *p != '\0'; p++) {
@@ -228,30 +298,32 @@ bool RenderNeedsSymbolization(const char *format) {
return false;
}
-void RenderData(InternalScopedString *buffer, const char *format,
- const DataInfo *DI, const char *strip_path_prefix) {
+void FormattedStackTracePrinter::RenderData(InternalScopedString *buffer,
+ const char *format,
+ const DataInfo *DI,
+ const char *strip_path_prefix) {
for (const char *p = format; *p != '\0'; p++) {
if (*p != '%') {
- buffer->append("%c", *p);
+ buffer->AppendF("%c", *p);
continue;
}
p++;
switch (*p) {
case '%':
- buffer->append("%%");
+ buffer->Append("%");
break;
case 's':
- buffer->append("%s", StripPathPrefix(DI->file, strip_path_prefix));
+ buffer->AppendF("%s", StripPathPrefix(DI->file, strip_path_prefix));
break;
case 'l':
- buffer->append("%d", DI->line);
+ buffer->AppendF("%zu", DI->line);
break;
case 'g':
- buffer->append("%s", DI->name);
+ buffer->AppendF("%s", DI->name);
break;
default:
- Report("Unsupported specifier in stack frame format: %c (0x%zx)!\n", *p,
- *p);
+ Report("Unsupported specifier in stack frame format: %c (%p)!\n", *p,
+ (void *)p);
Die();
}
}
@@ -259,33 +331,35 @@ void RenderData(InternalScopedString *buffer, const char *format,
#endif // !SANITIZER_SYMBOLIZER_MARKUP
-void RenderSourceLocation(InternalScopedString *buffer, const char *file,
- int line, int column, bool vs_style,
- const char *strip_path_prefix) {
+void StackTracePrinter::RenderSourceLocation(InternalScopedString *buffer,
+ const char *file, int line,
+ int column, bool vs_style,
+ const char *strip_path_prefix) {
if (vs_style && line > 0) {
- buffer->append("%s(%d", StripPathPrefix(file, strip_path_prefix), line);
+ buffer->AppendF("%s(%d", StripPathPrefix(file, strip_path_prefix), line);
if (column > 0)
- buffer->append(",%d", column);
- buffer->append(")");
+ buffer->AppendF(",%d", column);
+ buffer->AppendF(")");
return;
}
- buffer->append("%s", StripPathPrefix(file, strip_path_prefix));
+ buffer->AppendF("%s", StripPathPrefix(file, strip_path_prefix));
if (line > 0) {
- buffer->append(":%d", line);
+ buffer->AppendF(":%d", line);
if (column > 0)
- buffer->append(":%d", column);
+ buffer->AppendF(":%d", column);
}
}
-void RenderModuleLocation(InternalScopedString *buffer, const char *module,
- uptr offset, ModuleArch arch,
- const char *strip_path_prefix) {
- buffer->append("(%s", StripPathPrefix(module, strip_path_prefix));
+void StackTracePrinter::RenderModuleLocation(InternalScopedString *buffer,
+ const char *module, uptr offset,
+ ModuleArch arch,
+ const char *strip_path_prefix) {
+ buffer->AppendF("(%s", StripPathPrefix(module, strip_path_prefix));
if (arch != kModuleArchUnknown) {
- buffer->append(":%s", ModuleArchToString(arch));
+ buffer->AppendF(":%s", ModuleArchToString(arch));
}
- buffer->append("+0x%zx)", offset);
+ buffer->AppendF("+0x%zx)", offset);
}
} // namespace __sanitizer
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.h
index 96119b2ee9e9..10361a320344 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_printer.h
@@ -13,60 +13,94 @@
#define SANITIZER_STACKTRACE_PRINTER_H
#include "sanitizer_common.h"
+#include "sanitizer_internal_defs.h"
#include "sanitizer_symbolizer.h"
namespace __sanitizer {
-// Render the contents of "info" structure, which represents the contents of
-// stack frame "frame_no" and appends it to the "buffer". "format" is a
-// string with placeholders, which is copied to the output with
-// placeholders substituted with the contents of "info". For example,
-// format string
-// " frame %n: function %F at %S"
-// will be turned into
-// " frame 10: function foo::bar() at my/file.cc:10"
-// You may additionally pass "strip_path_prefix" to strip prefixes of paths to
-// source files and modules, and "strip_func_prefix" to strip prefixes of
-// function names.
-// Here's the full list of available placeholders:
-// %% - represents a '%' character;
-// %n - frame number (copy of frame_no);
-// %p - PC in hex format;
-// %m - path to module (binary or shared object);
-// %o - offset in the module in hex format;
-// %f - function name;
-// %q - offset in the function in hex format (*if available*);
-// %s - path to source file;
-// %l - line in the source file;
-// %c - column in the source file;
-// %F - if function is known to be <foo>, prints "in <foo>", possibly
-// followed by the offset in this function, but only if source file
-// is unknown;
-// %S - prints file/line/column information;
-// %L - prints location information: file/line/column, if it is known, or
-// module+offset if it is known, or (<unknown module>) string.
-// %M - prints module basename and offset, if it is known, or PC.
-void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
- uptr address, const AddressInfo *info, bool vs_style,
- const char *strip_path_prefix = "",
- const char *strip_func_prefix = "");
-
-bool RenderNeedsSymbolization(const char *format);
-
-void RenderSourceLocation(InternalScopedString *buffer, const char *file,
- int line, int column, bool vs_style,
- const char *strip_path_prefix);
-
-void RenderModuleLocation(InternalScopedString *buffer, const char *module,
- uptr offset, ModuleArch arch,
- const char *strip_path_prefix);
-
-// Same as RenderFrame, but for data section (global variables).
-// Accepts %s, %l from above.
-// Also accepts:
-// %g - name of the global variable.
-void RenderData(InternalScopedString *buffer, const char *format,
- const DataInfo *DI, const char *strip_path_prefix = "");
+// StackTracePrinter is an interface implemented by classes that
+// render the different parts of a stack trace.
+class StackTracePrinter {
+ public:
+ static StackTracePrinter *GetOrInit();
+
+ // Strip interceptor prefixes from function name.
+ const char *StripFunctionName(const char *function);
+
+ virtual void RenderFrame(InternalScopedString *buffer, const char *format,
+ int frame_no, uptr address, const AddressInfo *info,
+ bool vs_style,
+ const char *strip_path_prefix = "") = 0;
+
+ virtual bool RenderNeedsSymbolization(const char *format) = 0;
+
+ void RenderSourceLocation(InternalScopedString *buffer, const char *file,
+ int line, int column, bool vs_style,
+ const char *strip_path_prefix);
+
+ void RenderModuleLocation(InternalScopedString *buffer, const char *module,
+ uptr offset, ModuleArch arch,
+ const char *strip_path_prefix);
+ virtual void RenderData(InternalScopedString *buffer, const char *format,
+ const DataInfo *DI,
+ const char *strip_path_prefix = "") = 0;
+
+ private:
+ // To be called from StackTracePrinter::GetOrInit
+ static StackTracePrinter *NewStackTracePrinter();
+
+ protected:
+ ~StackTracePrinter() {}
+};
+
+class FormattedStackTracePrinter : public StackTracePrinter {
+ public:
+  // Renders the contents of the "info" structure, which represents stack
+  // frame "frame_no", and appends it to "buffer". "format" is a
+ // string with placeholders, which is copied to the output with
+ // placeholders substituted with the contents of "info". For example,
+ // format string
+ // " frame %n: function %F at %S"
+ // will be turned into
+ // " frame 10: function foo::bar() at my/file.cc:10"
+ // You may additionally pass "strip_path_prefix" to strip prefixes of paths to
+ // source files and modules.
+ // Here's the full list of available placeholders:
+ // %% - represents a '%' character;
+ // %n - frame number (copy of frame_no);
+ // %p - PC in hex format;
+ // %m - path to module (binary or shared object);
+ // %o - offset in the module in hex format;
+ // %f - function name;
+ // %q - offset in the function in hex format (*if available*);
+ // %s - path to source file;
+ // %l - line in the source file;
+ // %c - column in the source file;
+ // %F - if function is known to be <foo>, prints "in <foo>", possibly
+ // followed by the offset in this function, but only if source file
+ // is unknown;
+ // %S - prints file/line/column information;
+ // %L - prints location information: file/line/column, if it is known, or
+ // module+offset if it is known, or (<unknown module>) string.
+ // %M - prints module basename and offset, if it is known, or PC.
+ void RenderFrame(InternalScopedString *buffer, const char *format,
+ int frame_no, uptr address, const AddressInfo *info,
+ bool vs_style, const char *strip_path_prefix = "") override;
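As a worked example of the placeholder table above (all frame values hypothetical), the format string "    #%n %p in %f %S" renders roughly as:

    #10 0x4005d4 in foo::bar() my/file.cc:10:4

A minimal call sketch against the interface introduced by this change:

    InternalScopedString out;
    StackTracePrinter::GetOrInit()->RenderFrame(
        &out, "    #%n %p in %f %S", /*frame_no=*/10, /*address=*/pc, &info,
        /*vs_style=*/false);
    Printf("%s\n", out.data());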
+
+ bool RenderNeedsSymbolization(const char *format) override;
+
+ // Same as RenderFrame, but for data section (global variables).
+ // Accepts %s, %l from above.
+ // Also accepts:
+ // %g - name of the global variable.
+ void RenderData(InternalScopedString *buffer, const char *format,
+ const DataInfo *DI,
+ const char *strip_path_prefix = "") override;
+
+ protected:
+ ~FormattedStackTracePrinter() {}
+};
} // namespace __sanitizer
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_sparc.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_sparc.cpp
index 34190fb1bbb2..a2000798a390 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_sparc.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace_sparc.cpp
@@ -9,7 +9,7 @@
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//
-// Implemention of fast stack unwinding for Sparc.
+// Implementation of fast stack unwinding for Sparc.
//===----------------------------------------------------------------------===//
#if defined(__sparc__)
@@ -30,13 +30,7 @@ void BufferedStackTrace::UnwindFast(uptr pc, uptr bp, uptr stack_top,
// TODO(yln): add arg sanity check for stack_top/stack_bottom
CHECK_GE(max_depth, 2);
const uptr kPageSize = GetPageSizeCached();
-#if defined(__GNUC__)
- // __builtin_return_address returns the address of the call instruction
- // on the SPARC and not the return address, so we need to compensate.
- trace_buffer[0] = GetNextInstructionPc(pc);
-#else
trace_buffer[0] = pc;
-#endif
size = 1;
if (stack_top < 4096) return; // Sanity check for stack top.
// Flush register windows to memory
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp
index 53cfddcfbe0b..25c4af708560 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp
@@ -16,7 +16,7 @@
#if SANITIZER_LINUX && \
(defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) || \
defined(__powerpc64__) || defined(__s390__) || defined(__i386__) || \
- defined(__arm__) || SANITIZER_RISCV64)
+ defined(__arm__) || SANITIZER_RISCV64 || SANITIZER_LOONGARCH64)
#include "sanitizer_stoptheworld.h"
@@ -31,7 +31,8 @@
#include <sys/types.h> // for pid_t
#include <sys/uio.h> // for iovec
#include <elf.h> // for NT_PRSTATUS
-#if (defined(__aarch64__) || SANITIZER_RISCV64) && !SANITIZER_ANDROID
+#if (defined(__aarch64__) || SANITIZER_RISCV64 || SANITIZER_LOONGARCH64) && \
+ !SANITIZER_ANDROID
// GLIBC 2.20+ sys/user does not include asm/ptrace.h
# include <asm/ptrace.h>
#endif
@@ -108,7 +109,7 @@ struct TracerThreadArgument {
void *callback_argument;
// The tracer thread waits on this mutex while the parent finishes its
// preparations.
- BlockingMutex mutex;
+ Mutex mutex;
// Tracer thread signals its completion by setting done.
atomic_uintptr_t done;
uptr parent_pid;
@@ -514,6 +515,12 @@ typedef struct user_pt_regs regs_struct;
static constexpr uptr kExtraRegs[] = {0};
#define ARCH_IOVEC_FOR_GETREGSET
+#elif defined(__loongarch__)
+typedef struct user_pt_regs regs_struct;
+#define REG_SP regs[3]
+static constexpr uptr kExtraRegs[] = {0};
+#define ARCH_IOVEC_FOR_GETREGSET
+
#elif SANITIZER_RISCV64
typedef struct user_regs_struct regs_struct;
// sys/ucontext.h already defines REG_SP as 2. Undefine it first.
@@ -558,7 +565,7 @@ PtraceRegistersStatus SuspendedThreadsListLinux::GetRegistersAndSP(
constexpr uptr uptr_sz = sizeof(uptr);
int pterrno;
#ifdef ARCH_IOVEC_FOR_GETREGSET
- auto append = [&](uptr regset) {
+ auto AppendF = [&](uptr regset) {
uptr size = buffer->size();
// NT_X86_XSTATE requires 64bit alignment.
uptr size_up = RoundUpTo(size, 8 / uptr_sz);
@@ -589,11 +596,11 @@ PtraceRegistersStatus SuspendedThreadsListLinux::GetRegistersAndSP(
};
buffer->clear();
- bool fail = !append(NT_PRSTATUS);
+ bool fail = !AppendF(NT_PRSTATUS);
if (!fail) {
// Accept the first available and do not report errors.
for (uptr regs : kExtraRegs)
- if (regs && append(regs))
+ if (regs && AppendF(regs))
break;
}
#else
@@ -621,3 +628,4 @@ PtraceRegistersStatus SuspendedThreadsListLinux::GetRegistersAndSP(
#endif // SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__)
// || defined(__aarch64__) || defined(__powerpc64__)
// || defined(__s390__) || defined(__i386__) || defined(__arm__)
+ // || SANITIZER_LOONGARCH64
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_mac.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_mac.cpp
index 5ec30803b7ad..813616467656 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_mac.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_mac.cpp
@@ -12,7 +12,7 @@
#include "sanitizer_platform.h"
-#if SANITIZER_MAC && (defined(__x86_64__) || defined(__aarch64__) || \
+#if SANITIZER_APPLE && (defined(__x86_64__) || defined(__aarch64__) || \
defined(__i386))
#include <mach/mach.h>
@@ -29,7 +29,7 @@ typedef struct {
class SuspendedThreadsListMac final : public SuspendedThreadsList {
public:
- SuspendedThreadsListMac() : threads_(1024) {}
+ SuspendedThreadsListMac() = default;
tid_t GetThreadID(uptr index) const override;
thread_t GetThread(uptr index) const;
@@ -87,11 +87,13 @@ void StopTheWorld(StopTheWorldCallback callback, void *argument) {
#if defined(__x86_64__)
typedef x86_thread_state64_t regs_struct;
+#define regs_flavor x86_THREAD_STATE64
#define SP_REG __rsp
#elif defined(__aarch64__)
typedef arm_thread_state64_t regs_struct;
+#define regs_flavor ARM_THREAD_STATE64
# if __DARWIN_UNIX03
# define SP_REG __sp
@@ -101,6 +103,7 @@ typedef arm_thread_state64_t regs_struct;
#elif defined(__i386)
typedef x86_thread_state32_t regs_struct;
+#define regs_flavor x86_THREAD_STATE32
#define SP_REG __esp
@@ -146,17 +149,15 @@ PtraceRegistersStatus SuspendedThreadsListMac::GetRegistersAndSP(
thread_t thread = GetThread(index);
regs_struct regs;
int err;
- mach_msg_type_number_t reg_count = MACHINE_THREAD_STATE_COUNT;
- err = thread_get_state(thread, MACHINE_THREAD_STATE, (thread_state_t)&regs,
+ mach_msg_type_number_t reg_count = sizeof(regs) / sizeof(natural_t);
+ err = thread_get_state(thread, regs_flavor, (thread_state_t)&regs,
&reg_count);
if (err != KERN_SUCCESS) {
VReport(1, "Error - unable to get registers for a thread\n");
- // KERN_INVALID_ARGUMENT indicates that either the flavor is invalid,
- // or the thread does not exist. The other possible error case,
// MIG_ARRAY_TOO_LARGE means that the state is too large, but it's
// still safe to proceed.
- return err == KERN_INVALID_ARGUMENT ? REGISTERS_UNAVAILABLE_FATAL
- : REGISTERS_UNAVAILABLE;
+ return err == MIG_ARRAY_TOO_LARGE ? REGISTERS_UNAVAILABLE
+ : REGISTERS_UNAVAILABLE_FATAL;
}
buffer->resize(RoundUpTo(sizeof(regs), sizeof(uptr)) / sizeof(uptr));
@@ -176,5 +177,5 @@ PtraceRegistersStatus SuspendedThreadsListMac::GetRegistersAndSP(
} // namespace __sanitizer
-#endif // SANITIZER_MAC && (defined(__x86_64__) || defined(__aarch64__)) ||
+#endif // SANITIZER_APPLE && (defined(__x86_64__) || defined(__aarch64__)) ||
// defined(__i386))
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_netbsd_libcdep.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_netbsd_libcdep.cpp
index 9c7cd64255e5..701db72619a3 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_netbsd_libcdep.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_netbsd_libcdep.cpp
@@ -68,7 +68,7 @@ class SuspendedThreadsListNetBSD final : public SuspendedThreadsList {
struct TracerThreadArgument {
StopTheWorldCallback callback;
void *callback_argument;
- BlockingMutex mutex;
+ Mutex mutex;
atomic_uintptr_t done;
uptr parent_pid;
};
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_win.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_win.cpp
new file mode 100644
index 000000000000..f114acea79c9
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_win.cpp
@@ -0,0 +1,175 @@
+//===-- sanitizer_stoptheworld_win.cpp ------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// See sanitizer_stoptheworld.h for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_WINDOWS
+
+# define WIN32_LEAN_AND_MEAN
+# include <windows.h>
+// windows.h needs to be included before tlhelp32.h
+# include <tlhelp32.h>
+
+# include "sanitizer_stoptheworld.h"
+
+namespace __sanitizer {
+
+namespace {
+
+struct SuspendedThreadsListWindows final : public SuspendedThreadsList {
+ InternalMmapVector<HANDLE> threadHandles;
+ InternalMmapVector<DWORD> threadIds;
+
+ SuspendedThreadsListWindows() {
+ threadIds.reserve(1024);
+ threadHandles.reserve(1024);
+ }
+
+ PtraceRegistersStatus GetRegistersAndSP(uptr index,
+ InternalMmapVector<uptr> *buffer,
+ uptr *sp) const override;
+
+ tid_t GetThreadID(uptr index) const override;
+ uptr ThreadCount() const override;
+};
+
+// Stack Pointer register names on different architectures
+# if SANITIZER_X64
+# define SP_REG Rsp
+# elif SANITIZER_I386
+# define SP_REG Esp
+# elif SANITIZER_ARM || SANITIZER_ARM64
+# define SP_REG Sp
+# else
+# error Architecture not supported!
+# endif
+
+PtraceRegistersStatus SuspendedThreadsListWindows::GetRegistersAndSP(
+ uptr index, InternalMmapVector<uptr> *buffer, uptr *sp) const {
+ CHECK_LT(index, threadHandles.size());
+
+ buffer->resize(RoundUpTo(sizeof(CONTEXT), sizeof(uptr)) / sizeof(uptr));
+ CONTEXT *thread_context = reinterpret_cast<CONTEXT *>(buffer->data());
+ thread_context->ContextFlags = CONTEXT_ALL;
+ CHECK(GetThreadContext(threadHandles[index], thread_context));
+ *sp = thread_context->SP_REG;
+
+ return REGISTERS_AVAILABLE;
+}
+
+tid_t SuspendedThreadsListWindows::GetThreadID(uptr index) const {
+ CHECK_LT(index, threadIds.size());
+ return threadIds[index];
+}
+
+uptr SuspendedThreadsListWindows::ThreadCount() const {
+ return threadIds.size();
+}
+
+struct RunThreadArgs {
+ StopTheWorldCallback callback;
+ void *argument;
+};
+
+DWORD WINAPI RunThread(void *argument) {
+ RunThreadArgs *run_args = (RunThreadArgs *)argument;
+
+ const DWORD this_thread = GetCurrentThreadId();
+ const DWORD this_process = GetCurrentProcessId();
+
+ SuspendedThreadsListWindows suspended_threads_list;
+ bool new_thread_found;
+
+ do {
+ // Take a snapshot of all Threads
+ const HANDLE threads = CreateToolhelp32Snapshot(TH32CS_SNAPTHREAD, 0);
+ CHECK(threads != INVALID_HANDLE_VALUE);
+
+ THREADENTRY32 thread_entry;
+ thread_entry.dwSize = sizeof(thread_entry);
+ new_thread_found = false;
+
+ if (!Thread32First(threads, &thread_entry))
+ break;
+
+ do {
+ if (thread_entry.th32ThreadID == this_thread ||
+ thread_entry.th32OwnerProcessID != this_process)
+ continue;
+
+ bool suspended_thread = false;
+ for (const auto thread_id : suspended_threads_list.threadIds) {
+ if (thread_id == thread_entry.th32ThreadID) {
+ suspended_thread = true;
+ break;
+ }
+ }
+
+      // Skip the thread if it was already suspended.
+ if (suspended_thread)
+ continue;
+
+ const HANDLE thread =
+ OpenThread(THREAD_ALL_ACCESS, FALSE, thread_entry.th32ThreadID);
+ CHECK(thread);
+
+ if (SuspendThread(thread) == (DWORD)-1) {
+ DWORD last_error = GetLastError();
+
+      VPrintf(1, "Could not suspend thread %lu (error %lu)\n",
+              thread_entry.th32ThreadID, last_error);
+ continue;
+ }
+
+ suspended_threads_list.threadIds.push_back(thread_entry.th32ThreadID);
+ suspended_threads_list.threadHandles.push_back(thread);
+ new_thread_found = true;
+ } while (Thread32Next(threads, &thread_entry));
+
+ CloseHandle(threads);
+
+    // Between the call to `CreateToolhelp32Snapshot` and suspending the
+    // relevant threads, new threads may have been created. So keep taking
+    // snapshots and suspending newly found threads until a pass finds none.
+ } while (new_thread_found);
+
+  // All threads of this process except the current thread should now be suspended.
+ // Execute the callback function.
+ run_args->callback(suspended_threads_list, run_args->argument);
+
+  // Resume all threads.
+ for (const auto suspended_thread_handle :
+ suspended_threads_list.threadHandles) {
+ CHECK_NE(ResumeThread(suspended_thread_handle), -1);
+ CloseHandle(suspended_thread_handle);
+ }
+
+ return 0;
+}
+
+} // namespace
+
+void StopTheWorld(StopTheWorldCallback callback, void *argument) {
+ struct RunThreadArgs arg = {callback, argument};
+ DWORD trace_thread_id;
+
+ auto trace_thread =
+ CreateThread(nullptr, 0, RunThread, &arg, 0, &trace_thread_id);
+ CHECK(trace_thread);
+
+ WaitForSingleObject(trace_thread, INFINITE);
+ CloseHandle(trace_thread);
+}
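For context, a minimal sketch of a StopTheWorld client (the callback signature comes from sanitizer_stoptheworld.h; the body here is hypothetical):

    static void InspectThreads(const SuspendedThreadsList &threads,
                               void *argument) {
      // Every other thread in the process is suspended while this runs.
      for (uptr i = 0; i < threads.ThreadCount(); i++)
        Report("suspended thread %llu\n", (u64)threads.GetThreadID(i));
    }

    // In the tool:
    StopTheWorld(InspectThreads, /*argument=*/nullptr);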
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_WINDOWS
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_suppressions.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_suppressions.cpp
index a674034b8e29..9c8c4bf9d1a4 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_suppressions.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_suppressions.cpp
@@ -86,6 +86,7 @@ void SuppressionContext::ParseFromFile(const char *filename) {
}
Parse(file_contents);
+ UnmapOrDie(file_contents, contents_size);
}
bool SuppressionContext::Match(const char *str, const char *type,
@@ -137,7 +138,10 @@ void SuppressionContext::Parse(const char *str) {
}
}
if (type == suppression_types_num_) {
- Printf("%s: failed to parse suppressions\n", SanitizerToolName);
+ Printf("%s: failed to parse suppressions.\n", SanitizerToolName);
+ Printf("Supported suppression types are:\n");
+ for (type = 0; type < suppression_types_num_; type++)
+ Printf("- %s\n", suppression_types_[type]);
Die();
}
Suppression s;
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer.cpp
index 0c4b84c767aa..519f768f8969 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer.cpp
@@ -10,11 +10,14 @@
// run-time libraries.
//===----------------------------------------------------------------------===//
+#include <errno.h>
+
#include "sanitizer_allocator_internal.h"
-#include "sanitizer_platform.h"
+#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_placement_new.h"
+#include "sanitizer_platform.h"
#include "sanitizer_symbolizer_internal.h"
namespace __sanitizer {
@@ -30,6 +33,7 @@ void AddressInfo::Clear() {
InternalFree(file);
internal_memset(this, 0, sizeof(AddressInfo));
function_offset = kUnknown;
+ uuid_size = 0;
}
void AddressInfo::FillModuleInfo(const char *mod_name, uptr mod_offset,
@@ -37,6 +41,16 @@ void AddressInfo::FillModuleInfo(const char *mod_name, uptr mod_offset,
module = internal_strdup(mod_name);
module_offset = mod_offset;
module_arch = mod_arch;
+ uuid_size = 0;
+}
+
+void AddressInfo::FillModuleInfo(const LoadedModule &mod) {
+ module = internal_strdup(mod.full_name());
+ module_offset = address - mod.base_address();
+ module_arch = mod.arch();
+ if (mod.uuid_size())
+ internal_memcpy(uuid, mod.uuid(), mod.uuid_size());
+ uuid_size = mod.uuid_size();
}
SymbolizedStack::SymbolizedStack() : next(nullptr), info() {}
@@ -116,7 +130,7 @@ Symbolizer::Symbolizer(IntrusiveList<SymbolizerTool> tools)
start_hook_(0), end_hook_(0) {}
Symbolizer::SymbolizerScope::SymbolizerScope(const Symbolizer *sym)
- : sym_(sym) {
+ : sym_(sym), errno_(errno) {
if (sym_->start_hook_)
sym_->start_hook_();
}
@@ -124,12 +138,7 @@ Symbolizer::SymbolizerScope::SymbolizerScope(const Symbolizer *sym)
Symbolizer::SymbolizerScope::~SymbolizerScope() {
if (sym_->end_hook_)
sym_->end_hook_();
-}
-
-void Symbolizer::LateInitializeTools() {
- for (auto &tool : tools_) {
- tool.LateInitialize();
- }
+ errno = errno_;
}
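The errno handling added to SymbolizerScope is the standard RAII backup pattern; the same idea in isolation, using only standard C++:

    #include <cerrno>

    struct ScopedErrnoRestore {
      int saved_;
      ScopedErrnoRestore() : saved_(errno) {}
      // Whatever clobbered errno in between (here, the external symbolizer
      // machinery) becomes invisible to the caller.
      ~ScopedErrnoRestore() { errno = saved_; }
    };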
} // namespace __sanitizer
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer.h
index 2476b0ea7bf7..16ef2f2fd717 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer.h
@@ -32,6 +32,8 @@ struct AddressInfo {
char *module;
uptr module_offset;
ModuleArch module_arch;
+ u8 uuid[kModuleUUIDSize];
+ uptr uuid_size;
static const uptr kUnknown = ~(uptr)0;
char *function;
@@ -45,6 +47,8 @@ struct AddressInfo {
// Deletes all strings and resets all fields.
void Clear();
void FillModuleInfo(const char *mod_name, uptr mod_offset, ModuleArch arch);
+ void FillModuleInfo(const LoadedModule &mod);
+ uptr module_base() const { return address - module_offset; }
};
// Linked list of symbolized frames (each frame is described by AddressInfo).
@@ -60,6 +64,26 @@ struct SymbolizedStack {
SymbolizedStack();
};
+class SymbolizedStackHolder {
+ SymbolizedStack *Stack;
+
+ void clear() {
+ if (Stack)
+ Stack->ClearAll();
+ }
+
+ public:
+ explicit SymbolizedStackHolder(SymbolizedStack *Stack = nullptr)
+ : Stack(Stack) {}
+ ~SymbolizedStackHolder() { clear(); }
+ void reset(SymbolizedStack *S = nullptr) {
+ if (Stack != S)
+ clear();
+ Stack = S;
+ }
+ const SymbolizedStack *get() const { return Stack; }
+};
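A minimal usage sketch for the new holder (SymbolizePC as declared further down in this header):

    SymbolizedStackHolder symbolized(Symbolizer::GetOrInit()->SymbolizePC(pc));
    for (const SymbolizedStack *frame = symbolized.get(); frame;
         frame = frame->next) {
      // ... render frame->info ...
    }
    // ~SymbolizedStackHolder() runs ClearAll() on the whole chain.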
+
// For now, DataInfo is used to describe global variable.
struct DataInfo {
// Owns all the string members. Storage for them is
@@ -132,7 +156,7 @@ class Symbolizer final {
// Release internal caches (if any).
void Flush();
- // Attempts to demangle the provided C++ mangled name.
+ // Attempts to demangle the provided C++ mangled name. Never returns nullptr.
const char *Demangle(const char *name);
// Allow user to install hooks that would be called before/after Symbolizer
@@ -150,6 +174,8 @@ class Symbolizer final {
void InvalidateModuleList();
+ const ListOfModules &GetRefreshedListOfModules();
+
private:
// GetModuleNameAndOffsetForPC has to return a string to the caller.
// Since the corresponding module might get unloaded later, we should create
@@ -158,7 +184,7 @@ class Symbolizer final {
// its method should be protected by |mu_|.
class ModuleNameOwner {
public:
- explicit ModuleNameOwner(BlockingMutex *synchronized_by)
+ explicit ModuleNameOwner(Mutex *synchronized_by)
: last_match_(nullptr), mu_(synchronized_by) {
storage_.reserve(kInitialCapacity);
}
@@ -169,7 +195,7 @@ class Symbolizer final {
InternalMmapVector<const char*> storage_;
const char *last_match_;
- BlockingMutex *mu_;
+ Mutex *mu_;
} module_names_;
/// Platform-specific function for creating a Symbolizer object.
@@ -183,7 +209,7 @@ class Symbolizer final {
// If stale, need to reload the modules before looking up addresses.
bool modules_fresh_;
- // Platform-specific default demangler, must not return nullptr.
+ // Platform-specific default demangler, returns nullptr on failure.
const char *PlatformDemangle(const char *name);
static Symbolizer *symbolizer_;
@@ -192,7 +218,7 @@ class Symbolizer final {
// Mutex locked from public methods of |Symbolizer|, so that the internals
// (including individual symbolizer tools and platform-specific methods) are
// always synchronized.
- BlockingMutex mu_;
+ Mutex mu_;
IntrusiveList<SymbolizerTool> tools_;
@@ -208,10 +234,8 @@ class Symbolizer final {
~SymbolizerScope();
private:
const Symbolizer *sym_;
+  int errno_; // Saved errno, restored in case the symbolizer changes it.
};
-
- // Calls `LateInitialize()` on all items in `tools_`.
- void LateInitializeTools();
};
#ifdef SANITIZER_WINDOWS
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_internal.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_internal.h
index 71de1758b3e9..2345aee98554 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_internal.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_internal.h
@@ -13,15 +13,15 @@
#ifndef SANITIZER_SYMBOLIZER_INTERNAL_H
#define SANITIZER_SYMBOLIZER_INTERNAL_H
-#include "sanitizer_symbolizer.h"
#include "sanitizer_file.h"
+#include "sanitizer_symbolizer.h"
#include "sanitizer_vector.h"
namespace __sanitizer {
// Parsing helpers, 'str' is searched for delimiter(s) and a string or uptr
// is extracted. When extracting a string, a newly allocated (using
-// InternalAlloc) and null-terminataed buffer is returned. They return a pointer
+// InternalAlloc) and null-terminated buffer is returned. They return a pointer
// to the next character after the found delimiter.
const char *ExtractToken(const char *str, const char *delims, char **result);
const char *ExtractInt(const char *str, const char *delims, int *result);
@@ -70,11 +70,6 @@ class SymbolizerTool {
return nullptr;
}
- // Called during the LateInitialize phase of Sanitizer initialization.
- // Usually this is a safe place to call code that might need to use user
- // memory allocators.
- virtual void LateInitialize() {}
-
protected:
~SymbolizerTool() {}
};
@@ -91,13 +86,14 @@ class SymbolizerProcess {
~SymbolizerProcess() {}
/// The maximum number of arguments required to invoke a tool process.
- static const unsigned kArgVMax = 6;
+ static const unsigned kArgVMax = 16;
// Customizable by subclasses.
virtual bool StartSymbolizerSubprocess();
- virtual bool ReadFromSymbolizer(char *buffer, uptr max_length);
+ virtual bool ReadFromSymbolizer();
// Return the environment to run the symbolizer in.
virtual char **GetEnvP() { return GetEnviron(); }
+ InternalMmapVector<char> &GetBuff() { return buffer_; }
private:
virtual bool ReachedEndOfOutput(const char *buffer, uptr length) const {
@@ -118,8 +114,7 @@ class SymbolizerProcess {
fd_t input_fd_;
fd_t output_fd_;
- static const uptr kBufferSize = 16 * 1024;
- char buffer_[kBufferSize];
+ InternalMmapVector<char> buffer_;
static const uptr kMaxTimesRestarted = 5;
static const int kSymbolizerStartupTimeMillis = 10;
@@ -165,6 +160,15 @@ void ParseSymbolizePCOutput(const char *str, SymbolizedStack *res);
// Used by LLVMSymbolizer and InternalSymbolizer.
void ParseSymbolizeDataOutput(const char *str, DataInfo *info);
+// Parses repeated strings in the following format:
+// <function_name>
+// <var_name>
+// <file_name>:<line_number>[:<column_number>]
+// [<frame_offset>|??] [<size>|??] [<tag_offset>|??]
+// Used by LLVMSymbolizer and InternalSymbolizer.
+void ParseSymbolizeFrameOutput(const char *str,
+ InternalMmapVector<LocalInfo> *locals);
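For example (values hypothetical), one record of such output:

    main
    x
    /src/a.cc:3:7
    16 4 0

appends a single LocalInfo describing variable "x" in function "main", declared at /src/a.cc:3:7, at frame offset 16, size 4, tag offset 0; any field printed as "??" is left unset.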
+
} // namespace __sanitizer
#endif // SANITIZER_SYMBOLIZER_INTERNAL_H
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libbacktrace.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libbacktrace.cpp
index 27ed222745ec..d78dab93487f 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libbacktrace.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libbacktrace.cpp
@@ -11,11 +11,11 @@
// Libbacktrace implementation of symbolizer parts.
//===----------------------------------------------------------------------===//
-#include "sanitizer_platform.h"
+#include "sanitizer_symbolizer_libbacktrace.h"
#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform.h"
#include "sanitizer_symbolizer.h"
-#include "sanitizer_symbolizer_libbacktrace.h"
#if SANITIZER_LIBBACKTRACE
# include "backtrace-supported.h"
@@ -199,7 +199,7 @@ static char *DemangleAlloc(const char *name, bool always_alloc) {
#endif
if (always_alloc)
return internal_strdup(name);
- return 0;
+ return nullptr;
}
const char *LibbacktraceSymbolizer::Demangle(const char *name) {
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cpp
index 98418b426c37..74458028ae8f 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cpp
@@ -83,16 +83,13 @@ const char *ExtractTokenUpToDelimiter(const char *str, const char *delimiter,
}
SymbolizedStack *Symbolizer::SymbolizePC(uptr addr) {
- BlockingMutexLock l(&mu_);
- const char *module_name = nullptr;
- uptr module_offset;
- ModuleArch arch;
+ Lock l(&mu_);
SymbolizedStack *res = SymbolizedStack::New(addr);
- if (!FindModuleNameAndOffsetForAddress(addr, &module_name, &module_offset,
- &arch))
+ auto *mod = FindModuleForAddress(addr);
+ if (!mod)
return res;
// Always fill data about module name and offset.
- res->info.FillModuleInfo(module_name, module_offset, arch);
+ res->info.FillModuleInfo(*mod);
for (auto &tool : tools_) {
SymbolizerScope sym_scope(this);
if (tool.SymbolizePC(addr, res)) {
@@ -103,7 +100,7 @@ SymbolizedStack *Symbolizer::SymbolizePC(uptr addr) {
}
bool Symbolizer::SymbolizeData(uptr addr, DataInfo *info) {
- BlockingMutexLock l(&mu_);
+ Lock l(&mu_);
const char *module_name = nullptr;
uptr module_offset;
ModuleArch arch;
@@ -120,11 +117,11 @@ bool Symbolizer::SymbolizeData(uptr addr, DataInfo *info) {
return true;
}
}
- return true;
+ return false;
}
bool Symbolizer::SymbolizeFrame(uptr addr, FrameInfo *info) {
- BlockingMutexLock l(&mu_);
+ Lock l(&mu_);
const char *module_name = nullptr;
if (!FindModuleNameAndOffsetForAddress(
addr, &module_name, &info->module_offset, &info->module_arch))
@@ -136,12 +133,12 @@ bool Symbolizer::SymbolizeFrame(uptr addr, FrameInfo *info) {
return true;
}
}
- return true;
+ return false;
}
bool Symbolizer::GetModuleNameAndOffsetForPC(uptr pc, const char **module_name,
uptr *module_address) {
- BlockingMutexLock l(&mu_);
+ Lock l(&mu_);
const char *internal_module_name = nullptr;
ModuleArch arch;
if (!FindModuleNameAndOffsetForAddress(pc, &internal_module_name,
@@ -154,7 +151,7 @@ bool Symbolizer::GetModuleNameAndOffsetForPC(uptr pc, const char **module_name,
}
void Symbolizer::Flush() {
- BlockingMutexLock l(&mu_);
+ Lock l(&mu_);
for (auto &tool : tools_) {
SymbolizerScope sym_scope(this);
tool.Flush();
@@ -162,13 +159,16 @@ void Symbolizer::Flush() {
}
const char *Symbolizer::Demangle(const char *name) {
- BlockingMutexLock l(&mu_);
+ CHECK(name);
+ Lock l(&mu_);
for (auto &tool : tools_) {
SymbolizerScope sym_scope(this);
if (const char *demangled = tool.Demangle(name))
return demangled;
}
- return PlatformDemangle(name);
+ if (const char *demangled = PlatformDemangle(name))
+ return demangled;
+ return name;
}
bool Symbolizer::FindModuleNameAndOffsetForAddress(uptr address,
@@ -191,6 +191,13 @@ void Symbolizer::RefreshModules() {
modules_fresh_ = true;
}
+const ListOfModules &Symbolizer::GetRefreshedListOfModules() {
+ if (!modules_fresh_)
+ RefreshModules();
+
+ return modules_;
+}
+
static const LoadedModule *SearchForModule(const ListOfModules &modules,
uptr address) {
for (uptr i = 0; i < modules.size(); i++) {
@@ -240,7 +247,7 @@ const LoadedModule *Symbolizer::FindModuleForAddress(uptr address) {
class LLVMSymbolizerProcess final : public SymbolizerProcess {
public:
explicit LLVMSymbolizerProcess(const char *path)
- : SymbolizerProcess(path, /*use_posix_spawn=*/SANITIZER_MAC) {}
+ : SymbolizerProcess(path, /*use_posix_spawn=*/SANITIZER_APPLE) {}
private:
bool ReachedEndOfOutput(const char *buffer, uptr length) const override {
@@ -259,6 +266,8 @@ class LLVMSymbolizerProcess final : public SymbolizerProcess {
const char* const kSymbolizerArch = "--default-arch=x86_64";
#elif defined(__i386__)
const char* const kSymbolizerArch = "--default-arch=i386";
+#elif SANITIZER_LOONGARCH64
+ const char *const kSymbolizerArch = "--default-arch=loongarch64";
#elif SANITIZER_RISCV64
const char *const kSymbolizerArch = "--default-arch=riscv64";
#elif defined(__aarch64__)
@@ -277,14 +286,17 @@ class LLVMSymbolizerProcess final : public SymbolizerProcess {
const char* const kSymbolizerArch = "--default-arch=unknown";
#endif
- const char *const inline_flag = common_flags()->symbolize_inline_frames
- ? "--inlines"
- : "--no-inlines";
+ const char *const demangle_flag =
+ common_flags()->demangle ? "--demangle" : "--no-demangle";
+ const char *const inline_flag =
+ common_flags()->symbolize_inline_frames ? "--inlines" : "--no-inlines";
int i = 0;
argv[i++] = path_to_binary;
+ argv[i++] = demangle_flag;
argv[i++] = inline_flag;
argv[i++] = kSymbolizerArch;
argv[i++] = nullptr;
+ CHECK_LE(i, kArgVMax);
}
};
@@ -363,18 +375,25 @@ void ParseSymbolizePCOutput(const char *str, SymbolizedStack *res) {
}
}
-// Parses a two-line string in the following format:
+// Parses a two- or three-line string in the following format:
// <symbol_name>
// <start_address> <size>
-// Used by LLVMSymbolizer and InternalSymbolizer.
+// <filename>:<line>
+// Used by LLVMSymbolizer and InternalSymbolizer. LLVMSymbolizer added support
+// for symbolizing the third line in D123538, but we support the older two-line
+// information as well.
void ParseSymbolizeDataOutput(const char *str, DataInfo *info) {
str = ExtractToken(str, "\n", &info->name);
str = ExtractUptr(str, " ", &info->start);
str = ExtractUptr(str, "\n", &info->size);
+  // Note: If the third line isn't present, these calls set info->file to an
+  // empty string and info->line to 0.
+ str = ExtractToken(str, ":", &info->file);
+ str = ExtractUptr(str, "\n", &info->line);
}
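A worked example of the new three-line form (values hypothetical):

    g_counter
    4210752 8
    /src/globals.cc:12

parses to name="g_counter", start=4210752, size=8, file="/src/globals.cc", line=12. With the older two-line output, file parses to an empty string and line to 0.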
-static void ParseSymbolizeFrameOutput(const char *str,
- InternalMmapVector<LocalInfo> *locals) {
+void ParseSymbolizeFrameOutput(const char *str,
+ InternalMmapVector<LocalInfo> *locals) {
if (internal_strncmp(str, "??", 2) == 0)
return;
@@ -500,9 +519,9 @@ const char *SymbolizerProcess::SendCommandImpl(const char *command) {
return nullptr;
if (!WriteToSymbolizer(command, internal_strlen(command)))
return nullptr;
- if (!ReadFromSymbolizer(buffer_, kBufferSize))
- return nullptr;
- return buffer_;
+ if (!ReadFromSymbolizer())
+ return nullptr;
+ return buffer_.data();
}
bool SymbolizerProcess::Restart() {
@@ -513,31 +532,33 @@ bool SymbolizerProcess::Restart() {
return StartSymbolizerSubprocess();
}
-bool SymbolizerProcess::ReadFromSymbolizer(char *buffer, uptr max_length) {
- if (max_length == 0)
- return true;
- uptr read_len = 0;
- while (true) {
+bool SymbolizerProcess::ReadFromSymbolizer() {
+ buffer_.clear();
+ constexpr uptr max_length = 1024;
+ bool ret = true;
+ do {
uptr just_read = 0;
- bool success = ReadFromFile(input_fd_, buffer + read_len,
- max_length - read_len - 1, &just_read);
+ uptr size_before = buffer_.size();
+ buffer_.resize(size_before + max_length);
+ buffer_.resize(buffer_.capacity());
+    bool read_ok = ReadFromFile(input_fd_, &buffer_[size_before],
+                                buffer_.size() - size_before, &just_read);
+
+    if (!read_ok)
+      just_read = 0;
+
+ buffer_.resize(size_before + just_read);
+
// We can't read 0 bytes, as we don't expect external symbolizer to close
// its stdout.
- if (!success || just_read == 0) {
+ if (just_read == 0) {
Report("WARNING: Can't read from symbolizer at fd %d\n", input_fd_);
- return false;
- }
- read_len += just_read;
- if (ReachedEndOfOutput(buffer, read_len))
- break;
- if (read_len + 1 == max_length) {
- Report("WARNING: Symbolizer buffer too small\n");
- read_len = 0;
+ ret = false;
break;
}
- }
- buffer[read_len] = '\0';
- return true;
+ } while (!ReachedEndOfOutput(buffer_.data(), buffer_.size()));
+ buffer_.push_back('\0');
+ return ret;
}
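The grow-then-trim loop above, restated in isolation over a plain POSIX file descriptor (helper names hypothetical, standard C++ only):

    #include <unistd.h>

    #include <vector>

    bool ReadUntil(int fd, std::vector<char> &buf,
                   bool (*reached_end)(const char *, size_t)) {
      constexpr size_t kChunk = 1024;
      buf.clear();
      do {
        size_t before = buf.size();
        buf.resize(before + kChunk);      // grow by a fixed chunk
        ssize_t n = read(fd, buf.data() + before, kChunk);
        if (n <= 0) {                     // error or unexpected EOF
          buf.resize(before);
          return false;
        }
        buf.resize(before + (size_t)n);   // keep only what was read
      } while (!reached_end(buf.data(), buf.size()));
      buf.push_back('\0');
      return true;
    }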
bool SymbolizerProcess::WriteToSymbolizer(const char *buffer, uptr length) {
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.cpp
index 5c25b28b5dc9..f1cc0b5e1e8a 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.cpp
@@ -12,19 +12,18 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
-#include "sanitizer_allocator_internal.h"
-#include "sanitizer_mac.h"
-#include "sanitizer_symbolizer_mac.h"
+# include <dlfcn.h>
+# include <errno.h>
+# include <stdlib.h>
+# include <sys/wait.h>
+# include <unistd.h>
+# include <util.h>
-#include <dlfcn.h>
-#include <errno.h>
-#include <mach/mach.h>
-#include <stdlib.h>
-#include <sys/wait.h>
-#include <unistd.h>
-#include <util.h>
+# include "sanitizer_allocator_internal.h"
+# include "sanitizer_mac.h"
+# include "sanitizer_symbolizer_mac.h"
namespace __sanitizer {
@@ -43,7 +42,8 @@ bool DlAddrSymbolizer::SymbolizePC(uptr addr, SymbolizedStack *stack) {
}
const char *demangled = DemangleSwiftAndCXX(info.dli_sname);
- if (!demangled) return false;
+ if (!demangled)
+ demangled = info.dli_sname;
stack->info.function = internal_strdup(demangled);
return true;
}
@@ -53,18 +53,13 @@ bool DlAddrSymbolizer::SymbolizeData(uptr addr, DataInfo *datainfo) {
int result = dladdr((const void *)addr, &info);
if (!result) return false;
const char *demangled = DemangleSwiftAndCXX(info.dli_sname);
+ if (!demangled)
+ demangled = info.dli_sname;
datainfo->name = internal_strdup(demangled);
datainfo->start = (uptr)info.dli_saddr;
return true;
}
-#define K_ATOS_ENV_VAR "__check_mach_ports_lookup"
-
-// This cannot live in `AtosSymbolizerProcess` because instances of that object
-// are allocated by the internal allocator which under ASan is poisoned with
-// kAsanInternalHeapMagic.
-static char kAtosMachPortEnvEntry[] = K_ATOS_ENV_VAR "=000000000000000";
-
class AtosSymbolizerProcess final : public SymbolizerProcess {
public:
explicit AtosSymbolizerProcess(const char *path)
@@ -72,51 +67,13 @@ class AtosSymbolizerProcess final : public SymbolizerProcess {
pid_str_[0] = '\0';
}
- void LateInitialize() {
- if (SANITIZER_IOSSIM) {
- // `putenv()` may call malloc/realloc so it is only safe to do this
- // during LateInitialize() or later (i.e. we can't do this in the
- // constructor). We also can't do this in `StartSymbolizerSubprocess()`
- // because in TSan we switch allocators when we're symbolizing.
- // We use `putenv()` rather than `setenv()` so that we can later directly
- // write into the storage without LibC getting involved to change what the
- // variable is set to
- int result = putenv(kAtosMachPortEnvEntry);
- CHECK_EQ(result, 0);
- }
- }
-
private:
bool StartSymbolizerSubprocess() override {
- // Configure sandbox before starting atos process.
-
// Put the string command line argument in the object so that it outlives
// the call to GetArgV.
- internal_snprintf(pid_str_, sizeof(pid_str_), "%d", internal_getpid());
-
- if (SANITIZER_IOSSIM) {
- // `atos` in the simulator is restricted in its ability to retrieve the
- // task port for the target process (us) so we need to do extra work
- // to pass our task port to it.
- mach_port_t ports[]{mach_task_self()};
- kern_return_t ret =
- mach_ports_register(mach_task_self(), ports, /*count=*/1);
- CHECK_EQ(ret, KERN_SUCCESS);
-
- // Set environment variable that signals to `atos` that it should look
- // for our task port. We can't call `setenv()` here because it might call
- // malloc/realloc. To avoid that we instead update the
- // `mach_port_env_var_entry_` variable with our current PID.
- uptr count = internal_snprintf(kAtosMachPortEnvEntry,
- sizeof(kAtosMachPortEnvEntry),
- K_ATOS_ENV_VAR "=%s", pid_str_);
- CHECK_GE(count, sizeof(K_ATOS_ENV_VAR) + internal_strlen(pid_str_));
- // Document our assumption but without calling `getenv()` in normal
- // builds.
- DCHECK(getenv(K_ATOS_ENV_VAR));
- DCHECK_EQ(internal_strcmp(getenv(K_ATOS_ENV_VAR), pid_str_), 0);
- }
+ internal_snprintf(pid_str_, sizeof(pid_str_), "%d", (int)internal_getpid());
+ // Configure sandbox before starting atos process.
return SymbolizerProcess::StartSymbolizerSubprocess();
}
@@ -137,13 +94,10 @@ class AtosSymbolizerProcess final : public SymbolizerProcess {
argv[i++] = "-d";
}
argv[i++] = nullptr;
+ CHECK_LE(i, kArgVMax);
}
char pid_str_[16];
- // Space for `\0` in `K_ATOS_ENV_VAR` is reused for `=`.
- static_assert(sizeof(kAtosMachPortEnvEntry) ==
- (sizeof(K_ATOS_ENV_VAR) + sizeof(pid_str_)),
- "sizes should match");
};
#undef K_ATOS_ENV_VAR
@@ -212,7 +166,7 @@ bool AtosSymbolizer::SymbolizePC(uptr addr, SymbolizedStack *stack) {
uptr start_address = AddressInfo::kUnknown;
if (!ParseCommandOutput(buf, addr, &stack->info.function, &stack->info.module,
&stack->info.file, &line, &start_address)) {
- process_ = nullptr;
+ Report("WARNING: atos failed to symbolize address \"0x%zx\"\n", addr);
return false;
}
stack->info.line = (int)line;
@@ -249,8 +203,6 @@ bool AtosSymbolizer::SymbolizeData(uptr addr, DataInfo *info) {
return true;
}
-void AtosSymbolizer::LateInitialize() { process_->LateInitialize(); }
-
} // namespace __sanitizer
-#endif // SANITIZER_MAC
+#endif // SANITIZER_APPLE
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.h
index 401d30fa5033..cea244182907 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_mac.h
@@ -15,7 +15,7 @@
#define SANITIZER_SYMBOLIZER_MAC_H
#include "sanitizer_platform.h"
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
#include "sanitizer_symbolizer_internal.h"
@@ -35,7 +35,6 @@ class AtosSymbolizer final : public SymbolizerTool {
bool SymbolizePC(uptr addr, SymbolizedStack *stack) override;
bool SymbolizeData(uptr addr, DataInfo *info) override;
- void LateInitialize() override;
private:
AtosSymbolizerProcess *process_;
@@ -43,6 +42,6 @@ class AtosSymbolizer final : public SymbolizerTool {
} // namespace __sanitizer
-#endif // SANITIZER_MAC
+#endif // SANITIZER_APPLE
#endif // SANITIZER_SYMBOLIZER_MAC_H
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup.cpp
index 9a5b4a8c54c7..31d91ef3c739 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup.cpp
@@ -8,145 +8,155 @@
//
// This file is shared between various sanitizers' runtime libraries.
//
-// Implementation of offline markup symbolizer.
+// This generic support for offline symbolizing is based on the
+// Fuchsia port. We don't do any actual symbolization per se.
+// Instead, we emit text containing raw addresses and raw linkage
+// symbol names, embedded in Fuchsia's symbolization markup format.
+// See the spec at:
+// https://llvm.org/docs/SymbolizerMarkupFormat.html
//===----------------------------------------------------------------------===//
-#include "sanitizer_platform.h"
-#if SANITIZER_SYMBOLIZER_MARKUP
-
-#if SANITIZER_FUCHSIA
-#include "sanitizer_symbolizer_fuchsia.h"
-# endif
+#include "sanitizer_symbolizer_markup.h"
-# include <limits.h>
-# include <unwind.h>
-
-# include "sanitizer_stacktrace.h"
-# include "sanitizer_symbolizer.h"
+#include "sanitizer_common.h"
+#include "sanitizer_symbolizer.h"
+#include "sanitizer_symbolizer_markup_constants.h"
namespace __sanitizer {
-// This generic support for offline symbolizing is based on the
-// Fuchsia port. We don't do any actual symbolization per se.
-// Instead, we emit text containing raw addresses and raw linkage
-// symbol names, embedded in Fuchsia's symbolization markup format.
-// Fuchsia's logging infrastructure emits enough information about
-// process memory layout that a post-processing filter can do the
-// symbolization and pretty-print the markup. See the spec at:
-// https://fuchsia.googlesource.com/zircon/+/master/docs/symbolizer_markup.md
-
-// This is used by UBSan for type names, and by ASan for global variable names.
-// It's expected to return a static buffer that will be reused on each call.
-const char *Symbolizer::Demangle(const char *name) {
- static char buffer[kFormatDemangleMax];
- internal_snprintf(buffer, sizeof(buffer), kFormatDemangle, name);
- return buffer;
+void MarkupStackTracePrinter::RenderData(InternalScopedString *buffer,
+ const char *format, const DataInfo *DI,
+ const char *strip_path_prefix) {
+ RenderContext(buffer);
+ buffer->AppendF(kFormatData, reinterpret_cast<void *>(DI->start));
}
-// This is used mostly for suppression matching. Making it work
-// would enable "interceptor_via_lib" suppressions. It's also used
-// once in UBSan to say "in module ..." in a message that also
-// includes an address in the module, so post-processing can already
-// pretty-print that so as to indicate the module.
-bool Symbolizer::GetModuleNameAndOffsetForPC(uptr pc, const char **module_name,
- uptr *module_address) {
+bool MarkupStackTracePrinter::RenderNeedsSymbolization(const char *format) {
return false;
}
-// This is mainly used by hwasan for online symbolization. This isn't needed
-// since hwasan can always just dump stack frames for offline symbolization.
-bool Symbolizer::SymbolizeFrame(uptr addr, FrameInfo *info) { return false; }
-
-// This is used in some places for suppression checking, which we
-// don't really support for Fuchsia. It's also used in UBSan to
-// identify a PC location to a function name, so we always fill in
-// the function member with a string containing markup around the PC
-// value.
-// TODO(mcgrathr): Under SANITIZER_GO, it's currently used by TSan
-// to render stack frames, but that should be changed to use
-// RenderStackFrame.
-SymbolizedStack *Symbolizer::SymbolizePC(uptr addr) {
- SymbolizedStack *s = SymbolizedStack::New(addr);
+// We don't support the stack_trace_format flag at all.
+void MarkupStackTracePrinter::RenderFrame(InternalScopedString *buffer,
+ const char *format, int frame_no,
+ uptr address, const AddressInfo *info,
+ bool vs_style,
+ const char *strip_path_prefix) {
+ CHECK(!RenderNeedsSymbolization(format));
+ RenderContext(buffer);
+ buffer->AppendF(kFormatFrame, frame_no, reinterpret_cast<void *>(address));
+}
+
+bool MarkupSymbolizerTool::SymbolizePC(uptr addr, SymbolizedStack *stack) {
char buffer[kFormatFunctionMax];
- internal_snprintf(buffer, sizeof(buffer), kFormatFunction, addr);
- s->info.function = internal_strdup(buffer);
- return s;
+ internal_snprintf(buffer, sizeof(buffer), kFormatFunction,
+ reinterpret_cast<void *>(addr));
+ stack->info.function = internal_strdup(buffer);
+ return true;
}
-// Always claim we succeeded, so that RenderDataInfo will be called.
-bool Symbolizer::SymbolizeData(uptr addr, DataInfo *info) {
+bool MarkupSymbolizerTool::SymbolizeData(uptr addr, DataInfo *info) {
info->Clear();
info->start = addr;
return true;
}
-// We ignore the format argument to __sanitizer_symbolize_global.
-void RenderData(InternalScopedString *buffer, const char *format,
- const DataInfo *DI, const char *strip_path_prefix) {
- buffer->append(kFormatData, DI->start);
+const char *MarkupSymbolizerTool::Demangle(const char *name) {
+ static char buffer[kFormatDemangleMax];
+ internal_snprintf(buffer, sizeof(buffer), kFormatDemangle, name);
+ return buffer;
}
-bool RenderNeedsSymbolization(const char *format) { return false; }
-
-// We don't support the stack_trace_format flag at all.
-void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
- uptr address, const AddressInfo *info, bool vs_style,
- const char *strip_path_prefix, const char *strip_func_prefix) {
- CHECK(!RenderNeedsSymbolization(format));
- buffer->append(kFormatFrame, frame_no, address);
+// Fuchsia's implementation of symbolizer markup doesn't need to emit contextual
+// elements at this point.
+// Fuchsia's logging infrastructure emits enough information about
+// process memory layout that a post-processing filter can do the
+// symbolization and pretty-print the markup.
+#if !SANITIZER_FUCHSIA
+
+static bool ModulesEq(const LoadedModule &module,
+ const RenderedModule &renderedModule) {
+ return module.base_address() == renderedModule.base_address &&
+ internal_memcmp(module.uuid(), renderedModule.uuid,
+ module.uuid_size()) == 0 &&
+ internal_strcmp(module.full_name(), renderedModule.full_name) == 0;
}
-Symbolizer *Symbolizer::PlatformInit() {
- return new (symbolizer_allocator_) Symbolizer({});
-}
+static bool ModuleHasBeenRendered(
+ const LoadedModule &module,
+ const InternalMmapVectorNoCtor<RenderedModule> &renderedModules) {
+ for (const auto &renderedModule : renderedModules)
+ if (ModulesEq(module, renderedModule))
+ return true;
-void Symbolizer::LateInitialize() {
- Symbolizer::GetOrInit()->LateInitializeTools();
+ return false;
}
-void StartReportDeadlySignal() {}
-void ReportDeadlySignal(const SignalContext &sig, u32 tid,
- UnwindSignalStackCallbackType unwind,
- const void *unwind_context) {}
-
-#if SANITIZER_CAN_SLOW_UNWIND
-struct UnwindTraceArg {
- BufferedStackTrace *stack;
- u32 max_depth;
-};
-
-_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
- UnwindTraceArg *arg = static_cast<UnwindTraceArg *>(param);
- CHECK_LT(arg->stack->size, arg->max_depth);
- uptr pc = _Unwind_GetIP(ctx);
- if (pc < PAGE_SIZE) return _URC_NORMAL_STOP;
- arg->stack->trace_buffer[arg->stack->size++] = pc;
- return (arg->stack->size == arg->max_depth ? _URC_NORMAL_STOP
- : _URC_NO_REASON);
+static void RenderModule(InternalScopedString *buffer,
+ const LoadedModule &module, uptr moduleId) {
+ InternalScopedString buildIdBuffer;
+ for (uptr i = 0; i < module.uuid_size(); i++)
+ buildIdBuffer.AppendF("%02x", module.uuid()[i]);
+
+ buffer->AppendF(kFormatModule, moduleId, module.full_name(),
+ buildIdBuffer.data());
+ buffer->Append("\n");
}
-void BufferedStackTrace::UnwindSlow(uptr pc, u32 max_depth) {
- CHECK_GE(max_depth, 2);
- size = 0;
- UnwindTraceArg arg = {this, Min(max_depth + 1, kStackTraceMax)};
- _Unwind_Backtrace(Unwind_Trace, &arg);
- CHECK_GT(size, 0);
- // We need to pop a few frames so that pc is on top.
- uptr to_pop = LocatePcInTrace(pc);
- // trace_buffer[0] belongs to the current function so we always pop it,
- // unless there is only 1 frame in the stack trace (1 frame is always better
- // than 0!).
- PopStackFrames(Min(to_pop, static_cast<uptr>(1)));
- trace_buffer[0] = pc;
+static void RenderMmaps(InternalScopedString *buffer,
+ const LoadedModule &module, uptr moduleId) {
+ InternalScopedString accessBuffer;
+
+  // All module mmaps are at least readable.
+ for (const auto &range : module.ranges()) {
+ accessBuffer.Append("r");
+ if (range.writable)
+ accessBuffer.Append("w");
+ if (range.executable)
+ accessBuffer.Append("x");
+
+      // {{{mmap:%starting_addr:%size_in_hex:load:%moduleId:r%(w|x):%relative_addr}}}
+
+ // module.base_address == dlpi_addr
+ // range.beg == dlpi_addr + p_vaddr
+ // relative address == p_vaddr == range.beg - module.base_address
+ buffer->AppendF(kFormatMmap, reinterpret_cast<void *>(range.beg),
+ range.end - range.beg, static_cast<int>(moduleId),
+ accessBuffer.data(), range.beg - module.base_address());
+
+ buffer->Append("\n");
+ accessBuffer.clear();
+ }
}
-void BufferedStackTrace::UnwindSlow(uptr pc, void *context, u32 max_depth) {
- CHECK(context);
- CHECK_GE(max_depth, 2);
- UNREACHABLE("signal context doesn't exist");
+void MarkupStackTracePrinter::RenderContext(InternalScopedString *buffer) {
+ if (renderedModules_.size() == 0)
+ buffer->Append("{{{reset}}}\n");
+
+ const auto &modules = Symbolizer::GetOrInit()->GetRefreshedListOfModules();
+
+ for (const auto &module : modules) {
+ if (ModuleHasBeenRendered(module, renderedModules_))
+ continue;
+
+    // Symbolizer markup id, used to refer to this module from other
+    // contextual elements.
+ uptr moduleId = renderedModules_.size();
+
+ RenderModule(buffer, module, moduleId);
+ RenderMmaps(buffer, module, moduleId);
+
+ renderedModules_.push_back({
+ internal_strdup(module.full_name()),
+ module.base_address(),
+ {},
+ });
+
+    // kModuleUUIDSize is the size of RenderedModule::uuid.
+ CHECK_GE(kModuleUUIDSize, module.uuid_size());
+ internal_memcpy(renderedModules_.back().uuid, module.uuid(),
+ module.uuid_size());
+ }
}
-#endif // SANITIZER_CAN_SLOW_UNWIND
+#endif // !SANITIZER_FUCHSIA
} // namespace __sanitizer
-
-#endif // SANITIZER_SYMBOLIZER_MARKUP
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup.h
new file mode 100644
index 000000000000..bc2ab24d625b
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup.h
@@ -0,0 +1,79 @@
+//===-- sanitizer_symbolizer_markup.h -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between various sanitizers' runtime libraries.
+//
+// Header for the offline markup symbolizer.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_SYMBOLIZER_MARKUP_H
+#define SANITIZER_SYMBOLIZER_MARKUP_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_stacktrace_printer.h"
+#include "sanitizer_symbolizer.h"
+#include "sanitizer_symbolizer_internal.h"
+
+namespace __sanitizer {
+
+// Simpler view of a LoadedModule. It only holds the information necessary to
+// identify unique modules.
+struct RenderedModule {
+ char *full_name;
+ uptr base_address;
+ u8 uuid[kModuleUUIDSize]; // BuildId
+};
+
+class MarkupStackTracePrinter : public StackTracePrinter {
+ public:
+ // We don't support the stack_trace_format flag at all.
+ void RenderFrame(InternalScopedString *buffer, const char *format,
+ int frame_no, uptr address, const AddressInfo *info,
+ bool vs_style, const char *strip_path_prefix = "") override;
+
+ bool RenderNeedsSymbolization(const char *format) override;
+
+ // We ignore the format argument to __sanitizer_symbolize_global.
+ void RenderData(InternalScopedString *buffer, const char *format,
+ const DataInfo *DI,
+ const char *strip_path_prefix = "") override;
+
+ private:
+ // Keeps track of the modules that have been rendered, to avoid re-rendering
+ // them.
+ InternalMmapVector<RenderedModule> renderedModules_;
+ void RenderContext(InternalScopedString *buffer);
+
+ protected:
+ ~MarkupStackTracePrinter() {}
+};
+
+class MarkupSymbolizerTool final : public SymbolizerTool {
+ public:
+ // This is used in some places for suppression checking, which we
+ // don't really support for Fuchsia. It's also used in UBSan to
+ // map a PC location to a function name, so we always fill in
+ // the function member with a string containing markup around the PC
+ // value.
+ // TODO(mcgrathr): Under SANITIZER_GO, it's currently used by TSan
+ // to render stack frames, but that should be changed to use
+ // RenderStackFrame.
+ bool SymbolizePC(uptr addr, SymbolizedStack *stack) override;
+
+ // Always claim we succeeded, so that RenderDataInfo will be called.
+ bool SymbolizeData(uptr addr, DataInfo *info) override;
+
+ // May return NULL if demangling failed.
+ // This is used by UBSan for type names, and by ASan for global variable
+ // names. It's expected to return a static buffer that will be reused on each
+ // call.
+ const char *Demangle(const char *name) override;
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_SYMBOLIZER_MARKUP_H
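For context, a minimal sketch of the deduplication check RenderContext relies on; ModuleHasBeenRendered is defined elsewhere, so this body is an assumption based on the RenderedModule fields above:

    static bool ModuleHasBeenRendered(
        const LoadedModule &module,
        const InternalMmapVector<RenderedModule> &rendered) {
      // A module counts as already rendered if some recorded entry matches
      // its load address, name, and build id.
      for (const RenderedModule &prev : rendered)
        if (prev.base_address == module.base_address() &&
            !internal_strcmp(prev.full_name, module.full_name()) &&
            !internal_memcmp(prev.uuid, module.uuid(), module.uuid_size()))
          return true;
      return false;
    }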
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_fuchsia.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup_constants.h
index c4061e38c6a4..a43661eaecf2 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_fuchsia.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup_constants.h
@@ -1,4 +1,5 @@
-//===-- sanitizer_symbolizer_fuchsia.h -----------------------------------===//
+//===-- sanitizer_symbolizer_markup_constants.h --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -8,10 +9,10 @@
//
// This file is shared between various sanitizers' runtime libraries.
//
-// Define Fuchsia's string formats and limits for the markup symbolizer.
+// Define string formats and limits for the markup symbolizer.
//===----------------------------------------------------------------------===//
-#ifndef SANITIZER_SYMBOLIZER_FUCHSIA_H
-#define SANITIZER_SYMBOLIZER_FUCHSIA_H
+#ifndef SANITIZER_SYMBOLIZER_MARKUP_CONSTANTS_H
+#define SANITIZER_SYMBOLIZER_MARKUP_CONSTANTS_H
#include "sanitizer_internal_defs.h"
@@ -32,11 +33,17 @@ constexpr uptr kFormatFunctionMax = 64; // More than big enough for 64-bit hex.
constexpr const char *kFormatData = "{{{data:%p}}}";
// One frame in a backtrace (printed on a line by itself).
-constexpr const char *kFormatFrame = "{{{bt:%u:%p}}}";
+constexpr const char *kFormatFrame = "{{{bt:%d:%p}}}";
+
+// Module contextual element.
+constexpr const char *kFormatModule = "{{{module:%zu:%s:elf:%s}}}";
+
+// mmap for a module segment.
+constexpr const char *kFormatMmap = "{{{mmap:%p:0x%zx:load:%d:%s:0x%zx}}}";
// Dump trigger element.
#define FORMAT_DUMPFILE "{{{dumpfile:%s:%s}}}"
} // namespace __sanitizer
-#endif // SANITIZER_SYMBOLIZER_FUCHSIA_H
+#endif // SANITIZER_SYMBOLIZER_MARKUP_CONSTANTS_H
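To make the formats above concrete, one module with a single read-execute segment plus one backtrace frame renders roughly as follows (module id 0; addresses and build id illustrative):

    {{{module:0:libc.so:elf:83238eb15a10a971}}}
    {{{mmap:0x41d7a2000:0x1000:load:0:rx:0x2000}}}
    {{{bt:0:0x41d7a2f40}}}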
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup_fuchsia.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup_fuchsia.cpp
new file mode 100644
index 000000000000..08b06c2faf30
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_markup_fuchsia.cpp
@@ -0,0 +1,85 @@
+//===-- sanitizer_symbolizer_markup_fuchsia.cpp ---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between various sanitizers' runtime libraries.
+//
+// Fuchsia specific implementation of offline markup symbolizer.
+//===----------------------------------------------------------------------===//
+#include "sanitizer_platform.h"
+
+#if SANITIZER_SYMBOLIZER_MARKUP
+
+# include "sanitizer_common.h"
+# include "sanitizer_stacktrace_printer.h"
+# include "sanitizer_symbolizer.h"
+# include "sanitizer_symbolizer_markup.h"
+# include "sanitizer_symbolizer_markup_constants.h"
+
+namespace __sanitizer {
+
+// This is used by UBSan for type names, and by ASan for global variable names.
+// It's expected to return a static buffer that will be reused on each call.
+const char *Symbolizer::Demangle(const char *name) {
+ static char buffer[kFormatDemangleMax];
+ internal_snprintf(buffer, sizeof(buffer), kFormatDemangle, name);
+ return buffer;
+}
+
+// This is used mostly for suppression matching. Making it work
+// would enable "interceptor_via_lib" suppressions. It's also used
+// once in UBSan to say "in module ..." in a message that also
+// includes an address in the module, so post-processing can already
+// pretty-print that so as to indicate the module.
+bool Symbolizer::GetModuleNameAndOffsetForPC(uptr pc, const char **module_name,
+ uptr *module_address) {
+ return false;
+}
+
+// This is mainly used by hwasan for online symbolization. This isn't needed
+// since hwasan can always just dump stack frames for offline symbolization.
+bool Symbolizer::SymbolizeFrame(uptr addr, FrameInfo *info) { return false; }
+
+// This is used in some places for suppression checking, which we
+// don't really support for Fuchsia. It's also used in UBSan to
+// map a PC location to a function name, so we always fill in
+// the function member with a string containing markup around the PC
+// value.
+// TODO(mcgrathr): Under SANITIZER_GO, it's currently used by TSan
+// to render stack frames, but that should be changed to use
+// RenderStackFrame.
+SymbolizedStack *Symbolizer::SymbolizePC(uptr addr) {
+ SymbolizedStack *s = SymbolizedStack::New(addr);
+ char buffer[kFormatFunctionMax];
+ internal_snprintf(buffer, sizeof(buffer), kFormatFunction, addr);
+ s->info.function = internal_strdup(buffer);
+ return s;
+}
+
+// Always claim we succeeded, so that RenderDataInfo will be called.
+bool Symbolizer::SymbolizeData(uptr addr, DataInfo *info) {
+ info->Clear();
+ info->start = addr;
+ return true;
+}
+
+// Fuchsia only uses the MarkupStackTracePrinter.
+StackTracePrinter *StackTracePrinter::NewStackTracePrinter() {
+ return new (GetGlobalLowLevelAllocator()) MarkupStackTracePrinter();
+}
+
+void MarkupStackTracePrinter::RenderContext(InternalScopedString *) {}
+
+Symbolizer *Symbolizer::PlatformInit() {
+ return new (symbolizer_allocator_) Symbolizer({});
+}
+
+void Symbolizer::LateInitialize() { Symbolizer::GetOrInit(); }
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_SYMBOLIZER_MARKUP
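Assuming the usual definitions from sanitizer_symbolizer_markup_constants.h, kFormatDemangle == "{{{symbol:%s}}}" and kFormatFunction == "{{{pc:%p}}}", the stubs above emit markup rather than real names, e.g.:

    Demangle("_ZN3Foo3BarEv")          -> "{{{symbol:_ZN3Foo3BarEv}}}"
    SymbolizePC(0x1000)->info.function == "{{{pc:0x1000}}}"

so the actual symbolization happens offline in a post-processing tool.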
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp
index 4cd4b4636f0a..0ddc24802d21 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cpp
@@ -12,26 +12,27 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
+#include "sanitizer_symbolizer_markup.h"
#if SANITIZER_POSIX
-#include "sanitizer_allocator_internal.h"
-#include "sanitizer_common.h"
-#include "sanitizer_file.h"
-#include "sanitizer_flags.h"
-#include "sanitizer_internal_defs.h"
-#include "sanitizer_linux.h"
-#include "sanitizer_placement_new.h"
-#include "sanitizer_posix.h"
-#include "sanitizer_procmaps.h"
-#include "sanitizer_symbolizer_internal.h"
-#include "sanitizer_symbolizer_libbacktrace.h"
-#include "sanitizer_symbolizer_mac.h"
-
-#include <dlfcn.h> // for dlsym()
-#include <errno.h>
-#include <stdint.h>
-#include <stdlib.h>
-#include <sys/wait.h>
-#include <unistd.h>
+# include <dlfcn.h> // for dlsym()
+# include <errno.h>
+# include <stdint.h>
+# include <stdlib.h>
+# include <sys/wait.h>
+# include <unistd.h>
+
+# include "sanitizer_allocator_internal.h"
+# include "sanitizer_common.h"
+# include "sanitizer_file.h"
+# include "sanitizer_flags.h"
+# include "sanitizer_internal_defs.h"
+# include "sanitizer_linux.h"
+# include "sanitizer_placement_new.h"
+# include "sanitizer_posix.h"
+# include "sanitizer_procmaps.h"
+# include "sanitizer_symbolizer_internal.h"
+# include "sanitizer_symbolizer_libbacktrace.h"
+# include "sanitizer_symbolizer_mac.h"
// C++ demangling function, as required by Itanium C++ ABI. This is weak,
// because we do not require a C++ ABI library to be linked to a program
@@ -56,7 +57,7 @@ const char *DemangleCXXABI(const char *name) {
__cxxabiv1::__cxa_demangle(name, 0, 0, 0))
return demangled_name;
- return name;
+ return nullptr;
}
// As of now, there are no headers for the Swift runtime. Once they are
@@ -72,7 +73,6 @@ static swift_demangle_ft swift_demangle_f;
// symbolication.
static void InitializeSwiftDemangler() {
swift_demangle_f = (swift_demangle_ft)dlsym(RTLD_DEFAULT, "swift_demangle");
- (void)dlerror(); // Cleanup error message in case of failure
}
// Attempts to demangle a Swift name. The demangler will return nullptr if a
@@ -155,7 +155,7 @@ bool SymbolizerProcess::StartSymbolizerSubprocess() {
}
if (use_posix_spawn_) {
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
fd_t fd = internal_spawn(argv, const_cast<const char **>(GetEnvP()), &pid);
if (fd == kInvalidFd) {
Report("WARNING: failed to spawn external symbolizer (errno: %d)\n",
@@ -165,9 +165,9 @@ bool SymbolizerProcess::StartSymbolizerSubprocess() {
input_fd_ = fd;
output_fd_ = fd;
-#else // SANITIZER_MAC
+#else // SANITIZER_APPLE
UNIMPLEMENTED();
-#endif // SANITIZER_MAC
+#endif // SANITIZER_APPLE
} else {
fd_t infd[2] = {}, outfd[2] = {};
if (!CreateTwoHighNumberedPipes(infd, outfd)) {
@@ -213,31 +213,36 @@ class Addr2LineProcess final : public SymbolizerProcess {
const char *(&argv)[kArgVMax]) const override {
int i = 0;
argv[i++] = path_to_binary;
- argv[i++] = "-iCfe";
+ if (common_flags()->demangle)
+ argv[i++] = "-C";
+ if (common_flags()->symbolize_inline_frames)
+ argv[i++] = "-i";
+ argv[i++] = "-fe";
argv[i++] = module_name_;
argv[i++] = nullptr;
+ CHECK_LE(i, kArgVMax);
}
bool ReachedEndOfOutput(const char *buffer, uptr length) const override;
- bool ReadFromSymbolizer(char *buffer, uptr max_length) override {
- if (!SymbolizerProcess::ReadFromSymbolizer(buffer, max_length))
+ bool ReadFromSymbolizer() override {
+ if (!SymbolizerProcess::ReadFromSymbolizer())
return false;
- // The returned buffer is empty when output is valid, but exceeds
- // max_length.
- if (*buffer == '\0')
- return true;
+ auto &buff = GetBuff();
 // We should cut out output_terminator_ at the end of the given buffer;
 // addr2line appends it to mark the end of its meaningful output. We cannot
 // scan the buffer from its beginning, because it is legal for it to start
 // with output_terminator_ when the given offset is invalid, so we scan
 // from the second character.
- char *garbage = internal_strstr(buffer + 1, output_terminator_);
+ char *garbage = internal_strstr(buff.data() + 1, output_terminator_);
// This should never be NULL since buffer must end up with
// output_terminator_.
CHECK(garbage);
+
// Trim the buffer.
- garbage[0] = '\0';
+ uintptr_t new_size = garbage - buff.data();
+ GetBuff().resize(new_size);
+ GetBuff().push_back('\0');
return true;
}
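For reference, with common_flags()->demangle and symbolize_inline_frames both enabled, the argv assembled in GetArgV above is equivalent to running

    addr2line -C -i -fe <module_name>

and with both flags disabled it degrades to addr2line -fe <module_name>; the new CHECK_LE(i, kArgVMax) guards against the argument list outgrowing the fixed array.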
@@ -312,43 +317,51 @@ class Addr2LinePool final : public SymbolizerTool {
FIRST_32_SECOND_64(UINT32_MAX, UINT64_MAX);
};
-#if SANITIZER_SUPPORTS_WEAK_HOOKS
+# if SANITIZER_SUPPORTS_WEAK_HOOKS
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE bool
__sanitizer_symbolize_code(const char *ModuleName, u64 ModuleOffset,
- char *Buffer, int MaxLength,
- bool SymbolizeInlineFrames);
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-bool __sanitizer_symbolize_data(const char *ModuleName, u64 ModuleOffset,
- char *Buffer, int MaxLength);
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __sanitizer_symbolize_flush();
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-int __sanitizer_symbolize_demangle(const char *Name, char *Buffer,
- int MaxLength);
+ char *Buffer, int MaxLength);
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE bool
+__sanitizer_symbolize_data(const char *ModuleName, u64 ModuleOffset,
+ char *Buffer, int MaxLength);
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE bool
+__sanitizer_symbolize_frame(const char *ModuleName, u64 ModuleOffset,
+ char *Buffer, int MaxLength);
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_symbolize_flush();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE bool
+__sanitizer_symbolize_demangle(const char *Name, char *Buffer, int MaxLength);
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE bool
+__sanitizer_symbolize_set_demangle(bool Demangle);
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE bool
+__sanitizer_symbolize_set_inline_frames(bool InlineFrames);
} // extern "C"
class InternalSymbolizer final : public SymbolizerTool {
public:
static InternalSymbolizer *get(LowLevelAllocator *alloc) {
- if (__sanitizer_symbolize_code != 0 &&
- __sanitizer_symbolize_data != 0) {
- return new(*alloc) InternalSymbolizer();
- }
- return 0;
+ // This is the most commonly used hook, so we use it to detect the presence
+ // of the internal symbolizer.
+ if (&__sanitizer_symbolize_code == nullptr)
+ return nullptr;
+ CHECK(__sanitizer_symbolize_set_demangle(common_flags()->demangle));
+ CHECK(__sanitizer_symbolize_set_inline_frames(
+ common_flags()->symbolize_inline_frames));
+ return new (*alloc) InternalSymbolizer();
}
bool SymbolizePC(uptr addr, SymbolizedStack *stack) override {
bool result = __sanitizer_symbolize_code(
- stack->info.module, stack->info.module_offset, buffer_, kBufferSize,
- common_flags()->symbolize_inline_frames);
- if (result) ParseSymbolizePCOutput(buffer_, stack);
+ stack->info.module, stack->info.module_offset, buffer_, sizeof(buffer_));
+ if (result)
+ ParseSymbolizePCOutput(buffer_, stack);
return result;
}
bool SymbolizeData(uptr addr, DataInfo *info) override {
bool result = __sanitizer_symbolize_data(info->module, info->module_offset,
- buffer_, kBufferSize);
+ buffer_, sizeof(buffer_));
if (result) {
ParseSymbolizeDataOutput(buffer_, info);
info->start += (addr - info->module_offset); // Add the base address.
@@ -356,43 +369,38 @@ class InternalSymbolizer final : public SymbolizerTool {
return result;
}
- void Flush() override {
- if (__sanitizer_symbolize_flush)
- __sanitizer_symbolize_flush();
+ bool SymbolizeFrame(uptr addr, FrameInfo *info) override {
+ bool result = __sanitizer_symbolize_frame(info->module, info->module_offset,
+ buffer_, sizeof(buffer_));
+ if (result)
+ ParseSymbolizeFrameOutput(buffer_, &info->locals);
+ return result;
}
+ void Flush() override { __sanitizer_symbolize_flush(); }
+
const char *Demangle(const char *name) override {
- if (__sanitizer_symbolize_demangle) {
- for (uptr res_length = 1024;
- res_length <= InternalSizeClassMap::kMaxSize;) {
- char *res_buff = static_cast<char*>(InternalAlloc(res_length));
- uptr req_length =
- __sanitizer_symbolize_demangle(name, res_buff, res_length);
- if (req_length > res_length) {
- res_length = req_length + 1;
- InternalFree(res_buff);
- continue;
- }
- return res_buff;
- }
+ if (__sanitizer_symbolize_demangle(name, buffer_, sizeof(buffer_))) {
+ char *res_buff = nullptr;
+ ExtractToken(buffer_, "", &res_buff);
+ return res_buff;
}
- return name;
+ return nullptr;
}
private:
- InternalSymbolizer() { }
+ InternalSymbolizer() {}
- static const int kBufferSize = 16 * 1024;
- char buffer_[kBufferSize];
+ char buffer_[16 * 1024];
};
-#else // SANITIZER_SUPPORTS_WEAK_HOOKS
+# else // SANITIZER_SUPPORTS_WEAK_HOOKS
class InternalSymbolizer final : public SymbolizerTool {
public:
static InternalSymbolizer *get(LowLevelAllocator *alloc) { return 0; }
};
-#endif // SANITIZER_SUPPORTS_WEAK_HOOKS
+# endif // SANITIZER_SUPPORTS_WEAK_HOOKS
const char *Symbolizer::PlatformDemangle(const char *name) {
return DemangleSwiftAndCXX(name);
@@ -417,13 +425,13 @@ static SymbolizerTool *ChooseExternalSymbolizer(LowLevelAllocator *allocator) {
VReport(2, "Using llvm-symbolizer at user-specified path: %s\n", path);
return new(*allocator) LLVMSymbolizer(path, allocator);
} else if (!internal_strcmp(binary_name, "atos")) {
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
VReport(2, "Using atos at user-specified path: %s\n", path);
return new(*allocator) AtosSymbolizer(path, allocator);
-#else // SANITIZER_MAC
+#else // SANITIZER_APPLE
Report("ERROR: Using `atos` is only supported on Darwin.\n");
Die();
-#endif // SANITIZER_MAC
+#endif // SANITIZER_APPLE
} else if (!internal_strcmp(binary_name, "addr2line")) {
VReport(2, "Using addr2line at user-specified path: %s\n", path);
return new(*allocator) Addr2LinePool(path, allocator);
@@ -436,12 +444,12 @@ static SymbolizerTool *ChooseExternalSymbolizer(LowLevelAllocator *allocator) {
// Otherwise symbolizer program is unknown, let's search $PATH
CHECK(path == nullptr);
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
if (const char *found_path = FindPathToBinary("atos")) {
VReport(2, "Using atos found at: %s\n", found_path);
return new(*allocator) AtosSymbolizer(found_path, allocator);
}
-#endif // SANITIZER_MAC
+#endif // SANITIZER_APPLE
if (const char *found_path = FindPathToBinary("llvm-symbolizer")) {
VReport(2, "Using llvm-symbolizer found at: %s\n", found_path);
return new(*allocator) LLVMSymbolizer(found_path, allocator);
@@ -461,6 +469,12 @@ static void ChooseSymbolizerTools(IntrusiveList<SymbolizerTool> *list,
VReport(2, "Symbolizer is disabled.\n");
return;
}
+ if (common_flags()->enable_symbolizer_markup) {
+ VReport(2, "Using symbolizer markup");
+ SymbolizerTool *tool = new (*allocator) MarkupSymbolizerTool();
+ CHECK(tool);
+ list->push_back(tool);
+ }
if (IsAllocatorOutOfMemory()) {
VReport(2, "Cannot use internal symbolizer: out of memory\n");
} else if (SymbolizerTool *tool = InternalSymbolizer::get(allocator)) {
@@ -478,10 +492,10 @@ static void ChooseSymbolizerTools(IntrusiveList<SymbolizerTool> *list,
list->push_back(tool);
}
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
VReport(2, "Using dladdr symbolizer.\n");
list->push_back(new(*allocator) DlAddrSymbolizer());
-#endif // SANITIZER_MAC
+#endif // SANITIZER_APPLE
}
Symbolizer *Symbolizer::PlatformInit() {
@@ -492,7 +506,7 @@ Symbolizer *Symbolizer::PlatformInit() {
}
void Symbolizer::LateInitialize() {
- Symbolizer::GetOrInit()->LateInitializeTools();
+ Symbolizer::GetOrInit();
InitializeSwiftDemangler();
}
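The InternalSymbolizer::get() change above keys off the address of a weak symbol; a minimal standalone sketch of the idiom, with a hypothetical hook name:

    extern "C" SANITIZER_WEAK_ATTRIBUTE bool my_optional_hook(const char *name,
                                                              char *buf, int len);

    static bool HookIsLinkedIn() {
      // With a weak declaration, the address is null unless some object file
      // in the link actually defines the symbol.
      return &my_optional_hook != nullptr;
    }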
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_report.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_report.cpp
index f330ed36640a..f6b157c07c65 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_report.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_report.cpp
@@ -28,14 +28,37 @@
namespace __sanitizer {
#if !SANITIZER_GO
+
+static bool FrameIsInternal(const SymbolizedStack *frame) {
+ if (!frame)
+ return true;
+ const char *file = frame->info.file;
+ const char *module = frame->info.module;
+ // On Gentoo, the path is g++-*, so there's *not* a missing /.
+ if (file && (internal_strstr(file, "/compiler-rt/lib/") ||
+ internal_strstr(file, "/include/c++/") ||
+ internal_strstr(file, "/include/g++")))
+ return true;
+ if (module && (internal_strstr(module, "libclang_rt.")))
+ return true;
+ return false;
+}
+
+const SymbolizedStack *SkipInternalFrames(const SymbolizedStack *frames) {
+ for (const SymbolizedStack *f = frames; f; f = f->next)
+ if (!FrameIsInternal(f))
+ return f;
+ return nullptr;
+}
+
void ReportErrorSummary(const char *error_type, const AddressInfo &info,
const char *alt_tool_name) {
if (!common_flags()->print_summary) return;
InternalScopedString buff;
- buff.append("%s ", error_type);
- RenderFrame(&buff, "%L %F", 0, info.address, &info,
- common_flags()->symbolize_vs_style,
- common_flags()->strip_path_prefix);
+ buff.AppendF("%s ", error_type);
+ StackTracePrinter::GetOrInit()->RenderFrame(
+ &buff, "%L %F", 0, info.address, &info,
+ common_flags()->symbolize_vs_style, common_flags()->strip_path_prefix);
ReportErrorSummary(buff.data(), alt_tool_name);
}
#endif
@@ -75,23 +98,46 @@ void ReportErrorSummary(const char *error_type, const StackTrace *stack,
#if !SANITIZER_GO
if (!common_flags()->print_summary)
return;
- if (stack->size == 0) {
- ReportErrorSummary(error_type);
- return;
+
+ // Find the first non-internal stack frame.
+ for (uptr i = 0; i < stack->size; ++i) {
+ uptr pc = StackTrace::GetPreviousInstructionPc(stack->trace[i]);
+ SymbolizedStackHolder symbolized_stack(
+ Symbolizer::GetOrInit()->SymbolizePC(pc));
+ if (const SymbolizedStack *frame = symbolized_stack.get()) {
+ if (const SymbolizedStack *summary_frame = SkipInternalFrames(frame)) {
+ ReportErrorSummary(error_type, summary_frame->info, alt_tool_name);
+ return;
+ }
+ }
}
- // Currently, we include the first stack frame into the report summary.
- // Maybe sometimes we need to choose another frame (e.g. skip memcpy/etc).
- uptr pc = StackTrace::GetPreviousInstructionPc(stack->trace[0]);
- SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc);
- ReportErrorSummary(error_type, frame->info, alt_tool_name);
- frame->ClearAll();
+
+ // Fall back to the top frame.
+ if (stack->size) {
+ uptr pc = StackTrace::GetPreviousInstructionPc(stack->trace[0]);
+ SymbolizedStackHolder symbolized_stack(
+ Symbolizer::GetOrInit()->SymbolizePC(pc));
+ if (const SymbolizedStack *frame = symbolized_stack.get()) {
+ ReportErrorSummary(error_type, frame->info, alt_tool_name);
+ return;
+ }
+ }
+
+ // Fall back to a summary without a location.
+ ReportErrorSummary(error_type);
#endif
}
-void ReportMmapWriteExec(int prot) {
+void ReportMmapWriteExec(int prot, int flags) {
#if SANITIZER_POSIX && (!SANITIZER_GO && !SANITIZER_ANDROID)
- if ((prot & (PROT_WRITE | PROT_EXEC)) != (PROT_WRITE | PROT_EXEC))
+ int pflags = (PROT_WRITE | PROT_EXEC);
+ if ((prot & pflags) != pflags)
+ return;
+
+# if SANITIZER_APPLE && defined(MAP_JIT)
+ if ((flags & MAP_JIT) == MAP_JIT)
return;
+# endif
ScopedErrorReportLock l;
SanitizerCommonDecorator d;
@@ -101,8 +147,7 @@ void ReportMmapWriteExec(int prot) {
stack->Reset();
uptr top = 0;
uptr bottom = 0;
- GET_CALLER_PC_BP_SP;
- (void)sp;
+ GET_CALLER_PC_BP;
bool fast = common_flags()->fast_unwind_on_fatal;
if (StackTrace::WillUseFastUnwind(fast)) {
GetThreadStackTopAndBottom(false, &top, &bottom);
@@ -143,22 +188,22 @@ static void MaybeReportNonExecRegion(uptr pc) {
static void PrintMemoryByte(InternalScopedString *str, const char *before,
u8 byte) {
SanitizerCommonDecorator d;
- str->append("%s%s%x%x%s ", before, d.MemoryByte(), byte >> 4, byte & 15,
- d.Default());
+ str->AppendF("%s%s%x%x%s ", before, d.MemoryByte(), byte >> 4, byte & 15,
+ d.Default());
}
static void MaybeDumpInstructionBytes(uptr pc) {
if (!common_flags()->dump_instruction_bytes || (pc < GetPageSizeCached()))
return;
InternalScopedString str;
- str.append("First 16 instruction bytes at pc: ");
+ str.AppendF("First 16 instruction bytes at pc: ");
if (IsAccessibleMemoryRange(pc, 16)) {
for (int i = 0; i < 16; ++i) {
PrintMemoryByte(&str, "", ((u8 *)pc)[i]);
}
- str.append("\n");
+ str.AppendF("\n");
} else {
- str.append("unaccessible\n");
+ str.AppendF("unaccessible\n");
}
Report("%s", str.data());
}
@@ -205,9 +250,9 @@ static void ReportDeadlySignalImpl(const SignalContext &sig, u32 tid,
Report("Hint: pc points to the zero page.\n");
if (sig.is_memory_access) {
const char *access_type =
- sig.write_flag == SignalContext::WRITE
+ sig.write_flag == SignalContext::Write
? "WRITE"
- : (sig.write_flag == SignalContext::READ ? "READ" : "UNKNOWN");
+ : (sig.write_flag == SignalContext::Read ? "READ" : "UNKNOWN");
Report("The signal is caused by a %s memory access.\n", access_type);
if (!sig.is_true_faulting_addr)
Report("Hint: this fault was caused by a dereference of a high value "
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_report_fuchsia.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_report_fuchsia.cpp
new file mode 100644
index 000000000000..fb49cfbb3047
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_report_fuchsia.cpp
@@ -0,0 +1,33 @@
+//===-- sanitizer_symbolizer_report_fuchsia.cpp --------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of the report functions for fuchsia.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_SYMBOLIZER_MARKUP
+
+# include "sanitizer_common.h"
+
+namespace __sanitizer {
+void StartReportDeadlySignal() {}
+
+void ReportDeadlySignal(const SignalContext &sig, u32 tid,
+ UnwindSignalStackCallbackType unwind,
+ const void *unwind_context) {}
+
+void HandleDeadlySignal(void *siginfo, void *context, u32 tid,
+ UnwindSignalStackCallbackType unwind,
+ const void *unwind_context) {}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_SYMBOLIZER_MARKUP
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_win.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_win.cpp
index 702d901353db..aae3e76ea229 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_win.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_win.cpp
@@ -14,8 +14,8 @@
#include "sanitizer_platform.h"
#if SANITIZER_WINDOWS
-#include "sanitizer_dbghelp.h"
-#include "sanitizer_symbolizer_internal.h"
+# include "sanitizer_dbghelp.h"
+# include "sanitizer_symbolizer_internal.h"
namespace __sanitizer {
@@ -175,9 +175,7 @@ const char *WinSymbolizerTool::Demangle(const char *name) {
return name;
}
-const char *Symbolizer::PlatformDemangle(const char *name) {
- return name;
-}
+const char *Symbolizer::PlatformDemangle(const char *name) { return nullptr; }
namespace {
struct ScopedHandle {
@@ -231,11 +229,9 @@ bool SymbolizerProcess::StartSymbolizerSubprocess() {
// Check that tool command lines are simple and that complete escaping is
// unnecessary.
CHECK(!internal_strchr(arg, '"') && "quotes in args unsupported");
- CHECK(!internal_strstr(arg, "\\\\") &&
- "double backslashes in args unsupported");
CHECK(arglen > 0 && arg[arglen - 1] != '\\' &&
"args ending in backslash and empty args unsupported");
- command_line.append("\"%s\" ", arg);
+ command_line.AppendF("\"%s\" ", arg);
}
VReport(3, "Launching symbolizer command: %s\n", command_line.data());
@@ -294,15 +290,15 @@ static void ChooseSymbolizerTools(IntrusiveList<SymbolizerTool> *list,
const char *path =
user_path ? user_path : FindPathToBinary("llvm-symbolizer.exe");
if (path) {
- VReport(2, "Using llvm-symbolizer at %spath: %s\n",
- user_path ? "user-specified " : "", path);
- list->push_back(new(*allocator) LLVMSymbolizer(path, allocator));
- } else {
if (user_path && user_path[0] == '\0') {
VReport(2, "External symbolizer is explicitly disabled.\n");
} else {
- VReport(2, "External symbolizer is not present.\n");
+ VReport(2, "Using llvm-symbolizer at %spath: %s\n",
+ user_path ? "user-specified " : "", path);
+ list->push_back(new (*allocator) LLVMSymbolizer(path, allocator));
}
+ } else {
+ VReport(2, "External symbolizer is not present.\n");
}
// Add the dbghelp based symbolizer.
@@ -318,7 +314,7 @@ Symbolizer *Symbolizer::PlatformInit() {
}
void Symbolizer::LateInitialize() {
- Symbolizer::GetOrInit()->LateInitializeTools();
+ Symbolizer::GetOrInit();
}
} // namespace __sanitizer
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_syscall_generic.inc b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_syscall_generic.inc
index 8829985b5b07..e7f95d33ad0d 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_syscall_generic.inc
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_syscall_generic.inc
@@ -13,13 +13,14 @@
// NetBSD uses libc calls directly
#if !SANITIZER_NETBSD
-#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_SOLARIS
+#if SANITIZER_FREEBSD || SANITIZER_APPLE || SANITIZER_SOLARIS
# define SYSCALL(name) SYS_ ## name
#else
# define SYSCALL(name) __NR_ ## name
#endif
-#if defined(__x86_64__) && (SANITIZER_FREEBSD || SANITIZER_MAC)
+#if (defined(__x86_64__) && (SANITIZER_FREEBSD || SANITIZER_APPLE)) || \
+ (defined(__aarch64__) && SANITIZER_FREEBSD)
# define internal_syscall __syscall
# else
# define internal_syscall syscall
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_syscall_linux_hexagon.inc b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_syscall_linux_hexagon.inc
new file mode 100644
index 000000000000..553bff7503b4
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_syscall_linux_hexagon.inc
@@ -0,0 +1,131 @@
+//===-- sanitizer_syscall_linux_hexagon.inc ---------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementations of internal_syscall and internal_iserror for Linux/hexagon.
+//
+//===----------------------------------------------------------------------===//
+
+#define SYSCALL(name) __NR_##name
+
+#define __internal_syscall_LL_E(x) \
+ ((union { \
+ long long ll; \
+ long l[2]; \
+ }){.ll = x}) \
+ .l[0], \
+ ((union { \
+ long long ll; \
+ long l[2]; \
+ }){.ll = x}) \
+ .l[1]
+#define __internal_syscall_LL_O(x) 0, __SYSCALL_LL_E((x))
+
+#define __asm_syscall(...) \
+ do { \
+ __asm__ __volatile__("trap0(#1)" : "=r"(r0) : __VA_ARGS__ : "memory"); \
+ return r0; \
+ } while (0)
+
+#define __internal_syscall0(n) (__internal_syscall)(n)
+
+static uptr __internal_syscall(long n) {
+ register u32 r6 __asm__("r6") = n;
+ register u32 r0 __asm__("r0");
+ __asm_syscall("r"(r6));
+}
+
+#define __internal_syscall1(n, a1) (__internal_syscall)(n, (long)(a1))
+
+static uptr __internal_syscall(long n, long a) {
+ register u32 r6 __asm__("r6") = n;
+ register u32 r0 __asm__("r0") = a;
+ __asm_syscall("r"(r6), "0"(r0));
+}
+
+#define __internal_syscall2(n, a1, a2) \
+ (__internal_syscall)(n, (long)(a1), (long)(a2))
+
+static uptr __internal_syscall(long n, long a, long b) {
+ register u32 r6 __asm__("r6") = n;
+ register u32 r0 __asm__("r0") = a;
+ register u32 r1 __asm__("r1") = b;
+ __asm_syscall("r"(r6), "0"(r0), "r"(r1));
+}
+
+#define __internal_syscall3(n, a1, a2, a3) \
+ (__internal_syscall)(n, (long)(a1), (long)(a2), (long)(a3))
+
+static uptr __internal_syscall(long n, long a, long b, long c) {
+ register u32 r6 __asm__("r6") = n;
+ register u32 r0 __asm__("r0") = a;
+ register u32 r1 __asm__("r1") = b;
+ register u32 r2 __asm__("r2") = c;
+ __asm_syscall("r"(r6), "0"(r0), "r"(r1), "r"(r2));
+}
+
+#define __internal_syscall4(n, a1, a2, a3, a4) \
+ (__internal_syscall)(n, (long)(a1), (long)(a2), (long)(a3), (long)(a4))
+
+static uptr __internal_syscall(long n, long a, long b, long c, long d) {
+ register u32 r6 __asm__("r6") = n;
+ register u32 r0 __asm__("r0") = a;
+ register u32 r1 __asm__("r1") = b;
+ register u32 r2 __asm__("r2") = c;
+ register u32 r3 __asm__("r3") = d;
+ __asm_syscall("r"(r6), "0"(r0), "r"(r1), "r"(r2), "r"(r3));
+}
+
+#define __internal_syscall5(n, a1, a2, a3, a4, a5) \
+ (__internal_syscall)(n, (long)(a1), (long)(a2), (long)(a3), (long)(a4), \
+ (long)(a5))
+
+static uptr __internal_syscall(long n, long a, long b, long c, long d, long e) {
+ register u32 r6 __asm__("r6") = n;
+ register u32 r0 __asm__("r0") = a;
+ register u32 r1 __asm__("r1") = b;
+ register u32 r2 __asm__("r2") = c;
+ register u32 r3 __asm__("r3") = d;
+ register u32 r4 __asm__("r4") = e;
+ __asm_syscall("r"(r6), "0"(r0), "r"(r1), "r"(r2), "r"(r3), "r"(r4));
+}
+
+#define __internal_syscall6(n, a1, a2, a3, a4, a5, a6) \
+ (__internal_syscall)(n, (long)(a1), (long)(a2), (long)(a3), (long)(a4), \
+ (long)(a5), (long)(a6))
+
+static uptr __internal_syscall(long n, long a, long b, long c, long d, long e,
+ long f) {
+ register u32 r6 __asm__("r6") = n;
+ register u32 r0 __asm__("r0") = a;
+ register u32 r1 __asm__("r1") = b;
+ register u32 r2 __asm__("r2") = c;
+ register u32 r3 __asm__("r3") = d;
+ register u32 r4 __asm__("r4") = e;
+ register u32 r5 __asm__("r5") = f;
+ __asm_syscall("r"(r6), "0"(r0), "r"(r1), "r"(r2), "r"(r3), "r"(r4), "r"(r5));
+}
+
+#define __SYSCALL_NARGS_X(a1, a2, a3, a4, a5, a6, a7, a8, n, ...) n
+#define __SYSCALL_NARGS(...) \
+ __SYSCALL_NARGS_X(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0, )
+#define __SYSCALL_CONCAT_X(a, b) a##b
+#define __SYSCALL_CONCAT(a, b) __SYSCALL_CONCAT_X(a, b)
+#define __SYSCALL_DISP(b, ...) \
+ __SYSCALL_CONCAT(b, __SYSCALL_NARGS(__VA_ARGS__))(__VA_ARGS__)
+
+#define internal_syscall(...) __SYSCALL_DISP(__internal_syscall, __VA_ARGS__)
+
+// Helper function used to avoid clobbering of errno.
+bool internal_iserror(uptr retval, int *rverrno) {
+ if (retval >= (uptr)-4095) {
+ if (rverrno)
+ *rverrno = -retval;
+ return true;
+ }
+ return false;
+}
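A worked expansion of the arity dispatch above, for a hypothetical one-argument call:

    internal_syscall(SYSCALL(close), fd)
      // __SYSCALL_NARGS(__NR_close, fd) == 1, so this becomes:
      => __internal_syscall1(__NR_close, fd)
      => (__internal_syscall)(__NR_close, (long)(fd))

i.e. the two-parameter overload is selected, which loads r6 with the syscall number and r0 with the argument before executing trap0(#1).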
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_syscall_linux_loongarch64.inc b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_syscall_linux_loongarch64.inc
new file mode 100644
index 000000000000..80f5e6be8ad1
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_syscall_linux_loongarch64.inc
@@ -0,0 +1,171 @@
+//===-- sanitizer_syscall_linux_loongarch64.inc -----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementations of internal_syscall and internal_iserror for
+// Linux/loongarch64.
+//
+//===----------------------------------------------------------------------===//
+
+// About local register variables:
+// https://gcc.gnu.org/onlinedocs/gcc/Local-Register-Variables.html#Local-Register-Variables
+//
+// Kernel ABI:
+// https://lore.kernel.org/loongarch/1f353678-3398-e30b-1c87-6edb278f74db@xen0n.name/T/#m1613bc86c2d7bf5f6da92bd62984302bfd699a2f
+// syscall number is placed in a7
+// parameters, if present, are placed in a0-a6
+// upon return:
+// the return value is placed in a0
+// t0-t8 should be considered clobbered
+// all other registers are preserved
+#define SYSCALL(name) __NR_##name
+
+#define INTERNAL_SYSCALL_CLOBBERS \
+ "memory", "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8"
+
+static uptr __internal_syscall(u64 nr) {
+ register u64 a7 asm("$a7") = nr;
+ register u64 a0 asm("$a0");
+ __asm__ volatile("syscall 0\n\t"
+ : "=r"(a0)
+ : "r"(a7)
+ : INTERNAL_SYSCALL_CLOBBERS);
+ return a0;
+}
+#define __internal_syscall0(n) (__internal_syscall)(n)
+
+static uptr __internal_syscall(u64 nr, u64 arg1) {
+ register u64 a7 asm("$a7") = nr;
+ register u64 a0 asm("$a0") = arg1;
+ __asm__ volatile("syscall 0\n\t"
+ : "+r"(a0)
+ : "r"(a7)
+ : INTERNAL_SYSCALL_CLOBBERS);
+ return a0;
+}
+#define __internal_syscall1(n, a1) (__internal_syscall)(n, (u64)(a1))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2) {
+ register u64 a7 asm("$a7") = nr;
+ register u64 a0 asm("$a0") = arg1;
+ register u64 a1 asm("$a1") = arg2;
+ __asm__ volatile("syscall 0\n\t"
+ : "+r"(a0)
+ : "r"(a7), "r"(a1)
+ : INTERNAL_SYSCALL_CLOBBERS);
+ return a0;
+}
+#define __internal_syscall2(n, a1, a2) \
+ (__internal_syscall)(n, (u64)(a1), (long)(a2))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3) {
+ register u64 a7 asm("$a7") = nr;
+ register u64 a0 asm("$a0") = arg1;
+ register u64 a1 asm("$a1") = arg2;
+ register u64 a2 asm("$a2") = arg3;
+ __asm__ volatile("syscall 0\n\t"
+ : "+r"(a0)
+ : "r"(a7), "r"(a1), "r"(a2)
+ : INTERNAL_SYSCALL_CLOBBERS);
+ return a0;
+}
+#define __internal_syscall3(n, a1, a2, a3) \
+ (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3,
+ u64 arg4) {
+ register u64 a7 asm("$a7") = nr;
+ register u64 a0 asm("$a0") = arg1;
+ register u64 a1 asm("$a1") = arg2;
+ register u64 a2 asm("$a2") = arg3;
+ register u64 a3 asm("$a3") = arg4;
+ __asm__ volatile("syscall 0\n\t"
+ : "+r"(a0)
+ : "r"(a7), "r"(a1), "r"(a2), "r"(a3)
+ : INTERNAL_SYSCALL_CLOBBERS);
+ return a0;
+}
+#define __internal_syscall4(n, a1, a2, a3, a4) \
+ (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
+ long arg5) {
+ register u64 a7 asm("$a7") = nr;
+ register u64 a0 asm("$a0") = arg1;
+ register u64 a1 asm("$a1") = arg2;
+ register u64 a2 asm("$a2") = arg3;
+ register u64 a3 asm("$a3") = arg4;
+ register u64 a4 asm("$a4") = arg5;
+ __asm__ volatile("syscall 0\n\t"
+ : "+r"(a0)
+ : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4)
+ : INTERNAL_SYSCALL_CLOBBERS);
+ return a0;
+}
+#define __internal_syscall5(n, a1, a2, a3, a4, a5) \
+ (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
+ (u64)(a5))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
+ long arg5, long arg6) {
+ register u64 a7 asm("$a7") = nr;
+ register u64 a0 asm("$a0") = arg1;
+ register u64 a1 asm("$a1") = arg2;
+ register u64 a2 asm("$a2") = arg3;
+ register u64 a3 asm("$a3") = arg4;
+ register u64 a4 asm("$a4") = arg5;
+ register u64 a5 asm("$a5") = arg6;
+ __asm__ volatile("syscall 0\n\t"
+ : "+r"(a0)
+ : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5)
+ : INTERNAL_SYSCALL_CLOBBERS);
+ return a0;
+}
+#define __internal_syscall6(n, a1, a2, a3, a4, a5, a6) \
+ (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
+ (u64)(a5), (long)(a6))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
+ long arg5, long arg6, long arg7) {
+ register u64 a7 asm("$a7") = nr;
+ register u64 a0 asm("$a0") = arg1;
+ register u64 a1 asm("$a1") = arg2;
+ register u64 a2 asm("$a2") = arg3;
+ register u64 a3 asm("$a3") = arg4;
+ register u64 a4 asm("$a4") = arg5;
+ register u64 a5 asm("$a5") = arg6;
+ register u64 a6 asm("$a6") = arg7;
+ __asm__ volatile("syscall 0\n\t"
+ : "+r"(a0)
+ : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5),
+ "r"(a6)
+ : INTERNAL_SYSCALL_CLOBBERS);
+ return a0;
+}
+#define __internal_syscall7(n, a1, a2, a3, a4, a5, a6, a7) \
+ (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
+ (u64)(a5), (long)(a6), (long)(a7))
+
+#define __SYSCALL_NARGS_X(a1, a2, a3, a4, a5, a6, a7, a8, n, ...) n
+#define __SYSCALL_NARGS(...) \
+ __SYSCALL_NARGS_X(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0, )
+#define __SYSCALL_CONCAT_X(a, b) a##b
+#define __SYSCALL_CONCAT(a, b) __SYSCALL_CONCAT_X(a, b)
+#define __SYSCALL_DISP(b, ...) \
+ __SYSCALL_CONCAT(b, __SYSCALL_NARGS(__VA_ARGS__))(__VA_ARGS__)
+
+#define internal_syscall(...) __SYSCALL_DISP(__internal_syscall, __VA_ARGS__)
+
+// Helper function used to avoid clobbering of errno.
+bool internal_iserror(uptr retval, int *internal_errno) {
+ if (retval >= (uptr)-4095) {
+ if (internal_errno)
+ *internal_errno = -retval;
+ return true;
+ }
+ return false;
+}
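A usage sketch of the error convention shared by these .inc files: the kernel encodes failure as a return value in [-4095, -1], so a hypothetical call site checks

    uptr res = internal_syscall(SYSCALL(close), fd);
    int err;
    if (internal_iserror(res, &err))
      Report("close failed: errno=%d\n", err);

which recovers the error code without ever touching the libc errno variable.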
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_syscalls_netbsd.inc b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_syscalls_netbsd.inc
index c4a9d99fe2f0..4ce5de062756 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_syscalls_netbsd.inc
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_syscalls_netbsd.inc
@@ -2255,13 +2255,13 @@ PRE_SYSCALL(getcontext)(void *ucp_) { /* Nothing to do */ }
POST_SYSCALL(getcontext)(long long res, void *ucp_) { /* Nothing to do */ }
PRE_SYSCALL(setcontext)(void *ucp_) {
if (ucp_) {
- PRE_READ(ucp_, ucontext_t_sz);
+ PRE_READ(ucp_, ucontext_t_sz(ucp_));
}
}
POST_SYSCALL(setcontext)(long long res, void *ucp_) {}
PRE_SYSCALL(_lwp_create)(void *ucp_, long long flags_, void *new_lwp_) {
if (ucp_) {
- PRE_READ(ucp_, ucontext_t_sz);
+ PRE_READ(ucp_, ucontext_t_sz(ucp_));
}
}
POST_SYSCALL(_lwp_create)
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_arg_retval.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_arg_retval.cpp
new file mode 100644
index 000000000000..754fd7b65a1d
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_arg_retval.cpp
@@ -0,0 +1,109 @@
+//===-- sanitizer_thread_arg_retval.cpp -------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between sanitizer tools.
+//
+// Tracks thread arguments and return value for leak checking.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_thread_arg_retval.h"
+
+#include "sanitizer_placement_new.h"
+
+namespace __sanitizer {
+
+void ThreadArgRetval::CreateLocked(uptr thread, bool detached,
+ const Args& args) {
+ CheckLocked();
+ Data& t = data_[thread];
+ t = {};
+ t.gen = gen_++;
+ static_assert(sizeof(gen_) == sizeof(u32) && kInvalidGen == UINT32_MAX);
+ if (gen_ == kInvalidGen)
+ gen_ = 0;
+ t.detached = detached;
+ t.args = args;
+}
+
+ThreadArgRetval::Args ThreadArgRetval::GetArgs(uptr thread) const {
+ __sanitizer::Lock lock(&mtx_);
+ auto t = data_.find(thread);
+ CHECK(t);
+ if (t->second.done)
+ return {};
+ return t->second.args;
+}
+
+void ThreadArgRetval::Finish(uptr thread, void* retval) {
+ __sanitizer::Lock lock(&mtx_);
+ auto t = data_.find(thread);
+ if (!t)
+ return;
+ if (t->second.detached) {
+ // The retval of a detached thread cannot be retrieved.
+ data_.erase(t);
+ return;
+ }
+ t->second.done = true;
+ t->second.args.arg_retval = retval;
+}
+
+u32 ThreadArgRetval::BeforeJoin(uptr thread) const {
+ __sanitizer::Lock lock(&mtx_);
+ auto t = data_.find(thread);
+ if (t && !t->second.detached) {
+ return t->second.gen;
+ }
+ if (!common_flags()->detect_invalid_join)
+ return kInvalidGen;
+ const char* reason = "unknown";
+ if (!t) {
+ reason = "already joined";
+ } else if (t->second.detached) {
+ reason = "detached";
+ }
+ Report("ERROR: %s: Joining %s thread, aborting.\n", SanitizerToolName,
+ reason);
+ Die();
+}
+
+void ThreadArgRetval::AfterJoin(uptr thread, u32 gen) {
+ __sanitizer::Lock lock(&mtx_);
+ auto t = data_.find(thread);
+ if (!t || gen != t->second.gen) {
+ // The thread was reused and erased by some other event, or this was an
+ // invalid join.
+ return;
+ }
+ CHECK(!t->second.detached);
+ data_.erase(t);
+}
+
+void ThreadArgRetval::DetachLocked(uptr thread) {
+ CheckLocked();
+ auto t = data_.find(thread);
+ CHECK(t);
+ CHECK(!t->second.detached);
+ if (t->second.done) {
+ // We can't retrieve the retval after a detached thread has finished.
+ data_.erase(t);
+ return;
+ }
+ t->second.detached = true;
+}
+
+void ThreadArgRetval::GetAllPtrsLocked(InternalMmapVector<uptr>* ptrs) {
+ CheckLocked();
+ CHECK(ptrs);
+ data_.forEach([&](DenseMap<uptr, Data>::value_type& kv) -> bool {
+ ptrs->push_back((uptr)kv.second.args.arg_retval);
+ return true;
+ });
+}
+
+} // namespace __sanitizer
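A timeline of the pthread_t-reuse race that the gen field in BeforeJoin()/AfterJoin() guards against (generation numbers illustrative):

    joiner:  BeforeJoin(th) records gen == 5
    joiner:  fn() succeeds, i.e. the real pthread_join(th) returns 0
    other:   creates a new thread and the OS reuses the same pthread_t,
             so CreateLocked(th, ...) stores gen == 6
    joiner:  AfterJoin(th, 5) sees gen == 6 and leaves the fresh entry alone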
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_arg_retval.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_arg_retval.h
new file mode 100644
index 000000000000..0e6d35131c23
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_arg_retval.h
@@ -0,0 +1,117 @@
+//===-- sanitizer_thread_arg_retval.h ---------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between sanitizer tools.
+//
+// Tracks thread arguments and return value for leak checking.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_THREAD_ARG_RETVAL_H
+#define SANITIZER_THREAD_ARG_RETVAL_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_dense_map.h"
+#include "sanitizer_list.h"
+#include "sanitizer_mutex.h"
+
+namespace __sanitizer {
+
+// The primary goal of the class is to keep the arg and retval pointers alive
+// for leak checking. However, it can also be used to pass those pointers into
+// wrappers used by interceptors. The difference from ThreadRegistry/ThreadList
+// is that this class keeps the data up to the detach or join, since an exited
+// thread can still be joined to retrieve its retval;
+// ThreadRegistry/ThreadList can discard exited threads immediately.
+class SANITIZER_MUTEX ThreadArgRetval {
+ public:
+ struct Args {
+ void* (*routine)(void*);
+ void* arg_retval; // Either arg or retval.
+ };
+ void Lock() SANITIZER_ACQUIRE() { mtx_.Lock(); }
+ void CheckLocked() const SANITIZER_CHECK_LOCKED() { mtx_.CheckLocked(); }
+ void Unlock() SANITIZER_RELEASE() { mtx_.Unlock(); }
+
+ // Wraps pthread_create or similar. We need to keep the object locked to
+ // prevent the child thread from proceeding without the thread handle.
+ template <typename CreateFn /* returns thread id on success, or 0 */>
+ void Create(bool detached, const Args& args, const CreateFn& fn) {
+ // No need to track detached threads with no args, but we do so anyway since
+ // it's inexpensive and avoids edge cases.
+ __sanitizer::Lock lock(&mtx_);
+ if (uptr thread = fn())
+ CreateLocked(thread, detached, args);
+ }
+
+ // Returns thread arg and routine.
+ Args GetArgs(uptr thread) const;
+
+ // Marks the thread as done and stores the retval, or removes the entry if
+ // detached. Should be called by the exiting thread itself.
+ void Finish(uptr thread, void* retval);
+
+ // Marks the thread as detached, or removes the entry if already done.
+ template <typename DetachFn /* returns true on success */>
+ void Detach(uptr thread, const DetachFn& fn) {
+ // Lock to prevent re-use of the thread between fn() and DetachLocked()
+ // calls.
+ __sanitizer::Lock lock(&mtx_);
+ if (fn())
+ DetachLocked(thread);
+ }
+
+ // Joins the thread.
+ template <typename JoinFn /* returns true on success */>
+ void Join(uptr thread, const JoinFn& fn) {
+ // Remember the internal id of the thread to detect re-use of the thread
+ // handle between the fn() and AfterJoin() calls. Holding the lock across
+ // fn(), as Detach() does, could cause a deadlock here.
+ auto gen = BeforeJoin(thread);
+ if (fn())
+ AfterJoin(thread, gen);
+ }
+
+ // Returns all args and retvals which are considered alive.
+ void GetAllPtrsLocked(InternalMmapVector<uptr>* ptrs);
+
+ uptr size() const {
+ __sanitizer::Lock lock(&mtx_);
+ return data_.size();
+ }
+
+ // FIXME: Add fork support. Expected users of the class are sloppy with forks
+ // anyway. We likely should lock/unlock the object to avoid deadlocks, and
+ // erase all but the current thread, so we can detect a leaked arg or retval
+ // in the child process.
+
+ // FIXME: Add cancellation support. Currently, if a thread is canceled, the
+ // class will keep its pointers alive forever, missing leaks caused by
+ // cancellation.
+
+ private:
+ static const u32 kInvalidGen = UINT32_MAX;
+ struct Data {
+ Args args;
+ u32 gen; // Avoid collision if thread id re-used.
+ bool detached;
+ bool done;
+ };
+
+ void CreateLocked(uptr thread, bool detached, const Args& args);
+ u32 BeforeJoin(uptr thread) const;
+ void AfterJoin(uptr thread, u32 gen);
+ void DetachLocked(uptr thread);
+
+ mutable Mutex mtx_;
+
+ DenseMap<uptr, Data> data_;
+ u32 gen_ = 0;
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_THREAD_ARG_RETVAL_H
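A minimal usage sketch of the Create() wrapper above, roughly what a pthread_create interceptor does; names are hypothetical and error handling is elided:

    static ThreadArgRetval arg_retval;  // tools normally keep one global instance

    int WrappedCreate(pthread_t *th, void *(*routine)(void *), void *arg) {
      int res = 0;
      // The lambda runs under the ThreadArgRetval lock; it must return the
      // thread handle on success or 0 on failure.
      arg_retval.Create(/*detached=*/false, {routine, arg}, [&]() -> uptr {
        res = pthread_create(th, nullptr, routine, arg);
        return res == 0 ? (uptr)*th : 0;
      });
      return res;
    }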
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.cpp
index 745fbf76b01f..741e0731c415 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.cpp
@@ -13,6 +13,8 @@
#include "sanitizer_thread_registry.h"
+#include "sanitizer_placement_new.h"
+
namespace __sanitizer {
ThreadContextBase::ThreadContextBase(u32 tid)
@@ -108,7 +110,7 @@ ThreadRegistry::ThreadRegistry(ThreadContextFactory factory, u32 max_threads,
max_threads_(max_threads),
thread_quarantine_size_(thread_quarantine_size),
max_reuse_(max_reuse),
- mtx_(),
+ mtx_(MutexThreadRegistry),
total_threads_(0),
alive_threads_(0),
max_alive_threads_(0),
@@ -119,7 +121,7 @@ ThreadRegistry::ThreadRegistry(ThreadContextFactory factory, u32 max_threads,
void ThreadRegistry::GetNumberOfThreads(uptr *total, uptr *running,
uptr *alive) {
- BlockingMutexLock l(&mtx_);
+ ThreadRegistryLock l(this);
if (total)
*total = threads_.size();
if (running) *running = running_threads_;
@@ -127,13 +129,13 @@ void ThreadRegistry::GetNumberOfThreads(uptr *total, uptr *running,
}
uptr ThreadRegistry::GetMaxAliveThreads() {
- BlockingMutexLock l(&mtx_);
+ ThreadRegistryLock l(this);
return max_alive_threads_;
}
u32 ThreadRegistry::CreateThread(uptr user_id, bool detached, u32 parent_tid,
void *arg) {
- BlockingMutexLock l(&mtx_);
+ ThreadRegistryLock l(this);
u32 tid = kInvalidTid;
ThreadContextBase *tctx = QuarantinePop();
if (tctx) {
@@ -162,6 +164,12 @@ u32 ThreadRegistry::CreateThread(uptr user_id, bool detached, u32 parent_tid,
max_alive_threads_++;
CHECK_EQ(alive_threads_, max_alive_threads_);
}
+ if (user_id) {
+ // Ensure that user_id is unique. If it's not, we are screwed:
+ // ignoring this situation may lead to very hard-to-debug false
+ // positives later (e.g. if we join the wrong thread).
+ CHECK(live_.try_emplace(user_id, tid).second);
+ }
tctx->SetCreated(user_id, total_threads_++, detached,
parent_tid, arg);
return tid;
@@ -179,7 +187,7 @@ void ThreadRegistry::RunCallbackForEachThreadLocked(ThreadCallback cb,
}
u32 ThreadRegistry::FindThread(FindThreadCallback cb, void *arg) {
- BlockingMutexLock l(&mtx_);
+ ThreadRegistryLock l(this);
for (u32 tid = 0; tid < threads_.size(); tid++) {
ThreadContextBase *tctx = threads_[tid];
if (tctx != 0 && cb(tctx, arg))
@@ -211,7 +219,7 @@ ThreadContextBase *ThreadRegistry::FindThreadContextByOsIDLocked(tid_t os_id) {
}
void ThreadRegistry::SetThreadName(u32 tid, const char *name) {
- BlockingMutexLock l(&mtx_);
+ ThreadRegistryLock l(this);
ThreadContextBase *tctx = threads_[tid];
CHECK_NE(tctx, 0);
CHECK_EQ(SANITIZER_FUCHSIA ? ThreadStatusCreated : ThreadStatusRunning,
@@ -220,19 +228,13 @@ void ThreadRegistry::SetThreadName(u32 tid, const char *name) {
}
void ThreadRegistry::SetThreadNameByUserId(uptr user_id, const char *name) {
- BlockingMutexLock l(&mtx_);
- for (u32 tid = 0; tid < threads_.size(); tid++) {
- ThreadContextBase *tctx = threads_[tid];
- if (tctx != 0 && tctx->user_id == user_id &&
- tctx->status != ThreadStatusInvalid) {
- tctx->SetName(name);
- return;
- }
- }
+ ThreadRegistryLock l(this);
+ if (const auto *tid = live_.find(user_id))
+ threads_[tid->second]->SetName(name);
}
void ThreadRegistry::DetachThread(u32 tid, void *arg) {
- BlockingMutexLock l(&mtx_);
+ ThreadRegistryLock l(this);
ThreadContextBase *tctx = threads_[tid];
CHECK_NE(tctx, 0);
if (tctx->status == ThreadStatusInvalid) {
@@ -241,6 +243,8 @@ void ThreadRegistry::DetachThread(u32 tid, void *arg) {
}
tctx->OnDetached(arg);
if (tctx->status == ThreadStatusFinished) {
+ if (tctx->user_id)
+ live_.erase(tctx->user_id);
tctx->SetDead();
QuarantinePush(tctx);
} else {
@@ -252,7 +256,7 @@ void ThreadRegistry::JoinThread(u32 tid, void *arg) {
bool destroyed = false;
do {
{
- BlockingMutexLock l(&mtx_);
+ ThreadRegistryLock l(this);
ThreadContextBase *tctx = threads_[tid];
CHECK_NE(tctx, 0);
if (tctx->status == ThreadStatusInvalid) {
@@ -260,6 +264,8 @@ void ThreadRegistry::JoinThread(u32 tid, void *arg) {
return;
}
if ((destroyed = tctx->GetDestroyed())) {
+ if (tctx->user_id)
+ live_.erase(tctx->user_id);
tctx->SetJoined(arg);
QuarantinePush(tctx);
}
@@ -275,7 +281,7 @@ void ThreadRegistry::JoinThread(u32 tid, void *arg) {
// thread before trying to create it, and then failed to actually
// create it, and so never called StartThread.
ThreadStatus ThreadRegistry::FinishThread(u32 tid) {
- BlockingMutexLock l(&mtx_);
+ ThreadRegistryLock l(this);
CHECK_GT(alive_threads_, 0);
alive_threads_--;
ThreadContextBase *tctx = threads_[tid];
@@ -292,6 +298,8 @@ ThreadStatus ThreadRegistry::FinishThread(u32 tid) {
}
tctx->SetFinished();
if (dead) {
+ if (tctx->user_id)
+ live_.erase(tctx->user_id);
tctx->SetDead();
QuarantinePush(tctx);
}
@@ -301,7 +309,7 @@ ThreadStatus ThreadRegistry::FinishThread(u32 tid) {
void ThreadRegistry::StartThread(u32 tid, tid_t os_id, ThreadType thread_type,
void *arg) {
- BlockingMutexLock l(&mtx_);
+ ThreadRegistryLock l(this);
running_threads_++;
ThreadContextBase *tctx = threads_[tid];
CHECK_NE(tctx, 0);
@@ -327,20 +335,50 @@ void ThreadRegistry::QuarantinePush(ThreadContextBase *tctx) {
ThreadContextBase *ThreadRegistry::QuarantinePop() {
if (invalid_threads_.size() == 0)
- return 0;
+ return nullptr;
ThreadContextBase *tctx = invalid_threads_.front();
invalid_threads_.pop_front();
return tctx;
}
+u32 ThreadRegistry::ConsumeThreadUserId(uptr user_id) {
+ ThreadRegistryLock l(this);
+ u32 tid;
+ auto *t = live_.find(user_id);
+ CHECK(t);
+ tid = t->second;
+ live_.erase(t);
+ auto *tctx = threads_[tid];
+ CHECK_EQ(tctx->user_id, user_id);
+ tctx->user_id = 0;
+ return tid;
+}
+
void ThreadRegistry::SetThreadUserId(u32 tid, uptr user_id) {
- BlockingMutexLock l(&mtx_);
+ ThreadRegistryLock l(this);
ThreadContextBase *tctx = threads_[tid];
CHECK_NE(tctx, 0);
CHECK_NE(tctx->status, ThreadStatusInvalid);
CHECK_NE(tctx->status, ThreadStatusDead);
CHECK_EQ(tctx->user_id, 0);
tctx->user_id = user_id;
+ CHECK(live_.try_emplace(user_id, tctx->tid).second);
+}
+
+u32 ThreadRegistry::OnFork(u32 tid) {
+ ThreadRegistryLock l(this);
+ // We only purge the user_id (pthread_t) of live threads, because stale ids
+ // cause CHECK failures if new threads with a matching pthread_t are
+ // created after the fork.
+ // Potentially we could purge more info (ThreadContextBase themselves),
+ // but it's hard to test and easy to introduce new issues by doing this.
+ for (auto *tctx : threads_) {
+ if (tctx->tid == tid || !tctx->user_id)
+ continue;
+ CHECK(live_.erase(tctx->user_id));
+ tctx->user_id = 0;
+ }
+ return alive_threads_;
}
} // namespace __sanitizer
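A sketch of how a join interceptor consumes the new live_ mapping; the registry pointer and the cast are illustrative:

    // Resolve a pthread_t back to its registry tid exactly once; after this,
    // the user_id is detached from the context, so a double join of the same
    // handle would fail the live_.find() CHECK inside ConsumeThreadUserId().
    u32 tid = registry->ConsumeThreadUserId((uptr)thread);
    registry->JoinThread(tid, /*arg=*/nullptr);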
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h
index 0b28bbe6ddf6..2c7e5c276fa1 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h
@@ -15,6 +15,7 @@
#define SANITIZER_THREAD_REGISTRY_H
#include "sanitizer_common.h"
+#include "sanitizer_dense_map.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"
@@ -85,7 +86,7 @@ class ThreadContextBase {
typedef ThreadContextBase* (*ThreadContextFactory)(u32 tid);
-class MUTEX ThreadRegistry {
+class SANITIZER_MUTEX ThreadRegistry {
public:
ThreadRegistry(ThreadContextFactory factory);
ThreadRegistry(ThreadContextFactory factory, u32 max_threads,
@@ -94,15 +95,17 @@ class MUTEX ThreadRegistry {
uptr *alive = nullptr);
uptr GetMaxAliveThreads();
- void Lock() ACQUIRE() { mtx_.Lock(); }
- void CheckLocked() const CHECK_LOCKED() { mtx_.CheckLocked(); }
- void Unlock() RELEASE() { mtx_.Unlock(); }
+ void Lock() SANITIZER_ACQUIRE() { mtx_.Lock(); }
+ void CheckLocked() const SANITIZER_CHECK_LOCKED() { mtx_.CheckLocked(); }
+ void Unlock() SANITIZER_RELEASE() { mtx_.Unlock(); }
// Should be guarded by ThreadRegistryLock.
ThreadContextBase *GetThreadLocked(u32 tid) {
return threads_.empty() ? nullptr : threads_[tid];
}
+ u32 NumThreadsLocked() const { return threads_.size(); }
+
u32 CreateThread(uptr user_id, bool detached, u32 parent_tid, void *arg);
typedef void (*ThreadCallback)(ThreadContextBase *tctx, void *arg);
@@ -127,15 +130,21 @@ class MUTEX ThreadRegistry {
// Finishes thread and returns previous status.
ThreadStatus FinishThread(u32 tid);
void StartThread(u32 tid, tid_t os_id, ThreadType thread_type, void *arg);
+ u32 ConsumeThreadUserId(uptr user_id);
void SetThreadUserId(u32 tid, uptr user_id);
+ // OnFork must be called in the child process after fork to purge old
+ // threads that don't exist anymore (except for the current thread tid).
+ // Returns number of alive threads before fork.
+ u32 OnFork(u32 tid);
+
private:
const ThreadContextFactory context_factory_;
const u32 max_threads_;
const u32 thread_quarantine_size_;
const u32 max_reuse_;
- BlockingMutex mtx_;
+ Mutex mtx_;
u64 total_threads_; // Total number of created threads. May be greater than
// max_threads_ if contexts were reused.
@@ -146,6 +155,7 @@ class MUTEX ThreadRegistry {
InternalMmapVector<ThreadContextBase *> threads_;
IntrusiveList<ThreadContextBase> dead_threads_;
IntrusiveList<ThreadContextBase> invalid_threads_;
+ DenseMap<uptr, Tid> live_;
void QuarantinePush(ThreadContextBase *tctx);
ThreadContextBase *QuarantinePop();
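A hedged sketch of how a tool's fork interceptor might drive the new API in the child process (ForkChild and its arguments are illustrative names, not part of the patch):

void ForkChild(ThreadRegistry *registry, u32 current_tid) {
  // OnFork takes the registry lock itself and erases every live pthread_t
  // mapping except the caller's, so later SetThreadUserId calls cannot
  // trip the CHECK on a duplicate user_id.
  u32 alive_before_fork = registry->OnFork(current_tid);
  (void)alive_before_fork;  // tools may report this for diagnostics
}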
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_safety.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_safety.h
index 52b25edaa7a3..c34ea804da20 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_safety.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_thread_safety.h
@@ -16,27 +16,34 @@
#define SANITIZER_THREAD_SAFETY_H
#if defined(__clang__)
-# define THREAD_ANNOTATION(x) __attribute__((x))
+# define SANITIZER_THREAD_ANNOTATION(x) __attribute__((x))
#else
-# define THREAD_ANNOTATION(x)
+# define SANITIZER_THREAD_ANNOTATION(x)
#endif
-#define MUTEX THREAD_ANNOTATION(capability("mutex"))
-#define SCOPED_LOCK THREAD_ANNOTATION(scoped_lockable)
-#define GUARDED_BY(x) THREAD_ANNOTATION(guarded_by(x))
-#define PT_GUARDED_BY(x) THREAD_ANNOTATION(pt_guarded_by(x))
-#define REQUIRES(...) THREAD_ANNOTATION(requires_capability(__VA_ARGS__))
-#define REQUIRES_SHARED(...) \
- THREAD_ANNOTATION(requires_shared_capability(__VA_ARGS__))
-#define ACQUIRE(...) THREAD_ANNOTATION(acquire_capability(__VA_ARGS__))
-#define ACQUIRE_SHARED(...) \
- THREAD_ANNOTATION(acquire_shared_capability(__VA_ARGS__))
-#define TRY_ACQUIRE(...) THREAD_ANNOTATION(try_acquire_capability(__VA_ARGS__))
-#define RELEASE(...) THREAD_ANNOTATION(release_capability(__VA_ARGS__))
-#define RELEASE_SHARED(...) \
- THREAD_ANNOTATION(release_shared_capability(__VA_ARGS__))
-#define EXCLUDES(...) THREAD_ANNOTATION(locks_excluded(__VA_ARGS__))
-#define CHECK_LOCKED(...) THREAD_ANNOTATION(assert_capability(__VA_ARGS__))
-#define NO_THREAD_SAFETY_ANALYSIS THREAD_ANNOTATION(no_thread_safety_analysis)
+#define SANITIZER_MUTEX SANITIZER_THREAD_ANNOTATION(capability("mutex"))
+#define SANITIZER_SCOPED_LOCK SANITIZER_THREAD_ANNOTATION(scoped_lockable)
+#define SANITIZER_GUARDED_BY(x) SANITIZER_THREAD_ANNOTATION(guarded_by(x))
+#define SANITIZER_PT_GUARDED_BY(x) SANITIZER_THREAD_ANNOTATION(pt_guarded_by(x))
+#define SANITIZER_REQUIRES(...) \
+ SANITIZER_THREAD_ANNOTATION(requires_capability(__VA_ARGS__))
+#define SANITIZER_REQUIRES_SHARED(...) \
+ SANITIZER_THREAD_ANNOTATION(requires_shared_capability(__VA_ARGS__))
+#define SANITIZER_ACQUIRE(...) \
+ SANITIZER_THREAD_ANNOTATION(acquire_capability(__VA_ARGS__))
+#define SANITIZER_ACQUIRE_SHARED(...) \
+ SANITIZER_THREAD_ANNOTATION(acquire_shared_capability(__VA_ARGS__))
+#define SANITIZER_TRY_ACQUIRE(...) \
+ SANITIZER_THREAD_ANNOTATION(try_acquire_capability(__VA_ARGS__))
+#define SANITIZER_RELEASE(...) \
+ SANITIZER_THREAD_ANNOTATION(release_capability(__VA_ARGS__))
+#define SANITIZER_RELEASE_SHARED(...) \
+ SANITIZER_THREAD_ANNOTATION(release_shared_capability(__VA_ARGS__))
+#define SANITIZER_EXCLUDES(...) \
+ SANITIZER_THREAD_ANNOTATION(locks_excluded(__VA_ARGS__))
+#define SANITIZER_CHECK_LOCKED(...) \
+ SANITIZER_THREAD_ANNOTATION(assert_capability(__VA_ARGS__))
+#define SANITIZER_NO_THREAD_SAFETY_ANALYSIS \
+ SANITIZER_THREAD_ANNOTATION(no_thread_safety_analysis)
#endif
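Illustrative use of the renamed annotations, loosely modeled on the ThreadRegistry declaration above (a sketch; the sanitizer Mutex and clang's -Wthread-safety semantics are assumed):

class SANITIZER_MUTEX Counter {
 public:
  void Lock() SANITIZER_ACQUIRE() { mtx_.Lock(); }
  void Unlock() SANITIZER_RELEASE() { mtx_.Unlock(); }
  // Callers must already hold the lock, e.g. via a scoped lock object.
  void IncLocked() SANITIZER_REQUIRES(this) { value_++; }

 private:
  Mutex mtx_;
  int value_ SANITIZER_GUARDED_BY(this) = 0;
};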
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.cpp
index 1f664b6cf5b8..252979f1c2ba 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.cpp
@@ -12,6 +12,7 @@
#include "sanitizer_tls_get_addr.h"
+#include "sanitizer_allocator_interface.h"
#include "sanitizer_atomic.h"
#include "sanitizer_flags.h"
#include "sanitizer_platform_interceptors.h"
@@ -26,13 +27,6 @@ struct TlsGetAddrParam {
uptr offset;
};
-// Glibc starting from 2.19 allocates tls using __signal_safe_memalign,
-// which has such header.
-struct Glibc_2_19_tls_header {
- uptr size;
- uptr start;
-};
-
// This must be static TLS
__attribute__((tls_model("initial-exec")))
static __thread DTLS dtls;
@@ -44,7 +38,7 @@ static atomic_uintptr_t number_of_live_dtls;
static const uptr kDestroyedThread = -1;
static void DTLS_Deallocate(DTLS::DTVBlock *block) {
- VReport(2, "__tls_get_addr: DTLS_Deallocate %p %zd\n", block);
+ VReport(2, "__tls_get_addr: DTLS_Deallocate %p\n", (void *)block);
UnmapOrDie(block, sizeof(DTLS::DTVBlock));
atomic_fetch_sub(&number_of_live_dtls, 1, memory_order_relaxed);
}
@@ -66,12 +60,13 @@ static DTLS::DTVBlock *DTLS_NextBlock(atomic_uintptr_t *cur) {
}
uptr num_live_dtls =
atomic_fetch_add(&number_of_live_dtls, 1, memory_order_relaxed);
- VReport(2, "__tls_get_addr: DTLS_NextBlock %p %zd\n", &dtls, num_live_dtls);
+ VReport(2, "__tls_get_addr: DTLS_NextBlock %p %zd\n", (void *)&dtls,
+ num_live_dtls);
return new_dtv;
}
static DTLS::DTV *DTLS_Find(uptr id) {
- VReport(2, "__tls_get_addr: DTLS_Find %p %zd\n", &dtls, id);
+ VReport(2, "__tls_get_addr: DTLS_Find %p %zd\n", (void *)&dtls, id);
static constexpr uptr kPerBlock = ARRAY_SIZE(DTLS::DTVBlock::dtvs);
DTLS::DTVBlock *cur = DTLS_NextBlock(&dtls.dtv_block);
if (!cur)
@@ -82,7 +77,7 @@ static DTLS::DTV *DTLS_Find(uptr id) {
void DTLS_Destroy() {
if (!common_flags()->intercept_tls_get_addr) return;
- VReport(2, "__tls_get_addr: DTLS_Destroy %p\n", &dtls);
+ VReport(2, "__tls_get_addr: DTLS_Destroy %p\n", (void *)&dtls);
DTLS::DTVBlock *block = (DTLS::DTVBlock *)atomic_exchange(
&dtls.dtv_block, kDestroyedThread, memory_order_release);
while (block) {
@@ -107,6 +102,14 @@ static const uptr kDtvOffset = 0x800;
static const uptr kDtvOffset = 0;
#endif
+extern "C" {
+SANITIZER_WEAK_ATTRIBUTE
+uptr __sanitizer_get_allocated_size(const void *p);
+
+SANITIZER_WEAK_ATTRIBUTE
+const void *__sanitizer_get_allocated_begin(const void *p);
+}
+
DTLS::DTV *DTLS_on_tls_get_addr(void *arg_void, void *res,
uptr static_tls_begin, uptr static_tls_end) {
if (!common_flags()->intercept_tls_get_addr) return 0;
@@ -117,26 +120,26 @@ DTLS::DTV *DTLS_on_tls_get_addr(void *arg_void, void *res,
return 0;
uptr tls_size = 0;
uptr tls_beg = reinterpret_cast<uptr>(res) - arg->offset - kDtvOffset;
- VReport(2, "__tls_get_addr: %p {%p,%p} => %p; tls_beg: %p; sp: %p "
- "num_live_dtls %zd\n",
- arg, arg->dso_id, arg->offset, res, tls_beg, &tls_beg,
+ VReport(2,
+ "__tls_get_addr: %p {0x%zx,0x%zx} => %p; tls_beg: 0x%zx; sp: %p "
+ "num_live_dtls %zd\n",
+ (void *)arg, arg->dso_id, arg->offset, res, tls_beg, (void *)&tls_beg,
atomic_load(&number_of_live_dtls, memory_order_relaxed));
if (dtls.last_memalign_ptr == tls_beg) {
tls_size = dtls.last_memalign_size;
- VReport(2, "__tls_get_addr: glibc <=2.18 suspected; tls={%p,%p}\n",
- tls_beg, tls_size);
+ VReport(2, "__tls_get_addr: glibc <=2.24 suspected; tls={0x%zx,0x%zx}\n",
+ tls_beg, tls_size);
} else if (tls_beg >= static_tls_begin && tls_beg < static_tls_end) {
// This is the static TLS block which was initialized / unpoisoned at thread
// creation.
- VReport(2, "__tls_get_addr: static tls: %p\n", tls_beg);
+ VReport(2, "__tls_get_addr: static tls: 0x%zx\n", tls_beg);
tls_size = 0;
- } else if ((tls_beg % 4096) == sizeof(Glibc_2_19_tls_header)) {
- // We may want to check gnu_get_libc_version().
- Glibc_2_19_tls_header *header = (Glibc_2_19_tls_header *)tls_beg - 1;
- tls_size = header->size;
- tls_beg = header->start;
- VReport(2, "__tls_get_addr: glibc >=2.19 suspected; tls={%p %p}\n",
- tls_beg, tls_size);
+ } else if (const void *start =
+ __sanitizer_get_allocated_begin((void *)tls_beg)) {
+ tls_beg = (uptr)start;
+ tls_size = __sanitizer_get_allocated_size(start);
+ VReport(2, "__tls_get_addr: glibc >=2.25 suspected; tls={0x%zx,0x%zx}\n",
+ tls_beg, tls_size);
} else {
VReport(2, "__tls_get_addr: Can't guess glibc version\n");
// This may happen inside the DTOR of main thread, so just ignore it.
@@ -149,7 +152,7 @@ DTLS::DTV *DTLS_on_tls_get_addr(void *arg_void, void *res,
void DTLS_on_libc_memalign(void *ptr, uptr size) {
if (!common_flags()->intercept_tls_get_addr) return;
- VReport(2, "DTLS_on_libc_memalign: %p %p\n", ptr, size);
+ VReport(2, "DTLS_on_libc_memalign: %p 0x%zx\n", ptr, size);
dtls.last_memalign_ptr = reinterpret_cast<uptr>(ptr);
dtls.last_memalign_size = size;
}
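The new detection branch above leans on two weak allocator-interface symbols; a sketch of the lookup under the same declarations (GuessDtlsRange is an illustrative helper, and unlike the patch it null-checks the weak symbol for runtimes linked without an allocator):

extern "C" {
SANITIZER_WEAK_ATTRIBUTE uptr __sanitizer_get_allocated_size(const void *p);
SANITIZER_WEAK_ATTRIBUTE const void *__sanitizer_get_allocated_begin(
    const void *p);
}

static bool GuessDtlsRange(uptr tls_beg, uptr *beg, uptr *size) {
  // Weak symbols resolve to null when no tool allocator is linked in; the
  // caller then falls through to "Can't guess glibc version".
  if (!&__sanitizer_get_allocated_begin)
    return false;
  const void *start = __sanitizer_get_allocated_begin((void *)tls_beg);
  if (!start)
    return false;  // not a malloc-backed DTLS chunk
  *beg = (uptr)start;
  *size = __sanitizer_get_allocated_size(start);
  return true;
}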
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.h
index a599c0bbc75c..0ddab61deb10 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_tls_get_addr.h
@@ -12,16 +12,24 @@
// the lack of interface that would tell us about the Dynamic TLS (DTLS).
// https://sourceware.org/bugzilla/show_bug.cgi?id=16291
//
-// The matters get worse because the glibc implementation changed between
-// 2.18 and 2.19:
-// https://groups.google.com/forum/#!topic/address-sanitizer/BfwYD8HMxTM
-//
-// Before 2.19, every DTLS chunk is allocated with __libc_memalign,
+// Before 2.25: every DTLS chunk is allocated with __libc_memalign,
// which we intercept and thus know where is the DTLS.
-// Since 2.19, DTLS chunks are allocated with __signal_safe_memalign,
-// which is an internal function that wraps a mmap call, neither of which
-// we can intercept. Luckily, __signal_safe_memalign has a simple parseable
-// header which we can use.
+//
+// Since 2.25: DTLS chunks are allocated with malloc. We could co-opt
+// the malloc interceptor to keep track of the last allocation, similar
+// to how we handle __libc_memalign; however, this adds some overhead
+// (since malloc, unlike __libc_memalign, is commonly called), and
+// requires care to avoid false negatives for LeakSanitizer.
+// Instead, we rely on our internal allocators - which keep track of all
+// their allocations - to determine whether an address points to a malloc
+// allocation.
+//
+// There exists a since-deprecated version of Google's internal glibc fork
+// that used __signal_safe_memalign. DTLS_on_tls_get_addr relied on a
+// heuristic check (is the allocation 16 bytes from the start of a page
+// boundary?), which was sometimes erroneous:
+// https://bugs.chromium.org/p/chromium/issues/detail?id=1275223#c15
+// Since that check has no practical use anymore, we have removed it.
//
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_type_traits.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_type_traits.h
index 2a58d9874d2c..06a44d1b5c7a 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_type_traits.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_type_traits.h
@@ -13,6 +13,8 @@
#ifndef SANITIZER_TYPE_TRAITS_H
#define SANITIZER_TYPE_TRAITS_H
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
namespace __sanitizer {
struct true_type {
@@ -57,6 +59,83 @@ struct conditional<false, T, F> {
using type = F;
};
+template <class T>
+struct remove_reference {
+ using type = T;
+};
+template <class T>
+struct remove_reference<T&> {
+ using type = T;
+};
+template <class T>
+struct remove_reference<T&&> {
+ using type = T;
+};
+
+template <class T>
+WARN_UNUSED_RESULT inline typename remove_reference<T>::type&& move(T&& t) {
+ return static_cast<typename remove_reference<T>::type&&>(t);
+}
+
+template <class T>
+WARN_UNUSED_RESULT inline constexpr T&& forward(
+ typename remove_reference<T>::type& t) {
+ return static_cast<T&&>(t);
+}
+
+template <class T>
+WARN_UNUSED_RESULT inline constexpr T&& forward(
+ typename remove_reference<T>::type&& t) {
+ return static_cast<T&&>(t);
+}
+
+template <class T, T v>
+struct integral_constant {
+ static constexpr const T value = v;
+ typedef T value_type;
+ typedef integral_constant type;
+ constexpr operator value_type() const { return value; }
+ constexpr value_type operator()() const { return value; }
+};
+
+#ifndef __has_builtin
+# define __has_builtin(x) 0
+#endif
+
+#if __has_builtin(__is_trivially_destructible)
+
+template <class T>
+struct is_trivially_destructible
+ : public integral_constant<bool, __is_trivially_destructible(T)> {};
+
+#elif __has_builtin(__has_trivial_destructor)
+
+template <class T>
+struct is_trivially_destructible
+ : public integral_constant<bool, __has_trivial_destructor(T)> {};
+
+#else
+
+template <class T>
+struct is_trivially_destructible
+ : public integral_constant<bool, /* less efficient fallback */ false> {};
+
+#endif
+
+#if __has_builtin(__is_trivially_copyable)
+
+template <class T>
+struct is_trivially_copyable
+ : public integral_constant<bool, __is_trivially_copyable(T)> {};
+
+#else
+
+template <class T>
+struct is_trivially_copyable
+ : public integral_constant<bool, /* less efficient fallback */ false> {};
+
+#endif
+
} // namespace __sanitizer
#endif
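Brief usage of the new traits (a sketch with the __sanitizer namespace assumed in scope). Only negative assertions are portable for the destructibility trait, since the non-builtin fallback conservatively answers false:

struct NonTrivial { ~NonTrivial() {} };
static_assert(!is_trivially_destructible<NonTrivial>::value, "");

// move/forward mirror their std:: counterparts without pulling <utility>
// into the runtime:
template <class T>
void swap_values(T &a, T &b) {
  T tmp = move(a);
  a = move(b);
  b = move(tmp);
}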
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_unwind_fuchsia.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_unwind_fuchsia.cpp
new file mode 100644
index 000000000000..f3eb8591dcbc
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_unwind_fuchsia.cpp
@@ -0,0 +1,66 @@
+//===-- sanitizer_unwind_fuchsia.cpp --------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// Sanitizer unwind Fuchsia specific functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_FUCHSIA
+
+# include <limits.h>
+# include <unwind.h>
+
+# include "sanitizer_common.h"
+# include "sanitizer_stacktrace.h"
+
+namespace __sanitizer {
+
+# if SANITIZER_CAN_SLOW_UNWIND
+struct UnwindTraceArg {
+ BufferedStackTrace *stack;
+ u32 max_depth;
+};
+
+_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
+ UnwindTraceArg *arg = static_cast<UnwindTraceArg *>(param);
+ CHECK_LT(arg->stack->size, arg->max_depth);
+ uptr pc = _Unwind_GetIP(ctx);
+ if (pc < GetPageSizeCached())
+ return _URC_NORMAL_STOP;
+ arg->stack->trace_buffer[arg->stack->size++] = pc;
+ return (arg->stack->size == arg->max_depth ? _URC_NORMAL_STOP
+ : _URC_NO_REASON);
+}
+
+void BufferedStackTrace::UnwindSlow(uptr pc, u32 max_depth) {
+ CHECK_GE(max_depth, 2);
+ size = 0;
+ UnwindTraceArg arg = {this, Min(max_depth + 1, kStackTraceMax)};
+ _Unwind_Backtrace(Unwind_Trace, &arg);
+ CHECK_GT(size, 0);
+ // We need to pop a few frames so that pc is on top.
+ uptr to_pop = LocatePcInTrace(pc);
+ // trace_buffer[0] belongs to the current function so we always pop it,
+ // unless there is only 1 frame in the stack trace (1 frame is always better
+ // than 0!).
+ PopStackFrames(Min(to_pop, static_cast<uptr>(1)));
+ trace_buffer[0] = pc;
+}
+
+void BufferedStackTrace::UnwindSlow(uptr pc, void *context, u32 max_depth) {
+ CHECK(context);
+ CHECK_GE(max_depth, 2);
+ UNREACHABLE("signal context doesn't exist");
+}
+# endif // SANITIZER_CAN_SLOW_UNWIND
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_FUCHSIA
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_unwind_linux_libcdep.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_unwind_linux_libcdep.cpp
index b2628dcc4dc1..6a8e82e2e213 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_unwind_linux_libcdep.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_unwind_linux_libcdep.cpp
@@ -58,7 +58,7 @@ unwind_backtrace_signal_arch_func unwind_backtrace_signal_arch;
#endif
uptr Unwind_GetIP(struct _Unwind_Context *ctx) {
-#if defined(__arm__) && !SANITIZER_MAC
+#if defined(__arm__) && !SANITIZER_APPLE
uptr val;
_Unwind_VRS_Result res = _Unwind_VRS_Get(ctx, _UVRSC_CORE,
15 /* r15 = PC */, _UVRSD_UINT32, &val);
@@ -139,13 +139,7 @@ void BufferedStackTrace::UnwindSlow(uptr pc, u32 max_depth) {
if (to_pop == 0 && size > 1)
to_pop = 1;
PopStackFrames(to_pop);
-#if defined(__GNUC__) && defined(__sparc__)
- // __builtin_return_address returns the address of the call instruction
- // on the SPARC and not the return address, so we need to compensate.
- trace_buffer[0] = GetNextInstructionPc(pc);
-#else
trace_buffer[0] = pc;
-#endif
}
void BufferedStackTrace::UnwindSlow(uptr pc, void *context, u32 max_depth) {
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_unwind_win.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_unwind_win.cpp
index 7e01c81d0422..afcd01dae0b7 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_unwind_win.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_unwind_win.cpp
@@ -57,30 +57,37 @@ void BufferedStackTrace::UnwindSlow(uptr pc, void *context, u32 max_depth) {
InitializeDbgHelpIfNeeded();
size = 0;
-#if defined(_WIN64)
+# if SANITIZER_WINDOWS64
+# if SANITIZER_ARM64
+ int machine_type = IMAGE_FILE_MACHINE_ARM64;
+ stack_frame.AddrPC.Offset = ctx.Pc;
+ stack_frame.AddrFrame.Offset = ctx.Fp;
+ stack_frame.AddrStack.Offset = ctx.Sp;
+# else
int machine_type = IMAGE_FILE_MACHINE_AMD64;
stack_frame.AddrPC.Offset = ctx.Rip;
stack_frame.AddrFrame.Offset = ctx.Rbp;
stack_frame.AddrStack.Offset = ctx.Rsp;
-#else
+# endif
+# else
int machine_type = IMAGE_FILE_MACHINE_I386;
stack_frame.AddrPC.Offset = ctx.Eip;
stack_frame.AddrFrame.Offset = ctx.Ebp;
stack_frame.AddrStack.Offset = ctx.Esp;
-#endif
+# endif
stack_frame.AddrPC.Mode = AddrModeFlat;
stack_frame.AddrFrame.Mode = AddrModeFlat;
stack_frame.AddrStack.Mode = AddrModeFlat;
while (StackWalk64(machine_type, GetCurrentProcess(), GetCurrentThread(),
- &stack_frame, &ctx, NULL, SymFunctionTableAccess64,
- SymGetModuleBase64, NULL) &&
- size < Min(max_depth, kStackTraceMax)) {
+ &stack_frame, &ctx, NULL, SymFunctionTableAccess64,
+ SymGetModuleBase64, NULL) &&
+ size < Min(max_depth, kStackTraceMax)) {
trace_buffer[size++] = (uptr)stack_frame.AddrPC.Offset;
}
}
-#ifdef __clang__
-#pragma clang diagnostic pop
-#endif
-#endif // #if !SANITIZER_GO
+# ifdef __clang__
+# pragma clang diagnostic pop
+# endif
+# endif // #if !SANITIZER_GO
#endif // SANITIZER_WINDOWS
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_vector.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_vector.h
index 31216f3ec3a6..79ff275660d3 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_vector.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_vector.h
@@ -83,8 +83,8 @@ class Vector {
}
EnsureSize(size);
if (old_size < size) {
- for (uptr i = old_size; i < size; i++)
- internal_memset(&begin_[i], 0, sizeof(begin_[i]));
+ internal_memset(&begin_[old_size], 0,
+ sizeof(begin_[old_size]) * (size - old_size));
}
}
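The Resize() change above folds the per-element zeroing into one call; a sketch of the equivalence for elements laid out contiguously (ZeroTail is an illustrative name):

template <typename T>
void ZeroTail(T *begin, uptr old_size, uptr new_size) {
  // Elements old_size..new_size-1 occupy a single contiguous byte range,
  // so one memset of (new_size - old_size) * sizeof(T) bytes matches the
  // removed loop of new_size - old_size separate memsets.
  internal_memset(&begin[old_size], 0, sizeof(T) * (new_size - old_size));
}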
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp
index dddd885a45dd..06e496523eea 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp
@@ -93,6 +93,11 @@ bool FileExists(const char *filename) {
return ::GetFileAttributesA(filename) != INVALID_FILE_ATTRIBUTES;
}
+bool DirExists(const char *path) {
+ auto attr = ::GetFileAttributesA(path);
+ return (attr != INVALID_FILE_ATTRIBUTES) && (attr & FILE_ATTRIBUTE_DIRECTORY);
+}
+
uptr internal_getpid() {
return GetProcessId(GetCurrentProcess());
}
@@ -126,6 +131,11 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
}
#endif // #if !SANITIZER_GO
+bool ErrorIsOOM(error_t err) {
+ // TODO: This should check which `err`s correspond to OOM.
+ return false;
+}
+
void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
if (rv == 0)
@@ -224,6 +234,17 @@ void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
return (void *)mapped_addr;
}
+// ZeroMmapFixedRegion zeros out a region of memory previously returned from a
+// call to one of the MmapFixed* helpers. On non-Windows systems this would be
+// done with another mmap, but on Windows remapping is not an option.
+// VirtualFree(DECOMMIT)+VirtualAlloc(RECOMMIT) would also be a way to zero the
+// memory, but we can't do this atomically, so instead we fall back to using
+// internal_memset.
+bool ZeroMmapFixedRegion(uptr fixed_addr, uptr size) {
+ internal_memset((void*) fixed_addr, 0, size);
+ return true;
+}
+
bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {
// FIXME: is this really "NoReserve"? On Win32 this does not matter much,
// but on Win64 it does.
@@ -336,6 +357,16 @@ bool MprotectNoAccess(uptr addr, uptr size) {
return VirtualProtect((LPVOID)addr, size, PAGE_NOACCESS, &old_protection);
}
+bool MprotectReadOnly(uptr addr, uptr size) {
+ DWORD old_protection;
+ return VirtualProtect((LPVOID)addr, size, PAGE_READONLY, &old_protection);
+}
+
+bool MprotectReadWrite(uptr addr, uptr size) {
+ DWORD old_protection;
+ return VirtualProtect((LPVOID)addr, size, PAGE_READWRITE, &old_protection);
+}
+
void ReleaseMemoryPagesToOS(uptr beg, uptr end) {
uptr beg_aligned = RoundDownTo(beg, GetPageSizeCached()),
end_aligned = RoundDownTo(end, GetPageSizeCached());
@@ -512,7 +543,7 @@ void ReExec() {
UNIMPLEMENTED();
}
-void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {}
+void PlatformPrepareForSandboxing(void *args) {}
bool StackSizeIsUnlimited() {
UNIMPLEMENTED();
@@ -565,6 +596,10 @@ void Abort() {
internal__exit(3);
}
+bool CreateDir(const char *pathname) {
+ return CreateDirectoryA(pathname, nullptr) != 0;
+}
+
#if !SANITIZER_GO
// Read the file to extract the ImageBase field from the PE header. If ASLR is
// disabled and this virtual address is available, the loader will typically
@@ -688,13 +723,24 @@ void ListOfModules::fallbackInit() { clear(); }
// atexit() as soon as it is ready for use (i.e. after .CRT$XIC initializers).
InternalMmapVectorNoCtor<void (*)(void)> atexit_functions;
-int Atexit(void (*function)(void)) {
+static int queueAtexit(void (*function)(void)) {
atexit_functions.push_back(function);
return 0;
}
+// If Atexit() is called after RunAtexit() has already been run, it needs
+// to be able to call atexit() directly. Here we use a function pointer to
+// switch out its behavior.
+// An example of where this is needed is the asan_dynamic runtime on MinGW-w64.
+// On this environment, __asan_init is called during global constructor phase,
+// way after calling the .CRT$XID initializer.
+static int (*volatile queueOrCallAtExit)(void (*)(void)) = &queueAtexit;
+
+int Atexit(void (*function)(void)) { return queueOrCallAtExit(function); }
+
static int RunAtexit() {
TraceLoggingUnregister(g_asan_provider);
+ queueOrCallAtExit = &atexit;
int ret = 0;
for (uptr i = 0; i < atexit_functions.size(); ++i) {
ret |= atexit(atexit_functions[i]);
@@ -827,27 +873,6 @@ void FutexWake(atomic_uint32_t *p, u32 count) {
WakeByAddressAll(p);
}
-// ---------------------- BlockingMutex ---------------- {{{1
-
-BlockingMutex::BlockingMutex() {
- CHECK(sizeof(SRWLOCK) <= sizeof(opaque_storage_));
- internal_memset(this, 0, sizeof(*this));
-}
-
-void BlockingMutex::Lock() {
- AcquireSRWLockExclusive((PSRWLOCK)opaque_storage_);
- CHECK_EQ(owner_, 0);
- owner_ = GetThreadSelf();
-}
-
-void BlockingMutex::Unlock() {
- CheckLocked();
- owner_ = 0;
- ReleaseSRWLockExclusive((PSRWLOCK)opaque_storage_);
-}
-
-void BlockingMutex::CheckLocked() const { CHECK_EQ(owner_, GetThreadSelf()); }
-
uptr GetTlsSize() {
return 0;
}
@@ -962,13 +987,18 @@ void SignalContext::InitPcSpBp() {
CONTEXT *context_record = (CONTEXT *)context;
pc = (uptr)exception_record->ExceptionAddress;
-#ifdef _WIN64
+# if SANITIZER_WINDOWS64
+# if SANITIZER_ARM64
+ bp = (uptr)context_record->Fp;
+ sp = (uptr)context_record->Sp;
+# else
bp = (uptr)context_record->Rbp;
sp = (uptr)context_record->Rsp;
-#else
+# endif
+# else
bp = (uptr)context_record->Ebp;
sp = (uptr)context_record->Esp;
-#endif
+# endif
}
uptr SignalContext::GetAddress() const {
@@ -990,7 +1020,7 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
// The write flag is only available for access violation exceptions.
if (exception_record->ExceptionCode != EXCEPTION_ACCESS_VIOLATION)
- return SignalContext::UNKNOWN;
+ return SignalContext::Unknown;
// The contents of this array are documented at
// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-exception_record
@@ -998,13 +1028,13 @@ SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
// second element is the faulting address.
switch (exception_record->ExceptionInformation[0]) {
case 0:
- return SignalContext::READ;
+ return SignalContext::Read;
case 1:
- return SignalContext::WRITE;
+ return SignalContext::Write;
case 8:
- return SignalContext::UNKNOWN;
+ return SignalContext::Unknown;
}
- return SignalContext::UNKNOWN;
+ return SignalContext::Unknown;
}
void SignalContext::DumpAllRegisters(void *context) {
@@ -1091,10 +1121,6 @@ void InitializePlatformEarly() {
// Do nothing.
}
-void MaybeReexec() {
- // No need to re-exec on Windows.
-}
-
void CheckASLR() {
// Do nothing
}
@@ -1131,7 +1157,7 @@ bool IsProcessRunning(pid_t pid) {
int WaitForProcess(pid_t pid) { return -1; }
// FIXME implement on this platform.
-void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) { }
+void GetMemoryProfile(fill_profile_f cb, uptr *stats) {}
void CheckNoDeepBind(const char *filename, int flag) {
// Do nothing.
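Standalone sketch of the queue-then-passthrough pattern Atexit() adopts above (names simplified; the real RunAtexit also unregisters ETW tracing):

#include <cstdlib>
#include <vector>

static std::vector<void (*)()> queued;

static int QueueAtexit(void (*fn)()) {
  queued.push_back(fn);  // CRT atexit() not ready yet: just remember it
  return 0;
}

static int (*volatile atexit_impl)(void (*)()) = &QueueAtexit;

int Atexit(void (*fn)()) { return atexit_impl(fn); }

static int RunAtexit() {
  atexit_impl = &std::atexit;  // late registrations go straight to the CRT
  int ret = 0;
  for (auto *fn : queued) ret |= std::atexit(fn);
  return ret;
}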
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_win_dll_thunk.h b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_win_dll_thunk.h
index 48c73c4c98ad..639d91a2edae 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_win_dll_thunk.h
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_win_dll_thunk.h
@@ -84,7 +84,7 @@ extern "C" int __dll_thunk_init();
// which isn't a big deal.
#define INTERCEPT_LIBRARY_FUNCTION(name) \
extern "C" void name(); \
- INTERCEPT_OR_DIE(WRAPPER_NAME(name), name)
+ INTERCEPT_OR_DIE(STRINGIFY(WRAP(name)), name)
// Use these macros for functions that could be called before __dll_thunk_init()
// is executed and don't lead to errors if defined (free, malloc, etc).
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/sanitizer_symbolize.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/sanitizer_symbolize.cpp
index 3809880d50b4..c851dbbf2eb2 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/sanitizer_symbolize.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/sanitizer_symbolize.cpp
@@ -10,17 +10,27 @@
//
//===----------------------------------------------------------------------===//
+#include <inttypes.h>
#include <stdio.h>
#include <string>
#include "llvm/DebugInfo/Symbolize/DIPrinter.h"
#include "llvm/DebugInfo/Symbolize/Symbolize.h"
+#include "llvm/Demangle/Demangle.h"
+
+static llvm::symbolize::LLVMSymbolizer *Symbolizer = nullptr;
+static bool Demangle = true;
+static bool InlineFrames = true;
static llvm::symbolize::LLVMSymbolizer *getDefaultSymbolizer() {
- static llvm::symbolize::LLVMSymbolizer *DefaultSymbolizer =
- new llvm::symbolize::LLVMSymbolizer();
- return DefaultSymbolizer;
+ if (Symbolizer)
+ return Symbolizer;
+ llvm::symbolize::LLVMSymbolizer::Options Opts;
+ Opts.Demangle = Demangle;
+ Opts.UntagAddresses = true;
+ Symbolizer = new llvm::symbolize::LLVMSymbolizer(Opts);
+ return Symbolizer;
}
static llvm::symbolize::PrinterConfig getDefaultPrinterConfig() {
@@ -33,9 +43,18 @@ static llvm::symbolize::PrinterConfig getDefaultPrinterConfig() {
return Config;
}
+static llvm::symbolize::ErrorHandler symbolize_error_handler(
+ llvm::raw_string_ostream &OS) {
+ return
+ [&](const llvm::ErrorInfoBase &ErrorInfo, llvm::StringRef ErrorBanner) {
+ OS << ErrorBanner;
+ ErrorInfo.log(OS);
+ OS << '\n';
+ };
+}
+
namespace __sanitizer {
-int internal_snprintf(char *buffer, unsigned long length, const char *format,
- ...);
+int internal_snprintf(char *buffer, uintptr_t length, const char *format, ...);
} // namespace __sanitizer
extern "C" {
@@ -43,29 +62,31 @@ extern "C" {
typedef uint64_t u64;
bool __sanitizer_symbolize_code(const char *ModuleName, uint64_t ModuleOffset,
- char *Buffer, int MaxLength,
- bool SymbolizeInlineFrames) {
+ char *Buffer, int MaxLength) {
std::string Result;
{
llvm::raw_string_ostream OS(Result);
llvm::symbolize::PrinterConfig Config = getDefaultPrinterConfig();
llvm::symbolize::Request Request{ModuleName, ModuleOffset};
- auto Printer =
- std::make_unique<llvm::symbolize::LLVMPrinter>(OS, OS, Config);
+ auto Printer = std::make_unique<llvm::symbolize::LLVMPrinter>(
+ OS, symbolize_error_handler(OS), Config);
- // TODO: it is neccessary to set proper SectionIndex here.
+ // TODO: it is necessary to set proper SectionIndex here.
// object::SectionedAddress::UndefSection works for only absolute addresses.
- if (SymbolizeInlineFrames) {
+ if (InlineFrames) {
auto ResOrErr = getDefaultSymbolizer()->symbolizeInlinedCode(
ModuleName,
{ModuleOffset, llvm::object::SectionedAddress::UndefSection});
- Printer->print(Request,
- ResOrErr ? ResOrErr.get() : llvm::DIInliningInfo());
+ if (!ResOrErr)
+ return false;
+ Printer->print(Request, ResOrErr.get());
} else {
auto ResOrErr = getDefaultSymbolizer()->symbolizeCode(
ModuleName,
{ModuleOffset, llvm::object::SectionedAddress::UndefSection});
- Printer->print(Request, ResOrErr ? ResOrErr.get() : llvm::DILineInfo());
+ if (!ResOrErr)
+ return false;
+ Printer->print(Request, ResOrErr.get());
}
}
return __sanitizer::internal_snprintf(Buffer, MaxLength, "%s",
@@ -79,30 +100,70 @@ bool __sanitizer_symbolize_data(const char *ModuleName, uint64_t ModuleOffset,
llvm::symbolize::PrinterConfig Config = getDefaultPrinterConfig();
llvm::raw_string_ostream OS(Result);
llvm::symbolize::Request Request{ModuleName, ModuleOffset};
- auto Printer =
- std::make_unique<llvm::symbolize::LLVMPrinter>(OS, OS, Config);
+ auto Printer = std::make_unique<llvm::symbolize::LLVMPrinter>(
+ OS, symbolize_error_handler(OS), Config);
- // TODO: it is neccessary to set proper SectionIndex here.
+ // TODO: it is necessary to set proper SectionIndex here.
// object::SectionedAddress::UndefSection works for only absolute addresses.
auto ResOrErr = getDefaultSymbolizer()->symbolizeData(
ModuleName,
{ModuleOffset, llvm::object::SectionedAddress::UndefSection});
- Printer->print(Request, ResOrErr ? ResOrErr.get() : llvm::DIGlobal());
+ if (!ResOrErr)
+ return false;
+ Printer->print(Request, ResOrErr.get());
}
return __sanitizer::internal_snprintf(Buffer, MaxLength, "%s",
Result.c_str()) < MaxLength;
}
-void __sanitizer_symbolize_flush() { getDefaultSymbolizer()->flush(); }
+bool __sanitizer_symbolize_frame(const char *ModuleName, uint64_t ModuleOffset,
+ char *Buffer, int MaxLength) {
+ std::string Result;
+ {
+ llvm::symbolize::PrinterConfig Config = getDefaultPrinterConfig();
+ llvm::raw_string_ostream OS(Result);
+ llvm::symbolize::Request Request{ModuleName, ModuleOffset};
+ auto Printer = std::make_unique<llvm::symbolize::LLVMPrinter>(
+ OS, symbolize_error_handler(OS), Config);
-int __sanitizer_symbolize_demangle(const char *Name, char *Buffer,
- int MaxLength) {
- std::string Result =
- llvm::symbolize::LLVMSymbolizer::DemangleName(Name, nullptr);
+ // TODO: it is necessary to set proper SectionIndex here.
+ // object::SectionedAddress::UndefSection works for only absolute addresses.
+ auto ResOrErr = getDefaultSymbolizer()->symbolizeFrame(
+ ModuleName,
+ {ModuleOffset, llvm::object::SectionedAddress::UndefSection});
+ if (!ResOrErr)
+ return false;
+ Printer->print(Request, ResOrErr.get());
+ }
return __sanitizer::internal_snprintf(Buffer, MaxLength, "%s",
- Result.c_str()) < MaxLength
- ? static_cast<int>(Result.size() + 1)
- : 0;
+ Result.c_str()) < MaxLength;
+}
+
+void __sanitizer_symbolize_flush() {
+ if (Symbolizer)
+ Symbolizer->flush();
+}
+
+bool __sanitizer_symbolize_demangle(const char *Name, char *Buffer,
+ int MaxLength) {
+ std::string Result;
+ if (!llvm::nonMicrosoftDemangle(Name, Result))
+ return false;
+ return __sanitizer::internal_snprintf(Buffer, MaxLength, "%s",
+ Result.c_str()) < MaxLength;
+}
+
+bool __sanitizer_symbolize_set_demangle(bool Value) {
+ // Must be called before the LLVMSymbolizer is created.
+ if (Symbolizer)
+ return false;
+ Demangle = Value;
+ return true;
+}
+
+bool __sanitizer_symbolize_set_inline_frames(bool Value) {
+ InlineFrames = Value;
+ return true;
}
// Override __cxa_atexit and ignore callbacks.
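Because the wrapper now builds its LLVMSymbolizer lazily, the option setters only take effect before the first symbolization request. A usage sketch against the extern "C" entry points above (SymbolizePcOnce is an illustrative caller):

#include <stdint.h>

extern "C" {
bool __sanitizer_symbolize_set_demangle(bool Value);
bool __sanitizer_symbolize_set_inline_frames(bool Value);
bool __sanitizer_symbolize_code(const char *ModuleName, uint64_t ModuleOffset,
                                char *Buffer, int MaxLength);
}

bool SymbolizePcOnce(const char *module, uint64_t offset, char *buf, int len) {
  // Must run before the first __sanitizer_symbolize_code call; afterwards
  // set_demangle returns false because the symbolizer already exists.
  __sanitizer_symbolize_set_demangle(true);
  __sanitizer_symbolize_set_inline_frames(true);
  return __sanitizer_symbolize_code(module, offset, buf, len);
}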
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/sanitizer_wrappers.cpp b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/sanitizer_wrappers.cpp
index d3c59e357d46..cdac2333706d 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/sanitizer_wrappers.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/sanitizer_wrappers.cpp
@@ -13,6 +13,7 @@
#include <dlfcn.h>
#include <errno.h>
#include <fcntl.h>
+#include <inttypes.h>
#include <stdarg.h>
#include <stdio.h>
#include <unistd.h>
@@ -27,11 +28,11 @@ unsigned long internal_stat(const char *path, void *buf);
unsigned long internal_lstat(const char *path, void *buf);
unsigned long internal_fstat(int fd, void *buf);
size_t internal_strlen(const char *s);
-unsigned long internal_mmap(void *addr, unsigned long length, int prot,
- int flags, int fd, unsigned long long offset);
+unsigned long internal_mmap(void *addr, uintptr_t length, int prot, int flags,
+ int fd, unsigned long long offset);
void *internal_memcpy(void *dest, const void *src, unsigned long n);
// Used to propagate errno.
-bool internal_iserror(unsigned long retval, int *rverrno = 0);
+bool internal_iserror(uintptr_t retval, int *rverrno = 0);
} // namespace __sanitizer
namespace {
@@ -154,8 +155,8 @@ size_t strlen(const char *s) { return __sanitizer::internal_strlen(s); }
void *mmap(void *addr, size_t length, int prot, int flags, int fd,
off_t offset) {
- unsigned long res = __sanitizer::internal_mmap(
- addr, (unsigned long)length, prot, flags, fd, (unsigned long long)offset);
+ unsigned long res =
+ __sanitizer::internal_mmap(addr, length, prot, flags, fd, offset);
RETURN_OR_SET_ERRNO(void *, res);
}
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/scripts/ar_to_bc.sh b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/scripts/ar_to_bc.sh
deleted file mode 100755
index 5c77bea83294..000000000000
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/scripts/ar_to_bc.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/bash
-
-function usage() {
- echo "Usage: $0 INPUT... OUTPUT"
- exit 1
-}
-
-if [ "$#" -le 1 ]; then
- usage
-fi
-
-[[ $AR == /* ]] || AR=$PWD/$AR
-[[ $LINK == /* ]] || LINK=$PWD/$LINK
-
-INPUTS=
-OUTPUT=
-for ARG in $@; do
- INPUTS="$INPUTS $OUTPUT"
- OUTPUT=$(readlink -f $ARG)
-done
-
-echo Inputs: $INPUTS
-echo Output: $OUTPUT
-
-SCRATCH_DIR=$(mktemp -d)
-ln -s $INPUTS $SCRATCH_DIR/
-
-pushd $SCRATCH_DIR
-
-for INPUT in *; do
- for OBJ in $($AR t $INPUT); do
- $AR x $INPUT $OBJ
- mv -f $OBJ $(basename $INPUT).$OBJ
- done
-done
-
-$LINK *.o -o $OUTPUT
-
-rm -rf $SCRATCH_DIR
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh
index c793875db099..f24d42cc84e4 100755
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh
@@ -1,29 +1,19 @@
-#!/bin/bash -eu
+#!/usr/bin/env bash
#
-# Run as: CLANG=bin/clang ZLIB_SRC=src/zlib \
-# build_symbolizer.sh runtime_build/lib/clang/4.0.0/lib/linux/
+# Run as: CLANG=bin/clang build_symbolizer.sh out.o
+# If you want to use a local copy of zlib, set ZLIB_SRC.
# zlib can be downloaded from http://www.zlib.net.
#
-# Script compiles self-contained object file with symbolization code and injects
-# it into the given set of runtime libraries. Script updates only libraries
-# which has unresolved __sanitizer_symbolize_* symbols and matches architecture.
-# Object file is be compiled from LLVM sources with dependencies like libc++ and
-# zlib. Then it internalizes symbols in the file, so that it can be linked
-# into arbitrary programs, avoiding conflicts with the program own symbols and
-# avoiding dependencies on any program symbols. The only acceptable dependencies
-# are libc and __sanitizer::internal_* from sanitizer runtime.
+# The script compiles a self-contained object file with the symbolization code.
#
# Symbols exported by the object file will be used by Sanitizer runtime
# libraries to symbolize code/data in-process.
#
-# The script will modify the output directory which is given as the first
-# argument to the script.
-#
# FIXME: We should really be using a simpler approach to building this object
# file, and it should be available as a regular cmake rule. Conceptually, we
# want to be doing "ld -r" followed by "objcopy -G" to create a relocatable
# object file with only our entry points exposed. However, this does not work at
-# present, see PR30750.
+# present, see https://github.com/llvm/llvm-project/issues/30098.
set -x
set -e
@@ -31,46 +21,26 @@ set -u
SCRIPT_DIR=$(cd "$(dirname "$0")" && pwd)
SRC_DIR=$(readlink -f $SCRIPT_DIR/..)
-TARGE_DIR=$(readlink -f $1)
-COMPILER_RT_SRC=$(readlink -f ${SCRIPT_DIR}/../../../..)
-LLVM_SRC=${LLVM_SRC:-${COMPILER_RT_SRC}/../llvm}
-LLVM_SRC=$(readlink -f $LLVM_SRC)
-if [[ ! -d "${LLVM_SRC}/../llvm" ]] ; then
- LLVM_SRC=$(readlink -f ${COMPILER_RT_SRC}/../../../llvm)
-fi
-LIBCXX_SRC=$(readlink -f ${COMPILER_RT_SRC}/../libcxx)
-LIBCXXABI_SRC=$(readlink -f ${COMPILER_RT_SRC}/../libcxxabi)
-
-if [[ ! -d "${LLVM_SRC}/../llvm" ||
- ! -d "${LIBCXX_SRC}" ||
- ! -d "${LIBCXXABI_SRC}" ]]; then
- echo "Missing or incomplete LLVM_SRC"
- exit 1
-fi
-if [[ "$ZLIB_SRC" == "" ||
- ! -x "${ZLIB_SRC}/configure" ||
- ! -f "${ZLIB_SRC}/zlib.h" ]]; then
- echo "Missing or incomplete ZLIB_SRC"
+if [[ $# -ne 1 ]]; then
+ echo "Missing output file"
exit 1
fi
-ZLIB_SRC=$(readlink -f $ZLIB_SRC)
-J="${J:-50}"
+OUTPUT=$(readlink -f $1)
+COMPILER_RT_SRC=$(readlink -f ${SCRIPT_DIR}/../../../..)
+LLVM_SRC=${LLVM_SRC:-${COMPILER_RT_SRC}/../llvm}
+LLVM_SRC=$(readlink -f $LLVM_SRC)
CLANG="${CLANG:-`which clang`}"
CLANG_DIR=$(readlink -f $(dirname "$CLANG"))
-BUILD_DIR=$(readlink -f ./symbolizer)
-mkdir -p $BUILD_DIR
-cd $BUILD_DIR
-
CC=$CLANG_DIR/clang
CXX=$CLANG_DIR/clang++
TBLGEN=$CLANG_DIR/llvm-tblgen
OPT=$CLANG_DIR/opt
-export AR=$CLANG_DIR/llvm-ar
-export LINK=$CLANG_DIR/llvm-link
+AR=$CLANG_DIR/llvm-ar
+LINK=$CLANG_DIR/llvm-link
for F in $CC $CXX $TBLGEN $LINK $OPT $AR; do
if [[ ! -x "$F" ]]; then
@@ -79,70 +49,102 @@ for F in $CC $CXX $TBLGEN $LINK $OPT $AR; do
fi
done
+BUILD_DIR=${PWD}/symbolizer
+mkdir -p $BUILD_DIR
+cd $BUILD_DIR
+
ZLIB_BUILD=${BUILD_DIR}/zlib
LIBCXX_BUILD=${BUILD_DIR}/libcxx
LLVM_BUILD=${BUILD_DIR}/llvm
SYMBOLIZER_BUILD=${BUILD_DIR}/symbolizer
FLAGS=${FLAGS:-}
-FLAGS="$FLAGS -fPIC -flto -Os -g0 -DNDEBUG"
+ZLIB_SRC=${ZLIB_SRC:-}
+TARGET_TRIPLE=$($CC -print-target-triple $FLAGS)
+if [[ "$FLAGS" =~ "-m32" ]] ; then
+ # Avoid new wrappers.
+ FLAGS+=" -U_FILE_OFFSET_BITS"
+fi
+FLAGS+=" -fPIC -flto -Oz -g0 -DNDEBUG -target $TARGET_TRIPLE -Wno-unused-command-line-argument"
+FLAGS+=" -include ${SRC_DIR}/../sanitizer_redefine_builtins.h -DSANITIZER_COMMON_REDEFINE_BUILTINS_IN_STD -Wno-language-extension-token"
+
+LINKFLAGS="-fuse-ld=lld -target $TARGET_TRIPLE"
# Build zlib.
-mkdir -p ${ZLIB_BUILD}
+if [[ ! -d ${ZLIB_BUILD} ]]; then
+ if [[ -z "${ZLIB_SRC}" ]]; then
+ git clone https://github.com/madler/zlib ${ZLIB_BUILD}
+ else
+ ZLIB_SRC=$(readlink -f $ZLIB_SRC)
+ mkdir -p ${ZLIB_BUILD}
+ cp -r ${ZLIB_SRC}/* ${ZLIB_BUILD}/
+ fi
+fi
+
cd ${ZLIB_BUILD}
-cp -r ${ZLIB_SRC}/* .
-CC=$CC CFLAGS="$FLAGS" RANLIB=/bin/true ./configure --static
-make -j${J} libz.a
+AR="${AR}" CC="${CC}" CFLAGS="$FLAGS -Wno-deprecated-non-prototype" RANLIB=/bin/true ./configure --static
+make -j libz.a
# Build and install libcxxabi and libcxx.
-if [[ ! -d ${LIBCXX_BUILD} ]]; then
+if [[ ! -f ${LLVM_BUILD}/build.ninja ]]; then
+ rm -rf ${LIBCXX_BUILD}
mkdir -p ${LIBCXX_BUILD}
cd ${LIBCXX_BUILD}
LIBCXX_FLAGS="${FLAGS} -Wno-macro-redefined"
- PROJECTS=
- if [[ ! -d $LLVM_SRC/projects/libcxxabi ]] ; then
- PROJECTS="-DLLVM_ENABLE_PROJECTS='libcxx;libcxxabi'"
- fi
cmake -GNinja \
- ${PROJECTS} \
+ -DLLVM_ENABLE_RUNTIMES="libcxx;libcxxabi" \
-DCMAKE_BUILD_TYPE=Release \
+ -DCMAKE_C_COMPILER_WORKS=ON \
+ -DCMAKE_CXX_COMPILER_WORKS=ON \
-DCMAKE_C_COMPILER=$CC \
-DCMAKE_CXX_COMPILER=$CXX \
+ -DLIBCXX_ABI_NAMESPACE=__InternalSymbolizer \
+ '-DLIBCXX_EXTRA_SITE_DEFINES=_LIBCPP_DISABLE_VISIBILITY_ANNOTATIONS;_LIBCXXABI_DISABLE_VISIBILITY_ANNOTATIONS' \
-DCMAKE_C_FLAGS_RELEASE="${LIBCXX_FLAGS}" \
-DCMAKE_CXX_FLAGS_RELEASE="${LIBCXX_FLAGS}" \
-DLIBCXXABI_ENABLE_ASSERTIONS=OFF \
-DLIBCXXABI_ENABLE_EXCEPTIONS=OFF \
- -DLIBCXXABI_ENABLE_SHARED=OFF \
+ -DLIBCXXABI_USE_LLVM_UNWINDER=OFF \
-DLIBCXX_ENABLE_ASSERTIONS=OFF \
-DLIBCXX_ENABLE_EXCEPTIONS=OFF \
-DLIBCXX_ENABLE_RTTI=OFF \
+ -DCMAKE_SHARED_LINKER_FLAGS="$LINKFLAGS" \
-DLIBCXX_ENABLE_SHARED=OFF \
- $LLVM_SRC
+ -DLIBCXXABI_ENABLE_SHARED=OFF \
+ $LLVM_SRC/../runtimes
fi
cd ${LIBCXX_BUILD}
ninja cxx cxxabi
FLAGS="${FLAGS} -fno-rtti -fno-exceptions"
-LLVM_FLAGS="${FLAGS} -nostdinc++ -I${ZLIB_BUILD} -I${LIBCXX_BUILD}/include/c++/v1 -Wno-error=global-constructors"
+LLVM_CFLAGS="${FLAGS} -Wno-global-constructors"
+LLVM_CXXFLAGS="${LLVM_CFLAGS} -nostdinc++ -I${ZLIB_BUILD} -isystem ${LIBCXX_BUILD}/include -isystem ${LIBCXX_BUILD}/include/c++/v1"
# Build LLVM.
-if [[ ! -d ${LLVM_BUILD} ]]; then
+if [[ ! -f ${LLVM_BUILD}/build.ninja ]]; then
+ rm -rf ${LLVM_BUILD}
mkdir -p ${LLVM_BUILD}
cd ${LLVM_BUILD}
cmake -GNinja \
-DCMAKE_BUILD_TYPE=Release \
+ -DCMAKE_C_COMPILER_WORKS=ON \
+ -DCMAKE_CXX_COMPILER_WORKS=ON \
-DCMAKE_C_COMPILER=$CC \
-DCMAKE_CXX_COMPILER=$CXX \
- -DCMAKE_C_FLAGS_RELEASE="${LLVM_FLAGS}" \
- -DCMAKE_CXX_FLAGS_RELEASE="${LLVM_FLAGS}" \
+ -DLLVM_ENABLE_LIBCXX=ON \
+ -DCMAKE_C_FLAGS_RELEASE="${LLVM_CFLAGS}" \
+ -DCMAKE_CXX_FLAGS_RELEASE="${LLVM_CXXFLAGS}" \
+ -DCMAKE_EXE_LINKER_FLAGS="$LINKFLAGS -stdlib=libc++ -L${LIBCXX_BUILD}/lib" \
-DLLVM_TABLEGEN=$TBLGEN \
+ -DLLVM_INCLUDE_TESTS=OFF \
-DLLVM_ENABLE_ZLIB=ON \
+ -DLLVM_ENABLE_ZSTD=OFF \
-DLLVM_ENABLE_TERMINFO=OFF \
-DLLVM_ENABLE_THREADS=OFF \
$LLVM_SRC
fi
cd ${LLVM_BUILD}
-ninja LLVMSymbolize LLVMObject LLVMBinaryFormat LLVMDebugInfoDWARF LLVMSupport LLVMDebugInfoPDB LLVMMC LLVMDemangle LLVMTextAPI
+ninja LLVMSymbolize LLVMObject LLVMBinaryFormat LLVMDebugInfoDWARF LLVMSupport LLVMDebugInfoPDB LLVMDebuginfod LLVMMC LLVMDemangle LLVMTextAPI LLVMTargetParser
cd ${BUILD_DIR}
rm -rf ${SYMBOLIZER_BUILD}
@@ -150,32 +152,42 @@ mkdir ${SYMBOLIZER_BUILD}
cd ${SYMBOLIZER_BUILD}
echo "Compiling..."
-SYMBOLIZER_FLAGS="$LLVM_FLAGS -I${LLVM_SRC}/include -I${LLVM_BUILD}/include -std=c++14"
+SYMBOLIZER_FLAGS="$LLVM_CXXFLAGS -I${LLVM_SRC}/include -I${LLVM_BUILD}/include -std=c++17"
$CXX $SYMBOLIZER_FLAGS ${SRC_DIR}/sanitizer_symbolize.cpp ${SRC_DIR}/sanitizer_wrappers.cpp -c
$AR rc symbolizer.a sanitizer_symbolize.o sanitizer_wrappers.o
-SYMBOLIZER_API_LIST=__sanitizer_symbolize_code,__sanitizer_symbolize_data,__sanitizer_symbolize_flush,__sanitizer_symbolize_demangle
+SYMBOLIZER_API_LIST=__sanitizer_symbolize_code
+SYMBOLIZER_API_LIST+=,__sanitizer_symbolize_data
+SYMBOLIZER_API_LIST+=,__sanitizer_symbolize_frame
+SYMBOLIZER_API_LIST+=,__sanitizer_symbolize_flush
+SYMBOLIZER_API_LIST+=,__sanitizer_symbolize_demangle
+SYMBOLIZER_API_LIST+=,__sanitizer_symbolize_set_demangle
+SYMBOLIZER_API_LIST+=,__sanitizer_symbolize_set_inline_frames
+
+LIBCXX_ARCHIVE_DIR=$(dirname $(find $LIBCXX_BUILD -name libc++.a | head -n1))
# Merge all the object files together and copy the resulting library back.
-$SCRIPT_DIR/ar_to_bc.sh $LIBCXX_BUILD/lib/libc++.a \
- $LIBCXX_BUILD/lib/libc++abi.a \
- $LLVM_BUILD/lib/libLLVMSymbolize.a \
- $LLVM_BUILD/lib/libLLVMObject.a \
- $LLVM_BUILD/lib/libLLVMBinaryFormat.a \
- $LLVM_BUILD/lib/libLLVMDebugInfoDWARF.a \
- $LLVM_BUILD/lib/libLLVMSupport.a \
- $LLVM_BUILD/lib/libLLVMDebugInfoPDB.a \
- $LLVM_BUILD/lib/libLLVMDebugInfoMSF.a \
- $LLVM_BUILD/lib/libLLVMDebugInfoCodeView.a \
- $LLVM_BUILD/lib/libLLVMDemangle.a \
- $LLVM_BUILD/lib/libLLVMMC.a \
- $LLVM_BUILD/lib/libLLVMTextAPI.a \
- $ZLIB_BUILD/libz.a \
- symbolizer.a \
- all.bc
+$LINK $LIBCXX_ARCHIVE_DIR/libc++.a \
+ $LIBCXX_ARCHIVE_DIR/libc++abi.a \
+ $LLVM_BUILD/lib/libLLVMSymbolize.a \
+ $LLVM_BUILD/lib/libLLVMObject.a \
+ $LLVM_BUILD/lib/libLLVMBinaryFormat.a \
+ $LLVM_BUILD/lib/libLLVMDebugInfoDWARF.a \
+ $LLVM_BUILD/lib/libLLVMSupport.a \
+ $LLVM_BUILD/lib/libLLVMDebugInfoPDB.a \
+ $LLVM_BUILD/lib/libLLVMDebugInfoMSF.a \
+ $LLVM_BUILD/lib/libLLVMDebugInfoCodeView.a \
+ $LLVM_BUILD/lib/libLLVMDebuginfod.a \
+ $LLVM_BUILD/lib/libLLVMDemangle.a \
+ $LLVM_BUILD/lib/libLLVMMC.a \
+ $LLVM_BUILD/lib/libLLVMTextAPI.a \
+ $LLVM_BUILD/lib/libLLVMTargetParser.a \
+ $ZLIB_BUILD/libz.a \
+ symbolizer.a \
+ -ignore-non-bitcode -o all.bc
echo "Optimizing..."
-$OPT -internalize -internalize-public-api-list=${SYMBOLIZER_API_LIST} all.bc -o opt.bc
+$OPT -passes=internalize -internalize-public-api-list=${SYMBOLIZER_API_LIST} all.bc -o opt.bc
$CC $FLAGS -fno-lto -c opt.bc -o symbolizer.o
echo "Checking undefined symbols..."
@@ -183,20 +195,6 @@ nm -f posix -g symbolizer.o | cut -f 1,2 -d \ | LC_COLLATE=C sort -u > undefine
(diff -u $SCRIPT_DIR/global_symbols.txt undefined.new | grep -E "^\+[^+]") && \
(echo "Failed: unexpected symbols"; exit 1)
-arch() {
- objdump -f $1 | grep -m1 -Po "(?<=file format ).*$"
-}
-
-SYMBOLIZER_FORMAT=$(arch symbolizer.o)
-echo "Injecting $SYMBOLIZER_FORMAT symbolizer..."
-for A in $TARGE_DIR/libclang_rt.*san*.a; do
- A_FORMAT=$(arch $A)
- if [[ "$A_FORMAT" != "$SYMBOLIZER_FORMAT" ]] ; then
- continue
- fi
- (nm -u $A 2>/dev/null | grep -E "__sanitizer_symbolize_code" >/dev/null) || continue
- echo "$A"
- $AR rcs $A symbolizer.o
-done
+cp -f symbolizer.o $OUTPUT
echo "Success!"
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt
index 29b2960e11fe..0a4bc6989a0d 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt
@@ -1,4 +1,5 @@
_GLOBAL_OFFSET_TABLE_ U
+_ZN11__sanitizer13internal_mmapEPvjiiiy U
_ZN11__sanitizer13internal_mmapEPvmiiiy U
_ZN11__sanitizer13internal_openEPKcij U
_ZN11__sanitizer13internal_statEPKcPv U
@@ -6,8 +7,17 @@ _ZN11__sanitizer14internal_closeEi U
_ZN11__sanitizer14internal_fstatEiPv U
_ZN11__sanitizer14internal_lstatEPKcPv U
_ZN11__sanitizer15internal_strlenEPKc U
+_ZN11__sanitizer16internal_iserrorEjPi U
_ZN11__sanitizer16internal_iserrorEmPi U
+_ZN11__sanitizer17internal_snprintfEPcjPKcz U
_ZN11__sanitizer17internal_snprintfEPcmPKcz U
+__aarch64_cas8_acq_rel U
+__aarch64_ldadd4_acq_rel U
+__aarch64_ldadd8_acq_rel U
+__aarch64_ldadd8_relax U
+__aarch64_swp8_acq_rel U
+__ashldi3 U
+__ashrdi3 U
__ctype_b_loc U
__ctype_get_mb_cur_max U
__cxa_atexit U
@@ -31,19 +41,35 @@ __interceptor_pthread_setspecific w
__interceptor_read w
__interceptor_realpath w
__isinf U
+__isoc23_sscanf U
+__isoc23_strtol U
+__isoc23_strtoll U
+__isoc23_strtoll_l U
+__isoc23_strtoull U
+__isoc23_strtoull_l U
+__isoc23_vsscanf U
__isoc99_sscanf U
__isoc99_vsscanf U
+__lshrdi3 U
__moddi3 U
+__sanitizer_internal_memcpy U
+__sanitizer_internal_memmove U
+__sanitizer_internal_memset U
__sanitizer_symbolize_code T
__sanitizer_symbolize_data T
__sanitizer_symbolize_demangle T
__sanitizer_symbolize_flush T
+__sanitizer_symbolize_frame T
+__sanitizer_symbolize_set_demangle T
+__sanitizer_symbolize_set_inline_frames T
__strdup U
__udivdi3 U
__umoddi3 U
_exit U
abort U
access U
+aligned_alloc U
+arc4random U
bcmp U
calloc U
catclose U
@@ -51,8 +77,8 @@ catgets U
catopen U
ceil U
ceilf U
-clock_gettime U
cfgetospeed U
+clock_gettime U
dl_iterate_phdr U
dlsym U
dup U
@@ -68,6 +94,7 @@ fopen U
fork U
fprintf U
fputc U
+fputwc U
free U
freelocale U
fwrite U
@@ -76,20 +103,24 @@ getcwd U
getenv U
getpagesize U
getpid U
+getpwuid U
getrlimit U
gettimeofday U
+getuid U
+getwc U
ioctl U
isalnum U
isalpha U
isatty U
islower U
-isspace U
isprint U
+isspace U
isupper U
isxdigit U
log10 U
lseek U
lseek64 U
+madvise U
malloc U
mbrlen U
mbrtowc U
@@ -102,21 +133,29 @@ memcpy U
memmove U
memset U
mkdir U
+modf U
munmap U
newlocale U
perror U
+posix_madvise U
+posix_memalign U
posix_spawn U
posix_spawn_file_actions_adddup2 U
posix_spawn_file_actions_addopen U
posix_spawn_file_actions_destroy U
posix_spawn_file_actions_init U
qsort U
+raise U
rand U
readlink U
realloc U
remove U
+rename U
setrlimit U
setvbuf U
+sigaction U
+sigaltstack U
+sigemptyset U
sigfillset U
sigprocmask U
snprintf U
@@ -138,17 +177,24 @@ strncmp U
strncpy U
strrchr U
strsep U
+strtod U
strtod_l U
strtof_l U
strtok_r U
strtol U
strtold_l U
+strtoll U
strtoll_l U
+strtoull U
strtoull_l U
syscall U
+sysconf U
tcgetattr U
+tolower U
+toupper U
uname U
ungetc U
+ungetwc U
unlink U
uselocale U
vasprintf U
@@ -160,6 +206,7 @@ waitpid U
wcrtomb U
wcslen U
wcsnrtombs U
+wmemchr U
wmemcpy U
wmemmove U
wmemset U
diff --git a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/weak_symbols.txt b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/weak_symbols.txt
index 5a2b275184f4..1eb1ce8d6b9c 100644
--- a/contrib/llvm-project/compiler-rt/lib/sanitizer_common/weak_symbols.txt
+++ b/contrib/llvm-project/compiler-rt/lib/sanitizer_common/weak_symbols.txt
@@ -4,5 +4,8 @@ ___sanitizer_report_error_summary
___sanitizer_sandbox_on_notify
___sanitizer_symbolize_code
___sanitizer_symbolize_data
+___sanitizer_symbolize_frame
___sanitizer_symbolize_demangle
___sanitizer_symbolize_flush
+___sanitizer_symbolize_set_demangle
+___sanitizer_symbolize_set_inline_frames
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_allocator.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/scudo_allocator.cpp
deleted file mode 100644
index 172353fadb1f..000000000000
--- a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_allocator.cpp
+++ /dev/null
@@ -1,830 +0,0 @@
-//===-- scudo_allocator.cpp -------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-///
-/// Scudo Hardened Allocator implementation.
-/// It uses the sanitizer_common allocator as a base and aims at mitigating
-/// heap corruption vulnerabilities. It provides a checksum-guarded chunk
-/// header, a delayed free list, and additional sanity checks.
-///
-//===----------------------------------------------------------------------===//
-
-#include "scudo_allocator.h"
-#include "scudo_crc32.h"
-#include "scudo_errors.h"
-#include "scudo_flags.h"
-#include "scudo_interface_internal.h"
-#include "scudo_tsd.h"
-#include "scudo_utils.h"
-
-#include "sanitizer_common/sanitizer_allocator_checks.h"
-#include "sanitizer_common/sanitizer_allocator_interface.h"
-#include "sanitizer_common/sanitizer_quarantine.h"
-
-#ifdef GWP_ASAN_HOOKS
-# include "gwp_asan/guarded_pool_allocator.h"
-# include "gwp_asan/optional/backtrace.h"
-# include "gwp_asan/optional/options_parser.h"
-#include "gwp_asan/optional/segv_handler.h"
-#endif // GWP_ASAN_HOOKS
-
-#include <errno.h>
-#include <string.h>
-
-namespace __scudo {
-
-// Global static cookie, initialized at start-up.
-static u32 Cookie;
-
-// We default to software CRC32 if the alternatives are not supported, either
-// at compilation or at runtime.
-static atomic_uint8_t HashAlgorithm = { CRC32Software };
-
-inline u32 computeCRC32(u32 Crc, uptr Value, uptr *Array, uptr ArraySize) {
- // If the hardware CRC32 feature is defined here, it was enabled everywhere,
- // as opposed to only for scudo_crc32.cpp. This means that other hardware
- // specific instructions were likely emitted at other places, and as a
- // result there is no reason to not use it here.
-#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
- Crc = CRC32_INTRINSIC(Crc, Value);
- for (uptr i = 0; i < ArraySize; i++)
- Crc = CRC32_INTRINSIC(Crc, Array[i]);
- return Crc;
-#else
- if (atomic_load_relaxed(&HashAlgorithm) == CRC32Hardware) {
- Crc = computeHardwareCRC32(Crc, Value);
- for (uptr i = 0; i < ArraySize; i++)
- Crc = computeHardwareCRC32(Crc, Array[i]);
- return Crc;
- }
- Crc = computeSoftwareCRC32(Crc, Value);
- for (uptr i = 0; i < ArraySize; i++)
- Crc = computeSoftwareCRC32(Crc, Array[i]);
- return Crc;
-#endif // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
-}
-
-static BackendT &getBackend();
-
-namespace Chunk {
- static inline AtomicPackedHeader *getAtomicHeader(void *Ptr) {
- return reinterpret_cast<AtomicPackedHeader *>(reinterpret_cast<uptr>(Ptr) -
- getHeaderSize());
- }
- static inline
- const AtomicPackedHeader *getConstAtomicHeader(const void *Ptr) {
- return reinterpret_cast<const AtomicPackedHeader *>(
- reinterpret_cast<uptr>(Ptr) - getHeaderSize());
- }
-
- static inline bool isAligned(const void *Ptr) {
- return IsAligned(reinterpret_cast<uptr>(Ptr), MinAlignment);
- }
-
- // We can't use the offset member of the chunk itself, as we would double
- // fetch it without any warranty that it wouldn't have been tampered. To
- // prevent this, we work with a local copy of the header.
- static inline void *getBackendPtr(const void *Ptr, UnpackedHeader *Header) {
- return reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
- getHeaderSize() - (Header->Offset << MinAlignmentLog));
- }
-
- // Returns the usable size for a chunk, meaning the amount of bytes from the
- // beginning of the user data to the end of the backend allocated chunk.
- static inline uptr getUsableSize(const void *Ptr, UnpackedHeader *Header) {
- const uptr ClassId = Header->ClassId;
- if (ClassId)
- return PrimaryT::ClassIdToSize(ClassId) - getHeaderSize() -
- (Header->Offset << MinAlignmentLog);
- return SecondaryT::GetActuallyAllocatedSize(
- getBackendPtr(Ptr, Header)) - getHeaderSize();
- }
-
- // Returns the size the user requested when allocating the chunk.
- static inline uptr getSize(const void *Ptr, UnpackedHeader *Header) {
- const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
- if (Header->ClassId)
- return SizeOrUnusedBytes;
- return SecondaryT::GetActuallyAllocatedSize(
- getBackendPtr(Ptr, Header)) - getHeaderSize() - SizeOrUnusedBytes;
- }
-
- // Compute the checksum of the chunk pointer and its header.
- static inline u16 computeChecksum(const void *Ptr, UnpackedHeader *Header) {
- UnpackedHeader ZeroChecksumHeader = *Header;
- ZeroChecksumHeader.Checksum = 0;
- uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)];
- memcpy(&HeaderHolder, &ZeroChecksumHeader, sizeof(HeaderHolder));
- const u32 Crc = computeCRC32(Cookie, reinterpret_cast<uptr>(Ptr),
- HeaderHolder, ARRAY_SIZE(HeaderHolder));
- return static_cast<u16>(Crc);
- }
-
-  // Checks the validity of a chunk by verifying its checksum. It does not
-  // terminate the process in the event of an invalid chunk.
- static inline bool isValid(const void *Ptr) {
- PackedHeader NewPackedHeader =
- atomic_load_relaxed(getConstAtomicHeader(Ptr));
- UnpackedHeader NewUnpackedHeader =
- bit_cast<UnpackedHeader>(NewPackedHeader);
- return (NewUnpackedHeader.Checksum ==
- computeChecksum(Ptr, &NewUnpackedHeader));
- }
-
-  // Ensure that ChunkAvailable is 0, so that if a 0 checksum is ever valid
-  // for a fully zeroed-out header, its state will read as available anyway.
- COMPILER_CHECK(ChunkAvailable == 0);
-
- // Loads and unpacks the header, verifying the checksum in the process.
- static inline
- void loadHeader(const void *Ptr, UnpackedHeader *NewUnpackedHeader) {
- PackedHeader NewPackedHeader =
- atomic_load_relaxed(getConstAtomicHeader(Ptr));
- *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
- if (UNLIKELY(NewUnpackedHeader->Checksum !=
- computeChecksum(Ptr, NewUnpackedHeader)))
- dieWithMessage("corrupted chunk header at address %p\n", Ptr);
- }
-
- // Packs and stores the header, computing the checksum in the process.
- static inline void storeHeader(void *Ptr, UnpackedHeader *NewUnpackedHeader) {
- NewUnpackedHeader->Checksum = computeChecksum(Ptr, NewUnpackedHeader);
- PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
- atomic_store_relaxed(getAtomicHeader(Ptr), NewPackedHeader);
- }
-
-  // Packs and stores the header, computing the checksum in the process. We
-  // compare the current header with the expected one provided to ensure that
-  // we are not being raced by a corruption occurring in another thread.
- static inline void compareExchangeHeader(void *Ptr,
- UnpackedHeader *NewUnpackedHeader,
- UnpackedHeader *OldUnpackedHeader) {
- NewUnpackedHeader->Checksum = computeChecksum(Ptr, NewUnpackedHeader);
- PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
- PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
- if (UNLIKELY(!atomic_compare_exchange_strong(
- getAtomicHeader(Ptr), &OldPackedHeader, NewPackedHeader,
- memory_order_relaxed)))
- dieWithMessage("race on chunk header at address %p\n", Ptr);
- }
-} // namespace Chunk
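-
-// Editorial usage sketch (not part of the original source), showing the
-// header update pattern used throughout this file for a hypothetical
-// allocated pointer Ptr: load and validate, mutate a local copy, then
-// compare-exchange so that concurrent corruption is caught.
-//   UnpackedHeader Old;
-//   Chunk::loadHeader(Ptr, &Old);                  // dies on bad checksum
-//   UnpackedHeader New = Old;
-//   New.State = ChunkQuarantine;
-//   Chunk::compareExchangeHeader(Ptr, &New, &Old); // dies if raced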
-
-struct QuarantineCallback {
- explicit QuarantineCallback(AllocatorCacheT *Cache)
- : Cache_(Cache) {}
-
-  // Chunk recycling function: returns a quarantined chunk to the backend,
-  // first making sure it hasn't been tampered with.
- void Recycle(void *Ptr) {
- UnpackedHeader Header;
- Chunk::loadHeader(Ptr, &Header);
- if (UNLIKELY(Header.State != ChunkQuarantine))
- dieWithMessage("invalid chunk state when recycling address %p\n", Ptr);
- UnpackedHeader NewHeader = Header;
- NewHeader.State = ChunkAvailable;
- Chunk::compareExchangeHeader(Ptr, &NewHeader, &Header);
- void *BackendPtr = Chunk::getBackendPtr(Ptr, &Header);
- if (Header.ClassId)
- getBackend().deallocatePrimary(Cache_, BackendPtr, Header.ClassId);
- else
- getBackend().deallocateSecondary(BackendPtr);
- }
-
- // Internal quarantine allocation and deallocation functions. We first check
- // that the batches are indeed serviced by the Primary.
- // TODO(kostyak): figure out the best way to protect the batches.
- void *Allocate(uptr Size) {
- const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch));
- return getBackend().allocatePrimary(Cache_, BatchClassId);
- }
-
- void Deallocate(void *Ptr) {
- const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch));
- getBackend().deallocatePrimary(Cache_, Ptr, BatchClassId);
- }
-
- AllocatorCacheT *Cache_;
- COMPILER_CHECK(sizeof(QuarantineBatch) < SizeClassMap::kMaxSize);
-};
-
-typedef Quarantine<QuarantineCallback, void> QuarantineT;
-typedef QuarantineT::Cache QuarantineCacheT;
-COMPILER_CHECK(sizeof(QuarantineCacheT) <=
- sizeof(ScudoTSD::QuarantineCachePlaceHolder));
-
-QuarantineCacheT *getQuarantineCache(ScudoTSD *TSD) {
- return reinterpret_cast<QuarantineCacheT *>(TSD->QuarantineCachePlaceHolder);
-}
-
-#ifdef GWP_ASAN_HOOKS
-static gwp_asan::GuardedPoolAllocator GuardedAlloc;
-#endif // GWP_ASAN_HOOKS
-
-struct Allocator {
- static const uptr MaxAllowedMallocSize =
- FIRST_32_SECOND_64(2UL << 30, 1ULL << 40);
-
- BackendT Backend;
- QuarantineT Quarantine;
-
- u32 QuarantineChunksUpToSize;
-
- bool DeallocationTypeMismatch;
- bool ZeroContents;
- bool DeleteSizeMismatch;
-
- bool CheckRssLimit;
- uptr HardRssLimitMb;
- uptr SoftRssLimitMb;
- atomic_uint8_t RssLimitExceeded;
- atomic_uint64_t RssLastCheckedAtNS;
-
- explicit Allocator(LinkerInitialized)
- : Quarantine(LINKER_INITIALIZED) {}
-
- NOINLINE void performSanityChecks();
-
- void init() {
- SanitizerToolName = "Scudo";
- PrimaryAllocatorName = "ScudoPrimary";
- SecondaryAllocatorName = "ScudoSecondary";
-
- initFlags();
-
- performSanityChecks();
-
- // Check if hardware CRC32 is supported in the binary and by the platform,
- // if so, opt for the CRC32 hardware version of the checksum.
- if (&computeHardwareCRC32 && hasHardwareCRC32())
- atomic_store_relaxed(&HashAlgorithm, CRC32Hardware);
-
- SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
- Backend.init(common_flags()->allocator_release_to_os_interval_ms);
- HardRssLimitMb = common_flags()->hard_rss_limit_mb;
- SoftRssLimitMb = common_flags()->soft_rss_limit_mb;
- Quarantine.Init(
- static_cast<uptr>(getFlags()->QuarantineSizeKb) << 10,
- static_cast<uptr>(getFlags()->ThreadLocalQuarantineSizeKb) << 10);
- QuarantineChunksUpToSize = (Quarantine.GetCacheSize() == 0) ? 0 :
- getFlags()->QuarantineChunksUpToSize;
- DeallocationTypeMismatch = getFlags()->DeallocationTypeMismatch;
- DeleteSizeMismatch = getFlags()->DeleteSizeMismatch;
- ZeroContents = getFlags()->ZeroContents;
-
- if (UNLIKELY(!GetRandom(reinterpret_cast<void *>(&Cookie), sizeof(Cookie),
- /*blocking=*/false))) {
- Cookie = static_cast<u32>((NanoTime() >> 12) ^
- (reinterpret_cast<uptr>(this) >> 4));
- }
-
- CheckRssLimit = HardRssLimitMb || SoftRssLimitMb;
- if (CheckRssLimit)
- atomic_store_relaxed(&RssLastCheckedAtNS, MonotonicNanoTime());
- }
-
-  // Helper function that checks for a valid Scudo chunk. nullptr is not one.
- bool isValidPointer(const void *Ptr) {
- initThreadMaybe();
- if (UNLIKELY(!Ptr))
- return false;
- if (!Chunk::isAligned(Ptr))
- return false;
- return Chunk::isValid(Ptr);
- }
-
- NOINLINE bool isRssLimitExceeded();
-
- // Allocates a chunk.
- void *allocate(uptr Size, uptr Alignment, AllocType Type,
- bool ForceZeroContents = false) NO_THREAD_SAFETY_ANALYSIS {
- initThreadMaybe();
-
- if (UNLIKELY(Alignment > MaxAlignment)) {
- if (AllocatorMayReturnNull())
- return nullptr;
- reportAllocationAlignmentTooBig(Alignment, MaxAlignment);
- }
- if (UNLIKELY(Alignment < MinAlignment))
- Alignment = MinAlignment;
-
-#ifdef GWP_ASAN_HOOKS
- if (UNLIKELY(GuardedAlloc.shouldSample())) {
- if (void *Ptr = GuardedAlloc.allocate(Size, Alignment)) {
- if (SCUDO_CAN_USE_HOOKS && &__sanitizer_malloc_hook)
- __sanitizer_malloc_hook(Ptr, Size);
- return Ptr;
- }
- }
-#endif // GWP_ASAN_HOOKS
-
- const uptr NeededSize = RoundUpTo(Size ? Size : 1, MinAlignment) +
- Chunk::getHeaderSize();
- const uptr AlignedSize = (Alignment > MinAlignment) ?
- NeededSize + (Alignment - Chunk::getHeaderSize()) : NeededSize;
- if (UNLIKELY(Size >= MaxAllowedMallocSize) ||
- UNLIKELY(AlignedSize >= MaxAllowedMallocSize)) {
- if (AllocatorMayReturnNull())
- return nullptr;
- reportAllocationSizeTooBig(Size, AlignedSize, MaxAllowedMallocSize);
- }
-
- if (CheckRssLimit && UNLIKELY(isRssLimitExceeded())) {
- if (AllocatorMayReturnNull())
- return nullptr;
- reportRssLimitExceeded();
- }
-
-    // Primary and Secondary backed allocations are treated differently. We
-    // deal with the alignment requirements of Primary serviced allocations
-    // here, but the Secondary takes care of its own alignment needs.
- void *BackendPtr;
- uptr BackendSize;
- u8 ClassId;
- if (PrimaryT::CanAllocate(AlignedSize, MinAlignment)) {
- BackendSize = AlignedSize;
- ClassId = SizeClassMap::ClassID(BackendSize);
- bool UnlockRequired;
- ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
- BackendPtr = Backend.allocatePrimary(&TSD->Cache, ClassId);
- if (UnlockRequired)
- TSD->unlock();
- } else {
- BackendSize = NeededSize;
- ClassId = 0;
- BackendPtr = Backend.allocateSecondary(BackendSize, Alignment);
- }
- if (UNLIKELY(!BackendPtr)) {
- SetAllocatorOutOfMemory();
- if (AllocatorMayReturnNull())
- return nullptr;
- reportOutOfMemory(Size);
- }
-
- // If requested, we will zero out the entire contents of the returned chunk.
- if ((ForceZeroContents || ZeroContents) && ClassId)
- memset(BackendPtr, 0, PrimaryT::ClassIdToSize(ClassId));
-
- UnpackedHeader Header = {};
- uptr UserPtr = reinterpret_cast<uptr>(BackendPtr) + Chunk::getHeaderSize();
- if (UNLIKELY(!IsAligned(UserPtr, Alignment))) {
- // Since the Secondary takes care of alignment, a non-aligned pointer
- // means it is from the Primary. It is also the only case where the offset
- // field of the header would be non-zero.
- DCHECK(ClassId);
- const uptr AlignedUserPtr = RoundUpTo(UserPtr, Alignment);
- Header.Offset = (AlignedUserPtr - UserPtr) >> MinAlignmentLog;
- UserPtr = AlignedUserPtr;
- }
- DCHECK_LE(UserPtr + Size, reinterpret_cast<uptr>(BackendPtr) + BackendSize);
- Header.State = ChunkAllocated;
- Header.AllocType = Type;
- if (ClassId) {
- Header.ClassId = ClassId;
- Header.SizeOrUnusedBytes = Size;
- } else {
- // The secondary fits the allocations to a page, so the amount of unused
- // bytes is the difference between the end of the user allocation and the
- // next page boundary.
- const uptr PageSize = GetPageSizeCached();
- const uptr TrailingBytes = (UserPtr + Size) & (PageSize - 1);
- if (TrailingBytes)
- Header.SizeOrUnusedBytes = PageSize - TrailingBytes;
- }
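-    // Worked example (editorial): with PageSize == 4096 and the user data
-    // ending 100 bytes into its final page, TrailingBytes == 100 and
-    // SizeOrUnusedBytes == 3996; getSize() later recovers the requested size
-    // by subtracting these unused bytes from the backend allocation size.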
- void *Ptr = reinterpret_cast<void *>(UserPtr);
- Chunk::storeHeader(Ptr, &Header);
- if (SCUDO_CAN_USE_HOOKS && &__sanitizer_malloc_hook)
- __sanitizer_malloc_hook(Ptr, Size);
- return Ptr;
- }
-
- // Place a chunk in the quarantine or directly deallocate it in the event of
- // a zero-sized quarantine, or if the size of the chunk is greater than the
- // quarantine chunk size threshold.
- void quarantineOrDeallocateChunk(void *Ptr, UnpackedHeader *Header,
- uptr Size) NO_THREAD_SAFETY_ANALYSIS {
- const bool BypassQuarantine = !Size || (Size > QuarantineChunksUpToSize);
- if (BypassQuarantine) {
- UnpackedHeader NewHeader = *Header;
- NewHeader.State = ChunkAvailable;
- Chunk::compareExchangeHeader(Ptr, &NewHeader, Header);
- void *BackendPtr = Chunk::getBackendPtr(Ptr, Header);
- if (Header->ClassId) {
- bool UnlockRequired;
- ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
- getBackend().deallocatePrimary(&TSD->Cache, BackendPtr,
- Header->ClassId);
- if (UnlockRequired)
- TSD->unlock();
- } else {
- getBackend().deallocateSecondary(BackendPtr);
- }
- } else {
-      // If a small amount of memory was allocated with a larger alignment, we
-      // want to take that into account. Otherwise the Quarantine would be
-      // filled with tiny chunks, taking up a lot of VA memory. This is an
-      // approximation of the usable size, which lets us avoid calling
-      // GetActuallyAllocatedSize.
- const uptr EstimatedSize = Size + (Header->Offset << MinAlignmentLog);
- UnpackedHeader NewHeader = *Header;
- NewHeader.State = ChunkQuarantine;
- Chunk::compareExchangeHeader(Ptr, &NewHeader, Header);
- bool UnlockRequired;
- ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
- Quarantine.Put(getQuarantineCache(TSD), QuarantineCallback(&TSD->Cache),
- Ptr, EstimatedSize);
- if (UnlockRequired)
- TSD->unlock();
- }
- }
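-
-  // Editorial illustration (threshold value assumed): with
-  // QuarantineChunksUpToSize == 2048, freeing a 128-byte chunk puts it in the
-  // quarantine, while freeing a 4096-byte chunk bypasses it and returns the
-  // memory to the backend immediately, as does freeing a 0-sized chunk.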
-
- // Deallocates a Chunk, which means either adding it to the quarantine or
- // directly returning it to the backend if criteria are met.
- void deallocate(void *Ptr, uptr DeleteSize, uptr DeleteAlignment,
- AllocType Type) {
-    // For a deallocation, we only ensure minimal initialization, meaning
-    // thread-local data will be left uninitialized for now (when using ELF
-    // TLS). The fallback cache will be used instead. This works around a
-    // situation where the only heap operation performed in a thread is a free
-    // past the TLS destructors, which would leave initialized thread specific
-    // data never properly destroyed. Any other heap operation does a full
-    // init.
- initThreadMaybe(/*MinimalInit=*/true);
- if (SCUDO_CAN_USE_HOOKS && &__sanitizer_free_hook)
- __sanitizer_free_hook(Ptr);
- if (UNLIKELY(!Ptr))
- return;
-
-#ifdef GWP_ASAN_HOOKS
- if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr))) {
- GuardedAlloc.deallocate(Ptr);
- return;
- }
-#endif // GWP_ASAN_HOOKS
-
- if (UNLIKELY(!Chunk::isAligned(Ptr)))
- dieWithMessage("misaligned pointer when deallocating address %p\n", Ptr);
- UnpackedHeader Header;
- Chunk::loadHeader(Ptr, &Header);
- if (UNLIKELY(Header.State != ChunkAllocated))
- dieWithMessage("invalid chunk state when deallocating address %p\n", Ptr);
- if (DeallocationTypeMismatch) {
- // The deallocation type has to match the allocation one.
- if (Header.AllocType != Type) {
-        // With the exception of memalign'd chunks, which can still be free'd.
- if (Header.AllocType != FromMemalign || Type != FromMalloc)
- dieWithMessage("allocation type mismatch when deallocating address "
- "%p\n", Ptr);
- }
- }
- const uptr Size = Chunk::getSize(Ptr, &Header);
- if (DeleteSizeMismatch) {
- if (DeleteSize && DeleteSize != Size)
- dieWithMessage("invalid sized delete when deallocating address %p\n",
- Ptr);
- }
- (void)DeleteAlignment; // TODO(kostyak): verify that the alignment matches.
- quarantineOrDeallocateChunk(Ptr, &Header, Size);
- }
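-
-  // Editorial illustration (assumes DeallocationTypeMismatch == true):
-  //   free(operator new(16));   // dies: allocation type mismatch
-  //   free(memalign(32, 64));   // allowed: the memalign exception above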
-
- // Reallocates a chunk. We can save on a new allocation if the new requested
- // size still fits in the chunk.
- void *reallocate(void *OldPtr, uptr NewSize) {
- initThreadMaybe();
-
-#ifdef GWP_ASAN_HOOKS
- if (UNLIKELY(GuardedAlloc.pointerIsMine(OldPtr))) {
- size_t OldSize = GuardedAlloc.getSize(OldPtr);
- void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
- if (NewPtr)
- memcpy(NewPtr, OldPtr, (NewSize < OldSize) ? NewSize : OldSize);
- GuardedAlloc.deallocate(OldPtr);
- return NewPtr;
- }
-#endif // GWP_ASAN_HOOKS
-
- if (UNLIKELY(!Chunk::isAligned(OldPtr)))
- dieWithMessage("misaligned address when reallocating address %p\n",
- OldPtr);
- UnpackedHeader OldHeader;
- Chunk::loadHeader(OldPtr, &OldHeader);
- if (UNLIKELY(OldHeader.State != ChunkAllocated))
- dieWithMessage("invalid chunk state when reallocating address %p\n",
- OldPtr);
- if (DeallocationTypeMismatch) {
- if (UNLIKELY(OldHeader.AllocType != FromMalloc))
- dieWithMessage("allocation type mismatch when reallocating address "
- "%p\n", OldPtr);
- }
- const uptr UsableSize = Chunk::getUsableSize(OldPtr, &OldHeader);
- // The new size still fits in the current chunk, and the size difference
- // is reasonable.
- if (NewSize <= UsableSize &&
- (UsableSize - NewSize) < (SizeClassMap::kMaxSize / 2)) {
- UnpackedHeader NewHeader = OldHeader;
- NewHeader.SizeOrUnusedBytes =
- OldHeader.ClassId ? NewSize : UsableSize - NewSize;
- Chunk::compareExchangeHeader(OldPtr, &NewHeader, &OldHeader);
- return OldPtr;
- }
- // Otherwise, we have to allocate a new chunk and copy the contents of the
- // old one.
- void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
- if (NewPtr) {
- const uptr OldSize = OldHeader.ClassId ? OldHeader.SizeOrUnusedBytes :
- UsableSize - OldHeader.SizeOrUnusedBytes;
- memcpy(NewPtr, OldPtr, Min(NewSize, UsableSize));
- quarantineOrDeallocateChunk(OldPtr, &OldHeader, OldSize);
- }
- return NewPtr;
- }
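-
-  // Worked example (editorial, sizes assumed): for a chunk with
-  // UsableSize == 1024, reallocate(Ptr, 900) is served in place since the
-  // 124-byte difference is below SizeClassMap::kMaxSize / 2, whereas
-  // reallocate(Ptr, 4096) allocates a new chunk, copies
-  // Min(4096, 1024) == 1024 bytes, and quarantines the old chunk.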
-
- // Helper function that returns the actual usable size of a chunk.
- uptr getUsableSize(const void *Ptr) {
- initThreadMaybe();
- if (UNLIKELY(!Ptr))
- return 0;
-
-#ifdef GWP_ASAN_HOOKS
- if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr)))
- return GuardedAlloc.getSize(Ptr);
-#endif // GWP_ASAN_HOOKS
-
- UnpackedHeader Header;
- Chunk::loadHeader(Ptr, &Header);
- // Getting the usable size of a chunk only makes sense if it's allocated.
- if (UNLIKELY(Header.State != ChunkAllocated))
- dieWithMessage("invalid chunk state when sizing address %p\n", Ptr);
- return Chunk::getUsableSize(Ptr, &Header);
- }
-
- void *calloc(uptr NMemB, uptr Size) {
- initThreadMaybe();
- if (UNLIKELY(CheckForCallocOverflow(NMemB, Size))) {
- if (AllocatorMayReturnNull())
- return nullptr;
- reportCallocOverflow(NMemB, Size);
- }
- return allocate(NMemB * Size, MinAlignment, FromMalloc, true);
- }
-
- void commitBack(ScudoTSD *TSD) {
- Quarantine.Drain(getQuarantineCache(TSD), QuarantineCallback(&TSD->Cache));
- Backend.destroyCache(&TSD->Cache);
- }
-
- uptr getStats(AllocatorStat StatType) {
- initThreadMaybe();
- uptr stats[AllocatorStatCount];
- Backend.getStats(stats);
- return stats[StatType];
- }
-
- bool canReturnNull() {
- initThreadMaybe();
- return AllocatorMayReturnNull();
- }
-
- void setRssLimit(uptr LimitMb, bool HardLimit) {
- if (HardLimit)
- HardRssLimitMb = LimitMb;
- else
- SoftRssLimitMb = LimitMb;
- CheckRssLimit = HardRssLimitMb || SoftRssLimitMb;
- }
-
- void printStats() {
- initThreadMaybe();
- Backend.printStats();
- }
-};
-
-NOINLINE void Allocator::performSanityChecks() {
- // Verify that the header offset field can hold the maximum offset. In the
- // case of the Secondary allocator, it takes care of alignment and the
- // offset will always be 0. In the case of the Primary, the worst case
- // scenario happens in the last size class, when the backend allocation
- // would already be aligned on the requested alignment, which would happen
- // to be the maximum alignment that would fit in that size class. As a
- // result, the maximum offset will be at most the maximum alignment for the
- // last size class minus the header size, in multiples of MinAlignment.
- UnpackedHeader Header = {};
- const uptr MaxPrimaryAlignment =
- 1 << MostSignificantSetBitIndex(SizeClassMap::kMaxSize - MinAlignment);
- const uptr MaxOffset =
- (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog;
- Header.Offset = MaxOffset;
- if (Header.Offset != MaxOffset)
- dieWithMessage("maximum possible offset doesn't fit in header\n");
- // Verify that we can fit the maximum size or amount of unused bytes in the
- // header. Given that the Secondary fits the allocation to a page, the worst
- // case scenario happens in the Primary. It will depend on the second to
- // last and last class sizes, as well as the dynamic base for the Primary.
- // The following is an over-approximation that works for our needs.
- const uptr MaxSizeOrUnusedBytes = SizeClassMap::kMaxSize - 1;
- Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
- if (Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes)
- dieWithMessage("maximum possible unused bytes doesn't fit in header\n");
-
- const uptr LargestClassId = SizeClassMap::kLargestClassID;
- Header.ClassId = LargestClassId;
- if (Header.ClassId != LargestClassId)
- dieWithMessage("largest class ID doesn't fit in header\n");
-}
-
-// Opportunistic RSS limit check. This will update the RSS limit status at
-// most once every 250ms if it can; otherwise it just returns the current one.
-NOINLINE bool Allocator::isRssLimitExceeded() {
- u64 LastCheck = atomic_load_relaxed(&RssLastCheckedAtNS);
- const u64 CurrentCheck = MonotonicNanoTime();
- if (LIKELY(CurrentCheck < LastCheck + (250ULL * 1000000ULL)))
- return atomic_load_relaxed(&RssLimitExceeded);
- if (!atomic_compare_exchange_weak(&RssLastCheckedAtNS, &LastCheck,
- CurrentCheck, memory_order_relaxed))
- return atomic_load_relaxed(&RssLimitExceeded);
- // TODO(kostyak): We currently use sanitizer_common's GetRSS which reads the
- // RSS from /proc/self/statm by default. We might want to
- // call getrusage directly, even if it's less accurate.
- const uptr CurrentRssMb = GetRSS() >> 20;
- if (HardRssLimitMb && UNLIKELY(HardRssLimitMb < CurrentRssMb))
- dieWithMessage("hard RSS limit exhausted (%zdMb vs %zdMb)\n",
- HardRssLimitMb, CurrentRssMb);
- if (SoftRssLimitMb) {
- if (atomic_load_relaxed(&RssLimitExceeded)) {
- if (CurrentRssMb <= SoftRssLimitMb)
- atomic_store_relaxed(&RssLimitExceeded, false);
- } else {
- if (CurrentRssMb > SoftRssLimitMb) {
- atomic_store_relaxed(&RssLimitExceeded, true);
- Printf("Scudo INFO: soft RSS limit exhausted (%zdMb vs %zdMb)\n",
- SoftRssLimitMb, CurrentRssMb);
- }
- }
- }
- return atomic_load_relaxed(&RssLimitExceeded);
-}
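-
-// Editorial illustration of the soft-limit hysteresis above: with
-// soft_rss_limit_mb=512, crossing 512Mb sets RssLimitExceeded and logs the
-// INFO message once; subsequent allocations then either return null or die
-// (per allocator_may_return_null) until the RSS drops back to 512Mb or
-// below, which clears the flag.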
-
-static Allocator Instance(LINKER_INITIALIZED);
-
-static BackendT &getBackend() {
- return Instance.Backend;
-}
-
-void initScudo() {
- Instance.init();
-#ifdef GWP_ASAN_HOOKS
- gwp_asan::options::initOptions(__sanitizer::GetEnv("GWP_ASAN_OPTIONS"),
- Printf);
- gwp_asan::options::Options &Opts = gwp_asan::options::getOptions();
- Opts.Backtrace = gwp_asan::backtrace::getBacktraceFunction();
- GuardedAlloc.init(Opts);
-
- if (Opts.InstallSignalHandlers)
- gwp_asan::segv_handler::installSignalHandlers(
- &GuardedAlloc, __sanitizer::Printf,
- gwp_asan::backtrace::getPrintBacktraceFunction(),
- gwp_asan::backtrace::getSegvBacktraceFunction());
-#endif // GWP_ASAN_HOOKS
-}
-
-void ScudoTSD::init() {
- getBackend().initCache(&Cache);
- memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder));
-}
-
-void ScudoTSD::commitBack() {
- Instance.commitBack(this);
-}
-
-void *scudoAllocate(uptr Size, uptr Alignment, AllocType Type) {
- if (Alignment && UNLIKELY(!IsPowerOfTwo(Alignment))) {
- errno = EINVAL;
- if (Instance.canReturnNull())
- return nullptr;
- reportAllocationAlignmentNotPowerOfTwo(Alignment);
- }
- return SetErrnoOnNull(Instance.allocate(Size, Alignment, Type));
-}
-
-void scudoDeallocate(void *Ptr, uptr Size, uptr Alignment, AllocType Type) {
- Instance.deallocate(Ptr, Size, Alignment, Type);
-}
-
-void *scudoRealloc(void *Ptr, uptr Size) {
- if (!Ptr)
- return SetErrnoOnNull(Instance.allocate(Size, MinAlignment, FromMalloc));
- if (Size == 0) {
- Instance.deallocate(Ptr, 0, 0, FromMalloc);
- return nullptr;
- }
- return SetErrnoOnNull(Instance.reallocate(Ptr, Size));
-}
-
-void *scudoCalloc(uptr NMemB, uptr Size) {
- return SetErrnoOnNull(Instance.calloc(NMemB, Size));
-}
-
-void *scudoValloc(uptr Size) {
- return SetErrnoOnNull(
- Instance.allocate(Size, GetPageSizeCached(), FromMemalign));
-}
-
-void *scudoPvalloc(uptr Size) {
- const uptr PageSize = GetPageSizeCached();
- if (UNLIKELY(CheckForPvallocOverflow(Size, PageSize))) {
- errno = ENOMEM;
- if (Instance.canReturnNull())
- return nullptr;
- reportPvallocOverflow(Size);
- }
- // pvalloc(0) should allocate one page.
- Size = Size ? RoundUpTo(Size, PageSize) : PageSize;
- return SetErrnoOnNull(Instance.allocate(Size, PageSize, FromMemalign));
-}
-
-int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) {
- if (UNLIKELY(!CheckPosixMemalignAlignment(Alignment))) {
- if (!Instance.canReturnNull())
- reportInvalidPosixMemalignAlignment(Alignment);
- return EINVAL;
- }
- void *Ptr = Instance.allocate(Size, Alignment, FromMemalign);
- if (UNLIKELY(!Ptr))
- return ENOMEM;
- *MemPtr = Ptr;
- return 0;
-}
-
-void *scudoAlignedAlloc(uptr Alignment, uptr Size) {
- if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(Alignment, Size))) {
- errno = EINVAL;
- if (Instance.canReturnNull())
- return nullptr;
- reportInvalidAlignedAllocAlignment(Size, Alignment);
- }
- return SetErrnoOnNull(Instance.allocate(Size, Alignment, FromMalloc));
-}
-
-uptr scudoMallocUsableSize(void *Ptr) {
- return Instance.getUsableSize(Ptr);
-}
-
-} // namespace __scudo
-
-using namespace __scudo;
-
-// MallocExtension helper functions
-
-uptr __sanitizer_get_current_allocated_bytes() {
- return Instance.getStats(AllocatorStatAllocated);
-}
-
-uptr __sanitizer_get_heap_size() {
- return Instance.getStats(AllocatorStatMapped);
-}
-
-uptr __sanitizer_get_free_bytes() {
- return 1;
-}
-
-uptr __sanitizer_get_unmapped_bytes() {
- return 1;
-}
-
-uptr __sanitizer_get_estimated_allocated_size(uptr Size) {
- return Size;
-}
-
-int __sanitizer_get_ownership(const void *Ptr) {
- return Instance.isValidPointer(Ptr);
-}
-
-uptr __sanitizer_get_allocated_size(const void *Ptr) {
- return Instance.getUsableSize(Ptr);
-}
-
-#if !SANITIZER_SUPPORTS_WEAK_HOOKS
-SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook,
- void *Ptr, uptr Size) {
- (void)Ptr;
- (void)Size;
-}
-
-SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *Ptr) {
- (void)Ptr;
-}
-#endif
-
-// Interface functions
-
-void __scudo_set_rss_limit(uptr LimitMb, s32 HardLimit) {
- if (!SCUDO_CAN_USE_PUBLIC_INTERFACE)
- return;
- Instance.setRssLimit(LimitMb, !!HardLimit);
-}
-
-void __scudo_print_stats() {
- Instance.printStats();
-}
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_allocator.h b/contrib/llvm-project/compiler-rt/lib/scudo/scudo_allocator.h
deleted file mode 100644
index 0efa5c520296..000000000000
--- a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_allocator.h
+++ /dev/null
@@ -1,125 +0,0 @@
-//===-- scudo_allocator.h ---------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-///
-/// Header for scudo_allocator.cpp.
-///
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_ALLOCATOR_H_
-#define SCUDO_ALLOCATOR_H_
-
-#include "scudo_platform.h"
-
-namespace __scudo {
-
-enum AllocType : u8 {
- FromMalloc = 0, // Memory block came from malloc, realloc, calloc, etc.
- FromNew = 1, // Memory block came from operator new.
- FromNewArray = 2, // Memory block came from operator new [].
- FromMemalign = 3, // Memory block came from memalign, posix_memalign, etc.
-};
-
-enum ChunkState : u8 {
- ChunkAvailable = 0,
- ChunkAllocated = 1,
- ChunkQuarantine = 2
-};
-
-// Our header requires 64 bits of storage. Having the offset saves us from
-// using functions such as GetBlockBegin, which are fairly costly. Our first
-// implementation used the MetaData as well, which offers the advantage of
-// being stored away from the chunk itself, but accessing it was costly as
-// well. The header will be atomically loaded and stored.
-typedef u64 PackedHeader;
-struct UnpackedHeader {
- u64 Checksum : 16;
- u64 ClassId : 8;
- u64 SizeOrUnusedBytes : 20; // Size for Primary backed allocations, amount of
- // unused bytes in the chunk for Secondary ones.
- u64 State : 2; // available, allocated, or quarantined
- u64 AllocType : 2; // malloc, new, new[], or memalign
- u64 Offset : 16; // Offset from the beginning of the backend
- // allocation to the beginning of the chunk
- // itself, in multiples of MinAlignment. See
- // comment about its maximum value and in init().
-};
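-
-// Editorial check (not part of the original source): the bit-field widths
-// above sum to exactly the 64 bits of PackedHeader:
-//   16 + 8 + 20 + 2 + 2 + 16 == 64
-// so an equivalent compile-time guard would be:
-//   static_assert(16 + 8 + 20 + 2 + 2 + 16 == 64, "header must fill u64");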
-
-typedef atomic_uint64_t AtomicPackedHeader;
-COMPILER_CHECK(sizeof(UnpackedHeader) == sizeof(PackedHeader));
-
-// Minimum alignment of 8 bytes for 32-bit, 16 for 64-bit
-const uptr MinAlignmentLog = FIRST_32_SECOND_64(3, 4);
-const uptr MaxAlignmentLog = 24; // 16 MB
-const uptr MinAlignment = 1 << MinAlignmentLog;
-const uptr MaxAlignment = 1 << MaxAlignmentLog;
-
-// constexpr version of __sanitizer::RoundUp without the extraneous CHECK.
-// This way we can use it in constexpr variables and function declarations.
-constexpr uptr RoundUpTo(uptr Size, uptr Boundary) {
- return (Size + Boundary - 1) & ~(Boundary - 1);
-}
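-
-// Worked example (editorial): RoundUpTo(24, 16) == 32 and
-// RoundUpTo(16, 16) == 16. Boundary must be a power of two, which holds for
-// every caller in this file (alignments and page sizes).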
-
-namespace Chunk {
- constexpr uptr getHeaderSize() {
- return RoundUpTo(sizeof(PackedHeader), MinAlignment);
- }
-}
-
-#if SANITIZER_CAN_USE_ALLOCATOR64
-const uptr AllocatorSpace = ~0ULL;
-struct AP64 {
- static const uptr kSpaceBeg = AllocatorSpace;
- static const uptr kSpaceSize = AllocatorSize;
- static const uptr kMetadataSize = 0;
- typedef __scudo::SizeClassMap SizeClassMap;
- typedef NoOpMapUnmapCallback MapUnmapCallback;
- static const uptr kFlags =
- SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
- using AddressSpaceView = LocalAddressSpaceView;
-};
-typedef SizeClassAllocator64<AP64> PrimaryT;
-#else
-struct AP32 {
- static const uptr kSpaceBeg = 0;
- static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
- static const uptr kMetadataSize = 0;
- typedef __scudo::SizeClassMap SizeClassMap;
- static const uptr kRegionSizeLog = RegionSizeLog;
- using AddressSpaceView = LocalAddressSpaceView;
- typedef NoOpMapUnmapCallback MapUnmapCallback;
- static const uptr kFlags =
- SizeClassAllocator32FlagMasks::kRandomShuffleChunks |
- SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;
-};
-typedef SizeClassAllocator32<AP32> PrimaryT;
-#endif // SANITIZER_CAN_USE_ALLOCATOR64
-
-#include "scudo_allocator_secondary.h"
-
-typedef LargeMmapAllocator SecondaryT;
-
-#include "scudo_allocator_combined.h"
-
-typedef CombinedAllocator BackendT;
-typedef CombinedAllocator::AllocatorCache AllocatorCacheT;
-
-void initScudo();
-
-void *scudoAllocate(uptr Size, uptr Alignment, AllocType Type);
-void scudoDeallocate(void *Ptr, uptr Size, uptr Alignment, AllocType Type);
-void *scudoRealloc(void *Ptr, uptr Size);
-void *scudoCalloc(uptr NMemB, uptr Size);
-void *scudoValloc(uptr Size);
-void *scudoPvalloc(uptr Size);
-int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size);
-void *scudoAlignedAlloc(uptr Alignment, uptr Size);
-uptr scudoMallocUsableSize(void *Ptr);
-
-} // namespace __scudo
-
-#endif // SCUDO_ALLOCATOR_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_allocator_combined.h b/contrib/llvm-project/compiler-rt/lib/scudo/scudo_allocator_combined.h
deleted file mode 100644
index d61cc9ec1a52..000000000000
--- a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_allocator_combined.h
+++ /dev/null
@@ -1,75 +0,0 @@
-//===-- scudo_allocator_combined.h ------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-///
-/// Scudo Combined Allocator, dispatches allocation & deallocation requests to
-/// the Primary or the Secondary backend allocators.
-///
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_ALLOCATOR_COMBINED_H_
-#define SCUDO_ALLOCATOR_COMBINED_H_
-
-#ifndef SCUDO_ALLOCATOR_H_
-# error "This file must be included inside scudo_allocator.h."
-#endif
-
-class CombinedAllocator {
- public:
- using PrimaryAllocator = PrimaryT;
- using SecondaryAllocator = SecondaryT;
- using AllocatorCache = typename PrimaryAllocator::AllocatorCache;
- void init(s32 ReleaseToOSIntervalMs) {
- Primary.Init(ReleaseToOSIntervalMs);
- Secondary.Init();
- Stats.Init();
- }
-
- // Primary allocations are always MinAlignment aligned, and as such do not
- // require an Alignment parameter.
- void *allocatePrimary(AllocatorCache *Cache, uptr ClassId) {
- return Cache->Allocate(&Primary, ClassId);
- }
-
- // Secondary allocations do not require a Cache, but do require an Alignment
- // parameter.
- void *allocateSecondary(uptr Size, uptr Alignment) {
- return Secondary.Allocate(&Stats, Size, Alignment);
- }
-
- void deallocatePrimary(AllocatorCache *Cache, void *Ptr, uptr ClassId) {
- Cache->Deallocate(&Primary, ClassId, Ptr);
- }
-
- void deallocateSecondary(void *Ptr) {
- Secondary.Deallocate(&Stats, Ptr);
- }
-
- void initCache(AllocatorCache *Cache) {
- Cache->Init(&Stats);
- }
-
- void destroyCache(AllocatorCache *Cache) {
- Cache->Destroy(&Primary, &Stats);
- }
-
- void getStats(AllocatorStatCounters StatType) const {
- Stats.Get(StatType);
- }
-
- void printStats() {
- Primary.PrintStats();
- Secondary.PrintStats();
- }
-
- private:
- PrimaryAllocator Primary;
- SecondaryAllocator Secondary;
- AllocatorGlobalStats Stats;
-};
-
-#endif // SCUDO_ALLOCATOR_COMBINED_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_allocator_secondary.h b/contrib/llvm-project/compiler-rt/lib/scudo/scudo_allocator_secondary.h
deleted file mode 100644
index 80198c4aebf5..000000000000
--- a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_allocator_secondary.h
+++ /dev/null
@@ -1,192 +0,0 @@
-//===-- scudo_allocator_secondary.h -----------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-///
-/// Scudo Secondary Allocator.
-/// This services allocations that are too large to be serviced by the Primary
-/// Allocator. It is directly backed by the memory mapping functions of the
-/// operating system.
-///
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_ALLOCATOR_SECONDARY_H_
-#define SCUDO_ALLOCATOR_SECONDARY_H_
-
-#ifndef SCUDO_ALLOCATOR_H_
-# error "This file must be included inside scudo_allocator.h."
-#endif
-
-// Secondary backed allocations are standalone chunks that contain extra
-// information stored in a LargeChunk::Header prior to the frontend's header.
-//
-// The secondary takes care of alignment requirements (so that it can release
-// unnecessary pages in the rare event of larger alignments), and as such must
-// know about the frontend's header size.
-//
-// Since Windows doesn't support partial releasing of a reserved memory region,
-// we have to keep track of both the reserved and the committed memory.
-//
-// The resulting chunk resembles the following:
-//
-// +--------------------+
-// | Guard page(s) |
-// +--------------------+
-// | Unused space* |
-// +--------------------+
-// | LargeChunk::Header |
-// +--------------------+
-// | {Unp,P}ackedHeader |
-// +--------------------+
-// | Data (aligned) |
-// +--------------------+
-// | Unused space** |
-// +--------------------+
-// | Guard page(s) |
-// +--------------------+
-
-namespace LargeChunk {
-struct Header {
- ReservedAddressRange StoredRange;
- uptr CommittedSize;
- uptr Size;
-};
-constexpr uptr getHeaderSize() {
- return RoundUpTo(sizeof(Header), MinAlignment);
-}
-static Header *getHeader(uptr Ptr) {
- return reinterpret_cast<Header *>(Ptr - getHeaderSize());
-}
-static Header *getHeader(const void *Ptr) {
- return getHeader(reinterpret_cast<uptr>(Ptr));
-}
-} // namespace LargeChunk
-
-class LargeMmapAllocator {
- public:
- void Init() {
- internal_memset(this, 0, sizeof(*this));
- }
-
- void *Allocate(AllocatorStats *Stats, uptr Size, uptr Alignment) {
- const uptr UserSize = Size - Chunk::getHeaderSize();
- // The Scudo frontend prevents us from allocating more than
- // MaxAllowedMallocSize, so integer overflow checks would be superfluous.
- uptr ReservedSize = Size + LargeChunk::getHeaderSize();
- if (UNLIKELY(Alignment > MinAlignment))
- ReservedSize += Alignment;
- const uptr PageSize = GetPageSizeCached();
- ReservedSize = RoundUpTo(ReservedSize, PageSize);
- // Account for 2 guard pages, one before and one after the chunk.
- ReservedSize += 2 * PageSize;
-
- ReservedAddressRange AddressRange;
- uptr ReservedBeg = AddressRange.Init(ReservedSize, SecondaryAllocatorName);
- if (UNLIKELY(ReservedBeg == ~static_cast<uptr>(0)))
- return nullptr;
- // A page-aligned pointer is assumed after that, so check it now.
- DCHECK(IsAligned(ReservedBeg, PageSize));
- uptr ReservedEnd = ReservedBeg + ReservedSize;
-    // The beginning of the user area for that allocation comes after the
-    // initial guard page and both headers. This is the pointer that has to
-    // abide by alignment requirements.
- uptr CommittedBeg = ReservedBeg + PageSize;
- uptr UserBeg = CommittedBeg + HeadersSize;
- uptr UserEnd = UserBeg + UserSize;
- uptr CommittedEnd = RoundUpTo(UserEnd, PageSize);
-
-    // In the rare event of larger alignments, we will attempt to fit the mmap
-    // area better and unmap extraneous memory. This also ensures that the
-    // offset and unused bytes fields of the header stay small.
- if (UNLIKELY(Alignment > MinAlignment)) {
- if (!IsAligned(UserBeg, Alignment)) {
- UserBeg = RoundUpTo(UserBeg, Alignment);
- CommittedBeg = RoundDownTo(UserBeg - HeadersSize, PageSize);
- const uptr NewReservedBeg = CommittedBeg - PageSize;
- DCHECK_GE(NewReservedBeg, ReservedBeg);
- if (!SANITIZER_WINDOWS && NewReservedBeg != ReservedBeg) {
- AddressRange.Unmap(ReservedBeg, NewReservedBeg - ReservedBeg);
- ReservedBeg = NewReservedBeg;
- }
- UserEnd = UserBeg + UserSize;
- CommittedEnd = RoundUpTo(UserEnd, PageSize);
- }
- const uptr NewReservedEnd = CommittedEnd + PageSize;
- DCHECK_LE(NewReservedEnd, ReservedEnd);
- if (!SANITIZER_WINDOWS && NewReservedEnd != ReservedEnd) {
- AddressRange.Unmap(NewReservedEnd, ReservedEnd - NewReservedEnd);
- ReservedEnd = NewReservedEnd;
- }
- }
-
- DCHECK_LE(UserEnd, CommittedEnd);
- const uptr CommittedSize = CommittedEnd - CommittedBeg;
-    // Actually mmap the memory, preserving the guard pages on either side.
- CHECK_EQ(CommittedBeg, AddressRange.Map(CommittedBeg, CommittedSize));
- const uptr Ptr = UserBeg - Chunk::getHeaderSize();
- LargeChunk::Header *H = LargeChunk::getHeader(Ptr);
- H->StoredRange = AddressRange;
- H->Size = CommittedEnd - Ptr;
- H->CommittedSize = CommittedSize;
-
- // The primary adds the whole class size to the stats when allocating a
- // chunk, so we will do something similar here. But we will not account for
- // the guard pages.
- {
- SpinMutexLock l(&StatsMutex);
- Stats->Add(AllocatorStatAllocated, CommittedSize);
- Stats->Add(AllocatorStatMapped, CommittedSize);
- AllocatedBytes += CommittedSize;
- if (LargestSize < CommittedSize)
- LargestSize = CommittedSize;
- NumberOfAllocs++;
- }
-
- return reinterpret_cast<void *>(Ptr);
- }
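-
-  // Worked example (editorial, assuming 4K pages, MinAlignment, and a
-  // combined header size under one page): for Size == 20000, ReservedSize
-  // becomes 20000 plus the LargeChunk header, rounded up to 20480 (5 pages),
-  // plus 8192 for the two guard pages; only the committed middle is mapped,
-  // while the guard pages remain reserved but inaccessible.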
-
- void Deallocate(AllocatorStats *Stats, void *Ptr) {
- LargeChunk::Header *H = LargeChunk::getHeader(Ptr);
-    // Since we're about to unmap the entire region described by the
-    // ReservedAddressRange, copy it onto the stack first.
- ReservedAddressRange AddressRange = H->StoredRange;
- const uptr Size = H->CommittedSize;
- {
- SpinMutexLock l(&StatsMutex);
- Stats->Sub(AllocatorStatAllocated, Size);
- Stats->Sub(AllocatorStatMapped, Size);
- FreedBytes += Size;
- NumberOfFrees++;
- }
- AddressRange.Unmap(reinterpret_cast<uptr>(AddressRange.base()),
- AddressRange.size());
- }
-
- static uptr GetActuallyAllocatedSize(void *Ptr) {
- return LargeChunk::getHeader(Ptr)->Size;
- }
-
- void PrintStats() {
- Printf("Stats: LargeMmapAllocator: allocated %zd times (%zd K), "
- "freed %zd times (%zd K), remains %zd (%zd K) max %zd M\n",
- NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees,
- FreedBytes >> 10, NumberOfAllocs - NumberOfFrees,
- (AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20);
- }
-
- private:
- static constexpr uptr HeadersSize =
- LargeChunk::getHeaderSize() + Chunk::getHeaderSize();
-
- StaticSpinMutex StatsMutex;
- u32 NumberOfAllocs;
- u32 NumberOfFrees;
- uptr AllocatedBytes;
- uptr FreedBytes;
- uptr LargestSize;
-};
-
-#endif // SCUDO_ALLOCATOR_SECONDARY_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_crc32.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/scudo_crc32.cpp
deleted file mode 100644
index 87473505fe79..000000000000
--- a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_crc32.cpp
+++ /dev/null
@@ -1,24 +0,0 @@
-//===-- scudo_crc32.cpp -----------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-///
-/// CRC32 function leveraging hardware-specific instructions. This has to be
-/// kept separate to restrict the use of compiler-specific flags to this file.
-///
-//===----------------------------------------------------------------------===//
-
-#include "scudo_crc32.h"
-
-namespace __scudo {
-
-#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
-u32 computeHardwareCRC32(u32 Crc, uptr Data) {
- return CRC32_INTRINSIC(Crc, Data);
-}
-#endif // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
-
-} // namespace __scudo
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_crc32.h b/contrib/llvm-project/compiler-rt/lib/scudo/scudo_crc32.h
deleted file mode 100644
index ef40595a56d1..000000000000
--- a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_crc32.h
+++ /dev/null
@@ -1,100 +0,0 @@
-//===-- scudo_crc32.h -------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-///
-/// Scudo chunk header checksum related definitions.
-///
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_CRC32_H_
-#define SCUDO_CRC32_H_
-
-#include "sanitizer_common/sanitizer_internal_defs.h"
-
-// Hardware CRC32 is supported at compilation via the following:
-// - for i386 & x86_64: -msse4.2
-// - for ARM & AArch64: -march=armv8-a+crc or -mcrc
-// An additional check must be performed at runtime as well to make sure the
-// emitted instructions are valid on the target host.
-
-#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
-# ifdef __SSE4_2__
-# include <smmintrin.h>
-# define CRC32_INTRINSIC FIRST_32_SECOND_64(_mm_crc32_u32, _mm_crc32_u64)
-# endif
-# ifdef __ARM_FEATURE_CRC32
-# include <arm_acle.h>
-# define CRC32_INTRINSIC FIRST_32_SECOND_64(__crc32cw, __crc32cd)
-# endif
-#endif // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
-
-namespace __scudo {
-
-enum : u8 {
- CRC32Software = 0,
- CRC32Hardware = 1,
-};
-
-static const u32 CRC32Table[] = {
- 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f,
- 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
- 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, 0x1db71064, 0x6ab020f2,
- 0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
- 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
- 0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
- 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 0x35b5a8fa, 0x42b2986c,
- 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
- 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423,
- 0xcfba9599, 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
- 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, 0x01db7106,
- 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
- 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d,
- 0x91646c97, 0xe6635c01, 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
- 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
- 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
- 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7,
- 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
- 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa,
- 0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
- 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81,
- 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
- 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, 0xe3630b12, 0x94643b84,
- 0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
- 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
- 0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
- 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, 0xd6d6a3e8, 0xa1d1937e,
- 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
- 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55,
- 0x316e8eef, 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
- 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, 0xb2bd0b28,
- 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
- 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f,
- 0x72076785, 0x05005713, 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
- 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
- 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
- 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69,
- 0x616bffd3, 0x166ccf45, 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
- 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc,
- 0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
- 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693,
- 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
- 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
-};
-
-inline u32 computeSoftwareCRC32(u32 Crc, uptr Data) {
- for (uptr i = 0; i < sizeof(Data); i++) {
- Crc = CRC32Table[(Crc ^ Data) & 0xff] ^ (Crc >> 8);
- Data >>= 8;
- }
- return Crc;
-}
-
-SANITIZER_WEAK_ATTRIBUTE u32 computeHardwareCRC32(u32 Crc, uptr Data);
-
-} // namespace __scudo
-
-#endif // SCUDO_CRC32_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_errors.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/scudo_errors.cpp
deleted file mode 100644
index 4bea9ebc6ab0..000000000000
--- a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_errors.cpp
+++ /dev/null
@@ -1,77 +0,0 @@
-//===-- scudo_errors.cpp ----------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-///
-/// Verbose termination functions.
-///
-//===----------------------------------------------------------------------===//
-
-#include "scudo_utils.h"
-
-#include "sanitizer_common/sanitizer_flags.h"
-
-namespace __scudo {
-
-void NORETURN reportCallocOverflow(uptr Count, uptr Size) {
- dieWithMessage("calloc parameters overflow: count * size (%zd * %zd) cannot "
- "be represented with type size_t\n", Count, Size);
-}
-
-void NORETURN reportPvallocOverflow(uptr Size) {
- dieWithMessage("pvalloc parameters overflow: size 0x%zx rounded up to system "
- "page size 0x%zx cannot be represented in type size_t\n", Size,
- GetPageSizeCached());
-}
-
-void NORETURN reportAllocationAlignmentTooBig(uptr Alignment,
- uptr MaxAlignment) {
- dieWithMessage("invalid allocation alignment: %zd exceeds maximum supported "
- "allocation of %zd\n", Alignment, MaxAlignment);
-}
-
-void NORETURN reportAllocationAlignmentNotPowerOfTwo(uptr Alignment) {
- dieWithMessage("invalid allocation alignment: %zd, alignment must be a power "
- "of two\n", Alignment);
-}
-
-void NORETURN reportInvalidPosixMemalignAlignment(uptr Alignment) {
- dieWithMessage(
- "invalid alignment requested in posix_memalign: %zd, alignment"
- " must be a power of two and a multiple of sizeof(void *) == %zd\n",
- Alignment, sizeof(void *));
-}
-
-void NORETURN reportInvalidAlignedAllocAlignment(uptr Size, uptr Alignment) {
-#if SANITIZER_POSIX
- dieWithMessage("invalid alignment requested in aligned_alloc: %zd, alignment "
- "must be a power of two and the requested size 0x%zx must be a multiple "
- "of alignment\n", Alignment, Size);
-#else
- dieWithMessage("invalid alignment requested in aligned_alloc: %zd, the "
- "requested size 0x%zx must be a multiple of alignment\n", Alignment,
- Size);
-#endif
-}
-
-void NORETURN reportAllocationSizeTooBig(uptr UserSize, uptr TotalSize,
- uptr MaxSize) {
- dieWithMessage("requested allocation size 0x%zx (0x%zx after adjustments) "
- "exceeds maximum supported size of 0x%zx\n", UserSize, TotalSize,
- MaxSize);
-}
-
-void NORETURN reportRssLimitExceeded() {
- dieWithMessage("specified RSS limit exceeded, currently set to "
- "soft_rss_limit_mb=%zd\n", common_flags()->soft_rss_limit_mb);
-}
-
-void NORETURN reportOutOfMemory(uptr RequestedSize) {
- dieWithMessage("allocator is out of memory trying to allocate 0x%zx bytes\n",
- RequestedSize);
-}
-
-} // namespace __scudo
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_errors.h b/contrib/llvm-project/compiler-rt/lib/scudo/scudo_errors.h
deleted file mode 100644
index 258695c2c02c..000000000000
--- a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_errors.h
+++ /dev/null
@@ -1,34 +0,0 @@
-//===-- scudo_errors.h ------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-///
-/// Header for scudo_errors.cpp.
-///
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_ERRORS_H_
-#define SCUDO_ERRORS_H_
-
-#include "sanitizer_common/sanitizer_internal_defs.h"
-
-namespace __scudo {
-
-void NORETURN reportCallocOverflow(uptr Count, uptr Size);
-void NORETURN reportPvallocOverflow(uptr Size);
-void NORETURN reportAllocationAlignmentTooBig(uptr Alignment,
- uptr MaxAlignment);
-void NORETURN reportAllocationAlignmentNotPowerOfTwo(uptr Alignment);
-void NORETURN reportInvalidPosixMemalignAlignment(uptr Alignment);
-void NORETURN reportInvalidAlignedAllocAlignment(uptr Size, uptr Alignment);
-void NORETURN reportAllocationSizeTooBig(uptr UserSize, uptr TotalSize,
- uptr MaxSize);
-void NORETURN reportRssLimitExceeded();
-void NORETURN reportOutOfMemory(uptr RequestedSize);
-
-} // namespace __scudo
-
-#endif // SCUDO_ERRORS_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_flags.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/scudo_flags.cpp
deleted file mode 100644
index c564e217b35b..000000000000
--- a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_flags.cpp
+++ /dev/null
@@ -1,136 +0,0 @@
-//===-- scudo_flags.cpp -----------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-///
-/// Hardened Allocator flag parsing logic.
-///
-//===----------------------------------------------------------------------===//
-
-#include "scudo_flags.h"
-#include "scudo_interface_internal.h"
-#include "scudo_utils.h"
-
-#include "sanitizer_common/sanitizer_flags.h"
-#include "sanitizer_common/sanitizer_flag_parser.h"
-
-namespace __scudo {
-
-static Flags ScudoFlags; // Use via getFlags().
-
-void Flags::setDefaults() {
-#define SCUDO_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
-#include "scudo_flags.inc"
-#undef SCUDO_FLAG
-}
-
-static void RegisterScudoFlags(FlagParser *parser, Flags *f) {
-#define SCUDO_FLAG(Type, Name, DefaultValue, Description) \
- RegisterFlag(parser, #Name, Description, &f->Name);
-#include "scudo_flags.inc"
-#undef SCUDO_FLAG
-}
-
-static const char *getCompileDefinitionScudoDefaultOptions() {
-#ifdef SCUDO_DEFAULT_OPTIONS
- return SANITIZER_STRINGIFY(SCUDO_DEFAULT_OPTIONS);
-#else
- return "";
-#endif
-}
-
-static const char *getScudoDefaultOptions() {
- return (&__scudo_default_options) ? __scudo_default_options() : "";
-}
-
-void initFlags() {
- SetCommonFlagsDefaults();
- {
- CommonFlags cf;
- cf.CopyFrom(*common_flags());
- cf.exitcode = 1;
- OverrideCommonFlags(cf);
- }
- Flags *f = getFlags();
- f->setDefaults();
-
- FlagParser ScudoParser;
- RegisterScudoFlags(&ScudoParser, f);
- RegisterCommonFlags(&ScudoParser);
-
- // Override from compile definition.
- ScudoParser.ParseString(getCompileDefinitionScudoDefaultOptions());
-
- // Override from user-specified string.
- ScudoParser.ParseString(getScudoDefaultOptions());
-
- // Override from environment.
- ScudoParser.ParseStringFromEnv("SCUDO_OPTIONS");
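-
-  // Editorial example (flag names taken from scudo_flags.inc): the
-  // environment string uses the common sanitizer name=value syntax,
-  // colon-separated, e.g.
-  //   SCUDO_OPTIONS="QuarantineSizeKb=256:ZeroContents=1"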
-
- InitializeCommonFlags();
-
- // Sanity checks and default settings for the Quarantine parameters.
-
- if (f->QuarantineSizeMb >= 0) {
- // Backward compatible logic if QuarantineSizeMb is set.
- if (f->QuarantineSizeKb >= 0) {
- dieWithMessage("ERROR: please use either QuarantineSizeMb (deprecated) "
- "or QuarantineSizeKb, but not both\n");
- }
- if (f->QuarantineChunksUpToSize >= 0) {
- dieWithMessage("ERROR: QuarantineChunksUpToSize cannot be used in "
-          "conjunction with the deprecated QuarantineSizeMb option\n");
- }
- // If everything is in order, update QuarantineSizeKb accordingly.
- f->QuarantineSizeKb = f->QuarantineSizeMb * 1024;
- } else {
- // Otherwise proceed with the new options.
- if (f->QuarantineSizeKb < 0) {
- const int DefaultQuarantineSizeKb = FIRST_32_SECOND_64(64, 256);
- f->QuarantineSizeKb = DefaultQuarantineSizeKb;
- }
- if (f->QuarantineChunksUpToSize < 0) {
- const int DefaultQuarantineChunksUpToSize = FIRST_32_SECOND_64(512, 2048);
- f->QuarantineChunksUpToSize = DefaultQuarantineChunksUpToSize;
- }
- }
-
- // We enforce an upper limit for the chunk quarantine threshold of 4Mb.
- if (f->QuarantineChunksUpToSize > (4 * 1024 * 1024)) {
- dieWithMessage("ERROR: the chunk quarantine threshold is too large\n");
- }
-
- // We enforce an upper limit for the quarantine size of 32Mb.
- if (f->QuarantineSizeKb > (32 * 1024)) {
- dieWithMessage("ERROR: the quarantine size is too large\n");
- }
-
- if (f->ThreadLocalQuarantineSizeKb < 0) {
- const int DefaultThreadLocalQuarantineSizeKb = FIRST_32_SECOND_64(16, 64);
- f->ThreadLocalQuarantineSizeKb = DefaultThreadLocalQuarantineSizeKb;
- }
- // And an upper limit of 8Mb for the thread quarantine cache.
- if (f->ThreadLocalQuarantineSizeKb > (8 * 1024)) {
- dieWithMessage("ERROR: the per thread quarantine cache size is too "
- "large\n");
- }
- if (f->ThreadLocalQuarantineSizeKb == 0 && f->QuarantineSizeKb > 0) {
- dieWithMessage("ERROR: ThreadLocalQuarantineSizeKb can be set to 0 only "
- "when QuarantineSizeKb is set to 0\n");
- }
-}
-
-Flags *getFlags() {
- return &ScudoFlags;
-}
-
-} // namespace __scudo
-
-#if !SANITIZER_SUPPORTS_WEAK_HOOKS
-SANITIZER_INTERFACE_WEAK_DEF(const char*, __scudo_default_options, void) {
- return "";
-}
-#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_flags.h b/contrib/llvm-project/compiler-rt/lib/scudo/scudo_flags.h
deleted file mode 100644
index 483c79621cbf..000000000000
--- a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_flags.h
+++ /dev/null
@@ -1,32 +0,0 @@
-//===-- scudo_flags.h -------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-///
-/// Header for scudo_flags.cpp.
-///
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_FLAGS_H_
-#define SCUDO_FLAGS_H_
-
-namespace __scudo {
-
-struct Flags {
-#define SCUDO_FLAG(Type, Name, DefaultValue, Description) Type Name;
-#include "scudo_flags.inc"
-#undef SCUDO_FLAG
-
- void setDefaults();
-};
-
-Flags *getFlags();
-
-void initFlags();
-
-} // namespace __scudo
-
-#endif // SCUDO_FLAGS_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_flags.inc b/contrib/llvm-project/compiler-rt/lib/scudo/scudo_flags.inc
deleted file mode 100644
index c124738c1f3a..000000000000
--- a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_flags.inc
+++ /dev/null
@@ -1,48 +0,0 @@
-//===-- scudo_flags.inc -----------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-///
-/// Hardened Allocator runtime flags.
-///
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_FLAG
-# error "Define SCUDO_FLAG prior to including this file!"
-#endif
-
-SCUDO_FLAG(int, QuarantineSizeMb, -1,
- "Deprecated. Please use QuarantineSizeKb.")
-
-// Default value is set in scudo_flags.cpp based on architecture.
-SCUDO_FLAG(int, QuarantineSizeKb, -1,
- "Size in KB of quarantine used to delay the actual deallocation of "
- "chunks. Lower value may reduce memory usage but decrease the "
- "effectiveness of the mitigation. Defaults to 64KB (32-bit) or "
- "256KB (64-bit)")
-
-// Default value is set in scudo_flags.cpp based on architecture.
-SCUDO_FLAG(int, ThreadLocalQuarantineSizeKb, -1,
- "Size in KB of per-thread cache used to offload the global "
- "quarantine. Lower value may reduce memory usage but might increase "
- "the contention on the global quarantine. Defaults to 16KB (32-bit) "
- "or 64KB (64-bit)")
-
-// Default value is set in scudo_flags.cpp based on architecture.
-SCUDO_FLAG(int, QuarantineChunksUpToSize, -1,
- "Size in bytes up to which chunks will be quarantined (if lower than"
- "or equal to). Defaults to 256 (32-bit) or 2048 (64-bit)")
-
-// Disable the deallocation type check by default on Android, it causes too many
-// issues with third party libraries.
-SCUDO_FLAG(bool, DeallocationTypeMismatch, !SANITIZER_ANDROID,
- "Report errors on malloc/delete, new/free, new/delete[], etc.")
-
-SCUDO_FLAG(bool, DeleteSizeMismatch, true,
- "Report errors on mismatch between size of new and delete.")
-
-SCUDO_FLAG(bool, ZeroContents, false,
- "Zero chunk contents on allocation and deallocation.")
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_interface_internal.h b/contrib/llvm-project/compiler-rt/lib/scudo/scudo_interface_internal.h
deleted file mode 100644
index 75c63aa6d489..000000000000
--- a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_interface_internal.h
+++ /dev/null
@@ -1,32 +0,0 @@
-//===-- scudo_interface_internal.h ------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-///
-/// Private Scudo interface header.
-///
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_INTERFACE_INTERNAL_H_
-#define SCUDO_INTERFACE_INTERNAL_H_
-
-#include "sanitizer_common/sanitizer_internal_defs.h"
-
-using __sanitizer::uptr;
-using __sanitizer::s32;
-
-extern "C" {
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-const char* __scudo_default_options();
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void __scudo_set_rss_limit(uptr LimitMb, s32 HardLimit);
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void __scudo_print_stats();
-} // extern "C"
-
-#endif // SCUDO_INTERFACE_INTERNAL_H_
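
__scudo_default_options is declared weak here, and the SANITIZER_INTERFACE_WEAK_DEF fallback in scudo_flags.cpp above returns "" when no definition exists; an application could therefore supply its own, with the environment variable SCUDO_OPTIONS, parsed last, still overriding it. A sketch of such an override (the flag values are examples only):

// Hypothetical application-side override of the weak interface function.
// The string uses the flag parser syntax: colon-separated Name=Value pairs.
extern "C" const char *__scudo_default_options() {
  return "QuarantineSizeKb=128:ThreadLocalQuarantineSizeKb=32:"
         "DeleteSizeMismatch=false";
}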
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_malloc.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/scudo_malloc.cpp
deleted file mode 100644
index a72b861e28ee..000000000000
--- a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_malloc.cpp
+++ /dev/null
@@ -1,84 +0,0 @@
-//===-- scudo_malloc.cpp ----------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-///
-/// Interceptors for malloc related functions.
-///
-//===----------------------------------------------------------------------===//
-
-#include "scudo_allocator.h"
-
-#include "interception/interception.h"
-#include "sanitizer_common/sanitizer_platform_interceptors.h"
-
-#include <stddef.h>
-
-using namespace __scudo;
-
-extern "C" {
-INTERCEPTOR_ATTRIBUTE void free(void *ptr) {
- scudoDeallocate(ptr, 0, 0, FromMalloc);
-}
-
-INTERCEPTOR_ATTRIBUTE void *malloc(size_t size) {
- return scudoAllocate(size, 0, FromMalloc);
-}
-
-INTERCEPTOR_ATTRIBUTE void *realloc(void *ptr, size_t size) {
- return scudoRealloc(ptr, size);
-}
-
-INTERCEPTOR_ATTRIBUTE void *calloc(size_t nmemb, size_t size) {
- return scudoCalloc(nmemb, size);
-}
-
-INTERCEPTOR_ATTRIBUTE void *valloc(size_t size) {
- return scudoValloc(size);
-}
-
-INTERCEPTOR_ATTRIBUTE
-int posix_memalign(void **memptr, size_t alignment, size_t size) {
- return scudoPosixMemalign(memptr, alignment, size);
-}
-
-#if SANITIZER_INTERCEPT_CFREE
-INTERCEPTOR_ATTRIBUTE void cfree(void *ptr) ALIAS("free");
-#endif
-
-#if SANITIZER_INTERCEPT_MEMALIGN
-INTERCEPTOR_ATTRIBUTE void *memalign(size_t alignment, size_t size) {
- return scudoAllocate(size, alignment, FromMemalign);
-}
-
-INTERCEPTOR_ATTRIBUTE
-void *__libc_memalign(size_t alignment, size_t size) ALIAS("memalign");
-#endif
-
-#if SANITIZER_INTERCEPT_PVALLOC
-INTERCEPTOR_ATTRIBUTE void *pvalloc(size_t size) {
- return scudoPvalloc(size);
-}
-#endif
-
-#if SANITIZER_INTERCEPT_ALIGNED_ALLOC
-INTERCEPTOR_ATTRIBUTE void *aligned_alloc(size_t alignment, size_t size) {
- return scudoAlignedAlloc(alignment, size);
-}
-#endif
-
-#if SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE
-INTERCEPTOR_ATTRIBUTE size_t malloc_usable_size(void *ptr) {
- return scudoMallocUsableSize(ptr);
-}
-#endif
-
-#if SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO
-INTERCEPTOR_ATTRIBUTE int mallopt(int cmd, int value) {
- return 0;
-}
-#endif
-} // extern "C"
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_new_delete.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/scudo_new_delete.cpp
deleted file mode 100644
index 03eef7f28bb9..000000000000
--- a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_new_delete.cpp
+++ /dev/null
@@ -1,107 +0,0 @@
-//===-- scudo_new_delete.cpp ------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-///
-/// Interceptors for operators new and delete.
-///
-//===----------------------------------------------------------------------===//
-
-#include "scudo_allocator.h"
-#include "scudo_errors.h"
-
-#include "interception/interception.h"
-
-#include <stddef.h>
-
-using namespace __scudo;
-
-#define CXX_OPERATOR_ATTRIBUTE INTERCEPTOR_ATTRIBUTE
-
-// Fake std::nothrow_t to avoid including <new>.
-namespace std {
-struct nothrow_t {};
-enum class align_val_t: size_t {};
-} // namespace std
-
-// TODO(alekseys): throw std::bad_alloc instead of dying on OOM.
-#define OPERATOR_NEW_BODY_ALIGN(Type, Align, NoThrow) \
- void *Ptr = scudoAllocate(size, static_cast<uptr>(Align), Type); \
- if (!NoThrow && UNLIKELY(!Ptr)) reportOutOfMemory(size); \
- return Ptr;
-#define OPERATOR_NEW_BODY(Type, NoThrow) \
- OPERATOR_NEW_BODY_ALIGN(Type, 0, NoThrow)
-
-CXX_OPERATOR_ATTRIBUTE
-void *operator new(size_t size)
-{ OPERATOR_NEW_BODY(FromNew, /*NoThrow=*/false); }
-CXX_OPERATOR_ATTRIBUTE
-void *operator new[](size_t size)
-{ OPERATOR_NEW_BODY(FromNewArray, /*NoThrow=*/false); }
-CXX_OPERATOR_ATTRIBUTE
-void *operator new(size_t size, std::nothrow_t const&)
-{ OPERATOR_NEW_BODY(FromNew, /*NoThrow=*/true); }
-CXX_OPERATOR_ATTRIBUTE
-void *operator new[](size_t size, std::nothrow_t const&)
-{ OPERATOR_NEW_BODY(FromNewArray, /*NoThrow=*/true); }
-CXX_OPERATOR_ATTRIBUTE
-void *operator new(size_t size, std::align_val_t align)
-{ OPERATOR_NEW_BODY_ALIGN(FromNew, align, /*NoThrow=*/false); }
-CXX_OPERATOR_ATTRIBUTE
-void *operator new[](size_t size, std::align_val_t align)
-{ OPERATOR_NEW_BODY_ALIGN(FromNewArray, align, /*NoThrow=*/false); }
-CXX_OPERATOR_ATTRIBUTE
-void *operator new(size_t size, std::align_val_t align, std::nothrow_t const&)
-{ OPERATOR_NEW_BODY_ALIGN(FromNew, align, /*NoThrow=*/true); }
-CXX_OPERATOR_ATTRIBUTE
-void *operator new[](size_t size, std::align_val_t align, std::nothrow_t const&)
-{ OPERATOR_NEW_BODY_ALIGN(FromNewArray, align, /*NoThrow=*/true); }
-
-#define OPERATOR_DELETE_BODY(Type) \
- scudoDeallocate(ptr, 0, 0, Type);
-#define OPERATOR_DELETE_BODY_SIZE(Type) \
- scudoDeallocate(ptr, size, 0, Type);
-#define OPERATOR_DELETE_BODY_ALIGN(Type) \
- scudoDeallocate(ptr, 0, static_cast<uptr>(align), Type);
-#define OPERATOR_DELETE_BODY_SIZE_ALIGN(Type) \
- scudoDeallocate(ptr, size, static_cast<uptr>(align), Type);
-
-CXX_OPERATOR_ATTRIBUTE
-void operator delete(void *ptr) NOEXCEPT
-{ OPERATOR_DELETE_BODY(FromNew); }
-CXX_OPERATOR_ATTRIBUTE
-void operator delete[](void *ptr) NOEXCEPT
-{ OPERATOR_DELETE_BODY(FromNewArray); }
-CXX_OPERATOR_ATTRIBUTE
-void operator delete(void *ptr, std::nothrow_t const&)
-{ OPERATOR_DELETE_BODY(FromNew); }
-CXX_OPERATOR_ATTRIBUTE
-void operator delete[](void *ptr, std::nothrow_t const&)
-{ OPERATOR_DELETE_BODY(FromNewArray); }
-CXX_OPERATOR_ATTRIBUTE
-void operator delete(void *ptr, size_t size) NOEXCEPT
-{ OPERATOR_DELETE_BODY_SIZE(FromNew); }
-CXX_OPERATOR_ATTRIBUTE
-void operator delete[](void *ptr, size_t size) NOEXCEPT
-{ OPERATOR_DELETE_BODY_SIZE(FromNewArray); }
-CXX_OPERATOR_ATTRIBUTE
-void operator delete(void *ptr, std::align_val_t align) NOEXCEPT
-{ OPERATOR_DELETE_BODY_ALIGN(FromNew); }
-CXX_OPERATOR_ATTRIBUTE
-void operator delete[](void *ptr, std::align_val_t align) NOEXCEPT
-{ OPERATOR_DELETE_BODY_ALIGN(FromNewArray); }
-CXX_OPERATOR_ATTRIBUTE
-void operator delete(void *ptr, std::align_val_t align, std::nothrow_t const&)
-{ OPERATOR_DELETE_BODY_ALIGN(FromNew); }
-CXX_OPERATOR_ATTRIBUTE
-void operator delete[](void *ptr, std::align_val_t align, std::nothrow_t const&)
-{ OPERATOR_DELETE_BODY_ALIGN(FromNewArray); }
-CXX_OPERATOR_ATTRIBUTE
-void operator delete(void *ptr, size_t size, std::align_val_t align) NOEXCEPT
-{ OPERATOR_DELETE_BODY_SIZE_ALIGN(FromNew); }
-CXX_OPERATOR_ATTRIBUTE
-void operator delete[](void *ptr, size_t size, std::align_val_t align) NOEXCEPT
-{ OPERATOR_DELETE_BODY_SIZE_ALIGN(FromNewArray); }
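
The nothrow distinction above is entirely compile-time: with NoThrow=true the out-of-memory check folds away. Hand-expanding one variant makes this visible (illustrative only; scudoAllocate, uptr, and FromNewArray come from the deleted scudo_allocator.h):

// Hand-expanded form of the nothrow array variant defined above:
//   void *operator new[](size_t size, std::nothrow_t const&)
//   { OPERATOR_NEW_BODY(FromNewArray, /*NoThrow=*/true); }
void *operator new[](size_t size, std::nothrow_t const &) {
  void *Ptr = scudoAllocate(size, static_cast<uptr>(0), FromNewArray);
  // `!NoThrow && UNLIKELY(!Ptr)` is constant-false here, so the
  // reportOutOfMemory() call disappears and null is simply returned.
  return Ptr;
}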
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_platform.h b/contrib/llvm-project/compiler-rt/lib/scudo/scudo_platform.h
deleted file mode 100644
index 07d4b70fc8e9..000000000000
--- a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_platform.h
+++ /dev/null
@@ -1,93 +0,0 @@
-//===-- scudo_platform.h ----------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-///
-/// Scudo platform specific definitions.
-/// TODO(kostyak): add tests for the compile time defines.
-///
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_PLATFORM_H_
-#define SCUDO_PLATFORM_H_
-
-#include "sanitizer_common/sanitizer_allocator.h"
-
-#if !SANITIZER_LINUX && !SANITIZER_FUCHSIA
-# error "The Scudo hardened allocator is not supported on this platform."
-#endif
-
-#define SCUDO_TSD_EXCLUSIVE_SUPPORTED (!SANITIZER_ANDROID && !SANITIZER_FUCHSIA)
-
-#ifndef SCUDO_TSD_EXCLUSIVE
-// SCUDO_TSD_EXCLUSIVE wasn't defined, use a default TSD model for the platform.
-# if SANITIZER_ANDROID || SANITIZER_FUCHSIA
-// Android and Fuchsia use a pool of TSDs shared between threads.
-# define SCUDO_TSD_EXCLUSIVE 0
-# elif SANITIZER_LINUX && !SANITIZER_ANDROID
-// Non-Android Linux uses an exclusive TSD per thread.
-# define SCUDO_TSD_EXCLUSIVE 1
-# else
-# error "No default TSD model defined for this platform."
-# endif // SANITIZER_ANDROID || SANITIZER_FUCHSIA
-#endif // SCUDO_TSD_EXCLUSIVE
-
-// If the exclusive TSD model is chosen, make sure the platform supports it.
-#if SCUDO_TSD_EXCLUSIVE && !SCUDO_TSD_EXCLUSIVE_SUPPORTED
-# error "The exclusive TSD model is not supported on this platform."
-#endif
-
-// Maximum number of TSDs that can be created for the Shared model.
-#ifndef SCUDO_SHARED_TSD_POOL_SIZE
-# if SANITIZER_ANDROID
-# define SCUDO_SHARED_TSD_POOL_SIZE 2U
-# else
-# define SCUDO_SHARED_TSD_POOL_SIZE 32U
-# endif // SANITIZER_ANDROID
-#endif // SCUDO_SHARED_TSD_POOL_SIZE
-
-// The following allows the public interface functions to be disabled.
-#ifndef SCUDO_CAN_USE_PUBLIC_INTERFACE
-# define SCUDO_CAN_USE_PUBLIC_INTERFACE 1
-#endif
-
-// Hooks in the allocation & deallocation paths can become a security concern if
-// implemented improperly, or if overwritten by an attacker. Use with caution.
-#ifndef SCUDO_CAN_USE_HOOKS
-# if SANITIZER_FUCHSIA
-# define SCUDO_CAN_USE_HOOKS 1
-# else
-# define SCUDO_CAN_USE_HOOKS 0
-# endif // SANITIZER_FUCHSIA
-#endif // SCUDO_CAN_USE_HOOKS
-
-namespace __scudo {
-
-#if SANITIZER_CAN_USE_ALLOCATOR64
-# if defined(__aarch64__) && SANITIZER_ANDROID
-const uptr AllocatorSize = 0x4000000000ULL; // 256G.
-# elif defined(__aarch64__)
-const uptr AllocatorSize = 0x10000000000ULL; // 1T.
-# else
-const uptr AllocatorSize = 0x40000000000ULL; // 4T.
-# endif
-#else
-const uptr RegionSizeLog = SANITIZER_ANDROID ? 19 : 20;
-#endif // SANITIZER_CAN_USE_ALLOCATOR64
-
-#if !defined(SCUDO_SIZE_CLASS_MAP)
-# define SCUDO_SIZE_CLASS_MAP Dense
-#endif
-
-#define SIZE_CLASS_MAP_TYPE SIZE_CLASS_MAP_TYPE_(SCUDO_SIZE_CLASS_MAP)
-#define SIZE_CLASS_MAP_TYPE_(T) SIZE_CLASS_MAP_TYPE__(T)
-#define SIZE_CLASS_MAP_TYPE__(T) T##SizeClassMap
-
-typedef SIZE_CLASS_MAP_TYPE SizeClassMap;
-
-} // namespace __scudo
-
-#endif // SCUDO_PLATFORM_H_
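
The SIZE_CLASS_MAP_TYPE_ / SIZE_CLASS_MAP_TYPE__ pair above is the usual two-step indirection needed to expand SCUDO_SIZE_CLASS_MAP before ## pastes it: pasting in one step would glue the unexpanded macro name. A sketch of the difference, with hypothetical macro names:

// Why the indirection matters (hypothetical names, preprocessor only):
#define MAP_NAME Dense
#define PASTE_RAW(T) T##SizeClassMap  // PASTE_RAW(MAP_NAME) -> MAP_NAMESizeClassMap
#define PASTE(T) PASTE_RAW(T)         // PASTE(MAP_NAME)     -> DenseSizeClassMap
typedef PASTE(MAP_NAME) SizeClassMap; // i.e. typedef DenseSizeClassMap ...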
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_termination.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/scudo_termination.cpp
deleted file mode 100644
index 5f1337efaca0..000000000000
--- a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_termination.cpp
+++ /dev/null
@@ -1,41 +0,0 @@
-//===-- scudo_termination.cpp -----------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-///
-/// This file contains bare-bones termination functions to replace the
-/// __sanitizer ones, in order to avoid any potential abuse of the callback
-/// functionality.
-///
-//===----------------------------------------------------------------------===//
-
-#include "scudo_utils.h"
-
-#include "sanitizer_common/sanitizer_common.h"
-
-namespace __sanitizer {
-
-bool AddDieCallback(DieCallbackType Callback) { return true; }
-
-bool RemoveDieCallback(DieCallbackType Callback) { return true; }
-
-void SetUserDieCallback(DieCallbackType Callback) {}
-
-void NORETURN Die() {
- if (common_flags()->abort_on_error)
- Abort();
- internal__exit(common_flags()->exitcode);
-}
-
-void SetCheckUnwindCallback(void (*callback)()) {}
-
-void NORETURN CheckFailed(const char *File, int Line, const char *Condition,
- u64 Value1, u64 Value2) {
- __scudo::dieWithMessage("CHECK failed at %s:%d %s (%lld, %lld)\n",
- File, Line, Condition, Value1, Value2);
-}
-
-} // namespace __sanitizer
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd.h b/contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd.h
deleted file mode 100644
index e1310974db45..000000000000
--- a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd.h
+++ /dev/null
@@ -1,65 +0,0 @@
-//===-- scudo_tsd.h ---------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-///
-/// Scudo thread specific data definition.
-/// Implementation will differ based on the thread local storage primitives
-/// offered by the underlying platform.
-///
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_TSD_H_
-#define SCUDO_TSD_H_
-
-#include "scudo_allocator.h"
-#include "scudo_utils.h"
-
-#include <pthread.h>
-
-namespace __scudo {
-
-struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) ScudoTSD {
- AllocatorCacheT Cache;
- uptr QuarantineCachePlaceHolder[4];
-
- void init();
- void commitBack();
-
- inline bool tryLock() TRY_ACQUIRE(true, Mutex) {
- if (Mutex.TryLock()) {
- atomic_store_relaxed(&Precedence, 0);
- return true;
- }
- if (atomic_load_relaxed(&Precedence) == 0)
- atomic_store_relaxed(&Precedence, static_cast<uptr>(
- MonotonicNanoTime() >> FIRST_32_SECOND_64(16, 0)));
- return false;
- }
-
- inline void lock() ACQUIRE(Mutex) {
- atomic_store_relaxed(&Precedence, 0);
- Mutex.Lock();
- }
-
- inline void unlock() RELEASE(Mutex) { Mutex.Unlock(); }
-
- inline uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }
-
- private:
- StaticSpinMutex Mutex;
- atomic_uintptr_t Precedence;
-};
-
-void initThread(bool MinimalInit);
-
-// TSD model specific fastpath functions definitions.
-#include "scudo_tsd_exclusive.inc"
-#include "scudo_tsd_shared.inc"
-
-} // namespace __scudo
-
-#endif // SCUDO_TSD_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd_exclusive.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd_exclusive.cpp
deleted file mode 100644
index a203a74bbcf8..000000000000
--- a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd_exclusive.cpp
+++ /dev/null
@@ -1,67 +0,0 @@
-//===-- scudo_tsd_exclusive.cpp ---------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-///
-/// Scudo exclusive TSD implementation.
-///
-//===----------------------------------------------------------------------===//
-
-#include "scudo_tsd.h"
-
-#if SCUDO_TSD_EXCLUSIVE
-
-namespace __scudo {
-
-static pthread_once_t GlobalInitialized = PTHREAD_ONCE_INIT;
-static pthread_key_t PThreadKey;
-
-__attribute__((tls_model("initial-exec")))
-THREADLOCAL ThreadState ScudoThreadState = ThreadNotInitialized;
-__attribute__((tls_model("initial-exec")))
-THREADLOCAL ScudoTSD TSD;
-
-// Fallback TSD for when the thread isn't initialized yet or is torn down. It
-// can be shared between multiple threads and as such must be locked.
-ScudoTSD FallbackTSD;
-
-static void teardownThread(void *Ptr) {
- uptr I = reinterpret_cast<uptr>(Ptr);
- // The glibc POSIX thread-local-storage deallocation routine calls user
- // provided destructors in a loop of PTHREAD_DESTRUCTOR_ITERATIONS.
- // We want to be called last since other destructors might call free and the
- // like, so we wait until PTHREAD_DESTRUCTOR_ITERATIONS before draining the
- // quarantine and swallowing the cache.
- if (I > 1) {
- // If pthread_setspecific fails, we will go ahead with the teardown.
- if (LIKELY(pthread_setspecific(PThreadKey,
- reinterpret_cast<void *>(I - 1)) == 0))
- return;
- }
- TSD.commitBack();
- ScudoThreadState = ThreadTornDown;
-}
-
-
-static void initOnce() {
- CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread), 0);
- initScudo();
- FallbackTSD.init();
-}
-
-void initThread(bool MinimalInit) {
- CHECK_EQ(pthread_once(&GlobalInitialized, initOnce), 0);
- if (UNLIKELY(MinimalInit))
- return;
- CHECK_EQ(pthread_setspecific(PThreadKey, reinterpret_cast<void *>(
- GetPthreadDestructorIterations())), 0);
- TSD.init();
- ScudoThreadState = ThreadInitialized;
-}
-
-} // namespace __scudo
-
-#endif // SCUDO_TSD_EXCLUSIVE
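
teardownThread above defers its real work by re-registering itself with a decremented counter, relying on the glibc destructor loop to invoke it once per pass, up to PTHREAD_DESTRUCTOR_ITERATIONS times. The pattern in isolation (hypothetical names, assuming a POSIX libc that honors the iteration count):

#include <climits>
#include <cstdint>
#include <pthread.h>

static pthread_key_t Key;

// Runs once per destructor iteration; defers real cleanup until the count
// threaded through the TLS slot reaches 1 (the final iteration).
static void lastChanceDtor(void *Ptr) {
  uintptr_t Left = reinterpret_cast<uintptr_t>(Ptr);
  if (Left > 1 &&
      pthread_setspecific(Key, reinterpret_cast<void *>(Left - 1)) == 0)
    return;  // Re-armed: we'll run again after the other destructors.
  // ... perform the actual teardown here (e.g. drain caches) ...
}

void registerLastChance() {
  pthread_key_create(&Key, lastChanceDtor);
  pthread_setspecific(Key, reinterpret_cast<void *>(
      static_cast<uintptr_t>(PTHREAD_DESTRUCTOR_ITERATIONS)));
}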
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd_exclusive.inc b/contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd_exclusive.inc
deleted file mode 100644
index 29db8a2eff1a..000000000000
--- a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd_exclusive.inc
+++ /dev/null
@@ -1,47 +0,0 @@
-//===-- scudo_tsd_exclusive.inc ---------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-///
-/// Scudo exclusive TSD fastpath functions implementation.
-///
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_TSD_H_
-#error "This file must be included inside scudo_tsd.h."
-#endif // SCUDO_TSD_H_
-
-#if SCUDO_TSD_EXCLUSIVE
-
-enum ThreadState : u8 {
- ThreadNotInitialized = 0,
- ThreadInitialized,
- ThreadTornDown,
-};
-__attribute__((
- tls_model("initial-exec"))) extern THREADLOCAL ThreadState ScudoThreadState;
-__attribute__((tls_model("initial-exec"))) extern THREADLOCAL ScudoTSD TSD;
-
-extern ScudoTSD FallbackTSD;
-
-ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
- if (LIKELY(ScudoThreadState != ThreadNotInitialized))
- return;
- initThread(MinimalInit);
-}
-
-ALWAYS_INLINE ScudoTSD *
-getTSDAndLock(bool *UnlockRequired) NO_THREAD_SAFETY_ANALYSIS {
- if (UNLIKELY(ScudoThreadState != ThreadInitialized)) {
- FallbackTSD.lock();
- *UnlockRequired = true;
- return &FallbackTSD;
- }
- *UnlockRequired = false;
- return &TSD;
-}
-
-#endif // SCUDO_TSD_EXCLUSIVE
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd_shared.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd_shared.cpp
deleted file mode 100644
index fd85a7c4017f..000000000000
--- a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd_shared.cpp
+++ /dev/null
@@ -1,107 +0,0 @@
-//===-- scudo_tsd_shared.cpp ------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-///
-/// Scudo shared TSD implementation.
-///
-//===----------------------------------------------------------------------===//
-
-#include "scudo_tsd.h"
-
-#if !SCUDO_TSD_EXCLUSIVE
-
-namespace __scudo {
-
-static pthread_once_t GlobalInitialized = PTHREAD_ONCE_INIT;
-pthread_key_t PThreadKey;
-
-static atomic_uint32_t CurrentIndex;
-static ScudoTSD *TSDs;
-static u32 NumberOfTSDs;
-static u32 CoPrimes[SCUDO_SHARED_TSD_POOL_SIZE];
-static u32 NumberOfCoPrimes = 0;
-
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
-__attribute__((tls_model("initial-exec")))
-THREADLOCAL ScudoTSD *CurrentTSD;
-#endif
-
-static void initOnce() {
- CHECK_EQ(pthread_key_create(&PThreadKey, NULL), 0);
- initScudo();
- NumberOfTSDs = Min(Max(1U, GetNumberOfCPUsCached()),
- static_cast<u32>(SCUDO_SHARED_TSD_POOL_SIZE));
- TSDs = reinterpret_cast<ScudoTSD *>(
- MmapOrDie(sizeof(ScudoTSD) * NumberOfTSDs, "ScudoTSDs"));
- for (u32 I = 0; I < NumberOfTSDs; I++) {
- TSDs[I].init();
- u32 A = I + 1;
- u32 B = NumberOfTSDs;
- while (B != 0) { const u32 T = A; A = B; B = T % B; }
- if (A == 1)
- CoPrimes[NumberOfCoPrimes++] = I + 1;
- }
-}
-
-ALWAYS_INLINE void setCurrentTSD(ScudoTSD *TSD) {
-#if SANITIZER_ANDROID
- *get_android_tls_ptr() = reinterpret_cast<uptr>(TSD);
-#elif SANITIZER_LINUX
- CurrentTSD = TSD;
-#else
- CHECK_EQ(pthread_setspecific(PThreadKey, reinterpret_cast<void *>(TSD)), 0);
-#endif // SANITIZER_ANDROID
-}
-
-void initThread(bool MinimalInit) {
- pthread_once(&GlobalInitialized, initOnce);
- // Initial context assignment is done in a plain round-robin fashion.
- u32 Index = atomic_fetch_add(&CurrentIndex, 1, memory_order_relaxed);
- setCurrentTSD(&TSDs[Index % NumberOfTSDs]);
-}
-
-ScudoTSD *getTSDAndLockSlow(ScudoTSD *TSD) NO_THREAD_SAFETY_ANALYSIS {
- if (NumberOfTSDs > 1) {
- // Use the Precedence of the current TSD as our random seed. Since we are in
- // the slow path, it means that tryLock failed, and as a result it's very
- // likely that said Precedence is non-zero.
- u32 RandState = static_cast<u32>(TSD->getPrecedence());
- const u32 R = Rand(&RandState);
- const u32 Inc = CoPrimes[R % NumberOfCoPrimes];
- u32 Index = R % NumberOfTSDs;
- uptr LowestPrecedence = UINTPTR_MAX;
- ScudoTSD *CandidateTSD = nullptr;
- // Go randomly through at most 4 contexts and find a candidate.
- for (u32 I = 0; I < Min(4U, NumberOfTSDs); I++) {
- if (TSDs[Index].tryLock()) {
- setCurrentTSD(&TSDs[Index]);
- return &TSDs[Index];
- }
- const uptr Precedence = TSDs[Index].getPrecedence();
- // A 0 precedence here means another thread just locked this TSD.
- if (Precedence && Precedence < LowestPrecedence) {
- CandidateTSD = &TSDs[Index];
- LowestPrecedence = Precedence;
- }
- Index += Inc;
- if (Index >= NumberOfTSDs)
- Index -= NumberOfTSDs;
- }
- if (CandidateTSD) {
- CandidateTSD->lock();
- setCurrentTSD(CandidateTSD);
- return CandidateTSD;
- }
- }
- // Last resort, stick with the current one.
- TSD->lock();
- return TSD;
-}
-
-} // namespace __scudo
-
-#endif // !SCUDO_TSD_EXCLUSIVE
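
The inlined Euclid loop in initOnce above collects every increment that is co-prime with NumberOfTSDs so that getTSDAndLockSlow's randomized walk can reach any TSD: stepping modulo N by a stride co-prime with N forms a full cycle, visiting every slot once before repeating. A small standalone demonstration:

#include <cstdio>

int main() {
  const unsigned N = 6, Inc = 5;  // gcd(5, 6) == 1, so 5 is a valid stride.
  unsigned Index = 2;             // Arbitrary starting slot.
  for (unsigned I = 0; I < N; I++) {
    std::printf("%u ", Index);    // Prints each of 0..5 exactly once.
    Index += Inc;
    if (Index >= N)
      Index -= N;                 // Same wrap-around as getTSDAndLockSlow().
  }
  return 0;                       // Output: 2 1 0 5 4 3
}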
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd_shared.inc b/contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd_shared.inc
deleted file mode 100644
index e46b044a81f8..000000000000
--- a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_tsd_shared.inc
+++ /dev/null
@@ -1,56 +0,0 @@
-//===-- scudo_tsd_shared.inc ------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-///
-/// Scudo shared TSD fastpath functions implementation.
-///
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_TSD_H_
-# error "This file must be included inside scudo_tsd.h."
-#endif // SCUDO_TSD_H_
-
-#if !SCUDO_TSD_EXCLUSIVE
-
-extern pthread_key_t PThreadKey;
-
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
-__attribute__((tls_model("initial-exec")))
-extern THREADLOCAL ScudoTSD *CurrentTSD;
-#endif
-
-ALWAYS_INLINE ScudoTSD* getCurrentTSD() {
-#if SANITIZER_ANDROID
- return reinterpret_cast<ScudoTSD *>(*get_android_tls_ptr());
-#elif SANITIZER_LINUX
- return CurrentTSD;
-#else
- return reinterpret_cast<ScudoTSD *>(pthread_getspecific(PThreadKey));
-#endif // SANITIZER_ANDROID
-}
-
-ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
- if (LIKELY(getCurrentTSD()))
- return;
- initThread(MinimalInit);
-}
-
-ScudoTSD *getTSDAndLockSlow(ScudoTSD *TSD);
-
-ALWAYS_INLINE ScudoTSD *
-getTSDAndLock(bool *UnlockRequired) NO_THREAD_SAFETY_ANALYSIS {
- ScudoTSD *TSD = getCurrentTSD();
- DCHECK(TSD && "No TSD associated with the current thread!");
- *UnlockRequired = true;
- // Try to lock the currently associated context.
- if (TSD->tryLock())
- return TSD;
- // If it failed, go the slow path.
- return getTSDAndLockSlow(TSD);
-}
-
-#endif // !SCUDO_TSD_EXCLUSIVE
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_utils.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/scudo_utils.cpp
deleted file mode 100644
index b7ce8f915817..000000000000
--- a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_utils.cpp
+++ /dev/null
@@ -1,145 +0,0 @@
-//===-- scudo_utils.cpp -----------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-///
-/// Platform specific utility functions.
-///
-//===----------------------------------------------------------------------===//
-
-#include "scudo_utils.h"
-
-#if defined(__x86_64__) || defined(__i386__)
-# include <cpuid.h>
-#elif defined(__arm__) || defined(__aarch64__)
-# include "sanitizer_common/sanitizer_getauxval.h"
-# if SANITIZER_FUCHSIA
-# include <zircon/syscalls.h>
-# include <zircon/features.h>
-# elif SANITIZER_POSIX
-# include "sanitizer_common/sanitizer_posix.h"
-# include <fcntl.h>
-# endif
-#endif
-
-#include <stdarg.h>
-
-// TODO(kostyak): remove __sanitizer *Printf uses in favor of our own less
-// complicated string formatting code. The following is a
-// temporary workaround to be able to use __sanitizer::VSNPrintf.
-namespace __sanitizer {
-
-extern int VSNPrintf(char *buff, int buff_length, const char *format,
- va_list args);
-
-} // namespace __sanitizer
-
-namespace __scudo {
-
-FORMAT(1, 2) void NORETURN dieWithMessage(const char *Format, ...) {
- static const char ScudoError[] = "Scudo ERROR: ";
- static constexpr uptr PrefixSize = sizeof(ScudoError) - 1;
- // Our messages are tiny, 256 characters is more than enough.
- char Message[256];
- va_list Args;
- va_start(Args, Format);
- internal_memcpy(Message, ScudoError, PrefixSize);
- VSNPrintf(Message + PrefixSize, sizeof(Message) - PrefixSize, Format, Args);
- va_end(Args);
- LogMessageOnPrintf(Message);
- if (common_flags()->abort_on_error)
- SetAbortMessage(Message);
- RawWrite(Message);
- Die();
-}
-
-#if defined(__x86_64__) || defined(__i386__)
-// i386 and x86_64 specific code to detect CRC32 hardware support via CPUID.
-// CRC32 requires the SSE 4.2 instruction set.
-# ifndef bit_SSE4_2
-# define bit_SSE4_2 bit_SSE42 // clang and gcc have different defines.
-# endif
-
-#ifndef signature_HYGON_ebx // They are not defined in gcc.
-// HYGON: "HygonGenuine".
-#define signature_HYGON_ebx 0x6f677948
-#define signature_HYGON_edx 0x6e65476e
-#define signature_HYGON_ecx 0x656e6975
-#endif
-
-bool hasHardwareCRC32() {
- u32 Eax, Ebx, Ecx, Edx;
- __get_cpuid(0, &Eax, &Ebx, &Ecx, &Edx);
- const bool IsIntel = (Ebx == signature_INTEL_ebx) &&
- (Edx == signature_INTEL_edx) &&
- (Ecx == signature_INTEL_ecx);
- const bool IsAMD = (Ebx == signature_AMD_ebx) &&
- (Edx == signature_AMD_edx) &&
- (Ecx == signature_AMD_ecx);
- const bool IsHygon = (Ebx == signature_HYGON_ebx) &&
- (Edx == signature_HYGON_edx) &&
- (Ecx == signature_HYGON_ecx);
- if (!IsIntel && !IsAMD && !IsHygon)
- return false;
- __get_cpuid(1, &Eax, &Ebx, &Ecx, &Edx);
- return !!(Ecx & bit_SSE4_2);
-}
-#elif defined(__arm__) || defined(__aarch64__)
-// For ARM and AArch64, hardware CRC32 support is indicated in the AT_HWCAP
-// auxiliary vector.
-# ifndef AT_HWCAP
-# define AT_HWCAP 16
-# endif
-# ifndef HWCAP_CRC32
-# define HWCAP_CRC32 (1 << 7) // HWCAP_CRC32 is missing on older platforms.
-# endif
-# if SANITIZER_POSIX
-bool hasHardwareCRC32ARMPosix() {
- uptr F = internal_open("/proc/self/auxv", O_RDONLY);
- if (internal_iserror(F))
- return false;
- struct { uptr Tag; uptr Value; } Entry = { 0, 0 };
- for (;;) {
- uptr N = internal_read(F, &Entry, sizeof(Entry));
- if (internal_iserror(N) || N != sizeof(Entry) ||
- (Entry.Tag == 0 && Entry.Value == 0) || Entry.Tag == AT_HWCAP)
- break;
- }
- internal_close(F);
- return (Entry.Tag == AT_HWCAP && (Entry.Value & HWCAP_CRC32) != 0);
-}
-# else
-bool hasHardwareCRC32ARMPosix() { return false; }
-# endif // SANITIZER_POSIX
-
-// Bionic doesn't initialize its globals early enough. This causes issues when
-// trying to access them from a preinit_array (b/25751302) or from another
-// constructor called before the libc one (b/68046352). __progname is
-// initialized after the other globals, so we can check its value to know if
-// calling getauxval is safe.
-extern "C" SANITIZER_WEAK_ATTRIBUTE char *__progname;
-inline bool areBionicGlobalsInitialized() {
- return !SANITIZER_ANDROID || (&__progname && __progname);
-}
-
-bool hasHardwareCRC32() {
-#if SANITIZER_FUCHSIA
- u32 HWCap;
- zx_status_t Status = zx_system_get_features(ZX_FEATURE_KIND_CPU, &HWCap);
- if (Status != ZX_OK || (HWCap & ZX_ARM64_FEATURE_ISA_CRC32) == 0)
- return false;
- return true;
-#else
- if (&getauxval && areBionicGlobalsInitialized())
- return !!(getauxval(AT_HWCAP) & HWCAP_CRC32);
- return hasHardwareCRC32ARMPosix();
-#endif // SANITIZER_FUCHSIA
-}
-#else
-bool hasHardwareCRC32() { return false; }
-#endif // defined(__x86_64__) || defined(__i386__)
-
-} // namespace __scudo
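
For comparison, the manual /proc/self/auxv scan in hasHardwareCRC32ARMPosix above exists because the runtime cannot always assume getauxval is available; ordinary code on a modern glibc or bionic could rely on the libc call directly, roughly as follows (a sketch, not Scudo code):

#include <sys/auxv.h>

#ifndef HWCAP_CRC32
#define HWCAP_CRC32 (1 << 7)  // Same bit position as used in the file above.
#endif

bool hasCRC32ViaLibc() {
  // getauxval(3) returns 0 for unknown types, so this is safe everywhere.
  return (getauxval(AT_HWCAP) & HWCAP_CRC32) != 0;
}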
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_utils.h b/contrib/llvm-project/compiler-rt/lib/scudo/scudo_utils.h
deleted file mode 100644
index b657c69d9baf..000000000000
--- a/contrib/llvm-project/compiler-rt/lib/scudo/scudo_utils.h
+++ /dev/null
@@ -1,36 +0,0 @@
-//===-- scudo_utils.h -------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-///
-/// Header for scudo_utils.cpp.
-///
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_UTILS_H_
-#define SCUDO_UTILS_H_
-
-#include "sanitizer_common/sanitizer_common.h"
-
-#include <string.h>
-
-namespace __scudo {
-
-template <class Dest, class Source>
-inline Dest bit_cast(const Source& source) {
- static_assert(sizeof(Dest) == sizeof(Source), "Sizes are not equal!");
- Dest dest;
- memcpy(&dest, &source, sizeof(dest));
- return dest;
-}
-
-void NORETURN dieWithMessage(const char *Format, ...);
-
-bool hasHardwareCRC32();
-
-} // namespace __scudo
-
-#endif // SCUDO_UTILS_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/allocator_common.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/allocator_common.h
new file mode 100644
index 000000000000..95f4776ac596
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/allocator_common.h
@@ -0,0 +1,85 @@
+//===-- allocator_common.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_ALLOCATOR_COMMON_H_
+#define SCUDO_ALLOCATOR_COMMON_H_
+
+#include "common.h"
+#include "list.h"
+
+namespace scudo {
+
+template <class SizeClassAllocator> struct TransferBatch {
+ typedef typename SizeClassAllocator::SizeClassMap SizeClassMap;
+ typedef typename SizeClassAllocator::CompactPtrT CompactPtrT;
+
+ static const u16 MaxNumCached = SizeClassMap::MaxNumCachedHint;
+ void setFromArray(CompactPtrT *Array, u16 N) {
+ DCHECK_LE(N, MaxNumCached);
+ Count = N;
+ memcpy(Batch, Array, sizeof(Batch[0]) * Count);
+ }
+ void appendFromArray(CompactPtrT *Array, u16 N) {
+ DCHECK_LE(N, MaxNumCached - Count);
+ memcpy(Batch + Count, Array, sizeof(Batch[0]) * N);
+ // u16 will be promoted to int by arithmetic type conversion.
+ Count = static_cast<u16>(Count + N);
+ }
+ void appendFromTransferBatch(TransferBatch *B, u16 N) {
+ DCHECK_LE(N, MaxNumCached - Count);
+ DCHECK_GE(B->Count, N);
+ // Append from the back of `B`.
+ memcpy(Batch + Count, B->Batch + (B->Count - N), sizeof(Batch[0]) * N);
+ // u16 will be promoted to int by arithmetic type conversion.
+ Count = static_cast<u16>(Count + N);
+ B->Count = static_cast<u16>(B->Count - N);
+ }
+ void clear() { Count = 0; }
+ void add(CompactPtrT P) {
+ DCHECK_LT(Count, MaxNumCached);
+ Batch[Count++] = P;
+ }
+ void moveToArray(CompactPtrT *Array) {
+ memcpy(Array, Batch, sizeof(Batch[0]) * Count);
+ clear();
+ }
+ u16 getCount() const { return Count; }
+ bool isEmpty() const { return Count == 0U; }
+ CompactPtrT get(u16 I) const {
+ DCHECK_LE(I, Count);
+ return Batch[I];
+ }
+ TransferBatch *Next;
+
+private:
+ CompactPtrT Batch[MaxNumCached];
+ u16 Count;
+};
+
+// A BatchGroup is used to collect blocks. Each group has a group id that
+// identifies the kind of blocks it contains.
+template <class SizeClassAllocator> struct BatchGroup {
+ // `Next` is used by IntrusiveList.
+ BatchGroup *Next;
+ // The compact base address of each group.
+ uptr CompactPtrGroupBase;
+ // Cache value of SizeClassAllocatorLocalCache::getMaxCached()
+ u16 MaxCachedPerBatch;
+ // Number of blocks pushed into this group. This is an increment-only
+ // counter.
+ uptr PushedBlocks;
+ // This is used to track how many bytes are not in-use since last time we
+ // tried to release pages.
+ uptr BytesInBGAtLastCheckpoint;
+ // Blocks are managed by TransferBatch in a list.
+ SinglyLinkedList<TransferBatch<SizeClassAllocator>> Batches;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_ALLOCATOR_COMMON_H_
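
A sketch of how the TransferBatch API added above is driven by a caller; the allocator type is a stand-in stub (real instantiations come from the primary allocators), so the values are illustrative only:

// Stub satisfying TransferBatch's type requirements, for illustration only.
struct StubAllocator {
  struct SizeClassMap { static const scudo::u16 MaxNumCachedHint = 14; };
  typedef scudo::u32 CompactPtrT;
};

void transferBatchExample() {
  scudo::TransferBatch<StubAllocator> Batch;
  Batch.clear();                    // Count == 0.
  scudo::u32 Blocks[4] = {0x10, 0x20, 0x30, 0x40};
  Batch.setFromArray(Blocks, 4);    // Count == 4.
  Batch.add(0x50);                  // Count == 5.
  scudo::u32 Out[5];
  Batch.moveToArray(Out);           // Copies 5 entries, then Count == 0.
}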
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/allocator_config.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/allocator_config.h
index e6f46b511dbf..3c6aa3acb0e4 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/allocator_config.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/allocator_config.h
@@ -11,6 +11,7 @@
#include "combined.h"
#include "common.h"
+#include "condition_variable.h"
#include "flags.h"
#include "primary32.h"
#include "primary64.h"
@@ -19,6 +20,22 @@
#include "tsd_exclusive.h"
#include "tsd_shared.h"
+// To use a custom configuration, define `SCUDO_USE_CUSTOM_CONFIG` and alias
+// the `Config` like:
+//
+// namespace scudo {
+// // The instance of Scudo will be initiated with `Config`.
+// typedef CustomConfig Config;
+// // Aliasing as default configuration to run the tests with this config.
+// typedef CustomConfig DefaultConfig;
+// } // namespace scudo
+//
+// Put them in the header `custom_scudo_config.h`; the custom configuration
+// will then be used, and all the tests can be run with it as well.
+#ifdef SCUDO_USE_CUSTOM_CONFIG
+#include "custom_scudo_config.h"
+#endif
+
namespace scudo {
// The combined allocator uses a structure as a template argument that
@@ -26,169 +43,226 @@ namespace scudo {
// allocator.
//
// struct ExampleConfig {
-// // SizeClassMap to use with the Primary.
-// using SizeClassMap = DefaultSizeClassMap;
// // Indicates possible support for Memory Tagging.
// static const bool MaySupportMemoryTagging = false;
-// // Defines the Primary allocator to use.
-// typedef SizeClassAllocator64<ExampleConfig> Primary;
-// // Log2 of the size of a size class region, as used by the Primary.
-// static const uptr PrimaryRegionSizeLog = 30U;
-// // Defines the type and scale of a compact pointer. A compact pointer can
-// // be understood as the offset of a pointer within the region it belongs
-// // to, in increments of a power-of-2 scale.
-// // eg: Ptr = Base + (CompactPtr << Scale).
-// typedef u32 PrimaryCompactPtrT;
-// static const uptr PrimaryCompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
-// // Indicates support for offsetting the start of a region by
-// // a random number of pages. Only used with primary64.
-// static const bool PrimaryEnableRandomOffset = true;
-// // Call map for user memory with at least this size. Only used with
-// // primary64.
-// static const uptr PrimaryMapSizeIncrement = 1UL << 18;
-// // Defines the minimal & maximal release interval that can be set.
-// static const s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
-// static const s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
-// // Defines the type of cache used by the Secondary. Some additional
-// // configuration entries can be necessary depending on the Cache.
-// typedef MapAllocatorNoCache SecondaryCache;
+//
// // Thread-Specific Data Registry used, shared or exclusive.
// template <class A> using TSDRegistryT = TSDRegistrySharedT<A, 8U, 4U>;
+//
+// struct Primary {
+// // SizeClassMap to use with the Primary.
+// using SizeClassMap = DefaultSizeClassMap;
+//
+// // Log2 of the size of a size class region, as used by the Primary.
+// static const uptr RegionSizeLog = 30U;
+//
+// // Log2 of the size of a block group, as used by the Primary. Each group
+// // covers a range of memory addresses; blocks in that range belong to
+// // the same group. In general, a single region may use a 1 or 2 MB group
+// // size, while multiple regions use a group size equal to the region
+// // size, because the region size is usually smaller than 1 MB.
+// // A smaller value gives finer-grained control of memory usage, but the
+// // trade-off is that deallocation may take longer.
+// static const uptr GroupSizeLog = 20U;
+//
+// // Defines the type and scale of a compact pointer. A compact pointer can
+// // be understood as the offset of a pointer within the region it belongs
+// // to, in increments of a power-of-2 scale.
+// // eg: Ptr = Base + (CompactPtr << Scale).
+// typedef u32 CompactPtrT;
+// static const uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
+//
+// // Indicates support for offsetting the start of a region by
+// // a random number of pages. Only used with primary64.
+// static const bool EnableRandomOffset = true;
+//
+// // Call map for user memory with at least this size. Only used with
+// // primary64.
+// static const uptr MapSizeIncrement = 1UL << 18;
+//
+// // Defines the minimal & maximal release interval that can be set.
+// static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
+// static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+//
+// // Use a condition variable to shorten the wait for refills of the
+// // freelist. Note that this depends on each platform's condition-variable
+// // implementation, so performance may vary and a benefit is not
+// // guaranteed.
+// // Note that both variables have to be defined to enable it.
+// static const bool UseConditionVariable = true;
+// using ConditionVariableT = ConditionVariableLinux;
+// };
+// // Defines the type of Primary allocator to use.
+// template <typename Config> using PrimaryT = SizeClassAllocator64<Config>;
+//
+// // Defines the type of cache used by the Secondary. Some additional
+// // configuration entries can be necessary depending on the Cache.
+// struct Secondary {
+// struct Cache {
+// static const u32 EntriesArraySize = 32U;
+// static const u32 QuarantineSize = 0U;
+// static const u32 DefaultMaxEntriesCount = 32U;
+// static const uptr DefaultMaxEntrySize = 1UL << 19;
+// static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
+// static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+// };
+// // Defines the type of Secondary Cache to use.
+// template <typename Config> using CacheT = MapAllocatorCache<Config>;
+// };
+// // Defines the type of Secondary allocator to use.
+// template <typename Config> using SecondaryT = MapAllocator<Config>;
// };
-// Default configurations for various platforms.
+#ifndef SCUDO_USE_CUSTOM_CONFIG
+// Default configurations for various platforms. Note this is only enabled when
+// there's no custom configuration in the build system.
struct DefaultConfig {
- using SizeClassMap = DefaultSizeClassMap;
static const bool MaySupportMemoryTagging = true;
+ template <class A> using TSDRegistryT = TSDRegistryExT<A>; // Exclusive
+ struct Primary {
+ using SizeClassMap = DefaultSizeClassMap;
#if SCUDO_CAN_USE_PRIMARY64
- typedef SizeClassAllocator64<DefaultConfig> Primary;
- static const uptr PrimaryRegionSizeLog = 32U;
- typedef uptr PrimaryCompactPtrT;
- static const uptr PrimaryCompactPtrScale = 0;
- static const bool PrimaryEnableRandomOffset = true;
- static const uptr PrimaryMapSizeIncrement = 1UL << 18;
+ static const uptr RegionSizeLog = 32U;
+ static const uptr GroupSizeLog = 21U;
+ typedef uptr CompactPtrT;
+ static const uptr CompactPtrScale = 0;
+ static const bool EnableRandomOffset = true;
+ static const uptr MapSizeIncrement = 1UL << 18;
#else
- typedef SizeClassAllocator32<DefaultConfig> Primary;
- static const uptr PrimaryRegionSizeLog = 19U;
- typedef uptr PrimaryCompactPtrT;
+ static const uptr RegionSizeLog = 19U;
+ static const uptr GroupSizeLog = 19U;
+ typedef uptr CompactPtrT;
+#endif
+ static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
+ static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+ };
+#if SCUDO_CAN_USE_PRIMARY64
+ template <typename Config> using PrimaryT = SizeClassAllocator64<Config>;
+#else
+ template <typename Config> using PrimaryT = SizeClassAllocator32<Config>;
#endif
- static const s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
- static const s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
- typedef MapAllocatorCache<DefaultConfig> SecondaryCache;
- static const u32 SecondaryCacheEntriesArraySize = 32U;
- static const u32 SecondaryCacheQuarantineSize = 0U;
- static const u32 SecondaryCacheDefaultMaxEntriesCount = 32U;
- static const uptr SecondaryCacheDefaultMaxEntrySize = 1UL << 19;
- static const s32 SecondaryCacheMinReleaseToOsIntervalMs = INT32_MIN;
- static const s32 SecondaryCacheMaxReleaseToOsIntervalMs = INT32_MAX;
+ struct Secondary {
+ struct Cache {
+ static const u32 EntriesArraySize = 32U;
+ static const u32 QuarantineSize = 0U;
+ static const u32 DefaultMaxEntriesCount = 32U;
+ static const uptr DefaultMaxEntrySize = 1UL << 19;
+ static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
+ static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+ };
+ template <typename Config> using CacheT = MapAllocatorCache<Config>;
+ };
- template <class A> using TSDRegistryT = TSDRegistryExT<A>; // Exclusive
+ template <typename Config> using SecondaryT = MapAllocator<Config>;
};
+
+#endif // SCUDO_USE_CUSTOM_CONFIG
+
struct AndroidConfig {
- using SizeClassMap = AndroidSizeClassMap;
static const bool MaySupportMemoryTagging = true;
+ template <class A>
+ using TSDRegistryT = TSDRegistrySharedT<A, 8U, 2U>; // Shared, max 8 TSDs.
+ struct Primary {
+ using SizeClassMap = AndroidSizeClassMap;
#if SCUDO_CAN_USE_PRIMARY64
- typedef SizeClassAllocator64<AndroidConfig> Primary;
- static const uptr PrimaryRegionSizeLog = 28U;
- typedef u32 PrimaryCompactPtrT;
- static const uptr PrimaryCompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
- static const bool PrimaryEnableRandomOffset = true;
- static const uptr PrimaryMapSizeIncrement = 1UL << 18;
+ static const uptr RegionSizeLog = 28U;
+ typedef u32 CompactPtrT;
+ static const uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
+ static const uptr GroupSizeLog = 20U;
+ static const bool EnableRandomOffset = true;
+ static const uptr MapSizeIncrement = 1UL << 18;
#else
- typedef SizeClassAllocator32<AndroidConfig> Primary;
- static const uptr PrimaryRegionSizeLog = 18U;
- typedef uptr PrimaryCompactPtrT;
+ static const uptr RegionSizeLog = 18U;
+ static const uptr GroupSizeLog = 18U;
+ typedef uptr CompactPtrT;
#endif
- static const s32 PrimaryMinReleaseToOsIntervalMs = 1000;
- static const s32 PrimaryMaxReleaseToOsIntervalMs = 1000;
-
- typedef MapAllocatorCache<AndroidConfig> SecondaryCache;
- static const u32 SecondaryCacheEntriesArraySize = 256U;
- static const u32 SecondaryCacheQuarantineSize = 32U;
- static const u32 SecondaryCacheDefaultMaxEntriesCount = 32U;
- static const uptr SecondaryCacheDefaultMaxEntrySize = 2UL << 20;
- static const s32 SecondaryCacheMinReleaseToOsIntervalMs = 0;
- static const s32 SecondaryCacheMaxReleaseToOsIntervalMs = 1000;
-
- template <class A>
- using TSDRegistryT = TSDRegistrySharedT<A, 8U, 2U>; // Shared, max 8 TSDs.
-};
-
-struct AndroidSvelteConfig {
- using SizeClassMap = SvelteSizeClassMap;
- static const bool MaySupportMemoryTagging = false;
-
+ static const s32 MinReleaseToOsIntervalMs = 1000;
+ static const s32 MaxReleaseToOsIntervalMs = 1000;
+ };
#if SCUDO_CAN_USE_PRIMARY64
- typedef SizeClassAllocator64<AndroidSvelteConfig> Primary;
- static const uptr PrimaryRegionSizeLog = 27U;
- typedef u32 PrimaryCompactPtrT;
- static const uptr PrimaryCompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
- static const bool PrimaryEnableRandomOffset = true;
- static const uptr PrimaryMapSizeIncrement = 1UL << 18;
+ template <typename Config> using PrimaryT = SizeClassAllocator64<Config>;
#else
- typedef SizeClassAllocator32<AndroidSvelteConfig> Primary;
- static const uptr PrimaryRegionSizeLog = 16U;
- typedef uptr PrimaryCompactPtrT;
+ template <typename Config> using PrimaryT = SizeClassAllocator32<Config>;
#endif
- static const s32 PrimaryMinReleaseToOsIntervalMs = 1000;
- static const s32 PrimaryMaxReleaseToOsIntervalMs = 1000;
- typedef MapAllocatorCache<AndroidSvelteConfig> SecondaryCache;
- static const u32 SecondaryCacheEntriesArraySize = 16U;
- static const u32 SecondaryCacheQuarantineSize = 32U;
- static const u32 SecondaryCacheDefaultMaxEntriesCount = 4U;
- static const uptr SecondaryCacheDefaultMaxEntrySize = 1UL << 18;
- static const s32 SecondaryCacheMinReleaseToOsIntervalMs = 0;
- static const s32 SecondaryCacheMaxReleaseToOsIntervalMs = 0;
+ struct Secondary {
+ struct Cache {
+ static const u32 EntriesArraySize = 256U;
+ static const u32 QuarantineSize = 32U;
+ static const u32 DefaultMaxEntriesCount = 32U;
+ static const uptr DefaultMaxEntrySize = 2UL << 20;
+ static const s32 MinReleaseToOsIntervalMs = 0;
+ static const s32 MaxReleaseToOsIntervalMs = 1000;
+ };
+ template <typename Config> using CacheT = MapAllocatorCache<Config>;
+ };
- template <class A>
- using TSDRegistryT = TSDRegistrySharedT<A, 2U, 1U>; // Shared, max 2 TSDs.
+ template <typename Config> using SecondaryT = MapAllocator<Config>;
};
#if SCUDO_CAN_USE_PRIMARY64
struct FuchsiaConfig {
- using SizeClassMap = FuchsiaSizeClassMap;
static const bool MaySupportMemoryTagging = false;
-
- typedef SizeClassAllocator64<FuchsiaConfig> Primary;
- static const uptr PrimaryRegionSizeLog = 30U;
- typedef u32 PrimaryCompactPtrT;
- static const bool PrimaryEnableRandomOffset = true;
- static const uptr PrimaryMapSizeIncrement = 1UL << 18;
- static const uptr PrimaryCompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
- static const s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
- static const s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
-
- typedef MapAllocatorNoCache SecondaryCache;
template <class A>
using TSDRegistryT = TSDRegistrySharedT<A, 8U, 4U>; // Shared, max 8 TSDs.
+
+ struct Primary {
+ using SizeClassMap = FuchsiaSizeClassMap;
+#if SCUDO_RISCV64
+ // Support 39-bit VMA for riscv-64
+ static const uptr RegionSizeLog = 28U;
+ static const uptr GroupSizeLog = 19U;
+#else
+ static const uptr RegionSizeLog = 30U;
+ static const uptr GroupSizeLog = 21U;
+#endif
+ typedef u32 CompactPtrT;
+ static const bool EnableRandomOffset = true;
+ static const uptr MapSizeIncrement = 1UL << 18;
+ static const uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
+ static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
+ static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+ };
+ template <typename Config> using PrimaryT = SizeClassAllocator64<Config>;
+
+ struct Secondary {
+ template <typename Config> using CacheT = MapAllocatorNoCache<Config>;
+ };
+ template <typename Config> using SecondaryT = MapAllocator<Config>;
};
struct TrustyConfig {
- using SizeClassMap = TrustySizeClassMap;
- static const bool MaySupportMemoryTagging = false;
-
- typedef SizeClassAllocator64<TrustyConfig> Primary;
- // Some apps have 1 page of heap total so small regions are necessary.
- static const uptr PrimaryRegionSizeLog = 10U;
- typedef u32 PrimaryCompactPtrT;
- static const bool PrimaryEnableRandomOffset = false;
- // Trusty is extremely memory-constrained so minimally round up map calls.
- static const uptr PrimaryMapSizeIncrement = 1UL << 4;
- static const uptr PrimaryCompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
- static const s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
- static const s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
-
- typedef MapAllocatorNoCache SecondaryCache;
+ static const bool MaySupportMemoryTagging = true;
template <class A>
using TSDRegistryT = TSDRegistrySharedT<A, 1U, 1U>; // Shared, max 1 TSD.
+
+ struct Primary {
+ using SizeClassMap = TrustySizeClassMap;
+ static const uptr RegionSizeLog = 28U;
+ static const uptr GroupSizeLog = 20U;
+ typedef u32 CompactPtrT;
+ static const bool EnableRandomOffset = false;
+ static const uptr MapSizeIncrement = 1UL << 12;
+ static const uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
+ static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
+ static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+ };
+ template <typename Config> using PrimaryT = SizeClassAllocator64<Config>;
+
+ struct Secondary {
+ template <typename Config> using CacheT = MapAllocatorNoCache<Config>;
+ };
+
+ template <typename Config> using SecondaryT = MapAllocator<Config>;
};
#endif
+#ifndef SCUDO_USE_CUSTOM_CONFIG
+
#if SCUDO_ANDROID
typedef AndroidConfig Config;
#elif SCUDO_FUCHSIA
@@ -199,6 +273,8 @@ typedef TrustyConfig Config;
typedef DefaultConfig Config;
#endif
+#endif // SCUDO_USE_CUSTOM_CONFIG
+
} // namespace scudo
#endif // SCUDO_ALLOCATOR_CONFIG_H_
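
Tying the SCUDO_USE_CUSTOM_CONFIG blocks in this hunk together, a build-provided custom_scudo_config.h could plausibly look like the following, mirroring the members listed in the ExampleConfig skeleton (all values here are placeholders, not recommendations):

// custom_scudo_config.h -- sketch of a build-provided configuration.
namespace scudo {
struct CustomConfig {
  static const bool MaySupportMemoryTagging = false;
  template <class A> using TSDRegistryT = TSDRegistrySharedT<A, 4U, 2U>;
  struct Primary {
    using SizeClassMap = DefaultSizeClassMap;
    static const uptr RegionSizeLog = 30U;
    static const uptr GroupSizeLog = 20U;
    typedef u32 CompactPtrT;
    static const uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
    static const bool EnableRandomOffset = true;
    static const uptr MapSizeIncrement = 1UL << 18;
    static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
    static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
  };
  template <typename Config> using PrimaryT = SizeClassAllocator64<Config>;
  struct Secondary {
    template <typename Config> using CacheT = MapAllocatorNoCache<Config>;
  };
  template <typename Config> using SecondaryT = MapAllocator<Config>;
};
typedef CustomConfig Config;        // Instance the runtime is built with.
typedef CustomConfig DefaultConfig; // Lets the tests run with this config.
} // namespace scudo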
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/atomic_helpers.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/atomic_helpers.h
index d88f5d7be642..a68ffd16291c 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/atomic_helpers.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/atomic_helpers.h
@@ -133,10 +133,10 @@ inline void atomic_store_relaxed(volatile T *A, typename T::Type V) {
}
template <typename T>
-inline typename T::Type atomic_compare_exchange(volatile T *A,
- typename T::Type Cmp,
- typename T::Type Xchg) {
- atomic_compare_exchange_strong(A, &Cmp, Xchg, memory_order_acquire);
+inline typename T::Type
+atomic_compare_exchange_strong(volatile T *A, typename T::Type Cmp,
+ typename T::Type Xchg, memory_order MO) {
+ atomic_compare_exchange_strong(A, &Cmp, Xchg, MO);
return Cmp;
}
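
After this rename, the strong compare-exchange helper returns the value it observed rather than a success flag, so callers detect success by comparing against their expected value; a usage sketch (hypothetical caller):

// Hypothetical caller: single-shot initialization flag.
scudo::atomic_u32 Initialized = {};

bool tryClaimInit() {
  // Returns the previous value: 0 means we won the race and may initialize.
  return scudo::atomic_compare_exchange_strong(
             &Initialized, 0U, 1U, scudo::memory_order_acquire) == 0U;
}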
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/checksum.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/checksum.cpp
index 05d4ba54bfc8..2c277391a2ec 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/checksum.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/checksum.cpp
@@ -8,6 +8,7 @@
#include "checksum.h"
#include "atomic_helpers.h"
+#include "chunk.h"
#if defined(__x86_64__) || defined(__i386__)
#include <cpuid.h>
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/checksum.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/checksum.h
index a63b1b4f064d..f8eda81fd912 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/checksum.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/checksum.h
@@ -12,12 +12,17 @@
#include "internal_defs.h"
// Hardware CRC32 is supported at compilation via the following:
-// - for i386 & x86_64: -msse4.2
+// - for i386 & x86_64: -mcrc32 (earlier: -msse4.2)
// - for ARM & AArch64: -march=armv8-a+crc or -mcrc
// An additional check must be performed at runtime as well to make sure the
// emitted instructions are valid on the target host.
-#ifdef __SSE4_2__
+#if defined(__CRC32__)
+// NB: clang has <crc32intrin.h> but GCC does not
+#include <smmintrin.h>
+#define CRC32_INTRINSIC \
+ FIRST_32_SECOND_64(__builtin_ia32_crc32si, __builtin_ia32_crc32di)
+#elif defined(__SSE4_2__)
#include <smmintrin.h>
#define CRC32_INTRINSIC FIRST_32_SECOND_64(_mm_crc32_u32, _mm_crc32_u64)
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/chunk.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/chunk.h
index 69b8e1b12a91..9228df047189 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/chunk.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/chunk.h
@@ -25,7 +25,7 @@ inline u16 computeChecksum(u32 Seed, uptr Value, uptr *Array, uptr ArraySize) {
// as opposed to only for crc32_hw.cpp. This means that other hardware
// specific instructions were likely emitted at other places, and as a result
// there is no reason to not use it here.
-#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
+#if defined(__CRC32__) || defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
u32 Crc = static_cast<u32>(CRC32_INTRINSIC(Seed, Value));
for (uptr I = 0; I < ArraySize; I++)
Crc = static_cast<u32>(CRC32_INTRINSIC(Crc, Array[I]));
@@ -42,7 +42,8 @@ inline u16 computeChecksum(u32 Seed, uptr Value, uptr *Array, uptr ArraySize) {
Checksum = computeBSDChecksum(Checksum, Array[I]);
return Checksum;
}
-#endif // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
+#endif // defined(__CRC32__) || defined(__SSE4_2__) ||
+ // defined(__ARM_FEATURE_CRC32)
}
namespace Chunk {
@@ -84,7 +85,7 @@ constexpr uptr OffsetMask = (1UL << 16) - 1;
constexpr uptr ChecksumMask = (1UL << 16) - 1;
constexpr uptr getHeaderSize() {
- return roundUpTo(sizeof(PackedHeader), 1U << SCUDO_MIN_ALIGNMENT_LOG);
+ return roundUp(sizeof(PackedHeader), 1U << SCUDO_MIN_ALIGNMENT_LOG);
}
inline AtomicPackedHeader *getAtomicHeader(void *Ptr) {
@@ -127,19 +128,6 @@ inline void loadHeader(u32 Cookie, const void *Ptr,
reportHeaderCorruption(const_cast<void *>(Ptr));
}
-inline void compareExchangeHeader(u32 Cookie, void *Ptr,
- UnpackedHeader *NewUnpackedHeader,
- UnpackedHeader *OldUnpackedHeader) {
- NewUnpackedHeader->Checksum =
- computeHeaderChecksum(Cookie, Ptr, NewUnpackedHeader);
- PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
- PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
- if (UNLIKELY(!atomic_compare_exchange_strong(
- getAtomicHeader(Ptr), &OldPackedHeader, NewPackedHeader,
- memory_order_relaxed)))
- reportHeaderRace(Ptr);
-}
-
inline bool isValid(u32 Cookie, const void *Ptr,
UnpackedHeader *NewUnpackedHeader) {
PackedHeader NewPackedHeader = atomic_load_relaxed(getConstAtomicHeader(Ptr));
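With compareExchangeHeader() removed, header updates follow a load/mutate/store pattern: loadHeader() validates the checksum on the way in, storeHeader() recomputes it on the way out, and the CAS-based reportHeaderRace() check is dropped. The idiom as it appears throughout the combined allocator below:

  Chunk::UnpackedHeader Header;
  Chunk::loadHeader(Cookie, Ptr, &Header);   // verifies the stored checksum
  Header.State = Chunk::State::Available;    // mutate the unpacked copy
  Chunk::storeHeader(Cookie, Ptr, &Header);  // re-checksums and stores
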
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/combined.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/combined.h
index fd5360ce0f55..4624f83d142a 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/combined.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/combined.h
@@ -14,6 +14,7 @@
#include "flags.h"
#include "flags_parser.h"
#include "local_cache.h"
+#include "mem_map.h"
#include "memtag.h"
#include "options.h"
#include "quarantine.h"
@@ -42,13 +43,14 @@ extern "C" size_t android_unsafe_frame_pointer_chase(scudo::uptr *buf,
namespace scudo {
-template <class Params, void (*PostInitCallback)(void) = EmptyCallback>
+template <class Config, void (*PostInitCallback)(void) = EmptyCallback>
class Allocator {
public:
- using PrimaryT = typename Params::Primary;
+ using PrimaryT = typename Config::template PrimaryT<Config>;
+ using SecondaryT = typename Config::template SecondaryT<Config>;
using CacheT = typename PrimaryT::CacheT;
- typedef Allocator<Params, PostInitCallback> ThisT;
- typedef typename Params::template TSDRegistryT<ThisT> TSDRegistryT;
+ typedef Allocator<Config, PostInitCallback> ThisT;
+ typedef typename Config::template TSDRegistryT<ThisT> TSDRegistryT;
void callPostInitCallback() {
pthread_once(&PostInitNonce, PostInitCallback);
@@ -66,14 +68,13 @@ public:
if (UNLIKELY(Header.State != Chunk::State::Quarantined))
reportInvalidChunkState(AllocatorAction::Recycling, Ptr);
- Chunk::UnpackedHeader NewHeader = Header;
- NewHeader.State = Chunk::State::Available;
- Chunk::compareExchangeHeader(Allocator.Cookie, Ptr, &NewHeader, &Header);
+ Header.State = Chunk::State::Available;
+ Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);
- if (allocatorSupportsMemoryTagging<Params>())
+ if (allocatorSupportsMemoryTagging<Config>())
Ptr = untagPointer(Ptr);
- void *BlockBegin = Allocator::getBlockBegin(Ptr, &NewHeader);
- Cache.deallocate(NewHeader.ClassId, BlockBegin);
+ void *BlockBegin = Allocator::getBlockBegin(Ptr, &Header);
+ Cache.deallocate(Header.ClassId, BlockBegin);
}
// We take a shortcut when allocating a quarantine batch by working with the
@@ -97,7 +98,7 @@ public:
// Reset tag to 0 as this chunk may have been previously used for a tagged
// user allocation.
- if (UNLIKELY(useMemoryTagging<Params>(Allocator.Primary.Options.load())))
+ if (UNLIKELY(useMemoryTagging<Config>(Allocator.Primary.Options.load())))
storeTags(reinterpret_cast<uptr>(Ptr),
reinterpret_cast<uptr>(Ptr) + sizeof(QuarantineBatch));
@@ -116,9 +117,8 @@ public:
DCHECK_EQ(Header.Offset, 0);
DCHECK_EQ(Header.SizeOrUnusedBytes, sizeof(QuarantineBatch));
- Chunk::UnpackedHeader NewHeader = Header;
- NewHeader.State = Chunk::State::Available;
- Chunk::compareExchangeHeader(Allocator.Cookie, Ptr, &NewHeader, &Header);
+ Header.State = Chunk::State::Available;
+ Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);
Cache.deallocate(QuarantineClassId,
reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
Chunk::getHeaderSize()));
@@ -158,10 +158,9 @@ public:
Primary.Options.set(OptionBit::DeallocTypeMismatch);
if (getFlags()->delete_size_mismatch)
Primary.Options.set(OptionBit::DeleteSizeMismatch);
- if (allocatorSupportsMemoryTagging<Params>() &&
+ if (allocatorSupportsMemoryTagging<Config>() &&
systemSupportsMemoryTagging())
Primary.Options.set(OptionBit::UseMemoryTagging);
- Primary.Options.set(OptionBit::UseOddEvenTags);
QuarantineMaxChunkSize =
static_cast<u32>(getFlags()->quarantine_max_chunk_size);
@@ -173,6 +172,8 @@ public:
Quarantine.init(
static_cast<uptr>(getFlags()->quarantine_size_kb << 10),
static_cast<uptr>(getFlags()->thread_local_quarantine_size_kb << 10));
+
+ mapAndInitializeRingBuffer();
}
// Initialize the embedded GWP-ASan instance. Requires the main allocator to
@@ -185,6 +186,7 @@ public:
getFlags()->GWP_ASAN_MaxSimultaneousAllocations;
Opt.SampleRate = getFlags()->GWP_ASAN_SampleRate;
Opt.InstallSignalHandlers = getFlags()->GWP_ASAN_InstallSignalHandlers;
+ Opt.Recoverable = getFlags()->GWP_ASAN_Recoverable;
// Embedded GWP-ASan is locked through the Scudo atfork handler (via
// Allocator::disable calling GWPASan.disable). Disable GWP-ASan's atfork
// handler.
@@ -196,7 +198,8 @@ public:
gwp_asan::segv_handler::installSignalHandlers(
&GuardedAlloc, Printf,
gwp_asan::backtrace::getPrintBacktraceFunction(),
- gwp_asan::backtrace::getSegvBacktraceFunction());
+ gwp_asan::backtrace::getSegvBacktraceFunction(),
+ Opt.Recoverable);
GuardedAllocSlotSize =
GuardedAlloc.getAllocatorState()->maximumAllocationSize();
@@ -205,11 +208,22 @@ public:
#endif // GWP_ASAN_HOOKS
}
+#ifdef GWP_ASAN_HOOKS
+ const gwp_asan::AllocationMetadata *getGwpAsanAllocationMetadata() {
+ return GuardedAlloc.getMetadataRegion();
+ }
+
+ const gwp_asan::AllocatorState *getGwpAsanAllocatorState() {
+ return GuardedAlloc.getAllocatorState();
+ }
+#endif // GWP_ASAN_HOOKS
+
ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
TSDRegistry.initThreadMaybe(this, MinimalInit);
}
void unmapTestOnly() {
+ unmapRingBuffer();
TSDRegistry.unmapTestOnly(this);
Primary.unmapTestOnly();
Secondary.unmapTestOnly();
@@ -221,6 +235,7 @@ public:
}
TSDRegistryT *getTSDRegistry() { return &TSDRegistry; }
+ QuarantineT *getQuarantine() { return &Quarantine; }
// The Cache must be provided zero-initialized.
void initCache(CacheT *Cache) { Cache->init(&Stats, &Primary); }
@@ -231,13 +246,22 @@ public:
// - unlinking the local stats from the global ones (destroying the cache does
// the last two items).
void commitBack(TSD<ThisT> *TSD) {
- Quarantine.drain(&TSD->QuarantineCache,
- QuarantineCallback(*this, TSD->Cache));
- TSD->Cache.destroy(&Stats);
+ TSD->assertLocked(/*BypassCheck=*/true);
+ Quarantine.drain(&TSD->getQuarantineCache(),
+ QuarantineCallback(*this, TSD->getCache()));
+ TSD->getCache().destroy(&Stats);
+ }
+
+ void drainCache(TSD<ThisT> *TSD) {
+ TSD->assertLocked(/*BypassCheck=*/true);
+ Quarantine.drainAndRecycle(&TSD->getQuarantineCache(),
+ QuarantineCallback(*this, TSD->getCache()));
+ TSD->getCache().drain();
}
+ void drainCaches() { TSDRegistry.drainCaches(this); }
ALWAYS_INLINE void *getHeaderTaggedPointer(void *Ptr) {
- if (!allocatorSupportsMemoryTagging<Params>())
+ if (!allocatorSupportsMemoryTagging<Config>())
return Ptr;
auto UntaggedPtr = untagPointer(Ptr);
if (UntaggedPtr != Ptr)
@@ -249,7 +273,7 @@ public:
}
ALWAYS_INLINE uptr addHeaderTag(uptr Ptr) {
- if (!allocatorSupportsMemoryTagging<Params>())
+ if (!allocatorSupportsMemoryTagging<Config>())
return Ptr;
return addFixedTag(Ptr, 2);
}
@@ -272,7 +296,7 @@ public:
#endif
}
- uptr computeOddEvenMaskForPointerMaybe(Options Options, uptr Ptr,
+ uptr computeOddEvenMaskForPointerMaybe(const Options &Options, uptr Ptr,
uptr ClassId) {
if (!Options.get(OptionBit::UseOddEvenTags))
return 0;
@@ -287,7 +311,7 @@ public:
NOINLINE void *allocate(uptr Size, Chunk::Origin Origin,
uptr Alignment = MinAlignment,
- bool ZeroContents = false) {
+ bool ZeroContents = false) NO_THREAD_SAFETY_ANALYSIS {
initThreadMaybe();
const Options Options = Primary.Options.load();
@@ -302,8 +326,6 @@ public:
#ifdef GWP_ASAN_HOOKS
if (UNLIKELY(GuardedAlloc.shouldSample())) {
if (void *Ptr = GuardedAlloc.allocate(Size, Alignment)) {
- if (UNLIKELY(&__scudo_allocate_hook))
- __scudo_allocate_hook(Ptr, Size);
Stats.lock();
Stats.add(StatAllocated, GuardedAllocSlotSize);
Stats.sub(StatFree, GuardedAllocSlotSize);
@@ -324,7 +346,7 @@ public:
// to be sure that there will be an address in the block that will satisfy
// the alignment.
const uptr NeededSize =
- roundUpTo(Size, MinAlignment) +
+ roundUp(Size, MinAlignment) +
((Alignment > MinAlignment) ? Alignment : Chunk::getHeaderSize());
// Takes care of extravagantly large sizes as well as integer overflows.
@@ -344,33 +366,35 @@ public:
DCHECK_NE(ClassId, 0U);
bool UnlockRequired;
auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
- Block = TSD->Cache.allocate(ClassId);
- // If the allocation failed, the most likely reason with a 32-bit primary
- // is the region being full. In that event, retry in each successively
- // larger class until it fits. If it fails to fit in the largest class,
- // fallback to the Secondary.
+ TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
+ Block = TSD->getCache().allocate(ClassId);
+ // If the allocation failed, retry in each successively larger class until
+      // it fits. If it fails to fit in the largest class, fall back to the
+ // Secondary.
if (UNLIKELY(!Block)) {
while (ClassId < SizeClassMap::LargestClassId && !Block)
- Block = TSD->Cache.allocate(++ClassId);
+ Block = TSD->getCache().allocate(++ClassId);
if (!Block)
ClassId = 0;
}
if (UnlockRequired)
TSD->unlock();
}
- if (UNLIKELY(ClassId == 0))
+ if (UNLIKELY(ClassId == 0)) {
Block = Secondary.allocate(Options, Size, Alignment, &SecondaryBlockEnd,
FillContents);
+ }
if (UNLIKELY(!Block)) {
if (Options.get(OptionBit::MayReturnNull))
return nullptr;
+ printStats();
reportOutOfMemory(NeededSize);
}
const uptr BlockUptr = reinterpret_cast<uptr>(Block);
const uptr UnalignedUserPtr = BlockUptr + Chunk::getHeaderSize();
- const uptr UserPtr = roundUpTo(UnalignedUserPtr, Alignment);
+ const uptr UserPtr = roundUp(UnalignedUserPtr, Alignment);
void *Ptr = reinterpret_cast<void *>(UserPtr);
void *TaggedPtr = Ptr;
@@ -386,7 +410,7 @@ public:
//
// When memory tagging is enabled, zeroing the contents is done as part of
// setting the tag.
- if (UNLIKELY(useMemoryTagging<Params>(Options))) {
+ if (UNLIKELY(useMemoryTagging<Config>(Options))) {
uptr PrevUserPtr;
Chunk::UnpackedHeader Header;
const uptr BlockSize = PrimaryT::getSizeByClassId(ClassId);
@@ -429,7 +453,7 @@ public:
PrevUserPtr == UserPtr &&
(TaggedUserPtr = loadTag(UserPtr)) != UserPtr) {
uptr PrevEnd = TaggedUserPtr + Header.SizeOrUnusedBytes;
- const uptr NextPage = roundUpTo(TaggedUserPtr, getPageSizeCached());
+ const uptr NextPage = roundUp(TaggedUserPtr, getPageSizeCached());
if (NextPage < PrevEnd && loadTag(NextPage) != NextPage)
PrevEnd = NextPage;
TaggedPtr = reinterpret_cast<void *>(TaggedUserPtr);
@@ -442,8 +466,8 @@ public:
// was freed, it would not have been retagged and thus zeroed, and
// therefore it needs to be zeroed now.
memset(TaggedPtr, 0,
- Min(Size, roundUpTo(PrevEnd - TaggedUserPtr,
- archMemoryTagGranuleSize())));
+ Min(Size, roundUp(PrevEnd - TaggedUserPtr,
+ archMemoryTagGranuleSize())));
} else if (Size) {
// Clear any stack metadata that may have previously been stored in
// the chunk data.
@@ -468,7 +492,7 @@ public:
} else {
Block = addHeaderTag(Block);
Ptr = addHeaderTag(Ptr);
- if (UNLIKELY(useMemoryTagging<Params>(Options))) {
+ if (UNLIKELY(useMemoryTagging<Config>(Options))) {
storeTags(reinterpret_cast<uptr>(Block), reinterpret_cast<uptr>(Ptr));
storeSecondaryAllocationStackMaybe(Options, Ptr, Size);
}
@@ -494,14 +518,14 @@ public:
Chunk::SizeOrUnusedBytesMask;
Chunk::storeHeader(Cookie, Ptr, &Header);
- if (UNLIKELY(&__scudo_allocate_hook))
- __scudo_allocate_hook(TaggedPtr, Size);
-
return TaggedPtr;
}
NOINLINE void deallocate(void *Ptr, Chunk::Origin Origin, uptr DeleteSize = 0,
UNUSED uptr Alignment = MinAlignment) {
+ if (UNLIKELY(!Ptr))
+ return;
+
// For a deallocation, we only ensure minimal initialization, meaning thread
// local data will be left uninitialized for now (when using ELF TLS). The
// fallback cache will be used instead. This is a workaround for a situation
@@ -510,12 +534,6 @@ public:
// being destroyed properly. Any other heap operation will do a full init.
initThreadMaybe(/*MinimalInit=*/true);
- if (UNLIKELY(&__scudo_deallocate_hook))
- __scudo_deallocate_hook(Ptr);
-
- if (UNLIKELY(!Ptr))
- return;
-
#ifdef GWP_ASAN_HOOKS
if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr))) {
GuardedAlloc.deallocate(Ptr);
@@ -594,48 +612,47 @@ public:
if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(OldPtr), MinAlignment)))
reportMisalignedPointer(AllocatorAction::Reallocating, OldPtr);
- Chunk::UnpackedHeader OldHeader;
- Chunk::loadHeader(Cookie, OldPtr, &OldHeader);
+ Chunk::UnpackedHeader Header;
+ Chunk::loadHeader(Cookie, OldPtr, &Header);
- if (UNLIKELY(OldHeader.State != Chunk::State::Allocated))
+ if (UNLIKELY(Header.State != Chunk::State::Allocated))
reportInvalidChunkState(AllocatorAction::Reallocating, OldPtr);
// Pointer has to be allocated with a malloc-type function. Some
// applications think that it is OK to realloc a memalign'ed pointer, which
// will trigger this check. It really isn't.
if (Options.get(OptionBit::DeallocTypeMismatch)) {
- if (UNLIKELY(OldHeader.OriginOrWasZeroed != Chunk::Origin::Malloc))
+ if (UNLIKELY(Header.OriginOrWasZeroed != Chunk::Origin::Malloc))
reportDeallocTypeMismatch(AllocatorAction::Reallocating, OldPtr,
- OldHeader.OriginOrWasZeroed,
+ Header.OriginOrWasZeroed,
Chunk::Origin::Malloc);
}
- void *BlockBegin = getBlockBegin(OldTaggedPtr, &OldHeader);
+ void *BlockBegin = getBlockBegin(OldTaggedPtr, &Header);
uptr BlockEnd;
uptr OldSize;
- const uptr ClassId = OldHeader.ClassId;
+ const uptr ClassId = Header.ClassId;
if (LIKELY(ClassId)) {
BlockEnd = reinterpret_cast<uptr>(BlockBegin) +
SizeClassMap::getSizeByClassId(ClassId);
- OldSize = OldHeader.SizeOrUnusedBytes;
+ OldSize = Header.SizeOrUnusedBytes;
} else {
BlockEnd = SecondaryT::getBlockEnd(BlockBegin);
OldSize = BlockEnd - (reinterpret_cast<uptr>(OldTaggedPtr) +
- OldHeader.SizeOrUnusedBytes);
+ Header.SizeOrUnusedBytes);
}
// If the new chunk still fits in the previously allocated block (with a
// reasonable delta), we just keep the old block, and update the chunk
// header to reflect the size change.
if (reinterpret_cast<uptr>(OldTaggedPtr) + NewSize <= BlockEnd) {
if (NewSize > OldSize || (OldSize - NewSize) < getPageSizeCached()) {
- Chunk::UnpackedHeader NewHeader = OldHeader;
- NewHeader.SizeOrUnusedBytes =
+ Header.SizeOrUnusedBytes =
(ClassId ? NewSize
: BlockEnd -
(reinterpret_cast<uptr>(OldTaggedPtr) + NewSize)) &
Chunk::SizeOrUnusedBytesMask;
- Chunk::compareExchangeHeader(Cookie, OldPtr, &NewHeader, &OldHeader);
- if (UNLIKELY(useMemoryTagging<Params>(Options))) {
+ Chunk::storeHeader(Cookie, OldPtr, &Header);
+ if (UNLIKELY(useMemoryTagging<Config>(Options))) {
if (ClassId) {
resizeTaggedChunk(reinterpret_cast<uptr>(OldTaggedPtr) + OldSize,
reinterpret_cast<uptr>(OldTaggedPtr) + NewSize,
@@ -656,7 +673,7 @@ public:
void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
if (LIKELY(NewPtr)) {
memcpy(NewPtr, OldTaggedPtr, Min(NewSize, OldSize));
- quarantineOrDeallocateChunk(Options, OldTaggedPtr, &OldHeader, OldSize);
+ quarantineOrDeallocateChunk(Options, OldTaggedPtr, &Header, OldSize);
}
return NewPtr;
}
@@ -664,7 +681,7 @@ public:
// TODO(kostyak): disable() is currently best-effort. There are some small
// windows of time when an allocation could still succeed after
// this function finishes. We will revisit that later.
- void disable() {
+ void disable() NO_THREAD_SAFETY_ANALYSIS {
initThreadMaybe();
#ifdef GWP_ASAN_HOOKS
GuardedAlloc.disable();
@@ -676,7 +693,7 @@ public:
Secondary.disable();
}
- void enable() {
+ void enable() NO_THREAD_SAFETY_ANALYSIS {
initThreadMaybe();
Secondary.enable();
Primary.enable();
@@ -695,9 +712,7 @@ public:
// sizing purposes.
uptr getStats(char *Buffer, uptr Size) {
ScopedString Str;
- disable();
const uptr Length = getStats(&Str) + 1;
- enable();
if (Length < Size)
Size = Length;
if (Buffer && Size) {
@@ -709,15 +724,22 @@ public:
void printStats() {
ScopedString Str;
- disable();
getStats(&Str);
- enable();
Str.output();
}
- void releaseToOS() {
+ void printFragmentationInfo() {
+ ScopedString Str;
+ Primary.getFragmentationInfo(&Str);
+ // Secondary allocator dumps the fragmentation data in getStats().
+ Str.output();
+ }
+
+ void releaseToOS(ReleaseToOS ReleaseType) {
initThreadMaybe();
- Primary.releaseToOS();
+ if (ReleaseType == ReleaseToOS::ForceAll)
+ drainCaches();
+ Primary.releaseToOS(ReleaseType);
Secondary.releaseToOS();
}
@@ -731,7 +753,7 @@ public:
Base = untagPointer(Base);
const uptr From = Base;
const uptr To = Base + Size;
- bool MayHaveTaggedPrimary = allocatorSupportsMemoryTagging<Params>() &&
+ bool MayHaveTaggedPrimary = allocatorSupportsMemoryTagging<Config>() &&
systemSupportsMemoryTagging();
auto Lambda = [this, From, To, MayHaveTaggedPrimary, Callback,
Arg](uptr Block) {
@@ -753,9 +775,9 @@ public:
}
if (Header.State == Chunk::State::Allocated) {
uptr TaggedChunk = Chunk;
- if (allocatorSupportsMemoryTagging<Params>())
+ if (allocatorSupportsMemoryTagging<Config>())
TaggedChunk = untagPointer(TaggedChunk);
- if (useMemoryTagging<Params>(Primary.Options.load()))
+ if (useMemoryTagging<Config>(Primary.Options.load()))
TaggedChunk = loadTag(Chunk);
Callback(TaggedChunk, getSize(reinterpret_cast<void *>(Chunk), &Header),
Arg);
@@ -806,10 +828,15 @@ public:
// for it, which then forces realloc to copy the usable size of a chunk as
// opposed to its actual size.
uptr getUsableSize(const void *Ptr) {
- initThreadMaybe();
if (UNLIKELY(!Ptr))
return 0;
+ return getAllocSize(Ptr);
+ }
+
+ uptr getAllocSize(const void *Ptr) {
+ initThreadMaybe();
+
#ifdef GWP_ASAN_HOOKS
if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr)))
return GuardedAlloc.getSize(Ptr);
@@ -818,9 +845,11 @@ public:
Ptr = getHeaderTaggedPointer(const_cast<void *>(Ptr));
Chunk::UnpackedHeader Header;
Chunk::loadHeader(Cookie, Ptr, &Header);
- // Getting the usable size of a chunk only makes sense if it's allocated.
+
+ // Getting the alloc size of a chunk only makes sense if it's allocated.
if (UNLIKELY(Header.State != Chunk::State::Allocated))
reportInvalidChunkState(AllocatorAction::Sizing, const_cast<void *>(Ptr));
+
return getSize(Ptr, &Header);
}
@@ -847,7 +876,7 @@ public:
}
bool useMemoryTaggingTestOnly() const {
- return useMemoryTagging<Params>(Primary.Options.load());
+ return useMemoryTagging<Config>(Primary.Options.load());
}
void disableMemoryTagging() {
// If we haven't been initialized yet, we need to initialize now in order to
@@ -857,7 +886,7 @@ public:
// callback), which may cause mappings to be created with memory tagging
// enabled.
TSDRegistry.initOnceMaybe(this);
- if (allocatorSupportsMemoryTagging<Params>()) {
+ if (allocatorSupportsMemoryTagging<Config>()) {
Secondary.disableMemoryTagging();
Primary.Options.clear(OptionBit::UseMemoryTagging);
}
@@ -865,6 +894,10 @@ public:
void setTrackAllocationStacks(bool Track) {
initThreadMaybe();
+ if (getFlags()->allocation_ring_buffer_size <= 0) {
+ DCHECK(!Primary.Options.load().get(OptionBit::TrackAllocationStacks));
+ return;
+ }
if (Track)
Primary.Options.set(OptionBit::TrackAllocationStacks);
else
@@ -896,11 +929,15 @@ public:
return PrimaryT::getRegionInfoArraySize();
}
- const char *getRingBufferAddress() const {
- return reinterpret_cast<const char *>(&RingBuffer);
+ const char *getRingBufferAddress() {
+ initThreadMaybe();
+ return RawRingBuffer;
}
- static uptr getRingBufferSize() { return sizeof(RingBuffer); }
+ uptr getRingBufferSize() {
+ initThreadMaybe();
+ return RingBufferElements ? ringBufferSizeInBytes(RingBufferElements) : 0;
+ }
static const uptr MaxTraceSize = 64;
@@ -910,16 +947,17 @@ public:
if (!Depot->find(Hash, &RingPos, &Size))
return;
for (unsigned I = 0; I != Size && I != MaxTraceSize; ++I)
- Trace[I] = (*Depot)[RingPos + I];
+ Trace[I] = static_cast<uintptr_t>((*Depot)[RingPos + I]);
}
static void getErrorInfo(struct scudo_error_info *ErrorInfo,
uintptr_t FaultAddr, const char *DepotPtr,
const char *RegionInfoPtr, const char *RingBufferPtr,
- const char *Memory, const char *MemoryTags,
- uintptr_t MemoryAddr, size_t MemorySize) {
+ size_t RingBufferSize, const char *Memory,
+ const char *MemoryTags, uintptr_t MemoryAddr,
+ size_t MemorySize) {
*ErrorInfo = {};
- if (!allocatorSupportsMemoryTagging<Params>() ||
+ if (!allocatorSupportsMemoryTagging<Config>() ||
MemoryAddr + MemorySize < MemoryAddr)
return;
@@ -936,7 +974,7 @@ public:
// Check the ring buffer. For primary allocations this will only find UAF;
// for secondary allocations we can find either UAF or OOB.
getRingBufferErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
- RingBufferPtr);
+ RingBufferPtr, RingBufferSize);
// Check for OOB in the 28 blocks surrounding the 3 we checked earlier.
// Beyond that we are likely to hit false positives.
@@ -947,7 +985,6 @@ public:
}
private:
- using SecondaryT = MapAllocator<Params>;
typedef typename PrimaryT::SizeClassMap SizeClassMap;
static const uptr MinAlignmentLog = SCUDO_MIN_ALIGNMENT_LOG;
@@ -959,7 +996,7 @@ private:
static_assert(MinAlignment >= sizeof(Chunk::PackedHeader),
"Minimal alignment must at least cover a chunk header.");
- static_assert(!allocatorSupportsMemoryTagging<Params>() ||
+ static_assert(!allocatorSupportsMemoryTagging<Config>() ||
MinAlignment >= archMemoryTagGranuleSize(),
"");
@@ -1003,14 +1040,14 @@ private:
};
atomic_uptr Pos;
-#ifdef SCUDO_FUZZ
- static const uptr NumEntries = 2;
-#else
- static const uptr NumEntries = 32768;
-#endif
- Entry Entries[NumEntries];
+    // An array of Size (at least one) elements of type Entry immediately
+    // follows this struct.
};
- AllocationRingBuffer RingBuffer = {};
+  // Pointer to the memory-mapped area starting with the AllocationRingBuffer
+  // struct, immediately followed by Size elements of type Entry.
+ char *RawRingBuffer = {};
+ u32 RingBufferElements = 0;
+ MemMapT RawRingBufferMap;
// The following might get optimized out by the compiler.
NOINLINE void performSanityChecks() {
@@ -1059,40 +1096,40 @@ private:
const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
if (LIKELY(Header->ClassId))
return SizeOrUnusedBytes;
- if (allocatorSupportsMemoryTagging<Params>())
+ if (allocatorSupportsMemoryTagging<Config>())
Ptr = untagPointer(const_cast<void *>(Ptr));
return SecondaryT::getBlockEnd(getBlockBegin(Ptr, Header)) -
reinterpret_cast<uptr>(Ptr) - SizeOrUnusedBytes;
}
- void quarantineOrDeallocateChunk(Options Options, void *TaggedPtr,
- Chunk::UnpackedHeader *Header, uptr Size) {
+ void quarantineOrDeallocateChunk(const Options &Options, void *TaggedPtr,
+ Chunk::UnpackedHeader *Header,
+ uptr Size) NO_THREAD_SAFETY_ANALYSIS {
void *Ptr = getHeaderTaggedPointer(TaggedPtr);
- Chunk::UnpackedHeader NewHeader = *Header;
    // If the quarantine is disabled, or the actual size of a chunk is 0 or
    // larger than the maximum allowed, we return the chunk directly to the
    // backend.
// This purposefully underflows for Size == 0.
const bool BypassQuarantine = !Quarantine.getCacheSize() ||
((Size - 1) >= QuarantineMaxChunkSize) ||
- !NewHeader.ClassId;
+ !Header->ClassId;
if (BypassQuarantine)
- NewHeader.State = Chunk::State::Available;
+ Header->State = Chunk::State::Available;
else
- NewHeader.State = Chunk::State::Quarantined;
- NewHeader.OriginOrWasZeroed = useMemoryTagging<Params>(Options) &&
- NewHeader.ClassId &&
- !TSDRegistry.getDisableMemInit();
- Chunk::compareExchangeHeader(Cookie, Ptr, &NewHeader, Header);
+ Header->State = Chunk::State::Quarantined;
+ Header->OriginOrWasZeroed = useMemoryTagging<Config>(Options) &&
+ Header->ClassId &&
+ !TSDRegistry.getDisableMemInit();
+ Chunk::storeHeader(Cookie, Ptr, Header);
- if (UNLIKELY(useMemoryTagging<Params>(Options))) {
+ if (UNLIKELY(useMemoryTagging<Config>(Options))) {
u8 PrevTag = extractTag(reinterpret_cast<uptr>(TaggedPtr));
storeDeallocationStackMaybe(Options, Ptr, PrevTag, Size);
- if (NewHeader.ClassId) {
+ if (Header->ClassId) {
if (!TSDRegistry.getDisableMemInit()) {
uptr TaggedBegin, TaggedEnd;
const uptr OddEvenMask = computeOddEvenMaskForPointerMaybe(
- Options, reinterpret_cast<uptr>(getBlockBegin(Ptr, &NewHeader)),
- NewHeader.ClassId);
+ Options, reinterpret_cast<uptr>(getBlockBegin(Ptr, Header)),
+ Header->ClassId);
// Exclude the previous tag so that immediate use after free is
// detected 100% of the time.
setRandomTag(Ptr, Size, OddEvenMask | (1UL << PrevTag), &TaggedBegin,
@@ -1101,18 +1138,26 @@ private:
}
}
if (BypassQuarantine) {
- if (allocatorSupportsMemoryTagging<Params>())
+ if (allocatorSupportsMemoryTagging<Config>())
Ptr = untagPointer(Ptr);
- void *BlockBegin = getBlockBegin(Ptr, &NewHeader);
- const uptr ClassId = NewHeader.ClassId;
+ void *BlockBegin = getBlockBegin(Ptr, Header);
+ const uptr ClassId = Header->ClassId;
if (LIKELY(ClassId)) {
bool UnlockRequired;
auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
- TSD->Cache.deallocate(ClassId, BlockBegin);
+ TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
+ const bool CacheDrained =
+ TSD->getCache().deallocate(ClassId, BlockBegin);
if (UnlockRequired)
TSD->unlock();
+        // When we have drained some blocks back to the Primary from the TSD,
+        // we may have the chance to release some pages as well. Note that to
+        // avoid blocking other threads' access to the TSD, release the TSD
+        // first, then try the page release.
+ if (CacheDrained)
+ Primary.tryReleaseToOS(ClassId, ReleaseToOS::Normal);
} else {
- if (UNLIKELY(useMemoryTagging<Params>(Options)))
+ if (UNLIKELY(useMemoryTagging<Config>(Options)))
storeTags(reinterpret_cast<uptr>(BlockBegin),
reinterpret_cast<uptr>(Ptr));
Secondary.deallocate(Options, BlockBegin);
@@ -1120,8 +1165,9 @@ private:
} else {
bool UnlockRequired;
auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
- Quarantine.put(&TSD->QuarantineCache,
- QuarantineCallback(*this, TSD->Cache), Ptr, Size);
+ TSD->assertLocked(/*BypassCheck=*/!UnlockRequired);
+ Quarantine.put(&TSD->getQuarantineCache(),
+ QuarantineCallback(*this, TSD->getCache()), Ptr, Size);
if (UnlockRequired)
TSD->unlock();
}
@@ -1181,22 +1227,22 @@ private:
void resizeTaggedChunk(uptr OldPtr, uptr NewPtr, uptr NewSize,
uptr BlockEnd) {
- uptr RoundOldPtr = roundUpTo(OldPtr, archMemoryTagGranuleSize());
+ uptr RoundOldPtr = roundUp(OldPtr, archMemoryTagGranuleSize());
uptr RoundNewPtr;
if (RoundOldPtr >= NewPtr) {
// If the allocation is shrinking we just need to set the tag past the end
// of the allocation to 0. See explanation in storeEndMarker() above.
- RoundNewPtr = roundUpTo(NewPtr, archMemoryTagGranuleSize());
+ RoundNewPtr = roundUp(NewPtr, archMemoryTagGranuleSize());
} else {
// Set the memory tag of the region
- // [RoundOldPtr, roundUpTo(NewPtr, archMemoryTagGranuleSize()))
+ // [RoundOldPtr, roundUp(NewPtr, archMemoryTagGranuleSize()))
// to the pointer tag stored in OldPtr.
RoundNewPtr = storeTags(RoundOldPtr, NewPtr);
}
storeEndMarker(RoundNewPtr, NewSize, BlockEnd);
}
- void storePrimaryAllocationStackMaybe(Options Options, void *Ptr) {
+ void storePrimaryAllocationStackMaybe(const Options &Options, void *Ptr) {
if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
return;
auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
@@ -1207,9 +1253,9 @@ private:
void storeRingBufferEntry(void *Ptr, u32 AllocationTrace, u32 AllocationTid,
uptr AllocationSize, u32 DeallocationTrace,
u32 DeallocationTid) {
- uptr Pos = atomic_fetch_add(&RingBuffer.Pos, 1, memory_order_relaxed);
+ uptr Pos = atomic_fetch_add(&getRingBuffer()->Pos, 1, memory_order_relaxed);
typename AllocationRingBuffer::Entry *Entry =
- &RingBuffer.Entries[Pos % AllocationRingBuffer::NumEntries];
+ getRingBufferEntry(RawRingBuffer, Pos % RingBufferElements);
// First invalidate our entry so that we don't attempt to interpret a
// partially written state in getSecondaryErrorInfo(). The fences below
@@ -1228,7 +1274,7 @@ private:
atomic_store_relaxed(&Entry->Ptr, reinterpret_cast<uptr>(Ptr));
}
- void storeSecondaryAllocationStackMaybe(Options Options, void *Ptr,
+ void storeSecondaryAllocationStackMaybe(const Options &Options, void *Ptr,
uptr Size) {
if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
return;
@@ -1243,8 +1289,8 @@ private:
storeRingBufferEntry(untagPointer(Ptr), Trace, Tid, Size, 0, 0);
}
- void storeDeallocationStackMaybe(Options Options, void *Ptr, u8 PrevTag,
- uptr Size) {
+ void storeDeallocationStackMaybe(const Options &Options, void *Ptr,
+ u8 PrevTag, uptr Size) {
if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
return;
@@ -1261,8 +1307,8 @@ private:
}
static const size_t NumErrorReports =
- sizeof(((scudo_error_info *)0)->reports) /
- sizeof(((scudo_error_info *)0)->reports[0]);
+ sizeof(((scudo_error_info *)nullptr)->reports) /
+ sizeof(((scudo_error_info *)nullptr)->reports[0]);
static void getInlineErrorInfo(struct scudo_error_info *ErrorInfo,
size_t &NextErrorReport, uintptr_t FaultAddr,
@@ -1350,15 +1396,19 @@ private:
size_t &NextErrorReport,
uintptr_t FaultAddr,
const StackDepot *Depot,
- const char *RingBufferPtr) {
+ const char *RingBufferPtr,
+ size_t RingBufferSize) {
auto *RingBuffer =
reinterpret_cast<const AllocationRingBuffer *>(RingBufferPtr);
+ size_t RingBufferElements = ringBufferElementsFromBytes(RingBufferSize);
+ if (!RingBuffer || RingBufferElements == 0)
+ return;
uptr Pos = atomic_load_relaxed(&RingBuffer->Pos);
- for (uptr I = Pos - 1; I != Pos - 1 - AllocationRingBuffer::NumEntries &&
+ for (uptr I = Pos - 1; I != Pos - 1 - RingBufferElements &&
NextErrorReport != NumErrorReports;
--I) {
- auto *Entry = &RingBuffer->Entries[I % AllocationRingBuffer::NumEntries];
+ auto *Entry = getRingBufferEntry(RingBufferPtr, I % RingBufferElements);
uptr EntryPtr = atomic_load_relaxed(&Entry->Ptr);
if (!EntryPtr)
continue;
@@ -1421,8 +1471,66 @@ private:
Primary.getStats(Str);
Secondary.getStats(Str);
Quarantine.getStats(Str);
+ TSDRegistry.getStats(Str);
return Str->length();
}
+
+ static typename AllocationRingBuffer::Entry *
+ getRingBufferEntry(char *RawRingBuffer, uptr N) {
+ return &reinterpret_cast<typename AllocationRingBuffer::Entry *>(
+ &RawRingBuffer[sizeof(AllocationRingBuffer)])[N];
+ }
+ static const typename AllocationRingBuffer::Entry *
+ getRingBufferEntry(const char *RawRingBuffer, uptr N) {
+ return &reinterpret_cast<const typename AllocationRingBuffer::Entry *>(
+ &RawRingBuffer[sizeof(AllocationRingBuffer)])[N];
+ }
+
+ void mapAndInitializeRingBuffer() {
+ if (getFlags()->allocation_ring_buffer_size <= 0)
+ return;
+ u32 AllocationRingBufferSize =
+ static_cast<u32>(getFlags()->allocation_ring_buffer_size);
+ MemMapT MemMap;
+ MemMap.map(
+ /*Addr=*/0U,
+ roundUp(ringBufferSizeInBytes(AllocationRingBufferSize),
+ getPageSizeCached()),
+ "scudo:ring_buffer");
+ RawRingBuffer = reinterpret_cast<char *>(MemMap.getBase());
+ RawRingBufferMap = MemMap;
+ RingBufferElements = AllocationRingBufferSize;
+ static_assert(sizeof(AllocationRingBuffer) %
+ alignof(typename AllocationRingBuffer::Entry) ==
+ 0,
+ "invalid alignment");
+ }
+
+ void unmapRingBuffer() {
+ auto *RingBuffer = getRingBuffer();
+ if (RingBuffer != nullptr) {
+ RawRingBufferMap.unmap(RawRingBufferMap.getBase(),
+ RawRingBufferMap.getCapacity());
+ }
+ RawRingBuffer = nullptr;
+ }
+
+ static constexpr size_t ringBufferSizeInBytes(u32 RingBufferElements) {
+ return sizeof(AllocationRingBuffer) +
+ RingBufferElements * sizeof(typename AllocationRingBuffer::Entry);
+ }
+
+ static constexpr size_t ringBufferElementsFromBytes(size_t Bytes) {
+ if (Bytes < sizeof(AllocationRingBuffer)) {
+ return 0;
+ }
+ return (Bytes - sizeof(AllocationRingBuffer)) /
+ sizeof(typename AllocationRingBuffer::Entry);
+ }
+
+ inline AllocationRingBuffer *getRingBuffer() {
+ return reinterpret_cast<AllocationRingBuffer *>(RawRingBuffer);
+ }
};
} // namespace scudo
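The ring buffer is no longer a fixed-size member of the allocator: a single mapping holds an AllocationRingBuffer header immediately followed by RingBufferElements entries, with ringBufferSizeInBytes() and ringBufferElementsFromBytes() converting between the two views. A reduced sketch of the trailing-array arithmetic (EntrySketch/HeaderSketch are simplified stand-ins for the real types):

  struct EntrySketch { scudo::uptr Ptr; };         // stand-in for Entry
  struct HeaderSketch { scudo::atomic_uptr Pos; }; // stand-in for the header

  // Entries start right past the header; mapAndInitializeRingBuffer()
  // static_asserts the header size is a multiple of the entry alignment.
  inline EntrySketch *entryAt(char *Raw, scudo::uptr N) {
    return &reinterpret_cast<EntrySketch *>(&Raw[sizeof(HeaderSketch)])[N];
  }

Writers wrap by slot, as storeRingBufferEntry() does above: the slot index is the monotonically increasing Pos counter taken modulo RingBufferElements.
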
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/common.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/common.cpp
index 666f95400c7e..06e930638f6f 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/common.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/common.cpp
@@ -21,18 +21,4 @@ uptr getPageSizeSlow() {
return PageSizeCached;
}
-// Fatal internal map() or unmap() error (potentially OOM related).
-void NORETURN dieOnMapUnmapError(uptr SizeIfOOM) {
- char Error[128] = "Scudo ERROR: internal map or unmap failure\n";
- if (SizeIfOOM) {
- formatString(
- Error, sizeof(Error),
- "Scudo ERROR: internal map failure (NO MEMORY) requesting %zuKB\n",
- SizeIfOOM >> 10);
- }
- outputRaw(Error);
- setAbortMessage(Error);
- die();
-}
-
} // namespace scudo
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/common.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/common.h
index bc3dfec6dbba..ae45683f1ee3 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/common.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/common.h
@@ -17,6 +17,7 @@
#include <stddef.h>
#include <string.h>
+#include <unistd.h>
namespace scudo {
@@ -27,17 +28,31 @@ template <class Dest, class Source> inline Dest bit_cast(const Source &S) {
return D;
}
-inline constexpr uptr roundUpTo(uptr X, uptr Boundary) {
+inline constexpr bool isPowerOfTwo(uptr X) { return (X & (X - 1)) == 0; }
+
+inline constexpr uptr roundUp(uptr X, uptr Boundary) {
+ DCHECK(isPowerOfTwo(Boundary));
return (X + Boundary - 1) & ~(Boundary - 1);
}
+inline constexpr uptr roundUpSlow(uptr X, uptr Boundary) {
+ return ((X + Boundary - 1) / Boundary) * Boundary;
+}
-inline constexpr uptr roundDownTo(uptr X, uptr Boundary) {
+inline constexpr uptr roundDown(uptr X, uptr Boundary) {
+ DCHECK(isPowerOfTwo(Boundary));
return X & ~(Boundary - 1);
}
+inline constexpr uptr roundDownSlow(uptr X, uptr Boundary) {
+ return (X / Boundary) * Boundary;
+}
inline constexpr bool isAligned(uptr X, uptr Alignment) {
+ DCHECK(isPowerOfTwo(Alignment));
return (X & (Alignment - 1)) == 0;
}
+inline constexpr bool isAlignedSlow(uptr X, uptr Alignment) {
+ return X % Alignment == 0;
+}
template <class T> constexpr T Min(T A, T B) { return A < B ? A : B; }
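A worked example of the renamed helpers: for a power-of-two boundary B, masking with ~(B - 1) clears the low bits in a single operation, which is the precondition the new DCHECK(isPowerOfTwo(Boundary)) guards; the *Slow variants pay for a division to support arbitrary boundaries.

  // roundUp(13, 8)     = (13 + 7) & ~7      = 16
  // roundDown(13, 8)   = 13 & ~7            = 8
  // roundUpSlow(13, 6) = ((13 + 5) / 6) * 6 = 18  (6 is not a power of two)
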
@@ -49,14 +64,12 @@ template <class T> void Swap(T &A, T &B) {
B = Tmp;
}
-inline bool isPowerOfTwo(uptr X) { return (X & (X - 1)) == 0; }
-
inline uptr getMostSignificantSetBitIndex(uptr X) {
DCHECK_NE(X, 0U);
return SCUDO_WORDSIZE - 1U - static_cast<uptr>(__builtin_clzl(X));
}
-inline uptr roundUpToPowerOfTwo(uptr Size) {
+inline uptr roundUpPowerOfTwo(uptr Size) {
DCHECK(Size);
if (isPowerOfTwo(Size))
return Size;
@@ -99,19 +112,19 @@ template <typename T> inline void shuffle(T *A, u32 N, u32 *RandState) {
*RandState = State;
}
-// Hardware specific inlinable functions.
+inline void computePercentage(uptr Numerator, uptr Denominator, uptr *Integral,
+ uptr *Fractional) {
+ constexpr uptr Digits = 100;
+ if (Denominator == 0) {
+ *Integral = 100;
+ *Fractional = 0;
+ return;
+ }
-inline void yieldProcessor(u8 Count) {
-#if defined(__i386__) || defined(__x86_64__)
- __asm__ __volatile__("" ::: "memory");
- for (u8 I = 0; I < Count; I++)
- __asm__ __volatile__("pause");
-#elif defined(__aarch64__) || defined(__arm__)
- __asm__ __volatile__("" ::: "memory");
- for (u8 I = 0; I < Count; I++)
- __asm__ __volatile__("yield");
-#endif
- __asm__ __volatile__("" ::: "memory");
+ *Integral = Numerator * Digits / Denominator;
+ *Fractional =
+ (((Numerator * Digits) % Denominator) * Digits + Denominator / 2) /
+ Denominator;
}
// Platform specific functions.
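computePercentage() takes the spot of the removed yieldProcessor(); it splits Numerator/Denominator into an integral percentage and a two-digit fractional part rounded to nearest, for the fragmentation reporting added elsewhere in this patch. A usage sketch:

  // 1 page in use out of 3 formats as "33.33%".
  scudo::uptr Integral, Fractional;
  scudo::computePercentage(/*Numerator=*/1, /*Denominator=*/3, &Integral,
                           &Fractional);
  // Integral == 33, Fractional == 33; a zero Denominator yields 100 and 0.
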
@@ -119,9 +132,10 @@ inline void yieldProcessor(u8 Count) {
extern uptr PageSizeCached;
uptr getPageSizeSlow();
inline uptr getPageSizeCached() {
- // Bionic uses a hardcoded value.
- if (SCUDO_ANDROID)
- return 4096U;
+#if SCUDO_ANDROID && defined(PAGE_SIZE)
+ // Most Android builds have a build-time constant page size.
+ return PAGE_SIZE;
+#endif
if (LIKELY(PageSizeCached))
return PageSizeCached;
return getPageSizeSlow();
@@ -133,6 +147,9 @@ u32 getNumberOfCPUs();
const char *getEnv(const char *Name);
u64 getMonotonicTime();
+// Gets the time faster but with less accuracy. May fall back to
+// getMonotonicTime() if no fast version is available.
+u64 getMonotonicTimeFast();
u32 getThreadID();
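getMonotonicTimeFast() lets hot paths trade accuracy for speed, and may simply forward to getMonotonicTime() where no cheap clock exists. A hedged, Linux-flavoured sketch (CLOCK_MONOTONIC_COARSE is Linux-specific; the real per-platform definitions live in the platform sources, not in this header):

  #include <time.h>

  inline scudo::u64 monotonicTimeFastSketch() {
  #if defined(CLOCK_MONOTONIC_COARSE)
    struct timespec TS;
    clock_gettime(CLOCK_MONOTONIC_COARSE, &TS); // coarse but much cheaper
    return static_cast<scudo::u64>(TS.tv_sec) * 1000000000ULL +
           static_cast<scudo::u64>(TS.tv_nsec);
  #else
    return scudo::getMonotonicTime(); // no fast clock available
  #endif
  }
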
@@ -147,6 +164,7 @@ bool getRandom(void *Buffer, uptr Length, bool Blocking = false);
#define MAP_NOACCESS (1U << 1)
#define MAP_RESIZABLE (1U << 2)
#define MAP_MEMTAG (1U << 3)
+#define MAP_PRECOMMIT (1U << 4)
// Our platform memory mapping use is restricted to 3 scenarios:
// - reserve memory at a random address (MAP_NOACCESS);
@@ -172,10 +190,6 @@ void setMemoryPermission(uptr Addr, uptr Size, uptr Flags,
void releasePagesToOS(uptr BaseAddress, uptr Offset, uptr Size,
MapPlatformData *Data = nullptr);
-// Internal map & unmap fatal error. This must not call map(). SizeIfOOM shall
-// hold the requested size on an out-of-memory error, 0 otherwise.
-void NORETURN dieOnMapUnmapError(uptr SizeIfOOM = 0);
-
// Logging related functions.
void setAbortMessage(const char *Message);
@@ -197,6 +211,13 @@ enum class Option : u8 {
MaxTSDsCount, // Number of usable TSDs for the shared registry.
};
+enum class ReleaseToOS : u8 {
+ Normal, // Follow the normal rules for releasing pages to the OS
+ Force, // Force release pages to the OS, but avoid cases that take too long.
+ ForceAll, // Force release every page possible regardless of how long it will
+ // take.
+};
+
constexpr unsigned char PatternFillByte = 0xAB;
enum FillContentsMode {
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/condition_variable.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/condition_variable.h
new file mode 100644
index 000000000000..4afebdc9d04c
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/condition_variable.h
@@ -0,0 +1,60 @@
+//===-- condition_variable.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_CONDITION_VARIABLE_H_
+#define SCUDO_CONDITION_VARIABLE_H_
+
+#include "condition_variable_base.h"
+
+#include "common.h"
+#include "platform.h"
+
+#include "condition_variable_linux.h"
+
+namespace scudo {
+
+// A dummy (default) implementation of a condition variable. It doesn't do a
+// real `wait`; instead, it only spins for a short amount of time.
+class ConditionVariableDummy
+ : public ConditionVariableBase<ConditionVariableDummy> {
+public:
+ void notifyAllImpl(UNUSED HybridMutex &M) REQUIRES(M) {}
+
+ void waitImpl(UNUSED HybridMutex &M) REQUIRES(M) {
+ M.unlock();
+
+ constexpr u32 SpinTimes = 64;
+ volatile u32 V = 0;
+ for (u32 I = 0; I < SpinTimes; ++I) {
+ u32 Tmp = V + 1;
+ V = Tmp;
+ }
+
+ M.lock();
+ }
+};
+
+template <typename Config, typename = const bool>
+struct ConditionVariableState {
+ static constexpr bool enabled() { return false; }
+  // This is only used for compilation purposes so that we won't end up having
+ // many conditional compilations. If you want to use `ConditionVariableDummy`,
+ // define `ConditionVariableT` in your allocator configuration. See
+ // allocator_config.h for more details.
+ using ConditionVariableT = ConditionVariableDummy;
+};
+
+template <typename Config>
+struct ConditionVariableState<Config, decltype(Config::UseConditionVariable)> {
+ static constexpr bool enabled() { return Config::UseConditionVariable; }
+ using ConditionVariableT = typename Config::ConditionVariableT;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_CONDITION_VARIABLE_H_
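ConditionVariableState is a detection idiom: the partial specialization is viable only when Config declares a UseConditionVariable member matching the `const bool` default argument, so configs that never mention condition variables silently get the disabled state. A sketch of a config fragment opting in (only the condition-variable members are shown; everything else a real config needs is omitted):

  struct MyAllocatorConfig {
    // The mere presence of this member selects the enabled specialization.
    static const bool UseConditionVariable = true;
  #if SCUDO_LINUX
    using ConditionVariableT = scudo::ConditionVariableLinux;
  #else
    using ConditionVariableT = scudo::ConditionVariableDummy;
  #endif
    // ... Primary/Secondary/TSDRegistryT parameters would follow ...
  };
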
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/condition_variable_base.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/condition_variable_base.h
new file mode 100644
index 000000000000..416c327fed49
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/condition_variable_base.h
@@ -0,0 +1,56 @@
+//===-- condition_variable_base.h -------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_CONDITION_VARIABLE_BASE_H_
+#define SCUDO_CONDITION_VARIABLE_BASE_H_
+
+#include "mutex.h"
+#include "thread_annotations.h"
+
+namespace scudo {
+
+template <typename Derived> class ConditionVariableBase {
+public:
+ constexpr ConditionVariableBase() = default;
+
+ void bindTestOnly(HybridMutex &Mutex) {
+#if SCUDO_DEBUG
+ boundMutex = &Mutex;
+#else
+ (void)Mutex;
+#endif
+ }
+
+ void notifyAll(HybridMutex &M) REQUIRES(M) {
+#if SCUDO_DEBUG
+ CHECK_EQ(&M, boundMutex);
+#endif
+ getDerived()->notifyAllImpl(M);
+ }
+
+ void wait(HybridMutex &M) REQUIRES(M) {
+#if SCUDO_DEBUG
+ CHECK_EQ(&M, boundMutex);
+#endif
+ getDerived()->waitImpl(M);
+ }
+
+protected:
+ Derived *getDerived() { return static_cast<Derived *>(this); }
+
+#if SCUDO_DEBUG
+ // Because thread-safety analysis doesn't support pointer aliasing, we are not
+  // able to mark the proper annotations without false positives. Instead, we
+ // pass the lock and do the same-lock check separately.
+ HybridMutex *boundMutex = nullptr;
+#endif
+};
+
+} // namespace scudo
+
+#endif // SCUDO_CONDITION_VARIABLE_BASE_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/condition_variable_linux.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/condition_variable_linux.cpp
new file mode 100644
index 000000000000..e6d9bd1771a4
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/condition_variable_linux.cpp
@@ -0,0 +1,52 @@
+//===-- condition_variable_linux.cpp ----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "platform.h"
+
+#if SCUDO_LINUX
+
+#include "condition_variable_linux.h"
+
+#include "atomic_helpers.h"
+
+#include <limits.h>
+#include <linux/futex.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+namespace scudo {
+
+void ConditionVariableLinux::notifyAllImpl(UNUSED HybridMutex &M) {
+ const u32 V = atomic_load_relaxed(&Counter);
+ atomic_store_relaxed(&Counter, V + 1);
+
+ // TODO(chiahungduan): Move the waiters from the futex waiting queue
+  // `Counter` to the futex waiting queue `M` so that the awoken threads won't
+  // be blocked again because `M` is still locked by the current thread.
+ if (LastNotifyAll != V) {
+ syscall(SYS_futex, reinterpret_cast<uptr>(&Counter), FUTEX_WAKE_PRIVATE,
+ INT_MAX, nullptr, nullptr, 0);
+ }
+
+ LastNotifyAll = V + 1;
+}
+
+void ConditionVariableLinux::waitImpl(HybridMutex &M) {
+ const u32 V = atomic_load_relaxed(&Counter) + 1;
+ atomic_store_relaxed(&Counter, V);
+
+ // TODO: Use ScopedUnlock when it's supported.
+ M.unlock();
+ syscall(SYS_futex, reinterpret_cast<uptr>(&Counter), FUTEX_WAIT_PRIVATE, V,
+ nullptr, nullptr, 0);
+ M.lock();
+}
+
+} // namespace scudo
+
+#endif // SCUDO_LINUX
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/condition_variable_linux.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/condition_variable_linux.h
new file mode 100644
index 000000000000..cd073287326d
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/condition_variable_linux.h
@@ -0,0 +1,38 @@
+//===-- condition_variable_linux.h ------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_CONDITION_VARIABLE_LINUX_H_
+#define SCUDO_CONDITION_VARIABLE_LINUX_H_
+
+#include "platform.h"
+
+#if SCUDO_LINUX
+
+#include "atomic_helpers.h"
+#include "condition_variable_base.h"
+#include "thread_annotations.h"
+
+namespace scudo {
+
+class ConditionVariableLinux
+ : public ConditionVariableBase<ConditionVariableLinux> {
+public:
+ void notifyAllImpl(HybridMutex &M) REQUIRES(M);
+
+ void waitImpl(HybridMutex &M) REQUIRES(M);
+
+private:
+ u32 LastNotifyAll = 0;
+ atomic_u32 Counter = {};
+};
+
+} // namespace scudo
+
+#endif // SCUDO_LINUX
+
+#endif // SCUDO_CONDITION_VARIABLE_LINUX_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/crc32_hw.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/crc32_hw.cpp
index 62841ba51019..73f2ae000c63 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/crc32_hw.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/crc32_hw.cpp
@@ -10,10 +10,11 @@
namespace scudo {
-#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
+#if defined(__CRC32__) || defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
u32 computeHardwareCRC32(u32 Crc, uptr Data) {
return static_cast<u32>(CRC32_INTRINSIC(Crc, Data));
}
-#endif // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
+#endif // defined(__CRC32__) || defined(__SSE4_2__) ||
+ // defined(__ARM_FEATURE_CRC32)
} // namespace scudo
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags.cpp
index de5153b288b1..f498edfbd326 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags.cpp
@@ -68,6 +68,9 @@ void initFlags() {
Parser.parseString(getCompileDefinitionScudoDefaultOptions());
Parser.parseString(getScudoDefaultOptions());
Parser.parseString(getEnv("SCUDO_OPTIONS"));
+ if (const char *V = getEnv("SCUDO_ALLOCATION_RING_BUFFER_SIZE")) {
+ Parser.parseStringPair("allocation_ring_buffer_size", V);
+ }
}
} // namespace scudo
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags.inc b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags.inc
index 690d889b8cee..f5a2bab5057a 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags.inc
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags.inc
@@ -45,3 +45,7 @@ SCUDO_FLAG(bool, may_return_null, true,
SCUDO_FLAG(int, release_to_os_interval_ms, SCUDO_ANDROID ? INT32_MIN : 5000,
"Interval (in milliseconds) at which to attempt release of unused "
"memory to the OS. Negative values disable the feature.")
+
+SCUDO_FLAG(int, allocation_ring_buffer_size, 32768,
+           "Entries to keep in the allocation ring buffer for scudo. "
+           "Values less than or equal to zero disable the buffer.")
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags_parser.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags_parser.cpp
index be39fcd4f887..3d8c6f3789b4 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags_parser.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags_parser.cpp
@@ -10,6 +10,8 @@
#include "common.h"
#include "report.h"
+#include <errno.h>
+#include <limits.h>
#include <stdlib.h>
#include <string.h>
@@ -80,7 +82,7 @@ void FlagParser::parseFlag() {
++Pos;
Value = Buffer + ValueStart;
}
- if (!runHandler(Name, Value))
+ if (!runHandler(Name, Value, '='))
reportError("flag parsing failed.");
}
@@ -122,10 +124,16 @@ inline bool parseBool(const char *Value, bool *b) {
return false;
}
-bool FlagParser::runHandler(const char *Name, const char *Value) {
+void FlagParser::parseStringPair(const char *Name, const char *Value) {
+ if (!runHandler(Name, Value, '\0'))
+ reportError("flag parsing failed.");
+}
+
+bool FlagParser::runHandler(const char *Name, const char *Value,
+ const char Sep) {
for (u32 I = 0; I < NumberOfFlags; ++I) {
const uptr Len = strlen(Flags[I].Name);
- if (strncmp(Name, Flags[I].Name, Len) != 0 || Name[Len] != '=')
+ if (strncmp(Name, Flags[I].Name, Len) != 0 || Name[Len] != Sep)
continue;
bool Ok = false;
switch (Flags[I].Type) {
@@ -136,12 +144,18 @@ bool FlagParser::runHandler(const char *Name, const char *Value) {
break;
case FlagType::FT_int:
char *ValueEnd;
- *reinterpret_cast<int *>(Flags[I].Var) =
- static_cast<int>(strtol(Value, &ValueEnd, 10));
- Ok =
- *ValueEnd == '"' || *ValueEnd == '\'' || isSeparatorOrNull(*ValueEnd);
- if (!Ok)
+ errno = 0;
+ long V = strtol(Value, &ValueEnd, 10);
+ if (errno != 0 || // strtol failed (over or underflow)
+ V > INT_MAX || V < INT_MIN || // overflows integer
+ // contains unexpected characters
+ (*ValueEnd != '"' && *ValueEnd != '\'' &&
+ !isSeparatorOrNull(*ValueEnd))) {
reportInvalidFlag("int", Value);
+ break;
+ }
+ *reinterpret_cast<int *>(Flags[I].Var) = static_cast<int>(V);
+ Ok = true;
break;
}
return Ok;
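The int case is hardened: strtol's errno result, the long-to-int range, and the terminating character are all validated before the flag variable is written, instead of storing a possibly truncated value first. The same pattern in isolation (this standalone sketch demands full consumption of the string, whereas the parser above also accepts quote and separator terminators):

  #include <errno.h>
  #include <limits.h>
  #include <stdlib.h>

  // Returns true and writes *Out only for a clean, in-range decimal int.
  static bool parseIntStrict(const char *Value, int *Out) {
    char *End;
    errno = 0;
    const long V = strtol(Value, &End, 10);
    if (errno != 0 || V > INT_MAX || V < INT_MIN || End == Value ||
        *End != '\0')
      return false;
    *Out = static_cast<int>(V);
    return true;
  }
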
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags_parser.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags_parser.h
index ba832adbd909..ded496fda3b9 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags_parser.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/flags_parser.h
@@ -27,6 +27,7 @@ public:
void *Var);
void parseString(const char *S);
void printFlagDescriptions();
+ void parseStringPair(const char *Name, const char *Value);
private:
static const u32 MaxFlags = 20;
@@ -45,7 +46,7 @@ private:
void skipWhitespace();
void parseFlags();
void parseFlag();
- bool runHandler(const char *Name, const char *Value);
+ bool runHandler(const char *Name, const char *Value, char Sep);
};
void reportUnrecognizedFlags();
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/fuchsia.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/fuchsia.cpp
index 3b473bc9e22d..0788c4198e53 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/fuchsia.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/fuchsia.cpp
@@ -17,7 +17,9 @@
#include <lib/sync/mutex.h> // for sync_mutex_t
#include <stdlib.h> // for getenv()
#include <zircon/compiler.h>
+#include <zircon/process.h>
#include <zircon/sanitizer.h>
+#include <zircon/status.h>
#include <zircon/syscalls.h>
namespace scudo {
@@ -30,6 +32,16 @@ void NORETURN die() { __builtin_trap(); }
// with ZX_HANDLE_INVALID.
static_assert(ZX_HANDLE_INVALID == 0, "");
+static void NORETURN dieOnError(zx_status_t Status, const char *FnName,
+ uptr Size) {
+ char Error[128];
+ formatString(Error, sizeof(Error),
+ "SCUDO ERROR: %s failed with size %zuKB (%s)", FnName,
+ Size >> 10, zx_status_get_string(Status));
+ outputRaw(Error);
+ die();
+}
+
static void *allocateVmar(uptr Size, MapPlatformData *Data, bool AllowNoMem) {
// Only scenario so far.
DCHECK(Data);
@@ -41,7 +53,7 @@ static void *allocateVmar(uptr Size, MapPlatformData *Data, bool AllowNoMem) {
Size, &Data->Vmar, &Data->VmarBase);
if (UNLIKELY(Status != ZX_OK)) {
if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
- dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY ? Size : 0);
+ dieOnError(Status, "zx_vmar_allocate", Size);
return nullptr;
}
return reinterpret_cast<void *>(Data->VmarBase);
@@ -56,8 +68,9 @@ void *map(void *Addr, uptr Size, const char *Name, uptr Flags,
if (Flags & MAP_NOACCESS)
return allocateVmar(Size, Data, AllowNoMem);
- const zx_handle_t Vmar = Data ? Data->Vmar : _zx_vmar_root_self();
- CHECK_NE(Vmar, ZX_HANDLE_INVALID);
+ const zx_handle_t Vmar = (Data && Data->Vmar != ZX_HANDLE_INVALID)
+ ? Data->Vmar
+ : _zx_vmar_root_self();
zx_status_t Status;
zx_handle_t Vmo;
@@ -71,7 +84,7 @@ void *map(void *Addr, uptr Size, const char *Name, uptr Flags,
Status = _zx_vmo_set_size(Vmo, VmoSize + Size);
if (Status != ZX_OK) {
if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
- dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY ? Size : 0);
+ dieOnError(Status, "zx_vmo_set_size", VmoSize + Size);
return nullptr;
}
} else {
@@ -79,7 +92,7 @@ void *map(void *Addr, uptr Size, const char *Name, uptr Flags,
Status = _zx_vmo_create(Size, ZX_VMO_RESIZABLE, &Vmo);
if (UNLIKELY(Status != ZX_OK)) {
if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
- dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY ? Size : 0);
+ dieOnError(Status, "zx_vmo_create", Size);
return nullptr;
}
_zx_object_set_property(Vmo, ZX_PROP_NAME, Name, strlen(Name));
@@ -88,11 +101,24 @@ void *map(void *Addr, uptr Size, const char *Name, uptr Flags,
uintptr_t P;
zx_vm_option_t MapFlags =
ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_ALLOW_FAULTS;
+ if (Addr)
+ DCHECK(Data);
const uint64_t Offset =
Addr ? reinterpret_cast<uintptr_t>(Addr) - Data->VmarBase : 0;
if (Offset)
MapFlags |= ZX_VM_SPECIFIC;
Status = _zx_vmar_map(Vmar, MapFlags, Offset, Vmo, VmoSize, Size, &P);
+ if (UNLIKELY(Status != ZX_OK)) {
+ if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
+ dieOnError(Status, "zx_vmar_map", Size);
+ return nullptr;
+ }
+
+ if (Flags & MAP_PRECOMMIT) {
+ Status = _zx_vmar_op_range(Vmar, ZX_VMAR_OP_COMMIT, P, Size,
+ /*buffer=*/nullptr, /*buffer_size=*/0);
+ }
+
  // No need to track the Vmo if we don't intend to resize it. Close it.
if (Flags & MAP_RESIZABLE) {
DCHECK(Data);
@@ -105,9 +131,10 @@ void *map(void *Addr, uptr Size, const char *Name, uptr Flags,
}
if (UNLIKELY(Status != ZX_OK)) {
if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
- dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY ? Size : 0);
+ dieOnError(Status, "zx_vmar_op_range", Size);
return nullptr;
}
+
if (Data)
Data->VmoSize += Size;
@@ -123,11 +150,13 @@ void unmap(void *Addr, uptr Size, uptr Flags, MapPlatformData *Data) {
CHECK_EQ(_zx_vmar_destroy(Vmar), ZX_OK);
CHECK_EQ(_zx_handle_close(Vmar), ZX_OK);
} else {
- const zx_handle_t Vmar = Data ? Data->Vmar : _zx_vmar_root_self();
+ const zx_handle_t Vmar = (Data && Data->Vmar != ZX_HANDLE_INVALID)
+ ? Data->Vmar
+ : _zx_vmar_root_self();
const zx_status_t Status =
_zx_vmar_unmap(Vmar, reinterpret_cast<uintptr_t>(Addr), Size);
if (UNLIKELY(Status != ZX_OK))
- dieOnMapUnmapError();
+ dieOnError(Status, "zx_vmar_unmap", Size);
}
if (Data) {
if (Data->Vmo != ZX_HANDLE_INVALID)
@@ -142,12 +171,15 @@ void setMemoryPermission(UNUSED uptr Addr, UNUSED uptr Size, UNUSED uptr Flags,
(Flags & MAP_NOACCESS) ? 0 : (ZX_VM_PERM_READ | ZX_VM_PERM_WRITE);
DCHECK(Data);
DCHECK_NE(Data->Vmar, ZX_HANDLE_INVALID);
- if (_zx_vmar_protect(Data->Vmar, Prot, Addr, Size) != ZX_OK)
- dieOnMapUnmapError();
+ const zx_status_t Status = _zx_vmar_protect(Data->Vmar, Prot, Addr, Size);
+ if (Status != ZX_OK)
+ dieOnError(Status, "zx_vmar_protect", Size);
}
void releasePagesToOS(UNUSED uptr BaseAddress, uptr Offset, uptr Size,
MapPlatformData *Data) {
+ // TODO: DCHECK the BaseAddress is consistent with the data in
+ // MapPlatformData.
DCHECK(Data);
DCHECK_NE(Data->Vmar, ZX_HANDLE_INVALID);
DCHECK_NE(Data->Vmo, ZX_HANDLE_INVALID);
@@ -177,7 +209,10 @@ void HybridMutex::unlock() __TA_NO_THREAD_SAFETY_ANALYSIS {
sync_mutex_unlock(&M);
}
+void HybridMutex::assertHeldImpl() __TA_NO_THREAD_SAFETY_ANALYSIS {}
+
u64 getMonotonicTime() { return _zx_clock_get_monotonic(); }
+u64 getMonotonicTimeFast() { return _zx_clock_get_monotonic(); }
u32 getNumberOfCPUs() { return _zx_system_get_num_cpus(); }
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/fuchsia.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/fuchsia.h
index d6993f892140..c1dfd7638ec5 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/fuchsia.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/fuchsia.h
@@ -13,7 +13,8 @@
#if SCUDO_FUCHSIA
-#include <zircon/process.h>
+#include <stdint.h>
+#include <zircon/types.h>
namespace scudo {
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp
index 078e44b0dfc8..5b01ebe11c09 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp
@@ -46,15 +46,11 @@ extern "C" int LLVMFuzzerTestOneInput(uint8_t *Data, size_t Size) {
}
std::string RingBufferBytes = FDP.ConsumeRemainingBytesAsString();
- std::vector<char> RingBuffer(AllocatorT::getRingBufferSize(), 0);
- for (size_t i = 0; i < RingBufferBytes.length() && i < RingBuffer.size();
- ++i) {
- RingBuffer[i] = RingBufferBytes[i];
- }
scudo_error_info ErrorInfo;
AllocatorT::getErrorInfo(&ErrorInfo, FaultAddr, StackDepot.data(),
- RegionInfo.data(), RingBuffer.data(), Memory,
- MemoryTags, MemoryAddr, MemorySize);
+ RegionInfo.data(), RingBufferBytes.data(),
+ RingBufferBytes.size(), Memory, MemoryTags,
+ MemoryAddr, MemorySize);
return 0;
}
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/include/scudo/interface.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/include/scudo/interface.h
index 9b9a84623c51..a2dedea910cc 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/include/scudo/interface.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/include/scudo/interface.h
@@ -14,13 +14,25 @@
extern "C" {
-__attribute__((weak)) const char *__scudo_default_options();
+__attribute__((weak)) const char *__scudo_default_options(void);
// Post-allocation & pre-deallocation hooks.
-// They must be thread-safe and not use heap related functions.
__attribute__((weak)) void __scudo_allocate_hook(void *ptr, size_t size);
__attribute__((weak)) void __scudo_deallocate_hook(void *ptr);
+// `realloc` involves both a deallocation and an allocation, but the two are
+// not reported atomically. A profiler that keeps taking snapshots may catch
+// one right in the middle of `realloc`, between the deallocation and the
+// allocation reports, and conclude that the memory from `realloc` is missing.
+// To alleviate that, define the two `realloc` hooks below to learn that the
+// bundled hook calls belong together. These hooks are optional and should
+// only be used when a hooks user wants to track reallocs more closely.
+//
+// See more details in the comment of `realloc` in wrapper_c.inc.
+__attribute__((weak)) void
+__scudo_realloc_allocate_hook(void *old_ptr, void *new_ptr, size_t size);
+__attribute__((weak)) void __scudo_realloc_deallocate_hook(void *old_ptr);
+
void __scudo_print_stats(void);
typedef void (*iterate_callback)(uintptr_t base, size_t size, void *arg);
@@ -73,7 +85,8 @@ typedef void (*iterate_callback)(uintptr_t base, size_t size, void *arg);
// pointer.
void __scudo_get_error_info(struct scudo_error_info *error_info,
uintptr_t fault_addr, const char *stack_depot,
- const char *region_info, const char *ring_buffer,
+ size_t stack_depot_size, const char *region_info,
+ const char *ring_buffer, size_t ring_buffer_size,
const char *memory, const char *memory_tags,
uintptr_t memory_addr, size_t memory_size);
@@ -101,14 +114,14 @@ struct scudo_error_info {
struct scudo_error_report reports[3];
};
-const char *__scudo_get_stack_depot_addr();
-size_t __scudo_get_stack_depot_size();
+const char *__scudo_get_stack_depot_addr(void);
+size_t __scudo_get_stack_depot_size(void);
-const char *__scudo_get_region_info_addr();
-size_t __scudo_get_region_info_size();
+const char *__scudo_get_region_info_addr(void);
+size_t __scudo_get_region_info_size(void);
-const char *__scudo_get_ring_buffer_addr();
-size_t __scudo_get_ring_buffer_size();
+const char *__scudo_get_ring_buffer_addr(void);
+size_t __scudo_get_ring_buffer_size(void);
#ifndef M_DECAY_TIME
#define M_DECAY_TIME -100
@@ -118,6 +131,10 @@ size_t __scudo_get_ring_buffer_size();
#define M_PURGE -101
#endif
+#ifndef M_PURGE_ALL
+#define M_PURGE_ALL -104
+#endif
+
// Tune the allocator's choice of memory tags to make it more likely that
// a certain class of memory errors will be detected. The value argument should
// be one of the M_MEMTAG_TUNING_* constants below.
@@ -155,6 +172,11 @@ size_t __scudo_get_ring_buffer_size();
#define M_MEMTAG_TUNING_UAF 1
#endif
+// Print internal stats to the log.
+#ifndef M_LOG_STATS
+#define M_LOG_STATS -205
+#endif
+
} // extern "C"
#endif // SCUDO_INTERFACE_H_
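For illustration, a minimal sketch of how a hooks user might define the weak hooks declared above, including the paired realloc hooks. The trackerRecord/trackerErase helpers are hypothetical stand-ins for a thread-safe allocation table; real hooks must not call back into the heap.

#include <stddef.h>

// Hypothetical tracker; a real one would be a preallocated or lock-free
// table, since the hooks themselves must not allocate.
static void trackerRecord(void *Ptr, size_t Size) { (void)Ptr; (void)Size; }
static void trackerErase(void *Ptr) { (void)Ptr; }

extern "C" void __scudo_allocate_hook(void *Ptr, size_t Size) {
  trackerRecord(Ptr, Size);
}
extern "C" void __scudo_deallocate_hook(void *Ptr) { trackerErase(Ptr); }

// The realloc hooks bracket the deallocation/allocation pair, so a snapshot
// taken between the two hook calls can still attribute the memory correctly.
extern "C" void __scudo_realloc_deallocate_hook(void *OldPtr) {
  trackerErase(OldPtr);
}
extern "C" void __scudo_realloc_allocate_hook(void *OldPtr, void *NewPtr,
                                              size_t Size) {
  (void)OldPtr;  // Could be used to link the new block to the old one.
  trackerRecord(NewPtr, Size);
}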
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/internal_defs.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/internal_defs.h
index c9ffad136b78..27c6b451ffe7 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/internal_defs.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/internal_defs.h
@@ -78,16 +78,16 @@
namespace scudo {
-typedef unsigned long uptr;
-typedef unsigned char u8;
-typedef unsigned short u16;
-typedef unsigned int u32;
-typedef unsigned long long u64;
-typedef signed long sptr;
-typedef signed char s8;
-typedef signed short s16;
-typedef signed int s32;
-typedef signed long long s64;
+typedef uintptr_t uptr;
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef uint64_t u64;
+typedef intptr_t sptr;
+typedef int8_t s8;
+typedef int16_t s16;
+typedef int32_t s32;
+typedef int64_t s64;
// The following two functions have platform specific implementations.
void outputRaw(const char *Buffer);
@@ -133,25 +133,25 @@ void NORETURN reportCheckFailed(const char *File, int Line,
#else
#define DCHECK(A) \
do { \
- } while (false)
+ } while (false && (A))
#define DCHECK_EQ(A, B) \
do { \
- } while (false)
+ } while (false && (A) == (B))
#define DCHECK_NE(A, B) \
do { \
- } while (false)
+ } while (false && (A) != (B))
#define DCHECK_LT(A, B) \
do { \
- } while (false)
+ } while (false && (A) < (B))
#define DCHECK_LE(A, B) \
do { \
- } while (false)
+ } while (false && (A) <= (B))
#define DCHECK_GT(A, B) \
do { \
- } while (false)
+ } while (false && (A) > (B))
#define DCHECK_GE(A, B) \
do { \
- } while (false)
+ } while (false && (A) >= (B))
#endif
// The superfluous die() call effectively makes this macro NORETURN.
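The point of the `false && (A)` rewrite is that the no-op DCHECKs still parse and type-check their arguments, so release builds diagnose typos in the checked expressions and stop warning about variables only used in checks, while short-circuit evaluation guarantees the expression is never executed. A standalone illustration (not from the source):

#define DCHECK_OLD(A) \
  do {                \
  } while (false)
#define DCHECK_NEW(A) \
  do {                \
  } while (false && (A))

int releaseBuildExample(int X) {
  DCHECK_OLD(X > 0);  // X is not used here at all: if the check were X's only
                      // use, release builds would warn about an unused
                      // variable, and a typo in the expression would only be
                      // diagnosed in debug builds.
  DCHECK_NEW(X > 0);  // X is compiled but never evaluated: the same typo now
                      // breaks release builds too, and no unused warning fires.
  return X;
}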
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/linux.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/linux.cpp
index c77c1bb600d9..274695108109 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/linux.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/linux.cpp
@@ -11,14 +11,17 @@
#if SCUDO_LINUX
#include "common.h"
+#include "internal_defs.h"
#include "linux.h"
#include "mutex.h"
+#include "report_linux.h"
#include "string_utils.h"
#include <errno.h>
#include <fcntl.h>
#include <linux/futex.h>
#include <sched.h>
+#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
@@ -41,6 +44,7 @@ uptr getPageSize() { return static_cast<uptr>(sysconf(_SC_PAGESIZE)); }
void NORETURN die() { abort(); }
+// TODO: Will be deprecated. Use the interfaces in MemMapLinux instead.
void *map(void *Addr, uptr Size, UNUSED const char *Name, uptr Flags,
UNUSED MapPlatformData *Data) {
int MmapFlags = MAP_PRIVATE | MAP_ANONYMOUS;
@@ -63,7 +67,7 @@ void *map(void *Addr, uptr Size, UNUSED const char *Name, uptr Flags,
void *P = mmap(Addr, Size, MmapProt, MmapFlags, -1, 0);
if (P == MAP_FAILED) {
if (!(Flags & MAP_ALLOWNOMEM) || errno != ENOMEM)
- dieOnMapUnmapError(errno == ENOMEM ? Size : 0);
+ reportMapError(errno == ENOMEM ? Size : 0);
return nullptr;
}
#if SCUDO_ANDROID
@@ -73,19 +77,22 @@ void *map(void *Addr, uptr Size, UNUSED const char *Name, uptr Flags,
return P;
}
+// TODO: Will be deprecated. Use the interfaces in MemMapLinux instead.
void unmap(void *Addr, uptr Size, UNUSED uptr Flags,
UNUSED MapPlatformData *Data) {
if (munmap(Addr, Size) != 0)
- dieOnMapUnmapError();
+ reportUnmapError(reinterpret_cast<uptr>(Addr), Size);
}
+// TODO: Will be deprecated. Use the interfaces in MemMapLinux instead.
void setMemoryPermission(uptr Addr, uptr Size, uptr Flags,
UNUSED MapPlatformData *Data) {
int Prot = (Flags & MAP_NOACCESS) ? PROT_NONE : (PROT_READ | PROT_WRITE);
if (mprotect(reinterpret_cast<void *>(Addr), Size, Prot) != 0)
- dieOnMapUnmapError();
+ reportProtectError(Addr, Size, Prot);
}
+// TODO: Will be deprecated. Use the interfaces in MemMapLinux instead.
void releasePagesToOS(uptr BaseAddress, uptr Offset, uptr Size,
UNUSED MapPlatformData *Data) {
void *Addr = reinterpret_cast<void *>(BaseAddress + Offset);
@@ -102,12 +109,14 @@ enum State : u32 { Unlocked = 0, Locked = 1, Sleeping = 2 };
}
bool HybridMutex::tryLock() {
- return atomic_compare_exchange(&M, Unlocked, Locked) == Unlocked;
+ return atomic_compare_exchange_strong(&M, Unlocked, Locked,
+ memory_order_acquire) == Unlocked;
}
// The following is based on https://akkadia.org/drepper/futex.pdf.
void HybridMutex::lockSlow() {
- u32 V = atomic_compare_exchange(&M, Unlocked, Locked);
+ u32 V = atomic_compare_exchange_strong(&M, Unlocked, Locked,
+ memory_order_acquire);
if (V == Unlocked)
return;
if (V != Sleeping)
@@ -127,6 +136,10 @@ void HybridMutex::unlock() {
}
}
+void HybridMutex::assertHeldImpl() {
+ CHECK(atomic_load(&M, memory_order_acquire) != Unlocked);
+}
+
u64 getMonotonicTime() {
timespec TS;
clock_gettime(CLOCK_MONOTONIC, &TS);
@@ -134,6 +147,17 @@ u64 getMonotonicTime() {
static_cast<u64>(TS.tv_nsec);
}
+u64 getMonotonicTimeFast() {
+#if defined(CLOCK_MONOTONIC_COARSE)
+ timespec TS;
+ clock_gettime(CLOCK_MONOTONIC_COARSE, &TS);
+ return static_cast<u64>(TS.tv_sec) * (1000ULL * 1000 * 1000) +
+ static_cast<u64>(TS.tv_nsec);
+#else
+ return getMonotonicTime();
+#endif
+}
+
u32 getNumberOfCPUs() {
cpu_set_t CPUs;
// sched_getaffinity can fail for a variety of legitimate reasons (lack of
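The explicit memory_order_acquire on the compare-exchange makes lock acquisition a proper acquire operation that pairs with the releasing store in unlock(). A portable sketch of the fast path, using std::atomic in place of Scudo's atomic helpers (the real lockSlow() additionally parks waiters on a futex using the Unlocked/Locked/Sleeping states):

#include <atomic>

enum State : unsigned { Unlocked = 0, Locked = 1, Sleeping = 2 };

struct SpinLockSketch {
  std::atomic<unsigned> M{Unlocked};

  bool tryLock() {
    unsigned Expected = Unlocked;
    // Acquire ordering: everything the previous holder wrote before its
    // releasing unlock() is visible after a successful CAS.
    return M.compare_exchange_strong(Expected, Locked,
                                     std::memory_order_acquire);
  }

  void unlock() {
    // Release ordering pairs with the acquire CAS above. The real
    // implementation distinguishes Locked from Sleeping and only issues a
    // futex wake when there may be sleepers.
    M.store(Unlocked, std::memory_order_release);
  }
};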
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/list.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/list.h
index 1ac93c2f65d7..0137667d1dcf 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/list.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/list.h
@@ -110,6 +110,18 @@ template <class T> struct SinglyLinkedList : public IntrusiveList<T> {
Size--;
}
+ // Insert X right after Prev.
+ void insert(T *Prev, T *X) {
+ DCHECK(!empty());
+ DCHECK_NE(Prev, nullptr);
+ DCHECK_NE(X, nullptr);
+ X->Next = Prev->Next;
+ Prev->Next = X;
+ if (Last == Prev)
+ Last = X;
+ ++Size;
+ }
+
void extract(T *Prev, T *X) {
DCHECK(!empty());
DCHECK_NE(Prev, nullptr);
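A small usage sketch of the new insert(Prev, X), assuming the intrusive node shape SinglyLinkedList expects and its existing push_back (names and values illustrative):

#include "list.h"

struct Node {
  Node *Next;
  int Value;
};

void insertExample() {
  scudo::SinglyLinkedList<Node> List;
  Node A = {}, B = {}, X = {}, Y = {};
  List.push_back(&A);
  List.push_back(&B);   // A -> B; Last == &B, Size == 2.
  List.insert(&A, &X);  // A -> X -> B; Last stays &B, Size == 3.
  List.insert(&B, &Y);  // A -> X -> B -> Y; Last moves to &Y, Size == 4.
  // The DCHECKs reject inserting into an empty list or passing null nodes.
}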
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/local_cache.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/local_cache.h
index f46645f9badf..46d6affdc033 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/local_cache.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/local_cache.h
@@ -10,8 +10,11 @@
#define SCUDO_LOCAL_CACHE_H_
#include "internal_defs.h"
+#include "list.h"
+#include "platform.h"
#include "report.h"
#include "stats.h"
+#include "string_utils.h"
namespace scudo {
@@ -19,42 +22,13 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
typedef typename SizeClassAllocator::SizeClassMap SizeClassMap;
typedef typename SizeClassAllocator::CompactPtrT CompactPtrT;
- struct TransferBatch {
- static const u32 MaxNumCached = SizeClassMap::MaxNumCachedHint;
- void setFromArray(CompactPtrT *Array, u32 N) {
- DCHECK_LE(N, MaxNumCached);
- Count = N;
- memcpy(Batch, Array, sizeof(Batch[0]) * Count);
- }
- void clear() { Count = 0; }
- void add(CompactPtrT P) {
- DCHECK_LT(Count, MaxNumCached);
- Batch[Count++] = P;
- }
- void copyToArray(CompactPtrT *Array) const {
- memcpy(Array, Batch, sizeof(Batch[0]) * Count);
- }
- u32 getCount() const { return Count; }
- CompactPtrT get(u32 I) const {
- DCHECK_LE(I, Count);
- return Batch[I];
- }
- static u32 getMaxCached(uptr Size) {
- return Min(MaxNumCached, SizeClassMap::getMaxCachedHint(Size));
- }
- TransferBatch *Next;
-
- private:
- u32 Count;
- CompactPtrT Batch[MaxNumCached];
- };
-
void init(GlobalStats *S, SizeClassAllocator *A) {
DCHECK(isEmpty());
Stats.init();
if (LIKELY(S))
S->link(&Stats);
Allocator = A;
+ initCache();
}
void destroy(GlobalStats *S) {
@@ -67,7 +41,9 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
DCHECK_LT(ClassId, NumClasses);
PerClass *C = &PerClassArray[ClassId];
if (C->Count == 0) {
- if (UNLIKELY(!refill(C, ClassId)))
+ // Refill with up to half of the max number of cached blocks.
+ DCHECK_GT(C->MaxCount / 2, 0U);
+ if (UNLIKELY(!refill(C, ClassId, C->MaxCount / 2)))
return nullptr;
DCHECK_GT(C->Count, 0);
}
@@ -81,13 +57,13 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
return Allocator->decompactPtr(ClassId, CompactP);
}
- void deallocate(uptr ClassId, void *P) {
+ bool deallocate(uptr ClassId, void *P) {
CHECK_LT(ClassId, NumClasses);
PerClass *C = &PerClassArray[ClassId];
- // We still have to initialize the cache in the event that the first heap
- // operation in a thread is a deallocation.
- initCacheMaybe(C);
- if (C->Count == C->MaxCount)
+
+ // If the cache is full, drain half of the blocks back to the main allocator.
+ const bool NeedToDrainCache = C->Count == C->MaxCount;
+ if (NeedToDrainCache)
drain(C, ClassId);
// See comment in allocate() about memory accesses.
const uptr ClassSize = C->ClassSize;
@@ -95,6 +71,8 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
Allocator->compactPtr(ClassId, reinterpret_cast<uptr>(P));
Stats.sub(StatAllocated, ClassSize);
Stats.add(StatFree, ClassSize);
+
+ return NeedToDrainCache;
}
bool isEmpty() const {
@@ -105,7 +83,7 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
}
void drain() {
- // Drain BatchClassId last as createBatch can refill it.
+ // Drain BatchClassId last as it may be needed while draining normal blocks.
for (uptr I = 0; I < NumClasses; ++I) {
if (I == BatchClassId)
continue;
@@ -117,40 +95,62 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
DCHECK(isEmpty());
}
- TransferBatch *createBatch(uptr ClassId, void *B) {
- if (ClassId != BatchClassId)
- B = allocate(BatchClassId);
- return reinterpret_cast<TransferBatch *>(B);
+ void *getBatchClassBlock() {
+ void *B = allocate(BatchClassId);
+ if (UNLIKELY(!B))
+ reportOutOfMemory(SizeClassAllocator::getSizeByClassId(BatchClassId));
+ return B;
}
LocalStats &getStats() { return Stats; }
+ void getStats(ScopedString *Str) {
+ bool EmptyCache = true;
+ for (uptr I = 0; I < NumClasses; ++I) {
+ if (PerClassArray[I].Count == 0)
+ continue;
+
+ EmptyCache = false;
+ // The ClassSize of BatchClass is set to 0 intentionally. See the comment
+ // in initCache() for more details.
+ const uptr ClassSize = I == BatchClassId
+ ? SizeClassAllocator::getSizeByClassId(I)
+ : PerClassArray[I].ClassSize;
+ // Note that the string utils don't support printing u16, so we cast it
+ // to the commonly used type uptr.
+ Str->append(" %02zu (%6zu): cached: %4zu max: %4zu\n", I, ClassSize,
+ static_cast<uptr>(PerClassArray[I].Count),
+ static_cast<uptr>(PerClassArray[I].MaxCount));
+ }
+
+ if (EmptyCache)
+ Str->append(" No block is cached.\n");
+ }
+
+ static u16 getMaxCached(uptr Size) {
+ return Min(SizeClassMap::MaxNumCachedHint,
+ SizeClassMap::getMaxCachedHint(Size));
+ }
+
private:
static const uptr NumClasses = SizeClassMap::NumClasses;
static const uptr BatchClassId = SizeClassMap::BatchClassId;
- struct PerClass {
- u32 Count;
- u32 MaxCount;
+ struct alignas(SCUDO_CACHE_LINE_SIZE) PerClass {
+ u16 Count;
+ u16 MaxCount;
// Note: ClassSize is zero for the transfer batch.
uptr ClassSize;
- CompactPtrT Chunks[2 * TransferBatch::MaxNumCached];
+ CompactPtrT Chunks[2 * SizeClassMap::MaxNumCachedHint];
};
PerClass PerClassArray[NumClasses] = {};
LocalStats Stats;
SizeClassAllocator *Allocator = nullptr;
- ALWAYS_INLINE void initCacheMaybe(PerClass *C) {
- if (LIKELY(C->MaxCount))
- return;
- initCache();
- DCHECK_NE(C->MaxCount, 0U);
- }
-
NOINLINE void initCache() {
for (uptr I = 0; I < NumClasses; I++) {
PerClass *P = &PerClassArray[I];
const uptr Size = SizeClassAllocator::getSizeByClassId(I);
- P->MaxCount = 2 * TransferBatch::getMaxCached(Size);
+ P->MaxCount = static_cast<u16>(2 * getMaxCached(Size));
if (I != BatchClassId) {
P->ClassSize = Size;
} else {
@@ -166,30 +166,21 @@ private:
deallocate(BatchClassId, B);
}
- NOINLINE bool refill(PerClass *C, uptr ClassId) {
- initCacheMaybe(C);
- TransferBatch *B = Allocator->popBatch(this, ClassId);
- if (UNLIKELY(!B))
- return false;
- DCHECK_GT(B->getCount(), 0);
- C->Count = B->getCount();
- B->copyToArray(C->Chunks);
- B->clear();
- destroyBatch(ClassId, B);
- return true;
+ NOINLINE bool refill(PerClass *C, uptr ClassId, u16 MaxRefill) {
+ const u16 NumBlocksRefilled =
+ Allocator->popBlocks(this, ClassId, C->Chunks, MaxRefill);
+ DCHECK_LE(NumBlocksRefilled, MaxRefill);
+ C->Count = static_cast<u16>(C->Count + NumBlocksRefilled);
+ return NumBlocksRefilled != 0;
}
NOINLINE void drain(PerClass *C, uptr ClassId) {
- const u32 Count = Min(C->MaxCount / 2, C->Count);
- TransferBatch *B =
- createBatch(ClassId, Allocator->decompactPtr(ClassId, C->Chunks[0]));
- if (UNLIKELY(!B))
- reportOutOfMemory(SizeClassAllocator::getSizeByClassId(BatchClassId));
- B->setFromArray(&C->Chunks[0], Count);
- C->Count -= Count;
- for (uptr I = 0; I < C->Count; I++)
+ const u16 Count = Min(static_cast<u16>(C->MaxCount / 2), C->Count);
+ Allocator->pushBlocks(this, ClassId, &C->Chunks[0], Count);
+ // u16 will be promoted to int by the usual arithmetic conversions.
+ C->Count = static_cast<u16>(C->Count - Count);
+ for (u16 I = 0; I < C->Count; I++)
C->Chunks[I] = C->Chunks[I + Count];
- Allocator->pushBatch(ClassId, B);
}
};
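A worked example of the new halving policy, assuming a size class with getMaxCached(Size) == 8 and hence C->MaxCount == 16 (numbers illustrative):

// allocate():   with C->Count == 0, refill(C, ClassId, /*MaxRefill=*/8) asks
//               popBlocks() for up to 8 blocks in one call; the following
//               allocations are then served locally from C->Chunks.
// deallocate(): once C->Count reaches 16, drain() pushes
//               Min(16 / 2, 16) == 8 blocks back via pushBlocks(), shifts the
//               remaining 8 down, and the newly freed block brings the count
//               to 9.
// Steady state thus keeps roughly half the cache resident, and every trip to
// the central allocator moves 8 blocks at once instead of one whole batch.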
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map.cpp
new file mode 100644
index 000000000000..115cc34e7060
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map.cpp
@@ -0,0 +1,84 @@
+//===-- mem_map.cpp ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "mem_map.h"
+
+#include "common.h"
+
+namespace scudo {
+
+bool MemMapDefault::mapImpl(uptr Addr, uptr Size, const char *Name,
+ uptr Flags) {
+ void *MappedAddr =
+ ::scudo::map(reinterpret_cast<void *>(Addr), Size, Name, Flags, &Data);
+ if (MappedAddr == nullptr)
+ return false;
+ Base = reinterpret_cast<uptr>(MappedAddr);
+ MappedBase = Base;
+ Capacity = Size;
+ return true;
+}
+
+void MemMapDefault::unmapImpl(uptr Addr, uptr Size) {
+ if (Size == Capacity) {
+ Base = MappedBase = Capacity = 0;
+ } else {
+ if (Base == Addr) {
+ Base = Addr + Size;
+ MappedBase = MappedBase == 0 ? Base : Max(MappedBase, Base);
+ }
+ Capacity -= Size;
+ }
+
+ ::scudo::unmap(reinterpret_cast<void *>(Addr), Size, UNMAP_ALL, &Data);
+}
+
+bool MemMapDefault::remapImpl(uptr Addr, uptr Size, const char *Name,
+ uptr Flags) {
+ void *RemappedPtr =
+ ::scudo::map(reinterpret_cast<void *>(Addr), Size, Name, Flags, &Data);
+ const uptr RemappedAddr = reinterpret_cast<uptr>(RemappedPtr);
+ MappedBase = MappedBase == 0 ? RemappedAddr : Min(MappedBase, RemappedAddr);
+ return RemappedAddr == Addr;
+}
+
+void MemMapDefault::releaseAndZeroPagesToOSImpl(uptr From, uptr Size) {
+ DCHECK_NE(MappedBase, 0U);
+ DCHECK_GE(From, MappedBase);
+ return ::scudo::releasePagesToOS(MappedBase, From - MappedBase, Size, &Data);
+}
+
+void MemMapDefault::setMemoryPermissionImpl(uptr Addr, uptr Size, uptr Flags) {
+ return ::scudo::setMemoryPermission(Addr, Size, Flags);
+}
+
+void ReservedMemoryDefault::releaseImpl() {
+ ::scudo::unmap(reinterpret_cast<void *>(Base), Capacity, UNMAP_ALL, &Data);
+}
+
+bool ReservedMemoryDefault::createImpl(uptr Addr, uptr Size, const char *Name,
+ uptr Flags) {
+ void *Reserved = ::scudo::map(reinterpret_cast<void *>(Addr), Size, Name,
+ Flags | MAP_NOACCESS, &Data);
+ if (Reserved == nullptr)
+ return false;
+
+ Base = reinterpret_cast<uptr>(Reserved);
+ Capacity = Size;
+
+ return true;
+}
+
+ReservedMemoryDefault::MemMapT ReservedMemoryDefault::dispatchImpl(uptr Addr,
+ uptr Size) {
+ ReservedMemoryDefault::MemMapT NewMap(Addr, Size);
+ NewMap.setMapPlatformData(Data);
+ return NewMap;
+}
+
+} // namespace scudo
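A short trace of unmapImpl's bookkeeping above, assuming an initial mapping at Base == 0x1000 with Capacity == 0x4000 (addresses illustrative); per the MemMapBase::unmap contract, unmapping happens only at the front or the back:

// unmap(0x1000, 0x1000)  trims the front: Base -> 0x2000, Capacity -> 0x3000.
// unmap(0x4000, 0x1000)  trims the back:  Base stays 0x2000,
//                        Capacity -> 0x2000.
// unmap(0x2000, 0x2000)  Size == Capacity: Base, MappedBase and Capacity all
//                        reset to 0, i.e., the mapping is fully released.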
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map.h
new file mode 100644
index 000000000000..b92216cf271d
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map.h
@@ -0,0 +1,92 @@
+//===-- mem_map.h -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_MEM_MAP_H_
+#define SCUDO_MEM_MAP_H_
+
+#include "mem_map_base.h"
+
+#include "common.h"
+#include "internal_defs.h"
+
+// TODO: This is only used for `MapPlatformData`. Remove these includes when we
+// have all three platform specific `MemMap` and `ReservedMemory`
+// implementations.
+#include "fuchsia.h"
+#include "linux.h"
+#include "trusty.h"
+
+#include "mem_map_fuchsia.h"
+#include "mem_map_linux.h"
+
+namespace scudo {
+
+// This will be deprecated when every allocator is supported by each
+// platform's `MemMap` implementation.
+class MemMapDefault final : public MemMapBase<MemMapDefault> {
+public:
+ constexpr MemMapDefault() = default;
+ MemMapDefault(uptr Base, uptr Capacity) : Base(Base), Capacity(Capacity) {}
+
+ // Impls for base functions.
+ bool mapImpl(uptr Addr, uptr Size, const char *Name, uptr Flags);
+ void unmapImpl(uptr Addr, uptr Size);
+ bool remapImpl(uptr Addr, uptr Size, const char *Name, uptr Flags);
+ void setMemoryPermissionImpl(uptr Addr, uptr Size, uptr Flags);
+ void releasePagesToOSImpl(uptr From, uptr Size) {
+ return releaseAndZeroPagesToOSImpl(From, Size);
+ }
+ void releaseAndZeroPagesToOSImpl(uptr From, uptr Size);
+ uptr getBaseImpl() { return Base; }
+ uptr getCapacityImpl() { return Capacity; }
+
+ void setMapPlatformData(MapPlatformData &NewData) { Data = NewData; }
+
+private:
+ uptr Base = 0;
+ uptr Capacity = 0;
+ uptr MappedBase = 0;
+ MapPlatformData Data = {};
+};
+
+// This will be deprecated when every allocator is supported by each
+// platform's `MemMap` implementation.
+class ReservedMemoryDefault final
+ : public ReservedMemory<ReservedMemoryDefault, MemMapDefault> {
+public:
+ constexpr ReservedMemoryDefault() = default;
+
+ bool createImpl(uptr Addr, uptr Size, const char *Name, uptr Flags);
+ void releaseImpl();
+ MemMapT dispatchImpl(uptr Addr, uptr Size);
+ uptr getBaseImpl() { return Base; }
+ uptr getCapacityImpl() { return Capacity; }
+
+private:
+ uptr Base = 0;
+ uptr Capacity = 0;
+ MapPlatformData Data = {};
+};
+
+#if SCUDO_LINUX
+using ReservedMemoryT = ReservedMemoryLinux;
+using MemMapT = ReservedMemoryT::MemMapT;
+#elif SCUDO_FUCHSIA
+using ReservedMemoryT = ReservedMemoryFuchsia;
+using MemMapT = ReservedMemoryT::MemMapT;
+#elif SCUDO_TRUSTY
+using ReservedMemoryT = ReservedMemoryDefault;
+using MemMapT = ReservedMemoryT::MemMapT;
+#else
+#error \
+ "Unsupported platform, please implement the ReservedMemory for your platform!"
+#endif
+
+} // namespace scudo
+
+#endif // SCUDO_MEM_MAP_H_
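Since MemMapT and ReservedMemoryT resolve at compile time, callers are written once against the base API. A hedged sketch of a typical reserve-then-commit sequence (the function and name string are illustrative; Size is assumed page-aligned):

#include "mem_map.h"

bool reserveAndCommit(scudo::uptr Size) {
  scudo::ReservedMemoryT Reserved;
  if (!Reserved.create(/*Addr=*/0U, Size, "scudo:example"))
    return false;
  // Carve the whole reservation out as a MemMap, then remap it read-write
  // (the reservation itself was created with MAP_NOACCESS).
  scudo::MemMapT MemMap = Reserved.dispatch(Reserved.getBase(), Size);
  if (!MemMap.remap(MemMap.getBase(), Size, "scudo:example"))
    return false;
  // ... use [MemMap.getBase(), MemMap.getBase() + MemMap.getCapacity()) ...
  MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
  return true;
}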
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_base.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_base.h
new file mode 100644
index 000000000000..99ab0cba604f
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_base.h
@@ -0,0 +1,129 @@
+//===-- mem_map_base.h ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_MEM_MAP_BASE_H_
+#define SCUDO_MEM_MAP_BASE_H_
+
+#include "common.h"
+
+namespace scudo {
+
+// In Scudo, every memory operation will be fulfilled through a
+// platform-specific `MemMap` instance. The essential APIs are listed in the
+// `MemMapBase` below. This is implemented with CRTP, so each implementation
+// has to provide all of the 'Impl'-suffixed functions.
+template <class Derived> class MemMapBase {
+public:
+ constexpr MemMapBase() = default;
+
+ // This is used to map a new set of contiguous pages. Note that the `Addr` is
+ // only a suggestion to the system.
+ bool map(uptr Addr, uptr Size, const char *Name, uptr Flags = 0) {
+ DCHECK(!isAllocated());
+ return invokeImpl(&Derived::mapImpl, Addr, Size, Name, Flags);
+ }
+
+ // This is used to unmap pages, fully or partially, from the beginning or
+ // the end, i.e., the remaining pages are expected to stay contiguous.
+ void unmap(uptr Addr, uptr Size) {
+ DCHECK(isAllocated());
+ DCHECK((Addr == getBase()) || (Addr + Size == getBase() + getCapacity()));
+ invokeImpl(&Derived::unmapImpl, Addr, Size);
+ }
+
+ // This is used to remap a mapped range (either from map() or dispatched from
+ // ReservedMemory). For example, we have reserved several pages and then we
+ // want to remap them with different accessibility.
+ bool remap(uptr Addr, uptr Size, const char *Name, uptr Flags = 0) {
+ DCHECK(isAllocated());
+ DCHECK((Addr >= getBase()) && (Addr + Size <= getBase() + getCapacity()));
+ return invokeImpl(&Derived::remapImpl, Addr, Size, Name, Flags);
+ }
+
+ // This is used to update the pages' access permissions, e.g., to mark
+ // pages as having no read/write permission.
+ void setMemoryPermission(uptr Addr, uptr Size, uptr Flags) {
+ DCHECK(isAllocated());
+ DCHECK((Addr >= getBase()) && (Addr + Size <= getBase() + getCapacity()));
+ return invokeImpl(&Derived::setMemoryPermissionImpl, Addr, Size, Flags);
+ }
+
+ // Suggest releasing a set of contiguous physical pages back to the OS. Note
+ // that only physical pages are supposed to be released. Any release of
+ // virtual pages may lead to undefined behavior.
+ void releasePagesToOS(uptr From, uptr Size) {
+ DCHECK(isAllocated());
+ DCHECK((From >= getBase()) && (From + Size <= getBase() + getCapacity()));
+ invokeImpl(&Derived::releasePagesToOSImpl, From, Size);
+ }
+ // This is similar to the above one except that any subsequent access to the
+ // released pages will return with zero-filled pages.
+ void releaseAndZeroPagesToOS(uptr From, uptr Size) {
+ DCHECK(isAllocated());
+ DCHECK((From >= getBase()) && (From + Size <= getBase() + getCapacity()));
+ invokeImpl(&Derived::releaseAndZeroPagesToOSImpl, From, Size);
+ }
+
+ uptr getBase() { return invokeImpl(&Derived::getBaseImpl); }
+ uptr getCapacity() { return invokeImpl(&Derived::getCapacityImpl); }
+
+ bool isAllocated() { return getBase() != 0U; }
+
+protected:
+ template <typename R, typename... Args>
+ R invokeImpl(R (Derived::*MemFn)(Args...), Args... args) {
+ return (static_cast<Derived *>(this)->*MemFn)(args...);
+ }
+};
+
+// `ReservedMemory` is a special memory handle which can be viewed as a page
+// allocator. `ReservedMemory` reserves a contiguous range of pages, and later
+// page requests can be fulfilled at designated addresses within that range.
+// This is used when we want to ensure that the virtual addresses of the
+// MemMap fall in a known range. This is implemented with CRTP, so each
+// implementation has to provide all of the 'Impl'-suffixed functions.
+template <class Derived, typename MemMapTy> class ReservedMemory {
+public:
+ using MemMapT = MemMapTy;
+ constexpr ReservedMemory() = default;
+
+ // Reserve a chunk of memory at a suggested address.
+ bool create(uptr Addr, uptr Size, const char *Name, uptr Flags = 0) {
+ DCHECK(!isCreated());
+ return invokeImpl(&Derived::createImpl, Addr, Size, Name, Flags);
+ }
+
+ // Release the entire reserved memory.
+ void release() {
+ DCHECK(isCreated());
+ invokeImpl(&Derived::releaseImpl);
+ }
+
+ // Dispatch a sub-range of reserved memory. Note that any fragmentation of
+ // the reserved pages is managed by each implementation.
+ MemMapT dispatch(uptr Addr, uptr Size) {
+ DCHECK(isCreated());
+ DCHECK((Addr >= getBase()) && (Addr + Size <= getBase() + getCapacity()));
+ return invokeImpl(&Derived::dispatchImpl, Addr, Size);
+ }
+
+ uptr getBase() { return invokeImpl(&Derived::getBaseImpl); }
+ uptr getCapacity() { return invokeImpl(&Derived::getCapacityImpl); }
+
+ bool isCreated() { return getBase() != 0U; }
+
+protected:
+ template <typename R, typename... Args>
+ R invokeImpl(R (Derived::*MemFn)(Args...), Args... args) {
+ return (static_cast<Derived *>(this)->*MemFn)(args...);
+ }
+};
+
+} // namespace scudo
+
+#endif // SCUDO_MEM_MAP_BASE_H_
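The invokeImpl helper is the whole CRTP mechanism: the base casts `this` to the derived type and calls the `...Impl` member, so dispatch is resolved statically with no vtable. A stripped-down illustration of the pattern, independent of Scudo's types:

template <class Derived> class MapBase {
public:
  bool map(unsigned long Addr) {
    // Statically dispatch to the derived implementation.
    return static_cast<Derived *>(this)->mapImpl(Addr);
  }
};

class ToyMap final : public MapBase<ToyMap> {
public:
  bool mapImpl(unsigned long Addr) { return Addr != 0; }
};

// MapBase<ToyMap>::map() inlines straight into ToyMap::mapImpl(); forgetting
// to define mapImpl() is a compile-time error rather than a runtime failure.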
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_fuchsia.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_fuchsia.cpp
new file mode 100644
index 000000000000..0566ab065526
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_fuchsia.cpp
@@ -0,0 +1,252 @@
+//===-- mem_map_fuchsia.cpp -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "mem_map_fuchsia.h"
+
+#include "atomic_helpers.h"
+#include "common.h"
+#include "string_utils.h"
+
+#if SCUDO_FUCHSIA
+
+#include <zircon/process.h>
+#include <zircon/status.h>
+#include <zircon/syscalls.h>
+
+namespace scudo {
+
+static void NORETURN dieOnError(zx_status_t Status, const char *FnName,
+ uptr Size) {
+ char Error[128];
+ formatString(Error, sizeof(Error),
+ "SCUDO ERROR: %s failed with size %zuKB (%s)", FnName,
+ Size >> 10, _zx_status_get_string(Status));
+ outputRaw(Error);
+ die();
+}
+
+static void setVmoName(zx_handle_t Vmo, const char *Name) {
+ size_t Len = strlen(Name);
+ DCHECK_LT(Len, ZX_MAX_NAME_LEN);
+ zx_status_t Status = _zx_object_set_property(Vmo, ZX_PROP_NAME, Name, Len);
+ CHECK_EQ(Status, ZX_OK);
+}
+
+// Returns the (cached) base address of the root VMAR.
+static uptr getRootVmarBase() {
+ static atomic_uptr CachedResult = {0};
+
+ uptr Result = atomic_load(&CachedResult, memory_order_acquire);
+ if (UNLIKELY(!Result)) {
+ zx_info_vmar_t VmarInfo;
+ zx_status_t Status =
+ _zx_object_get_info(_zx_vmar_root_self(), ZX_INFO_VMAR, &VmarInfo,
+ sizeof(VmarInfo), nullptr, nullptr);
+ CHECK_EQ(Status, ZX_OK);
+ CHECK_NE(VmarInfo.base, 0);
+
+ atomic_store(&CachedResult, VmarInfo.base, memory_order_release);
+ Result = VmarInfo.base;
+ }
+
+ return Result;
+}
+
+// Lazily creates and then always returns the same zero-sized VMO.
+static zx_handle_t getPlaceholderVmo() {
+ static atomic_u32 StoredVmo = {ZX_HANDLE_INVALID};
+
+ zx_handle_t Vmo = atomic_load(&StoredVmo, memory_order_acquire);
+ if (UNLIKELY(Vmo == ZX_HANDLE_INVALID)) {
+ // Create a zero-sized placeholder VMO.
+ zx_status_t Status = _zx_vmo_create(0, 0, &Vmo);
+ if (UNLIKELY(Status != ZX_OK))
+ dieOnError(Status, "zx_vmo_create", 0);
+
+ setVmoName(Vmo, "scudo:reserved");
+
+ // Atomically store its handle. If some other thread wins the race, use its
+ // handle and discard ours.
+ zx_handle_t OldValue = atomic_compare_exchange_strong(
+ &StoredVmo, ZX_HANDLE_INVALID, Vmo, memory_order_acq_rel);
+ if (UNLIKELY(OldValue != ZX_HANDLE_INVALID)) {
+ Status = _zx_handle_close(Vmo);
+ CHECK_EQ(Status, ZX_OK);
+
+ Vmo = OldValue;
+ }
+ }
+
+ return Vmo;
+}
+
+MemMapFuchsia::MemMapFuchsia(uptr Base, uptr Capacity)
+ : MapAddr(Base), WindowBase(Base), WindowSize(Capacity) {
+ // Create the VMO.
+ zx_status_t Status = _zx_vmo_create(Capacity, 0, &Vmo);
+ if (UNLIKELY(Status != ZX_OK))
+ dieOnError(Status, "zx_vmo_create", Capacity);
+}
+
+bool MemMapFuchsia::mapImpl(UNUSED uptr Addr, uptr Size, const char *Name,
+ uptr Flags) {
+ const bool AllowNoMem = !!(Flags & MAP_ALLOWNOMEM);
+ const bool PreCommit = !!(Flags & MAP_PRECOMMIT);
+ const bool NoAccess = !!(Flags & MAP_NOACCESS);
+
+ // Create the VMO.
+ zx_status_t Status = _zx_vmo_create(Size, 0, &Vmo);
+ if (UNLIKELY(Status != ZX_OK)) {
+ if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
+ dieOnError(Status, "zx_vmo_create", Size);
+ return false;
+ }
+
+ if (Name != nullptr)
+ setVmoName(Vmo, Name);
+
+ // Map it.
+ zx_vm_option_t MapFlags = ZX_VM_ALLOW_FAULTS;
+ if (!NoAccess)
+ MapFlags |= ZX_VM_PERM_READ | ZX_VM_PERM_WRITE;
+ Status =
+ _zx_vmar_map(_zx_vmar_root_self(), MapFlags, 0, Vmo, 0, Size, &MapAddr);
+ if (UNLIKELY(Status != ZX_OK)) {
+ if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
+ dieOnError(Status, "zx_vmar_map", Size);
+
+ Status = _zx_handle_close(Vmo);
+ CHECK_EQ(Status, ZX_OK);
+
+ MapAddr = 0;
+ Vmo = ZX_HANDLE_INVALID;
+ return false;
+ }
+
+ if (PreCommit) {
+ Status = _zx_vmar_op_range(_zx_vmar_root_self(), ZX_VMAR_OP_COMMIT, MapAddr,
+ Size, nullptr, 0);
+ CHECK_EQ(Status, ZX_OK);
+ }
+
+ WindowBase = MapAddr;
+ WindowSize = Size;
+ return true;
+}
+
+void MemMapFuchsia::unmapImpl(uptr Addr, uptr Size) {
+ zx_status_t Status;
+
+ if (Size == WindowSize) {
+ // NOTE: Closing first and then unmapping seems slightly faster than doing
+ // the same operations in the opposite order.
+ Status = _zx_handle_close(Vmo);
+ CHECK_EQ(Status, ZX_OK);
+ Status = _zx_vmar_unmap(_zx_vmar_root_self(), Addr, Size);
+ CHECK_EQ(Status, ZX_OK);
+
+ MapAddr = WindowBase = WindowSize = 0;
+ Vmo = ZX_HANDLE_INVALID;
+ } else {
+ // Unmap the subrange.
+ Status = _zx_vmar_unmap(_zx_vmar_root_self(), Addr, Size);
+ CHECK_EQ(Status, ZX_OK);
+
+ // Decommit the pages that we just unmapped.
+ Status = _zx_vmo_op_range(Vmo, ZX_VMO_OP_DECOMMIT, Addr - MapAddr, Size,
+ nullptr, 0);
+ CHECK_EQ(Status, ZX_OK);
+
+ if (Addr == WindowBase)
+ WindowBase += Size;
+ WindowSize -= Size;
+ }
+}
+
+bool MemMapFuchsia::remapImpl(uptr Addr, uptr Size, const char *Name,
+ uptr Flags) {
+ const bool AllowNoMem = !!(Flags & MAP_ALLOWNOMEM);
+ const bool PreCommit = !!(Flags & MAP_PRECOMMIT);
+ const bool NoAccess = !!(Flags & MAP_NOACCESS);
+
+ // NOTE: This will rename the *whole* VMO, not only the requested portion of
+ // it. But we cannot do better than this given the MemMap API. In practice,
+ // the upper layers of Scudo always pass the same Name for a given MemMap.
+ if (Name != nullptr)
+ setVmoName(Vmo, Name);
+
+ uptr MappedAddr;
+ zx_vm_option_t MapFlags = ZX_VM_ALLOW_FAULTS | ZX_VM_SPECIFIC_OVERWRITE;
+ if (!NoAccess)
+ MapFlags |= ZX_VM_PERM_READ | ZX_VM_PERM_WRITE;
+ zx_status_t Status =
+ _zx_vmar_map(_zx_vmar_root_self(), MapFlags, Addr - getRootVmarBase(),
+ Vmo, Addr - MapAddr, Size, &MappedAddr);
+ if (UNLIKELY(Status != ZX_OK)) {
+ if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
+ dieOnError(Status, "zx_vmar_map", Size);
+ return false;
+ }
+ DCHECK_EQ(Addr, MappedAddr);
+
+ if (PreCommit) {
+ Status = _zx_vmar_op_range(_zx_vmar_root_self(), ZX_VMAR_OP_COMMIT, MapAddr,
+ Size, nullptr, 0);
+ CHECK_EQ(Status, ZX_OK);
+ }
+
+ return true;
+}
+
+void MemMapFuchsia::releaseAndZeroPagesToOSImpl(uptr From, uptr Size) {
+ zx_status_t Status = _zx_vmo_op_range(Vmo, ZX_VMO_OP_DECOMMIT, From - MapAddr,
+ Size, nullptr, 0);
+ CHECK_EQ(Status, ZX_OK);
+}
+
+void MemMapFuchsia::setMemoryPermissionImpl(uptr Addr, uptr Size, uptr Flags) {
+ const bool NoAccess = !!(Flags & MAP_NOACCESS);
+
+ zx_vm_option_t MapFlags = 0;
+ if (!NoAccess)
+ MapFlags |= ZX_VM_PERM_READ | ZX_VM_PERM_WRITE;
+ zx_status_t Status =
+ _zx_vmar_protect(_zx_vmar_root_self(), MapFlags, Addr, Size);
+ CHECK_EQ(Status, ZX_OK);
+}
+
+bool ReservedMemoryFuchsia::createImpl(UNUSED uptr Addr, uptr Size,
+ UNUSED const char *Name, uptr Flags) {
+ const bool AllowNoMem = !!(Flags & MAP_ALLOWNOMEM);
+
+ // Reserve memory by mapping the placeholder VMO without any permission.
+ zx_status_t Status = _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_ALLOW_FAULTS, 0,
+ getPlaceholderVmo(), 0, Size, &Base);
+ if (UNLIKELY(Status != ZX_OK)) {
+ if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
+ dieOnError(Status, "zx_vmar_map", Size);
+ return false;
+ }
+
+ Capacity = Size;
+ return true;
+}
+
+void ReservedMemoryFuchsia::releaseImpl() {
+ zx_status_t Status = _zx_vmar_unmap(_zx_vmar_root_self(), Base, Capacity);
+ CHECK_EQ(Status, ZX_OK);
+}
+
+ReservedMemoryFuchsia::MemMapT ReservedMemoryFuchsia::dispatchImpl(uptr Addr,
+ uptr Size) {
+ return ReservedMemoryFuchsia::MemMapT(Addr, Size);
+}
+
+} // namespace scudo
+
+#endif // SCUDO_FUCHSIA
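getPlaceholderVmo above follows the create-then-publish idiom: every racing thread may create a VMO, but only the first compare-exchange publishes its handle and the losers close theirs. The same shape in portable C++ (a sketch with stand-in types, not the Fuchsia code):

#include <atomic>

using Handle = unsigned int;          // stand-in for zx_handle_t
constexpr Handle InvalidHandle = 0u;  // stand-in for ZX_HANDLE_INVALID

static Handle createResource() { return 42u; }  // e.g. zx_vmo_create()
static void destroyResource(Handle) {}          // e.g. zx_handle_close()

Handle getSingleton() {
  static std::atomic<Handle> Stored{InvalidHandle};
  Handle H = Stored.load(std::memory_order_acquire);
  if (H == InvalidHandle) {
    H = createResource();
    Handle Expected = InvalidHandle;
    if (!Stored.compare_exchange_strong(Expected, H,
                                        std::memory_order_acq_rel)) {
      // Another thread published first: discard ours, adopt theirs.
      destroyResource(H);
      H = Expected;
    }
  }
  return H;
}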
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_fuchsia.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_fuchsia.h
new file mode 100644
index 000000000000..2e66f89cfca5
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_fuchsia.h
@@ -0,0 +1,75 @@
+//===-- mem_map_fuchsia.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_MEM_MAP_FUCHSIA_H_
+#define SCUDO_MEM_MAP_FUCHSIA_H_
+
+#include "mem_map_base.h"
+
+#if SCUDO_FUCHSIA
+
+#include <stdint.h>
+#include <zircon/types.h>
+
+namespace scudo {
+
+class MemMapFuchsia final : public MemMapBase<MemMapFuchsia> {
+public:
+ constexpr MemMapFuchsia() = default;
+
+ // Impls for base functions.
+ bool mapImpl(uptr Addr, uptr Size, const char *Name, uptr Flags);
+ void unmapImpl(uptr Addr, uptr Size);
+ bool remapImpl(uptr Addr, uptr Size, const char *Name, uptr Flags);
+ void setMemoryPermissionImpl(uptr Addr, uptr Size, uptr Flags);
+ void releasePagesToOSImpl(uptr From, uptr Size) {
+ return releaseAndZeroPagesToOSImpl(From, Size);
+ }
+ void releaseAndZeroPagesToOSImpl(uptr From, uptr Size);
+ uptr getBaseImpl() { return WindowBase; }
+ uptr getCapacityImpl() { return WindowSize; }
+
+private:
+ friend class ReservedMemoryFuchsia;
+
+ // Used by ReservedMemoryFuchsia::dispatch.
+ MemMapFuchsia(uptr Base, uptr Capacity);
+
+ // Virtual memory address corresponding to VMO offset 0.
+ uptr MapAddr = 0;
+
+ // Virtual memory base address and size of the VMO subrange that is still in
+ // use. unmapImpl() can shrink this range, either at the beginning or at the
+ // end.
+ uptr WindowBase = 0;
+ uptr WindowSize = 0;
+
+ zx_handle_t Vmo = ZX_HANDLE_INVALID;
+};
+
+class ReservedMemoryFuchsia final
+ : public ReservedMemory<ReservedMemoryFuchsia, MemMapFuchsia> {
+public:
+ constexpr ReservedMemoryFuchsia() = default;
+
+ bool createImpl(uptr Addr, uptr Size, const char *Name, uptr Flags);
+ void releaseImpl();
+ MemMapT dispatchImpl(uptr Addr, uptr Size);
+ uptr getBaseImpl() { return Base; }
+ uptr getCapacityImpl() { return Capacity; }
+
+private:
+ uptr Base = 0;
+ uptr Capacity = 0;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_FUCHSIA
+
+#endif // SCUDO_MEM_MAP_FUCHSIA_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_linux.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_linux.cpp
new file mode 100644
index 000000000000..783c4f0d9ab0
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_linux.cpp
@@ -0,0 +1,153 @@
+//===-- mem_map_linux.cpp ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "platform.h"
+
+#if SCUDO_LINUX
+
+#include "mem_map_linux.h"
+
+#include "common.h"
+#include "internal_defs.h"
+#include "linux.h"
+#include "mutex.h"
+#include "report_linux.h"
+#include "string_utils.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/futex.h>
+#include <sched.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/time.h>
+#include <time.h>
+#include <unistd.h>
+
+#if SCUDO_ANDROID
+// TODO(chiahungduan): Review if we still need the following macros.
+#include <sys/prctl.h>
+// Definitions of prctl arguments to set a vma name in Android kernels.
+#define ANDROID_PR_SET_VMA 0x53564d41
+#define ANDROID_PR_SET_VMA_ANON_NAME 0
+#endif
+
+namespace scudo {
+
+static void *mmapWrapper(uptr Addr, uptr Size, const char *Name, uptr Flags) {
+ int MmapFlags = MAP_PRIVATE | MAP_ANONYMOUS;
+ int MmapProt;
+ if (Flags & MAP_NOACCESS) {
+ MmapFlags |= MAP_NORESERVE;
+ MmapProt = PROT_NONE;
+ } else {
+ MmapProt = PROT_READ | PROT_WRITE;
+ }
+#if defined(__aarch64__)
+#ifndef PROT_MTE
+#define PROT_MTE 0x20
+#endif
+ if (Flags & MAP_MEMTAG)
+ MmapProt |= PROT_MTE;
+#endif
+ if (Addr)
+ MmapFlags |= MAP_FIXED;
+ void *P =
+ mmap(reinterpret_cast<void *>(Addr), Size, MmapProt, MmapFlags, -1, 0);
+ if (P == MAP_FAILED) {
+ if (!(Flags & MAP_ALLOWNOMEM) || errno != ENOMEM)
+ reportMapError(errno == ENOMEM ? Size : 0);
+ return nullptr;
+ }
+#if SCUDO_ANDROID
+ if (Name)
+ prctl(ANDROID_PR_SET_VMA, ANDROID_PR_SET_VMA_ANON_NAME, P, Size, Name);
+#else
+ (void)Name;
+#endif
+
+ return P;
+}
+
+bool MemMapLinux::mapImpl(uptr Addr, uptr Size, const char *Name, uptr Flags) {
+ void *P = mmapWrapper(Addr, Size, Name, Flags);
+ if (P == nullptr)
+ return false;
+
+ MapBase = reinterpret_cast<uptr>(P);
+ MapCapacity = Size;
+ return true;
+}
+
+void MemMapLinux::unmapImpl(uptr Addr, uptr Size) {
+ // If we unmap all the pages, also set `MapBase` to 0 to mark the mapping
+ // invalid.
+ if (Size == MapCapacity) {
+ MapBase = MapCapacity = 0;
+ } else {
+ // This is a partial unmap releasing pages from the beginning, so shift
+ // `MapBase` to the new base.
+ if (MapBase == Addr)
+ MapBase = Addr + Size;
+ MapCapacity -= Size;
+ }
+
+ if (munmap(reinterpret_cast<void *>(Addr), Size) != 0)
+ reportUnmapError(Addr, Size);
+}
+
+bool MemMapLinux::remapImpl(uptr Addr, uptr Size, const char *Name,
+ uptr Flags) {
+ void *P = mmapWrapper(Addr, Size, Name, Flags);
+ if (reinterpret_cast<uptr>(P) != Addr)
+ reportMapError();
+ return true;
+}
+
+void MemMapLinux::setMemoryPermissionImpl(uptr Addr, uptr Size, uptr Flags) {
+ int Prot = (Flags & MAP_NOACCESS) ? PROT_NONE : (PROT_READ | PROT_WRITE);
+ if (mprotect(reinterpret_cast<void *>(Addr), Size, Prot) != 0)
+ reportProtectError(Addr, Size, Prot);
+}
+
+void MemMapLinux::releaseAndZeroPagesToOSImpl(uptr From, uptr Size) {
+ void *Addr = reinterpret_cast<void *>(From);
+
+ while (madvise(Addr, Size, MADV_DONTNEED) == -1 && errno == EAGAIN) {
+ }
+}
+
+bool ReservedMemoryLinux::createImpl(uptr Addr, uptr Size, const char *Name,
+ uptr Flags) {
+ ReservedMemoryLinux::MemMapT MemMap;
+ if (!MemMap.map(Addr, Size, Name, Flags | MAP_NOACCESS))
+ return false;
+
+ MapBase = MemMap.getBase();
+ MapCapacity = MemMap.getCapacity();
+
+ return true;
+}
+
+void ReservedMemoryLinux::releaseImpl() {
+ if (munmap(reinterpret_cast<void *>(getBase()), getCapacity()) != 0)
+ reportUnmapError(getBase(), getCapacity());
+}
+
+ReservedMemoryLinux::MemMapT ReservedMemoryLinux::dispatchImpl(uptr Addr,
+ uptr Size) {
+ return ReservedMemoryLinux::MemMapT(Addr, Size);
+}
+
+} // namespace scudo
+
+#endif // SCUDO_LINUX
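For reference, the flag translation performed by mmapWrapper() above, summarized:

// Portable flag          mmap(2) effect
// ---------------------  ------------------------------------------------
// MAP_NOACCESS           PROT_NONE plus MAP_NORESERVE (pure reservation)
// (default)              PROT_READ | PROT_WRITE
// MAP_MEMTAG (aarch64)   adds PROT_MTE
// Addr != 0              adds MAP_FIXED (the caller must own the range,
//                        e.g. via a prior ReservedMemoryLinux reservation,
//                        since MAP_FIXED silently replaces mappings)
// MAP_ALLOWNOMEM         on ENOMEM, return nullptr instead of dying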
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_linux.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_linux.h
new file mode 100644
index 000000000000..7a89b3bff5ed
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mem_map_linux.h
@@ -0,0 +1,67 @@
+//===-- mem_map_linux.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_MEM_MAP_LINUX_H_
+#define SCUDO_MEM_MAP_LINUX_H_
+
+#include "platform.h"
+
+#if SCUDO_LINUX
+
+#include "common.h"
+#include "mem_map_base.h"
+
+namespace scudo {
+
+class MemMapLinux final : public MemMapBase<MemMapLinux> {
+public:
+ constexpr MemMapLinux() = default;
+ MemMapLinux(uptr Base, uptr Capacity)
+ : MapBase(Base), MapCapacity(Capacity) {}
+
+ // Impls for base functions.
+ bool mapImpl(uptr Addr, uptr Size, const char *Name, uptr Flags = 0);
+ void unmapImpl(uptr Addr, uptr Size);
+ bool remapImpl(uptr Addr, uptr Size, const char *Name, uptr Flags = 0);
+ void setMemoryPermissionImpl(uptr Addr, uptr Size, uptr Flags);
+ void releasePagesToOSImpl(uptr From, uptr Size) {
+ return releaseAndZeroPagesToOSImpl(From, Size);
+ }
+ void releaseAndZeroPagesToOSImpl(uptr From, uptr Size);
+ uptr getBaseImpl() { return MapBase; }
+ uptr getCapacityImpl() { return MapCapacity; }
+
+private:
+ uptr MapBase = 0;
+ uptr MapCapacity = 0;
+};
+
+// This will be deprecated when every allocator is supported by each
+// platform's `MemMap` implementation.
+class ReservedMemoryLinux final
+ : public ReservedMemory<ReservedMemoryLinux, MemMapLinux> {
+public:
+ // The following two are the Impls for functions in `MemMapBase`.
+ uptr getBaseImpl() { return MapBase; }
+ uptr getCapacityImpl() { return MapCapacity; }
+
+ // These three are specific to `ReservedMemory`.
+ bool createImpl(uptr Addr, uptr Size, const char *Name, uptr Flags);
+ void releaseImpl();
+ MemMapT dispatchImpl(uptr Addr, uptr Size);
+
+private:
+ uptr MapBase = 0;
+ uptr MapCapacity = 0;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_LINUX
+
+#endif // SCUDO_MEM_MAP_LINUX_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/memtag.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/memtag.h
index c48e228fbe44..aaed2192ad75 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/memtag.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/memtag.h
@@ -11,19 +11,22 @@
#include "internal_defs.h"
-#if SCUDO_LINUX
+#if SCUDO_CAN_USE_MTE
#include <sys/auxv.h>
#include <sys/prctl.h>
#endif
namespace scudo {
-#if (__clang_major__ >= 12 && defined(__aarch64__)) || defined(SCUDO_FUZZ)
+#if (__clang_major__ >= 12 && defined(__aarch64__) && !defined(__ILP32__)) || \
+ defined(SCUDO_FUZZ)
// We assume that Top-Byte Ignore is enabled if the architecture supports memory
// tagging. Not all operating systems enable TBI, so we only claim architectural
// support for memory tagging if the operating system enables TBI.
-#if SCUDO_LINUX && !defined(SCUDO_DISABLE_TBI)
+// HWASan uses the top byte for its own purpose and Scudo should not touch it.
+#if SCUDO_CAN_USE_MTE && !defined(SCUDO_DISABLE_TBI) && \
+ !__has_feature(hwaddress_sanitizer)
inline constexpr bool archSupportsMemoryTagging() { return true; }
#else
inline constexpr bool archSupportsMemoryTagging() { return false; }
@@ -39,25 +42,25 @@ inline uint8_t extractTag(uptr Ptr) { return (Ptr >> 56) & 0xf; }
inline constexpr bool archSupportsMemoryTagging() { return false; }
-inline uptr archMemoryTagGranuleSize() {
+inline NORETURN uptr archMemoryTagGranuleSize() {
UNREACHABLE("memory tagging not supported");
}
-inline uptr untagPointer(uptr Ptr) {
+inline NORETURN uptr untagPointer(uptr Ptr) {
(void)Ptr;
UNREACHABLE("memory tagging not supported");
}
-inline uint8_t extractTag(uptr Ptr) {
+inline NORETURN uint8_t extractTag(uptr Ptr) {
(void)Ptr;
UNREACHABLE("memory tagging not supported");
}
#endif
-#if __clang_major__ >= 12 && defined(__aarch64__)
+#if __clang_major__ >= 12 && defined(__aarch64__) && !defined(__ILP32__)
-#if SCUDO_LINUX
+#if SCUDO_CAN_USE_MTE
inline bool systemSupportsMemoryTagging() {
#ifndef HWCAP2_MTE
@@ -91,9 +94,10 @@ inline bool systemDetectsMemoryTagFaultsTestOnly() {
#ifndef PR_MTE_TCF_MASK
#define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT)
#endif
- return (static_cast<unsigned long>(
- prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0)) &
- PR_MTE_TCF_MASK) != PR_MTE_TCF_NONE;
+ int res = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
+ if (res == -1)
+ return false;
+ return (static_cast<unsigned long>(res) & PR_MTE_TCF_MASK) != PR_MTE_TCF_NONE;
}
inline void enableSystemMemoryTaggingTestOnly() {
@@ -102,19 +106,19 @@ inline void enableSystemMemoryTaggingTestOnly() {
0, 0, 0);
}
-#else // !SCUDO_LINUX
+#else // !SCUDO_CAN_USE_MTE
inline bool systemSupportsMemoryTagging() { return false; }
-inline bool systemDetectsMemoryTagFaultsTestOnly() {
+inline NORETURN bool systemDetectsMemoryTagFaultsTestOnly() {
UNREACHABLE("memory tagging not supported");
}
-inline void enableSystemMemoryTaggingTestOnly() {
+inline NORETURN void enableSystemMemoryTaggingTestOnly() {
UNREACHABLE("memory tagging not supported");
}
-#endif // SCUDO_LINUX
+#endif // SCUDO_CAN_USE_MTE
class ScopedDisableMemoryTagChecks {
uptr PrevTCO;
@@ -252,15 +256,15 @@ inline uptr loadTag(uptr Ptr) {
#else
-inline bool systemSupportsMemoryTagging() {
+inline NORETURN bool systemSupportsMemoryTagging() {
UNREACHABLE("memory tagging not supported");
}
-inline bool systemDetectsMemoryTagFaultsTestOnly() {
+inline NORETURN bool systemDetectsMemoryTagFaultsTestOnly() {
UNREACHABLE("memory tagging not supported");
}
-inline void enableSystemMemoryTaggingTestOnly() {
+inline NORETURN void enableSystemMemoryTaggingTestOnly() {
UNREACHABLE("memory tagging not supported");
}
@@ -268,41 +272,44 @@ struct ScopedDisableMemoryTagChecks {
ScopedDisableMemoryTagChecks() {}
};
-inline uptr selectRandomTag(uptr Ptr, uptr ExcludeMask) {
+inline NORETURN uptr selectRandomTag(uptr Ptr, uptr ExcludeMask) {
(void)Ptr;
(void)ExcludeMask;
UNREACHABLE("memory tagging not supported");
}
-inline uptr addFixedTag(uptr Ptr, uptr Tag) {
+inline NORETURN uptr addFixedTag(uptr Ptr, uptr Tag) {
(void)Ptr;
(void)Tag;
UNREACHABLE("memory tagging not supported");
}
-inline uptr storeTags(uptr Begin, uptr End) {
+inline NORETURN uptr storeTags(uptr Begin, uptr End) {
(void)Begin;
(void)End;
UNREACHABLE("memory tagging not supported");
}
-inline void storeTag(uptr Ptr) {
+inline NORETURN void storeTag(uptr Ptr) {
(void)Ptr;
UNREACHABLE("memory tagging not supported");
}
-inline uptr loadTag(uptr Ptr) {
+inline NORETURN uptr loadTag(uptr Ptr) {
(void)Ptr;
UNREACHABLE("memory tagging not supported");
}
#endif
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wmissing-noreturn"
inline void setRandomTag(void *Ptr, uptr Size, uptr ExcludeMask,
uptr *TaggedBegin, uptr *TaggedEnd) {
*TaggedBegin = selectRandomTag(reinterpret_cast<uptr>(Ptr), ExcludeMask);
*TaggedEnd = storeTags(*TaggedBegin, *TaggedBegin + Size);
}
+#pragma GCC diagnostic pop
inline void *untagPointer(void *Ptr) {
return reinterpret_cast<void *>(untagPointer(reinterpret_cast<uptr>(Ptr)));
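A quick worked example of the tagging helpers in the supported configuration, with an illustrative pointer value; under TBI the tag nibble occupies bits 56-59, untagPointer clears the whole top byte, and addFixedTag is assumed to place the tag nibble at bit 56:

// uptr P = 0x0A007FFFDEADBEEFull;        // tag nibble 0xA in bits 56..59
// extractTag(P)        == 0xA            // (P >> 56) & 0xf
// uptr U = untagPointer(P);              // top byte cleared:
//                                        //   0x00007FFFDEADBEEF
// addFixedTag(U, 0x3)  == 0x03007FFFDEADBEEF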
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mutex.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mutex.h
index c8504c040914..4caa945219b5 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mutex.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/mutex.h
@@ -11,6 +11,7 @@
#include "atomic_helpers.h"
#include "common.h"
+#include "thread_annotations.h"
#include <string.h>
@@ -20,10 +21,10 @@
namespace scudo {
-class HybridMutex {
+class CAPABILITY("mutex") HybridMutex {
public:
- bool tryLock();
- NOINLINE void lock() {
+ bool tryLock() TRY_ACQUIRE(true);
+ NOINLINE void lock() ACQUIRE() {
if (LIKELY(tryLock()))
return;
// The compiler may try to fully unroll the loop, ending up in a
@@ -34,17 +35,41 @@ public:
#pragma nounroll
#endif
for (u8 I = 0U; I < NumberOfTries; I++) {
- yieldProcessor(NumberOfYields);
+ delayLoop();
if (tryLock())
return;
}
lockSlow();
}
- void unlock();
+ void unlock() RELEASE();
+
+ // TODO(chiahungduan): In general, we may want to assert the owner of the
+ // lock as well. Given the current uses of HybridMutex, it's acceptable
+ // without asserting the owner. Re-evaluate this when we have scenarios
+ // that require a more fine-grained lock granularity.
+ ALWAYS_INLINE void assertHeld() ASSERT_CAPABILITY(this) {
+ if (SCUDO_DEBUG)
+ assertHeldImpl();
+ }
private:
- static constexpr u8 NumberOfTries = 8U;
- static constexpr u8 NumberOfYields = 8U;
+ void delayLoop() {
+ // The value is derived from the average time spent on cache accesses (the
+ // fastest operations), so that when the lock holder is doing something
+ // fast, we are unlikely to spin for much longer than necessary.
+ constexpr u32 SpinTimes = 16;
+ volatile u32 V = 0;
+ for (u32 I = 0; I < SpinTimes; ++I) {
+ u32 Tmp = V + 1;
+ V = Tmp;
+ }
+ }
+
+ void assertHeldImpl();
+
+ // TODO(chiahungduan): Adapt this value based on scenarios. E.g., primary and
+ // secondary allocator have different allocation times.
+ static constexpr u8 NumberOfTries = 32U;
#if SCUDO_LINUX
atomic_u32 M = {};
@@ -52,13 +77,13 @@ private:
sync_mutex_t M = {};
#endif
- void lockSlow();
+ void lockSlow() ACQUIRE();
};
-class ScopedLock {
+class SCOPED_CAPABILITY ScopedLock {
public:
- explicit ScopedLock(HybridMutex &M) : Mutex(M) { Mutex.lock(); }
- ~ScopedLock() { Mutex.unlock(); }
+ explicit ScopedLock(HybridMutex &M) ACQUIRE(M) : Mutex(M) { Mutex.lock(); }
+ ~ScopedLock() RELEASE() { Mutex.unlock(); }
private:
HybridMutex &Mutex;
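With the capability annotations in place, Clang's -Wthread-safety can check locking statically. A small sketch of guarded state, assuming the usual annotation macros (GUARDED_BY, REQUIRES) from thread_annotations.h; the struct is illustrative:

struct Counter {
  HybridMutex Mutex;
  uptr Value GUARDED_BY(Mutex) = 0;

  void increment() {
    ScopedLock L(Mutex);  // SCOPED_CAPABILITY: acquired here, released in ~L.
    Value++;              // OK: the analysis knows Mutex is held.
  }

  uptr getLocked() REQUIRES(Mutex) {
    Mutex.assertHeld();   // Runtime backstop in SCUDO_DEBUG builds.
    return Value;         // Reading Value without Mutex would be a warning.
  }
};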
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/options.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/options.h
index 4e6786513334..b20142a41590 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/options.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/options.h
@@ -38,7 +38,7 @@ struct Options {
}
};
-template <typename Config> bool useMemoryTagging(Options Options) {
+template <typename Config> bool useMemoryTagging(const Options &Options) {
return allocatorSupportsMemoryTagging<Config>() &&
Options.get(OptionBit::UseMemoryTagging);
}
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/platform.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/platform.h
index 36378d14d844..b71a86be7669 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/platform.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/platform.h
@@ -37,7 +37,13 @@
#define SCUDO_TRUSTY 0
#endif
-#if __LP64__
+#if defined(__riscv) && (__riscv_xlen == 64)
+#define SCUDO_RISCV64 1
+#else
+#define SCUDO_RISCV64 0
+#endif
+
+#if defined(__LP64__)
#define SCUDO_WORDSIZE 64U
#else
#define SCUDO_WORDSIZE 32U
@@ -53,6 +59,24 @@
#define SCUDO_CAN_USE_PRIMARY64 (SCUDO_WORDSIZE == 64U)
#endif
+#ifndef SCUDO_CAN_USE_MTE
+#define SCUDO_CAN_USE_MTE (SCUDO_LINUX || SCUDO_TRUSTY)
+#endif
+
+// Use smaller table sizes for fuzzing in order to reduce input size.
+// Trusty just has less available memory.
+#ifndef SCUDO_SMALL_STACK_DEPOT
+#if defined(SCUDO_FUZZ) || SCUDO_TRUSTY
+#define SCUDO_SMALL_STACK_DEPOT 1
+#else
+#define SCUDO_SMALL_STACK_DEPOT 0
+#endif
+#endif
+
+#ifndef SCUDO_ENABLE_HOOKS
+#define SCUDO_ENABLE_HOOKS 0
+#endif
+
#ifndef SCUDO_MIN_ALIGNMENT_LOG
// We force malloc-type functions to be aligned to std::max_align_t, but there
// is no reason why the minimum alignment for all other functions can't be 8
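All of these are compile-time knobs with defaults; a platform overrides them on the compiler command line before this header is processed, for example (flags illustrative):

// -DSCUDO_CAN_USE_MTE=0        force-disable MTE probing even where allowed
// -DSCUDO_SMALL_STACK_DEPOT=1  shrink the stack depot on memory-tight targets
// -DSCUDO_ENABLE_HOOKS=1       compile in the __scudo_*_hook callouts
// -DSCUDO_MIN_ALIGNMENT_LOG=3  relax the minimum alignment (see below)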
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/primary32.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/primary32.h
index 326c10a32a85..4d03b282d000 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/primary32.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/primary32.h
@@ -9,6 +9,7 @@
#ifndef SCUDO_PRIMARY32_H_
#define SCUDO_PRIMARY32_H_
+#include "allocator_common.h"
#include "bytemap.h"
#include "common.h"
#include "list.h"
@@ -18,6 +19,7 @@
#include "report.h"
#include "stats.h"
#include "string_utils.h"
+#include "thread_annotations.h"
namespace scudo {
@@ -41,26 +43,32 @@ namespace scudo {
template <typename Config> class SizeClassAllocator32 {
public:
- typedef typename Config::PrimaryCompactPtrT CompactPtrT;
- typedef typename Config::SizeClassMap SizeClassMap;
+ typedef typename Config::Primary::CompactPtrT CompactPtrT;
+ typedef typename Config::Primary::SizeClassMap SizeClassMap;
+ static const uptr GroupSizeLog = Config::Primary::GroupSizeLog;
// The bytemap can only track UINT8_MAX - 1 classes.
static_assert(SizeClassMap::LargestClassId <= (UINT8_MAX - 1), "");
// Regions should be large enough to hold the largest Block.
- static_assert((1UL << Config::PrimaryRegionSizeLog) >= SizeClassMap::MaxSize,
+ static_assert((1UL << Config::Primary::RegionSizeLog) >=
+ SizeClassMap::MaxSize,
"");
typedef SizeClassAllocator32<Config> ThisT;
typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
- typedef typename CacheT::TransferBatch TransferBatch;
+ typedef TransferBatch<ThisT> TransferBatchT;
+ typedef BatchGroup<ThisT> BatchGroupT;
+
+ static_assert(sizeof(BatchGroupT) <= sizeof(TransferBatchT),
+ "BatchGroupT uses the same class size as TransferBatchT");
static uptr getSizeByClassId(uptr ClassId) {
return (ClassId == SizeClassMap::BatchClassId)
- ? sizeof(TransferBatch)
+ ? sizeof(TransferBatchT)
: SizeClassMap::getSizeByClassId(ClassId);
}
static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }
- void init(s32 ReleaseToOsInterval) {
+ void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
if (SCUDO_FUCHSIA)
reportError("SizeClassAllocator32 is not supported on Fuchsia");
@@ -70,7 +78,7 @@ public:
DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
PossibleRegions.init();
u32 Seed;
- const u64 Time = getMonotonicTime();
+ const u64 Time = getMonotonicTimeFast();
if (!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed)))
Seed = static_cast<u32>(
Time ^ (reinterpret_cast<uptr>(SizeClassInfoArray) >> 6));
@@ -85,24 +93,77 @@ public:
}
void unmapTestOnly() {
- while (NumberOfStashedRegions > 0)
- unmap(reinterpret_cast<void *>(RegionsStash[--NumberOfStashedRegions]),
- RegionSize);
+ {
+ ScopedLock L(RegionsStashMutex);
+ while (NumberOfStashedRegions > 0) {
+ unmap(reinterpret_cast<void *>(RegionsStash[--NumberOfStashedRegions]),
+ RegionSize);
+ }
+ }
+
uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
for (uptr I = 0; I < NumClasses; I++) {
SizeClassInfo *Sci = getSizeClassInfo(I);
+ ScopedLock L(Sci->Mutex);
if (Sci->MinRegionIndex < MinRegionIndex)
MinRegionIndex = Sci->MinRegionIndex;
if (Sci->MaxRegionIndex > MaxRegionIndex)
MaxRegionIndex = Sci->MaxRegionIndex;
*Sci = {};
}
- for (uptr I = MinRegionIndex; I < MaxRegionIndex; I++)
+
+ ScopedLock L(ByteMapMutex);
+ for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
if (PossibleRegions[I])
unmap(reinterpret_cast<void *>(I * RegionSize), RegionSize);
PossibleRegions.unmapTestOnly();
}
+ // When all blocks are freed, their total size has to equal `AllocatedUser`.
+ void verifyAllBlocksAreReleasedTestOnly() {
+ // `BatchGroup` and `TransferBatch` also use the blocks from BatchClass.
+ uptr BatchClassUsedInFreeLists = 0;
+ for (uptr I = 0; I < NumClasses; I++) {
+ // We have to count BatchClassUsedInFreeLists in other regions first.
+ if (I == SizeClassMap::BatchClassId)
+ continue;
+ SizeClassInfo *Sci = getSizeClassInfo(I);
+ ScopedLock L1(Sci->Mutex);
+ uptr TotalBlocks = 0;
+ for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) {
+ // `BG::Batches` are `TransferBatches`. +1 for `BatchGroup`.
+ BatchClassUsedInFreeLists += BG.Batches.size() + 1;
+ for (const auto &It : BG.Batches)
+ TotalBlocks += It.getCount();
+ }
+
+ const uptr BlockSize = getSizeByClassId(I);
+ DCHECK_EQ(TotalBlocks, Sci->AllocatedUser / BlockSize);
+ DCHECK_EQ(Sci->FreeListInfo.PushedBlocks, Sci->FreeListInfo.PoppedBlocks);
+ }
+
+ SizeClassInfo *Sci = getSizeClassInfo(SizeClassMap::BatchClassId);
+ ScopedLock L1(Sci->Mutex);
+ uptr TotalBlocks = 0;
+ for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) {
+ if (LIKELY(!BG.Batches.empty())) {
+ for (const auto &It : BG.Batches)
+ TotalBlocks += It.getCount();
+ } else {
+ // A `BatchGroup` with an empty freelist doesn't have a `TransferBatch`
+ // recording itself.
+ ++TotalBlocks;
+ }
+ }
+
+ const uptr BlockSize = getSizeByClassId(SizeClassMap::BatchClassId);
+ DCHECK_EQ(TotalBlocks + BatchClassUsedInFreeLists,
+ Sci->AllocatedUser / BlockSize);
+ const uptr BlocksInUse =
+ Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks;
+ DCHECK_EQ(BlocksInUse, BatchClassUsedInFreeLists);
+ }
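The DCHECKs above reduce to a simple accounting identity: every block carved out of a class's regions is either in use or in the freelist, and for BatchClassId the blocks backing BatchGroup/TransferBatch headers are counted as in use by the other classes' freelists. A hedged sketch of the identity with simplified counters (all names illustrative):

using uptr = unsigned long;

struct ClassCounters {
  uptr AllocatedUser; // bytes handed to this size class
  uptr BlockSize;     // block size of the class
  uptr FreeBlocks;    // blocks counted in the freelist
  uptr HeaderBlocks;  // batch-class blocks backing BG/TB headers
};

// Normal class: the freelist alone must cover everything allocated.
bool normalClassOK(const ClassCounters &C) {
  return C.FreeBlocks == C.AllocatedUser / C.BlockSize;
}

// Batch class: free blocks plus header-backing blocks must cover it.
bool batchClassOK(const ClassCounters &C) {
  return C.FreeBlocks + C.HeaderBlocks == C.AllocatedUser / C.BlockSize;
}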
+
CompactPtrT compactPtr(UNUSED uptr ClassId, uptr Ptr) const {
return static_cast<CompactPtrT>(Ptr);
}
@@ -111,35 +172,95 @@ public:
return reinterpret_cast<void *>(static_cast<uptr>(CompactPtr));
}
- TransferBatch *popBatch(CacheT *C, uptr ClassId) {
+ uptr compactPtrGroupBase(CompactPtrT CompactPtr) {
+ const uptr Mask = (static_cast<uptr>(1) << GroupSizeLog) - 1;
+ return CompactPtr & ~Mask;
+ }
+
+ uptr decompactGroupBase(uptr CompactPtrGroupBase) {
+ return CompactPtrGroupBase;
+ }
+
+ ALWAYS_INLINE static bool isSmallBlock(uptr BlockSize) {
+ const uptr PageSize = getPageSizeCached();
+ return BlockSize < PageSize / 16U;
+ }
+
+ ALWAYS_INLINE static bool isLargeBlock(uptr BlockSize) {
+ const uptr PageSize = getPageSizeCached();
+ return BlockSize > PageSize;
+ }
+
+ // Note that `MaxBlockCount` will be used once we support an arbitrary block
+ // count. For now it's the same as the number of blocks stored in the
+ // `TransferBatch`.
+ u16 popBlocks(CacheT *C, uptr ClassId, CompactPtrT *ToArray,
+ UNUSED const u16 MaxBlockCount) {
+ TransferBatchT *B = popBatch(C, ClassId);
+ if (!B)
+ return 0;
+
+ const u16 Count = B->getCount();
+ DCHECK_GT(Count, 0U);
+ B->moveToArray(ToArray);
+
+ if (ClassId != SizeClassMap::BatchClassId)
+ C->deallocate(SizeClassMap::BatchClassId, B);
+
+ return Count;
+ }
+
+ TransferBatchT *popBatch(CacheT *C, uptr ClassId) {
DCHECK_LT(ClassId, NumClasses);
SizeClassInfo *Sci = getSizeClassInfo(ClassId);
ScopedLock L(Sci->Mutex);
- TransferBatch *B = Sci->FreeList.front();
- if (B) {
- Sci->FreeList.pop_front();
- } else {
- B = populateFreeList(C, ClassId, Sci);
- if (UNLIKELY(!B))
+ TransferBatchT *B = popBatchImpl(C, ClassId, Sci);
+ if (UNLIKELY(!B)) {
+ if (UNLIKELY(!populateFreeList(C, ClassId, Sci)))
return nullptr;
+ B = popBatchImpl(C, ClassId, Sci);
+ // If `populateFreeList` succeeded, we are supposed to get free blocks.
+ DCHECK_NE(B, nullptr);
}
- DCHECK_GT(B->getCount(), 0);
- Sci->Stats.PoppedBlocks += B->getCount();
return B;
}
- void pushBatch(uptr ClassId, TransferBatch *B) {
+ // Push the array of free blocks to the designated batch group.
+ void pushBlocks(CacheT *C, uptr ClassId, CompactPtrT *Array, u32 Size) {
DCHECK_LT(ClassId, NumClasses);
- DCHECK_GT(B->getCount(), 0);
+ DCHECK_GT(Size, 0);
+
SizeClassInfo *Sci = getSizeClassInfo(ClassId);
+ if (ClassId == SizeClassMap::BatchClassId) {
+ ScopedLock L(Sci->Mutex);
+ pushBatchClassBlocks(Sci, Array, Size);
+ return;
+ }
+
+ // TODO(chiahungduan): Consider not doing grouping if the group size is not
+ // greater than the block size by a certain scale.
+
+ // Sort the blocks so that blocks belonging to the same group can be pushed
+ // together.
+ bool SameGroup = true;
+ for (u32 I = 1; I < Size; ++I) {
+ if (compactPtrGroupBase(Array[I - 1]) != compactPtrGroupBase(Array[I]))
+ SameGroup = false;
+ CompactPtrT Cur = Array[I];
+ u32 J = I;
+ while (J > 0 &&
+ compactPtrGroupBase(Cur) < compactPtrGroupBase(Array[J - 1])) {
+ Array[J] = Array[J - 1];
+ --J;
+ }
+ Array[J] = Cur;
+ }
+
ScopedLock L(Sci->Mutex);
- Sci->FreeList.push_front(B);
- Sci->Stats.PushedBlocks += B->getCount();
- if (ClassId != SizeClassMap::BatchClassId)
- releaseToOSMaybe(Sci, ClassId);
+ pushBlocksImpl(C, ClassId, Sci, Array, Size, SameGroup);
}
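The loop above is a plain insertion sort keyed on the group base, a reasonable choice because the pushed arrays are small and often nearly sorted. A standalone sketch, with an illustrative `groupBase()` standing in for compactPtrGroupBase():

#include <cstdint>

using CompactPtrT = uint32_t;
constexpr unsigned GroupSizeLog = 18; // illustrative group size (256 KiB)

// The group base is the pointer with the low GroupSizeLog bits cleared.
static CompactPtrT groupBase(CompactPtrT P) {
  return P & ~((CompactPtrT(1) << GroupSizeLog) - 1);
}

// Insertion sort by group base. Also reports whether every element already
// shared one group, letting the caller skip per-block group checks later.
static bool sortByGroup(CompactPtrT *Array, unsigned Size) {
  bool SameGroup = true;
  for (unsigned I = 1; I < Size; ++I) {
    if (groupBase(Array[I - 1]) != groupBase(Array[I]))
      SameGroup = false;
    const CompactPtrT Cur = Array[I];
    unsigned J = I;
    while (J > 0 && groupBase(Cur) < groupBase(Array[J - 1])) {
      Array[J] = Array[J - 1];
      --J;
    }
    Array[J] = Cur;
  }
  return SameGroup;
}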
- void disable() {
+ void disable() NO_THREAD_SAFETY_ANALYSIS {
// The BatchClassId must be locked last since other classes can use it.
for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--) {
if (static_cast<uptr>(I) == SizeClassMap::BatchClassId)
@@ -148,11 +269,11 @@ public:
}
getSizeClassInfo(SizeClassMap::BatchClassId)->Mutex.lock();
RegionsStashMutex.lock();
- PossibleRegions.disable();
+ ByteMapMutex.lock();
}
- void enable() {
- PossibleRegions.enable();
+ void enable() NO_THREAD_SAFETY_ANALYSIS {
+ ByteMapMutex.unlock();
RegionsStashMutex.unlock();
getSizeClassInfo(SizeClassMap::BatchClassId)->Mutex.unlock();
for (uptr I = 0; I < NumClasses; I++) {
@@ -166,12 +287,20 @@ public:
uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
for (uptr I = 0; I < NumClasses; I++) {
SizeClassInfo *Sci = getSizeClassInfo(I);
+ // TODO: The call of `iterateOverBlocks` requires disabling
+ // SizeClassAllocator32. We may consider locking each region on demand
+ // only.
+ Sci->Mutex.assertHeld();
if (Sci->MinRegionIndex < MinRegionIndex)
MinRegionIndex = Sci->MinRegionIndex;
if (Sci->MaxRegionIndex > MaxRegionIndex)
MaxRegionIndex = Sci->MaxRegionIndex;
}
- for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
+
+ // SizeClassAllocator32 is disabled, i.e., ByteMapMutex is held.
+ ByteMapMutex.assertHeld();
+
+ for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++) {
if (PossibleRegions[I] &&
(PossibleRegions[I] - 1U) != SizeClassMap::BatchClassId) {
const uptr BlockSize = getSizeByClassId(PossibleRegions[I] - 1U);
@@ -180,6 +309,7 @@ public:
for (uptr Block = From; Block < To; Block += BlockSize)
Callback(Block);
}
+ }
}
void getStats(ScopedString *Str) {
@@ -189,22 +319,38 @@ public:
uptr PushedBlocks = 0;
for (uptr I = 0; I < NumClasses; I++) {
SizeClassInfo *Sci = getSizeClassInfo(I);
+ ScopedLock L(Sci->Mutex);
TotalMapped += Sci->AllocatedUser;
- PoppedBlocks += Sci->Stats.PoppedBlocks;
- PushedBlocks += Sci->Stats.PushedBlocks;
+ PoppedBlocks += Sci->FreeListInfo.PoppedBlocks;
+ PushedBlocks += Sci->FreeListInfo.PushedBlocks;
}
Str->append("Stats: SizeClassAllocator32: %zuM mapped in %zu allocations; "
"remains %zu\n",
TotalMapped >> 20, PoppedBlocks, PoppedBlocks - PushedBlocks);
- for (uptr I = 0; I < NumClasses; I++)
- getStats(Str, I, 0);
+ for (uptr I = 0; I < NumClasses; I++) {
+ SizeClassInfo *Sci = getSizeClassInfo(I);
+ ScopedLock L(Sci->Mutex);
+ getStats(Str, I, Sci);
+ }
+ }
+
+ void getFragmentationInfo(ScopedString *Str) {
+ Str->append(
+ "Fragmentation Stats: SizeClassAllocator32: page size = %zu bytes\n",
+ getPageSizeCached());
+
+ for (uptr I = 1; I < NumClasses; I++) {
+ SizeClassInfo *Sci = getSizeClassInfo(I);
+ ScopedLock L(Sci->Mutex);
+ getSizeClassFragmentationInfo(Sci, I, Str);
+ }
}
bool setOption(Option O, sptr Value) {
if (O == Option::ReleaseInterval) {
- const s32 Interval = Max(
- Min(static_cast<s32>(Value), Config::PrimaryMaxReleaseToOsIntervalMs),
- Config::PrimaryMinReleaseToOsIntervalMs);
+ const s32 Interval = Max(Min(static_cast<s32>(Value),
+ Config::Primary::MaxReleaseToOsIntervalMs),
+ Config::Primary::MinReleaseToOsIntervalMs);
atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
return true;
}
@@ -212,14 +358,22 @@ public:
return true;
}
- uptr releaseToOS() {
+ uptr tryReleaseToOS(uptr ClassId, ReleaseToOS ReleaseType) {
+ SizeClassInfo *Sci = getSizeClassInfo(ClassId);
+ // TODO: Once we have separate locks like primary64, we may consider using
+ // tryLock() as well.
+ ScopedLock L(Sci->Mutex);
+ return releaseToOSMaybe(Sci, ClassId, ReleaseType);
+ }
+
+ uptr releaseToOS(ReleaseToOS ReleaseType) {
uptr TotalReleasedBytes = 0;
for (uptr I = 0; I < NumClasses; I++) {
if (I == SizeClassMap::BatchClassId)
continue;
SizeClassInfo *Sci = getSizeClassInfo(I);
ScopedLock L(Sci->Mutex);
- TotalReleasedBytes += releaseToOSMaybe(Sci, I, /*Force=*/true);
+ TotalReleasedBytes += releaseToOSMaybe(Sci, I, ReleaseType);
}
return TotalReleasedBytes;
}
@@ -236,42 +390,42 @@ public:
private:
static const uptr NumClasses = SizeClassMap::NumClasses;
- static const uptr RegionSize = 1UL << Config::PrimaryRegionSizeLog;
+ static const uptr RegionSize = 1UL << Config::Primary::RegionSizeLog;
static const uptr NumRegions =
- SCUDO_MMAP_RANGE_SIZE >> Config::PrimaryRegionSizeLog;
+ SCUDO_MMAP_RANGE_SIZE >> Config::Primary::RegionSizeLog;
static const u32 MaxNumBatches = SCUDO_ANDROID ? 4U : 8U;
typedef FlatByteMap<NumRegions> ByteMap;
- struct SizeClassStats {
- uptr PoppedBlocks;
- uptr PushedBlocks;
- };
-
struct ReleaseToOsInfo {
- uptr PushedBlocksAtLastRelease;
+ uptr BytesInFreeListAtLastCheckpoint;
uptr RangesReleased;
uptr LastReleasedBytes;
u64 LastReleaseAtNs;
};
+ struct BlocksInfo {
+ SinglyLinkedList<BatchGroupT> BlockList = {};
+ uptr PoppedBlocks = 0;
+ uptr PushedBlocks = 0;
+ };
+
struct alignas(SCUDO_CACHE_LINE_SIZE) SizeClassInfo {
HybridMutex Mutex;
- SinglyLinkedList<TransferBatch> FreeList;
- uptr CurrentRegion;
- uptr CurrentRegionAllocated;
- SizeClassStats Stats;
+ BlocksInfo FreeListInfo GUARDED_BY(Mutex);
+ uptr CurrentRegion GUARDED_BY(Mutex);
+ uptr CurrentRegionAllocated GUARDED_BY(Mutex);
u32 RandState;
- uptr AllocatedUser;
+ uptr AllocatedUser GUARDED_BY(Mutex);
// Lowest & highest region index allocated for this size class, to avoid
// looping through the whole NumRegions.
- uptr MinRegionIndex;
- uptr MaxRegionIndex;
- ReleaseToOsInfo ReleaseInfo;
+ uptr MinRegionIndex GUARDED_BY(Mutex);
+ uptr MaxRegionIndex GUARDED_BY(Mutex);
+ ReleaseToOsInfo ReleaseInfo GUARDED_BY(Mutex);
};
static_assert(sizeof(SizeClassInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");
uptr computeRegionId(uptr Mem) {
- const uptr Id = Mem >> Config::PrimaryRegionSizeLog;
+ const uptr Id = Mem >> Config::Primary::RegionSizeLog;
CHECK_LT(Id, NumRegions);
return Id;
}
@@ -291,17 +445,22 @@ private:
else
MapSize = RegionSize;
} else {
- Region = roundUpTo(MapBase, RegionSize);
+ Region = roundUp(MapBase, RegionSize);
unmap(reinterpret_cast<void *>(MapBase), Region - MapBase);
MapSize = RegionSize;
}
const uptr End = Region + MapSize;
if (End != MapEnd)
unmap(reinterpret_cast<void *>(End), MapEnd - End);
+
+ DCHECK_EQ(Region % RegionSize, 0U);
+ static_assert(Config::Primary::RegionSizeLog == GroupSizeLog,
+ "Memory group should be the same size as Region");
+
return Region;
}
- uptr allocateRegion(SizeClassInfo *Sci, uptr ClassId) {
+ uptr allocateRegion(SizeClassInfo *Sci, uptr ClassId) REQUIRES(Sci->Mutex) {
DCHECK_LT(ClassId, NumClasses);
uptr Region = 0;
{
@@ -318,6 +477,7 @@ private:
Sci->MinRegionIndex = RegionIndex;
if (RegionIndex > Sci->MaxRegionIndex)
Sci->MaxRegionIndex = RegionIndex;
+ ScopedLock L(ByteMapMutex);
PossibleRegions.set(RegionIndex, static_cast<u8>(ClassId + 1U));
}
return Region;
@@ -328,8 +488,295 @@ private:
return &SizeClassInfoArray[ClassId];
}
- NOINLINE TransferBatch *populateFreeList(CacheT *C, uptr ClassId,
- SizeClassInfo *Sci) {
+ void pushBatchClassBlocks(SizeClassInfo *Sci, CompactPtrT *Array, u32 Size)
+ REQUIRES(Sci->Mutex) {
+ DCHECK_EQ(Sci, getSizeClassInfo(SizeClassMap::BatchClassId));
+
+ // Free blocks are recorded by a TransferBatch in the freelist for all
+ // size classes. In addition, TransferBatch is allocated from BatchClassId.
+ // In order not to use an additional block to record the free blocks in
+ // BatchClassId, they are self-contained, i.e., a TransferBatch records the
+ // address of its own block. See the figure below:
+ //
+ // TransferBatch at 0xABCD
+ // +----------------------------+
+ // | Free blocks' addr |
+ // | +------+------+------+ |
+ // | |0xABCD|... |... | |
+ // | +------+------+------+ |
+ // +----------------------------+
+ //
+ // When we allocate all the free blocks in the TransferBatch, the block used
+ // by TransferBatch is also free for use. We don't need to recycle the
+ // TransferBatch. Note that the correctness is maintained by the invariant,
+ //
+ // The unit of each popBatch() request is an entire TransferBatch.
+ // Returning part of the blocks in a TransferBatch is invalid.
+ //
+ // This ensures that a TransferBatch won't leak its own address while it's
+ // still holding other valid data.
+ //
+ // Besides, BatchGroup is also allocated from BatchClassId and has its
+ // address recorded in the TransferBatch too. To maintain the correctness,
+ //
+ // The address of BatchGroup is always recorded in the last TransferBatch
+ // in the freelist (which also implies that the freelist should only be
+ // updated with push_front). Once the last TransferBatch is popped,
+ // the block used by BatchGroup is also free for use.
+ //
+ // With this approach, the blocks used by BatchGroup and TransferBatch are
+ // reusable and need no additional space.
+
+ Sci->FreeListInfo.PushedBlocks += Size;
+ BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();
+
+ if (BG == nullptr) {
+ // Construct `BatchGroup` on the last element.
+ BG = reinterpret_cast<BatchGroupT *>(
+ decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
+ --Size;
+ BG->Batches.clear();
+ // BatchClass doesn't have memory groups enabled. Use `0` to indicate there's no
+ // memory group here.
+ BG->CompactPtrGroupBase = 0;
+ // `BG` is also the block of BatchClassId. Note that this is different
+ // from `CreateGroup` in `pushBlocksImpl`.
+ BG->PushedBlocks = 1;
+ BG->BytesInBGAtLastCheckpoint = 0;
+ BG->MaxCachedPerBatch =
+ CacheT::getMaxCached(getSizeByClassId(SizeClassMap::BatchClassId));
+
+ Sci->FreeListInfo.BlockList.push_front(BG);
+ }
+
+ if (UNLIKELY(Size == 0))
+ return;
+
+ // This happens in two cases:
+ // 1. We just allocated a new `BatchGroup`.
+ // 2. Only one block is pushed when the freelist is empty.
+ if (BG->Batches.empty()) {
+ // Construct the `TransferBatch` on the last element.
+ TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(
+ decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
+ TB->clear();
+ // As mentioned above, addresses of `TransferBatch` and `BatchGroup` are
+ // recorded in the TransferBatch.
+ TB->add(Array[Size - 1]);
+ TB->add(
+ compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(BG)));
+ --Size;
+ DCHECK_EQ(BG->PushedBlocks, 1U);
+ // `TB` is also the block of BatchClassId.
+ BG->PushedBlocks += 1;
+ BG->Batches.push_front(TB);
+ }
+
+ TransferBatchT *CurBatch = BG->Batches.front();
+ DCHECK_NE(CurBatch, nullptr);
+
+ for (u32 I = 0; I < Size;) {
+ u16 UnusedSlots =
+ static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
+ if (UnusedSlots == 0) {
+ CurBatch = reinterpret_cast<TransferBatchT *>(
+ decompactPtr(SizeClassMap::BatchClassId, Array[I]));
+ CurBatch->clear();
+ // Self-contained
+ CurBatch->add(Array[I]);
+ ++I;
+ // TODO(chiahungduan): Avoid the use of push_back() in `Batches` of
+ // BatchClassId.
+ BG->Batches.push_front(CurBatch);
+ UnusedSlots = static_cast<u16>(BG->MaxCachedPerBatch - 1);
+ }
+ // `UnusedSlots` is u16 so the result will also fit in u16.
+ const u16 AppendSize = static_cast<u16>(Min<u32>(UnusedSlots, Size - I));
+ CurBatch->appendFromArray(&Array[I], AppendSize);
+ I += AppendSize;
+ }
+
+ BG->PushedBlocks += Size;
+ }
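The essence of the self-containment described above is that a freed batch-class block is reinterpreted, in place, as the TransferBatch that tracks it. A minimal sketch with simplified types (the capacity and layout are illustrative, not Scudo's):

#include <cstdint>
#include <new>

using uptr = uintptr_t;

struct TransferBatch {
  static constexpr uint16_t MaxCached = 12; // illustrative capacity
  uint16_t Count = 0;
  uptr Blocks[MaxCached];
  void add(uptr P) { Blocks[Count++] = P; }
};

// `FreeBlock` is a just-freed batch-class block. Build the TransferBatch
// header inside it and record its own address as the first entry, so no
// extra memory is spent on bookkeeping.
TransferBatch *makeSelfContainedBatch(void *FreeBlock) {
  auto *TB = new (FreeBlock) TransferBatch();
  TB->add(reinterpret_cast<uptr>(FreeBlock));
  return TB;
}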
+ // Push the blocks to their batch group. The layout will be like,
+ //
+ //   FreeListInfo.BlockList -> BG -> BG -> BG
+ //                             |     |     |
+ //                             v     v     v
+ //                             TB    TB    TB
+ //                             |
+ //                             v
+ //                             TB
+ //
+ // Each BatchGroup (BG) is associated with a unique group id and its free
+ // blocks are managed by a list of TransferBatches (TB). To reduce insertion
+ // time, the BGs are kept sorted and the input `Array` is expected to be
+ // sorted as well, so that maintaining the sorted property is cheaper.
+ // Use `SameGroup=true` to indicate that all blocks in the array are from the
+ // same group; then we will skip checking the group id of each block.
+ //
+ // The region mutex needs to be held while calling this method.
+ void pushBlocksImpl(CacheT *C, uptr ClassId, SizeClassInfo *Sci,
+ CompactPtrT *Array, u32 Size, bool SameGroup = false)
+ REQUIRES(Sci->Mutex) {
+ DCHECK_NE(ClassId, SizeClassMap::BatchClassId);
+ DCHECK_GT(Size, 0U);
+
+ auto CreateGroup = [&](uptr CompactPtrGroupBase) {
+ BatchGroupT *BG =
+ reinterpret_cast<BatchGroupT *>(C->getBatchClassBlock());
+ BG->Batches.clear();
+ TransferBatchT *TB =
+ reinterpret_cast<TransferBatchT *>(C->getBatchClassBlock());
+ TB->clear();
+
+ BG->CompactPtrGroupBase = CompactPtrGroupBase;
+ BG->Batches.push_front(TB);
+ BG->PushedBlocks = 0;
+ BG->BytesInBGAtLastCheckpoint = 0;
+ BG->MaxCachedPerBatch = CacheT::getMaxCached(getSizeByClassId(ClassId));
+
+ return BG;
+ };
+
+ auto InsertBlocks = [&](BatchGroupT *BG, CompactPtrT *Array, u32 Size) {
+ SinglyLinkedList<TransferBatchT> &Batches = BG->Batches;
+ TransferBatchT *CurBatch = Batches.front();
+ DCHECK_NE(CurBatch, nullptr);
+
+ for (u32 I = 0; I < Size;) {
+ DCHECK_GE(BG->MaxCachedPerBatch, CurBatch->getCount());
+ u16 UnusedSlots =
+ static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
+ if (UnusedSlots == 0) {
+ CurBatch =
+ reinterpret_cast<TransferBatchT *>(C->getBatchClassBlock());
+ CurBatch->clear();
+ Batches.push_front(CurBatch);
+ UnusedSlots = BG->MaxCachedPerBatch;
+ }
+ // `UnusedSlots` is u16 so the result will also fit in u16.
+ u16 AppendSize = static_cast<u16>(Min<u32>(UnusedSlots, Size - I));
+ CurBatch->appendFromArray(&Array[I], AppendSize);
+ I += AppendSize;
+ }
+
+ BG->PushedBlocks += Size;
+ };
+
+ Sci->FreeListInfo.PushedBlocks += Size;
+ BatchGroupT *Cur = Sci->FreeListInfo.BlockList.front();
+
+ // In the following, `Cur` always points to the BatchGroup for blocks that
+ // will be pushed next. `Prev` is the element right before `Cur`.
+ BatchGroupT *Prev = nullptr;
+
+ while (Cur != nullptr &&
+ compactPtrGroupBase(Array[0]) > Cur->CompactPtrGroupBase) {
+ Prev = Cur;
+ Cur = Cur->Next;
+ }
+
+ if (Cur == nullptr ||
+ compactPtrGroupBase(Array[0]) != Cur->CompactPtrGroupBase) {
+ Cur = CreateGroup(compactPtrGroupBase(Array[0]));
+ if (Prev == nullptr)
+ Sci->FreeListInfo.BlockList.push_front(Cur);
+ else
+ Sci->FreeListInfo.BlockList.insert(Prev, Cur);
+ }
+
+ // All the blocks are from the same group, just push without checking group
+ // id.
+ if (SameGroup) {
+ for (u32 I = 0; I < Size; ++I)
+ DCHECK_EQ(compactPtrGroupBase(Array[I]), Cur->CompactPtrGroupBase);
+
+ InsertBlocks(Cur, Array, Size);
+ return;
+ }
+
+ // The blocks are sorted by group id. Determine the segment of each group
+ // and push the blocks to their group together.
+ u32 Count = 1;
+ for (u32 I = 1; I < Size; ++I) {
+ if (compactPtrGroupBase(Array[I - 1]) != compactPtrGroupBase(Array[I])) {
+ DCHECK_EQ(compactPtrGroupBase(Array[I - 1]), Cur->CompactPtrGroupBase);
+ InsertBlocks(Cur, Array + I - Count, Count);
+
+ while (Cur != nullptr &&
+ compactPtrGroupBase(Array[I]) > Cur->CompactPtrGroupBase) {
+ Prev = Cur;
+ Cur = Cur->Next;
+ }
+
+ if (Cur == nullptr ||
+ compactPtrGroupBase(Array[I]) != Cur->CompactPtrGroupBase) {
+ Cur = CreateGroup(compactPtrGroupBase(Array[I]));
+ DCHECK_NE(Prev, nullptr);
+ Sci->FreeListInfo.BlockList.insert(Prev, Cur);
+ }
+
+ Count = 1;
+ } else {
+ ++Count;
+ }
+ }
+
+ InsertBlocks(Cur, Array + Size - Count, Count);
+ }
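The Prev/Cur walk above is the classic sorted insert into a singly linked list, reusing an existing node when the key (the group base) already has a BatchGroup. A standalone sketch under assumed types:

struct Node {
  unsigned long Key;
  Node *Next;
};

// Find the node with `Key` in the ascending list, creating and splicing in a
// new one (via the caller-supplied factory) if it doesn't exist yet.
Node *findOrInsert(Node *&Head, unsigned long Key,
                   Node *(*Create)(unsigned long)) {
  Node *Prev = nullptr;
  Node *Cur = Head;
  while (Cur != nullptr && Key > Cur->Key) {
    Prev = Cur;
    Cur = Cur->Next;
  }
  if (Cur == nullptr || Cur->Key != Key) {
    Node *N = Create(Key);
    N->Next = Cur;
    if (Prev == nullptr)
      Head = N;
    else
      Prev->Next = N;
    Cur = N;
  }
  return Cur;
}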
+
+ // Pop one TransferBatch from a BatchGroup. The BatchGroup with the smallest
+ // group id will be considered first.
+ //
+ // The region mutex needs to be held while calling this method.
+ TransferBatchT *popBatchImpl(CacheT *C, uptr ClassId, SizeClassInfo *Sci)
+ REQUIRES(Sci->Mutex) {
+ if (Sci->FreeListInfo.BlockList.empty())
+ return nullptr;
+
+ SinglyLinkedList<TransferBatchT> &Batches =
+ Sci->FreeListInfo.BlockList.front()->Batches;
+
+ if (Batches.empty()) {
+ DCHECK_EQ(ClassId, SizeClassMap::BatchClassId);
+ BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();
+ Sci->FreeListInfo.BlockList.pop_front();
+
+ // The block used by the `BatchGroup` is from BatchClassId. Turn the block
+ // into a `TransferBatch` with a single block.
+ TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(BG);
+ TB->clear();
+ TB->add(
+ compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(TB)));
+ Sci->FreeListInfo.PoppedBlocks += 1;
+ return TB;
+ }
+
+ TransferBatchT *B = Batches.front();
+ Batches.pop_front();
+ DCHECK_NE(B, nullptr);
+ DCHECK_GT(B->getCount(), 0U);
+
+ if (Batches.empty()) {
+ BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();
+ Sci->FreeListInfo.BlockList.pop_front();
+
+ // We don't keep a BatchGroup with zero blocks, to avoid empty-checking
+ // while allocating. Note that the block backing the BatchGroup is recorded
+ // as a free block in the last element of BatchGroup::Batches, which means
+ // that once we pop the last TransferBatch, the block is implicitly
+ // deallocated.
+ if (ClassId != SizeClassMap::BatchClassId)
+ C->deallocate(SizeClassMap::BatchClassId, BG);
+ }
+
+ Sci->FreeListInfo.PoppedBlocks += B->getCount();
+ return B;
+ }
+
+ NOINLINE bool populateFreeList(CacheT *C, uptr ClassId, SizeClassInfo *Sci)
+ REQUIRES(Sci->Mutex) {
uptr Region;
uptr Offset;
// If the size-class currently has a region associated to it, use it. The
@@ -344,14 +791,14 @@ private:
DCHECK_EQ(Sci->CurrentRegionAllocated, 0U);
Region = allocateRegion(Sci, ClassId);
if (UNLIKELY(!Region))
- return nullptr;
+ return false;
C->getStats().add(StatMapped, RegionSize);
Sci->CurrentRegion = Region;
Offset = 0;
}
const uptr Size = getSizeByClassId(ClassId);
- const u32 MaxCount = TransferBatch::getMaxCached(Size);
+ const u16 MaxCount = CacheT::getMaxCached(Size);
DCHECK_GT(MaxCount, 0U);
// The maximum number of blocks we should carve in the region is dictated
// by the maximum number of batches we want to fill, and the amount of
@@ -364,7 +811,7 @@ private:
DCHECK_GT(NumberOfBlocks, 0U);
constexpr u32 ShuffleArraySize =
- MaxNumBatches * TransferBatch::MaxNumCached;
+ MaxNumBatches * TransferBatchT::MaxNumCached;
// Fill the transfer batches and put them in the size-class freelist. We
// need to randomize the blocks for security purposes, so we first fill a
// local array that we then shuffle before populating the batches.
@@ -374,23 +821,34 @@ private:
uptr P = Region + Offset;
for (u32 I = 0; I < NumberOfBlocks; I++, P += Size)
ShuffleArray[I] = reinterpret_cast<CompactPtrT>(P);
- // No need to shuffle the batches size class.
- if (ClassId != SizeClassMap::BatchClassId)
- shuffle(ShuffleArray, NumberOfBlocks, &Sci->RandState);
- for (u32 I = 0; I < NumberOfBlocks;) {
- TransferBatch *B =
- C->createBatch(ClassId, reinterpret_cast<void *>(ShuffleArray[I]));
- if (UNLIKELY(!B))
- return nullptr;
- const u32 N = Min(MaxCount, NumberOfBlocks - I);
- B->setFromArray(&ShuffleArray[I], N);
- Sci->FreeList.push_back(B);
- I += N;
+
+ if (ClassId != SizeClassMap::BatchClassId) {
+ u32 N = 1;
+ uptr CurGroup = compactPtrGroupBase(ShuffleArray[0]);
+ for (u32 I = 1; I < NumberOfBlocks; I++) {
+ if (UNLIKELY(compactPtrGroupBase(ShuffleArray[I]) != CurGroup)) {
+ shuffle(ShuffleArray + I - N, N, &Sci->RandState);
+ pushBlocksImpl(C, ClassId, Sci, ShuffleArray + I - N, N,
+ /*SameGroup=*/true);
+ N = 1;
+ CurGroup = compactPtrGroupBase(ShuffleArray[I]);
+ } else {
+ ++N;
+ }
+ }
+
+ shuffle(ShuffleArray + NumberOfBlocks - N, N, &Sci->RandState);
+ pushBlocksImpl(C, ClassId, Sci, &ShuffleArray[NumberOfBlocks - N], N,
+ /*SameGroup=*/true);
+ } else {
+ pushBatchClassBlocks(Sci, ShuffleArray, NumberOfBlocks);
}
- TransferBatch *B = Sci->FreeList.front();
- Sci->FreeList.pop_front();
- DCHECK(B);
- DCHECK_GT(B->getCount(), 0);
+
+ // Note that `PushedBlocks` and `PoppedBlocks` are supposed to record only
+ // the requests from `pushBlocks` and `popBatch`, which are the external
+ // interfaces. `populateFreeList` is an internal interface, so we subtract
+ // the count here to avoid skewing the stats.
+ Sci->FreeListInfo.PushedBlocks -= NumberOfBlocks;
const uptr AllocatedUser = Size * NumberOfBlocks;
C->getStats().add(StatFree, AllocatedUser);
@@ -406,60 +864,105 @@ private:
}
Sci->AllocatedUser += AllocatedUser;
- return B;
+ return true;
}
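The refill size computed by populateFreeList() is capped by two budgets: the number of batches worth filling in one go, and the space left in the current region. A hedged sketch of that computation (parameter names are illustrative):

#include <algorithm>
#include <cstdint>

uint32_t blocksToCarve(uintptr_t RegionSize, uintptr_t Offset,
                       uintptr_t BlockSize, uint32_t MaxNumBatches,
                       uint16_t MaxCountPerBatch) {
  // Budget 1: at most MaxNumBatches batches of MaxCountPerBatch blocks each.
  const uint32_t ByBatches = MaxNumBatches * MaxCountPerBatch;
  // Budget 2: whatever still fits in the current region.
  const uint32_t ByRegion =
      static_cast<uint32_t>((RegionSize - Offset) / BlockSize);
  return std::min(ByBatches, ByRegion);
}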
- void getStats(ScopedString *Str, uptr ClassId, uptr Rss) {
- SizeClassInfo *Sci = getSizeClassInfo(ClassId);
+ void getStats(ScopedString *Str, uptr ClassId, SizeClassInfo *Sci)
+ REQUIRES(Sci->Mutex) {
if (Sci->AllocatedUser == 0)
return;
- const uptr InUse = Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks;
- const uptr AvailableChunks = Sci->AllocatedUser / getSizeByClassId(ClassId);
+ const uptr BlockSize = getSizeByClassId(ClassId);
+ const uptr InUse =
+ Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks;
+ const uptr BytesInFreeList = Sci->AllocatedUser - InUse * BlockSize;
+ uptr PushedBytesDelta = 0;
+ if (BytesInFreeList >= Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint) {
+ PushedBytesDelta =
+ BytesInFreeList - Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint;
+ }
+ const uptr AvailableChunks = Sci->AllocatedUser / BlockSize;
Str->append(" %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu "
- "inuse: %6zu avail: %6zu rss: %6zuK releases: %6zu\n",
+ "inuse: %6zu avail: %6zu releases: %6zu last released: %6zuK "
+ "latest pushed bytes: %6zuK\n",
ClassId, getSizeByClassId(ClassId), Sci->AllocatedUser >> 10,
- Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks, InUse,
- AvailableChunks, Rss >> 10, Sci->ReleaseInfo.RangesReleased);
+ Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks,
+ InUse, AvailableChunks, Sci->ReleaseInfo.RangesReleased,
+ Sci->ReleaseInfo.LastReleasedBytes >> 10,
+ PushedBytesDelta >> 10);
}
- NOINLINE uptr releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId,
- bool Force = false) {
+ void getSizeClassFragmentationInfo(SizeClassInfo *Sci, uptr ClassId,
+ ScopedString *Str) REQUIRES(Sci->Mutex) {
const uptr BlockSize = getSizeByClassId(ClassId);
+ const uptr First = Sci->MinRegionIndex;
+ const uptr Last = Sci->MaxRegionIndex;
+ const uptr Base = First * RegionSize;
+ const uptr NumberOfRegions = Last - First + 1U;
+ auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
+ ScopedLock L(ByteMapMutex);
+ return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
+ };
+
+ FragmentationRecorder Recorder;
+ if (!Sci->FreeListInfo.BlockList.empty()) {
+ PageReleaseContext Context =
+ markFreeBlocks(Sci, ClassId, BlockSize, Base, NumberOfRegions,
+ ReleaseToOS::ForceAll);
+ releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
+ }
+
const uptr PageSize = getPageSizeCached();
+ const uptr TotalBlocks = Sci->AllocatedUser / BlockSize;
+ const uptr InUseBlocks =
+ Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks;
+ uptr AllocatedPagesCount = 0;
+ if (TotalBlocks != 0U) {
+ for (uptr I = 0; I < NumberOfRegions; ++I) {
+ if (SkipRegion(I))
+ continue;
+ AllocatedPagesCount += RegionSize / PageSize;
+ }
+
+ DCHECK_NE(AllocatedPagesCount, 0U);
+ }
+
+ DCHECK_GE(AllocatedPagesCount, Recorder.getReleasedPagesCount());
+ const uptr InUsePages =
+ AllocatedPagesCount - Recorder.getReleasedPagesCount();
+ const uptr InUseBytes = InUsePages * PageSize;
+
+ uptr Integral;
+ uptr Fractional;
+ computePercentage(BlockSize * InUseBlocks, InUsePages * PageSize, &Integral,
+ &Fractional);
+ Str->append(" %02zu (%6zu): inuse/total blocks: %6zu/%6zu inuse/total "
+ "pages: %6zu/%6zu inuse bytes: %6zuK util: %3zu.%02zu%%\n",
+ ClassId, BlockSize, InUseBlocks, TotalBlocks, InUsePages,
+ AllocatedPagesCount, InUseBytes >> 10, Integral, Fractional);
+ }
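The computePercentage() call above is integer fixed-point division. A worked instance under assumed numbers (BlockSize 48, 100 in-use blocks, 3 in-use pages of 4096 bytes):

#include <cstdio>

int main() {
  const unsigned long Num = 48UL * 100UL; // in-use bytes: 4800
  const unsigned long Den = 3UL * 4096UL; // in-use page bytes: 12288
  const unsigned long Integral = Num * 100 / Den;                 // 39
  const unsigned long Fractional = (Num * 100 % Den) * 100 / Den; // 6
  std::printf("util: %lu.%02lu%%\n", Integral, Fractional);       // 39.06%
  return 0;
}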
+
+ NOINLINE uptr releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId,
+ ReleaseToOS ReleaseType = ReleaseToOS::Normal)
+ REQUIRES(Sci->Mutex) {
+ const uptr BlockSize = getSizeByClassId(ClassId);
- DCHECK_GE(Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks);
+ DCHECK_GE(Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks);
const uptr BytesInFreeList =
Sci->AllocatedUser -
- (Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks) * BlockSize;
- if (BytesInFreeList < PageSize)
- return 0; // No chance to release anything.
- const uptr BytesPushed =
- (Sci->Stats.PushedBlocks - Sci->ReleaseInfo.PushedBlocksAtLastRelease) *
- BlockSize;
- if (BytesPushed < PageSize)
- return 0; // Nothing new to release.
+ (Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks) *
+ BlockSize;
- // Releasing smaller blocks is expensive, so we want to make sure that a
- // significant amount of bytes are free, and that there has been a good
- // amount of batches pushed to the freelist before attempting to release.
- if (BlockSize < PageSize / 16U) {
- if (!Force && BytesPushed < Sci->AllocatedUser / 16U)
- return 0;
- // We want 8x% to 9x% free bytes (the larger the block, the lower the %).
- if ((BytesInFreeList * 100U) / Sci->AllocatedUser <
- (100U - 1U - BlockSize / 16U))
- return 0;
- }
+ if (UNLIKELY(BytesInFreeList == 0))
+ return 0;
- if (!Force) {
- const s32 IntervalMs = atomic_load_relaxed(&ReleaseToOsIntervalMs);
- if (IntervalMs < 0)
- return 0;
- if (Sci->ReleaseInfo.LastReleaseAtNs +
- static_cast<u64>(IntervalMs) * 1000000 >
- getMonotonicTime()) {
- return 0; // Memory was returned recently.
- }
+ // ====================================================================== //
+ // 1. Check if we have enough free blocks and if it's worth doing a page
+ // release.
+ // ====================================================================== //
+ if (ReleaseType != ReleaseToOS::ForceAll &&
+ !hasChanceToReleasePages(Sci, BlockSize, BytesInFreeList,
+ ReleaseType)) {
+ return 0;
}
const uptr First = Sci->MinRegionIndex;
@@ -469,37 +972,197 @@ private:
uptr TotalReleasedBytes = 0;
const uptr Base = First * RegionSize;
const uptr NumberOfRegions = Last - First + 1U;
+
+ // ==================================================================== //
+ // 2. Mark the free blocks so that we can tell which pages are in use by
+ // querying `PageReleaseContext`.
+ // ==================================================================== //
+ PageReleaseContext Context = markFreeBlocks(Sci, ClassId, BlockSize, Base,
+ NumberOfRegions, ReleaseType);
+ if (!Context.hasBlockMarked())
+ return 0;
+
+ // ==================================================================== //
+ // 3. Release the unused physical pages back to the OS.
+ // ==================================================================== //
ReleaseRecorder Recorder(Base);
auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
+ ScopedLock L(ByteMapMutex);
return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
};
- auto DecompactPtr = [](CompactPtrT CompactPtr) {
- return reinterpret_cast<uptr>(CompactPtr);
- };
- releaseFreeMemoryToOS(Sci->FreeList, RegionSize, NumberOfRegions, BlockSize,
- &Recorder, DecompactPtr, SkipRegion);
+ releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
+
if (Recorder.getReleasedRangesCount() > 0) {
- Sci->ReleaseInfo.PushedBlocksAtLastRelease = Sci->Stats.PushedBlocks;
+ Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
Sci->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
TotalReleasedBytes += Sci->ReleaseInfo.LastReleasedBytes;
}
- Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();
+ Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTimeFast();
return TotalReleasedBytes;
}
+ bool hasChanceToReleasePages(SizeClassInfo *Sci, uptr BlockSize,
+ uptr BytesInFreeList, ReleaseToOS ReleaseType)
+ REQUIRES(Sci->Mutex) {
+ DCHECK_GE(Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks);
+ const uptr PageSize = getPageSizeCached();
+
+ if (BytesInFreeList <= Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint)
+ Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
+
+ // Always update `BytesInFreeListAtLastCheckpoint` with the smallest value
+ // so that we won't underestimate the releasable pages. For example, the
+ // following is the region usage,
+ //
+ // BytesInFreeListAtLastCheckpoint AllocatedUser
+ // v v
+ // |--------------------------------------->
+ // ^ ^
+ // BytesInFreeList ReleaseThreshold
+ //
+ // In general, if we have collected enough bytes and the amount of free
+ // bytes meets the ReleaseThreshold, we will try to do page release. If we
+ // don't update `BytesInFreeListAtLastCheckpoint` when the current
+ // `BytesInFreeList` is smaller, it may take longer to accumulate enough
+ // freed blocks because we miss the bytes between
+ // (BytesInFreeListAtLastCheckpoint - BytesInFreeList).
+ const uptr PushedBytesDelta =
+ BytesInFreeList - Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint;
+ if (PushedBytesDelta < PageSize)
+ return false;
+
+ // Releasing smaller blocks is expensive, so we want to make sure that a
+ // significant amount of bytes are free, and that there has been a good
+ // amount of batches pushed to the freelist before attempting to release.
+ if (isSmallBlock(BlockSize) && ReleaseType == ReleaseToOS::Normal)
+ if (PushedBytesDelta < Sci->AllocatedUser / 16U)
+ return false;
+
+ if (ReleaseType == ReleaseToOS::Normal) {
+ const s32 IntervalMs = atomic_load_relaxed(&ReleaseToOsIntervalMs);
+ if (IntervalMs < 0)
+ return false;
+
+ // The constant 8 here was selected by profiling some apps: the number of
+ // unreleased pages in the large size classes is around 16 pages or more.
+ // We choose half of that as a heuristic, which also avoids doing a page
+ // release on every pushBlocks() attempt by large blocks.
+ const bool ByPassReleaseInterval =
+ isLargeBlock(BlockSize) && PushedBytesDelta > 8 * PageSize;
+ if (!ByPassReleaseInterval) {
+ if (Sci->ReleaseInfo.LastReleaseAtNs +
+ static_cast<u64>(IntervalMs) * 1000000 >
+ getMonotonicTimeFast()) {
+ // Memory was returned recently.
+ return false;
+ }
+ }
+ } // if (ReleaseType == ReleaseToOS::Normal)
+
+ return true;
+ }
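Putting the thresholds above together, the Normal-mode gate can be summarized as: at least one page's worth of new frees since the last checkpoint, plus, for small blocks, at least 1/16 of the class's allocated bytes freed. A hedged sketch with the interval check elided:

#include <cstdint>

using uptr = uintptr_t;

bool worthReleasing(uptr BlockSize, uptr PageSize, uptr PushedBytesDelta,
                    uptr AllocatedUser) {
  // Less than one page freed since the last checkpoint: not worth the work.
  if (PushedBytesDelta < PageSize)
    return false;
  // Small blocks (< PageSize / 16): additionally require that 1/16 of the
  // class's allocated bytes have been freed since the checkpoint.
  const bool Small = BlockSize < PageSize / 16U;
  if (Small && PushedBytesDelta < AllocatedUser / 16U)
    return false;
  return true; // the ReleaseToOsIntervalMs check would follow here
}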
+
+ PageReleaseContext markFreeBlocks(SizeClassInfo *Sci, const uptr ClassId,
+ const uptr BlockSize, const uptr Base,
+ const uptr NumberOfRegions,
+ ReleaseToOS ReleaseType)
+ REQUIRES(Sci->Mutex) {
+ const uptr PageSize = getPageSizeCached();
+ const uptr GroupSize = (1UL << GroupSizeLog);
+ const uptr CurGroupBase =
+ compactPtrGroupBase(compactPtr(ClassId, Sci->CurrentRegion));
+
+ PageReleaseContext Context(BlockSize, NumberOfRegions,
+ /*ReleaseSize=*/RegionSize);
+
+ auto DecompactPtr = [](CompactPtrT CompactPtr) {
+ return reinterpret_cast<uptr>(CompactPtr);
+ };
+ for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) {
+ const uptr GroupBase = decompactGroupBase(BG.CompactPtrGroupBase);
+ // The `GroupSize` may not be evenly divisible by `BlockSize`, which means
+ // there is unused space at the end of the Region. Exclude that space to
+ // avoid unused page map entries.
+ uptr AllocatedGroupSize = GroupBase == CurGroupBase
+ ? Sci->CurrentRegionAllocated
+ : roundDownSlow(GroupSize, BlockSize);
+ if (AllocatedGroupSize == 0)
+ continue;
+
+ // TransferBatches are pushed in front of BG.Batches. The first one may
+ // not be full.
+ const uptr NumBlocks = (BG.Batches.size() - 1) * BG.MaxCachedPerBatch +
+ BG.Batches.front()->getCount();
+ const uptr BytesInBG = NumBlocks * BlockSize;
+
+ if (ReleaseType != ReleaseToOS::ForceAll) {
+ if (BytesInBG <= BG.BytesInBGAtLastCheckpoint) {
+ BG.BytesInBGAtLastCheckpoint = BytesInBG;
+ continue;
+ }
+
+ const uptr PushedBytesDelta = BytesInBG - BG.BytesInBGAtLastCheckpoint;
+ if (PushedBytesDelta < PageSize)
+ continue;
+
+ // Given the randomness property, we try to release the pages only if the
+ // bytes used by free blocks exceed a certain proportion of the allocated
+ // space.
+ if (isSmallBlock(BlockSize) && (BytesInBG * 100U) / AllocatedGroupSize <
+ (100U - 1U - BlockSize / 16U)) {
+ continue;
+ }
+ }
+
+ // TODO: Consider updating this after page release if `ReleaseRecorder`
+ // can tell the released bytes in each group.
+ BG.BytesInBGAtLastCheckpoint = BytesInBG;
+
+ const uptr MaxContainedBlocks = AllocatedGroupSize / BlockSize;
+ const uptr RegionIndex = (GroupBase - Base) / RegionSize;
+
+ if (NumBlocks == MaxContainedBlocks) {
+ for (const auto &It : BG.Batches)
+ for (u16 I = 0; I < It.getCount(); ++I)
+ DCHECK_EQ(compactPtrGroupBase(It.get(I)), BG.CompactPtrGroupBase);
+
+ const uptr To = GroupBase + AllocatedGroupSize;
+ Context.markRangeAsAllCounted(GroupBase, To, GroupBase, RegionIndex,
+ AllocatedGroupSize);
+ } else {
+ DCHECK_LT(NumBlocks, MaxContainedBlocks);
+
+ // Note that we don't always visit the blocks in each BatchGroup, so we
+ // may miss the chance to release certain pages that cross BatchGroups.
+ Context.markFreeBlocksInRegion(BG.Batches, DecompactPtr, GroupBase,
+ RegionIndex, AllocatedGroupSize,
+ /*MayContainLastBlockInRegion=*/true);
+ }
+
+ // In a rare case, the PageMap allocation may fail and we may not be able
+ // to do the page release.
+ if (UNLIKELY(!Context.hasBlockMarked()))
+ break;
+ }
+
+ return Context;
+ }
+
SizeClassInfo SizeClassInfoArray[NumClasses] = {};
+ HybridMutex ByteMapMutex;
// Track the regions in use, 0 is unused, otherwise store ClassId + 1.
- ByteMap PossibleRegions = {};
+ ByteMap PossibleRegions GUARDED_BY(ByteMapMutex) = {};
atomic_s32 ReleaseToOsIntervalMs = {};
// Unless several threads request regions simultaneously from different size
// classes, the stash rarely contains more than 1 entry.
static constexpr uptr MaxStashedRegions = 4;
HybridMutex RegionsStashMutex;
- uptr NumberOfStashedRegions = 0;
- uptr RegionsStash[MaxStashedRegions] = {};
+ uptr NumberOfStashedRegions GUARDED_BY(RegionsStashMutex) = 0;
+ uptr RegionsStash[MaxStashedRegions] GUARDED_BY(RegionsStashMutex) = {};
};
} // namespace scudo
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/primary64.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/primary64.h
index 13420bf3d222..9a642d23620e 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/primary64.h
@@ -9,15 +9,20 @@
#ifndef SCUDO_PRIMARY64_H_
#define SCUDO_PRIMARY64_H_
+#include "allocator_common.h"
#include "bytemap.h"
#include "common.h"
#include "list.h"
#include "local_cache.h"
+#include "mem_map.h"
#include "memtag.h"
#include "options.h"
#include "release.h"
#include "stats.h"
#include "string_utils.h"
+#include "thread_annotations.h"
+
+#include "condition_variable.h"
namespace scudo {
@@ -43,99 +48,309 @@ namespace scudo {
template <typename Config> class SizeClassAllocator64 {
public:
- typedef typename Config::PrimaryCompactPtrT CompactPtrT;
- static const uptr CompactPtrScale = Config::PrimaryCompactPtrScale;
- typedef typename Config::SizeClassMap SizeClassMap;
+ typedef typename Config::Primary::CompactPtrT CompactPtrT;
+ typedef typename Config::Primary::SizeClassMap SizeClassMap;
+ typedef typename ConditionVariableState<
+ typename Config::Primary>::ConditionVariableT ConditionVariableT;
+ static const uptr CompactPtrScale = Config::Primary::CompactPtrScale;
+ static const uptr RegionSizeLog = Config::Primary::RegionSizeLog;
+ static const uptr GroupSizeLog = Config::Primary::GroupSizeLog;
+ static_assert(RegionSizeLog >= GroupSizeLog,
+ "Group size shouldn't be greater than the region size");
+ static const uptr GroupScale = GroupSizeLog - CompactPtrScale;
typedef SizeClassAllocator64<Config> ThisT;
typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
- typedef typename CacheT::TransferBatch TransferBatch;
+ typedef TransferBatch<ThisT> TransferBatchT;
+ typedef BatchGroup<ThisT> BatchGroupT;
+
+ static_assert(sizeof(BatchGroupT) <= sizeof(TransferBatchT),
+ "BatchGroupT uses the same class size as TransferBatchT");
static uptr getSizeByClassId(uptr ClassId) {
return (ClassId == SizeClassMap::BatchClassId)
- ? roundUpTo(sizeof(TransferBatch), 1U << CompactPtrScale)
+ ? roundUp(sizeof(TransferBatchT), 1U << CompactPtrScale)
: SizeClassMap::getSizeByClassId(ClassId);
}
static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }
- void init(s32 ReleaseToOsInterval) {
+ static bool conditionVariableEnabled() {
+ return ConditionVariableState<typename Config::Primary>::enabled();
+ }
+
+ void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
- DCHECK_EQ(PrimaryBase, 0U);
+
+ const uptr PageSize = getPageSizeCached();
+ const uptr GroupSize = (1UL << GroupSizeLog);
+ const uptr PagesInGroup = GroupSize / PageSize;
+ const uptr MinSizeClass = getSizeByClassId(1);
+ // When trying to release pages back to the OS, visiting smaller size
+ // classes is expensive. Therefore, we only try to release smaller size
+ // classes when the amount of free blocks goes over a certain threshold (see
+ // the comment in releaseToOSMaybe() for more details). For example, for
+ // size class 32, we only do the release when the size of free blocks is
+ // greater than 97% of pages in a group. However, this may introduce another
+ // issue: the number of free blocks may bounce between 97% and 100%, which
+ // means we may try many page releases but only release very few pages (less
+ // than 3% of a group). Even though `ReleaseToOsIntervalMs` slightly reduces
+ // the frequency of these calls, it is better to have another guard to
+ // mitigate this issue.
+ //
+ // Here we add another constraint on the minimum size requirement. The
+ // constraint is determined by the size of in-use blocks in the minimal size
+ // class. Take size class 32 as an example,
+ //
+ // +- one memory group -+
+ // +----------------------+------+
+ // | 97% of free blocks | |
+ // +----------------------+------+
+ // \ /
+ // 3% in-use blocks
+ //
+ // * The release size threshold is 97%.
+ //
+ // The 3% of a group is about 7 pages. Between two consecutive calls to
+ // releaseToOSMaybe(), we require the difference in `PushedBlocks` to be
+ // greater than 7 pages' worth. This mitigates the page-release thrashing
+ // caused by memory usage bouncing around the threshold. The smallest size
+ // class takes the longest time to do the page release, so we use its
+ // amount of in-use blocks as the heuristic.
+ SmallerBlockReleasePageDelta =
+ PagesInGroup * (1 + MinSizeClass / 16U) / 100;
+
// Reserve the space required for the Primary.
- PrimaryBase = reinterpret_cast<uptr>(
- map(nullptr, PrimarySize, nullptr, MAP_NOACCESS, &Data));
+ CHECK(ReservedMemory.create(/*Addr=*/0U, PrimarySize,
+ "scudo:primary_reserve"));
+ PrimaryBase = ReservedMemory.getBase();
+ DCHECK_NE(PrimaryBase, 0U);
u32 Seed;
- const u64 Time = getMonotonicTime();
+ const u64 Time = getMonotonicTimeFast();
if (!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed)))
Seed = static_cast<u32>(Time ^ (PrimaryBase >> 12));
- const uptr PageSize = getPageSizeCached();
+
for (uptr I = 0; I < NumClasses; I++) {
RegionInfo *Region = getRegionInfo(I);
+
// The actual start of a region is offset by a random number of pages
// when PrimaryEnableRandomOffset is set.
- Region->RegionBeg = getRegionBaseByClassId(I) +
- (Config::PrimaryEnableRandomOffset
+ Region->RegionBeg = (PrimaryBase + (I << RegionSizeLog)) +
+ (Config::Primary::EnableRandomOffset
? ((getRandomModN(&Seed, 16) + 1) * PageSize)
: 0);
Region->RandState = getRandomU32(&Seed);
+ // Releasing small blocks is expensive, so set a higher threshold to avoid
+ // frequent page releases.
+ if (isSmallBlock(getSizeByClassId(I)))
+ Region->TryReleaseThreshold = PageSize * SmallerBlockReleasePageDelta;
+ else
+ Region->TryReleaseThreshold = PageSize;
Region->ReleaseInfo.LastReleaseAtNs = Time;
+
+ Region->MemMapInfo.MemMap = ReservedMemory.dispatch(
+ PrimaryBase + (I << RegionSizeLog), RegionSize);
+ CHECK(Region->MemMapInfo.MemMap.isAllocated());
}
+ shuffle(RegionInfoArray, NumClasses, &Seed);
+
+ // The binding should be done after region shuffling so that it won't bind
+ // the FLLock from the wrong region.
+ for (uptr I = 0; I < NumClasses; I++)
+ getRegionInfo(I)->FLLockCV.bindTestOnly(getRegionInfo(I)->FLLock);
+
setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
}
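Plugging assumed config values into the SmallerBlockReleasePageDelta formula reproduces the "about 7 pages" figure from the comment (PageSize and GroupSizeLog vary by platform; 4096 and 20 are assumptions here):

constexpr unsigned long PageSize = 4096;
constexpr unsigned long GroupSize = 1UL << 20; // 1 MiB memory group (assumed)
constexpr unsigned long PagesInGroup = GroupSize / PageSize; // 256
constexpr unsigned long MinSizeClass = 32;
constexpr unsigned long SmallerBlockReleasePageDelta =
    PagesInGroup * (1 + MinSizeClass / 16) / 100; // 256 * 3 / 100 = 7
static_assert(SmallerBlockReleasePageDelta == 7,
              "matches the 'about 7 pages' figure in the comment");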
- void unmapTestOnly() {
+ void unmapTestOnly() NO_THREAD_SAFETY_ANALYSIS {
for (uptr I = 0; I < NumClasses; I++) {
RegionInfo *Region = getRegionInfo(I);
*Region = {};
}
- unmap(reinterpret_cast<void *>(PrimaryBase), PrimarySize, UNMAP_ALL, &Data);
+ if (PrimaryBase)
+ ReservedMemory.release();
PrimaryBase = 0U;
}
- TransferBatch *popBatch(CacheT *C, uptr ClassId) {
+ // When all blocks are freed, their total size has to equal `AllocatedUser`.
+ void verifyAllBlocksAreReleasedTestOnly() {
+ // `BatchGroup` and `TransferBatch` also use the blocks from BatchClass.
+ uptr BatchClassUsedInFreeLists = 0;
+ for (uptr I = 0; I < NumClasses; I++) {
+ // We have to count BatchClassUsedInFreeLists in other regions first.
+ if (I == SizeClassMap::BatchClassId)
+ continue;
+ RegionInfo *Region = getRegionInfo(I);
+ ScopedLock ML(Region->MMLock);
+ ScopedLock FL(Region->FLLock);
+ const uptr BlockSize = getSizeByClassId(I);
+ uptr TotalBlocks = 0;
+ for (BatchGroupT &BG : Region->FreeListInfo.BlockList) {
+ // `BG::Batches` are `TransferBatches`. +1 for `BatchGroup`.
+ BatchClassUsedInFreeLists += BG.Batches.size() + 1;
+ for (const auto &It : BG.Batches)
+ TotalBlocks += It.getCount();
+ }
+
+ DCHECK_EQ(TotalBlocks, Region->MemMapInfo.AllocatedUser / BlockSize);
+ DCHECK_EQ(Region->FreeListInfo.PushedBlocks,
+ Region->FreeListInfo.PoppedBlocks);
+ }
+
+ RegionInfo *Region = getRegionInfo(SizeClassMap::BatchClassId);
+ ScopedLock ML(Region->MMLock);
+ ScopedLock FL(Region->FLLock);
+ const uptr BlockSize = getSizeByClassId(SizeClassMap::BatchClassId);
+ uptr TotalBlocks = 0;
+ for (BatchGroupT &BG : Region->FreeListInfo.BlockList) {
+ if (LIKELY(!BG.Batches.empty())) {
+ for (const auto &It : BG.Batches)
+ TotalBlocks += It.getCount();
+ } else {
+ // A `BatchGroup` with an empty freelist doesn't have a `TransferBatch`
+ // recording itself.
+ ++TotalBlocks;
+ }
+ }
+ DCHECK_EQ(TotalBlocks + BatchClassUsedInFreeLists,
+ Region->MemMapInfo.AllocatedUser / BlockSize);
+ DCHECK_GE(Region->FreeListInfo.PoppedBlocks,
+ Region->FreeListInfo.PushedBlocks);
+ const uptr BlocksInUse =
+ Region->FreeListInfo.PoppedBlocks - Region->FreeListInfo.PushedBlocks;
+ DCHECK_EQ(BlocksInUse, BatchClassUsedInFreeLists);
+ }
+
+ // Note that `MaxBlockCount` will be used once we support an arbitrary block
+ // count. For now it's the same as the number of blocks stored in the
+ // `TransferBatch`.
+ u16 popBlocks(CacheT *C, uptr ClassId, CompactPtrT *ToArray,
+ UNUSED const u16 MaxBlockCount) {
+ TransferBatchT *B = popBatch(C, ClassId);
+ if (!B)
+ return 0;
+
+ const u16 Count = B->getCount();
+ DCHECK_GT(Count, 0U);
+ B->moveToArray(ToArray);
+
+ if (ClassId != SizeClassMap::BatchClassId)
+ C->deallocate(SizeClassMap::BatchClassId, B);
+
+ return Count;
+ }
+
+ TransferBatchT *popBatch(CacheT *C, uptr ClassId) {
DCHECK_LT(ClassId, NumClasses);
RegionInfo *Region = getRegionInfo(ClassId);
- ScopedLock L(Region->Mutex);
- TransferBatch *B = Region->FreeList.front();
- if (B) {
- Region->FreeList.pop_front();
+
+ {
+ ScopedLock L(Region->FLLock);
+ TransferBatchT *B = popBatchImpl(C, ClassId, Region);
+ if (LIKELY(B))
+ return B;
+ }
+
+ bool ReportRegionExhausted = false;
+ TransferBatchT *B = nullptr;
+
+ if (conditionVariableEnabled()) {
+ B = popBatchWithCV(C, ClassId, Region, ReportRegionExhausted);
} else {
- B = populateFreeList(C, ClassId, Region);
- if (UNLIKELY(!B))
- return nullptr;
+ while (true) {
+ // When two threads compete for `Region->MMLock`, we only want one of
+ // them to call populateFreeListAndPopBatch(). To avoid both of them
+ // doing that, always check the freelist before mapping new pages.
+ ScopedLock ML(Region->MMLock);
+ {
+ ScopedLock FL(Region->FLLock);
+ if ((B = popBatchImpl(C, ClassId, Region)))
+ break;
+ }
+
+ const bool RegionIsExhausted = Region->Exhausted;
+ if (!RegionIsExhausted)
+ B = populateFreeListAndPopBatch(C, ClassId, Region);
+ ReportRegionExhausted = !RegionIsExhausted && Region->Exhausted;
+ break;
+ }
+ }
+
+ if (UNLIKELY(ReportRegionExhausted)) {
+ Printf("Can't populate more pages for size class %zu.\n",
+ getSizeByClassId(ClassId));
+
+ // Theoretically, BatchClass shouldn't be used up. Abort immediately when
+ // it happens.
+ if (ClassId == SizeClassMap::BatchClassId)
+ reportOutOfBatchClass();
}
- DCHECK_GT(B->getCount(), 0);
- Region->Stats.PoppedBlocks += B->getCount();
+
return B;
}
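popBatch() now has a check-lock-recheck structure: try the freelist under FLLock; on a miss, take MMLock (so only one thread maps new pages), recheck the freelist, and only then populate. A generic sketch of the pattern with std::mutex (types illustrative):

#include <mutex>
#include <vector>

struct Batch {};

Batch *popWithRefill(std::vector<Batch *> &FreeList, std::mutex &FLLock,
                     std::mutex &MMLock, Batch *(*populate)()) {
  {
    std::lock_guard<std::mutex> FL(FLLock); // fast path: freelist hit
    if (!FreeList.empty()) {
      Batch *B = FreeList.back();
      FreeList.pop_back();
      return B;
    }
  }
  std::lock_guard<std::mutex> ML(MMLock); // one thread maps at a time
  {
    // Recheck: another thread may have refilled while we waited on MMLock.
    std::lock_guard<std::mutex> FL(FLLock);
    if (!FreeList.empty()) {
      Batch *B = FreeList.back();
      FreeList.pop_back();
      return B;
    }
  }
  return populate(); // map new pages and hand back one batch
}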
- void pushBatch(uptr ClassId, TransferBatch *B) {
- DCHECK_GT(B->getCount(), 0);
+ // Push the array of free blocks to the designated batch group.
+ void pushBlocks(CacheT *C, uptr ClassId, CompactPtrT *Array, u32 Size) {
+ DCHECK_LT(ClassId, NumClasses);
+ DCHECK_GT(Size, 0);
+
RegionInfo *Region = getRegionInfo(ClassId);
- ScopedLock L(Region->Mutex);
- Region->FreeList.push_front(B);
- Region->Stats.PushedBlocks += B->getCount();
- if (ClassId != SizeClassMap::BatchClassId)
- releaseToOSMaybe(Region, ClassId);
+ if (ClassId == SizeClassMap::BatchClassId) {
+ ScopedLock L(Region->FLLock);
+ pushBatchClassBlocks(Region, Array, Size);
+ if (conditionVariableEnabled())
+ Region->FLLockCV.notifyAll(Region->FLLock);
+ return;
+ }
+
+ // TODO(chiahungduan): Consider not doing grouping if the group size is not
+ // greater than the block size by a certain scale.
+
+ bool SameGroup = true;
+ if (GroupSizeLog < RegionSizeLog) {
+ // Sort the blocks so that blocks belonging to the same group can be
+ // pushed together.
+ for (u32 I = 1; I < Size; ++I) {
+ if (compactPtrGroup(Array[I - 1]) != compactPtrGroup(Array[I]))
+ SameGroup = false;
+ CompactPtrT Cur = Array[I];
+ u32 J = I;
+ while (J > 0 && compactPtrGroup(Cur) < compactPtrGroup(Array[J - 1])) {
+ Array[J] = Array[J - 1];
+ --J;
+ }
+ Array[J] = Cur;
+ }
+ }
+
+ {
+ ScopedLock L(Region->FLLock);
+ pushBlocksImpl(C, ClassId, Region, Array, Size, SameGroup);
+ if (conditionVariableEnabled())
+ Region->FLLockCV.notifyAll(Region->FLLock);
+ }
}
- void disable() {
+ void disable() NO_THREAD_SAFETY_ANALYSIS {
// The BatchClassId must be locked last since other classes can use it.
for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--) {
if (static_cast<uptr>(I) == SizeClassMap::BatchClassId)
continue;
- getRegionInfo(static_cast<uptr>(I))->Mutex.lock();
+ getRegionInfo(static_cast<uptr>(I))->MMLock.lock();
+ getRegionInfo(static_cast<uptr>(I))->FLLock.lock();
}
- getRegionInfo(SizeClassMap::BatchClassId)->Mutex.lock();
+ getRegionInfo(SizeClassMap::BatchClassId)->MMLock.lock();
+ getRegionInfo(SizeClassMap::BatchClassId)->FLLock.lock();
}
- void enable() {
- getRegionInfo(SizeClassMap::BatchClassId)->Mutex.unlock();
+ void enable() NO_THREAD_SAFETY_ANALYSIS {
+ getRegionInfo(SizeClassMap::BatchClassId)->FLLock.unlock();
+ getRegionInfo(SizeClassMap::BatchClassId)->MMLock.unlock();
for (uptr I = 0; I < NumClasses; I++) {
if (I == SizeClassMap::BatchClassId)
continue;
- getRegionInfo(I)->Mutex.unlock();
+ getRegionInfo(I)->FLLock.unlock();
+ getRegionInfo(I)->MMLock.unlock();
}
}
@@ -143,10 +358,15 @@ public:
for (uptr I = 0; I < NumClasses; I++) {
if (I == SizeClassMap::BatchClassId)
continue;
- const RegionInfo *Region = getRegionInfo(I);
+ RegionInfo *Region = getRegionInfo(I);
+ // TODO: The call of `iterateOverBlocks` requires disabling
+ // SizeClassAllocator64. We may consider locking each region on demand
+ // only.
+ Region->FLLock.assertHeld();
+ Region->MMLock.assertHeld();
const uptr BlockSize = getSizeByClassId(I);
const uptr From = Region->RegionBeg;
- const uptr To = From + Region->AllocatedUser;
+ const uptr To = From + Region->MemMapInfo.AllocatedUser;
for (uptr Block = From; Block < To; Block += BlockSize)
Callback(Block);
}
@@ -159,25 +379,46 @@ public:
uptr PushedBlocks = 0;
for (uptr I = 0; I < NumClasses; I++) {
RegionInfo *Region = getRegionInfo(I);
- if (Region->MappedUser)
- TotalMapped += Region->MappedUser;
- PoppedBlocks += Region->Stats.PoppedBlocks;
- PushedBlocks += Region->Stats.PushedBlocks;
+ {
+ ScopedLock L(Region->MMLock);
+ TotalMapped += Region->MemMapInfo.MappedUser;
+ }
+ {
+ ScopedLock L(Region->FLLock);
+ PoppedBlocks += Region->FreeListInfo.PoppedBlocks;
+ PushedBlocks += Region->FreeListInfo.PushedBlocks;
+ }
}
- Str->append("Stats: SizeClassAllocator64: %zuM mapped (%zuM rss) in %zu "
+ Str->append("Stats: SizeClassAllocator64: %zuM mapped (%uM rss) in %zu "
"allocations; remains %zu\n",
- TotalMapped >> 20, 0, PoppedBlocks,
+ TotalMapped >> 20, 0U, PoppedBlocks,
PoppedBlocks - PushedBlocks);
- for (uptr I = 0; I < NumClasses; I++)
- getStats(Str, I, 0);
+ for (uptr I = 0; I < NumClasses; I++) {
+ RegionInfo *Region = getRegionInfo(I);
+ ScopedLock L1(Region->MMLock);
+ ScopedLock L2(Region->FLLock);
+ getStats(Str, I, Region);
+ }
+ }
+
+ void getFragmentationInfo(ScopedString *Str) {
+ Str->append(
+ "Fragmentation Stats: SizeClassAllocator64: page size = %zu bytes\n",
+ getPageSizeCached());
+
+ for (uptr I = 1; I < NumClasses; I++) {
+ RegionInfo *Region = getRegionInfo(I);
+ ScopedLock L(Region->MMLock);
+ getRegionFragmentationInfo(Region, I, Str);
+ }
}
bool setOption(Option O, sptr Value) {
if (O == Option::ReleaseInterval) {
- const s32 Interval = Max(
- Min(static_cast<s32>(Value), Config::PrimaryMaxReleaseToOsIntervalMs),
- Config::PrimaryMinReleaseToOsIntervalMs);
+ const s32 Interval = Max(Min(static_cast<s32>(Value),
+ Config::Primary::MaxReleaseToOsIntervalMs),
+ Config::Primary::MinReleaseToOsIntervalMs);
atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
return true;
}
@@ -185,14 +426,27 @@ public:
return true;
}
- uptr releaseToOS() {
+ uptr tryReleaseToOS(uptr ClassId, ReleaseToOS ReleaseType) {
+ RegionInfo *Region = getRegionInfo(ClassId);
+    // Note that tryLock() may fail spuriously. Since that should be rare and
+    // skipping a page release is harmless, we don't take extra measures to
+    // guarantee that a release actually happens.
+ if (Region->MMLock.tryLock()) {
+ uptr BytesReleased = releaseToOSMaybe(Region, ClassId, ReleaseType);
+ Region->MMLock.unlock();
+ return BytesReleased;
+ }
+ return 0;
+ }
+
+ uptr releaseToOS(ReleaseToOS ReleaseType) {
uptr TotalReleasedBytes = 0;
for (uptr I = 0; I < NumClasses; I++) {
if (I == SizeClassMap::BatchClassId)
continue;
RegionInfo *Region = getRegionInfo(I);
- ScopedLock L(Region->Mutex);
- TotalReleasedBytes += releaseToOSMaybe(Region, I, /*Force=*/true);
+ ScopedLock L(Region->MMLock);
+ TotalReleasedBytes += releaseToOSMaybe(Region, I, ReleaseType);
}
return TotalReleasedBytes;
}
@@ -204,9 +458,6 @@ public:
static uptr getRegionInfoArraySize() { return sizeof(RegionInfoArray); }
uptr getCompactPtrBaseByClassId(uptr ClassId) {
- // If we are not compacting pointers, base everything off of 0.
- if (sizeof(CompactPtrT) == sizeof(uptr) && CompactPtrScale == 0)
- return 0;
return getRegionInfo(ClassId)->RegionBeg;
}
@@ -221,16 +472,24 @@ public:
decompactPtrInternal(getCompactPtrBaseByClassId(ClassId), CompactPtr));
}
- static BlockInfo findNearestBlock(const char *RegionInfoData, uptr Ptr) {
+ static BlockInfo findNearestBlock(const char *RegionInfoData,
+ uptr Ptr) NO_THREAD_SAFETY_ANALYSIS {
const RegionInfo *RegionInfoArray =
reinterpret_cast<const RegionInfo *>(RegionInfoData);
+
uptr ClassId;
uptr MinDistance = -1UL;
for (uptr I = 0; I != NumClasses; ++I) {
if (I == SizeClassMap::BatchClassId)
continue;
uptr Begin = RegionInfoArray[I].RegionBeg;
- uptr End = Begin + RegionInfoArray[I].AllocatedUser;
+      // TODO(chiahungduan): In fact, we need to lock RegionInfo::MMLock.
+      // However, RegionInfoData is passed with a const qualifier, and locking
+      // the mutex requires modifying it, which means we would have to cast
+      // away the const. That would introduce another undefined behavior (the
+      // first one is accessing `AllocatedUser` without locking). It would be
+      // better to pass `RegionInfoData` as `void *` so that we can lock the
+      // mutex properly.
+ uptr End = Begin + RegionInfoArray[I].MemMapInfo.AllocatedUser;
if (Begin > End || End - Begin < SizeClassMap::getSizeByClassId(I))
continue;
uptr RegionDistance;
@@ -252,7 +511,8 @@ public:
BlockInfo B = {};
if (MinDistance <= 8192) {
B.RegionBegin = RegionInfoArray[ClassId].RegionBeg;
- B.RegionEnd = B.RegionBegin + RegionInfoArray[ClassId].AllocatedUser;
+ B.RegionEnd =
+ B.RegionBegin + RegionInfoArray[ClassId].MemMapInfo.AllocatedUser;
B.BlockSize = SizeClassMap::getSizeByClassId(ClassId);
B.BlockBegin =
B.RegionBegin + uptr(sptr(Ptr - B.RegionBegin) / sptr(B.BlockSize) *
@@ -268,37 +528,51 @@ public:
AtomicOptions Options;
private:
- static const uptr RegionSize = 1UL << Config::PrimaryRegionSizeLog;
+ static const uptr RegionSize = 1UL << RegionSizeLog;
static const uptr NumClasses = SizeClassMap::NumClasses;
static const uptr PrimarySize = RegionSize * NumClasses;
- static const uptr MapSizeIncrement = Config::PrimaryMapSizeIncrement;
+ static const uptr MapSizeIncrement = Config::Primary::MapSizeIncrement;
// Fill at most this number of batches from the newly mapped memory.
static const u32 MaxNumBatches = SCUDO_ANDROID ? 4U : 8U;
- struct RegionStats {
- uptr PoppedBlocks;
- uptr PushedBlocks;
- };
-
struct ReleaseToOsInfo {
- uptr PushedBlocksAtLastRelease;
+ uptr BytesInFreeListAtLastCheckpoint;
uptr RangesReleased;
uptr LastReleasedBytes;
u64 LastReleaseAtNs;
};
+ struct BlocksInfo {
+ SinglyLinkedList<BatchGroupT> BlockList = {};
+ uptr PoppedBlocks = 0;
+ uptr PushedBlocks = 0;
+ };
+
+ struct PagesInfo {
+ MemMapT MemMap = {};
+ // Bytes mapped for user memory.
+ uptr MappedUser = 0;
+ // Bytes allocated for user memory.
+ uptr AllocatedUser = 0;
+ };
+
struct UnpaddedRegionInfo {
- HybridMutex Mutex;
- SinglyLinkedList<TransferBatch> FreeList;
+    // Mutex for operations on the freelist.
+ HybridMutex FLLock;
+ ConditionVariableT FLLockCV GUARDED_BY(FLLock);
+    // Mutex for memmap operations.
+ HybridMutex MMLock ACQUIRED_BEFORE(FLLock);
+ // `RegionBeg` is initialized before thread creation and won't be changed.
uptr RegionBeg = 0;
- RegionStats Stats = {};
- u32 RandState = 0;
- uptr MappedUser = 0; // Bytes mapped for user memory.
- uptr AllocatedUser = 0; // Bytes allocated for user memory.
- MapPlatformData Data = {};
- ReleaseToOsInfo ReleaseInfo = {};
- bool Exhausted = false;
+ u32 RandState GUARDED_BY(MMLock) = 0;
+ BlocksInfo FreeListInfo GUARDED_BY(FLLock);
+ PagesInfo MemMapInfo GUARDED_BY(MMLock);
+ // The minimum size of pushed blocks to trigger page release.
+ uptr TryReleaseThreshold GUARDED_BY(MMLock) = 0;
+ ReleaseToOsInfo ReleaseInfo GUARDED_BY(MMLock) = {};
+ bool Exhausted GUARDED_BY(MMLock) = false;
+ bool isPopulatingFreeList GUARDED_BY(FLLock) = false;
};
struct RegionInfo : UnpaddedRegionInfo {
char Padding[SCUDO_CACHE_LINE_SIZE -
@@ -306,18 +580,15 @@ private:
};
static_assert(sizeof(RegionInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");
- uptr PrimaryBase = 0;
- MapPlatformData Data = {};
- atomic_s32 ReleaseToOsIntervalMs = {};
- alignas(SCUDO_CACHE_LINE_SIZE) RegionInfo RegionInfoArray[NumClasses];
-
RegionInfo *getRegionInfo(uptr ClassId) {
DCHECK_LT(ClassId, NumClasses);
return &RegionInfoArray[ClassId];
}
- uptr getRegionBaseByClassId(uptr ClassId) const {
- return PrimaryBase + (ClassId << Config::PrimaryRegionSizeLog);
+ uptr getRegionBaseByClassId(uptr ClassId) {
+ return roundDown(getRegionInfo(ClassId)->RegionBeg - PrimaryBase,
+ RegionSize) +
+ PrimaryBase;
}
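
With a random page offset folded into RegionBeg, the class's region base can no longer be computed directly from ClassId; the method above recovers it by rounding the offset from PrimaryBase down to a multiple of RegionSize. A worked example with invented values:

    #include <cassert>
    #include <cstdint>

    // RegionBeg carries a random page offset; the RegionSize-aligned base is
    // recovered by rounding the offset from PrimaryBase down.
    int main() {
      const uint64_t PrimaryBase = 0x40000000;
      const uint64_t RegionSize = uint64_t(1) << 28; // hypothetical RegionSizeLog
      const uint64_t RegionBeg = PrimaryBase + 3 * RegionSize + 0x7000; // class 3
      const uint64_t Base =
          (RegionBeg - PrimaryBase) / RegionSize * RegionSize + PrimaryBase;
      assert(Base == PrimaryBase + 3 * RegionSize);
      return 0;
    }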
static CompactPtrT compactPtrInternal(uptr Base, uptr Ptr) {
@@ -328,160 +599,1088 @@ private:
return Base + (static_cast<uptr>(CompactPtr) << CompactPtrScale);
}
- NOINLINE TransferBatch *populateFreeList(CacheT *C, uptr ClassId,
- RegionInfo *Region) {
+ static uptr compactPtrGroup(CompactPtrT CompactPtr) {
+ const uptr Mask = (static_cast<uptr>(1) << GroupScale) - 1;
+ return static_cast<uptr>(CompactPtr) & ~Mask;
+ }
+ static uptr decompactGroupBase(uptr Base, uptr CompactPtrGroupBase) {
+ DCHECK_EQ(CompactPtrGroupBase % (static_cast<uptr>(1) << (GroupScale)), 0U);
+ return Base + (CompactPtrGroupBase << CompactPtrScale);
+ }
+
+ ALWAYS_INLINE static bool isSmallBlock(uptr BlockSize) {
+ const uptr PageSize = getPageSizeCached();
+ return BlockSize < PageSize / 16U;
+ }
+
+ ALWAYS_INLINE static bool isLargeBlock(uptr BlockSize) {
+ const uptr PageSize = getPageSizeCached();
+ return BlockSize > PageSize;
+ }
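
For intuition on the group helpers above: a compact pointer drops the region base and shifts by CompactPtrScale, and compactPtrGroup() masks off the low GroupScale bits so that all blocks in the same GroupSize-aligned window share a group base. A self-contained worked example with illustrative parameters (not the real config values):

    #include <cassert>
    #include <cstdint>

    using uptr = uintptr_t;

    // Illustrative parameters: 4-byte compaction granule (CompactPtrScale = 2)
    // and 64 KiB groups (GroupScale = 14).
    constexpr uptr CompactPtrScale = 2;
    constexpr uptr GroupScale = 14;

    uptr compact(uptr Base, uptr Ptr) { return (Ptr - Base) >> CompactPtrScale; }
    uptr group(uptr CompactPtr) {
      const uptr Mask = (uptr(1) << GroupScale) - 1;
      return CompactPtr & ~Mask;
    }
    uptr decompactGroupBase(uptr Base, uptr GroupBase) {
      return Base + (GroupBase << CompactPtrScale);
    }

    int main() {
      const uptr Base = 0x100000;
      // Blocks inside the same 64 KiB window share a group base...
      assert(group(compact(Base, 0x100040)) == group(compact(Base, 0x10ffc0)));
      // ...while a block in the next window does not.
      assert(group(compact(Base, 0x110000)) != group(compact(Base, 0x100040)));
      // The group base decompacts to a GroupSize-aligned address.
      assert(decompactGroupBase(Base, group(compact(Base, 0x10ffc0))) == 0x100000);
      return 0;
    }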
+
+ void pushBatchClassBlocks(RegionInfo *Region, CompactPtrT *Array, u32 Size)
+ REQUIRES(Region->FLLock) {
+ DCHECK_EQ(Region, getRegionInfo(SizeClassMap::BatchClassId));
+
+    // Free blocks of every size class are recorded in TransferBatches on the
+    // freelist, and the TransferBatches themselves are allocated from
+    // BatchClassId. To avoid spending an extra block on recording the free
+    // blocks of BatchClassId, its TransferBatches are self-contained, i.e., a
+    // TransferBatch records its own block address. See the figure below:
+ //
+ // TransferBatch at 0xABCD
+ // +----------------------------+
+ // | Free blocks' addr |
+ // | +------+------+------+ |
+ // | |0xABCD|... |... | |
+ // | +------+------+------+ |
+ // +----------------------------+
+ //
+    // Once all the free blocks in a TransferBatch have been allocated, the
+    // block used by the TransferBatch itself is also free for use; there is no
+    // need to recycle it. Correctness is maintained by the invariant:
+    //
+    //   The unit of each popBatch() request is an entire TransferBatch.
+    //   Returning only part of the blocks in a TransferBatch is invalid.
+    //
+    // This ensures that a TransferBatch won't leak its own address while it's
+    // still holding other valid data.
+ //
+    // Besides, each BatchGroup is also allocated from BatchClassId and has its
+    // address recorded in a TransferBatch. To maintain correctness,
+    //
+    //   The address of a BatchGroup is always recorded in the last
+    //   TransferBatch in the freelist (which also implies that the freelist
+    //   should only be updated with push_front). Once the last TransferBatch
+    //   is popped, the block used by the BatchGroup is also free for use.
+ //
+ // With this approach, the blocks used by BatchGroup and TransferBatch are
+ // reusable and don't need additional space for them.
+
+ Region->FreeListInfo.PushedBlocks += Size;
+ BatchGroupT *BG = Region->FreeListInfo.BlockList.front();
+
+ if (BG == nullptr) {
+ // Construct `BatchGroup` on the last element.
+ BG = reinterpret_cast<BatchGroupT *>(
+ decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
+ --Size;
+ BG->Batches.clear();
+      // BatchClass doesn't use memory groups; use `0` to indicate there's no
+      // memory group here.
+      BG->CompactPtrGroupBase = 0;
+      // `BG` is also the block of BatchClassId. Note that this is different
+      // from `CreateGroup` in `pushBlocksImpl`.
+ BG->PushedBlocks = 1;
+ BG->BytesInBGAtLastCheckpoint = 0;
+ BG->MaxCachedPerBatch =
+ CacheT::getMaxCached(getSizeByClassId(SizeClassMap::BatchClassId));
+
+ Region->FreeListInfo.BlockList.push_front(BG);
+ }
+
+ if (UNLIKELY(Size == 0))
+ return;
+
+    // This happens in two cases:
+    // 1. We just allocated a new `BatchGroup`.
+    // 2. Only one block is pushed when the freelist is empty.
+ if (BG->Batches.empty()) {
+ // Construct the `TransferBatch` on the last element.
+ TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(
+ decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
+ TB->clear();
+ // As mentioned above, addresses of `TransferBatch` and `BatchGroup` are
+ // recorded in the TransferBatch.
+ TB->add(Array[Size - 1]);
+ TB->add(
+ compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(BG)));
+ --Size;
+ DCHECK_EQ(BG->PushedBlocks, 1U);
+ // `TB` is also the block of BatchClassId.
+ BG->PushedBlocks += 1;
+ BG->Batches.push_front(TB);
+ }
+
+ TransferBatchT *CurBatch = BG->Batches.front();
+ DCHECK_NE(CurBatch, nullptr);
+
+ for (u32 I = 0; I < Size;) {
+ u16 UnusedSlots =
+ static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
+ if (UnusedSlots == 0) {
+ CurBatch = reinterpret_cast<TransferBatchT *>(
+ decompactPtr(SizeClassMap::BatchClassId, Array[I]));
+ CurBatch->clear();
+ // Self-contained
+ CurBatch->add(Array[I]);
+ ++I;
+ // TODO(chiahungduan): Avoid the use of push_back() in `Batches` of
+ // BatchClassId.
+ BG->Batches.push_front(CurBatch);
+ UnusedSlots = static_cast<u16>(BG->MaxCachedPerBatch - 1);
+ }
+      // `UnusedSlots` is u16 so the result will also fit in u16.
+ const u16 AppendSize = static_cast<u16>(Min<u32>(UnusedSlots, Size - I));
+ CurBatch->appendFromArray(&Array[I], AppendSize);
+ I += AppendSize;
+ }
+
+ BG->PushedBlocks += Size;
+ }
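
A toy illustration of the self-contained trick documented above: the batch's own block address is stored as one of its free entries, so handing out every entry implicitly frees the batch's storage, with no separate recycling step. Simplified types, not the real TransferBatch:

    #include <cassert>
    #include <cstdint>
    #include <new>

    // A batch stored in the BatchClass region records its own block address as
    // one of its free entries.
    struct Batch {
      uintptr_t Blocks[4];
      int Count = 0;
      void add(uintptr_t B) { Blocks[Count++] = B; }
    };

    int main() {
      alignas(Batch) unsigned char Storage[sizeof(Batch)];
      Batch *B = new (Storage) Batch();
      B->add(reinterpret_cast<uintptr_t>(B)); // self-contained: records itself
      B->add(0x1000);                         // plus another free block
      assert(B->Blocks[0] == reinterpret_cast<uintptr_t>(B));
      return 0;
    }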
+
+ // Push the blocks to their batch group. The layout will be like,
+ //
+  //  FreeListInfo.BlockList -> BG -> BG -> BG
+ // | | |
+ // v v v
+ // TB TB TB
+ // |
+ // v
+ // TB
+ //
+  // Each BatchGroup (BG) is associated with a unique group id and its free
+  // blocks are managed by a list of TransferBatches (TB). To reduce insertion
+  // time, the BGs are kept sorted and the input `Array` is expected to be
+  // sorted as well, which makes maintaining the sorted property cheaper.
+  // Pass `SameGroup=true` to indicate that all blocks in the array are from
+  // the same group, so that we can skip checking the group id of each block.
+ void pushBlocksImpl(CacheT *C, uptr ClassId, RegionInfo *Region,
+ CompactPtrT *Array, u32 Size, bool SameGroup = false)
+ REQUIRES(Region->FLLock) {
+ DCHECK_NE(ClassId, SizeClassMap::BatchClassId);
+ DCHECK_GT(Size, 0U);
+
+ auto CreateGroup = [&](uptr CompactPtrGroupBase) {
+ BatchGroupT *BG =
+ reinterpret_cast<BatchGroupT *>(C->getBatchClassBlock());
+ BG->Batches.clear();
+ TransferBatchT *TB =
+ reinterpret_cast<TransferBatchT *>(C->getBatchClassBlock());
+ TB->clear();
+
+ BG->CompactPtrGroupBase = CompactPtrGroupBase;
+ BG->Batches.push_front(TB);
+ BG->PushedBlocks = 0;
+ BG->BytesInBGAtLastCheckpoint = 0;
+ BG->MaxCachedPerBatch = CacheT::getMaxCached(getSizeByClassId(ClassId));
+
+ return BG;
+ };
+
+ auto InsertBlocks = [&](BatchGroupT *BG, CompactPtrT *Array, u32 Size) {
+ SinglyLinkedList<TransferBatchT> &Batches = BG->Batches;
+ TransferBatchT *CurBatch = Batches.front();
+ DCHECK_NE(CurBatch, nullptr);
+
+ for (u32 I = 0; I < Size;) {
+ DCHECK_GE(BG->MaxCachedPerBatch, CurBatch->getCount());
+ u16 UnusedSlots =
+ static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
+ if (UnusedSlots == 0) {
+ CurBatch =
+ reinterpret_cast<TransferBatchT *>(C->getBatchClassBlock());
+ CurBatch->clear();
+ Batches.push_front(CurBatch);
+ UnusedSlots = BG->MaxCachedPerBatch;
+ }
+        // `UnusedSlots` is u16 so the result will also fit in u16.
+ u16 AppendSize = static_cast<u16>(Min<u32>(UnusedSlots, Size - I));
+ CurBatch->appendFromArray(&Array[I], AppendSize);
+ I += AppendSize;
+ }
+
+ BG->PushedBlocks += Size;
+ };
+
+ Region->FreeListInfo.PushedBlocks += Size;
+ BatchGroupT *Cur = Region->FreeListInfo.BlockList.front();
+
+ // In the following, `Cur` always points to the BatchGroup for blocks that
+ // will be pushed next. `Prev` is the element right before `Cur`.
+ BatchGroupT *Prev = nullptr;
+
+ while (Cur != nullptr &&
+ compactPtrGroup(Array[0]) > Cur->CompactPtrGroupBase) {
+ Prev = Cur;
+ Cur = Cur->Next;
+ }
+
+ if (Cur == nullptr ||
+ compactPtrGroup(Array[0]) != Cur->CompactPtrGroupBase) {
+ Cur = CreateGroup(compactPtrGroup(Array[0]));
+ if (Prev == nullptr)
+ Region->FreeListInfo.BlockList.push_front(Cur);
+ else
+ Region->FreeListInfo.BlockList.insert(Prev, Cur);
+ }
+
+ // All the blocks are from the same group, just push without checking group
+ // id.
+ if (SameGroup) {
+ for (u32 I = 0; I < Size; ++I)
+ DCHECK_EQ(compactPtrGroup(Array[I]), Cur->CompactPtrGroupBase);
+
+ InsertBlocks(Cur, Array, Size);
+ return;
+ }
+
+ // The blocks are sorted by group id. Determine the segment of group and
+ // push them to their group together.
+ u32 Count = 1;
+ for (u32 I = 1; I < Size; ++I) {
+ if (compactPtrGroup(Array[I - 1]) != compactPtrGroup(Array[I])) {
+ DCHECK_EQ(compactPtrGroup(Array[I - 1]), Cur->CompactPtrGroupBase);
+ InsertBlocks(Cur, Array + I - Count, Count);
+
+ while (Cur != nullptr &&
+ compactPtrGroup(Array[I]) > Cur->CompactPtrGroupBase) {
+ Prev = Cur;
+ Cur = Cur->Next;
+ }
+
+ if (Cur == nullptr ||
+ compactPtrGroup(Array[I]) != Cur->CompactPtrGroupBase) {
+ Cur = CreateGroup(compactPtrGroup(Array[I]));
+ DCHECK_NE(Prev, nullptr);
+ Region->FreeListInfo.BlockList.insert(Prev, Cur);
+ }
+
+ Count = 1;
+ } else {
+ ++Count;
+ }
+ }
+
+ InsertBlocks(Cur, Array + Size - Count, Count);
+ }
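
pushBlocksImpl() leans on `Array` being sorted so each run of same-group blocks is inserted with a single freelist walk. A standalone sketch of just the run-splitting bookkeeping, with a hypothetical 64 KiB group function and plain integers instead of compact pointers:

    #include <cstdint>
    #include <cstdio>

    using u32 = uint32_t;
    using uptr = uintptr_t;

    // Hypothetical group function: 64 KiB windows.
    static uptr group(uptr P) { return P & ~uptr(0xFFFF); }

    // Walk a sorted array and hand each same-group run off in one call,
    // mirroring the Count/I bookkeeping in pushBlocksImpl().
    void pushSorted(const uptr *Array, u32 Size) {
      u32 Count = 1;
      for (u32 I = 1; I < Size; ++I) {
        if (group(Array[I - 1]) != group(Array[I])) {
          printf("push %u block(s) to group 0x%llx\n", Count,
                 static_cast<unsigned long long>(group(Array[I - 1])));
          Count = 1;
        } else {
          ++Count;
        }
      }
      printf("push %u block(s) to group 0x%llx\n", Count,
             static_cast<unsigned long long>(group(Array[Size - 1])));
    }

    int main() {
      const uptr Blocks[] = {0x10000, 0x10040, 0x20000, 0x20040, 0x30000};
      pushSorted(Blocks, 5); // 2 + 2 + 1 blocks across three groups
      return 0;
    }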
+
+ TransferBatchT *popBatchWithCV(CacheT *C, uptr ClassId, RegionInfo *Region,
+ bool &ReportRegionExhausted) {
+ TransferBatchT *B = nullptr;
+
+ while (true) {
+      // We only expect one thread to be refilling the freelist; the other
+      // threads wait for either the completion of
+      // `populateFreeListAndPopBatch()` or a `pushBlocks()` call from another
+      // thread.
+ bool PopulateFreeList = false;
+ {
+ ScopedLock FL(Region->FLLock);
+ if (!Region->isPopulatingFreeList) {
+ Region->isPopulatingFreeList = true;
+ PopulateFreeList = true;
+ }
+ }
+
+ if (PopulateFreeList) {
+ ScopedLock ML(Region->MMLock);
+
+ const bool RegionIsExhausted = Region->Exhausted;
+ if (!RegionIsExhausted)
+ B = populateFreeListAndPopBatch(C, ClassId, Region);
+ ReportRegionExhausted = !RegionIsExhausted && Region->Exhausted;
+
+ {
+          // Before reacquiring the `FLLock`, the freelist may be used up again
+          // and some threads may be waiting for the current thread to refill
+          // it. It's important to set `Region->isPopulatingFreeList` to false
+          // so that threads about to sleep will notice the status change.
+ ScopedLock FL(Region->FLLock);
+ Region->isPopulatingFreeList = false;
+ Region->FLLockCV.notifyAll(Region->FLLock);
+ }
+
+ break;
+ }
+
+      // At this point, two preconditions must be met before waiting:
+      // 1. The freelist is empty.
+      // 2. Region->isPopulatingFreeList == true, i.e., someone is still doing
+      //    `populateFreeListAndPopBatch()`.
+      //
+      // Note that the freelist may be empty while
+      // Region->isPopulatingFreeList == false, because all the newly populated
+      // blocks were used up right after the refill. Therefore, we have to
+      // check whether someone is still populating the freelist.
+ ScopedLock FL(Region->FLLock);
+ if (LIKELY(B = popBatchImpl(C, ClassId, Region)))
+ break;
+
+ if (!Region->isPopulatingFreeList)
+ continue;
+
+      // Now the freelist is empty and someone is doing the refill. We wait
+      // until the freelist is refilled or someone finishes
+      // `populateFreeListAndPopBatch()`. The refill can be done by
+      // `populateFreeListAndPopBatch()`, `pushBlocks()`,
+      // `pushBatchClassBlocks()` and `mergeGroupsToReleaseBack()`.
+ Region->FLLockCV.wait(Region->FLLock);
+
+ if (LIKELY(B = popBatchImpl(C, ClassId, Region)))
+ break;
+ }
+
+ return B;
+ }
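
The loop above is the classic single-populator pattern: one thread claims the flag and refills outside the freelist lock while the rest sleep on the condition variable and retry. A minimal std::condition_variable rendering of the same structure, under the assumption of generic ints standing in for TransferBatches:

    #include <condition_variable>
    #include <deque>
    #include <mutex>

    struct FreeList {
      std::mutex M;
      std::condition_variable CV;
      std::deque<int> Items;
      bool Populating = false;
    };

    int popOrRefill(FreeList &FL) {
      while (true) {
        std::unique_lock<std::mutex> L(FL.M);
        if (!FL.Items.empty()) {
          int V = FL.Items.front();
          FL.Items.pop_front();
          return V;
        }
        if (!FL.Populating) {
          FL.Populating = true;
          L.unlock();
          // The expensive refill happens outside the freelist lock, mirroring
          // the MMLock phase of populateFreeListAndPopBatch().
          std::deque<int> Fresh = {1, 2, 3};
          L.lock();
          FL.Items.insert(FL.Items.end(), Fresh.begin(), Fresh.end());
          FL.Populating = false;
          FL.CV.notify_all();
          continue;
        }
        // Someone else is refilling: wait until blocks appear or it finishes.
        FL.CV.wait(L, [&] { return !FL.Items.empty() || !FL.Populating; });
      }
    }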
+
+ // Pop one TransferBatch from a BatchGroup. The BatchGroup with the smallest
+ // group id will be considered first.
+ //
+  // Region->FLLock needs to be held when calling this method.
+ TransferBatchT *popBatchImpl(CacheT *C, uptr ClassId, RegionInfo *Region)
+ REQUIRES(Region->FLLock) {
+ if (Region->FreeListInfo.BlockList.empty())
+ return nullptr;
+
+ SinglyLinkedList<TransferBatchT> &Batches =
+ Region->FreeListInfo.BlockList.front()->Batches;
+
+ if (Batches.empty()) {
+ DCHECK_EQ(ClassId, SizeClassMap::BatchClassId);
+ BatchGroupT *BG = Region->FreeListInfo.BlockList.front();
+ Region->FreeListInfo.BlockList.pop_front();
+
+ // Block used by `BatchGroup` is from BatchClassId. Turn the block into
+ // `TransferBatch` with single block.
+ TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(BG);
+ TB->clear();
+ TB->add(
+ compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(TB)));
+ Region->FreeListInfo.PoppedBlocks += 1;
+ return TB;
+ }
+
+ TransferBatchT *B = Batches.front();
+ Batches.pop_front();
+ DCHECK_NE(B, nullptr);
+ DCHECK_GT(B->getCount(), 0U);
+
+ if (Batches.empty()) {
+ BatchGroupT *BG = Region->FreeListInfo.BlockList.front();
+ Region->FreeListInfo.BlockList.pop_front();
+
+      // We don't keep a BatchGroup with zero blocks, to avoid empty-checking
+      // while allocating. Note that the block backing the BatchGroup itself is
+      // recorded as a free block in the last element of BatchGroup::Batches,
+      // which means that once we pop the last TransferBatch, that block is
+      // implicitly deallocated.
+ if (ClassId != SizeClassMap::BatchClassId)
+ C->deallocate(SizeClassMap::BatchClassId, BG);
+ }
+
+ Region->FreeListInfo.PoppedBlocks += B->getCount();
+
+ return B;
+ }
+
+ // Refill the freelist and return one batch.
+ NOINLINE TransferBatchT *populateFreeListAndPopBatch(CacheT *C, uptr ClassId,
+ RegionInfo *Region)
+ REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
const uptr Size = getSizeByClassId(ClassId);
- const u32 MaxCount = TransferBatch::getMaxCached(Size);
+ const u16 MaxCount = CacheT::getMaxCached(Size);
const uptr RegionBeg = Region->RegionBeg;
- const uptr MappedUser = Region->MappedUser;
- const uptr TotalUserBytes = Region->AllocatedUser + MaxCount * Size;
+ const uptr MappedUser = Region->MemMapInfo.MappedUser;
+ const uptr TotalUserBytes =
+ Region->MemMapInfo.AllocatedUser + MaxCount * Size;
// Map more space for blocks, if necessary.
if (TotalUserBytes > MappedUser) {
// Do the mmap for the user memory.
const uptr MapSize =
- roundUpTo(TotalUserBytes - MappedUser, MapSizeIncrement);
+ roundUp(TotalUserBytes - MappedUser, MapSizeIncrement);
const uptr RegionBase = RegionBeg - getRegionBaseByClassId(ClassId);
if (UNLIKELY(RegionBase + MappedUser + MapSize > RegionSize)) {
- if (!Region->Exhausted) {
- Region->Exhausted = true;
- ScopedString Str;
- getStats(&Str);
- Str.append(
- "Scudo OOM: The process has exhausted %zuM for size class %zu.\n",
- RegionSize >> 20, Size);
- Str.output();
- }
+ Region->Exhausted = true;
return nullptr;
}
- if (MappedUser == 0)
- Region->Data = Data;
- if (UNLIKELY(!map(
- reinterpret_cast<void *>(RegionBeg + MappedUser), MapSize,
- "scudo:primary",
+
+ if (UNLIKELY(!Region->MemMapInfo.MemMap.remap(
+ RegionBeg + MappedUser, MapSize, "scudo:primary",
MAP_ALLOWNOMEM | MAP_RESIZABLE |
- (useMemoryTagging<Config>(Options.load()) ? MAP_MEMTAG : 0),
- &Region->Data)))
+ (useMemoryTagging<Config>(Options.load()) ? MAP_MEMTAG
+ : 0)))) {
return nullptr;
- Region->MappedUser += MapSize;
+ }
+ Region->MemMapInfo.MappedUser += MapSize;
C->getStats().add(StatMapped, MapSize);
}
- const u32 NumberOfBlocks = Min(
- MaxNumBatches * MaxCount,
- static_cast<u32>((Region->MappedUser - Region->AllocatedUser) / Size));
+ const u32 NumberOfBlocks =
+ Min(MaxNumBatches * MaxCount,
+ static_cast<u32>((Region->MemMapInfo.MappedUser -
+ Region->MemMapInfo.AllocatedUser) /
+ Size));
DCHECK_GT(NumberOfBlocks, 0);
constexpr u32 ShuffleArraySize =
- MaxNumBatches * TransferBatch::MaxNumCached;
+ MaxNumBatches * TransferBatchT::MaxNumCached;
CompactPtrT ShuffleArray[ShuffleArraySize];
DCHECK_LE(NumberOfBlocks, ShuffleArraySize);
const uptr CompactPtrBase = getCompactPtrBaseByClassId(ClassId);
- uptr P = RegionBeg + Region->AllocatedUser;
+ uptr P = RegionBeg + Region->MemMapInfo.AllocatedUser;
for (u32 I = 0; I < NumberOfBlocks; I++, P += Size)
ShuffleArray[I] = compactPtrInternal(CompactPtrBase, P);
- // No need to shuffle the batches size class.
- if (ClassId != SizeClassMap::BatchClassId)
- shuffle(ShuffleArray, NumberOfBlocks, &Region->RandState);
- for (u32 I = 0; I < NumberOfBlocks;) {
- TransferBatch *B =
- C->createBatch(ClassId, reinterpret_cast<void *>(decompactPtrInternal(
- CompactPtrBase, ShuffleArray[I])));
- if (UNLIKELY(!B))
- return nullptr;
- const u32 N = Min(MaxCount, NumberOfBlocks - I);
- B->setFromArray(&ShuffleArray[I], N);
- Region->FreeList.push_back(B);
- I += N;
+
+ ScopedLock L(Region->FLLock);
+
+ if (ClassId != SizeClassMap::BatchClassId) {
+ u32 N = 1;
+ uptr CurGroup = compactPtrGroup(ShuffleArray[0]);
+ for (u32 I = 1; I < NumberOfBlocks; I++) {
+ if (UNLIKELY(compactPtrGroup(ShuffleArray[I]) != CurGroup)) {
+ shuffle(ShuffleArray + I - N, N, &Region->RandState);
+ pushBlocksImpl(C, ClassId, Region, ShuffleArray + I - N, N,
+ /*SameGroup=*/true);
+ N = 1;
+ CurGroup = compactPtrGroup(ShuffleArray[I]);
+ } else {
+ ++N;
+ }
+ }
+
+ shuffle(ShuffleArray + NumberOfBlocks - N, N, &Region->RandState);
+ pushBlocksImpl(C, ClassId, Region, &ShuffleArray[NumberOfBlocks - N], N,
+ /*SameGroup=*/true);
+ } else {
+ pushBatchClassBlocks(Region, ShuffleArray, NumberOfBlocks);
}
- TransferBatch *B = Region->FreeList.front();
- Region->FreeList.pop_front();
- DCHECK(B);
- DCHECK_GT(B->getCount(), 0);
+
+ TransferBatchT *B = popBatchImpl(C, ClassId, Region);
+ DCHECK_NE(B, nullptr);
+
+    // Note that `PushedBlocks` and `PoppedBlocks` are supposed to record only
+    // the requests from `pushBlocks()` and `popBatch()`, which are the
+    // external interfaces. `populateFreeListAndPopBatch()` is an internal
+    // interface, so we roll the values back to avoid skewing the stats.
+ Region->FreeListInfo.PushedBlocks -= NumberOfBlocks;
const uptr AllocatedUser = Size * NumberOfBlocks;
C->getStats().add(StatFree, AllocatedUser);
- Region->AllocatedUser += AllocatedUser;
+ Region->MemMapInfo.AllocatedUser += AllocatedUser;
return B;
}
- void getStats(ScopedString *Str, uptr ClassId, uptr Rss) {
- RegionInfo *Region = getRegionInfo(ClassId);
- if (Region->MappedUser == 0)
+ void getStats(ScopedString *Str, uptr ClassId, RegionInfo *Region)
+ REQUIRES(Region->MMLock, Region->FLLock) {
+ if (Region->MemMapInfo.MappedUser == 0)
return;
- const uptr InUse = Region->Stats.PoppedBlocks - Region->Stats.PushedBlocks;
- const uptr TotalChunks = Region->AllocatedUser / getSizeByClassId(ClassId);
- Str->append("%s %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu "
- "inuse: %6zu total: %6zu rss: %6zuK releases: %6zu last "
- "released: %6zuK region: 0x%zx (0x%zx)\n",
- Region->Exhausted ? "F" : " ", ClassId,
- getSizeByClassId(ClassId), Region->MappedUser >> 10,
- Region->Stats.PoppedBlocks, Region->Stats.PushedBlocks, InUse,
- TotalChunks, Rss >> 10, Region->ReleaseInfo.RangesReleased,
- Region->ReleaseInfo.LastReleasedBytes >> 10, Region->RegionBeg,
- getRegionBaseByClassId(ClassId));
+ const uptr BlockSize = getSizeByClassId(ClassId);
+ const uptr InUseBlocks =
+ Region->FreeListInfo.PoppedBlocks - Region->FreeListInfo.PushedBlocks;
+ const uptr BytesInFreeList =
+ Region->MemMapInfo.AllocatedUser - InUseBlocks * BlockSize;
+ uptr RegionPushedBytesDelta = 0;
+ if (BytesInFreeList >=
+ Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint) {
+ RegionPushedBytesDelta =
+ BytesInFreeList - Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint;
+ }
+ const uptr TotalChunks = Region->MemMapInfo.AllocatedUser / BlockSize;
+ Str->append(
+ "%s %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu "
+ "inuse: %6zu total: %6zu releases: %6zu last "
+ "released: %6zuK latest pushed bytes: %6zuK region: 0x%zx (0x%zx)\n",
+ Region->Exhausted ? "E" : " ", ClassId, getSizeByClassId(ClassId),
+ Region->MemMapInfo.MappedUser >> 10, Region->FreeListInfo.PoppedBlocks,
+ Region->FreeListInfo.PushedBlocks, InUseBlocks, TotalChunks,
+ Region->ReleaseInfo.RangesReleased,
+ Region->ReleaseInfo.LastReleasedBytes >> 10,
+ RegionPushedBytesDelta >> 10, Region->RegionBeg,
+ getRegionBaseByClassId(ClassId));
+ }
+
+ void getRegionFragmentationInfo(RegionInfo *Region, uptr ClassId,
+ ScopedString *Str) REQUIRES(Region->MMLock) {
+ const uptr BlockSize = getSizeByClassId(ClassId);
+ const uptr AllocatedUserEnd =
+ Region->MemMapInfo.AllocatedUser + Region->RegionBeg;
+
+ SinglyLinkedList<BatchGroupT> GroupsToRelease;
+ {
+ ScopedLock L(Region->FLLock);
+ GroupsToRelease = Region->FreeListInfo.BlockList;
+ Region->FreeListInfo.BlockList.clear();
+ }
+
+ FragmentationRecorder Recorder;
+ if (!GroupsToRelease.empty()) {
+ PageReleaseContext Context =
+ markFreeBlocks(Region, BlockSize, AllocatedUserEnd,
+ getCompactPtrBaseByClassId(ClassId), GroupsToRelease);
+ auto SkipRegion = [](UNUSED uptr RegionIndex) { return false; };
+ releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
+
+ mergeGroupsToReleaseBack(Region, GroupsToRelease);
+ }
+
+ ScopedLock L(Region->FLLock);
+ const uptr PageSize = getPageSizeCached();
+ const uptr TotalBlocks = Region->MemMapInfo.AllocatedUser / BlockSize;
+ const uptr InUseBlocks =
+ Region->FreeListInfo.PoppedBlocks - Region->FreeListInfo.PushedBlocks;
+ const uptr AllocatedPagesCount =
+ roundUp(Region->MemMapInfo.AllocatedUser, PageSize) / PageSize;
+ DCHECK_GE(AllocatedPagesCount, Recorder.getReleasedPagesCount());
+ const uptr InUsePages =
+ AllocatedPagesCount - Recorder.getReleasedPagesCount();
+ const uptr InUseBytes = InUsePages * PageSize;
+
+ uptr Integral;
+ uptr Fractional;
+ computePercentage(BlockSize * InUseBlocks, InUsePages * PageSize, &Integral,
+ &Fractional);
+ Str->append(" %02zu (%6zu): inuse/total blocks: %6zu/%6zu inuse/total "
+ "pages: %6zu/%6zu inuse bytes: %6zuK util: %3zu.%02zu%%\n",
+ ClassId, BlockSize, InUseBlocks, TotalBlocks, InUsePages,
+ AllocatedPagesCount, InUseBytes >> 10, Integral, Fractional);
}
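
The util figure printed above is simply in-use block bytes over still-resident page bytes. A worked example with invented numbers, mirroring the integral/fractional split that computePercentage() produces:

    #include <cstdio>

    // Invented numbers: 1000 in-use 48-byte blocks spread over 20 resident
    // 4 KiB pages.
    int main() {
      const unsigned long InUseBytes = 1000UL * 48UL;  // 48000
      const unsigned long PageBytes = 20UL * 4096UL;   // 81920
      const unsigned long Integral = InUseBytes * 100UL / PageBytes;
      const unsigned long Fractional =
          (InUseBytes * 100UL % PageBytes) * 100UL / PageBytes;
      printf("util: %lu.%02lu%%\n", Integral, Fractional); // util: 58.59%
      return 0;
    }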
NOINLINE uptr releaseToOSMaybe(RegionInfo *Region, uptr ClassId,
- bool Force = false) {
+ ReleaseToOS ReleaseType = ReleaseToOS::Normal)
+ REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
const uptr BlockSize = getSizeByClassId(ClassId);
+ uptr BytesInFreeList;
+ const uptr AllocatedUserEnd =
+ Region->MemMapInfo.AllocatedUser + Region->RegionBeg;
+ SinglyLinkedList<BatchGroupT> GroupsToRelease;
+
+ {
+ ScopedLock L(Region->FLLock);
+
+ BytesInFreeList = Region->MemMapInfo.AllocatedUser -
+ (Region->FreeListInfo.PoppedBlocks -
+ Region->FreeListInfo.PushedBlocks) *
+ BlockSize;
+ if (UNLIKELY(BytesInFreeList == 0))
+        return 0;
+
+ // ==================================================================== //
+ // 1. Check if we have enough free blocks and if it's worth doing a page
+ // release.
+ // ==================================================================== //
+ if (ReleaseType != ReleaseToOS::ForceAll &&
+ !hasChanceToReleasePages(Region, BlockSize, BytesInFreeList,
+ ReleaseType)) {
+ return 0;
+ }
+
+ // ==================================================================== //
+ // 2. Determine which groups can release the pages. Use a heuristic to
+ // gather groups that are candidates for doing a release.
+ // ==================================================================== //
+ if (ReleaseType == ReleaseToOS::ForceAll) {
+ GroupsToRelease = Region->FreeListInfo.BlockList;
+ Region->FreeListInfo.BlockList.clear();
+ } else {
+ GroupsToRelease =
+ collectGroupsToRelease(Region, BlockSize, AllocatedUserEnd,
+ getCompactPtrBaseByClassId(ClassId));
+ }
+ if (GroupsToRelease.empty())
+ return 0;
+ }
+
+    // Note that we have extracted the `GroupsToRelease` from the region
+    // freelist. It's safe to let pushBlocks()/popBatch() access the remaining
+    // region freelist. In steps 3 and 4, we will temporarily release the
+    // FLLock and lock it again before step 5.
+
+ // ==================================================================== //
+ // 3. Mark the free blocks in `GroupsToRelease` in the `PageReleaseContext`.
+ // Then we can tell which pages are in-use by querying
+ // `PageReleaseContext`.
+ // ==================================================================== //
+ PageReleaseContext Context =
+ markFreeBlocks(Region, BlockSize, AllocatedUserEnd,
+ getCompactPtrBaseByClassId(ClassId), GroupsToRelease);
+ if (UNLIKELY(!Context.hasBlockMarked())) {
+ mergeGroupsToReleaseBack(Region, GroupsToRelease);
+ return 0;
+ }
+
+ // ==================================================================== //
+ // 4. Release the unused physical pages back to the OS.
+ // ==================================================================== //
+ RegionReleaseRecorder<MemMapT> Recorder(&Region->MemMapInfo.MemMap,
+ Region->RegionBeg,
+ Context.getReleaseOffset());
+ auto SkipRegion = [](UNUSED uptr RegionIndex) { return false; };
+ releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
+ if (Recorder.getReleasedRangesCount() > 0) {
+ Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
+ Region->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
+ Region->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
+ }
+ Region->ReleaseInfo.LastReleaseAtNs = getMonotonicTimeFast();
+
+ // ====================================================================== //
+ // 5. Merge the `GroupsToRelease` back to the freelist.
+ // ====================================================================== //
+ mergeGroupsToReleaseBack(Region, GroupsToRelease);
+
+ return Recorder.getReleasedBytes();
+ }
+
+ bool hasChanceToReleasePages(RegionInfo *Region, uptr BlockSize,
+ uptr BytesInFreeList, ReleaseToOS ReleaseType)
+ REQUIRES(Region->MMLock, Region->FLLock) {
+ DCHECK_GE(Region->FreeListInfo.PoppedBlocks,
+ Region->FreeListInfo.PushedBlocks);
const uptr PageSize = getPageSizeCached();
- DCHECK_GE(Region->Stats.PoppedBlocks, Region->Stats.PushedBlocks);
- const uptr BytesInFreeList =
- Region->AllocatedUser -
- (Region->Stats.PoppedBlocks - Region->Stats.PushedBlocks) * BlockSize;
- if (BytesInFreeList < PageSize)
- return 0; // No chance to release anything.
- const uptr BytesPushed = (Region->Stats.PushedBlocks -
- Region->ReleaseInfo.PushedBlocksAtLastRelease) *
- BlockSize;
- if (BytesPushed < PageSize)
- return 0; // Nothing new to release.
+ // Always update `BytesInFreeListAtLastCheckpoint` with the smallest value
+ // so that we won't underestimate the releasable pages. For example, the
+ // following is the region usage,
+ //
+ // BytesInFreeListAtLastCheckpoint AllocatedUser
+ // v v
+ // |--------------------------------------->
+ // ^ ^
+ // BytesInFreeList ReleaseThreshold
+ //
+    // In general, if we have collected enough bytes and the amount of free
+    // bytes meets the ReleaseThreshold, we will try to do a page release. If
+    // we don't update `BytesInFreeListAtLastCheckpoint` when the current
+    // `BytesInFreeList` is smaller, we may wait longer for enough freed
+    // blocks because we miss the bytes between
+    // (BytesInFreeListAtLastCheckpoint - BytesInFreeList).
+ if (BytesInFreeList <=
+ Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint) {
+ Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
+ }
+
+ const uptr RegionPushedBytesDelta =
+ BytesInFreeList - Region->ReleaseInfo.BytesInFreeListAtLastCheckpoint;
+ if (RegionPushedBytesDelta < PageSize)
+ return false;
// Releasing smaller blocks is expensive, so we want to make sure that a
// significant amount of bytes are free, and that there has been a good
// amount of batches pushed to the freelist before attempting to release.
- if (BlockSize < PageSize / 16U) {
- if (!Force && BytesPushed < Region->AllocatedUser / 16U)
- return 0;
- // We want 8x% to 9x% free bytes (the larger the block, the lower the %).
- if ((BytesInFreeList * 100U) / Region->AllocatedUser <
- (100U - 1U - BlockSize / 16U))
- return 0;
- }
+ if (isSmallBlock(BlockSize) && ReleaseType == ReleaseToOS::Normal)
+ if (RegionPushedBytesDelta < Region->TryReleaseThreshold)
+ return false;
- if (!Force) {
+ if (ReleaseType == ReleaseToOS::Normal) {
const s32 IntervalMs = atomic_load_relaxed(&ReleaseToOsIntervalMs);
if (IntervalMs < 0)
- return 0;
- if (Region->ReleaseInfo.LastReleaseAtNs +
- static_cast<u64>(IntervalMs) * 1000000 >
- getMonotonicTime()) {
- return 0; // Memory was returned recently.
+ return false;
+
+      // The constant 8 here was selected by profiling several apps: the
+      // number of unreleased pages in the large size classes is around 16
+      // pages or more. We choose half of that as a heuristic, which also
+      // avoids attempting a page release on every pushBlocks() call for
+      // large blocks.
+ const bool ByPassReleaseInterval =
+ isLargeBlock(BlockSize) && RegionPushedBytesDelta > 8 * PageSize;
+ if (!ByPassReleaseInterval) {
+ if (Region->ReleaseInfo.LastReleaseAtNs +
+ static_cast<u64>(IntervalMs) * 1000000 >
+ getMonotonicTimeFast()) {
+ // Memory was returned recently.
+ return false;
+ }
+ }
+ } // if (ReleaseType == ReleaseToOS::Normal)
+
+ return true;
+ }
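
The checkpoint logic above can be distilled into a small gate: lower the checkpoint eagerly whenever the freelist shrinks, and release only once the growth since the checkpoint crosses a threshold. A self-contained sketch with a placeholder threshold, not the real TryReleaseThreshold computation:

    #include <cassert>
    #include <cstdint>

    struct ReleaseGate {
      uint64_t AtLastCheckpoint = 0;
      bool shouldTryRelease(uint64_t BytesInFreeList, uint64_t Threshold) {
        if (BytesInFreeList <= AtLastCheckpoint)
          AtLastCheckpoint = BytesInFreeList; // never underestimate the delta
        return BytesInFreeList - AtLastCheckpoint >= Threshold;
      }
      void onReleased(uint64_t BytesInFreeList) {
        AtLastCheckpoint = BytesInFreeList; // successful release: move it up
      }
    };

    int main() {
      ReleaseGate G;
      assert(!G.shouldTryRelease(100, 4096)); // too little pushed so far
      assert(G.shouldTryRelease(5000, 4096)); // enough growth: try a release
      G.onReleased(5000);
      // Blocks get popped and the freelist shrinks to 200; the checkpoint
      // follows it down so the next 4096 pushed bytes are not under-counted.
      assert(!G.shouldTryRelease(200, 4096));
      assert(G.shouldTryRelease(200 + 4096, 4096));
      return 0;
    }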
+
+ SinglyLinkedList<BatchGroupT>
+ collectGroupsToRelease(RegionInfo *Region, const uptr BlockSize,
+ const uptr AllocatedUserEnd, const uptr CompactPtrBase)
+ REQUIRES(Region->MMLock, Region->FLLock) {
+ const uptr GroupSize = (1UL << GroupSizeLog);
+ const uptr PageSize = getPageSizeCached();
+ SinglyLinkedList<BatchGroupT> GroupsToRelease;
+
+    // We examine each group and take the minimum distance to the release
+    // threshold as the next Region::TryReleaseThreshold. Note that if the
+    // size of free blocks has reached the release threshold, the distance to
+    // the next release will be PageSize * SmallerBlockReleasePageDelta. See
+    // the comment on `SmallerBlockReleasePageDelta` for more details.
+ uptr MinDistToThreshold = GroupSize;
+
+ for (BatchGroupT *BG = Region->FreeListInfo.BlockList.front(),
+ *Prev = nullptr;
+ BG != nullptr;) {
+ // Group boundary is always GroupSize-aligned from CompactPtr base. The
+ // layout of memory groups is like,
+ //
+ // (CompactPtrBase)
+ // #1 CompactPtrGroupBase #2 CompactPtrGroupBase ...
+ // | | |
+ // v v v
+ // +-----------------------+-----------------------+
+ // \ / \ /
+ // --- GroupSize --- --- GroupSize ---
+ //
+ // After decompacting the CompactPtrGroupBase, we expect the alignment
+ // property is held as well.
+ const uptr BatchGroupBase =
+ decompactGroupBase(CompactPtrBase, BG->CompactPtrGroupBase);
+ DCHECK_LE(Region->RegionBeg, BatchGroupBase);
+ DCHECK_GE(AllocatedUserEnd, BatchGroupBase);
+ DCHECK_EQ((Region->RegionBeg - BatchGroupBase) % GroupSize, 0U);
+ // TransferBatches are pushed in front of BG.Batches. The first one may
+ // not have all caches used.
+ const uptr NumBlocks = (BG->Batches.size() - 1) * BG->MaxCachedPerBatch +
+ BG->Batches.front()->getCount();
+ const uptr BytesInBG = NumBlocks * BlockSize;
+
+ if (BytesInBG <= BG->BytesInBGAtLastCheckpoint) {
+ BG->BytesInBGAtLastCheckpoint = BytesInBG;
+ Prev = BG;
+ BG = BG->Next;
+ continue;
}
+
+ const uptr PushedBytesDelta = BG->BytesInBGAtLastCheckpoint - BytesInBG;
+
+      // Given the randomness property, we try to release the pages only if
+      // the bytes used by free blocks exceed a certain proportion of the
+      // group size. Note that this heuristic only applies when all the space
+      // in a BatchGroup is allocated.
+ if (isSmallBlock(BlockSize)) {
+ const uptr BatchGroupEnd = BatchGroupBase + GroupSize;
+ const uptr AllocatedGroupSize = AllocatedUserEnd >= BatchGroupEnd
+ ? GroupSize
+ : AllocatedUserEnd - BatchGroupBase;
+ const uptr ReleaseThreshold =
+ (AllocatedGroupSize * (100 - 1U - BlockSize / 16U)) / 100U;
+ const bool HighDensity = BytesInBG >= ReleaseThreshold;
+ const bool MayHaveReleasedAll = NumBlocks >= (GroupSize / BlockSize);
+ // If all blocks in the group are released, we will do range marking
+ // which is fast. Otherwise, we will wait until we have accumulated
+ // a certain amount of free memory.
+ const bool ReachReleaseDelta =
+ MayHaveReleasedAll
+ ? true
+ : PushedBytesDelta >= PageSize * SmallerBlockReleasePageDelta;
+
+ if (!HighDensity) {
+ DCHECK_LE(BytesInBG, ReleaseThreshold);
+          // The following is the usage of a memory group,
+ //
+ // BytesInBG ReleaseThreshold
+ // / \ v
+ // +---+---------------------------+-----+
+ // | | | | |
+ // +---+---------------------------+-----+
+ // \ / ^
+ // PushedBytesDelta GroupEnd
+ MinDistToThreshold =
+ Min(MinDistToThreshold,
+ ReleaseThreshold - BytesInBG + PushedBytesDelta);
+ } else {
+ // If it reaches high density at this round, the next time we will try
+ // to release is based on SmallerBlockReleasePageDelta
+ MinDistToThreshold =
+ Min(MinDistToThreshold, PageSize * SmallerBlockReleasePageDelta);
+ }
+
+ if (!HighDensity || !ReachReleaseDelta) {
+ Prev = BG;
+ BG = BG->Next;
+ continue;
+ }
+ }
+
+ // If `BG` is the first BatchGroupT in the list, we only need to advance
+ // `BG` and call FreeListInfo.BlockList::pop_front(). No update is needed
+ // for `Prev`.
+ //
+ // (BG) (BG->Next)
+ // Prev Cur BG
+ // | | |
+ // v v v
+ // nil +--+ +--+
+ // |X | -> | | -> ...
+ // +--+ +--+
+ //
+ // Otherwise, `Prev` will be used to extract the `Cur` from the
+ // `FreeListInfo.BlockList`.
+ //
+ // (BG) (BG->Next)
+ // Prev Cur BG
+ // | | |
+ // v v v
+ // +--+ +--+ +--+
+ // | | -> |X | -> | | -> ...
+ // +--+ +--+ +--+
+ //
+ // After FreeListInfo.BlockList::extract(),
+ //
+ // Prev Cur BG
+ // | | |
+ // v v v
+ // +--+ +--+ +--+
+ // | |-+ |X | +->| | -> ...
+ // +--+ | +--+ | +--+
+ // +--------+
+ //
+ // Note that we need to advance before pushing this BatchGroup to
+ // GroupsToRelease because it's a destructive operation.
+
+ BatchGroupT *Cur = BG;
+ BG = BG->Next;
+
+ // Ideally, we may want to update this only after successful release.
+ // However, for smaller blocks, each block marking is a costly operation.
+ // Therefore, we update it earlier.
+ // TODO: Consider updating this after releasing pages if `ReleaseRecorder`
+ // can tell the released bytes in each group.
+ Cur->BytesInBGAtLastCheckpoint = BytesInBG;
+
+ if (Prev != nullptr)
+ Region->FreeListInfo.BlockList.extract(Prev, Cur);
+ else
+ Region->FreeListInfo.BlockList.pop_front();
+ GroupsToRelease.push_back(Cur);
}
- ReleaseRecorder Recorder(Region->RegionBeg, &Region->Data);
- const uptr CompactPtrBase = getCompactPtrBaseByClassId(ClassId);
+ // Only small blocks have the adaptive `TryReleaseThreshold`.
+ if (isSmallBlock(BlockSize)) {
+      // If MinDistToThreshold was never updated, each memory group may have
+      // had less than a page size pushed. In that case, just set it back to
+      // the default.
+ if (MinDistToThreshold == GroupSize)
+ MinDistToThreshold = PageSize * SmallerBlockReleasePageDelta;
+ Region->TryReleaseThreshold = MinDistToThreshold;
+ }
+
+ return GroupsToRelease;
+ }
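
To make the small-block density heuristic concrete, here is the ReleaseThreshold arithmetic with invented sizes:

    #include <cassert>
    #include <cstdint>

    // Invented sizes: BlockSize = 64 in a fully allocated 64 KiB group. The
    // formula asks for 8x%..9x% of the group to be free; the larger the
    // block, the lower the required percentage.
    int main() {
      const uint64_t AllocatedGroupSize = 64 * 1024;
      const uint64_t BlockSize = 64;
      const uint64_t ReleaseThreshold =
          (AllocatedGroupSize * (100 - 1 - BlockSize / 16)) / 100;
      assert(ReleaseThreshold == 62259); // 95% of 65536, rounded down
      return 0;
    }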
+
+ PageReleaseContext
+ markFreeBlocks(RegionInfo *Region, const uptr BlockSize,
+ const uptr AllocatedUserEnd, const uptr CompactPtrBase,
+ SinglyLinkedList<BatchGroupT> &GroupsToRelease)
+ REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
+ const uptr GroupSize = (1UL << GroupSizeLog);
auto DecompactPtr = [CompactPtrBase](CompactPtrT CompactPtr) {
return decompactPtrInternal(CompactPtrBase, CompactPtr);
};
- auto SkipRegion = [](UNUSED uptr RegionIndex) { return false; };
- releaseFreeMemoryToOS(Region->FreeList, Region->AllocatedUser, 1U,
- BlockSize, &Recorder, DecompactPtr, SkipRegion);
- if (Recorder.getReleasedRangesCount() > 0) {
- Region->ReleaseInfo.PushedBlocksAtLastRelease =
- Region->Stats.PushedBlocks;
- Region->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
- Region->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
+ const uptr ReleaseBase = decompactGroupBase(
+ CompactPtrBase, GroupsToRelease.front()->CompactPtrGroupBase);
+ const uptr LastGroupEnd =
+ Min(decompactGroupBase(CompactPtrBase,
+ GroupsToRelease.back()->CompactPtrGroupBase) +
+ GroupSize,
+ AllocatedUserEnd);
+    // The last block may straddle the group boundary. Round up to BlockSize
+    // to get the exact range.
+ const uptr ReleaseEnd =
+ roundUpSlow(LastGroupEnd - Region->RegionBeg, BlockSize) +
+ Region->RegionBeg;
+ const uptr ReleaseRangeSize = ReleaseEnd - ReleaseBase;
+ const uptr ReleaseOffset = ReleaseBase - Region->RegionBeg;
+
+ PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
+ ReleaseRangeSize, ReleaseOffset);
+    // In the rare case that the PageMap allocation fails, we won't be able
+    // to do the page release.
+ if (UNLIKELY(!Context.ensurePageMapAllocated()))
+ return Context;
+
+ for (BatchGroupT &BG : GroupsToRelease) {
+ const uptr BatchGroupBase =
+ decompactGroupBase(CompactPtrBase, BG.CompactPtrGroupBase);
+ const uptr BatchGroupEnd = BatchGroupBase + GroupSize;
+ const uptr AllocatedGroupSize = AllocatedUserEnd >= BatchGroupEnd
+ ? GroupSize
+ : AllocatedUserEnd - BatchGroupBase;
+ const uptr BatchGroupUsedEnd = BatchGroupBase + AllocatedGroupSize;
+ const bool MayContainLastBlockInRegion =
+ BatchGroupUsedEnd == AllocatedUserEnd;
+ const bool BlockAlignedWithUsedEnd =
+ (BatchGroupUsedEnd - Region->RegionBeg) % BlockSize == 0;
+
+ uptr MaxContainedBlocks = AllocatedGroupSize / BlockSize;
+ if (!BlockAlignedWithUsedEnd)
+ ++MaxContainedBlocks;
+
+ const uptr NumBlocks = (BG.Batches.size() - 1) * BG.MaxCachedPerBatch +
+ BG.Batches.front()->getCount();
+
+ if (NumBlocks == MaxContainedBlocks) {
+ for (const auto &It : BG.Batches) {
+ if (&It != BG.Batches.front())
+ DCHECK_EQ(It.getCount(), BG.MaxCachedPerBatch);
+ for (u16 I = 0; I < It.getCount(); ++I)
+ DCHECK_EQ(compactPtrGroup(It.get(I)), BG.CompactPtrGroupBase);
+ }
+
+ Context.markRangeAsAllCounted(BatchGroupBase, BatchGroupUsedEnd,
+ Region->RegionBeg, /*RegionIndex=*/0,
+ Region->MemMapInfo.AllocatedUser);
+ } else {
+ DCHECK_LT(NumBlocks, MaxContainedBlocks);
+        // Note that we don't always visit the blocks in each BatchGroup, so
+        // we may miss the chance of releasing certain pages that cross
+        // BatchGroups.
+ Context.markFreeBlocksInRegion(
+ BG.Batches, DecompactPtr, Region->RegionBeg, /*RegionIndex=*/0,
+ Region->MemMapInfo.AllocatedUser, MayContainLastBlockInRegion);
+ }
}
- Region->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();
- return Recorder.getReleasedBytes();
+
+ DCHECK(Context.hasBlockMarked());
+
+ return Context;
}
+
+ void mergeGroupsToReleaseBack(RegionInfo *Region,
+ SinglyLinkedList<BatchGroupT> &GroupsToRelease)
+ REQUIRES(Region->MMLock) EXCLUDES(Region->FLLock) {
+ ScopedLock L(Region->FLLock);
+
+    // After merging two freelists, we may have redundant `BatchGroup`s that
+    // need to be recycled. The number of unused `BatchGroup`s is expected to
+    // be small, so we pick a constant inferred from real programs.
+ constexpr uptr MaxUnusedSize = 8;
+ CompactPtrT Blocks[MaxUnusedSize];
+ u32 Idx = 0;
+ RegionInfo *BatchClassRegion = getRegionInfo(SizeClassMap::BatchClassId);
+    // We can't call pushBatchClassBlocks() to recycle the unused `BatchGroup`s
+    // while we are manipulating the freelist of `BatchClassRegion`. Instead,
+    // we should just push them back to that freelist as we merge two
+    // `BatchGroup`s. This logic hasn't been implemented because we don't yet
+    // support releasing pages in `BatchClassRegion`.
+ DCHECK_NE(BatchClassRegion, Region);
+
+ // Merge GroupsToRelease back to the Region::FreeListInfo.BlockList. Note
+ // that both `Region->FreeListInfo.BlockList` and `GroupsToRelease` are
+ // sorted.
+ for (BatchGroupT *BG = Region->FreeListInfo.BlockList.front(),
+ *Prev = nullptr;
+ ;) {
+ if (BG == nullptr || GroupsToRelease.empty()) {
+ if (!GroupsToRelease.empty())
+ Region->FreeListInfo.BlockList.append_back(&GroupsToRelease);
+ break;
+ }
+
+ DCHECK(!BG->Batches.empty());
+
+ if (BG->CompactPtrGroupBase <
+ GroupsToRelease.front()->CompactPtrGroupBase) {
+ Prev = BG;
+ BG = BG->Next;
+ continue;
+ }
+
+ BatchGroupT *Cur = GroupsToRelease.front();
+ TransferBatchT *UnusedTransferBatch = nullptr;
+ GroupsToRelease.pop_front();
+
+ if (BG->CompactPtrGroupBase == Cur->CompactPtrGroupBase) {
+ BG->PushedBlocks += Cur->PushedBlocks;
+ // We have updated `BatchGroup::BytesInBGAtLastCheckpoint` while
+ // collecting the `GroupsToRelease`.
+ BG->BytesInBGAtLastCheckpoint = Cur->BytesInBGAtLastCheckpoint;
+ const uptr MaxCachedPerBatch = BG->MaxCachedPerBatch;
+
+        // Note that the first TransferBatch in each of the two `Batches`
+        // lists may not be full, and only the first TransferBatch may hold
+        // non-full blocks. Thus we have to merge them before appending one
+        // list to the other.
+ if (Cur->Batches.front()->getCount() == MaxCachedPerBatch) {
+ BG->Batches.append_back(&Cur->Batches);
+ } else {
+ TransferBatchT *NonFullBatch = Cur->Batches.front();
+ Cur->Batches.pop_front();
+ const u16 NonFullBatchCount = NonFullBatch->getCount();
+ // The remaining Batches in `Cur` are full.
+ BG->Batches.append_back(&Cur->Batches);
+
+ if (BG->Batches.front()->getCount() == MaxCachedPerBatch) {
+ // Only 1 non-full TransferBatch, push it to the front.
+ BG->Batches.push_front(NonFullBatch);
+ } else {
+ const u16 NumBlocksToMove = static_cast<u16>(
+ Min(static_cast<u16>(MaxCachedPerBatch -
+ BG->Batches.front()->getCount()),
+ NonFullBatchCount));
+ BG->Batches.front()->appendFromTransferBatch(NonFullBatch,
+ NumBlocksToMove);
+ if (NonFullBatch->isEmpty())
+ UnusedTransferBatch = NonFullBatch;
+ else
+ BG->Batches.push_front(NonFullBatch);
+ }
+ }
+
+ const u32 NeededSlots = UnusedTransferBatch == nullptr ? 1U : 2U;
+ if (UNLIKELY(Idx + NeededSlots > MaxUnusedSize)) {
+ ScopedLock L(BatchClassRegion->FLLock);
+ pushBatchClassBlocks(BatchClassRegion, Blocks, Idx);
+ if (conditionVariableEnabled())
+ BatchClassRegion->FLLockCV.notifyAll(BatchClassRegion->FLLock);
+ Idx = 0;
+ }
+ Blocks[Idx++] =
+ compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(Cur));
+ if (UnusedTransferBatch) {
+ Blocks[Idx++] =
+ compactPtr(SizeClassMap::BatchClassId,
+ reinterpret_cast<uptr>(UnusedTransferBatch));
+ }
+ Prev = BG;
+ BG = BG->Next;
+ continue;
+ }
+
+      // At this point, `BG` is the first BatchGroup with a CompactPtrGroupBase
+      // larger than that of the first element in `GroupsToRelease`. We need to
+      // insert `GroupsToRelease::front()` (which is `Cur`) before `BG`.
+ //
+ // 1. If `Prev` is nullptr, we simply push `Cur` to the front of
+ // FreeListInfo.BlockList.
+ // 2. Otherwise, use `insert()` which inserts an element next to `Prev`.
+ //
+ // Afterwards, we don't need to advance `BG` because the order between
+ // `BG` and the new `GroupsToRelease::front()` hasn't been checked.
+ if (Prev == nullptr)
+ Region->FreeListInfo.BlockList.push_front(Cur);
+ else
+ Region->FreeListInfo.BlockList.insert(Prev, Cur);
+ DCHECK_EQ(Cur->Next, BG);
+ Prev = Cur;
+ }
+
+ if (Idx != 0) {
+ ScopedLock L(BatchClassRegion->FLLock);
+ pushBatchClassBlocks(BatchClassRegion, Blocks, Idx);
+ if (conditionVariableEnabled())
+ BatchClassRegion->FLLockCV.notifyAll(BatchClassRegion->FLLock);
+ }
+
+ if (SCUDO_DEBUG) {
+ BatchGroupT *Prev = Region->FreeListInfo.BlockList.front();
+ for (BatchGroupT *Cur = Prev->Next; Cur != nullptr;
+ Prev = Cur, Cur = Cur->Next) {
+ CHECK_LT(Prev->CompactPtrGroupBase, Cur->CompactPtrGroupBase);
+ }
+ }
+
+ if (conditionVariableEnabled())
+ Region->FLLockCV.notifyAll(Region->FLLock);
+ }
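
Structurally, mergeGroupsToReleaseBack() is a merge of two sorted singly linked lists, with the extra twist that equal group bases are folded into one BatchGroup rather than kept side by side. A sketch of just the ordering part using std::forward_list, with ints standing in for group bases and the duplicate-merging omitted:

    #include <cassert>
    #include <forward_list>

    int main() {
      std::forward_list<int> FreeList = {1, 4, 7};
      std::forward_list<int> Released = {2, 4, 9};
      FreeList.merge(Released); // {1, 2, 4, 4, 7, 9}, both orders preserved
      int Prev = 0;
      for (int V : FreeList) {
        assert(Prev <= V); // sorted invariant, as the SCUDO_DEBUG loop checks
        Prev = V;
      }
      return 0;
    }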
+
+ // TODO: `PrimaryBase` can be obtained from ReservedMemory. This needs to be
+ // deprecated.
+ uptr PrimaryBase = 0;
+ ReservedMemoryT ReservedMemory = {};
+  // The minimum size of pushed blocks for which we will try to release the
+  // pages in that size class.
+ uptr SmallerBlockReleasePageDelta = 0;
+ atomic_s32 ReleaseToOsIntervalMs = {};
+ alignas(SCUDO_CACHE_LINE_SIZE) RegionInfo RegionInfoArray[NumClasses];
};
} // namespace scudo
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/quarantine.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/quarantine.h
index 2d231c3a28db..b5f8db0e87c2 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/quarantine.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/quarantine.h
@@ -12,6 +12,7 @@
#include "list.h"
#include "mutex.h"
#include "string_utils.h"
+#include "thread_annotations.h"
namespace scudo {
@@ -172,7 +173,7 @@ public:
typedef QuarantineCache<Callback> CacheT;
using ThisT = GlobalQuarantine<Callback, Node>;
- void init(uptr Size, uptr CacheSize) {
+ void init(uptr Size, uptr CacheSize) NO_THREAD_SAFETY_ANALYSIS {
DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
DCHECK_EQ(atomic_load_relaxed(&MaxSize), 0U);
DCHECK_EQ(atomic_load_relaxed(&MinSize), 0U);
@@ -191,22 +192,31 @@ public:
uptr getMaxSize() const { return atomic_load_relaxed(&MaxSize); }
uptr getCacheSize() const { return atomic_load_relaxed(&MaxCacheSize); }
+  // This is supposed to be used in tests only.
+ bool isEmpty() {
+ ScopedLock L(CacheMutex);
+ return Cache.getSize() == 0U;
+ }
+
void put(CacheT *C, Callback Cb, Node *Ptr, uptr Size) {
C->enqueue(Cb, Ptr, Size);
if (C->getSize() > getCacheSize())
drain(C, Cb);
}
- void NOINLINE drain(CacheT *C, Callback Cb) {
+ void NOINLINE drain(CacheT *C, Callback Cb) EXCLUDES(CacheMutex) {
+ bool needRecycle = false;
{
ScopedLock L(CacheMutex);
Cache.transfer(C);
+ needRecycle = Cache.getSize() > getMaxSize();
}
- if (Cache.getSize() > getMaxSize() && RecycleMutex.tryLock())
+
+ if (needRecycle && RecycleMutex.tryLock())
recycle(atomic_load_relaxed(&MinSize), Cb);
}
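
The drain() change above samples the guarded cache size while still holding CacheMutex and stashes the decision in a local, instead of re-reading shared state after the lock is dropped. A minimal sketch of that pattern with simplified std::mutex types:

    #include <cstddef>
    #include <mutex>

    struct Q {
      std::mutex M;
      size_t Size = 0; // guarded by M
      size_t Limit = 1024;

      void drain(size_t Incoming) {
        bool NeedRecycle = false;
        {
          std::lock_guard<std::mutex> L(M);
          Size += Incoming;
          NeedRecycle = Size > Limit; // read under the lock, not after
        }
        if (NeedRecycle)
          recycle(); // acts on the sampled decision outside the lock
      }
      void recycle() {}
    };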
- void NOINLINE drainAndRecycle(CacheT *C, Callback Cb) {
+ void NOINLINE drainAndRecycle(CacheT *C, Callback Cb) EXCLUDES(CacheMutex) {
{
ScopedLock L(CacheMutex);
Cache.transfer(C);
@@ -215,20 +225,21 @@ public:
recycle(0, Cb);
}
- void getStats(ScopedString *Str) const {
+ void getStats(ScopedString *Str) EXCLUDES(CacheMutex) {
+ ScopedLock L(CacheMutex);
// It assumes that the world is stopped, just as the allocator's printStats.
Cache.getStats(Str);
Str->append("Quarantine limits: global: %zuK; thread local: %zuK\n",
getMaxSize() >> 10, getCacheSize() >> 10);
}
- void disable() {
+ void disable() NO_THREAD_SAFETY_ANALYSIS {
// RecycleMutex must be locked first since we grab CacheMutex within recycle.
RecycleMutex.lock();
CacheMutex.lock();
}
- void enable() {
+ void enable() NO_THREAD_SAFETY_ANALYSIS {
CacheMutex.unlock();
RecycleMutex.unlock();
}
@@ -236,13 +247,14 @@ public:
private:
// Read-only data.
alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex CacheMutex;
- CacheT Cache;
+ CacheT Cache GUARDED_BY(CacheMutex);
alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex RecycleMutex;
atomic_uptr MinSize = {};
atomic_uptr MaxSize = {};
alignas(SCUDO_CACHE_LINE_SIZE) atomic_uptr MaxCacheSize = {};
- void NOINLINE recycle(uptr MinSize, Callback Cb) {
+ void NOINLINE recycle(uptr MinSize, Callback Cb) RELEASE(RecycleMutex)
+ EXCLUDES(CacheMutex) {
CacheT Tmp;
Tmp.init();
{
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/release.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/release.cpp
index 5d7c6c5fc110..875a2b0c1c57 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/release.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/release.cpp
@@ -10,7 +10,8 @@
namespace scudo {
-HybridMutex PackedCounterArray::Mutex = {};
-uptr PackedCounterArray::StaticBuffer[PackedCounterArray::StaticBufferCount];
+BufferPool<RegionPageMap::StaticBufferCount,
+ RegionPageMap::StaticBufferNumElements>
+ RegionPageMap::Buffers;
} // namespace scudo
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/release.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/release.h
index 293a8bc27bab..b6f76a4d2058 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/release.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/release.h
@@ -11,14 +11,46 @@
#include "common.h"
#include "list.h"
+#include "mem_map.h"
#include "mutex.h"
+#include "thread_annotations.h"
namespace scudo {
+template <typename MemMapT> class RegionReleaseRecorder {
+public:
+ RegionReleaseRecorder(MemMapT *RegionMemMap, uptr Base, uptr Offset = 0)
+ : RegionMemMap(RegionMemMap), Base(Base), Offset(Offset) {}
+
+ uptr getReleasedRangesCount() const { return ReleasedRangesCount; }
+
+ uptr getReleasedBytes() const { return ReleasedBytes; }
+
+ uptr getBase() const { return Base; }
+
+  // Releases the [From, To) range of pages back to the OS. Note that `From`
+  // and `To` are offsets from `Base` + Offset.
+ void releasePageRangeToOS(uptr From, uptr To) {
+ const uptr Size = To - From;
+ RegionMemMap->releasePagesToOS(getBase() + Offset + From, Size);
+ ReleasedRangesCount++;
+ ReleasedBytes += Size;
+ }
+
+private:
+ uptr ReleasedRangesCount = 0;
+ uptr ReleasedBytes = 0;
+ MemMapT *RegionMemMap = nullptr;
+ uptr Base = 0;
+ // The release offset from Base. This is used when we know a given range after
+ // Base will not be released.
+ uptr Offset = 0;
+};
+
class ReleaseRecorder {
public:
- ReleaseRecorder(uptr Base, MapPlatformData *Data = nullptr)
- : Base(Base), Data(Data) {}
+ ReleaseRecorder(uptr Base, uptr Offset = 0, MapPlatformData *Data = nullptr)
+ : Base(Base), Offset(Offset), Data(Data) {}
uptr getReleasedRangesCount() const { return ReleasedRangesCount; }
@@ -29,7 +61,7 @@ public:
// Releases [From, To) range of pages back to OS.
void releasePageRangeToOS(uptr From, uptr To) {
const uptr Size = To - From;
- releasePagesToOS(Base, From, Size, Data);
+ releasePagesToOS(Base, From + Offset, Size, Data);
ReleasedRangesCount++;
ReleasedBytes += Size;
}
@@ -37,31 +69,171 @@ public:
private:
uptr ReleasedRangesCount = 0;
uptr ReleasedBytes = 0;
+  // The starting address to release. Note that we may want to combine (Base +
+  // Offset) into a new Base. However, the Base is retrieved from
+  // `MapPlatformData` on Fuchsia, which is not aware of the offset.
+  // Therefore, store them separately to make it work on all platforms.
uptr Base = 0;
+ // The release offset from Base. This is used when we know a given range after
+ // Base will not be released.
+ uptr Offset = 0;
MapPlatformData *Data = nullptr;
};
-// A packed array of Counters. Each counter occupies 2^N bits, enough to store
-// counter's MaxValue. Ctor will try to use a static buffer first, and if that
-// fails (the buffer is too small or already locked), will allocate the
+class FragmentationRecorder {
+public:
+ FragmentationRecorder() = default;
+
+ uptr getReleasedPagesCount() const { return ReleasedPagesCount; }
+
+ void releasePageRangeToOS(uptr From, uptr To) {
+ DCHECK_EQ((To - From) % getPageSizeCached(), 0U);
+ ReleasedPagesCount += (To - From) / getPageSizeCached();
+ }
+
+private:
+ uptr ReleasedPagesCount = 0;
+};
+
+// A buffer pool which holds a fixed number of static buffers of `uptr` elements
+// for fast buffer allocation. If the request size is greater than
+// `StaticBufferNumElements` or if all the static buffers are in use, it'll
+// delegate the allocation to map().
+template <uptr StaticBufferCount, uptr StaticBufferNumElements>
+class BufferPool {
+public:
+  // Preserve 1 bit in the `Mask` so that we don't need a zero-check while
+  // extracting the least significant set bit from the `Mask`.
+ static_assert(StaticBufferCount < SCUDO_WORDSIZE, "");
+ static_assert(isAligned(StaticBufferNumElements * sizeof(uptr),
+ SCUDO_CACHE_LINE_SIZE),
+ "");
+
+ struct Buffer {
+ // Pointer to the buffer's memory, or nullptr if no buffer was allocated.
+ uptr *Data = nullptr;
+
+ // The index of the underlying static buffer, or StaticBufferCount if this
+ // buffer was dynamically allocated. This value is initially set to a poison
+ // value to aid debugging.
+ uptr BufferIndex = ~static_cast<uptr>(0);
+
+ // Only valid if BufferIndex == StaticBufferCount.
+ MemMapT MemMap = {};
+ };
+
+ // Return a zero-initialized buffer which can contain at least the given
+ // number of elements, or a buffer with null `Data` on failure.
+ Buffer getBuffer(const uptr NumElements) {
+ if (UNLIKELY(NumElements > StaticBufferNumElements))
+ return getDynamicBuffer(NumElements);
+
+ uptr index;
+ {
+ // TODO: In general, we expect this operation to be fast, so the waiting
+ // thread won't be put to sleep. The HybridMutex does implement
+ // busy-waiting, but we may want to review the performance and see if
+ // we need an explicit spin lock here.
+ ScopedLock L(Mutex);
+ index = getLeastSignificantSetBitIndex(Mask);
+ if (index < StaticBufferCount)
+ Mask ^= static_cast<uptr>(1) << index;
+ }
+
+ if (index >= StaticBufferCount)
+ return getDynamicBuffer(NumElements);
+
+ Buffer Buf;
+ Buf.Data = &RawBuffer[index * StaticBufferNumElements];
+ Buf.BufferIndex = index;
+ memset(Buf.Data, 0, StaticBufferNumElements * sizeof(uptr));
+ return Buf;
+ }
+
+ void releaseBuffer(Buffer Buf) {
+ DCHECK_NE(Buf.Data, nullptr);
+ DCHECK_LE(Buf.BufferIndex, StaticBufferCount);
+ if (Buf.BufferIndex != StaticBufferCount) {
+ ScopedLock L(Mutex);
+ DCHECK_EQ((Mask & (static_cast<uptr>(1) << Buf.BufferIndex)), 0U);
+ Mask |= static_cast<uptr>(1) << Buf.BufferIndex;
+ } else {
+ Buf.MemMap.unmap(Buf.MemMap.getBase(), Buf.MemMap.getCapacity());
+ }
+ }
+
+ bool isStaticBufferTestOnly(const Buffer &Buf) {
+ DCHECK_NE(Buf.Data, nullptr);
+ DCHECK_LE(Buf.BufferIndex, StaticBufferCount);
+ return Buf.BufferIndex != StaticBufferCount;
+ }
+
+private:
+ Buffer getDynamicBuffer(const uptr NumElements) {
+ // When using a heap-based buffer, precommit the pages backing the
+ // Vmar by passing the |MAP_PRECOMMIT| flag. This allows an optimization
+ // where page fault exceptions are skipped as the allocated memory
+ // is accessed. So far, this is only enabled on Fuchsia; it hasn't shown
+ // a performance benefit on other platforms.
+ const uptr MmapFlags = MAP_ALLOWNOMEM | (SCUDO_FUCHSIA ? MAP_PRECOMMIT : 0);
+ const uptr MappedSize =
+ roundUp(NumElements * sizeof(uptr), getPageSizeCached());
+ Buffer Buf;
+ if (Buf.MemMap.map(/*Addr=*/0, MappedSize, "scudo:counters", MmapFlags)) {
+ Buf.Data = reinterpret_cast<uptr *>(Buf.MemMap.getBase());
+ Buf.BufferIndex = StaticBufferCount;
+ }
+ return Buf;
+ }
+
+ HybridMutex Mutex;
+ // A '1' bit means the buffer at that index is free; '0' means it is in use.
+ uptr Mask GUARDED_BY(Mutex) = ~static_cast<uptr>(0);
+ uptr RawBuffer[StaticBufferCount * StaticBufferNumElements] GUARDED_BY(Mutex);
+};
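A hedged standalone sketch of the bitmask bookkeeping BufferPool relies on: a set bit marks a free static buffer, acquiring one is find-lowest-set-bit-and-clear, and because the bits at and above StaticBufferCount stay set, exhaustion shows up as an out-of-range index with no separate zero check. __builtin_ctzll stands in for getLeastSignificantSetBitIndex.

// Standalone sketch of the free-buffer bitmask; not the scudo BufferPool.
#include <cstdint>
#include <cstdio>

constexpr unsigned StaticBufferCount = 4; // must stay below the word size
uint64_t Mask = ~0ull;                    // '1' = free, '0' = in use

// Returns a free buffer index, or StaticBufferCount or more if none is free:
// the always-set high bits make the exhausted case fall out naturally.
unsigned acquire() {
  unsigned Index = static_cast<unsigned>(__builtin_ctzll(Mask));
  if (Index < StaticBufferCount)
    Mask ^= 1ull << Index; // claim the buffer by clearing its bit
  return Index;
}

void release(unsigned Index) { Mask |= 1ull << Index; }

int main() {
  unsigned A = acquire(), B = acquire();
  std::printf("got %u and %u\n", A, B); // 0 and 1
  release(A);
  std::printf("reacquired %u\n", acquire()); // 0 again
}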
+
+// A Region page map is used to record the usage of pages in the regions. It
+// implements a packed array of Counters. Each counter occupies 2^N bits,
+// enough to store the counter's MaxValue. The ctor will try to use a static
+// buffer first, and if that fails (the buffer is too small or already locked), it will allocate the
// required Buffer via map(). The caller is expected to check whether the
// initialization was successful by checking the isAllocated() result. For
// performance's sake, none of the accessors check the validity of the
// arguments; it is assumed that Index is always in the [0, N) range and the
// value is not incremented past MaxValue.
-class PackedCounterArray {
+class RegionPageMap {
public:
- PackedCounterArray(uptr NumberOfRegions, uptr CountersPerRegion,
- uptr MaxValue)
- : Regions(NumberOfRegions), NumCounters(CountersPerRegion) {
- DCHECK_GT(Regions, 0);
- DCHECK_GT(NumCounters, 0);
+ RegionPageMap()
+ : Regions(0), NumCounters(0), CounterSizeBitsLog(0), CounterMask(0),
+ PackingRatioLog(0), BitOffsetMask(0), SizePerRegion(0),
+ BufferNumElements(0) {}
+ RegionPageMap(uptr NumberOfRegions, uptr CountersPerRegion, uptr MaxValue) {
+ reset(NumberOfRegions, CountersPerRegion, MaxValue);
+ }
+ ~RegionPageMap() {
+ if (!isAllocated())
+ return;
+ Buffers.releaseBuffer(Buffer);
+ Buffer = {};
+ }
+
+ // The lock of `StaticBuffer` is acquired conditionally and there's no easy
+ // way to specify the thread-safety attribute properly with the current code
+ // structure. Besides, this is the only place where we may want to check
+ // thread safety. Therefore, it's fine to bypass the thread-safety analysis
+ // for now.
+ void reset(uptr NumberOfRegion, uptr CountersPerRegion, uptr MaxValue) {
+ DCHECK_GT(NumberOfRegion, 0);
+ DCHECK_GT(CountersPerRegion, 0);
DCHECK_GT(MaxValue, 0);
- constexpr uptr MaxCounterBits = sizeof(*Buffer) * 8UL;
+
+ Regions = NumberOfRegion;
+ NumCounters = CountersPerRegion;
+
+ constexpr uptr MaxCounterBits = sizeof(*Buffer.Data) * 8UL;
// Rounding counter storage size up to the power of two allows for using
// bit shifts calculating particular counter's Index and offset.
const uptr CounterSizeBits =
- roundUpToPowerOfTwo(getMostSignificantSetBitIndex(MaxValue) + 1);
+ roundUpPowerOfTwo(getMostSignificantSetBitIndex(MaxValue) + 1);
DCHECK_LE(CounterSizeBits, MaxCounterBits);
CounterSizeBitsLog = getLog2(CounterSizeBits);
CounterMask = ~(static_cast<uptr>(0)) >> (MaxCounterBits - CounterSizeBits);
@@ -72,30 +244,13 @@ public:
BitOffsetMask = PackingRatio - 1;
SizePerRegion =
- roundUpTo(NumCounters, static_cast<uptr>(1U) << PackingRatioLog) >>
+ roundUp(NumCounters, static_cast<uptr>(1U) << PackingRatioLog) >>
PackingRatioLog;
- BufferSize = SizePerRegion * sizeof(*Buffer) * Regions;
- if (BufferSize <= (StaticBufferCount * sizeof(Buffer[0])) &&
- Mutex.tryLock()) {
- Buffer = &StaticBuffer[0];
- memset(Buffer, 0, BufferSize);
- } else {
- Buffer = reinterpret_cast<uptr *>(
- map(nullptr, roundUpTo(BufferSize, getPageSizeCached()),
- "scudo:counters", MAP_ALLOWNOMEM));
- }
- }
- ~PackedCounterArray() {
- if (!isAllocated())
- return;
- if (Buffer == &StaticBuffer[0])
- Mutex.unlock();
- else
- unmap(reinterpret_cast<void *>(Buffer),
- roundUpTo(BufferSize, getPageSizeCached()));
+ BufferNumElements = SizePerRegion * Regions;
+ Buffer = Buffers.getBuffer(BufferNumElements);
}
- bool isAllocated() const { return !!Buffer; }
+ bool isAllocated() const { return Buffer.Data != nullptr; }
uptr getCount() const { return NumCounters; }
@@ -104,7 +259,8 @@ public:
DCHECK_LT(I, NumCounters);
const uptr Index = I >> PackingRatioLog;
const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
- return (Buffer[Region * SizePerRegion + Index] >> BitOffset) & CounterMask;
+ return (Buffer.Data[Region * SizePerRegion + Index] >> BitOffset) &
+ CounterMask;
}
void inc(uptr Region, uptr I) const {
@@ -112,8 +268,20 @@ public:
const uptr Index = I >> PackingRatioLog;
const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
- Buffer[Region * SizePerRegion + Index] += static_cast<uptr>(1U)
- << BitOffset;
+ DCHECK_EQ(isAllCounted(Region, I), false);
+ Buffer.Data[Region * SizePerRegion + Index] += static_cast<uptr>(1U)
+ << BitOffset;
+ }
+
+ void incN(uptr Region, uptr I, uptr N) const {
+ DCHECK_GT(N, 0U);
+ DCHECK_LE(N, CounterMask);
+ DCHECK_LE(get(Region, I), CounterMask - N);
+ const uptr Index = I >> PackingRatioLog;
+ const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
+ DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
+ DCHECK_EQ(isAllCounted(Region, I), false);
+ Buffer.Data[Region * SizePerRegion + Index] += N << BitOffset;
}
void incRange(uptr Region, uptr From, uptr To) const {
@@ -123,33 +291,67 @@ public:
inc(Region, I);
}
- uptr getBufferSize() const { return BufferSize; }
+ // Set the counter to the max value. Note that the max number of blocks in a
+ // page may vary. To provide an easier way to tell if all the blocks are
+ // counted for different pages, set to the same max value to denote the
+ // all-counted status.
+ void setAsAllCounted(uptr Region, uptr I) const {
+ DCHECK_LE(get(Region, I), CounterMask);
+ const uptr Index = I >> PackingRatioLog;
+ const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
+ DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
+ Buffer.Data[Region * SizePerRegion + Index] |= CounterMask << BitOffset;
+ }
+ void setAsAllCountedRange(uptr Region, uptr From, uptr To) const {
+ DCHECK_LE(From, To);
+ const uptr Top = Min(To + 1, NumCounters);
+ for (uptr I = From; I < Top; I++)
+ setAsAllCounted(Region, I);
+ }
+
+ bool updateAsAllCountedIf(uptr Region, uptr I, uptr MaxCount) {
+ const uptr Count = get(Region, I);
+ if (Count == CounterMask)
+ return true;
+ if (Count == MaxCount) {
+ setAsAllCounted(Region, I);
+ return true;
+ }
+ return false;
+ }
+ bool isAllCounted(uptr Region, uptr I) const {
+ return get(Region, I) == CounterMask;
+ }
- static const uptr StaticBufferCount = 2048U;
+ uptr getBufferNumElements() const { return BufferNumElements; }
private:
- const uptr Regions;
- const uptr NumCounters;
+ // We may consider making this configurable if there are cases which may
+ // benefit from this.
+ static const uptr StaticBufferCount = 2U;
+ static const uptr StaticBufferNumElements = 512U;
+ using BufferPoolT = BufferPool<StaticBufferCount, StaticBufferNumElements>;
+ static BufferPoolT Buffers;
+
+ uptr Regions;
+ uptr NumCounters;
uptr CounterSizeBitsLog;
uptr CounterMask;
uptr PackingRatioLog;
uptr BitOffsetMask;
uptr SizePerRegion;
- uptr BufferSize;
- uptr *Buffer;
-
- static HybridMutex Mutex;
- static uptr StaticBuffer[StaticBufferCount];
+ uptr BufferNumElements;
+ BufferPoolT::Buffer Buffer;
};
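A standalone sketch of the packed-counter arithmetic RegionPageMap builds on: counters of a power-of-two bit width are packed into 64-bit words, and a counter's word index and bit offset fall out of shifts and masks. This is a simplification (single region, std::vector storage), not the scudo implementation.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

struct PackedCounters {
  unsigned CounterBitsLog;  // log2 of the (power-of-two) counter bit width
  uint64_t CounterMask;     // extracts one counter from a word
  unsigned PackingRatioLog; // log2 of counters per 64-bit word
  uint64_t BitOffsetMask;   // counters-per-word - 1
  std::vector<uint64_t> Buf;

  PackedCounters(size_t N, unsigned BitsPerCounter)
      : CounterBitsLog(static_cast<unsigned>(__builtin_ctz(BitsPerCounter))),
        CounterMask(BitsPerCounter == 64 ? ~0ull
                                         : (1ull << BitsPerCounter) - 1),
        PackingRatioLog(6 - CounterBitsLog), // 64 = 2^6 bits per word
        BitOffsetMask((1ull << PackingRatioLog) - 1),
        Buf((N + BitOffsetMask) >> PackingRatioLog) {}

  uint64_t get(size_t I) const {
    const uint64_t Off = (I & BitOffsetMask) << CounterBitsLog;
    return (Buf[I >> PackingRatioLog] >> Off) & CounterMask;
  }
  void inc(size_t I) {
    const uint64_t Off = (I & BitOffsetMask) << CounterBitsLog;
    Buf[I >> PackingRatioLog] += 1ull << Off;
  }
};

int main() {
  PackedCounters C(/*N=*/100, /*BitsPerCounter=*/4); // 16 counters per word
  C.inc(37); C.inc(37); C.inc(38);
  std::printf("%llu %llu\n", (unsigned long long)C.get(37),
              (unsigned long long)C.get(38)); // prints: 2 1
}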
template <class ReleaseRecorderT> class FreePagesRangeTracker {
public:
- explicit FreePagesRangeTracker(ReleaseRecorderT *Recorder)
+ explicit FreePagesRangeTracker(ReleaseRecorderT &Recorder)
: Recorder(Recorder), PageSizeLog(getLog2(getPageSizeCached())) {}
- void processNextPage(bool Freed) {
- if (Freed) {
+ void processNextPage(bool Released) {
+ if (Released) {
if (!InRange) {
CurrentRangeStatePage = CurrentPage;
InRange = true;
@@ -170,113 +372,271 @@ public:
private:
void closeOpenedRange() {
if (InRange) {
- Recorder->releasePageRangeToOS((CurrentRangeStatePage << PageSizeLog),
- (CurrentPage << PageSizeLog));
+ Recorder.releasePageRangeToOS((CurrentRangeStatePage << PageSizeLog),
+ (CurrentPage << PageSizeLog));
InRange = false;
}
}
- ReleaseRecorderT *const Recorder;
+ ReleaseRecorderT &Recorder;
const uptr PageSizeLog;
bool InRange = false;
uptr CurrentPage = 0;
uptr CurrentRangeStatePage = 0;
};
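A standalone sketch of the range-coalescing state machine above: consecutive released pages are merged into a single [start, end) callback, and a kept page closes the open range. The printout stands in for releasePageRangeToOS.

#include <cstdint>
#include <cstdio>
#include <initializer_list>

struct TrackerSketch {
  bool InRange = false;
  uint64_t CurrentPage = 0, RangeStart = 0;

  void processNextPage(bool Released) {
    if (Released && !InRange) {
      RangeStart = CurrentPage; // open a new range
      InRange = true;
    } else if (!Released && InRange) {
      flush(); // a kept page closes the open range
    }
    CurrentPage++;
  }
  void flush() {
    if (InRange)
      std::printf("release pages [%llu, %llu)\n",
                  (unsigned long long)RangeStart,
                  (unsigned long long)CurrentPage);
    InRange = false;
  }
};

int main() {
  TrackerSketch T;
  for (bool Released : {true, true, false, true, true, true})
    T.processNextPage(Released);
  T.flush(); // emits [0, 2) and then [3, 6)
}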
-template <class TransferBatchT, class ReleaseRecorderT, typename DecompactPtrT,
- typename SkipRegionT>
-NOINLINE void
-releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> &FreeList,
- uptr RegionSize, uptr NumberOfRegions, uptr BlockSize,
- ReleaseRecorderT *Recorder, DecompactPtrT DecompactPtr,
- SkipRegionT SkipRegion) {
- const uptr PageSize = getPageSizeCached();
-
- // Figure out the number of chunks per page and whether we can take a fast
- // path (the number of chunks per page is the same for all pages).
- uptr FullPagesBlockCountMax;
- bool SameBlockCountPerPage;
- if (BlockSize <= PageSize) {
- if (PageSize % BlockSize == 0) {
- // Same number of chunks per page, no cross overs.
- FullPagesBlockCountMax = PageSize / BlockSize;
- SameBlockCountPerPage = true;
- } else if (BlockSize % (PageSize % BlockSize) == 0) {
- // Some chunks are crossing page boundaries, which means that the page
- // contains one or two partial chunks, but all pages contain the same
- // number of chunks.
- FullPagesBlockCountMax = PageSize / BlockSize + 1;
- SameBlockCountPerPage = true;
+struct PageReleaseContext {
+ PageReleaseContext(uptr BlockSize, uptr NumberOfRegions, uptr ReleaseSize,
+ uptr ReleaseOffset = 0)
+ : BlockSize(BlockSize), NumberOfRegions(NumberOfRegions) {
+ PageSize = getPageSizeCached();
+ if (BlockSize <= PageSize) {
+ if (PageSize % BlockSize == 0) {
+ // Same number of chunks per page, no cross overs.
+ FullPagesBlockCountMax = PageSize / BlockSize;
+ SameBlockCountPerPage = true;
+ } else if (BlockSize % (PageSize % BlockSize) == 0) {
+ // Some chunks are crossing page boundaries, which means that the page
+ // contains one or two partial chunks, but all pages contain the same
+ // number of chunks.
+ FullPagesBlockCountMax = PageSize / BlockSize + 1;
+ SameBlockCountPerPage = true;
+ } else {
+ // Some chunks are crossing page boundaries, which means that the page
+ // contains one or two partial chunks.
+ FullPagesBlockCountMax = PageSize / BlockSize + 2;
+ SameBlockCountPerPage = false;
+ }
} else {
- // Some chunks are crossing page boundaries, which means that the page
- // contains one or two partial chunks.
- FullPagesBlockCountMax = PageSize / BlockSize + 2;
- SameBlockCountPerPage = false;
+ if (BlockSize % PageSize == 0) {
+ // One chunk covers multiple pages, no cross overs.
+ FullPagesBlockCountMax = 1;
+ SameBlockCountPerPage = true;
+ } else {
+ // One chunk covers multiple pages, and some chunks cross page
+ // boundaries. Some pages contain one chunk, some contain two.
+ FullPagesBlockCountMax = 2;
+ SameBlockCountPerPage = false;
+ }
}
- } else {
- if (BlockSize % PageSize == 0) {
- // One chunk covers multiple pages, no cross overs.
- FullPagesBlockCountMax = 1;
- SameBlockCountPerPage = true;
+
+ // TODO: For multiple regions, it's more complicated to support partial
+ // region marking (which includes the complexity of how to handle the last
+ // block in a region). We may consider this after markFreeBlocks() accepts
+ // only free blocks from the same region.
+ if (NumberOfRegions != 1)
+ DCHECK_EQ(ReleaseOffset, 0U);
+
+ PagesCount = roundUp(ReleaseSize, PageSize) / PageSize;
+ PageSizeLog = getLog2(PageSize);
+ ReleasePageOffset = ReleaseOffset >> PageSizeLog;
+ }
+
+ // PageMap is lazily allocated when markFreeBlocks() is invoked.
+ bool hasBlockMarked() const {
+ return PageMap.isAllocated();
+ }
+
+ bool ensurePageMapAllocated() {
+ if (PageMap.isAllocated())
+ return true;
+ PageMap.reset(NumberOfRegions, PagesCount, FullPagesBlockCountMax);
+ // TODO: Log a message when PageMap allocation fails.
+ return PageMap.isAllocated();
+ }
+
+ // Mark all the blocks in the given range [From, To). Instead of visiting
+ // all the blocks, we will just mark the pages as all counted. Note that
+ // `From` and `To` have to be page aligned, with one exception: if `To` is
+ // equal to the RegionSize, it doesn't need to be page aligned.
+ bool markRangeAsAllCounted(uptr From, uptr To, uptr Base,
+ const uptr RegionIndex, const uptr RegionSize) {
+ DCHECK_LT(From, To);
+ DCHECK_LE(To, Base + RegionSize);
+ DCHECK_EQ(From % PageSize, 0U);
+ DCHECK_LE(To - From, RegionSize);
+
+ if (!ensurePageMapAllocated())
+ return false;
+
+ uptr FromInRegion = From - Base;
+ uptr ToInRegion = To - Base;
+ uptr FirstBlockInRange = roundUpSlow(FromInRegion, BlockSize);
+
+ // The straddling block sits across the entire range.
+ if (FirstBlockInRange >= ToInRegion)
+ return true;
+
+ // The first block may not sit at the first page in the range; move
+ // `FromInRegion` to the page containing the first block.
+ FromInRegion = roundDown(FirstBlockInRange, PageSize);
+
+ // When the first block is not aligned to the range boundary, there is a
+ // block sitting across `From`, which looks like,
+ //
+ // From To
+ // V V
+ // +-----------------------------------------------+
+ // +-----+-----+-----+-----+
+ // | | | | | ...
+ // +-----+-----+-----+-----+
+ // |- first page -||- second page -||- ...
+ //
+ // Therefore, we can't just mark the first page as all counted. Instead, we
+ // increment the number of blocks in the first page in the page map and
+ // then round `From` up to the next page.
+ if (FirstBlockInRange != FromInRegion) {
+ DCHECK_GT(FromInRegion + PageSize, FirstBlockInRange);
+ uptr NumBlocksInFirstPage =
+ (FromInRegion + PageSize - FirstBlockInRange + BlockSize - 1) /
+ BlockSize;
+ PageMap.incN(RegionIndex, getPageIndex(FromInRegion),
+ NumBlocksInFirstPage);
+ FromInRegion = roundUp(FromInRegion + 1, PageSize);
+ }
+
+ uptr LastBlockInRange = roundDownSlow(ToInRegion - 1, BlockSize);
+
+ // Note that LastBlockInRange may be smaller than `FromInRegion` at this
+ // point because the range may contain only one block.
+
+ // When the last block sits across `To`, we can't just mark the pages
+ // occupied by the last block as all counted. Instead, we increment the
+ // counters of those pages by 1. The exception is that if it's the last
+ // block in the region, it's fine to mark those pages as all counted.
+ if (LastBlockInRange + BlockSize != RegionSize) {
+ DCHECK_EQ(ToInRegion % PageSize, 0U);
+ // The case below is like,
+ //
+ // From To
+ // V V
+ // +----------------------------------------+
+ // +-----+-----+-----+-----+
+ // | | | | | ...
+ // +-----+-----+-----+-----+
+ // ... -||- last page -||- next page -|
+ //
+ // The last block is not aligned to `To`, so we need to increment the
+ // counter of the `next page` by 1.
+ if (LastBlockInRange + BlockSize != ToInRegion) {
+ PageMap.incRange(RegionIndex, getPageIndex(ToInRegion),
+ getPageIndex(LastBlockInRange + BlockSize - 1));
+ }
} else {
- // One chunk covers multiple pages, Some chunks are crossing page
- // boundaries. Some pages contain one chunk, some contain two.
- FullPagesBlockCountMax = 2;
- SameBlockCountPerPage = false;
+ ToInRegion = RegionSize;
+ }
+
+ // After handling the first page and the last block, it's safe to mark any
+ // page within the range [From, To).
+ if (FromInRegion < ToInRegion) {
+ PageMap.setAsAllCountedRange(RegionIndex, getPageIndex(FromInRegion),
+ getPageIndex(ToInRegion - 1));
}
+
+ return true;
}
- const uptr PagesCount = roundUpTo(RegionSize, PageSize) / PageSize;
- PackedCounterArray Counters(NumberOfRegions, PagesCount,
- FullPagesBlockCountMax);
- if (!Counters.isAllocated())
- return;
-
- const uptr PageSizeLog = getLog2(PageSize);
- const uptr RoundedRegionSize = PagesCount << PageSizeLog;
- const uptr RoundedSize = NumberOfRegions * RoundedRegionSize;
-
- // Iterate over free chunks and count how many free chunks affect each
- // allocated page.
- if (BlockSize <= PageSize && PageSize % BlockSize == 0) {
- // Each chunk affects one page only.
- for (const auto &It : FreeList) {
- for (u32 I = 0; I < It.getCount(); I++) {
- const uptr P = DecompactPtr(It.get(I)) - Recorder->getBase();
- if (P >= RoundedSize)
- continue;
- const uptr RegionIndex = NumberOfRegions == 1U ? 0 : P / RegionSize;
- const uptr PInRegion = P - RegionIndex * RegionSize;
- Counters.inc(RegionIndex, PInRegion >> PageSizeLog);
+ template <class TransferBatchT, typename DecompactPtrT>
+ bool markFreeBlocksInRegion(const IntrusiveList<TransferBatchT> &FreeList,
+ DecompactPtrT DecompactPtr, const uptr Base,
+ const uptr RegionIndex, const uptr RegionSize,
+ bool MayContainLastBlockInRegion) {
+ if (!ensurePageMapAllocated())
+ return false;
+
+ if (MayContainLastBlockInRegion) {
+ const uptr LastBlockInRegion =
+ ((RegionSize / BlockSize) - 1U) * BlockSize;
+ // The last block in a region may not use the entire page, so we mark the
+ // following "pretend" memory block(s) as free in advance.
+ //
+ // Region Boundary
+ // v
+ // -----+-----------------------+
+ // | Last Page | <- Rounded Region Boundary
+ // -----+-----------------------+
+ // |-----||- trailing blocks -|
+ // ^
+ // last block
+ const uptr RoundedRegionSize = roundUp(RegionSize, PageSize);
+ const uptr TrailingBlockBase = LastBlockInRegion + BlockSize;
+ // If the difference between `RoundedRegionSize` and
+ // `TrailingBlockBase` is larger than a page, that implies the reported
+ // `RegionSize` may not be accurate.
+ DCHECK_LT(RoundedRegionSize - TrailingBlockBase, PageSize);
+
+ // Only the last page touched by the last block needs the trailing blocks
+ // marked. Note that if the last "pretend" block straddles the boundary, we
+ // still have to count it in so that the per-page block counting stays
+ // consistent.
+ uptr NumTrailingBlocks =
+ (roundUpSlow(RoundedRegionSize - TrailingBlockBase, BlockSize) +
+ BlockSize - 1) /
+ BlockSize;
+ if (NumTrailingBlocks > 0) {
+ PageMap.incN(RegionIndex, getPageIndex(TrailingBlockBase),
+ NumTrailingBlocks);
}
}
- } else {
- // In all other cases chunks might affect more than one page.
- DCHECK_GE(RegionSize, BlockSize);
- const uptr LastBlockInRegion = ((RegionSize / BlockSize) - 1U) * BlockSize;
- for (const auto &It : FreeList) {
- for (u32 I = 0; I < It.getCount(); I++) {
- const uptr P = DecompactPtr(It.get(I)) - Recorder->getBase();
- if (P >= RoundedSize)
- continue;
- const uptr RegionIndex = NumberOfRegions == 1U ? 0 : P / RegionSize;
- uptr PInRegion = P - RegionIndex * RegionSize;
- Counters.incRange(RegionIndex, PInRegion >> PageSizeLog,
- (PInRegion + BlockSize - 1) >> PageSizeLog);
- // The last block in a region might straddle a page, so if it's
- // free, we mark the following "pretend" memory block(s) as free.
- if (PInRegion == LastBlockInRegion) {
- PInRegion += BlockSize;
- while (PInRegion < RoundedRegionSize) {
- Counters.incRange(RegionIndex, PInRegion >> PageSizeLog,
- (PInRegion + BlockSize - 1) >> PageSizeLog);
- PInRegion += BlockSize;
- }
+
+ // Iterate over free chunks and count how many free chunks affect each
+ // allocated page.
+ if (BlockSize <= PageSize && PageSize % BlockSize == 0) {
+ // Each chunk affects one page only.
+ for (const auto &It : FreeList) {
+ for (u16 I = 0; I < It.getCount(); I++) {
+ const uptr PInRegion = DecompactPtr(It.get(I)) - Base;
+ DCHECK_LT(PInRegion, RegionSize);
+ PageMap.inc(RegionIndex, getPageIndex(PInRegion));
+ }
+ }
+ } else {
+ // In all other cases chunks might affect more than one page.
+ DCHECK_GE(RegionSize, BlockSize);
+ for (const auto &It : FreeList) {
+ for (u16 I = 0; I < It.getCount(); I++) {
+ const uptr PInRegion = DecompactPtr(It.get(I)) - Base;
+ PageMap.incRange(RegionIndex, getPageIndex(PInRegion),
+ getPageIndex(PInRegion + BlockSize - 1));
}
}
}
+
+ return true;
}
+ uptr getPageIndex(uptr P) { return (P >> PageSizeLog) - ReleasePageOffset; }
+ uptr getReleaseOffset() { return ReleasePageOffset << PageSizeLog; }
+
+ uptr BlockSize;
+ uptr NumberOfRegions;
+ // For partial region marking, the pages in front do not need to be
+ // counted.
+ uptr ReleasePageOffset;
+ uptr PageSize;
+ uptr PagesCount;
+ uptr PageSizeLog;
+ uptr FullPagesBlockCountMax;
+ bool SameBlockCountPerPage;
+ RegionPageMap PageMap;
+};
+
+// Try to release the pages which don't have any blocks in use, i.e., all the
+// blocks on them are free. The `PageMap` records the number of free blocks
+// in each page.
+template <class ReleaseRecorderT, typename SkipRegionT>
+NOINLINE void
+releaseFreeMemoryToOS(PageReleaseContext &Context,
+ ReleaseRecorderT &Recorder, SkipRegionT SkipRegion) {
+ const uptr PageSize = Context.PageSize;
+ const uptr BlockSize = Context.BlockSize;
+ const uptr PagesCount = Context.PagesCount;
+ const uptr NumberOfRegions = Context.NumberOfRegions;
+ const uptr ReleasePageOffset = Context.ReleasePageOffset;
+ const uptr FullPagesBlockCountMax = Context.FullPagesBlockCountMax;
+ const bool SameBlockCountPerPage = Context.SameBlockCountPerPage;
+ RegionPageMap &PageMap = Context.PageMap;
+
// Iterate over pages detecting ranges of pages with chunk Counters equal
// to the expected number of chunks for the particular page.
FreePagesRangeTracker<ReleaseRecorderT> RangeTracker(Recorder);
@@ -287,9 +647,11 @@ releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> &FreeList,
RangeTracker.skipPages(PagesCount);
continue;
}
- for (uptr J = 0; J < PagesCount; J++)
- RangeTracker.processNextPage(Counters.get(I, J) ==
- FullPagesBlockCountMax);
+ for (uptr J = 0; J < PagesCount; J++) {
+ const bool CanRelease =
+ PageMap.updateAsAllCountedIf(I, J, FullPagesBlockCountMax);
+ RangeTracker.processNextPage(CanRelease);
+ }
}
} else {
// Slow path, go through the pages keeping count how many chunks affect
@@ -308,6 +670,10 @@ releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> &FreeList,
}
uptr PrevPageBoundary = 0;
uptr CurrentBoundary = 0;
+ if (ReleasePageOffset > 0) {
+ PrevPageBoundary = ReleasePageOffset * PageSize;
+ CurrentBoundary = roundUpSlow(PrevPageBoundary, BlockSize);
+ }
for (uptr J = 0; J < PagesCount; J++) {
const uptr PageBoundary = PrevPageBoundary + PageSize;
uptr BlocksPerPage = Pn;
@@ -321,7 +687,9 @@ releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> &FreeList,
}
}
PrevPageBoundary = PageBoundary;
- RangeTracker.processNextPage(Counters.get(I, J) == BlocksPerPage);
+ const bool CanRelease =
+ PageMap.updateAsAllCountedIf(I, J, BlocksPerPage);
+ RangeTracker.processNextPage(CanRelease);
}
}
}
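A standalone sketch of the fast-path sweep performed by releaseFreeMemoryToOS when every page holds the same maximum block count: a page becomes releasable once its free-block counter reaches that maximum, and adjacent releasable pages coalesce into ranges. A plain array stands in for the packed RegionPageMap.

#include <cstdio>
#include <vector>

int main() {
  const unsigned FullPagesBlockCountMax = 4; // blocks per page (fast path)
  // Hypothetical free-block counts per page, as a RegionPageMap would hold.
  std::vector<unsigned> Counters = {4, 4, 3, 4, 0, 4};
  bool InRange = false;
  unsigned Start = 0;
  for (unsigned Page = 0; Page <= Counters.size(); Page++) {
    const bool CanRelease =
        Page < Counters.size() && Counters[Page] == FullPagesBlockCountMax;
    if (CanRelease && !InRange) {
      Start = Page;
      InRange = true;
    } else if (!CanRelease && InRange) {
      std::printf("release pages [%u, %u)\n", Start, Page);
      InRange = false;
    }
  }
  // prints [0, 2), [3, 4) and [5, 6)
}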
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/report.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/report.cpp
index 561c7c51f4e1..9cef0adc0bb3 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/report.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/report.cpp
@@ -21,14 +21,10 @@ public:
void append(const char *Format, ...) {
va_list Args;
va_start(Args, Format);
- Message.append(Format, Args);
+ Message.vappend(Format, Args);
va_end(Args);
}
- NORETURN ~ScopedErrorReport() {
- outputRaw(Message.data());
- setAbortMessage(Message.data());
- die();
- }
+ NORETURN ~ScopedErrorReport() { reportRawError(Message.data()); }
private:
ScopedString Message;
@@ -55,6 +51,13 @@ void NORETURN reportError(const char *Message) {
Report.append("%s\n", Message);
}
+// Generic fatal error message without ScopedString.
+void NORETURN reportRawError(const char *Message) {
+ outputRaw(Message);
+ setAbortMessage(Message);
+ die();
+}
+
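A sketch of the allocation-free error path this enables: format into a fixed stack buffer, then hand the result to a raw reporter, so even an out-of-memory report never allocates. The names below are stand-ins, not the scudo API.

#include <cstddef>
#include <cstdio>
#include <cstdlib>

[[noreturn]] void reportRawErrorSketch(const char *Message) {
  std::fputs(Message, stderr); // stands in for outputRaw()/setAbortMessage()
  std::abort();                // stands in for die()
}

int main() {
  char Error[128]; // fixed stack buffer: no heap use on the error path
  std::snprintf(Error, sizeof(Error),
                "out of memory trying to allocate %zu bytes\n",
                static_cast<std::size_t>(1) << 30);
  reportRawErrorSketch(Error);
}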
void NORETURN reportInvalidFlag(const char *FlagType, const char *Value) {
ScopedErrorReport Report;
Report.append("invalid value for %s option: '%s'\n", FlagType, Value);
@@ -67,14 +70,6 @@ void NORETURN reportHeaderCorruption(void *Ptr) {
Report.append("corrupted chunk header at address %p\n", Ptr);
}
-// Two threads have attempted to modify a chunk header at the same time. This is
-// symptomatic of a race-condition in the application code, or general lack of
-// proper locking.
-void NORETURN reportHeaderRace(void *Ptr) {
- ScopedErrorReport Report;
- Report.append("race on chunk header at address %p\n", Ptr);
-}
-
// The allocator was compiled with parameters that conflict with field size
// requirements.
void NORETURN reportSanityCheckError(const char *Field) {
@@ -100,6 +95,11 @@ void NORETURN reportAllocationSizeTooBig(uptr UserSize, uptr TotalSize,
UserSize, TotalSize, MaxSize);
}
+void NORETURN reportOutOfBatchClass() {
+ ScopedErrorReport Report;
+ Report.append("BatchClass region is used up, can't hold any free block\n");
+}
+
void NORETURN reportOutOfMemory(uptr RequestedSize) {
ScopedErrorReport Report;
Report.append("out of memory trying to allocate %zu bytes\n", RequestedSize);
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/report.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/report.h
index 14e4e799b736..a510fdaebb6d 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/report.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/report.h
@@ -15,15 +15,17 @@ namespace scudo {
// Reports are *fatal* unless stated otherwise.
-// Generic error.
+// Generic error, adds newline to end of message.
void NORETURN reportError(const char *Message);
+// Generic error, but the message is not modified.
+void NORETURN reportRawError(const char *Message);
+
// Flags related errors.
void NORETURN reportInvalidFlag(const char *FlagType, const char *Value);
// Chunk header related errors.
void NORETURN reportHeaderCorruption(void *Ptr);
-void NORETURN reportHeaderRace(void *Ptr);
// Sanity checks related error.
void NORETURN reportSanityCheckError(const char *Field);
@@ -32,6 +34,7 @@ void NORETURN reportSanityCheckError(const char *Field);
void NORETURN reportAlignmentTooBig(uptr Alignment, uptr MaxAlignment);
void NORETURN reportAllocationSizeTooBig(uptr UserSize, uptr TotalSize,
uptr MaxSize);
+void NORETURN reportOutOfBatchClass();
void NORETURN reportOutOfMemory(uptr RequestedSize);
enum class AllocatorAction : u8 {
Recycling,
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/report_linux.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/report_linux.cpp
new file mode 100644
index 000000000000..6a983036e6cd
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/report_linux.cpp
@@ -0,0 +1,58 @@
+//===-- report_linux.cpp ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "platform.h"
+
+#if SCUDO_LINUX || SCUDO_TRUSTY
+
+#include "common.h"
+#include "internal_defs.h"
+#include "report.h"
+#include "report_linux.h"
+#include "string_utils.h"
+
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+
+namespace scudo {
+
+// Fatal internal map() error (potentially OOM related).
+void NORETURN reportMapError(uptr SizeIfOOM) {
+ char Error[128] = "Scudo ERROR: internal map failure\n";
+ if (SizeIfOOM) {
+ formatString(
+ Error, sizeof(Error),
+ "Scudo ERROR: internal map failure (NO MEMORY) requesting %zuKB\n",
+ SizeIfOOM >> 10);
+ }
+ reportRawError(Error);
+}
+
+void NORETURN reportUnmapError(uptr Addr, uptr Size) {
+ char Error[128];
+ formatString(Error, sizeof(Error),
+ "Scudo ERROR: internal unmap failure (error desc=%s) Addr 0x%zx "
+ "Size %zu\n",
+ strerror(errno), Addr, Size);
+ reportRawError(Error);
+}
+
+void NORETURN reportProtectError(uptr Addr, uptr Size, int Prot) {
+ char Error[128];
+ formatString(
+ Error, sizeof(Error),
+ "Scudo ERROR: internal protect failure (error desc=%s) Addr 0x%zx "
+ "Size %zu Prot %x\n",
+ strerror(errno), Addr, Size, Prot);
+ reportRawError(Error);
+}
+
+} // namespace scudo
+
+#endif // SCUDO_LINUX || SCUDO_TRUSTY
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/report_linux.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/report_linux.h
new file mode 100644
index 000000000000..aa0bb247e672
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/report_linux.h
@@ -0,0 +1,34 @@
+//===-- report_linux.h ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_REPORT_LINUX_H_
+#define SCUDO_REPORT_LINUX_H_
+
+#include "platform.h"
+
+#if SCUDO_LINUX || SCUDO_TRUSTY
+
+#include "internal_defs.h"
+
+namespace scudo {
+
+// Report a fatal error when a map call fails. SizeIfOOM shall
+// hold the requested size on an out-of-memory error, 0 otherwise.
+void NORETURN reportMapError(uptr SizeIfOOM = 0);
+
+// Report a fatal error when an unmap call fails.
+void NORETURN reportUnmapError(uptr Addr, uptr Size);
+
+// Report a fatal error when an mprotect call fails.
+void NORETURN reportProtectError(uptr Addr, uptr Size, int Prot);
+
+} // namespace scudo
+
+#endif // SCUDO_LINUX || SCUDO_TRUSTY
+
+#endif // SCUDO_REPORT_LINUX_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/secondary.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/secondary.h
index 630e64d46edf..f52a4188bcf3 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/secondary.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/secondary.h
@@ -12,11 +12,13 @@
#include "chunk.h"
#include "common.h"
#include "list.h"
+#include "mem_map.h"
#include "memtag.h"
#include "mutex.h"
#include "options.h"
#include "stats.h"
#include "string_utils.h"
+#include "thread_annotations.h"
namespace scudo {
@@ -36,9 +38,7 @@ struct alignas(Max<uptr>(archSupportsMemoryTagging()
LargeBlock::Header *Next;
uptr CommitBase;
uptr CommitSize;
- uptr MapBase;
- uptr MapSize;
- [[no_unique_address]] MapPlatformData Data;
+ MemMapT MemMap;
};
static_assert(sizeof(Header) % (1U << SCUDO_MIN_ALIGNMENT_LOG) == 0, "");
@@ -64,16 +64,34 @@ template <typename Config> static Header *getHeader(const void *Ptr) {
} // namespace LargeBlock
-static void unmap(LargeBlock::Header *H) {
- MapPlatformData Data = H->Data;
- unmap(reinterpret_cast<void *>(H->MapBase), H->MapSize, UNMAP_ALL, &Data);
+static inline void unmap(LargeBlock::Header *H) {
+ // Note that `H->MemMap` is stored on the pages managed by itself. Take
+ // over the ownership before unmap() so that any operation along with
+ // unmap() won't touch inaccessible pages.
+ MemMapT MemMap = H->MemMap;
+ MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
}
-class MapAllocatorNoCache {
+namespace {
+struct CachedBlock {
+ uptr CommitBase = 0;
+ uptr CommitSize = 0;
+ uptr BlockBegin = 0;
+ MemMapT MemMap = {};
+ u64 Time = 0;
+
+ bool isValid() { return CommitBase != 0; }
+
+ void invalidate() { CommitBase = 0; }
+};
+} // namespace
+
+template <typename Config> class MapAllocatorNoCache {
public:
void init(UNUSED s32 ReleaseToOsInterval) {}
bool retrieve(UNUSED Options Options, UNUSED uptr Size, UNUSED uptr Alignment,
- UNUSED LargeBlock::Header **H, UNUSED bool *Zeroed) {
+ UNUSED uptr HeadersSize, UNUSED LargeBlock::Header **H,
+ UNUSED bool *Zeroed) {
return false;
}
void store(UNUSED Options Options, LargeBlock::Header *H) { unmap(H); }
@@ -90,61 +108,102 @@ public:
// Not supported by the Secondary Cache, but not an error either.
return true;
}
+
+ void getStats(UNUSED ScopedString *Str) {
+ Str->append("Secondary Cache Disabled\n");
+ }
};
static const uptr MaxUnusedCachePages = 4U;
template <typename Config>
-void mapSecondary(Options Options, uptr CommitBase, uptr CommitSize,
- uptr AllocPos, uptr Flags, MapPlatformData *Data) {
+bool mapSecondary(const Options &Options, uptr CommitBase, uptr CommitSize,
+ uptr AllocPos, uptr Flags, MemMapT &MemMap) {
+ Flags |= MAP_RESIZABLE;
+ Flags |= MAP_ALLOWNOMEM;
+
const uptr MaxUnusedCacheBytes = MaxUnusedCachePages * getPageSizeCached();
if (useMemoryTagging<Config>(Options) && CommitSize > MaxUnusedCacheBytes) {
const uptr UntaggedPos = Max(AllocPos, CommitBase + MaxUnusedCacheBytes);
- map(reinterpret_cast<void *>(CommitBase), UntaggedPos - CommitBase,
- "scudo:secondary", MAP_RESIZABLE | MAP_MEMTAG | Flags, Data);
- map(reinterpret_cast<void *>(UntaggedPos),
- CommitBase + CommitSize - UntaggedPos, "scudo:secondary",
- MAP_RESIZABLE | Flags, Data);
+ return MemMap.remap(CommitBase, UntaggedPos - CommitBase, "scudo:secondary",
+ MAP_MEMTAG | Flags) &&
+ MemMap.remap(UntaggedPos, CommitBase + CommitSize - UntaggedPos,
+ "scudo:secondary", Flags);
} else {
- map(reinterpret_cast<void *>(CommitBase), CommitSize, "scudo:secondary",
- MAP_RESIZABLE | (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) |
- Flags,
- Data);
+ const uptr RemapFlags =
+ (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) | Flags;
+ return MemMap.remap(CommitBase, CommitSize, "scudo:secondary", RemapFlags);
}
}
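A standalone check of the split point used above under memory tagging: everything below UntaggedPos = max(AllocPos, CommitBase + MaxUnusedCacheBytes) is mapped with MAP_MEMTAG and the tail is not. The addresses are made up.

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t PageSize = 4096, MaxUnusedCachePages = 4;
  const uint64_t CommitBase = 0x10000, CommitSize = 16 * PageSize;
  const uint64_t AllocPos = CommitBase + CommitSize - 5 * PageSize;
  const uint64_t MaxUnusedCacheBytes = MaxUnusedCachePages * PageSize;
  // Everything below UntaggedPos gets MAP_MEMTAG; the tail does not.
  const uint64_t UntaggedPos =
      std::max(AllocPos, CommitBase + MaxUnusedCacheBytes);
  std::printf("tagged   [0x%llx, 0x%llx)\n", (unsigned long long)CommitBase,
              (unsigned long long)UntaggedPos);
  std::printf("untagged [0x%llx, 0x%llx)\n", (unsigned long long)UntaggedPos,
              (unsigned long long)(CommitBase + CommitSize));
}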
+// Template specialization to avoid producing a zero-length array.
+template <typename T, size_t Size> class NonZeroLengthArray {
+public:
+ T &operator[](uptr Idx) { return values[Idx]; }
+
+private:
+ T values[Size];
+};
+template <typename T> class NonZeroLengthArray<T, 0> {
+public:
+ T &operator[](uptr UNUSED Idx) { UNREACHABLE("Unsupported!"); }
+};
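A standalone sketch of why the specialization exists: T values[0] is ill-formed, so a configuration with a zero quarantine size needs a storage-free variant whose operator[] must never be reached.

#include <cstddef>
#include <cstdio>

// Primary template holds real storage; the Size == 0 specialization holds
// none, so a zero-sized quarantine never instantiates `T values[0]`.
template <typename T, std::size_t Size> struct ArraySketch {
  T &operator[](std::size_t I) { return Values[I]; }
  T Values[Size];
};
template <typename T> struct ArraySketch<T, 0> {
  T &operator[](std::size_t) { __builtin_trap(); } // must never be reached
};

int main() {
  ArraySketch<int, 2> A;
  A[0] = 7;
  ArraySketch<int, 0> B; // compiles fine and carries no int storage
  std::printf("%d %zu %zu\n", A[0], sizeof(A), sizeof(B)); // 7 8 1
  (void)B;
}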
+
template <typename Config> class MapAllocatorCache {
public:
+ using CacheConfig = typename Config::Secondary::Cache;
+
+ void getStats(ScopedString *Str) {
+ ScopedLock L(Mutex);
+ uptr Integral;
+ uptr Fractional;
+ computePercentage(SuccessfulRetrieves, CallsToRetrieve, &Integral,
+ &Fractional);
+ Str->append("Stats: MapAllocatorCache: EntriesCount: %d, "
+ "MaxEntriesCount: %u, MaxEntrySize: %zu\n",
+ EntriesCount, atomic_load_relaxed(&MaxEntriesCount),
+ atomic_load_relaxed(&MaxEntrySize));
+ Str->append("Stats: CacheRetrievalStats: SuccessRate: %u/%u "
+ "(%zu.%02zu%%)\n",
+ SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
+ for (CachedBlock Entry : Entries) {
+ if (!Entry.isValid())
+ continue;
+ Str->append("StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
+ "BlockSize: %zu %s\n",
+ Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
+ Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
+ }
+ }
+
// Ensure the default maximum specified fits the array.
- static_assert(Config::SecondaryCacheDefaultMaxEntriesCount <=
- Config::SecondaryCacheEntriesArraySize,
+ static_assert(CacheConfig::DefaultMaxEntriesCount <=
+ CacheConfig::EntriesArraySize,
"");
- void init(s32 ReleaseToOsInterval) {
+ void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
DCHECK_EQ(EntriesCount, 0U);
setOption(Option::MaxCacheEntriesCount,
- static_cast<sptr>(Config::SecondaryCacheDefaultMaxEntriesCount));
+ static_cast<sptr>(CacheConfig::DefaultMaxEntriesCount));
setOption(Option::MaxCacheEntrySize,
- static_cast<sptr>(Config::SecondaryCacheDefaultMaxEntrySize));
+ static_cast<sptr>(CacheConfig::DefaultMaxEntrySize));
setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
}
- void store(Options Options, LargeBlock::Header *H) {
+ void store(const Options &Options, LargeBlock::Header *H) EXCLUDES(Mutex) {
if (!canCache(H->CommitSize))
return unmap(H);
bool EntryCached = false;
bool EmptyCache = false;
const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
- const u64 Time = getMonotonicTime();
+ const u64 Time = getMonotonicTimeFast();
const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
CachedBlock Entry;
Entry.CommitBase = H->CommitBase;
Entry.CommitSize = H->CommitSize;
- Entry.MapBase = H->MapBase;
- Entry.MapSize = H->MapSize;
Entry.BlockBegin = reinterpret_cast<uptr>(H + 1);
- Entry.Data = H->Data;
+ Entry.MemMap = H->MemMap;
Entry.Time = Time;
if (useMemoryTagging<Config>(Options)) {
if (Interval == 0 && !SCUDO_FUCHSIA) {
@@ -154,13 +213,13 @@ public:
// on top so we just do the two syscalls there.
Entry.Time = 0;
mapSecondary<Config>(Options, Entry.CommitBase, Entry.CommitSize,
- Entry.CommitBase, MAP_NOACCESS, &Entry.Data);
+ Entry.CommitBase, MAP_NOACCESS, Entry.MemMap);
} else {
- setMemoryPermission(Entry.CommitBase, Entry.CommitSize, MAP_NOACCESS,
- &Entry.Data);
+ Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize,
+ MAP_NOACCESS);
}
} else if (Interval == 0) {
- releasePagesToOS(Entry.CommitBase, 0, Entry.CommitSize, &Entry.Data);
+ Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, Entry.CommitSize);
Entry.Time = 0;
}
do {
@@ -172,11 +231,10 @@ public:
// just unmap it.
break;
}
- if (Config::SecondaryCacheQuarantineSize &&
- useMemoryTagging<Config>(Options)) {
+ if (CacheConfig::QuarantineSize && useMemoryTagging<Config>(Options)) {
QuarantinePos =
- (QuarantinePos + 1) % Max(Config::SecondaryCacheQuarantineSize, 1u);
- if (!Quarantine[QuarantinePos].CommitBase) {
+ (QuarantinePos + 1) % Max(CacheConfig::QuarantineSize, 1u);
+ if (!Quarantine[QuarantinePos].isValid()) {
Quarantine[QuarantinePos] = Entry;
return;
}
@@ -191,7 +249,7 @@ public:
EmptyCache = true;
} else {
for (u32 I = 0; I < MaxCount; I++) {
- if (Entries[I].CommitBase)
+ if (Entries[I].isValid())
continue;
if (I != 0)
Entries[I] = Entries[0];
@@ -209,66 +267,89 @@ public:
else if (Interval >= 0)
releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
if (!EntryCached)
- unmap(reinterpret_cast<void *>(Entry.MapBase), Entry.MapSize, UNMAP_ALL,
- &Entry.Data);
+ Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
}
- bool retrieve(Options Options, uptr Size, uptr Alignment,
- LargeBlock::Header **H, bool *Zeroed) {
+ bool retrieve(Options Options, uptr Size, uptr Alignment, uptr HeadersSize,
+ LargeBlock::Header **H, bool *Zeroed) EXCLUDES(Mutex) {
const uptr PageSize = getPageSizeCached();
const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
+ // After testing several options, 10% of the requested size proved to be
+ // the optimal choice for retrieving cached blocks.
+ constexpr u32 FragmentedBytesDivisor = 10;
bool Found = false;
CachedBlock Entry;
- uptr HeaderPos;
+ uptr EntryHeaderPos = 0;
{
ScopedLock L(Mutex);
+ CallsToRetrieve++;
if (EntriesCount == 0)
return false;
+ u32 OptimalFitIndex = 0;
+ uptr MinDiff = UINTPTR_MAX;
for (u32 I = 0; I < MaxCount; I++) {
- const uptr CommitBase = Entries[I].CommitBase;
- if (!CommitBase)
+ if (!Entries[I].isValid())
continue;
+ const uptr CommitBase = Entries[I].CommitBase;
const uptr CommitSize = Entries[I].CommitSize;
const uptr AllocPos =
- roundDownTo(CommitBase + CommitSize - Size, Alignment);
- HeaderPos =
- AllocPos - Chunk::getHeaderSize() - LargeBlock::getHeaderSize();
+ roundDown(CommitBase + CommitSize - Size, Alignment);
+ const uptr HeaderPos = AllocPos - HeadersSize;
if (HeaderPos > CommitBase + CommitSize)
continue;
if (HeaderPos < CommitBase ||
- AllocPos > CommitBase + PageSize * MaxUnusedCachePages)
+ AllocPos > CommitBase + PageSize * MaxUnusedCachePages) {
continue;
+ }
Found = true;
- Entry = Entries[I];
- Entries[I].CommitBase = 0;
- break;
+ const uptr Diff = HeaderPos - CommitBase;
+ // Immediately use a cached block if its size is close enough to the
+ // requested size.
+ const uptr MaxAllowedFragmentedBytes =
+ (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
+ if (Diff <= MaxAllowedFragmentedBytes) {
+ OptimalFitIndex = I;
+ EntryHeaderPos = HeaderPos;
+ break;
+ }
+ // Keep track of the smallest cached block that is larger than
+ // (AllocSize + HeaderSize).
+ if (Diff > MinDiff)
+ continue;
+ OptimalFitIndex = I;
+ MinDiff = Diff;
+ EntryHeaderPos = HeaderPos;
+ }
+ if (Found) {
+ Entry = Entries[OptimalFitIndex];
+ Entries[OptimalFitIndex].invalidate();
+ EntriesCount--;
+ SuccessfulRetrieves++;
}
}
- if (Found) {
- *H = reinterpret_cast<LargeBlock::Header *>(
- LargeBlock::addHeaderTag<Config>(HeaderPos));
- *Zeroed = Entry.Time == 0;
- if (useMemoryTagging<Config>(Options))
- setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0, &Entry.Data);
- uptr NewBlockBegin = reinterpret_cast<uptr>(*H + 1);
- if (useMemoryTagging<Config>(Options)) {
- if (*Zeroed)
- storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
- NewBlockBegin);
- else if (Entry.BlockBegin < NewBlockBegin)
- storeTags(Entry.BlockBegin, NewBlockBegin);
- else
- storeTags(untagPointer(NewBlockBegin),
- untagPointer(Entry.BlockBegin));
+ if (!Found)
+ return false;
+
+ *H = reinterpret_cast<LargeBlock::Header *>(
+ LargeBlock::addHeaderTag<Config>(EntryHeaderPos));
+ *Zeroed = Entry.Time == 0;
+ if (useMemoryTagging<Config>(Options))
+ Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
+ uptr NewBlockBegin = reinterpret_cast<uptr>(*H + 1);
+ if (useMemoryTagging<Config>(Options)) {
+ if (*Zeroed) {
+ storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
+ NewBlockBegin);
+ } else if (Entry.BlockBegin < NewBlockBegin) {
+ storeTags(Entry.BlockBegin, NewBlockBegin);
+ } else {
+ storeTags(untagPointer(NewBlockBegin), untagPointer(Entry.BlockBegin));
}
- (*H)->CommitBase = Entry.CommitBase;
- (*H)->CommitSize = Entry.CommitSize;
- (*H)->MapBase = Entry.MapBase;
- (*H)->MapSize = Entry.MapSize;
- (*H)->Data = Entry.Data;
- EntriesCount--;
}
- return Found;
+ (*H)->CommitBase = Entry.CommitBase;
+ (*H)->CommitSize = Entry.CommitSize;
+ (*H)->MemMap = Entry.MemMap;
+ return true;
}
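A standalone sketch of the selection policy in retrieve(): accept a block immediately when the wasted prefix is within 10% of its usable size, otherwise remember the block with the smallest waste. The real function's page-size and MaxUnusedCachePages checks are omitted here.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

struct BlockSketch {
  uint64_t CommitBase, CommitSize;
};

int main() {
  const uint64_t Size = 8192, HeadersSize = 64, Alignment = 16;
  std::vector<BlockSketch> Entries = {{0x10000, 32768}, {0x50000, 9216}};
  uint64_t MinDiff = UINT64_MAX;
  std::size_t Best = 0;
  bool Found = false;
  for (std::size_t I = 0; I < Entries.size(); I++) {
    const uint64_t End = Entries[I].CommitBase + Entries[I].CommitSize;
    const uint64_t AllocPos = (End - Size) & ~(Alignment - 1); // roundDown
    const uint64_t HeaderPos = AllocPos - HeadersSize;
    if (HeaderPos < Entries[I].CommitBase)
      continue; // block too small
    Found = true;
    const uint64_t Diff = HeaderPos - Entries[I].CommitBase; // wasted prefix
    if (Diff <= (End - HeaderPos) / 10) { // within 10% of the usable size
      Best = I;
      break;
    }
    if (Diff < MinDiff) { // otherwise track the tightest fit
      MinDiff = Diff;
      Best = I;
    }
  }
  if (Found)
    std::printf("picked entry %zu\n", Best); // entry 1: only ~960B wasted
}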
bool canCache(uptr Size) {
@@ -278,16 +359,15 @@ public:
bool setOption(Option O, sptr Value) {
if (O == Option::ReleaseInterval) {
- const s32 Interval =
- Max(Min(static_cast<s32>(Value),
- Config::SecondaryCacheMaxReleaseToOsIntervalMs),
- Config::SecondaryCacheMinReleaseToOsIntervalMs);
+ const s32 Interval = Max(
+ Min(static_cast<s32>(Value), CacheConfig::MaxReleaseToOsIntervalMs),
+ CacheConfig::MinReleaseToOsIntervalMs);
atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
return true;
}
if (O == Option::MaxCacheEntriesCount) {
const u32 MaxCount = static_cast<u32>(Value);
- if (MaxCount > Config::SecondaryCacheEntriesArraySize)
+ if (MaxCount > CacheConfig::EntriesArraySize)
return false;
atomic_store_relaxed(&MaxEntriesCount, MaxCount);
return true;
@@ -302,105 +382,96 @@ public:
void releaseToOS() { releaseOlderThan(UINT64_MAX); }
- void disableMemoryTagging() {
+ void disableMemoryTagging() EXCLUDES(Mutex) {
ScopedLock L(Mutex);
- for (u32 I = 0; I != Config::SecondaryCacheQuarantineSize; ++I) {
- if (Quarantine[I].CommitBase) {
- unmap(reinterpret_cast<void *>(Quarantine[I].MapBase),
- Quarantine[I].MapSize, UNMAP_ALL, &Quarantine[I].Data);
- Quarantine[I].CommitBase = 0;
+ for (u32 I = 0; I != CacheConfig::QuarantineSize; ++I) {
+ if (Quarantine[I].isValid()) {
+ MemMapT &MemMap = Quarantine[I].MemMap;
+ MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
+ Quarantine[I].invalidate();
}
}
const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
- for (u32 I = 0; I < MaxCount; I++)
- if (Entries[I].CommitBase)
- setMemoryPermission(Entries[I].CommitBase, Entries[I].CommitSize, 0,
- &Entries[I].Data);
+ for (u32 I = 0; I < MaxCount; I++) {
+ if (Entries[I].isValid()) {
+ Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
+ Entries[I].CommitSize, 0);
+ }
+ }
QuarantinePos = -1U;
}
- void disable() { Mutex.lock(); }
+ void disable() NO_THREAD_SAFETY_ANALYSIS { Mutex.lock(); }
- void enable() { Mutex.unlock(); }
+ void enable() NO_THREAD_SAFETY_ANALYSIS { Mutex.unlock(); }
void unmapTestOnly() { empty(); }
private:
void empty() {
- struct {
- void *MapBase;
- uptr MapSize;
- MapPlatformData Data;
- } MapInfo[Config::SecondaryCacheEntriesArraySize];
+ MemMapT MapInfo[CacheConfig::EntriesArraySize];
uptr N = 0;
{
ScopedLock L(Mutex);
- for (uptr I = 0; I < Config::SecondaryCacheEntriesArraySize; I++) {
- if (!Entries[I].CommitBase)
+ for (uptr I = 0; I < CacheConfig::EntriesArraySize; I++) {
+ if (!Entries[I].isValid())
continue;
- MapInfo[N].MapBase = reinterpret_cast<void *>(Entries[I].MapBase);
- MapInfo[N].MapSize = Entries[I].MapSize;
- MapInfo[N].Data = Entries[I].Data;
- Entries[I].CommitBase = 0;
+ MapInfo[N] = Entries[I].MemMap;
+ Entries[I].invalidate();
N++;
}
EntriesCount = 0;
IsFullEvents = 0;
}
- for (uptr I = 0; I < N; I++)
- unmap(MapInfo[I].MapBase, MapInfo[I].MapSize, UNMAP_ALL,
- &MapInfo[I].Data);
+ for (uptr I = 0; I < N; I++) {
+ MemMapT &MemMap = MapInfo[I];
+ MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
+ }
}
- struct CachedBlock {
- uptr CommitBase;
- uptr CommitSize;
- uptr MapBase;
- uptr MapSize;
- uptr BlockBegin;
- [[no_unique_address]] MapPlatformData Data;
- u64 Time;
- };
-
- void releaseIfOlderThan(CachedBlock &Entry, u64 Time) {
- if (!Entry.CommitBase || !Entry.Time)
+ void releaseIfOlderThan(CachedBlock &Entry, u64 Time) REQUIRES(Mutex) {
+ if (!Entry.isValid() || !Entry.Time)
return;
if (Entry.Time > Time) {
if (OldestTime == 0 || Entry.Time < OldestTime)
OldestTime = Entry.Time;
return;
}
- releasePagesToOS(Entry.CommitBase, 0, Entry.CommitSize, &Entry.Data);
+ Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, Entry.CommitSize);
Entry.Time = 0;
}
- void releaseOlderThan(u64 Time) {
+ void releaseOlderThan(u64 Time) EXCLUDES(Mutex) {
ScopedLock L(Mutex);
if (!EntriesCount || OldestTime == 0 || OldestTime > Time)
return;
OldestTime = 0;
- for (uptr I = 0; I < Config::SecondaryCacheQuarantineSize; I++)
+ for (uptr I = 0; I < CacheConfig::QuarantineSize; I++)
releaseIfOlderThan(Quarantine[I], Time);
- for (uptr I = 0; I < Config::SecondaryCacheEntriesArraySize; I++)
+ for (uptr I = 0; I < CacheConfig::EntriesArraySize; I++)
releaseIfOlderThan(Entries[I], Time);
}
HybridMutex Mutex;
- u32 EntriesCount = 0;
- u32 QuarantinePos = 0;
+ u32 EntriesCount GUARDED_BY(Mutex) = 0;
+ u32 QuarantinePos GUARDED_BY(Mutex) = 0;
atomic_u32 MaxEntriesCount = {};
atomic_uptr MaxEntrySize = {};
- u64 OldestTime = 0;
- u32 IsFullEvents = 0;
+ u64 OldestTime GUARDED_BY(Mutex) = 0;
+ u32 IsFullEvents GUARDED_BY(Mutex) = 0;
atomic_s32 ReleaseToOsIntervalMs = {};
+ u32 CallsToRetrieve GUARDED_BY(Mutex) = 0;
+ u32 SuccessfulRetrieves GUARDED_BY(Mutex) = 0;
- CachedBlock Entries[Config::SecondaryCacheEntriesArraySize] = {};
- CachedBlock Quarantine[Config::SecondaryCacheQuarantineSize] = {};
+ CachedBlock Entries[CacheConfig::EntriesArraySize] GUARDED_BY(Mutex) = {};
+ NonZeroLengthArray<CachedBlock, CacheConfig::QuarantineSize>
+ Quarantine GUARDED_BY(Mutex) = {};
};
template <typename Config> class MapAllocator {
public:
- void init(GlobalStats *S, s32 ReleaseToOsInterval = -1) {
+ void init(GlobalStats *S,
+ s32 ReleaseToOsInterval = -1) NO_THREAD_SAFETY_ANALYSIS {
DCHECK_EQ(AllocatedBytes, 0U);
DCHECK_EQ(FreedBytes, 0U);
Cache.init(ReleaseToOsInterval);
@@ -409,11 +480,11 @@ public:
S->link(&Stats);
}
- void *allocate(Options Options, uptr Size, uptr AlignmentHint = 0,
+ void *allocate(const Options &Options, uptr Size, uptr AlignmentHint = 0,
uptr *BlockEnd = nullptr,
FillContentsMode FillContents = NoFill);
- void deallocate(Options Options, void *Ptr);
+ void deallocate(const Options &Options, void *Ptr);
static uptr getBlockEnd(void *Ptr) {
auto *B = LargeBlock::getHeader<Config>(Ptr);
@@ -424,19 +495,23 @@ public:
return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr);
}
- void getStats(ScopedString *Str) const;
+ static constexpr uptr getHeadersSize() {
+ return Chunk::getHeaderSize() + LargeBlock::getHeaderSize();
+ }
- void disable() {
+ void disable() NO_THREAD_SAFETY_ANALYSIS {
Mutex.lock();
Cache.disable();
}
- void enable() {
+ void enable() NO_THREAD_SAFETY_ANALYSIS {
Cache.enable();
Mutex.unlock();
}
template <typename F> void iterateOverBlocks(F Callback) const {
+ Mutex.assertHeld();
+
for (const auto &H : InUseBlocks) {
uptr Ptr = reinterpret_cast<uptr>(&H) + LargeBlock::getHeaderSize();
if (allocatorSupportsMemoryTagging<Config>())
@@ -445,7 +520,7 @@ public:
}
}
- uptr canCache(uptr Size) { return Cache.canCache(Size); }
+ bool canCache(uptr Size) { return Cache.canCache(Size); }
bool setOption(Option O, sptr Value) { return Cache.setOption(O, Value); }
@@ -455,17 +530,20 @@ public:
void unmapTestOnly() { Cache.unmapTestOnly(); }
-private:
- typename Config::SecondaryCache Cache;
+ void getStats(ScopedString *Str);
- HybridMutex Mutex;
- DoublyLinkedList<LargeBlock::Header> InUseBlocks;
- uptr AllocatedBytes = 0;
- uptr FreedBytes = 0;
- uptr LargestSize = 0;
- u32 NumberOfAllocs = 0;
- u32 NumberOfFrees = 0;
- LocalStats Stats;
+private:
+ typename Config::Secondary::template CacheT<Config> Cache;
+
+ mutable HybridMutex Mutex;
+ DoublyLinkedList<LargeBlock::Header> InUseBlocks GUARDED_BY(Mutex);
+ uptr AllocatedBytes GUARDED_BY(Mutex) = 0;
+ uptr FreedBytes GUARDED_BY(Mutex) = 0;
+ uptr FragmentedBytes GUARDED_BY(Mutex) = 0;
+ uptr LargestSize GUARDED_BY(Mutex) = 0;
+ u32 NumberOfAllocs GUARDED_BY(Mutex) = 0;
+ u32 NumberOfFrees GUARDED_BY(Mutex) = 0;
+ LocalStats Stats GUARDED_BY(Mutex);
};
// As with the Primary, the size passed to this function includes any desired
@@ -480,24 +558,23 @@ private:
// the committed memory will amount to something close to Size - AlignmentHint
// (pending rounding and headers).
template <typename Config>
-void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
- uptr *BlockEndPtr,
+void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
+ uptr Alignment, uptr *BlockEndPtr,
FillContentsMode FillContents) {
if (Options.get(OptionBit::AddLargeAllocationSlack))
Size += 1UL << SCUDO_MIN_ALIGNMENT_LOG;
- Alignment = Max(Alignment, 1UL << SCUDO_MIN_ALIGNMENT_LOG);
+ Alignment = Max(Alignment, uptr(1U) << SCUDO_MIN_ALIGNMENT_LOG);
const uptr PageSize = getPageSizeCached();
- uptr RoundedSize =
- roundUpTo(roundUpTo(Size, Alignment) + LargeBlock::getHeaderSize() +
- Chunk::getHeaderSize(),
- PageSize);
- if (Alignment > PageSize)
- RoundedSize += Alignment - PageSize;
- if (Alignment < PageSize && Cache.canCache(RoundedSize)) {
+ // Note that cached blocks may already have an aligned address. Thus we
+ // simply pass the required size (`Size` + `getHeadersSize()`) for the cache
+ // lookup.
+ const uptr MinNeededSizeForCache = roundUp(Size + getHeadersSize(), PageSize);
+
+ if (Alignment < PageSize && Cache.canCache(MinNeededSizeForCache)) {
LargeBlock::Header *H;
bool Zeroed;
- if (Cache.retrieve(Options, Size, Alignment, &H, &Zeroed)) {
+ if (Cache.retrieve(Options, Size, Alignment, getHeadersSize(), &H,
+ &Zeroed)) {
const uptr BlockEnd = H->CommitBase + H->CommitSize;
if (BlockEndPtr)
*BlockEndPtr = BlockEnd;
@@ -509,25 +586,35 @@ void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
if (FillContents && !Zeroed)
memset(Ptr, FillContents == ZeroFill ? 0 : PatternFillByte,
BlockEnd - PtrInt);
- const uptr BlockSize = BlockEnd - HInt;
{
ScopedLock L(Mutex);
InUseBlocks.push_back(H);
- AllocatedBytes += BlockSize;
+ AllocatedBytes += H->CommitSize;
+ FragmentedBytes += H->MemMap.getCapacity() - H->CommitSize;
NumberOfAllocs++;
- Stats.add(StatAllocated, BlockSize);
- Stats.add(StatMapped, H->MapSize);
+ Stats.add(StatAllocated, H->CommitSize);
+ Stats.add(StatMapped, H->MemMap.getCapacity());
}
return Ptr;
}
}
- MapPlatformData Data = {};
+ uptr RoundedSize =
+ roundUp(roundUp(Size, Alignment) + getHeadersSize(), PageSize);
+ if (Alignment > PageSize)
+ RoundedSize += Alignment - PageSize;
+
+ ReservedMemoryT ReservedMemory;
const uptr MapSize = RoundedSize + 2 * PageSize;
- uptr MapBase = reinterpret_cast<uptr>(
- map(nullptr, MapSize, nullptr, MAP_NOACCESS | MAP_ALLOWNOMEM, &Data));
- if (UNLIKELY(!MapBase))
+ if (UNLIKELY(!ReservedMemory.create(/*Addr=*/0U, MapSize, nullptr,
+ MAP_ALLOWNOMEM))) {
return nullptr;
+ }
+
+ // Take entire ownership of the reserved region.
+ MemMapT MemMap = ReservedMemory.dispatch(ReservedMemory.getBase(),
+ ReservedMemory.getCapacity());
+ uptr MapBase = MemMap.getBase();
uptr CommitBase = MapBase + PageSize;
uptr MapEnd = MapBase + MapSize;
@@ -537,77 +624,83 @@ void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
// For alignments greater than or equal to a page, the user pointer (e.g.,
// the pointer that is returned by the C or C++ allocation APIs) ends up on
// a page boundary, and our headers will live in the preceding page.
- CommitBase = roundUpTo(MapBase + PageSize + 1, Alignment) - PageSize;
+ CommitBase = roundUp(MapBase + PageSize + 1, Alignment) - PageSize;
const uptr NewMapBase = CommitBase - PageSize;
DCHECK_GE(NewMapBase, MapBase);
// We only trim the extra memory on 32-bit platforms: 64-bit platforms
// are less constrained memory wise, and that saves us two syscalls.
if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
- unmap(reinterpret_cast<void *>(MapBase), NewMapBase - MapBase, 0, &Data);
+ MemMap.unmap(MapBase, NewMapBase - MapBase);
MapBase = NewMapBase;
}
const uptr NewMapEnd =
- CommitBase + PageSize + roundUpTo(Size, PageSize) + PageSize;
+ CommitBase + PageSize + roundUp(Size, PageSize) + PageSize;
DCHECK_LE(NewMapEnd, MapEnd);
if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
- unmap(reinterpret_cast<void *>(NewMapEnd), MapEnd - NewMapEnd, 0, &Data);
+ MemMap.unmap(NewMapEnd, MapEnd - NewMapEnd);
MapEnd = NewMapEnd;
}
}
const uptr CommitSize = MapEnd - PageSize - CommitBase;
- const uptr AllocPos = roundDownTo(CommitBase + CommitSize - Size, Alignment);
- mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0, &Data);
- const uptr HeaderPos =
- AllocPos - Chunk::getHeaderSize() - LargeBlock::getHeaderSize();
+ const uptr AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment);
+ if (!mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0,
+ MemMap)) {
+ MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
+ return nullptr;
+ }
+ const uptr HeaderPos = AllocPos - getHeadersSize();
LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(
LargeBlock::addHeaderTag<Config>(HeaderPos));
if (useMemoryTagging<Config>(Options))
storeTags(LargeBlock::addHeaderTag<Config>(CommitBase),
reinterpret_cast<uptr>(H + 1));
- H->MapBase = MapBase;
- H->MapSize = MapEnd - MapBase;
H->CommitBase = CommitBase;
H->CommitSize = CommitSize;
- H->Data = Data;
+ H->MemMap = MemMap;
if (BlockEndPtr)
*BlockEndPtr = CommitBase + CommitSize;
{
ScopedLock L(Mutex);
InUseBlocks.push_back(H);
AllocatedBytes += CommitSize;
+ FragmentedBytes += H->MemMap.getCapacity() - CommitSize;
if (LargestSize < CommitSize)
LargestSize = CommitSize;
NumberOfAllocs++;
Stats.add(StatAllocated, CommitSize);
- Stats.add(StatMapped, H->MapSize);
+ Stats.add(StatMapped, H->MemMap.getCapacity());
}
return reinterpret_cast<void *>(HeaderPos + LargeBlock::getHeaderSize());
}
template <typename Config>
-void MapAllocator<Config>::deallocate(Options Options, void *Ptr) {
+void MapAllocator<Config>::deallocate(const Options &Options, void *Ptr)
+ EXCLUDES(Mutex) {
LargeBlock::Header *H = LargeBlock::getHeader<Config>(Ptr);
const uptr CommitSize = H->CommitSize;
{
ScopedLock L(Mutex);
InUseBlocks.remove(H);
FreedBytes += CommitSize;
+ FragmentedBytes -= H->MemMap.getCapacity() - CommitSize;
NumberOfFrees++;
Stats.sub(StatAllocated, CommitSize);
- Stats.sub(StatMapped, H->MapSize);
+ Stats.sub(StatMapped, H->MemMap.getCapacity());
}
Cache.store(Options, H);
}
template <typename Config>
-void MapAllocator<Config>::getStats(ScopedString *Str) const {
- Str->append(
- "Stats: MapAllocator: allocated %zu times (%zuK), freed %zu times "
- "(%zuK), remains %zu (%zuK) max %zuM\n",
- NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees, FreedBytes >> 10,
- NumberOfAllocs - NumberOfFrees, (AllocatedBytes - FreedBytes) >> 10,
- LargestSize >> 20);
+void MapAllocator<Config>::getStats(ScopedString *Str) EXCLUDES(Mutex) {
+ ScopedLock L(Mutex);
+ Str->append("Stats: MapAllocator: allocated %u times (%zuK), freed %u times "
+ "(%zuK), remains %u (%zuK) max %zuM, Fragmented %zuK\n",
+ NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees,
+ FreedBytes >> 10, NumberOfAllocs - NumberOfFrees,
+ (AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20,
+ FragmentedBytes >> 10);
+ Cache.getStats(Str);
}
} // namespace scudo
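
The fragmentation accounting introduced above follows a simple invariant: each mapped region contributes its full reserved capacity to StatMapped, only the committed portion counts toward StatAllocated, and the difference is tracked in FragmentedBytes. A rough standalone sketch of that bookkeeping (the Region and SecondaryStats types are invented for illustration, not Scudo's):

#include <cassert>
#include <cstdint>

// Illustrative stand-ins for Scudo's MemMapT/LargeBlock::Header bookkeeping.
struct Region {
  uint64_t Capacity;    // total reserved bytes (MemMap.getCapacity())
  uint64_t CommitSize;  // committed bytes backing the allocation
};

struct SecondaryStats {
  uint64_t AllocatedBytes = 0;
  uint64_t FreedBytes = 0;
  uint64_t FragmentedBytes = 0;

  void onAllocate(const Region &R) {
    AllocatedBytes += R.CommitSize;
    FragmentedBytes += R.Capacity - R.CommitSize;
  }
  void onDeallocate(const Region &R) {
    FreedBytes += R.CommitSize;
    FragmentedBytes -= R.Capacity - R.CommitSize;
  }
};

int main() {
  SecondaryStats S;
  const Region R = {/*Capacity=*/1 << 20, /*CommitSize=*/(1 << 20) - 4096};
  S.onAllocate(R);
  assert(S.FragmentedBytes == 4096);  // reserved but never committed
  S.onDeallocate(R);
  assert(S.FragmentedBytes == 0);
  return 0;
}
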
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/size_class_map.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/size_class_map.h
index ba0f78453bcb..4138885de338 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/size_class_map.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/size_class_map.h
@@ -23,7 +23,7 @@ inline uptr scaledLog2(uptr Size, uptr ZeroLog, uptr LogBits) {
}
template <typename Config> struct SizeClassMapBase {
- static u32 getMaxCachedHint(uptr Size) {
+ static u16 getMaxCachedHint(uptr Size) {
DCHECK_NE(Size, 0);
u32 N;
// Force a 32-bit division if the template parameters allow for it.
@@ -31,7 +31,10 @@ template <typename Config> struct SizeClassMapBase {
N = static_cast<u32>((1UL << Config::MaxBytesCachedLog) / Size);
else
N = (1U << Config::MaxBytesCachedLog) / static_cast<u32>(Size);
- return Max(1U, Min(Config::MaxNumCachedHint, N));
+
+ // Note that Config::MaxNumCachedHint is u16, so the result is guaranteed to
+ // fit in u16.
+ return static_cast<u16>(Max(1U, Min<u32>(Config::MaxNumCachedHint, N)));
}
};
@@ -65,7 +68,7 @@ class FixedSizeClassMap : public SizeClassMapBase<Config> {
static const uptr M = (1UL << S) - 1;
public:
- static const u32 MaxNumCachedHint = Config::MaxNumCachedHint;
+ static const u16 MaxNumCachedHint = Config::MaxNumCachedHint;
static const uptr MaxSize = (1UL << Config::MaxSizeLog) + Config::SizeDelta;
static const uptr NumClasses =
@@ -99,7 +102,7 @@ public:
return MidClass + 1 + scaledLog2(Size - 1, Config::MidSizeLog, S);
}
- static u32 getMaxCachedHint(uptr Size) {
+ static u16 getMaxCachedHint(uptr Size) {
DCHECK_LE(Size, MaxSize);
return Base::getMaxCachedHint(Size);
}
@@ -178,7 +181,7 @@ class TableSizeClassMap : public SizeClassMapBase<Config> {
static constexpr LSBTable LTable = {};
public:
- static const u32 MaxNumCachedHint = Config::MaxNumCachedHint;
+ static const u16 MaxNumCachedHint = Config::MaxNumCachedHint;
static const uptr NumClasses = ClassesSize + 1;
static_assert(NumClasses < 256, "");
@@ -212,7 +215,7 @@ public:
return SzTable.Tab[scaledLog2(Size - 1, Config::MidSizeLog, S)];
}
- static u32 getMaxCachedHint(uptr Size) {
+ static u16 getMaxCachedHint(uptr Size) {
DCHECK_LE(Size, MaxSize);
return Base::getMaxCachedHint(Size);
}
@@ -223,7 +226,7 @@ struct DefaultSizeClassConfig {
static const uptr MinSizeLog = 5;
static const uptr MidSizeLog = 8;
static const uptr MaxSizeLog = 17;
- static const u32 MaxNumCachedHint = 14;
+ static const u16 MaxNumCachedHint = 14;
static const uptr MaxBytesCachedLog = 10;
static const uptr SizeDelta = 0;
};
@@ -235,7 +238,7 @@ struct FuchsiaSizeClassConfig {
static const uptr MinSizeLog = 5;
static const uptr MidSizeLog = 8;
static const uptr MaxSizeLog = 17;
- static const u32 MaxNumCachedHint = 10;
+ static const u16 MaxNumCachedHint = 12;
static const uptr MaxBytesCachedLog = 10;
static const uptr SizeDelta = Chunk::getHeaderSize();
};
@@ -248,10 +251,10 @@ struct AndroidSizeClassConfig {
static const uptr MinSizeLog = 4;
static const uptr MidSizeLog = 6;
static const uptr MaxSizeLog = 16;
- static const u32 MaxNumCachedHint = 13;
+ static const u16 MaxNumCachedHint = 13;
static const uptr MaxBytesCachedLog = 13;
- static constexpr u32 Classes[] = {
+ static constexpr uptr Classes[] = {
0x00020, 0x00030, 0x00040, 0x00050, 0x00060, 0x00070, 0x00090, 0x000b0,
0x000c0, 0x000e0, 0x00120, 0x00160, 0x001c0, 0x00250, 0x00320, 0x00450,
0x00670, 0x00830, 0x00a10, 0x00c30, 0x01010, 0x01210, 0x01bd0, 0x02210,
@@ -263,10 +266,10 @@ struct AndroidSizeClassConfig {
static const uptr MinSizeLog = 4;
static const uptr MidSizeLog = 7;
static const uptr MaxSizeLog = 16;
- static const u32 MaxNumCachedHint = 14;
+ static const u16 MaxNumCachedHint = 14;
static const uptr MaxBytesCachedLog = 13;
- static constexpr u32 Classes[] = {
+ static constexpr uptr Classes[] = {
0x00020, 0x00030, 0x00040, 0x00050, 0x00060, 0x00070, 0x00080, 0x00090,
0x000a0, 0x000b0, 0x000c0, 0x000e0, 0x000f0, 0x00110, 0x00120, 0x00130,
0x00150, 0x00160, 0x00170, 0x00190, 0x001d0, 0x00210, 0x00240, 0x002a0,
@@ -286,36 +289,12 @@ typedef TableSizeClassMap<AndroidSizeClassConfig> AndroidSizeClassMap;
static_assert(AndroidSizeClassMap::usesCompressedLSBFormat(), "");
#endif
-struct SvelteSizeClassConfig {
-#if SCUDO_WORDSIZE == 64U
- static const uptr NumBits = 4;
- static const uptr MinSizeLog = 4;
- static const uptr MidSizeLog = 8;
- static const uptr MaxSizeLog = 14;
- static const u32 MaxNumCachedHint = 13;
- static const uptr MaxBytesCachedLog = 10;
- static const uptr SizeDelta = Chunk::getHeaderSize();
-#else
- static const uptr NumBits = 4;
- static const uptr MinSizeLog = 3;
- static const uptr MidSizeLog = 7;
- static const uptr MaxSizeLog = 14;
- static const u32 MaxNumCachedHint = 14;
- static const uptr MaxBytesCachedLog = 10;
- static const uptr SizeDelta = Chunk::getHeaderSize();
-#endif
-};
-
-typedef FixedSizeClassMap<SvelteSizeClassConfig> SvelteSizeClassMap;
-
-// Trusty is configured to only have one region containing blocks of size
-// 2^7 bytes.
struct TrustySizeClassConfig {
static const uptr NumBits = 1;
- static const uptr MinSizeLog = 7;
- static const uptr MidSizeLog = 7;
- static const uptr MaxSizeLog = 7;
- static const u32 MaxNumCachedHint = 8;
+ static const uptr MinSizeLog = 5;
+ static const uptr MidSizeLog = 5;
+ static const uptr MaxSizeLog = 15;
+ static const u16 MaxNumCachedHint = 12;
static const uptr MaxBytesCachedLog = 10;
static const uptr SizeDelta = 0;
};
@@ -335,8 +314,8 @@ template <typename SCMap> inline void printMap() {
const uptr L = S ? getMostSignificantSetBitIndex(S) : 0;
const uptr Cached = SCMap::getMaxCachedHint(S) * S;
Buffer.append(
- "C%02zu => S: %zu diff: +%zu %02zu%% L %zu Cached: %zu %zu; id %zu\n",
- I, S, D, P, L, SCMap::getMaxCachedHint(S), Cached,
+ "C%02zu => S: %zu diff: +%zu %02zu%% L %zu Cached: %u %zu; id %zu\n", I,
+ S, D, P, L, SCMap::getMaxCachedHint(S), Cached,
SCMap::getClassIdBySize(S));
TotalCached += Cached;
PrevS = S;
@@ -345,7 +324,7 @@ template <typename SCMap> inline void printMap() {
Buffer.output();
}
-template <typename SCMap> static void validateMap() {
+template <typename SCMap> static UNUSED void validateMap() {
for (uptr C = 0; C < SCMap::NumClasses; C++) {
if (C == SCMap::BatchClassId)
continue;
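
The narrowing of getMaxCachedHint() from u32 to u16 above is safe because the result is clamped to Config::MaxNumCachedHint, which is itself a u16. A hedged standalone version of the clamp (function and parameter names are invented for this sketch):

#include <algorithm>
#include <cassert>
#include <cstdint>

// How many blocks of `Size` bytes fit in the per-class cache budget of
// 2^MaxBytesCachedLog bytes, clamped to [1, MaxHint]. Since MaxHint is u16,
// the clamped result is guaranteed to fit in u16.
uint16_t maxCachedHint(uint64_t Size, uint32_t MaxBytesCachedLog,
                       uint16_t MaxHint) {
  assert(Size != 0);
  const uint32_t N = static_cast<uint32_t>((1ULL << MaxBytesCachedLog) / Size);
  return static_cast<uint16_t>(std::max(1U, std::min<uint32_t>(MaxHint, N)));
}

int main() {
  assert(maxCachedHint(32, 10, 14) == 14);  // 1024 / 32 = 32, clamped to 14
  assert(maxCachedHint(4096, 10, 14) == 1); // 1024 / 4096 = 0, raised to 1
  return 0;
}
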
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/stack_depot.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/stack_depot.h
index 458198fcb7aa..12c35eb2a4f3 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/stack_depot.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/stack_depot.h
@@ -62,8 +62,7 @@ class StackDepot {
// This is achieved by re-checking the hash of the stack trace before
// returning the trace.
-#ifdef SCUDO_FUZZ
- // Use smaller table sizes for fuzzing in order to reduce input size.
+#if SCUDO_SMALL_STACK_DEPOT
static const uptr TabBits = 4;
#else
static const uptr TabBits = 16;
@@ -72,7 +71,7 @@ class StackDepot {
static const uptr TabMask = TabSize - 1;
atomic_u32 Tab[TabSize] = {};
-#ifdef SCUDO_FUZZ
+#if SCUDO_SMALL_STACK_DEPOT
static const uptr RingBits = 4;
#else
static const uptr RingBits = 19;
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/stats.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/stats.h
index be5bf2d3720a..658b75863ade 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/stats.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/stats.h
@@ -12,6 +12,7 @@
#include "atomic_helpers.h"
#include "list.h"
#include "mutex.h"
+#include "thread_annotations.h"
#include <string.h>
@@ -60,19 +61,19 @@ class GlobalStats : public LocalStats {
public:
void init() { LocalStats::init(); }
- void link(LocalStats *S) {
+ void link(LocalStats *S) EXCLUDES(Mutex) {
ScopedLock L(Mutex);
StatsList.push_back(S);
}
- void unlink(LocalStats *S) {
+ void unlink(LocalStats *S) EXCLUDES(Mutex) {
ScopedLock L(Mutex);
StatsList.remove(S);
for (uptr I = 0; I < StatCount; I++)
add(static_cast<StatType>(I), S->get(static_cast<StatType>(I)));
}
- void get(uptr *S) const {
+ void get(uptr *S) const EXCLUDES(Mutex) {
ScopedLock L(Mutex);
for (uptr I = 0; I < StatCount; I++)
S[I] = LocalStats::get(static_cast<StatType>(I));
@@ -85,15 +86,15 @@ public:
S[I] = static_cast<sptr>(S[I]) >= 0 ? S[I] : 0;
}
- void lock() { Mutex.lock(); }
- void unlock() { Mutex.unlock(); }
+ void lock() ACQUIRE(Mutex) { Mutex.lock(); }
+ void unlock() RELEASE(Mutex) { Mutex.unlock(); }
- void disable() { lock(); }
- void enable() { unlock(); }
+ void disable() ACQUIRE(Mutex) { lock(); }
+ void enable() RELEASE(Mutex) { unlock(); }
private:
mutable HybridMutex Mutex;
- DoublyLinkedList<LocalStats> StatsList;
+ DoublyLinkedList<LocalStats> StatsList GUARDED_BY(Mutex);
};
} // namespace scudo
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/string_utils.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/string_utils.cpp
index acf85889fcff..d4e4e3becd0e 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/string_utils.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/string_utils.cpp
@@ -195,6 +195,28 @@ static int formatString(char *Buffer, uptr BufferLength, const char *Format,
appendChar(&Buffer, BufferEnd, static_cast<char>(va_arg(Args, int)));
break;
}
+ // In Scudo, `s64`/`u64` are supposed to use `lld` and `llu` respectively.
+ // However, `-Wformat` doesn't know we have a different parser for those
+ // placeholders, and it keeps complaining about the type mismatch on 64-bit
+ // platforms, which use `ld`/`lu` for `s64`/`u64`. Therefore, to silence the
+ // warning, we use `PRId64`/`PRIu64` when printing `s64`/`u64` and handle
+ // `ld`/`lu` here.
+ case 'l': {
+ ++Cur;
+ RAW_CHECK(*Cur == 'd' || *Cur == 'u');
+
+ if (*Cur == 'd') {
+ DVal = va_arg(Args, s64);
+ Res +=
+ appendSignedDecimal(&Buffer, BufferEnd, DVal, Width, PadWithZero);
+ } else {
+ UVal = va_arg(Args, u64);
+ Res += appendUnsigned(&Buffer, BufferEnd, UVal, 10, Width, PadWithZero,
+ false);
+ }
+
+ break;
+ }
case '%': {
RAW_CHECK_MSG(!HaveFlags, PrintfFormatsHelp);
Res += appendChar(&Buffer, BufferEnd, '%');
@@ -218,7 +240,7 @@ int formatString(char *Buffer, uptr BufferLength, const char *Format, ...) {
return Res;
}
-void ScopedString::append(const char *Format, va_list Args) {
+void ScopedString::vappend(const char *Format, va_list Args) {
va_list ArgsCopy;
va_copy(ArgsCopy, Args);
// formatString doesn't currently support a null buffer or zero buffer length,
@@ -236,20 +258,18 @@ void ScopedString::append(const char *Format, va_list Args) {
va_end(ArgsCopy);
}
-FORMAT(2, 3)
void ScopedString::append(const char *Format, ...) {
va_list Args;
va_start(Args, Format);
- append(Format, Args);
+ vappend(Format, Args);
va_end(Args);
}
-FORMAT(1, 2)
void Printf(const char *Format, ...) {
va_list Args;
va_start(Args, Format);
ScopedString Msg;
- Msg.append(Format, Args);
+ Msg.vappend(Format, Args);
outputRaw(Msg.data());
va_end(Args);
}
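
The `%l` branch added above peeks at the next conversion character and pulls a 64-bit argument either way, so `%ld`/`%lu` keep working even though the FORMAT attributes steer callers toward PRId64/PRIu64. A minimal standalone sketch of that dispatch (only handling "%ld"/"%lu", with snprintf standing in for Scudo's append helpers):

#include <cassert>
#include <cstdarg>
#include <cstddef>
#include <cstdint>
#include <cstdio>

int miniFormat(char *Out, size_t Len, const char *Fmt, ...) {
  va_list Args;
  va_start(Args, Fmt);
  int Res = 0;
  if (Fmt[0] == '%' && Fmt[1] == 'l') {
    // Peek at the conversion char, as the parser above does with ++Cur.
    if (Fmt[2] == 'd')
      Res = snprintf(Out, Len, "%lld",
                     static_cast<long long>(va_arg(Args, int64_t)));
    else if (Fmt[2] == 'u')
      Res = snprintf(Out, Len, "%llu",
                     static_cast<unsigned long long>(va_arg(Args, uint64_t)));
  }
  va_end(Args);
  return Res;
}

int main() {
  char Buf[32];
  miniFormat(Buf, sizeof(Buf), "%ld", static_cast<int64_t>(-42));
  assert(Buf[0] == '-' && Buf[1] == '4' && Buf[2] == '2');
  return 0;
}
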
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/string_utils.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/string_utils.h
index 06d23d42246d..a4cab5268ede 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/string_utils.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/string_utils.h
@@ -25,16 +25,18 @@ public:
String.clear();
String.push_back('\0');
}
- void append(const char *Format, va_list Args);
- void append(const char *Format, ...);
+ void vappend(const char *Format, va_list Args);
+ void append(const char *Format, ...) FORMAT(2, 3);
void output() const { outputRaw(String.data()); }
+ void reserve(size_t Size) { String.reserve(Size + 1); }
private:
Vector<char> String;
};
-int formatString(char *Buffer, uptr BufferLength, const char *Format, ...);
-void Printf(const char *Format, ...);
+int formatString(char *Buffer, uptr BufferLength, const char *Format, ...)
+ FORMAT(3, 4);
+void Printf(const char *Format, ...) FORMAT(1, 2);
} // namespace scudo
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/thread_annotations.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/thread_annotations.h
new file mode 100644
index 000000000000..68a1087c2034
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/thread_annotations.h
@@ -0,0 +1,70 @@
+//===-- thread_annotations.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_THREAD_ANNOTATIONS_
+#define SCUDO_THREAD_ANNOTATIONS_
+
+// Enable thread safety attributes only with clang.
+// The attributes can be safely ignored when compiling with other compilers.
+#if defined(__clang__)
+#define THREAD_ANNOTATION_ATTRIBUTE_(x) __attribute__((x))
+#else
+#define THREAD_ANNOTATION_ATTRIBUTE_(x) // no-op
+#endif
+
+#define CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE_(capability(x))
+
+#define SCOPED_CAPABILITY THREAD_ANNOTATION_ATTRIBUTE_(scoped_lockable)
+
+#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE_(guarded_by(x))
+
+#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE_(pt_guarded_by(x))
+
+#define ACQUIRED_BEFORE(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(acquired_before(__VA_ARGS__))
+
+#define ACQUIRED_AFTER(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(acquired_after(__VA_ARGS__))
+
+#define REQUIRES(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(requires_capability(__VA_ARGS__))
+
+#define REQUIRES_SHARED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(requires_shared_capability(__VA_ARGS__))
+
+#define ACQUIRE(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(acquire_capability(__VA_ARGS__))
+
+#define ACQUIRE_SHARED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(acquire_shared_capability(__VA_ARGS__))
+
+#define RELEASE(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(release_capability(__VA_ARGS__))
+
+#define RELEASE_SHARED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(release_shared_capability(__VA_ARGS__))
+
+#define TRY_ACQUIRE(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(try_acquire_capability(__VA_ARGS__))
+
+#define TRY_ACQUIRE_SHARED(...) \
+ THREAD_ANNOTATION_ATTRIBUTE_(try_acquire_shared_capability(__VA_ARGS__))
+
+#define EXCLUDES(...) THREAD_ANNOTATION_ATTRIBUTE_(locks_excluded(__VA_ARGS__))
+
+#define ASSERT_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE_(assert_capability(x))
+
+#define ASSERT_SHARED_CAPABILITY(x) \
+ THREAD_ANNOTATION_ATTRIBUTE_(assert_shared_capability(x))
+
+#define RETURN_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE_(lock_returned(x))
+
+#define NO_THREAD_SAFETY_ANALYSIS \
+ THREAD_ANNOTATION_ATTRIBUTE_(no_thread_safety_analysis)
+
+#endif // SCUDO_THREAD_ANNOTATIONS_
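
As a rough usage sketch of the macros defined in this header (SimpleMutex and FlagStore are hypothetical types, not Scudo's): compile with clang and -Wthread-safety to get the checks; with other compilers the macros degrade to no-ops.

#include "thread_annotations.h"

class CAPABILITY("mutex") SimpleMutex {
public:
  void lock() ACQUIRE() {}
  void unlock() RELEASE() {}
};

class FlagStore {
public:
  void set(int V) EXCLUDES(Mu) {
    Mu.lock();
    Flag = V; // OK: Mu is held here.
    Mu.unlock();
  }
  // int read() { return Flag; } // Would warn: Flag read without holding Mu.

private:
  SimpleMutex Mu;
  int Flag GUARDED_BY(Mu) = 0;
};
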
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/timing.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/timing.cpp
new file mode 100644
index 000000000000..59ae21d10f0f
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/timing.cpp
@@ -0,0 +1,29 @@
+//===-- timing.cpp ----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "timing.h"
+
+namespace scudo {
+
+Timer::~Timer() {
+ if (Manager)
+ Manager->report(*this);
+}
+
+ScopedTimer::ScopedTimer(TimingManager &Manager, const char *Name)
+ : Timer(Manager.getOrCreateTimer(Name)) {
+ start();
+}
+
+ScopedTimer::ScopedTimer(TimingManager &Manager, const Timer &Nest,
+ const char *Name)
+ : Timer(Manager.nest(Nest, Name)) {
+ start();
+}
+
+} // namespace scudo
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/timing.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/timing.h
new file mode 100644
index 000000000000..84caa79e5c3a
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/timing.h
@@ -0,0 +1,221 @@
+//===-- timing.h ------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_TIMING_H_
+#define SCUDO_TIMING_H_
+
+#include "common.h"
+#include "mutex.h"
+#include "string_utils.h"
+#include "thread_annotations.h"
+
+#include <inttypes.h>
+#include <string.h>
+
+namespace scudo {
+
+class TimingManager;
+
+// A simple timer for evaluating the execution time of code snippets. It can
+// be used along with a TimingManager or standalone.
+class Timer {
+public:
+ // A Timer that is not bound to a TimingManager is expected to handle its
+ // logging manually. Otherwise, the TimingManager does the logging for you.
+ Timer() = default;
+ Timer(Timer &&Other)
+ : StartTime(0), AccTime(Other.AccTime), Manager(Other.Manager),
+ HandleId(Other.HandleId) {
+ Other.Manager = nullptr;
+ }
+
+ Timer(const Timer &) = delete;
+
+ ~Timer();
+
+ void start() {
+ CHECK_EQ(StartTime, 0U);
+ StartTime = getMonotonicTime();
+ }
+ void stop() {
+ AccTime += getMonotonicTime() - StartTime;
+ StartTime = 0;
+ }
+ u64 getAccumulatedTime() const { return AccTime; }
+
+ // Unset the bound TimingManager so that we don't report the data back. This
+ // is useful if we only want to track a subset of certain scope events.
+ void ignore() {
+ StartTime = 0;
+ AccTime = 0;
+ Manager = nullptr;
+ }
+
+protected:
+ friend class TimingManager;
+ Timer(TimingManager &Manager, u32 HandleId)
+ : Manager(&Manager), HandleId(HandleId) {}
+
+ u64 StartTime = 0;
+ u64 AccTime = 0;
+ TimingManager *Manager = nullptr;
+ u32 HandleId;
+};
+
+// A RAII-style wrapper for easy scope execution measurement. Note that, in
+// order not to take additional space for messages like `Name`, it only works
+// with a TimingManager.
+class ScopedTimer : public Timer {
+public:
+ ScopedTimer(TimingManager &Manager, const char *Name);
+ ScopedTimer(TimingManager &Manager, const Timer &Nest, const char *Name);
+ ~ScopedTimer() { stop(); }
+};
+
+// In Scudo, the execution time of a single run of a code snippet may not be
+// useful; we are more interested in the average time across several runs.
+// TimingManager lets registered timers report their data and periodically
+// reports the average execution time for each timer.
+class TimingManager {
+public:
+ TimingManager(u32 PrintingInterval = DefaultPrintingInterval)
+ : PrintingInterval(PrintingInterval) {}
+ ~TimingManager() {
+ if (NumAllocatedTimers != 0)
+ printAll();
+ }
+
+ Timer getOrCreateTimer(const char *Name) EXCLUDES(Mutex) {
+ ScopedLock L(Mutex);
+
+ CHECK_LT(strlen(Name), MaxLenOfTimerName);
+ for (u32 I = 0; I < NumAllocatedTimers; ++I) {
+ if (strncmp(Name, Timers[I].Name, MaxLenOfTimerName) == 0)
+ return Timer(*this, I);
+ }
+
+ CHECK_LT(NumAllocatedTimers, MaxNumberOfTimers);
+ strncpy(Timers[NumAllocatedTimers].Name, Name, MaxLenOfTimerName);
+ TimerRecords[NumAllocatedTimers].AccumulatedTime = 0;
+ TimerRecords[NumAllocatedTimers].Occurrence = 0;
+ return Timer(*this, NumAllocatedTimers++);
+ }
+
+ // Add a sub-Timer associated with another Timer. This is used when we want
+ // to break down the execution time within the scope of a Timer.
+ // For example,
+ // void Foo() {
+ // // T1 records the time spent in both first and second tasks.
+ // ScopedTimer T1(getTimingManager(), "Task1");
+ // {
+ //   // T2 records the time spent in the first task.
+ //   ScopedTimer T2(getTimingManager(), T1, "Task2");
+ // // Do first task.
+ // }
+ // // Do second task.
+ // }
+ //
+ // The report will show proper indents to indicate the nested relation like,
+ // -- Average Operation Time -- -- Name (# of Calls) --
+ // 10.0(ns) Task1 (1)
+ // 5.0(ns) Task2 (1)
+ Timer nest(const Timer &T, const char *Name) EXCLUDES(Mutex) {
+ CHECK_EQ(T.Manager, this);
+ Timer Nesting = getOrCreateTimer(Name);
+
+ ScopedLock L(Mutex);
+ CHECK_NE(Nesting.HandleId, T.HandleId);
+ Timers[Nesting.HandleId].Nesting = T.HandleId;
+ return Nesting;
+ }
+
+ void report(const Timer &T) EXCLUDES(Mutex) {
+ ScopedLock L(Mutex);
+
+ const u32 HandleId = T.HandleId;
+ CHECK_LT(HandleId, MaxNumberOfTimers);
+ TimerRecords[HandleId].AccumulatedTime += T.getAccumulatedTime();
+ ++TimerRecords[HandleId].Occurrence;
+ ++NumEventsReported;
+ if (NumEventsReported % PrintingInterval == 0)
+ printAllImpl();
+ }
+
+ void printAll() EXCLUDES(Mutex) {
+ ScopedLock L(Mutex);
+ printAllImpl();
+ }
+
+private:
+ void printAllImpl() REQUIRES(Mutex) {
+ static char NameHeader[] = "-- Name (# of Calls) --";
+ static char AvgHeader[] = "-- Average Operation Time --";
+ ScopedString Str;
+ Str.append("%-15s %-15s\n", AvgHeader, NameHeader);
+
+ for (u32 I = 0; I < NumAllocatedTimers; ++I) {
+ if (Timers[I].Nesting != MaxNumberOfTimers)
+ continue;
+ printImpl(Str, I);
+ }
+
+ Str.output();
+ }
+
+ void printImpl(ScopedString &Str, const u32 HandleId,
+ const u32 ExtraIndent = 0) REQUIRES(Mutex) {
+ const u64 AccumulatedTime = TimerRecords[HandleId].AccumulatedTime;
+ const u64 Occurrence = TimerRecords[HandleId].Occurrence;
+ const u64 Integral = Occurrence == 0 ? 0 : AccumulatedTime / Occurrence;
+ // Keeping a single fractional digit is enough, and it makes the layout
+ // easier to maintain.
+ const u64 Fraction =
+ Occurrence == 0 ? 0
+ : ((AccumulatedTime % Occurrence) * 10) / Occurrence;
+
+ Str.append("%14" PRId64 ".%" PRId64 "(ns) %-11s", Integral, Fraction, " ");
+
+ for (u32 I = 0; I < ExtraIndent; ++I)
+ Str.append("%s", " ");
+ Str.append("%s (%" PRId64 ")\n", Timers[HandleId].Name, Occurrence);
+
+ for (u32 I = 0; I < NumAllocatedTimers; ++I)
+ if (Timers[I].Nesting == HandleId)
+ printImpl(Str, I, ExtraIndent + 1);
+ }
+
+ // Instead of maintaining pages for timer registration, a static buffer is
+ // sufficient for most use cases in Scudo.
+ static constexpr u32 MaxNumberOfTimers = 50;
+ static constexpr u32 MaxLenOfTimerName = 50;
+ static constexpr u32 DefaultPrintingInterval = 100;
+
+ struct Record {
+ u64 AccumulatedTime = 0;
+ u64 Occurrence = 0;
+ };
+
+ struct TimerInfo {
+ char Name[MaxLenOfTimerName + 1];
+ u32 Nesting = MaxNumberOfTimers;
+ };
+
+ HybridMutex Mutex;
+ // The frequency of proactively dumping the timer statistics. For example, the
+ // default setting is to dump the statistics every 100 reported events.
+ u32 PrintingInterval GUARDED_BY(Mutex);
+ u64 NumEventsReported GUARDED_BY(Mutex) = 0;
+ u32 NumAllocatedTimers GUARDED_BY(Mutex) = 0;
+ TimerInfo Timers[MaxNumberOfTimers] GUARDED_BY(Mutex);
+ Record TimerRecords[MaxNumberOfTimers] GUARDED_BY(Mutex);
+};
+
+} // namespace scudo
+
+#endif // SCUDO_TIMING_H_
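
A hedged usage sketch of the API defined above (the work() function and the timer names are hypothetical; assumes timing.h is included). Nested ScopedTimers produce the indented average-time report described in the nest() comment:

scudo::TimingManager Manager;

void work() {
  // Records the whole scope under "work".
  scudo::ScopedTimer Outer(Manager, "work");
  {
    // Nested under "work": shown indented one level in the report.
    scudo::ScopedTimer Inner(Manager, Outer, "step1");
    // ... first step ...
  }
  // ... second step, attributed to "work" only ...
}
// Manager.printAll() (or the periodic dump every PrintingInterval reported
// events) emits the averaged, nested table.
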
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/trusty.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/trusty.cpp
index 81d6bc585f09..26b349c6e506 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/trusty.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/trusty.cpp
@@ -12,17 +12,18 @@
#include "common.h"
#include "mutex.h"
-#include "string_utils.h"
+#include "report_linux.h"
#include "trusty.h"
#include <errno.h> // for errno
+#include <lk/err_ptr.h> // for PTR_ERR and IS_ERR
#include <stdio.h> // for printf()
#include <stdlib.h> // for getenv()
#include <sys/auxv.h> // for getauxval()
#include <time.h> // for clock_gettime()
+#include <trusty_err.h> // for lk_err_to_errno()
#include <trusty_syscalls.h> // for _trusty_brk()
-
-#define SBRK_ALIGN 32
+#include <uapi/mm.h> // for MMAP flags
namespace scudo {
@@ -30,35 +31,39 @@ uptr getPageSize() { return getauxval(AT_PAGESZ); }
void NORETURN die() { abort(); }
-void *map(UNUSED void *Addr, uptr Size, UNUSED const char *Name, uptr Flags,
+void *map(void *Addr, uptr Size, const char *Name, uptr Flags,
UNUSED MapPlatformData *Data) {
- // Calling _trusty_brk(0) returns the current program break.
- uptr ProgramBreak = reinterpret_cast<uptr>(_trusty_brk(0));
- uptr Start;
- uptr End;
-
- Start = roundUpTo(ProgramBreak, SBRK_ALIGN);
- // Don't actually extend the heap if MAP_NOACCESS flag is set since this is
- // the case where Scudo tries to reserve a memory region without mapping
- // physical pages.
+ uint32_t MmapFlags =
+ MMAP_FLAG_ANONYMOUS | MMAP_FLAG_PROT_READ | MMAP_FLAG_PROT_WRITE;
+
+ // If the MAP_NOACCESS flag is set, Scudo tries to reserve
+ // a memory region without mapping physical pages. This corresponds
+ // to MMAP_FLAG_NO_PHYSICAL in Trusty.
if (Flags & MAP_NOACCESS)
- return reinterpret_cast<void *>(Start);
-
- // Attempt to extend the heap by Size bytes using _trusty_brk.
- End = roundUpTo(Start + Size, SBRK_ALIGN);
- ProgramBreak =
- reinterpret_cast<uptr>(_trusty_brk(reinterpret_cast<void *>(End)));
- if (ProgramBreak < End) {
- errno = ENOMEM;
- dieOnMapUnmapError(Size);
+ MmapFlags |= MMAP_FLAG_NO_PHYSICAL;
+ if (Addr)
+ MmapFlags |= MMAP_FLAG_FIXED_NOREPLACE;
+
+ if (Flags & MAP_MEMTAG)
+ MmapFlags |= MMAP_FLAG_PROT_MTE;
+
+ void *P = (void *)_trusty_mmap(Addr, Size, MmapFlags, 0);
+
+ if (IS_ERR(P)) {
+ errno = lk_err_to_errno(PTR_ERR(P));
+ if (!(Flags & MAP_ALLOWNOMEM) || errno != ENOMEM)
+ reportMapError(Size);
return nullptr;
}
- return reinterpret_cast<void *>(Start); // Base of new reserved region.
+
+ return P;
}
-// Unmap is a no-op since Trusty uses sbrk instead of memory mapping.
void unmap(UNUSED void *Addr, UNUSED uptr Size, UNUSED uptr Flags,
- UNUSED MapPlatformData *Data) {}
+ UNUSED MapPlatformData *Data) {
+ if (_trusty_munmap(Addr, Size) != 0)
+ reportUnmapError(reinterpret_cast<uptr>(Addr), Size);
+}
void setMemoryPermission(UNUSED uptr Addr, UNUSED uptr Size, UNUSED uptr Flags,
UNUSED MapPlatformData *Data) {}
@@ -76,6 +81,8 @@ void HybridMutex::lockSlow() {}
void HybridMutex::unlock() {}
+void HybridMutex::assertHeldImpl() {}
+
u64 getMonotonicTime() {
timespec TS;
clock_gettime(CLOCK_MONOTONIC, &TS);
@@ -83,6 +90,17 @@ u64 getMonotonicTime() {
static_cast<u64>(TS.tv_nsec);
}
+u64 getMonotonicTimeFast() {
+#if defined(CLOCK_MONOTONIC_COARSE)
+ timespec TS;
+ clock_gettime(CLOCK_MONOTONIC_COARSE, &TS);
+ return static_cast<u64>(TS.tv_sec) * (1000ULL * 1000 * 1000) +
+ static_cast<u64>(TS.tv_nsec);
+#else
+ return getMonotonicTime();
+#endif
+}
+
u32 getNumberOfCPUs() { return 0; }
u32 getThreadID() { return 0; }
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd.h
index b400a3b56da9..b2108a01900b 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd.h
@@ -12,6 +12,7 @@
#include "atomic_helpers.h"
#include "common.h"
#include "mutex.h"
+#include "thread_annotations.h"
#include <limits.h> // for PTHREAD_DESTRUCTOR_ITERATIONS
#include <pthread.h>
@@ -24,41 +25,64 @@
namespace scudo {
template <class Allocator> struct alignas(SCUDO_CACHE_LINE_SIZE) TSD {
- typename Allocator::CacheT Cache;
- typename Allocator::QuarantineCacheT QuarantineCache;
using ThisT = TSD<Allocator>;
u8 DestructorIterations = 0;
- void init(Allocator *Instance) {
+ void init(Allocator *Instance) NO_THREAD_SAFETY_ANALYSIS {
DCHECK_EQ(DestructorIterations, 0U);
DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
Instance->initCache(&Cache);
DestructorIterations = PTHREAD_DESTRUCTOR_ITERATIONS;
}
- void commitBack(Allocator *Instance) { Instance->commitBack(this); }
-
- inline bool tryLock() {
+ inline bool tryLock() NO_THREAD_SAFETY_ANALYSIS {
if (Mutex.tryLock()) {
atomic_store_relaxed(&Precedence, 0);
return true;
}
if (atomic_load_relaxed(&Precedence) == 0)
- atomic_store_relaxed(
- &Precedence,
- static_cast<uptr>(getMonotonicTime() >> FIRST_32_SECOND_64(16, 0)));
+ atomic_store_relaxed(&Precedence,
+ static_cast<uptr>(getMonotonicTimeFast() >>
+ FIRST_32_SECOND_64(16, 0)));
return false;
}
- inline void lock() {
+ inline void lock() NO_THREAD_SAFETY_ANALYSIS {
atomic_store_relaxed(&Precedence, 0);
Mutex.lock();
}
- inline void unlock() { Mutex.unlock(); }
+ inline void unlock() NO_THREAD_SAFETY_ANALYSIS { Mutex.unlock(); }
inline uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }
+ void commitBack(Allocator *Instance) { Instance->commitBack(this); }
+
+ // As noted in the comments attached to `getCache()`, the TSD doesn't always
+ // need to be locked. In that case, we only skip the check until we have all
+ // TSDs locked on all paths.
+ void assertLocked(bool BypassCheck) ASSERT_CAPABILITY(Mutex) {
+ if (SCUDO_DEBUG && !BypassCheck)
+ Mutex.assertHeld();
+ }
+
+ // Ideally, we would assert that all operations on Cache/QuarantineCache are
+ // performed with the `Mutex` acquired. However, the current architecture of
+ // accessing the TSD doesn't cooperate easily with the thread-safety analysis
+ // because of pointer aliasing. So for now we only add the assertion to the
+ // getters of Cache/QuarantineCache.
+ //
+ // TODO(chiahungduan): Ideally, we want to do `Mutex.assertHeld()`, but
+ // acquiring a TSD doesn't always require holding the lock. Add this assertion
+ // once the lock is always acquired.
+ typename Allocator::CacheT &getCache() REQUIRES(Mutex) { return Cache; }
+ typename Allocator::QuarantineCacheT &getQuarantineCache() REQUIRES(Mutex) {
+ return QuarantineCache;
+ }
+
private:
HybridMutex Mutex;
atomic_uptr Precedence = {};
+
+ typename Allocator::CacheT Cache GUARDED_BY(Mutex);
+ typename Allocator::QuarantineCacheT QuarantineCache GUARDED_BY(Mutex);
};
} // namespace scudo
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd_exclusive.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd_exclusive.h
index bba0c277c6a7..238367420238 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd_exclusive.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd_exclusive.h
@@ -11,11 +11,13 @@
#include "tsd.h"
+#include "string_utils.h"
+
namespace scudo {
struct ThreadState {
bool DisableMemInit : 1;
- enum {
+ enum : unsigned {
NotInitialized = 0,
Initialized,
TornDown,
@@ -25,7 +27,7 @@ struct ThreadState {
template <class Allocator> void teardownThread(void *Ptr);
template <class Allocator> struct TSDRegistryExT {
- void init(Allocator *Instance) {
+ void init(Allocator *Instance) REQUIRES(Mutex) {
DCHECK(!Initialized);
Instance->init();
CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread<Allocator>), 0);
@@ -33,14 +35,14 @@ template <class Allocator> struct TSDRegistryExT {
Initialized = true;
}
- void initOnceMaybe(Allocator *Instance) {
+ void initOnceMaybe(Allocator *Instance) EXCLUDES(Mutex) {
ScopedLock L(Mutex);
if (LIKELY(Initialized))
return;
init(Instance); // Sets Initialized.
}
- void unmapTestOnly(Allocator *Instance) {
+ void unmapTestOnly(Allocator *Instance) EXCLUDES(Mutex) {
DCHECK(Instance);
if (reinterpret_cast<Allocator *>(pthread_getspecific(PThreadKey))) {
DCHECK_EQ(reinterpret_cast<Allocator *>(pthread_getspecific(PThreadKey)),
@@ -53,16 +55,32 @@ template <class Allocator> struct TSDRegistryExT {
FallbackTSD.commitBack(Instance);
FallbackTSD = {};
State = {};
+ ScopedLock L(Mutex);
Initialized = false;
}
+ void drainCaches(Allocator *Instance) {
+ // We don't have a way to iterate over all thread-local `ThreadTSD`s. Simply
+ // drain the `ThreadTSD` of the current thread and the `FallbackTSD`.
+ Instance->drainCache(&ThreadTSD);
+ FallbackTSD.lock();
+ Instance->drainCache(&FallbackTSD);
+ FallbackTSD.unlock();
+ }
+
ALWAYS_INLINE void initThreadMaybe(Allocator *Instance, bool MinimalInit) {
if (LIKELY(State.InitState != ThreadState::NotInitialized))
return;
initThread(Instance, MinimalInit);
}
- ALWAYS_INLINE TSD<Allocator> *getTSDAndLock(bool *UnlockRequired) {
+ // TODO(chiahungduan): Consider removing the argument `UnlockRequired` by
+ // embedding the logic into TSD or always locking the TSD. That would let us
+ // properly mark the thread annotations here and add proper runtime assertions
+ // in the member functions of TSD. For example, assert the lock is acquired
+ // before calling TSD::commitBack().
+ ALWAYS_INLINE TSD<Allocator> *
+ getTSDAndLock(bool *UnlockRequired) NO_THREAD_SAFETY_ANALYSIS {
if (LIKELY(State.InitState == ThreadState::Initialized &&
!atomic_load(&Disabled, memory_order_acquire))) {
*UnlockRequired = false;
@@ -75,19 +93,19 @@ template <class Allocator> struct TSDRegistryExT {
// To disable the exclusive TSD registry, we effectively lock the fallback TSD
// and force all threads to attempt to use it instead of their local one.
- void disable() {
+ void disable() NO_THREAD_SAFETY_ANALYSIS {
Mutex.lock();
FallbackTSD.lock();
atomic_store(&Disabled, 1U, memory_order_release);
}
- void enable() {
+ void enable() NO_THREAD_SAFETY_ANALYSIS {
atomic_store(&Disabled, 0U, memory_order_release);
FallbackTSD.unlock();
Mutex.unlock();
}
- bool setOption(Option O, UNUSED sptr Value) {
+ bool setOption(Option O, sptr Value) {
if (O == Option::ThreadDisableMemInit)
State.DisableMemInit = Value;
if (O == Option::MaxTSDsCount)
@@ -97,6 +115,13 @@ template <class Allocator> struct TSDRegistryExT {
bool getDisableMemInit() { return State.DisableMemInit; }
+ void getStats(ScopedString *Str) {
+ // We don't have a way to iterate over all thread-local `ThreadTSD`s. Instead
+ // of printing only the current thread's `ThreadTSD`, which could be
+ // misleading, we just skip it.
+ Str->append("Exclusive TSD doesn't support iterating over TSDs\n");
+ }
+
private:
// Using minimal initialization allows for global initialization while keeping
// the thread specific structure untouched. The fallback structure will be
@@ -113,7 +138,7 @@ private:
}
pthread_key_t PThreadKey = {};
- bool Initialized = false;
+ bool Initialized GUARDED_BY(Mutex) = false;
atomic_u8 Disabled = {};
TSD<Allocator> FallbackTSD;
HybridMutex Mutex;
@@ -128,7 +153,8 @@ thread_local TSD<Allocator> TSDRegistryExT<Allocator>::ThreadTSD;
template <class Allocator>
thread_local ThreadState TSDRegistryExT<Allocator>::State;
-template <class Allocator> void teardownThread(void *Ptr) {
+template <class Allocator>
+void teardownThread(void *Ptr) NO_THREAD_SAFETY_ANALYSIS {
typedef TSDRegistryExT<Allocator> TSDRegistryT;
Allocator *Instance = reinterpret_cast<Allocator *>(Ptr);
// The glibc POSIX thread-local-storage deallocation routine calls user
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd_shared.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd_shared.h
index 1c2a880416b9..1bca578ee14b 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd_shared.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/tsd_shared.h
@@ -11,6 +11,8 @@
#include "tsd.h"
+#include "string_utils.h"
+
#if SCUDO_HAS_PLATFORM_TLS_SLOT
// This is a platform-provided header that needs to be on the include path when
// Scudo is compiled. It must declare a function with the prototype:
@@ -24,7 +26,7 @@ namespace scudo {
template <class Allocator, u32 TSDsArraySize, u32 DefaultTSDCount>
struct TSDRegistrySharedT {
- void init(Allocator *Instance) {
+ void init(Allocator *Instance) REQUIRES(Mutex) {
DCHECK(!Initialized);
Instance->init();
for (u32 I = 0; I < TSDsArraySize; I++)
@@ -35,22 +37,32 @@ struct TSDRegistrySharedT {
Initialized = true;
}
- void initOnceMaybe(Allocator *Instance) {
+ void initOnceMaybe(Allocator *Instance) EXCLUDES(Mutex) {
ScopedLock L(Mutex);
if (LIKELY(Initialized))
return;
init(Instance); // Sets Initialized.
}
- void unmapTestOnly(Allocator *Instance) {
+ void unmapTestOnly(Allocator *Instance) EXCLUDES(Mutex) {
for (u32 I = 0; I < TSDsArraySize; I++) {
TSDs[I].commitBack(Instance);
TSDs[I] = {};
}
setCurrentTSD(nullptr);
+ ScopedLock L(Mutex);
Initialized = false;
}
+ void drainCaches(Allocator *Instance) {
+ ScopedLock L(MutexTSDs);
+ for (uptr I = 0; I < NumberOfTSDs; ++I) {
+ TSDs[I].lock();
+ Instance->drainCache(&TSDs[I]);
+ TSDs[I].unlock();
+ }
+ }
+
ALWAYS_INLINE void initThreadMaybe(Allocator *Instance,
UNUSED bool MinimalInit) {
if (LIKELY(getCurrentTSD()))
@@ -58,7 +70,10 @@ struct TSDRegistrySharedT {
initThread(Instance);
}
- ALWAYS_INLINE TSD<Allocator> *getTSDAndLock(bool *UnlockRequired) {
+ // TSDs is an array of locks, which is not supported by the thread-safety
+ // capability annotations.
+ ALWAYS_INLINE TSD<Allocator> *
+ getTSDAndLock(bool *UnlockRequired) NO_THREAD_SAFETY_ANALYSIS {
TSD<Allocator> *TSD = getCurrentTSD();
DCHECK(TSD);
*UnlockRequired = true;
@@ -75,13 +90,13 @@ struct TSDRegistrySharedT {
return getTSDAndLockSlow(TSD);
}
- void disable() {
+ void disable() NO_THREAD_SAFETY_ANALYSIS {
Mutex.lock();
for (u32 I = 0; I < TSDsArraySize; I++)
TSDs[I].lock();
}
- void enable() {
+ void enable() NO_THREAD_SAFETY_ANALYSIS {
for (s32 I = static_cast<s32>(TSDsArraySize - 1); I >= 0; I--)
TSDs[I].unlock();
Mutex.unlock();
@@ -98,6 +113,24 @@ struct TSDRegistrySharedT {
bool getDisableMemInit() const { return *getTlsPtr() & 1; }
+ void getStats(ScopedString *Str) EXCLUDES(MutexTSDs) {
+ ScopedLock L(MutexTSDs);
+
+ Str->append("Stats: SharedTSDs: %u available; total %u\n", NumberOfTSDs,
+ TSDsArraySize);
+ for (uptr I = 0; I < NumberOfTSDs; ++I) {
+ TSDs[I].lock();
+ // Theoretically, we want to mark TSD::lock()/TSD::unlock() with proper
+ // thread annotations. However, given that the TSD is only locked on the
+ // shared path, we do the assertion separately to avoid confusing the
+ // analyzer.
+ TSDs[I].assertLocked(/*BypassCheck=*/true);
+ Str->append(" Shared TSD[%zu]:\n", I);
+ TSDs[I].getCache().getStats(Str);
+ TSDs[I].unlock();
+ }
+ }
+
private:
ALWAYS_INLINE uptr *getTlsPtr() const {
#if SCUDO_HAS_PLATFORM_TLS_SLOT
@@ -119,7 +152,7 @@ private:
return reinterpret_cast<TSD<Allocator> *>(*getTlsPtr() & ~1ULL);
}
- bool setNumberOfTSDs(u32 N) {
+ bool setNumberOfTSDs(u32 N) EXCLUDES(MutexTSDs) {
ScopedLock L(MutexTSDs);
if (N < NumberOfTSDs)
return false;
@@ -150,7 +183,7 @@ private:
*getTlsPtr() |= B;
}
- NOINLINE void initThread(Allocator *Instance) {
+ NOINLINE void initThread(Allocator *Instance) NO_THREAD_SAFETY_ANALYSIS {
initOnceMaybe(Instance);
// Initial context assignment is done in a plain round-robin fashion.
const u32 Index = atomic_fetch_add(&CurrentIndex, 1U, memory_order_relaxed);
@@ -158,7 +191,10 @@ private:
Instance->callPostInitCallback();
}
- NOINLINE TSD<Allocator> *getTSDAndLockSlow(TSD<Allocator> *CurrentTSD) {
+ // TSDs is an array of locks, which is not supported by the thread-safety
+ // capability annotations.
+ NOINLINE TSD<Allocator> *getTSDAndLockSlow(TSD<Allocator> *CurrentTSD)
+ EXCLUDES(MutexTSDs) {
// Use the Precedence of the current TSD as our random seed. Since we are
// in the slow path, it means that tryLock failed, and as a result it's
// very likely that said Precedence is non-zero.
@@ -202,10 +238,10 @@ private:
}
atomic_u32 CurrentIndex = {};
- u32 NumberOfTSDs = 0;
- u32 NumberOfCoPrimes = 0;
- u32 CoPrimes[TSDsArraySize] = {};
- bool Initialized = false;
+ u32 NumberOfTSDs GUARDED_BY(MutexTSDs) = 0;
+ u32 NumberOfCoPrimes GUARDED_BY(MutexTSDs) = 0;
+ u32 CoPrimes[TSDsArraySize] GUARDED_BY(MutexTSDs) = {};
+ bool Initialized GUARDED_BY(Mutex) = false;
HybridMutex Mutex;
HybridMutex MutexTSDs;
TSD<Allocator> TSDs[TSDsArraySize];
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/vector.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/vector.h
index 2c9a6e2aa655..c0f1ba0eddfa 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/vector.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/vector.h
@@ -9,25 +9,20 @@
#ifndef SCUDO_VECTOR_H_
#define SCUDO_VECTOR_H_
-#include "common.h"
+#include "mem_map.h"
#include <string.h>
namespace scudo {
-// A low-level vector based on map. May incur a significant memory overhead for
-// small vectors. The current implementation supports only POD types.
+// A low-level vector based on map. It stores the contents inline up to a fixed
+// capacity, or in an external memory buffer if it grows bigger than that. May
+// incur a significant memory overhead for small vectors. The current
+// implementation supports only POD types.
+//
+// NOTE: This class is not meant to be used directly; use Vector<T> instead.
template <typename T> class VectorNoCtor {
public:
- void init(uptr InitialCapacity = 0) {
- Data = reinterpret_cast<T *>(&LocalData[0]);
- CapacityBytes = sizeof(LocalData);
- reserve(InitialCapacity);
- }
- void destroy() {
- if (Data != reinterpret_cast<T *>(&LocalData[0]))
- unmap(Data, CapacityBytes);
- }
T &operator[](uptr I) {
DCHECK_LT(I, Size);
return Data[I];
@@ -39,7 +34,7 @@ public:
void push_back(const T &Element) {
DCHECK_LE(Size, capacity());
if (Size == capacity()) {
- const uptr NewCapacity = roundUpToPowerOfTwo(Size + 1);
+ const uptr NewCapacity = roundUpPowerOfTwo(Size + 1);
reallocate(NewCapacity);
}
memcpy(&Data[Size++], &Element, sizeof(T));
@@ -55,7 +50,7 @@ public:
uptr size() const { return Size; }
const T *data() const { return Data; }
T *data() { return Data; }
- uptr capacity() const { return CapacityBytes / sizeof(T); }
+ constexpr uptr capacity() const { return CapacityBytes / sizeof(T); }
void reserve(uptr NewSize) {
// Never downsize internal buffer.
if (NewSize > capacity())
@@ -77,28 +72,48 @@ public:
const T *end() const { return data() + size(); }
T *end() { return data() + size(); }
+protected:
+ constexpr void init(uptr InitialCapacity = 0) {
+ Data = &LocalData[0];
+ CapacityBytes = sizeof(LocalData);
+ if (InitialCapacity > capacity())
+ reserve(InitialCapacity);
+ }
+ void destroy() {
+ if (Data != &LocalData[0])
+ ExternalBuffer.unmap(ExternalBuffer.getBase(),
+ ExternalBuffer.getCapacity());
+ }
+
private:
void reallocate(uptr NewCapacity) {
DCHECK_GT(NewCapacity, 0);
DCHECK_LE(Size, NewCapacity);
- NewCapacity = roundUpTo(NewCapacity * sizeof(T), getPageSizeCached());
- T *NewData =
- reinterpret_cast<T *>(map(nullptr, NewCapacity, "scudo:vector"));
- memcpy(NewData, Data, Size * sizeof(T));
+
+ MemMapT NewExternalBuffer;
+ NewCapacity = roundUp(NewCapacity * sizeof(T), getPageSizeCached());
+ NewExternalBuffer.map(/*Addr=*/0U, NewCapacity, "scudo:vector");
+ T *NewExternalData = reinterpret_cast<T *>(NewExternalBuffer.getBase());
+
+ memcpy(NewExternalData, Data, Size * sizeof(T));
destroy();
- Data = NewData;
+
+ Data = NewExternalData;
CapacityBytes = NewCapacity;
+ ExternalBuffer = NewExternalBuffer;
}
T *Data = nullptr;
- u8 LocalData[256] = {};
uptr CapacityBytes = 0;
uptr Size = 0;
+
+ T LocalData[256 / sizeof(T)] = {};
+ MemMapT ExternalBuffer;
};
template <typename T> class Vector : public VectorNoCtor<T> {
public:
- Vector() { VectorNoCtor<T>::init(); }
+ constexpr Vector() { VectorNoCtor<T>::init(); }
explicit Vector(uptr Count) {
VectorNoCtor<T>::init(Count);
this->resize(Count);
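
With the reworked storage above, a Vector<T> starts out backed by the inline LocalData array (256 bytes' worth of elements) and only maps an external buffer once it outgrows that. For example (assuming a 4-byte int, so 64 inline slots):

scudo::Vector<int> V;    // Data points at LocalData; capacity() == 64.
for (int I = 0; I < 64; ++I)
  V.push_back(I);        // Still inline: no mapping has happened yet.
V.push_back(64);         // Exceeds capacity: reallocate() maps an external
                         // "scudo:vector" buffer and copies the elements over.
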
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.cpp
index 81c7dd60ee33..60014a0f66bf 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.cpp
@@ -12,6 +12,9 @@
#if !SCUDO_ANDROID || !_BIONIC
#include "allocator_config.h"
+#include "internal_defs.h"
+#include "platform.h"
+#include "scudo/interface.h"
#include "wrappers_c.h"
#include "wrappers_c_checks.h"
@@ -21,8 +24,6 @@
#define SCUDO_PREFIX(name) name
#define SCUDO_ALLOCATOR Allocator
-extern "C" void SCUDO_PREFIX(malloc_postinit)();
-
// Export the static allocator so that the C++ wrappers can access it.
// Technically we could have a completely separated heap for C & C++ but in
// reality the amount of cross pollination between the two is staggering.
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.h
index 6d0cecdc4b41..08dc679b34ca 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.h
@@ -32,6 +32,19 @@ struct __scudo_mallinfo {
__scudo_mallinfo_data_t keepcost;
};
+struct __scudo_mallinfo2 {
+ size_t arena;
+ size_t ordblks;
+ size_t smblks;
+ size_t hblks;
+ size_t hblkhd;
+ size_t usmblks;
+ size_t fsmblks;
+ size_t uordblks;
+ size_t fordblks;
+ size_t keepcost;
+};
+
// Android sometimes includes malloc.h no matter what, which leads to
// conflicting return types for mallinfo() if we use our own structure. So if
// struct mallinfo is declared (#define courtesy of malloc.h), use it directly.
@@ -41,4 +54,9 @@ struct __scudo_mallinfo {
#define SCUDO_MALLINFO __scudo_mallinfo
#endif
+#if !SCUDO_ANDROID || !_BIONIC
+extern "C" void malloc_postinit();
+extern HIDDEN scudo::Allocator<scudo::Config, malloc_postinit> Allocator;
+#endif
+
#endif // SCUDO_WRAPPERS_C_H_
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.inc b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.inc
index 43efb02cb860..56d8ef20156e 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.inc
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c.inc
@@ -17,6 +17,35 @@
#define SCUDO_MALLOC_ALIGNMENT FIRST_32_SECOND_64(8U, 16U)
#endif
+static void reportAllocation(void *ptr, size_t size) {
+ if (SCUDO_ENABLE_HOOKS)
+ if (__scudo_allocate_hook && ptr)
+ __scudo_allocate_hook(ptr, size);
+}
+static void reportDeallocation(void *ptr) {
+ if (SCUDO_ENABLE_HOOKS)
+ if (__scudo_deallocate_hook)
+ __scudo_deallocate_hook(ptr);
+}
+static void reportReallocAllocation(void *old_ptr, void *new_ptr, size_t size) {
+ DCHECK_NE(new_ptr, nullptr);
+
+ if (SCUDO_ENABLE_HOOKS) {
+ if (__scudo_realloc_allocate_hook)
+ __scudo_realloc_allocate_hook(old_ptr, new_ptr, size);
+ else if (__scudo_allocate_hook)
+ __scudo_allocate_hook(new_ptr, size);
+ }
+}
+static void reportReallocDeallocation(void *old_ptr) {
+ if (SCUDO_ENABLE_HOOKS) {
+ if (__scudo_realloc_deallocate_hook)
+ __scudo_realloc_deallocate_hook(old_ptr);
+ else if (__scudo_deallocate_hook)
+ __scudo_deallocate_hook(old_ptr);
+ }
+}
+
extern "C" {
INTERFACE WEAK void *SCUDO_PREFIX(calloc)(size_t nmemb, size_t size) {
@@ -28,11 +57,14 @@ INTERFACE WEAK void *SCUDO_PREFIX(calloc)(size_t nmemb, size_t size) {
}
scudo::reportCallocOverflow(nmemb, size);
}
- return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
- Product, scudo::Chunk::Origin::Malloc, SCUDO_MALLOC_ALIGNMENT, true));
+ void *Ptr = SCUDO_ALLOCATOR.allocate(Product, scudo::Chunk::Origin::Malloc,
+ SCUDO_MALLOC_ALIGNMENT, true);
+ reportAllocation(Ptr, Product);
+ return scudo::setErrnoOnNull(Ptr);
}
INTERFACE WEAK void SCUDO_PREFIX(free)(void *ptr) {
+ reportDeallocation(ptr);
SCUDO_ALLOCATOR.deallocate(ptr, scudo::Chunk::Origin::Malloc);
}
@@ -54,9 +86,31 @@ INTERFACE WEAK struct SCUDO_MALLINFO SCUDO_PREFIX(mallinfo)(void) {
return Info;
}
+// On Android, mallinfo2 is an alias of mallinfo, so don't define both.
+#if !SCUDO_ANDROID
+INTERFACE WEAK struct __scudo_mallinfo2 SCUDO_PREFIX(mallinfo2)(void) {
+ struct __scudo_mallinfo2 Info = {};
+ scudo::StatCounters Stats;
+ SCUDO_ALLOCATOR.getStats(Stats);
+ // Space allocated in mmapped regions (bytes)
+ Info.hblkhd = Stats[scudo::StatMapped];
+ // Maximum total allocated space (bytes)
+ Info.usmblks = Info.hblkhd;
+ // Space in freed fastbin blocks (bytes)
+ Info.fsmblks = Stats[scudo::StatFree];
+ // Total allocated space (bytes)
+ Info.uordblks = Stats[scudo::StatAllocated];
+ // Total free space (bytes)
+ Info.fordblks = Info.fsmblks;
+ return Info;
+}
+#endif
+
INTERFACE WEAK void *SCUDO_PREFIX(malloc)(size_t size) {
- return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
- size, scudo::Chunk::Origin::Malloc, SCUDO_MALLOC_ALIGNMENT));
+ void *Ptr = SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Malloc,
+ SCUDO_MALLOC_ALIGNMENT);
+ reportAllocation(Ptr, size);
+ return scudo::setErrnoOnNull(Ptr);
}
#if SCUDO_ANDROID
@@ -74,7 +128,7 @@ INTERFACE WEAK void *SCUDO_PREFIX(memalign)(size_t alignment, size_t size) {
alignment = 1U;
} else {
if (UNLIKELY(!scudo::isPowerOfTwo(alignment)))
- alignment = scudo::roundUpToPowerOfTwo(alignment);
+ alignment = scudo::roundUpPowerOfTwo(alignment);
}
} else {
if (UNLIKELY(!scudo::isPowerOfTwo(alignment))) {
@@ -85,8 +139,10 @@ INTERFACE WEAK void *SCUDO_PREFIX(memalign)(size_t alignment, size_t size) {
scudo::reportAlignmentNotPowerOfTwo(alignment);
}
}
- return SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign,
- alignment);
+ void *Ptr =
+ SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign, alignment);
+ reportAllocation(Ptr, size);
+ return Ptr;
}
INTERFACE WEAK int SCUDO_PREFIX(posix_memalign)(void **memptr, size_t alignment,
@@ -100,6 +156,8 @@ INTERFACE WEAK int SCUDO_PREFIX(posix_memalign)(void **memptr, size_t alignment,
SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign, alignment);
if (UNLIKELY(!Ptr))
return ENOMEM;
+ reportAllocation(Ptr, size);
+
*memptr = Ptr;
return 0;
}
@@ -114,26 +172,57 @@ INTERFACE WEAK void *SCUDO_PREFIX(pvalloc)(size_t size) {
scudo::reportPvallocOverflow(size);
}
// pvalloc(0) should allocate one page.
- return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
- size ? scudo::roundUpTo(size, PageSize) : PageSize,
- scudo::Chunk::Origin::Memalign, PageSize));
+ void *Ptr =
+ SCUDO_ALLOCATOR.allocate(size ? scudo::roundUp(size, PageSize) : PageSize,
+ scudo::Chunk::Origin::Memalign, PageSize);
+ reportAllocation(Ptr, scudo::roundUp(size, PageSize));
+
+ return scudo::setErrnoOnNull(Ptr);
}
INTERFACE WEAK void *SCUDO_PREFIX(realloc)(void *ptr, size_t size) {
- if (!ptr)
- return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
- size, scudo::Chunk::Origin::Malloc, SCUDO_MALLOC_ALIGNMENT));
+ if (!ptr) {
+ void *Ptr = SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Malloc,
+ SCUDO_MALLOC_ALIGNMENT);
+ reportAllocation(Ptr, size);
+ return scudo::setErrnoOnNull(Ptr);
+ }
if (size == 0) {
+ reportDeallocation(ptr);
SCUDO_ALLOCATOR.deallocate(ptr, scudo::Chunk::Origin::Malloc);
return nullptr;
}
- return scudo::setErrnoOnNull(
- SCUDO_ALLOCATOR.reallocate(ptr, size, SCUDO_MALLOC_ALIGNMENT));
+
+ // Given that the reporting of deallocation and allocation is not atomic, we
+ // always pretend that the old pointer will be released, so that the user
+ // doesn't need to worry about a false double-use case from the hooks' point
+ // of view.
+ //
+ // For example, assume that `realloc` releases the old pointer and allocates a
+ // new pointer. Before the reporting of both operations has been done, another
+ // thread may get the old pointer from `malloc`. That could be misinterpreted
+ // as a double-use if it's not handled properly on the hook side.
+ reportReallocDeallocation(ptr);
+ void *NewPtr = SCUDO_ALLOCATOR.reallocate(ptr, size, SCUDO_MALLOC_ALIGNMENT);
+ if (NewPtr != nullptr) {
+ // Note that even if NewPtr == ptr, the size may have changed; we still need
+ // to report the new size.
+ reportReallocAllocation(/*OldPtr=*/ptr, NewPtr, size);
+ } else {
+ // If `realloc` fails, the old pointer is not released. Report the old
+ // pointer as allocated again.
+ reportReallocAllocation(/*OldPtr=*/ptr, /*NewPtr=*/ptr,
+ SCUDO_ALLOCATOR.getAllocSize(ptr));
+ }
+
+ return scudo::setErrnoOnNull(NewPtr);
}
INTERFACE WEAK void *SCUDO_PREFIX(valloc)(size_t size) {
- return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
- size, scudo::Chunk::Origin::Memalign, scudo::getPageSizeCached()));
+ void *Ptr = SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign,
+ scudo::getPageSizeCached());
+ reportAllocation(Ptr, size);
+
+ return scudo::setErrnoOnNull(Ptr);
}
INTERFACE WEAK int SCUDO_PREFIX(malloc_iterate)(
@@ -171,7 +260,14 @@ INTERFACE WEAK int SCUDO_PREFIX(mallopt)(int param, int value) {
static_cast<scudo::sptr>(value));
return 1;
} else if (param == M_PURGE) {
- SCUDO_ALLOCATOR.releaseToOS();
+ SCUDO_ALLOCATOR.releaseToOS(scudo::ReleaseToOS::Force);
+ return 1;
+ } else if (param == M_PURGE_ALL) {
+ SCUDO_ALLOCATOR.releaseToOS(scudo::ReleaseToOS::ForceAll);
+ return 1;
+ } else if (param == M_LOG_STATS) {
+ SCUDO_ALLOCATOR.printStats();
+ SCUDO_ALLOCATOR.printFragmentationInfo();
return 1;
} else {
scudo::Option option;
@@ -207,8 +303,12 @@ INTERFACE WEAK void *SCUDO_PREFIX(aligned_alloc)(size_t alignment,
}
scudo::reportInvalidAlignedAllocAlignment(alignment, size);
}
- return scudo::setErrnoOnNull(
- SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Malloc, alignment));
+
+ void *Ptr =
+ SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Malloc, alignment);
+ reportAllocation(Ptr, size);
+
+ return scudo::setErrnoOnNull(Ptr);
}
INTERFACE WEAK int SCUDO_PREFIX(malloc_info)(UNUSED int options, FILE *stream) {
@@ -221,12 +321,15 @@ INTERFACE WEAK int SCUDO_PREFIX(malloc_info)(UNUSED int options, FILE *stream) {
if (size < max_size)
sizes[size]++;
};
+
+ SCUDO_ALLOCATOR.disable();
SCUDO_ALLOCATOR.iterateOverChunks(0, -1ul, callback, sizes);
+ SCUDO_ALLOCATOR.enable();
fputs("<malloc version=\"scudo-1\">\n", stream);
for (scudo::uptr i = 0; i != max_size; ++i)
if (sizes[i])
- fprintf(stream, "<alloc size=\"%lu\" count=\"%lu\"/>\n", i, sizes[i]);
+ fprintf(stream, "<alloc size=\"%zu\" count=\"%zu\"/>\n", i, sizes[i]);
fputs("</malloc>\n", stream);
SCUDO_PREFIX(free)(sizes);
return 0;
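
The hook protocol above (always report the old pointer as deallocated before realloc, and re-report it as allocated if realloc fails) means a hook implementation can keep a consistent live-pointer set without seeing false double-uses. A hedged sketch of such hooks — a real implementation must avoid re-entering the allocator, which this std::unordered_set-based sketch does not attempt:

#include <cstddef>
#include <mutex>
#include <unordered_set>

static std::mutex HookMutex;
static std::unordered_set<void *> LivePointers;

// Invoked by reportAllocation()/reportDeallocation() above; since no
// realloc-specific hooks are defined here, the realloc paths fall back to
// these two as well.
extern "C" void __scudo_allocate_hook(void *Ptr, size_t Size) {
  (void)Size;
  std::lock_guard<std::mutex> L(HookMutex);
  LivePointers.insert(Ptr);
}

extern "C" void __scudo_deallocate_hook(void *Ptr) {
  std::lock_guard<std::mutex> L(HookMutex);
  LivePointers.erase(Ptr);
}
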
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp
index 18c3bf2c0edf..21694c3f17fe 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp
@@ -12,6 +12,9 @@
#if SCUDO_ANDROID && _BIONIC
#include "allocator_config.h"
+#include "internal_defs.h"
+#include "platform.h"
+#include "scudo/interface.h"
#include "wrappers_c.h"
#include "wrappers_c_checks.h"
@@ -24,22 +27,7 @@
extern "C" void SCUDO_PREFIX(malloc_postinit)();
SCUDO_REQUIRE_CONSTANT_INITIALIZATION
-static scudo::Allocator<scudo::AndroidConfig, SCUDO_PREFIX(malloc_postinit)>
- SCUDO_ALLOCATOR;
-
-#include "wrappers_c.inc"
-
-#undef SCUDO_ALLOCATOR
-#undef SCUDO_PREFIX
-
-// Svelte MallocDispatch definitions.
-#define SCUDO_PREFIX(name) CONCATENATE(scudo_svelte_, name)
-#define SCUDO_ALLOCATOR SvelteAllocator
-
-extern "C" void SCUDO_PREFIX(malloc_postinit)();
-SCUDO_REQUIRE_CONSTANT_INITIALIZATION
-static scudo::Allocator<scudo::AndroidSvelteConfig,
- SCUDO_PREFIX(malloc_postinit)>
+static scudo::Allocator<scudo::Config, SCUDO_PREFIX(malloc_postinit)>
SCUDO_ALLOCATOR;
#include "wrappers_c.inc"
@@ -50,15 +38,15 @@ static scudo::Allocator<scudo::AndroidSvelteConfig,
// TODO(kostyak): support both allocators.
INTERFACE void __scudo_print_stats(void) { Allocator.printStats(); }
-INTERFACE void
-__scudo_get_error_info(struct scudo_error_info *error_info,
- uintptr_t fault_addr, const char *stack_depot,
- const char *region_info, const char *ring_buffer,
- const char *memory, const char *memory_tags,
- uintptr_t memory_addr, size_t memory_size) {
+INTERFACE void __scudo_get_error_info(
+ struct scudo_error_info *error_info, uintptr_t fault_addr,
+ const char *stack_depot, size_t stack_depot_size, const char *region_info,
+ const char *ring_buffer, size_t ring_buffer_size, const char *memory,
+ const char *memory_tags, uintptr_t memory_addr, size_t memory_size) {
+ (void)(stack_depot_size);
Allocator.getErrorInfo(error_info, fault_addr, stack_depot, region_info,
- ring_buffer, memory, memory_tags, memory_addr,
- memory_size);
+ ring_buffer, ring_buffer_size, memory, memory_tags,
+ memory_addr, memory_size);
}
INTERFACE const char *__scudo_get_stack_depot_addr() {
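The widened __scudo_get_error_info signature threads the stack-depot and ring-buffer sizes through explicitly instead of inferring them from headers. A hedged sketch of a crash-handler caller; the CrashDump container and its field names are hypothetical, only the entry-point signature comes from the code above:

#include <stddef.h>
#include <stdint.h>
#include "scudo/interface.h"

struct CrashDump {  // hypothetical: buffers captured from the crashed process
  uintptr_t fault_addr;
  const char *stack_depot;  size_t stack_depot_size;
  const char *region_info;
  const char *ring_buffer;  size_t ring_buffer_size;
  const char *memory;       const char *memory_tags;
  uintptr_t memory_addr;    size_t memory_size;
};

void describe_fault(const CrashDump &d, scudo_error_info *info) {
  __scudo_get_error_info(info, d.fault_addr, d.stack_depot, d.stack_depot_size,
                         d.region_info, d.ring_buffer, d.ring_buffer_size,
                         d.memory, d.memory_tags, d.memory_addr, d.memory_size);
}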
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c_checks.h b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c_checks.h
index 7fc1a9600e53..9cd48e82792e 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c_checks.h
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_c_checks.h
@@ -46,8 +46,13 @@ inline bool checkPosixMemalignAlignment(uptr Alignment) {
// builtin supported by recent clang & GCC if it exists, otherwise fallback to a
// costly division.
inline bool checkForCallocOverflow(uptr Size, uptr N, uptr *Product) {
-#if __has_builtin(__builtin_umull_overflow)
- return __builtin_umull_overflow(Size, N, Product);
+#if __has_builtin(__builtin_umull_overflow) && (SCUDO_WORDSIZE == 64U)
+ return __builtin_umull_overflow(Size, N,
+ reinterpret_cast<unsigned long *>(Product));
+#elif __has_builtin(__builtin_umul_overflow) && (SCUDO_WORDSIZE == 32U)
+  // On, e.g., armv7, uptr/uintptr_t may be defined as unsigned long
+ return __builtin_umul_overflow(Size, N,
+ reinterpret_cast<unsigned int *>(Product));
#else
*Product = Size * N;
if (!Size)
@@ -59,7 +64,7 @@ inline bool checkForCallocOverflow(uptr Size, uptr N, uptr *Product) {
// Returns true if the size passed to pvalloc overflows when rounded to the next
// multiple of PageSize.
inline bool checkForPvallocOverflow(uptr Size, uptr PageSize) {
- return roundUpTo(Size, PageSize) < Size;
+ return roundUp(Size, PageSize) < Size;
}
} // namespace scudo
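Both helpers guard size arithmetic against wraparound: checkForCallocOverflow now selects the builtin whose operand width matches uptr, and checkForPvallocOverflow detects overflow in the page round-up. A standalone restatement of the same checks, using the type-generic __builtin_mul_overflow where available so no unsigned long/unsigned int cast is needed (page is assumed to be a power of two):

#include <stddef.h>

#ifndef __has_builtin
#  define __has_builtin(x) 0
#endif

// Returns true on overflow; on success *product holds n * size.
static inline bool calloc_overflows(size_t n, size_t size, size_t *product) {
#if __has_builtin(__builtin_mul_overflow)
  return __builtin_mul_overflow(n, size, product);
#else
  *product = n * size;
  // Costly-division fallback: overflow iff size != 0 and product/size != n.
  return size != 0 && *product / size != n;
#endif
}

// Round-up overflow: size + page - 1 wrapped iff the result is below size.
static inline bool pvalloc_overflows(size_t size, size_t page) {
  return ((size + page - 1) & ~(page - 1)) < size;
}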
diff --git a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_cpp.cpp b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_cpp.cpp
index adb104118123..098d4f71acc4 100644
--- a/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_cpp.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/scudo/standalone/wrappers_cpp.cpp
@@ -12,95 +12,137 @@
#if !SCUDO_ANDROID || !_BIONIC
#include "allocator_config.h"
+#include "internal_defs.h"
+#include "platform.h"
+#include "scudo/interface.h"
+#include "wrappers_c.h"
#include <stdint.h>
-extern "C" void malloc_postinit();
-extern HIDDEN scudo::Allocator<scudo::Config, malloc_postinit> Allocator;
-
namespace std {
struct nothrow_t {};
enum class align_val_t : size_t {};
} // namespace std
+static void reportAllocation(void *ptr, size_t size) {
+ if (SCUDO_ENABLE_HOOKS)
+ if (__scudo_allocate_hook && ptr)
+ __scudo_allocate_hook(ptr, size);
+}
+static void reportDeallocation(void *ptr) {
+ if (SCUDO_ENABLE_HOOKS)
+ if (__scudo_deallocate_hook)
+ __scudo_deallocate_hook(ptr);
+}
+
INTERFACE WEAK void *operator new(size_t size) {
- return Allocator.allocate(size, scudo::Chunk::Origin::New);
+ void *Ptr = Allocator.allocate(size, scudo::Chunk::Origin::New);
+ reportAllocation(Ptr, size);
+ return Ptr;
}
INTERFACE WEAK void *operator new[](size_t size) {
- return Allocator.allocate(size, scudo::Chunk::Origin::NewArray);
+ void *Ptr = Allocator.allocate(size, scudo::Chunk::Origin::NewArray);
+ reportAllocation(Ptr, size);
+ return Ptr;
}
INTERFACE WEAK void *operator new(size_t size,
std::nothrow_t const &) NOEXCEPT {
- return Allocator.allocate(size, scudo::Chunk::Origin::New);
+ void *Ptr = Allocator.allocate(size, scudo::Chunk::Origin::New);
+ reportAllocation(Ptr, size);
+ return Ptr;
}
INTERFACE WEAK void *operator new[](size_t size,
std::nothrow_t const &) NOEXCEPT {
- return Allocator.allocate(size, scudo::Chunk::Origin::NewArray);
+ void *Ptr = Allocator.allocate(size, scudo::Chunk::Origin::NewArray);
+ reportAllocation(Ptr, size);
+ return Ptr;
}
INTERFACE WEAK void *operator new(size_t size, std::align_val_t align) {
- return Allocator.allocate(size, scudo::Chunk::Origin::New,
- static_cast<scudo::uptr>(align));
+ void *Ptr = Allocator.allocate(size, scudo::Chunk::Origin::New,
+ static_cast<scudo::uptr>(align));
+ reportAllocation(Ptr, size);
+ return Ptr;
}
INTERFACE WEAK void *operator new[](size_t size, std::align_val_t align) {
- return Allocator.allocate(size, scudo::Chunk::Origin::NewArray,
- static_cast<scudo::uptr>(align));
+ void *Ptr = Allocator.allocate(size, scudo::Chunk::Origin::NewArray,
+ static_cast<scudo::uptr>(align));
+ reportAllocation(Ptr, size);
+ return Ptr;
}
INTERFACE WEAK void *operator new(size_t size, std::align_val_t align,
std::nothrow_t const &) NOEXCEPT {
- return Allocator.allocate(size, scudo::Chunk::Origin::New,
- static_cast<scudo::uptr>(align));
+ void *Ptr = Allocator.allocate(size, scudo::Chunk::Origin::New,
+ static_cast<scudo::uptr>(align));
+ reportAllocation(Ptr, size);
+ return Ptr;
}
INTERFACE WEAK void *operator new[](size_t size, std::align_val_t align,
std::nothrow_t const &) NOEXCEPT {
- return Allocator.allocate(size, scudo::Chunk::Origin::NewArray,
- static_cast<scudo::uptr>(align));
+ void *Ptr = Allocator.allocate(size, scudo::Chunk::Origin::NewArray,
+ static_cast<scudo::uptr>(align));
+ reportAllocation(Ptr, size);
+ return Ptr;
}
-INTERFACE WEAK void operator delete(void *ptr)NOEXCEPT {
+INTERFACE WEAK void operator delete(void *ptr) NOEXCEPT {
+ reportDeallocation(ptr);
Allocator.deallocate(ptr, scudo::Chunk::Origin::New);
}
INTERFACE WEAK void operator delete[](void *ptr) NOEXCEPT {
+ reportDeallocation(ptr);
Allocator.deallocate(ptr, scudo::Chunk::Origin::NewArray);
}
-INTERFACE WEAK void operator delete(void *ptr, std::nothrow_t const &)NOEXCEPT {
+INTERFACE WEAK void operator delete(void *ptr,
+ std::nothrow_t const &) NOEXCEPT {
+ reportDeallocation(ptr);
Allocator.deallocate(ptr, scudo::Chunk::Origin::New);
}
INTERFACE WEAK void operator delete[](void *ptr,
std::nothrow_t const &) NOEXCEPT {
+ reportDeallocation(ptr);
Allocator.deallocate(ptr, scudo::Chunk::Origin::NewArray);
}
-INTERFACE WEAK void operator delete(void *ptr, size_t size)NOEXCEPT {
+INTERFACE WEAK void operator delete(void *ptr, size_t size) NOEXCEPT {
+ reportDeallocation(ptr);
Allocator.deallocate(ptr, scudo::Chunk::Origin::New, size);
}
INTERFACE WEAK void operator delete[](void *ptr, size_t size) NOEXCEPT {
+ reportDeallocation(ptr);
Allocator.deallocate(ptr, scudo::Chunk::Origin::NewArray, size);
}
-INTERFACE WEAK void operator delete(void *ptr, std::align_val_t align)NOEXCEPT {
+INTERFACE WEAK void operator delete(void *ptr,
+ std::align_val_t align) NOEXCEPT {
+ reportDeallocation(ptr);
Allocator.deallocate(ptr, scudo::Chunk::Origin::New, 0,
static_cast<scudo::uptr>(align));
}
INTERFACE WEAK void operator delete[](void *ptr,
std::align_val_t align) NOEXCEPT {
+ reportDeallocation(ptr);
Allocator.deallocate(ptr, scudo::Chunk::Origin::NewArray, 0,
static_cast<scudo::uptr>(align));
}
INTERFACE WEAK void operator delete(void *ptr, std::align_val_t align,
- std::nothrow_t const &)NOEXCEPT {
+ std::nothrow_t const &) NOEXCEPT {
+ reportDeallocation(ptr);
Allocator.deallocate(ptr, scudo::Chunk::Origin::New, 0,
static_cast<scudo::uptr>(align));
}
INTERFACE WEAK void operator delete[](void *ptr, std::align_val_t align,
std::nothrow_t const &) NOEXCEPT {
+ reportDeallocation(ptr);
Allocator.deallocate(ptr, scudo::Chunk::Origin::NewArray, 0,
static_cast<scudo::uptr>(align));
}
INTERFACE WEAK void operator delete(void *ptr, size_t size,
- std::align_val_t align)NOEXCEPT {
+ std::align_val_t align) NOEXCEPT {
+ reportDeallocation(ptr);
Allocator.deallocate(ptr, scudo::Chunk::Origin::New, size,
static_cast<scudo::uptr>(align));
}
INTERFACE WEAK void operator delete[](void *ptr, size_t size,
std::align_val_t align) NOEXCEPT {
+ reportDeallocation(ptr);
Allocator.deallocate(ptr, scudo::Chunk::Origin::NewArray, size,
static_cast<scudo::uptr>(align));
}
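When the runtime is built with SCUDO_ENABLE_HOOKS, every path above now reports through the weak __scudo_allocate_hook/__scudo_deallocate_hook symbols. A minimal tracing client provides strong definitions; note that a production hook must not re-enter the allocator (fprintf below may malloc, it is only a sketch):

#include <stddef.h>
#include <stdio.h>

// Strong definitions override the weak hook symbols called by the wrappers.
extern "C" void __scudo_allocate_hook(void *ptr, size_t size) {
  fprintf(stderr, "scudo alloc %p (%zu bytes)\n", ptr, size);
}

extern "C" void __scudo_deallocate_hook(void *ptr) {
  fprintf(stderr, "scudo free %p\n", ptr);
}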
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/dd/dd_interceptors.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/dd/dd_interceptors.cpp
index f78ef2d44279..2c36f691ec5b 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/dd/dd_interceptors.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/dd/dd_interceptors.cpp
@@ -285,7 +285,8 @@ static void InitDataSeg() {
if (is_bss) g_data_end = segment.end;
prev_is_data = is_data;
}
- VPrintf(1, "guessed data_start=%p data_end=%p\n", g_data_start, g_data_end);
+ VPrintf(1, "guessed data_start=0x%zx data_end=0x%zx\n", g_data_start,
+ g_data_end);
CHECK_LT(g_data_start, g_data_end);
CHECK_GE((uptr)&g_data_start, g_data_start);
CHECK_LT((uptr)&g_data_start, g_data_end);
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/dd/dd_rtl.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/dd/dd_rtl.cpp
index 2095217586a8..35b367c0cecb 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/dd/dd_rtl.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/dd/dd_rtl.cpp
@@ -38,12 +38,12 @@ static void PrintStackTrace(Thread *thr, u32 stk) {
static void ReportDeadlock(Thread *thr, DDReport *rep) {
if (rep == 0)
return;
- BlockingMutexLock lock(&ctx->report_mutex);
+ Lock lock(&ctx->report_mutex);
Printf("==============================\n");
Printf("WARNING: lock-order-inversion (potential deadlock)\n");
for (int i = 0; i < rep->n; i++) {
- Printf("Thread %d locks mutex %llu while holding mutex %llu:\n",
- rep->loop[i].thr_ctx, rep->loop[i].mtx_ctx1, rep->loop[i].mtx_ctx0);
+ Printf("Thread %lld locks mutex %llu while holding mutex %llu:\n",
+ rep->loop[i].thr_ctx, rep->loop[i].mtx_ctx1, rep->loop[i].mtx_ctx0);
PrintStackTrace(thr, rep->loop[i].stk[1]);
if (rep->loop[i].stk[0]) {
Printf("Mutex %llu was acquired here:\n",
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/dd/dd_rtl.h b/contrib/llvm-project/compiler-rt/lib/tsan/dd/dd_rtl.h
index b1e19be57d3f..c812ffbd1393 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/dd/dd_rtl.h
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/dd/dd_rtl.h
@@ -19,7 +19,7 @@ namespace __dsan {
typedef DDFlags Flags;
-struct Mutex {
+struct UserMutex {
DDMutex dd;
};
@@ -37,12 +37,12 @@ struct Callback final : public DDCallback {
u32 Unwind() override;
};
-typedef AddrHashMap<Mutex, 31051> MutexHashMap;
+typedef AddrHashMap<UserMutex, 31051> MutexHashMap;
struct Context {
DDetector *dd;
- BlockingMutex report_mutex;
+ Mutex report_mutex;
MutexHashMap mutex_map;
};
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/go/test.c b/contrib/llvm-project/compiler-rt/lib/tsan/go/test.c
index 787b4c5b7dc1..1b0d828c9044 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/go/test.c
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/go/test.c
@@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
//
-// Sanity test for Go runtime.
+// Test for Go runtime.
//
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/go/tsan_go.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/go/tsan_go.cpp
index 77987f43bf54..c689a51fb5e1 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/go/tsan_go.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/go/tsan_go.cpp
@@ -27,13 +27,9 @@ bool IsExpectedReport(uptr addr, uptr size) {
return false;
}
-void *internal_alloc(MBlockType typ, uptr sz) {
- return InternalAlloc(sz);
-}
+void *Alloc(uptr sz) { return InternalAlloc(sz); }
-void internal_free(void *p) {
- InternalFree(p);
-}
+void FreeImpl(void *p) { InternalFree(p); }
// Callback into Go.
static void (*go_runtime_cb)(uptr cmd, void *ctx);
@@ -103,14 +99,16 @@ ReportLocation *SymbolizeData(uptr addr) {
MBlock *b = ctx->metamap.GetBlock(cbctx.start);
if (!b)
return 0;
- ReportLocation *loc = ReportLocation::New(ReportLocationHeap);
+ auto *loc = New<ReportLocation>();
+ loc->type = ReportLocationHeap;
loc->heap_chunk_start = cbctx.start;
loc->heap_chunk_size = b->siz;
loc->tid = b->tid;
loc->stack = SymbolizeStackId(b->stk);
return loc;
} else {
- ReportLocation *loc = ReportLocation::New(ReportLocationGlobal);
+ auto *loc = New<ReportLocation>();
+ loc->type = ReportLocationGlobal;
loc->global.name = internal_strdup(cbctx.name ? cbctx.name : "??");
loc->global.file = internal_strdup(cbctx.file ? cbctx.file : "??");
loc->global.line = cbctx.line;
@@ -142,8 +140,7 @@ Processor *ThreadState::proc() {
extern "C" {
static ThreadState *AllocGoroutine() {
- ThreadState *thr = (ThreadState*)internal_alloc(MBlockThreadContex,
- sizeof(ThreadState));
+ auto *thr = (ThreadState *)Alloc(sizeof(ThreadState));
internal_memset(thr, 0, sizeof(*thr));
return thr;
}
@@ -170,25 +167,25 @@ void __tsan_map_shadow(uptr addr, uptr size) {
}
void __tsan_read(ThreadState *thr, void *addr, void *pc) {
- MemoryRead(thr, (uptr)pc, (uptr)addr, kSizeLog1);
+ MemoryAccess(thr, (uptr)pc, (uptr)addr, 1, kAccessRead);
}
void __tsan_read_pc(ThreadState *thr, void *addr, uptr callpc, uptr pc) {
if (callpc != 0)
FuncEntry(thr, callpc);
- MemoryRead(thr, (uptr)pc, (uptr)addr, kSizeLog1);
+ MemoryAccess(thr, (uptr)pc, (uptr)addr, 1, kAccessRead);
if (callpc != 0)
FuncExit(thr);
}
void __tsan_write(ThreadState *thr, void *addr, void *pc) {
- MemoryWrite(thr, (uptr)pc, (uptr)addr, kSizeLog1);
+ MemoryAccess(thr, (uptr)pc, (uptr)addr, 1, kAccessWrite);
}
void __tsan_write_pc(ThreadState *thr, void *addr, uptr callpc, uptr pc) {
if (callpc != 0)
FuncEntry(thr, callpc);
- MemoryWrite(thr, (uptr)pc, (uptr)addr, kSizeLog1);
+ MemoryAccess(thr, (uptr)pc, (uptr)addr, 1, kAccessWrite);
if (callpc != 0)
FuncExit(thr);
}
@@ -213,23 +210,23 @@ void __tsan_malloc(ThreadState *thr, uptr pc, uptr p, uptr sz) {
CHECK(inited);
if (thr && pc)
ctx->metamap.AllocBlock(thr, pc, p, sz);
- MemoryResetRange(0, 0, (uptr)p, sz);
+ MemoryResetRange(thr, pc, (uptr)p, sz);
}
void __tsan_free(uptr p, uptr sz) {
- ctx->metamap.FreeRange(get_cur_proc(), p, sz);
+ ctx->metamap.FreeRange(get_cur_proc(), p, sz, false);
}
void __tsan_go_start(ThreadState *parent, ThreadState **pthr, void *pc) {
ThreadState *thr = AllocGoroutine();
*pthr = thr;
- int goid = ThreadCreate(parent, (uptr)pc, 0, true);
+ Tid goid = ThreadCreate(parent, (uptr)pc, 0, true);
ThreadStart(thr, goid, 0, ThreadType::Regular);
}
void __tsan_go_end(ThreadState *thr) {
ThreadFinish(thr);
- internal_free(thr);
+ Free(thr);
}
void __tsan_proc_create(Processor **pproc) {
@@ -256,9 +253,7 @@ void __tsan_release_merge(ThreadState *thr, void *addr) {
Release(thr, 0, (uptr)addr);
}
-void __tsan_finalizer_goroutine(ThreadState *thr) {
- AcquireGlobal(thr, 0);
-}
+void __tsan_finalizer_goroutine(ThreadState *thr) { AcquireGlobal(thr); }
void __tsan_mutex_before_lock(ThreadState *thr, uptr addr, uptr write) {
if (write)
@@ -285,9 +280,7 @@ void __tsan_go_ignore_sync_begin(ThreadState *thr) {
ThreadIgnoreSyncBegin(thr, 0);
}
-void __tsan_go_ignore_sync_end(ThreadState *thr) {
- ThreadIgnoreSyncEnd(thr, 0);
-}
+void __tsan_go_ignore_sync_end(ThreadState *thr) { ThreadIgnoreSyncEnd(thr); }
void __tsan_report_count(u64 *pn) {
Lock lock(&ctx->report_mtx);
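The Go glue now funnels through the unified MemoryAccess(thr, pc, addr, size, typ) entry point instead of the size-log MemoryRead/MemoryWrite helpers. Schematically (a sketch that only compiles inside the runtime, where ThreadState, uptr and the kAccess* flags live):

// Old: size encoded as a log2 constant per helper.
//   MemoryRead(thr, pc, addr, kSizeLog1);          // 1-byte read
// New: byte count plus composable access-type flags.
static void Read1(ThreadState *thr, uptr pc, uptr addr) {
  MemoryAccess(thr, pc, addr, 1, kAccessRead);
}
static void Write1(ThreadState *thr, uptr pc, uptr addr) {
  MemoryAccess(thr, pc, addr, 1, kAccessWrite);
}
// Flags compose, e.g. an 8-byte atomic read:
//   MemoryAccess(thr, pc, addr, 8, kAccessRead | kAccessAtomic);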
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan.syms.extra b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan.syms.extra
index 4838bb0a7279..6416e5d47fc4 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan.syms.extra
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan.syms.extra
@@ -9,6 +9,9 @@ __tsan_java*
__tsan_unaligned*
__tsan_release
__tsan_acquire
+__tsan_memcpy
+__tsan_memmove
+__tsan_memset
__tsan_mutex_create
__tsan_mutex_destroy
__tsan_mutex_pre_lock
@@ -19,6 +22,7 @@ __tsan_mutex_pre_signal
__tsan_mutex_post_signal
__tsan_mutex_pre_divert
__tsan_mutex_post_divert
+__tsan_check_no_mutexes_held
__tsan_get_current_fiber
__tsan_create_fiber
__tsan_destroy_fiber
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_clock.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_clock.cpp
deleted file mode 100644
index 61848c21d162..000000000000
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_clock.cpp
+++ /dev/null
@@ -1,625 +0,0 @@
-//===-- tsan_clock.cpp ----------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of ThreadSanitizer (TSan), a race detector.
-//
-//===----------------------------------------------------------------------===//
-#include "tsan_clock.h"
-#include "tsan_rtl.h"
-#include "sanitizer_common/sanitizer_placement_new.h"
-
-// SyncClock and ThreadClock implement vector clocks for sync variables
-// (mutexes, atomic variables, file descriptors, etc) and threads, respectively.
-// ThreadClock contains fixed-size vector clock for maximum number of threads.
-// SyncClock contains growable vector clock for currently necessary number of
-// threads.
-// Together they implement very simple model of operations, namely:
-//
-// void ThreadClock::acquire(const SyncClock *src) {
-// for (int i = 0; i < kMaxThreads; i++)
-// clock[i] = max(clock[i], src->clock[i]);
-// }
-//
-// void ThreadClock::release(SyncClock *dst) const {
-// for (int i = 0; i < kMaxThreads; i++)
-// dst->clock[i] = max(dst->clock[i], clock[i]);
-// }
-//
-// void ThreadClock::releaseStoreAcquire(SyncClock *sc) const {
-// for (int i = 0; i < kMaxThreads; i++) {
-// tmp = clock[i];
-// clock[i] = max(clock[i], sc->clock[i]);
-// sc->clock[i] = tmp;
-// }
-// }
-//
-// void ThreadClock::ReleaseStore(SyncClock *dst) const {
-// for (int i = 0; i < kMaxThreads; i++)
-// dst->clock[i] = clock[i];
-// }
-//
-// void ThreadClock::acq_rel(SyncClock *dst) {
-// acquire(dst);
-// release(dst);
-// }
-//
-// Conformance to this model is extensively verified in tsan_clock_test.cpp.
-// However, the implementation is significantly more complex. The complexity
-// allows implementing important classes of use cases in O(1) instead of O(N).
-//
-// The use cases are:
-// 1. Singleton/once atomic that has a single release-store operation followed
-// by zillions of acquire-loads (the acquire-load is O(1)).
-// 2. Thread-local mutex (both lock and unlock can be O(1)).
-// 3. Leaf mutex (unlock is O(1)).
-// 4. A mutex shared by 2 threads (both lock and unlock can be O(1)).
-// 5. An atomic with a single writer (writes can be O(1)).
-// The implementation dynamically adapts to the workload. So if an atomic is in
-// read-only phase, these reads will be O(1); if it later switches to read/write
-// phase, the implementation will correctly handle that by switching to O(N).
-//
-// Thread-safety note: all const operations on SyncClock's are conducted under
-// a shared lock; all non-const operations on SyncClock's are conducted under
-// an exclusive lock; ThreadClock's are private to respective threads and so
-// do not need any protection.
-//
-// Description of SyncClock state:
-// clk_ - variable size vector clock, low kClkBits hold timestamp,
-// the remaining bits hold "acquired" flag (the actual value is thread's
-// reused counter);
-//     if acquired == thr->reused_, then the respective thread has already
-// acquired this clock (except possibly for dirty elements).
-//   dirty_ - holds up to two indices in the vector clock that other threads
-// need to acquire regardless of "acquired" flag value;
-// release_store_tid_ - denotes that the clock state is a result of
-// release-store operation by the thread with release_store_tid_ index.
-// release_store_reused_ - reuse count of release_store_tid_.
-
-namespace __tsan {
-
-static atomic_uint32_t *ref_ptr(ClockBlock *cb) {
- return reinterpret_cast<atomic_uint32_t *>(&cb->table[ClockBlock::kRefIdx]);
-}
-
-// Drop reference to the first level block idx.
-static void UnrefClockBlock(ClockCache *c, u32 idx, uptr blocks) {
- ClockBlock *cb = ctx->clock_alloc.Map(idx);
- atomic_uint32_t *ref = ref_ptr(cb);
- u32 v = atomic_load(ref, memory_order_acquire);
- for (;;) {
- CHECK_GT(v, 0);
- if (v == 1)
- break;
- if (atomic_compare_exchange_strong(ref, &v, v - 1, memory_order_acq_rel))
- return;
- }
-  // First level block owns second level blocks, so free them as well.
- for (uptr i = 0; i < blocks; i++)
- ctx->clock_alloc.Free(c, cb->table[ClockBlock::kBlockIdx - i]);
- ctx->clock_alloc.Free(c, idx);
-}
-
-ThreadClock::ThreadClock(unsigned tid, unsigned reused)
- : tid_(tid)
- , reused_(reused + 1) // 0 has special meaning
- , last_acquire_()
- , global_acquire_()
- , cached_idx_()
- , cached_size_()
- , cached_blocks_() {
- CHECK_LT(tid, kMaxTidInClock);
- CHECK_EQ(reused_, ((u64)reused_ << kClkBits) >> kClkBits);
- nclk_ = tid_ + 1;
- internal_memset(clk_, 0, sizeof(clk_));
-}
-
-void ThreadClock::ResetCached(ClockCache *c) {
- if (cached_idx_) {
- UnrefClockBlock(c, cached_idx_, cached_blocks_);
- cached_idx_ = 0;
- cached_size_ = 0;
- cached_blocks_ = 0;
- }
-}
-
-void ThreadClock::acquire(ClockCache *c, SyncClock *src) {
- DCHECK_LE(nclk_, kMaxTid);
- DCHECK_LE(src->size_, kMaxTid);
-
- // Check if it's empty -> no need to do anything.
- const uptr nclk = src->size_;
- if (nclk == 0)
- return;
-
- bool acquired = false;
- for (unsigned i = 0; i < kDirtyTids; i++) {
- SyncClock::Dirty dirty = src->dirty_[i];
- unsigned tid = dirty.tid();
- if (tid != kInvalidTid) {
- if (clk_[tid] < dirty.epoch) {
- clk_[tid] = dirty.epoch;
- acquired = true;
- }
- }
- }
-
- // Check if we've already acquired src after the last release operation on src
- if (tid_ >= nclk || src->elem(tid_).reused != reused_) {
- // O(N) acquire.
- nclk_ = max(nclk_, nclk);
- u64 *dst_pos = &clk_[0];
- for (ClockElem &src_elem : *src) {
- u64 epoch = src_elem.epoch;
- if (*dst_pos < epoch) {
- *dst_pos = epoch;
- acquired = true;
- }
- dst_pos++;
- }
-
- // Remember that this thread has acquired this clock.
- if (nclk > tid_)
- src->elem(tid_).reused = reused_;
- }
-
- if (acquired) {
- last_acquire_ = clk_[tid_];
- ResetCached(c);
- }
-}
-
-void ThreadClock::releaseStoreAcquire(ClockCache *c, SyncClock *sc) {
- DCHECK_LE(nclk_, kMaxTid);
- DCHECK_LE(sc->size_, kMaxTid);
-
- if (sc->size_ == 0) {
- // ReleaseStore will correctly set release_store_tid_,
- // which can be important for future operations.
- ReleaseStore(c, sc);
- return;
- }
-
- nclk_ = max(nclk_, (uptr) sc->size_);
-
- // Check if we need to resize sc.
- if (sc->size_ < nclk_)
- sc->Resize(c, nclk_);
-
- bool acquired = false;
-
- sc->Unshare(c);
- // Update sc->clk_.
- sc->FlushDirty();
- uptr i = 0;
- for (ClockElem &ce : *sc) {
- u64 tmp = clk_[i];
- if (clk_[i] < ce.epoch) {
- clk_[i] = ce.epoch;
- acquired = true;
- }
- ce.epoch = tmp;
- ce.reused = 0;
- i++;
- }
- sc->release_store_tid_ = kInvalidTid;
- sc->release_store_reused_ = 0;
-
- if (acquired) {
- last_acquire_ = clk_[tid_];
- ResetCached(c);
- }
-}
-
-void ThreadClock::release(ClockCache *c, SyncClock *dst) {
- DCHECK_LE(nclk_, kMaxTid);
- DCHECK_LE(dst->size_, kMaxTid);
-
- if (dst->size_ == 0) {
- // ReleaseStore will correctly set release_store_tid_,
- // which can be important for future operations.
- ReleaseStore(c, dst);
- return;
- }
-
- // Check if we need to resize dst.
- if (dst->size_ < nclk_)
- dst->Resize(c, nclk_);
-
- // Check if we had not acquired anything from other threads
- // since the last release on dst. If so, we need to update
- // only dst->elem(tid_).
- if (!HasAcquiredAfterRelease(dst)) {
- UpdateCurrentThread(c, dst);
- if (dst->release_store_tid_ != tid_ ||
- dst->release_store_reused_ != reused_)
- dst->release_store_tid_ = kInvalidTid;
- return;
- }
-
- // O(N) release.
- dst->Unshare(c);
- // First, remember whether we've acquired dst.
- bool acquired = IsAlreadyAcquired(dst);
- // Update dst->clk_.
- dst->FlushDirty();
- uptr i = 0;
- for (ClockElem &ce : *dst) {
- ce.epoch = max(ce.epoch, clk_[i]);
- ce.reused = 0;
- i++;
- }
- // Clear 'acquired' flag in the remaining elements.
- dst->release_store_tid_ = kInvalidTid;
- dst->release_store_reused_ = 0;
- // If we've acquired dst, remember this fact,
- // so that we don't need to acquire it on next acquire.
- if (acquired)
- dst->elem(tid_).reused = reused_;
-}
-
-void ThreadClock::ReleaseStore(ClockCache *c, SyncClock *dst) {
- DCHECK_LE(nclk_, kMaxTid);
- DCHECK_LE(dst->size_, kMaxTid);
-
- if (dst->size_ == 0 && cached_idx_ != 0) {
- // Reuse the cached clock.
- // Note: we could reuse/cache the cached clock in more cases:
- // we could update the existing clock and cache it, or replace it with the
- // currently cached clock and release the old one. And for a shared
- // existing clock, we could replace it with the currently cached;
-    // or unshare, update and cache. But, for simplicity, we currently reuse
- // cached clock only when the target clock is empty.
- dst->tab_ = ctx->clock_alloc.Map(cached_idx_);
- dst->tab_idx_ = cached_idx_;
- dst->size_ = cached_size_;
- dst->blocks_ = cached_blocks_;
- CHECK_EQ(dst->dirty_[0].tid(), kInvalidTid);
- // The cached clock is shared (immutable),
- // so this is where we store the current clock.
- dst->dirty_[0].set_tid(tid_);
- dst->dirty_[0].epoch = clk_[tid_];
- dst->release_store_tid_ = tid_;
- dst->release_store_reused_ = reused_;
-    // Remember that we don't need to acquire it in the future.
- dst->elem(tid_).reused = reused_;
- // Grab a reference.
- atomic_fetch_add(ref_ptr(dst->tab_), 1, memory_order_relaxed);
- return;
- }
-
- // Check if we need to resize dst.
- if (dst->size_ < nclk_)
- dst->Resize(c, nclk_);
-
- if (dst->release_store_tid_ == tid_ &&
- dst->release_store_reused_ == reused_ &&
- !HasAcquiredAfterRelease(dst)) {
- UpdateCurrentThread(c, dst);
- return;
- }
-
- // O(N) release-store.
- dst->Unshare(c);
- // Note: dst can be larger than this ThreadClock.
- // This is fine since clk_ beyond size is all zeros.
- uptr i = 0;
- for (ClockElem &ce : *dst) {
- ce.epoch = clk_[i];
- ce.reused = 0;
- i++;
- }
- for (uptr i = 0; i < kDirtyTids; i++) dst->dirty_[i].set_tid(kInvalidTid);
- dst->release_store_tid_ = tid_;
- dst->release_store_reused_ = reused_;
-  // Remember that we don't need to acquire it in the future.
- dst->elem(tid_).reused = reused_;
-
- // If the resulting clock is cachable, cache it for future release operations.
- // The clock is always cachable if we released to an empty sync object.
- if (cached_idx_ == 0 && dst->Cachable()) {
- // Grab a reference to the ClockBlock.
- atomic_uint32_t *ref = ref_ptr(dst->tab_);
- if (atomic_load(ref, memory_order_acquire) == 1)
- atomic_store_relaxed(ref, 2);
- else
- atomic_fetch_add(ref_ptr(dst->tab_), 1, memory_order_relaxed);
- cached_idx_ = dst->tab_idx_;
- cached_size_ = dst->size_;
- cached_blocks_ = dst->blocks_;
- }
-}
-
-void ThreadClock::acq_rel(ClockCache *c, SyncClock *dst) {
- acquire(c, dst);
- ReleaseStore(c, dst);
-}
-
-// Updates only single element related to the current thread in dst->clk_.
-void ThreadClock::UpdateCurrentThread(ClockCache *c, SyncClock *dst) const {
- // Update the threads time, but preserve 'acquired' flag.
- for (unsigned i = 0; i < kDirtyTids; i++) {
- SyncClock::Dirty *dirty = &dst->dirty_[i];
- const unsigned tid = dirty->tid();
- if (tid == tid_ || tid == kInvalidTid) {
- dirty->set_tid(tid_);
- dirty->epoch = clk_[tid_];
- return;
- }
- }
- // Reset all 'acquired' flags, O(N).
- // We are going to touch dst elements, so we need to unshare it.
- dst->Unshare(c);
- dst->elem(tid_).epoch = clk_[tid_];
- for (uptr i = 0; i < dst->size_; i++)
- dst->elem(i).reused = 0;
- dst->FlushDirty();
-}
-
-// Checks whether the current thread has already acquired src.
-bool ThreadClock::IsAlreadyAcquired(const SyncClock *src) const {
- if (src->elem(tid_).reused != reused_)
- return false;
- for (unsigned i = 0; i < kDirtyTids; i++) {
- SyncClock::Dirty dirty = src->dirty_[i];
- if (dirty.tid() != kInvalidTid) {
- if (clk_[dirty.tid()] < dirty.epoch)
- return false;
- }
- }
- return true;
-}
-
-// Checks whether the current thread has acquired anything
-// from other clocks after releasing to dst (directly or indirectly).
-bool ThreadClock::HasAcquiredAfterRelease(const SyncClock *dst) const {
- const u64 my_epoch = dst->elem(tid_).epoch;
- return my_epoch <= last_acquire_ ||
- my_epoch <= atomic_load_relaxed(&global_acquire_);
-}
-
-// Sets a single element in the vector clock.
-// This function is called only from weird places like AcquireGlobal.
-void ThreadClock::set(ClockCache *c, unsigned tid, u64 v) {
- DCHECK_LT(tid, kMaxTid);
- DCHECK_GE(v, clk_[tid]);
- clk_[tid] = v;
- if (nclk_ <= tid)
- nclk_ = tid + 1;
- last_acquire_ = clk_[tid_];
- ResetCached(c);
-}
-
-void ThreadClock::DebugDump(int(*printf)(const char *s, ...)) {
- printf("clock=[");
- for (uptr i = 0; i < nclk_; i++)
- printf("%s%llu", i == 0 ? "" : ",", clk_[i]);
- printf("] tid=%u/%u last_acq=%llu", tid_, reused_, last_acquire_);
-}
-
-SyncClock::SyncClock() {
- ResetImpl();
-}
-
-SyncClock::~SyncClock() {
- // Reset must be called before dtor.
- CHECK_EQ(size_, 0);
- CHECK_EQ(blocks_, 0);
- CHECK_EQ(tab_, 0);
- CHECK_EQ(tab_idx_, 0);
-}
-
-void SyncClock::Reset(ClockCache *c) {
- if (size_)
- UnrefClockBlock(c, tab_idx_, blocks_);
- ResetImpl();
-}
-
-void SyncClock::ResetImpl() {
- tab_ = 0;
- tab_idx_ = 0;
- size_ = 0;
- blocks_ = 0;
- release_store_tid_ = kInvalidTid;
- release_store_reused_ = 0;
- for (uptr i = 0; i < kDirtyTids; i++) dirty_[i].set_tid(kInvalidTid);
-}
-
-void SyncClock::Resize(ClockCache *c, uptr nclk) {
- Unshare(c);
- if (nclk <= capacity()) {
- // Memory is already allocated, just increase the size.
- size_ = nclk;
- return;
- }
- if (size_ == 0) {
- // Grow from 0 to one-level table.
- CHECK_EQ(size_, 0);
- CHECK_EQ(blocks_, 0);
- CHECK_EQ(tab_, 0);
- CHECK_EQ(tab_idx_, 0);
- tab_idx_ = ctx->clock_alloc.Alloc(c);
- tab_ = ctx->clock_alloc.Map(tab_idx_);
- internal_memset(tab_, 0, sizeof(*tab_));
- atomic_store_relaxed(ref_ptr(tab_), 1);
- size_ = 1;
- } else if (size_ > blocks_ * ClockBlock::kClockCount) {
- u32 idx = ctx->clock_alloc.Alloc(c);
- ClockBlock *new_cb = ctx->clock_alloc.Map(idx);
- uptr top = size_ - blocks_ * ClockBlock::kClockCount;
- CHECK_LT(top, ClockBlock::kClockCount);
- const uptr move = top * sizeof(tab_->clock[0]);
- internal_memcpy(&new_cb->clock[0], tab_->clock, move);
- internal_memset(&new_cb->clock[top], 0, sizeof(*new_cb) - move);
- internal_memset(tab_->clock, 0, move);
- append_block(idx);
- }
- // At this point we have first level table allocated and all clock elements
- // are evacuated from it to a second level block.
- // Add second level tables as necessary.
- while (nclk > capacity()) {
- u32 idx = ctx->clock_alloc.Alloc(c);
- ClockBlock *cb = ctx->clock_alloc.Map(idx);
- internal_memset(cb, 0, sizeof(*cb));
- append_block(idx);
- }
- size_ = nclk;
-}
-
-// Flushes all dirty elements into the main clock array.
-void SyncClock::FlushDirty() {
- for (unsigned i = 0; i < kDirtyTids; i++) {
- Dirty *dirty = &dirty_[i];
- if (dirty->tid() != kInvalidTid) {
- CHECK_LT(dirty->tid(), size_);
- elem(dirty->tid()).epoch = dirty->epoch;
- dirty->set_tid(kInvalidTid);
- }
- }
-}
-
-bool SyncClock::IsShared() const {
- if (size_ == 0)
- return false;
- atomic_uint32_t *ref = ref_ptr(tab_);
- u32 v = atomic_load(ref, memory_order_acquire);
- CHECK_GT(v, 0);
- return v > 1;
-}
-
-// Unshares the current clock if it's shared.
-// Shared clocks are immutable, so they need to be unshared before any updates.
-// Note: this does not apply to dirty entries as they are not shared.
-void SyncClock::Unshare(ClockCache *c) {
- if (!IsShared())
- return;
- // First, copy current state into old.
- SyncClock old;
- old.tab_ = tab_;
- old.tab_idx_ = tab_idx_;
- old.size_ = size_;
- old.blocks_ = blocks_;
- old.release_store_tid_ = release_store_tid_;
- old.release_store_reused_ = release_store_reused_;
- for (unsigned i = 0; i < kDirtyTids; i++)
- old.dirty_[i] = dirty_[i];
- // Then, clear current object.
- ResetImpl();
- // Allocate brand new clock in the current object.
- Resize(c, old.size_);
- // Now copy state back into this object.
- Iter old_iter(&old);
- for (ClockElem &ce : *this) {
- ce = *old_iter;
- ++old_iter;
- }
- release_store_tid_ = old.release_store_tid_;
- release_store_reused_ = old.release_store_reused_;
- for (unsigned i = 0; i < kDirtyTids; i++)
- dirty_[i] = old.dirty_[i];
- // Drop reference to old and delete if necessary.
- old.Reset(c);
-}
-
-// Can we cache this clock for future release operations?
-ALWAYS_INLINE bool SyncClock::Cachable() const {
- if (size_ == 0)
- return false;
- for (unsigned i = 0; i < kDirtyTids; i++) {
- if (dirty_[i].tid() != kInvalidTid)
- return false;
- }
- return atomic_load_relaxed(ref_ptr(tab_)) == 1;
-}
-
-// elem linearizes the two-level structure into linear array.
-// Note: this is used only for one time accesses, vector operations use
-// the iterator as it is much faster.
-ALWAYS_INLINE ClockElem &SyncClock::elem(unsigned tid) const {
- DCHECK_LT(tid, size_);
- const uptr block = tid / ClockBlock::kClockCount;
- DCHECK_LE(block, blocks_);
- tid %= ClockBlock::kClockCount;
- if (block == blocks_)
- return tab_->clock[tid];
- u32 idx = get_block(block);
- ClockBlock *cb = ctx->clock_alloc.Map(idx);
- return cb->clock[tid];
-}
-
-ALWAYS_INLINE uptr SyncClock::capacity() const {
- if (size_ == 0)
- return 0;
- uptr ratio = sizeof(ClockBlock::clock[0]) / sizeof(ClockBlock::table[0]);
- // How many clock elements we can fit into the first level block.
- // +1 for ref counter.
- uptr top = ClockBlock::kClockCount - RoundUpTo(blocks_ + 1, ratio) / ratio;
- return blocks_ * ClockBlock::kClockCount + top;
-}
-
-ALWAYS_INLINE u32 SyncClock::get_block(uptr bi) const {
- DCHECK(size_);
- DCHECK_LT(bi, blocks_);
- return tab_->table[ClockBlock::kBlockIdx - bi];
-}
-
-ALWAYS_INLINE void SyncClock::append_block(u32 idx) {
- uptr bi = blocks_++;
- CHECK_EQ(get_block(bi), 0);
- tab_->table[ClockBlock::kBlockIdx - bi] = idx;
-}
-
-// Used only by tests.
-u64 SyncClock::get(unsigned tid) const {
- for (unsigned i = 0; i < kDirtyTids; i++) {
- Dirty dirty = dirty_[i];
- if (dirty.tid() == tid)
- return dirty.epoch;
- }
- return elem(tid).epoch;
-}
-
-// Used only by Iter test.
-u64 SyncClock::get_clean(unsigned tid) const {
- return elem(tid).epoch;
-}
-
-void SyncClock::DebugDump(int(*printf)(const char *s, ...)) {
- printf("clock=[");
- for (uptr i = 0; i < size_; i++)
- printf("%s%llu", i == 0 ? "" : ",", elem(i).epoch);
- printf("] reused=[");
- for (uptr i = 0; i < size_; i++)
- printf("%s%llu", i == 0 ? "" : ",", elem(i).reused);
- printf("] release_store_tid=%d/%d dirty_tids=%d[%llu]/%d[%llu]",
- release_store_tid_, release_store_reused_, dirty_[0].tid(),
- dirty_[0].epoch, dirty_[1].tid(), dirty_[1].epoch);
-}
-
-void SyncClock::Iter::Next() {
- // Finished with the current block, move on to the next one.
- block_++;
- if (block_ < parent_->blocks_) {
- // Iterate over the next second level block.
- u32 idx = parent_->get_block(block_);
- ClockBlock *cb = ctx->clock_alloc.Map(idx);
- pos_ = &cb->clock[0];
- end_ = pos_ + min(parent_->size_ - block_ * ClockBlock::kClockCount,
- ClockBlock::kClockCount);
- return;
- }
- if (block_ == parent_->blocks_ &&
- parent_->size_ > parent_->blocks_ * ClockBlock::kClockCount) {
- // Iterate over elements in the first level block.
- pos_ = &parent_->tab_->clock[0];
- end_ = pos_ + min(parent_->size_ - block_ * ClockBlock::kClockCount,
- ClockBlock::kClockCount);
- return;
- }
- parent_ = nullptr; // denotes end
-}
-} // namespace __tsan
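The header comment of the deleted file gives the model that all of the removed fast paths implemented. For reference, a direct, unoptimized transliteration of that model in plain C++ (fixed thread count, every operation O(N), no caching or dirty entries):

#include <algorithm>
#include <cstdint>
#include <vector>

struct VectorClock {
  std::vector<uint64_t> clk;  // one epoch per thread id
  explicit VectorClock(size_t nthreads) : clk(nthreads, 0) {}

  // acquire: pull each component up to the source clock.
  void acquire(const VectorClock &src) {
    for (size_t i = 0; i < clk.size(); i++)
      clk[i] = std::max(clk[i], src.clk[i]);
  }
  // release: push each component up into the destination clock.
  void release(VectorClock &dst) const {
    for (size_t i = 0; i < clk.size(); i++)
      dst.clk[i] = std::max(dst.clk[i], clk[i]);
  }
  // release-store: overwrite the destination with this clock.
  void release_store(VectorClock &dst) const { dst.clk = clk; }
};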
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_clock.h b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_clock.h
deleted file mode 100644
index 31376a1bc9e2..000000000000
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_clock.h
+++ /dev/null
@@ -1,293 +0,0 @@
-//===-- tsan_clock.h --------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of ThreadSanitizer (TSan), a race detector.
-//
-//===----------------------------------------------------------------------===//
-#ifndef TSAN_CLOCK_H
-#define TSAN_CLOCK_H
-
-#include "tsan_defs.h"
-#include "tsan_dense_alloc.h"
-
-namespace __tsan {
-
-typedef DenseSlabAlloc<ClockBlock, 1 << 22, 1 << 10> ClockAlloc;
-typedef DenseSlabAllocCache ClockCache;
-
-// The clock that lives in sync variables (mutexes, atomics, etc).
-class SyncClock {
- public:
- SyncClock();
- ~SyncClock();
-
- uptr size() const;
-
- // These are used only in tests.
- u64 get(unsigned tid) const;
- u64 get_clean(unsigned tid) const;
-
- void Resize(ClockCache *c, uptr nclk);
- void Reset(ClockCache *c);
-
- void DebugDump(int(*printf)(const char *s, ...));
-
- // Clock element iterator.
- // Note: it iterates only over the table without regard to dirty entries.
- class Iter {
- public:
- explicit Iter(SyncClock* parent);
- Iter& operator++();
- bool operator!=(const Iter& other);
- ClockElem &operator*();
-
- private:
- SyncClock *parent_;
- // [pos_, end_) is the current continuous range of clock elements.
- ClockElem *pos_;
- ClockElem *end_;
- int block_; // Current number of second level block.
-
- NOINLINE void Next();
- };
-
- Iter begin();
- Iter end();
-
- private:
- friend class ThreadClock;
- friend class Iter;
- static const uptr kDirtyTids = 2;
-
- struct Dirty {
- u32 tid() const { return tid_ == kShortInvalidTid ? kInvalidTid : tid_; }
- void set_tid(u32 tid) {
- tid_ = tid == kInvalidTid ? kShortInvalidTid : tid;
- }
- u64 epoch : kClkBits;
-
- private:
- // Full kInvalidTid won't fit into Dirty::tid.
- static const u64 kShortInvalidTid = (1ull << (64 - kClkBits)) - 1;
-    u64 tid_ : 64 - kClkBits;  // kShortInvalidTid if not active
- };
-
- static_assert(sizeof(Dirty) == 8, "Dirty is not 64bit");
-
- unsigned release_store_tid_;
- unsigned release_store_reused_;
- Dirty dirty_[kDirtyTids];
- // If size_ is 0, tab_ is nullptr.
- // If size <= 64 (kClockCount), tab_ contains pointer to an array with
- // 64 ClockElem's (ClockBlock::clock).
- // Otherwise, tab_ points to an array with up to 127 u32 elements,
- // each pointing to the second-level 512b block with 64 ClockElem's.
- // Unused space in the first level ClockBlock is used to store additional
- // clock elements.
- // The last u32 element in the first level ClockBlock is always used as
- // reference counter.
- //
- // See the following scheme for details.
- // All memory blocks are 512 bytes (allocated from ClockAlloc).
- // Clock (clk) elements are 64 bits.
- // Idx and ref are 32 bits.
- //
- // tab_
- // |
- // \/
- // +----------------------------------------------------+
- // | clk128 | clk129 | ...unused... | idx1 | idx0 | ref |
- // +----------------------------------------------------+
- // | |
- // | \/
- // | +----------------+
- // | | clk0 ... clk63 |
- // | +----------------+
- // \/
- // +------------------+
- // | clk64 ... clk127 |
- // +------------------+
- //
- // Note: dirty entries, if active, always override what's stored in the clock.
- ClockBlock *tab_;
- u32 tab_idx_;
- u16 size_;
- u16 blocks_; // Number of second level blocks.
-
- void Unshare(ClockCache *c);
- bool IsShared() const;
- bool Cachable() const;
- void ResetImpl();
- void FlushDirty();
- uptr capacity() const;
- u32 get_block(uptr bi) const;
- void append_block(u32 idx);
- ClockElem &elem(unsigned tid) const;
-};
-
-// The clock that lives in threads.
-class ThreadClock {
- public:
- typedef DenseSlabAllocCache Cache;
-
- explicit ThreadClock(unsigned tid, unsigned reused = 0);
-
- u64 get(unsigned tid) const;
- void set(ClockCache *c, unsigned tid, u64 v);
- void set(u64 v);
- void tick();
- uptr size() const;
-
- void acquire(ClockCache *c, SyncClock *src);
- void releaseStoreAcquire(ClockCache *c, SyncClock *src);
- void release(ClockCache *c, SyncClock *dst);
- void acq_rel(ClockCache *c, SyncClock *dst);
- void ReleaseStore(ClockCache *c, SyncClock *dst);
- void ResetCached(ClockCache *c);
- void NoteGlobalAcquire(u64 v);
-
- void DebugReset();
- void DebugDump(int(*printf)(const char *s, ...));
-
- private:
- static const uptr kDirtyTids = SyncClock::kDirtyTids;
-  // Index of the thread associated with the clock ("current thread").
- const unsigned tid_;
- const unsigned reused_; // tid_ reuse count.
- // Current thread time when it acquired something from other threads.
- u64 last_acquire_;
-
- // Last time another thread has done a global acquire of this thread's clock.
- // It helps to avoid problem described in:
- // https://github.com/golang/go/issues/39186
- // See test/tsan/java_finalizer2.cpp for a regression test.
-  // Note the failure is _extremely_ hard to hit, so if you are trying
- // to reproduce it, you may want to run something like:
- // $ go get golang.org/x/tools/cmd/stress
- // $ stress -p=64 ./a.out
- //
- // The crux of the problem is roughly as follows.
- // A number of O(1) optimizations in the clocks algorithm assume proper
- // transitive cumulative propagation of clock values. The AcquireGlobal
-  // operation may produce an inconsistent non-linearizable view of
- // thread clocks. Namely, it may acquire a later value from a thread
- // with a higher ID, but fail to acquire an earlier value from a thread
- // with a lower ID. If a thread that executed AcquireGlobal then releases
- // to a sync clock, it will spoil the sync clock with the inconsistent
- // values. If another thread later releases to the sync clock, the optimized
- // algorithm may break.
- //
- // The exact sequence of events that leads to the failure.
- // - thread 1 executes AcquireGlobal
- // - thread 1 acquires value 1 for thread 2
- // - thread 2 increments clock to 2
- // - thread 2 releases to sync object 1
- // - thread 3 at time 1
- // - thread 3 acquires from sync object 1
- // - thread 3 increments clock to 2
- // - thread 1 acquires value 2 for thread 3
- // - thread 1 releases to sync object 2
- // - sync object 2 clock has 1 for thread 2 and 2 for thread 3
- // - thread 3 releases to sync object 2
- // - thread 3 sees value 2 in the clock for itself
- // and decides that it has already released to the clock
- // and did not acquire anything from other threads after that
- // (the last_acquire_ check in release operation)
- // - thread 3 does not update the value for thread 2 in the clock from 1 to 2
- // - thread 4 acquires from sync object 2
- // - thread 4 detects a false race with thread 2
- // as it should have been synchronized with thread 2 up to time 2,
- // but because of the broken clock it is now synchronized only up to time 1
- //
- // The global_acquire_ value helps to prevent this scenario.
- // Namely, thread 3 will not trust any own clock values up to global_acquire_
- // for the purposes of the last_acquire_ optimization.
- atomic_uint64_t global_acquire_;
-
- // Cached SyncClock (without dirty entries and release_store_tid_).
- // We reuse it for subsequent store-release operations without intervening
- // acquire operations. Since it is shared (and thus constant), clock value
- // for the current thread is then stored in dirty entries in the SyncClock.
-  // We hold a reference to the table while it is cached here.
- u32 cached_idx_;
- u16 cached_size_;
- u16 cached_blocks_;
-
- // Number of active elements in the clk_ table (the rest is zeros).
- uptr nclk_;
- u64 clk_[kMaxTidInClock]; // Fixed size vector clock.
-
- bool IsAlreadyAcquired(const SyncClock *src) const;
- bool HasAcquiredAfterRelease(const SyncClock *dst) const;
- void UpdateCurrentThread(ClockCache *c, SyncClock *dst) const;
-};
-
-ALWAYS_INLINE u64 ThreadClock::get(unsigned tid) const {
- DCHECK_LT(tid, kMaxTidInClock);
- return clk_[tid];
-}
-
-ALWAYS_INLINE void ThreadClock::set(u64 v) {
- DCHECK_GE(v, clk_[tid_]);
- clk_[tid_] = v;
-}
-
-ALWAYS_INLINE void ThreadClock::tick() {
- clk_[tid_]++;
-}
-
-ALWAYS_INLINE uptr ThreadClock::size() const {
- return nclk_;
-}
-
-ALWAYS_INLINE void ThreadClock::NoteGlobalAcquire(u64 v) {
- // Here we rely on the fact that AcquireGlobal is protected by
- // ThreadRegistryLock, thus only one thread at a time executes it
- // and values passed to this function should not go backwards.
- CHECK_LE(atomic_load_relaxed(&global_acquire_), v);
- atomic_store_relaxed(&global_acquire_, v);
-}
-
-ALWAYS_INLINE SyncClock::Iter SyncClock::begin() {
- return Iter(this);
-}
-
-ALWAYS_INLINE SyncClock::Iter SyncClock::end() {
- return Iter(nullptr);
-}
-
-ALWAYS_INLINE uptr SyncClock::size() const {
- return size_;
-}
-
-ALWAYS_INLINE SyncClock::Iter::Iter(SyncClock* parent)
- : parent_(parent)
- , pos_(nullptr)
- , end_(nullptr)
- , block_(-1) {
- if (parent)
- Next();
-}
-
-ALWAYS_INLINE SyncClock::Iter& SyncClock::Iter::operator++() {
- pos_++;
- if (UNLIKELY(pos_ >= end_))
- Next();
- return *this;
-}
-
-ALWAYS_INLINE bool SyncClock::Iter::operator!=(const SyncClock::Iter& other) {
- return parent_ != other.parent_;
-}
-
-ALWAYS_INLINE ClockElem &SyncClock::Iter::operator*() {
- return *pos_;
-}
-} // namespace __tsan
-
-#endif // TSAN_CLOCK_H
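The removed Dirty entry packed a kClkBits-wide epoch and a (64 - kClkBits)-wide tid into one u64, with the all-ones short tid as the invalid marker (kClkBits was 42; see the tsan_defs.h hunk below). The same packing written with explicit shifts and masks instead of bitfields:

#include <cstdint>

constexpr unsigned kClkBits = 42;
constexpr uint64_t kEpochMask = (uint64_t(1) << kClkBits) - 1;
constexpr uint64_t kShortInvalidTid = (uint64_t(1) << (64 - kClkBits)) - 1;

struct PackedDirty {
  uint64_t bits;  // low kClkBits: epoch; high 64 - kClkBits: tid
  uint64_t epoch() const { return bits & kEpochMask; }
  uint64_t tid() const { return bits >> kClkBits; }
  bool valid() const { return tid() != kShortInvalidTid; }
  void set(uint64_t tid, uint64_t epoch) {
    bits = (tid << kClkBits) | (epoch & kEpochMask);
  }
};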
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_debugging.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_debugging.cpp
index d3d6255090b7..41fa293dbaaa 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_debugging.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_debugging.cpp
@@ -35,7 +35,9 @@ static const char *ReportTypeDescription(ReportType typ) {
case ReportTypeSignalUnsafe: return "signal-unsafe-call";
case ReportTypeErrnoInSignal: return "errno-in-signal-handler";
case ReportTypeDeadlock: return "lock-order-inversion";
- // No default case so compiler warns us if we miss one
+ case ReportTypeMutexHeldWrongContext:
+ return "mutex-held-in-wrong-context";
+ // No default case so compiler warns us if we miss one
}
UNREACHABLE("missing case");
}
@@ -157,7 +159,7 @@ int __tsan_get_report_mutex(void *report, uptr idx, uptr *mutex_id, void **addr,
ReportMutex *mutex = rep->mutexes[idx];
*mutex_id = mutex->id;
*addr = (void *)mutex->addr;
- *destroyed = mutex->destroyed;
+ *destroyed = false;
if (mutex->stack) CopyTrace(mutex->stack->frames, trace, trace_size);
return 1;
}
@@ -195,9 +197,9 @@ const char *__tsan_locate_address(uptr addr, char *name, uptr name_size,
const char *region_kind = nullptr;
if (name && name_size > 0) name[0] = 0;
- if (IsMetaMem(addr)) {
+ if (IsMetaMem(reinterpret_cast<u32 *>(addr))) {
region_kind = "meta shadow";
- } else if (IsShadowMem(addr)) {
+ } else if (IsShadowMem(reinterpret_cast<RawShadow *>(addr))) {
region_kind = "shadow";
} else {
bool is_stack = false;
@@ -215,9 +217,9 @@ const char *__tsan_locate_address(uptr addr, char *name, uptr name_size,
} else {
// TODO(kuba.brecka): We should not lock. This is supposed to be called
// from within the debugger when other threads are stopped.
- ctx->thread_registry->Lock();
+ ctx->thread_registry.Lock();
ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack);
- ctx->thread_registry->Unlock();
+ ctx->thread_registry.Unlock();
if (tctx) {
region_kind = is_stack ? "stack" : "tls";
} else {
@@ -252,7 +254,7 @@ int __tsan_get_alloc_stack(uptr addr, uptr *trace, uptr size, int *thread_id,
*thread_id = b->tid;
// No locking. This is supposed to be called from within the debugger when
// other threads are stopped.
- ThreadContextBase *tctx = ctx->thread_registry->GetThreadLocked(b->tid);
+ ThreadContextBase *tctx = ctx->thread_registry.GetThreadLocked(b->tid);
*os_id = tctx->os_id;
StackTrace stack = StackDepotGet(b->stk);
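These entry points are meant to be called from a debugger while all target threads are stopped, which is why the remaining locking is flagged as a TODO. A hedged usage sketch for __tsan_locate_address; the first three parameters match the definition above, and the two trailing out-parameters (region address and size) are assumed from tsan_interface.h:

#include <stdio.h>

extern "C" const char *__tsan_locate_address(unsigned long addr, char *name,
                                             unsigned long name_size,
                                             unsigned long *region_address,
                                             unsigned long *region_size);

void classify(unsigned long addr) {
  char name[128] = {0};
  unsigned long region_addr = 0, region_size = 0;
  const char *kind = __tsan_locate_address(addr, name, sizeof(name),
                                           &region_addr, &region_size);
  // kind is a region label such as "shadow", "meta shadow", "stack" or "tls".
  printf("%#lx: %s %s region=%#lx size=%lu\n", addr, kind, name, region_addr,
         region_size);
}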
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_defs.h b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_defs.h
index f2fb7b1a213f..1ffa3d6aec40 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_defs.h
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_defs.h
@@ -18,6 +18,24 @@
#include "sanitizer_common/sanitizer_mutex.h"
#include "ubsan/ubsan_platform.h"
+#ifndef TSAN_VECTORIZE
+# define TSAN_VECTORIZE __SSE4_2__
+#endif
+
+#if TSAN_VECTORIZE
+// <emmintrin.h> transitively includes <stdlib.h>,
+// and it's prohibited to include std headers into tsan runtime.
+// So we do this dirty trick.
+# define _MM_MALLOC_H_INCLUDED
+# define __MM_MALLOC_H
+# include <emmintrin.h>
+# include <smmintrin.h>
+# define VECTOR_ALIGNED ALIGNED(16)
+typedef __m128i m128;
+#else
+# define VECTOR_ALIGNED
+#endif
+
// Setup defaults for compile definitions.
#ifndef TSAN_NO_HISTORY
# define TSAN_NO_HISTORY 0
@@ -33,40 +51,26 @@
namespace __tsan {
-const int kClkBits = 42;
-const unsigned kMaxTidReuse = (1 << (64 - kClkBits)) - 1;
+constexpr uptr kByteBits = 8;
-struct ClockElem {
- u64 epoch : kClkBits;
- u64 reused : 64 - kClkBits; // tid reuse count
-};
+// Thread slot ID.
+enum class Sid : u8 {};
+constexpr uptr kThreadSlotCount = 256;
+constexpr Sid kFreeSid = static_cast<Sid>(255);
-struct ClockBlock {
- static const uptr kSize = 512;
- static const uptr kTableSize = kSize / sizeof(u32);
- static const uptr kClockCount = kSize / sizeof(ClockElem);
- static const uptr kRefIdx = kTableSize - 1;
- static const uptr kBlockIdx = kTableSize - 2;
+// Abstract time unit, vector clock element.
+enum class Epoch : u16 {};
+constexpr uptr kEpochBits = 14;
+constexpr Epoch kEpochZero = static_cast<Epoch>(0);
+constexpr Epoch kEpochOver = static_cast<Epoch>(1 << kEpochBits);
+constexpr Epoch kEpochLast = static_cast<Epoch>((1 << kEpochBits) - 1);
- union {
- u32 table[kTableSize];
- ClockElem clock[kClockCount];
- };
+inline Epoch EpochInc(Epoch epoch) {
+ return static_cast<Epoch>(static_cast<u16>(epoch) + 1);
+}
- ClockBlock() {
- }
-};
+inline bool EpochOverflow(Epoch epoch) { return epoch == kEpochOver; }
-const int kTidBits = 13;
-// Reduce kMaxTid by kClockCount because one slot in ClockBlock table is
-// occupied by reference counter, so total number of elements we can store
-// in SyncClock is kClockCount * (kTableSize - 1).
-const unsigned kMaxTid = (1 << kTidBits) - ClockBlock::kClockCount;
-#if !SANITIZER_GO
-const unsigned kMaxTidInClock = kMaxTid * 2; // This includes msb 'freed' bit.
-#else
-const unsigned kMaxTidInClock = kMaxTid; // Go does not track freed memory.
-#endif
const uptr kShadowStackSize = 64 * 1024;
// Count of shadow values in a shadow cell.
@@ -75,8 +79,9 @@ const uptr kShadowCnt = 4;
// That many user bytes are mapped onto a single shadow cell.
const uptr kShadowCell = 8;
-// Size of a single shadow value (u64).
-const uptr kShadowSize = 8;
+// Single shadow value.
+enum class RawShadow : u32 {};
+const uptr kShadowSize = sizeof(RawShadow);
// Shadow memory is kShadowMultiplier times larger than user memory.
const uptr kShadowMultiplier = kShadowSize * kShadowCnt / kShadowCell;
@@ -88,6 +93,9 @@ const uptr kMetaShadowCell = 8;
// Size of a single meta shadow value (u32).
const uptr kMetaShadowSize = 4;
+// All addresses and PCs are assumed to be compressible to that many bits.
+const uptr kCompressedAddrBits = 44;
+
#if TSAN_NO_HISTORY
const bool kCollectHistory = false;
#else
@@ -149,17 +157,34 @@ MD5Hash md5_hash(const void *data, uptr size);
struct Processor;
struct ThreadState;
class ThreadContext;
+struct TidSlot;
struct Context;
struct ReportStack;
class ReportDesc;
class RegionAlloc;
+struct Trace;
+struct TracePart;
+
+typedef uptr AccessType;
+
+enum : AccessType {
+ kAccessWrite = 0,
+ kAccessRead = 1 << 0,
+ kAccessAtomic = 1 << 1,
+ kAccessVptr = 1 << 2, // read or write of an object virtual table pointer
+ kAccessFree = 1 << 3, // synthetic memory access during memory freeing
+ kAccessExternalPC = 1 << 4, // access PC can have kExternalPCBit set
+ kAccessCheckOnly = 1 << 5, // check for races, but don't store
+ kAccessNoRodata = 1 << 6, // don't check for .rodata marker
+ kAccessSlotLocked = 1 << 7, // memory access with TidSlot locked
+};
// Descriptor of user's memory block.
struct MBlock {
u64 siz : 48;
u64 tag : 16;
- u32 stk;
- u16 tid;
+ StackID stk;
+ Tid tid;
};
COMPILER_CHECK(sizeof(MBlock) == 16);
@@ -173,15 +198,18 @@ enum ExternalTag : uptr {
// as 16-bit values, see tsan_defs.h.
};
-enum MutexType {
- MutexTypeTrace = MutexLastCommon,
- MutexTypeReport,
+enum {
+ MutexTypeReport = MutexLastCommon,
MutexTypeSyncVar,
MutexTypeAnnotations,
MutexTypeAtExit,
MutexTypeFired,
MutexTypeRacy,
MutexTypeGlobalProc,
+ MutexTypeInternalAlloc,
+ MutexTypeTrace,
+ MutexTypeSlot,
+ MutexTypeSlots,
};
} // namespace __tsan
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_dense_alloc.h b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_dense_alloc.h
index 68ded43c4f6b..2eaff39057bc 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_dense_alloc.h
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_dense_alloc.h
@@ -49,11 +49,7 @@ class DenseSlabAlloc {
static_assert(sizeof(T) > sizeof(IndexT),
"it doesn't make sense to use dense alloc");
- explicit DenseSlabAlloc(LinkerInitialized, const char *name) {
- freelist_ = 0;
- fillpos_ = 0;
- name_ = name;
- }
+ DenseSlabAlloc(LinkerInitialized, const char *name) : name_(name) {}
explicit DenseSlabAlloc(const char *name)
: DenseSlabAlloc(LINKER_INITIALIZED, name) {
@@ -89,12 +85,7 @@ class DenseSlabAlloc {
}
void FlushCache(Cache *c) {
- SpinMutexLock lock(&mtx_);
- while (c->pos) {
- IndexT idx = c->cache[--c->pos];
- *(IndexT*)Map(idx) = freelist_;
- freelist_ = idx;
- }
+ while (c->pos) Drain(c);
}
void InitCache(Cache *c) {
@@ -102,48 +93,101 @@ class DenseSlabAlloc {
internal_memset(c->cache, 0, sizeof(c->cache));
}
+ uptr AllocatedMemory() const {
+ return atomic_load_relaxed(&fillpos_) * kL2Size * sizeof(T);
+ }
+
+ template <typename Func>
+ void ForEach(Func func) {
+ Lock lock(&mtx_);
+ uptr fillpos = atomic_load_relaxed(&fillpos_);
+ for (uptr l1 = 0; l1 < fillpos; l1++) {
+ for (IndexT l2 = l1 == 0 ? 1 : 0; l2 < kL2Size; l2++) func(&map_[l1][l2]);
+ }
+ }
+
private:
T *map_[kL1Size];
- SpinMutex mtx_;
- IndexT freelist_;
- uptr fillpos_;
- const char *name_;
-
- void Refill(Cache *c) {
- SpinMutexLock lock(&mtx_);
- if (freelist_ == 0) {
- if (fillpos_ == kL1Size) {
- Printf("ThreadSanitizer: %s overflow (%zu*%zu). Dying.\n",
- name_, kL1Size, kL2Size);
- Die();
- }
- VPrintf(2, "ThreadSanitizer: growing %s: %zu out of %zu*%zu\n",
- name_, fillpos_, kL1Size, kL2Size);
- T *batch = (T*)MmapOrDie(kL2Size * sizeof(T), name_);
- // Reserve 0 as invalid index.
- IndexT start = fillpos_ == 0 ? 1 : 0;
- for (IndexT i = start; i < kL2Size; i++) {
- new(batch + i) T;
- *(IndexT*)(batch + i) = i + 1 + fillpos_ * kL2Size;
- }
- *(IndexT*)(batch + kL2Size - 1) = 0;
- freelist_ = fillpos_ * kL2Size + start;
- map_[fillpos_++] = batch;
- }
- for (uptr i = 0; i < Cache::kSize / 2 && freelist_ != 0; i++) {
- IndexT idx = freelist_;
+ Mutex mtx_;
+ // The freelist is organized as a lock-free stack of batches of nodes.
+ // The stack itself uses Block::next links, while the batch within each
+ // stack node uses Block::batch links.
+ // Low 32-bits of freelist_ is the node index, top 32-bits is ABA-counter.
+ atomic_uint64_t freelist_ = {0};
+ atomic_uintptr_t fillpos_ = {0};
+ const char *const name_;
+
+ struct Block {
+ IndexT next;
+ IndexT batch;
+ };
+
+ Block *MapBlock(IndexT idx) { return reinterpret_cast<Block *>(Map(idx)); }
+
+ static constexpr u64 kCounterInc = 1ull << 32;
+ static constexpr u64 kCounterMask = ~(kCounterInc - 1);
+
+ NOINLINE void Refill(Cache *c) {
+ // Pop 1 batch of nodes from the freelist.
+ IndexT idx;
+ u64 xchg;
+ u64 cmp = atomic_load(&freelist_, memory_order_acquire);
+ do {
+ idx = static_cast<IndexT>(cmp);
+ if (!idx)
+ return AllocSuperBlock(c);
+ Block *ptr = MapBlock(idx);
+ xchg = ptr->next | (cmp & kCounterMask);
+ } while (!atomic_compare_exchange_weak(&freelist_, &cmp, xchg,
+ memory_order_acq_rel));
+ // Unpack it into c->cache.
+ while (idx) {
c->cache[c->pos++] = idx;
- freelist_ = *(IndexT*)Map(idx);
+ idx = MapBlock(idx)->batch;
}
}
- void Drain(Cache *c) {
- SpinMutexLock lock(&mtx_);
- for (uptr i = 0; i < Cache::kSize / 2; i++) {
+ NOINLINE void Drain(Cache *c) {
+ // Build a batch of at most Cache::kSize / 2 nodes linked by Block::batch.
+ IndexT head_idx = 0;
+ for (uptr i = 0; i < Cache::kSize / 2 && c->pos; i++) {
IndexT idx = c->cache[--c->pos];
- *(IndexT*)Map(idx) = freelist_;
- freelist_ = idx;
+ Block *ptr = MapBlock(idx);
+ ptr->batch = head_idx;
+ head_idx = idx;
+ }
+ // Push it onto the freelist stack.
+ Block *head = MapBlock(head_idx);
+ u64 xchg;
+ u64 cmp = atomic_load(&freelist_, memory_order_acquire);
+ do {
+ head->next = static_cast<IndexT>(cmp);
+ xchg = head_idx | (cmp & kCounterMask) + kCounterInc;
+ } while (!atomic_compare_exchange_weak(&freelist_, &cmp, xchg,
+ memory_order_acq_rel));
+ }
+
+ NOINLINE void AllocSuperBlock(Cache *c) {
+ Lock lock(&mtx_);
+ uptr fillpos = atomic_load_relaxed(&fillpos_);
+ if (fillpos == kL1Size) {
+ Printf("ThreadSanitizer: %s overflow (%zu*%zu). Dying.\n", name_, kL1Size,
+ kL2Size);
+ Die();
+ }
+ VPrintf(2, "ThreadSanitizer: growing %s: %zu out of %zu*%zu\n", name_,
+ fillpos, kL1Size, kL2Size);
+ T *batch = (T *)MmapOrDie(kL2Size * sizeof(T), name_);
+ map_[fillpos] = batch;
+ // Reserve 0 as invalid index.
+ for (IndexT i = fillpos ? 0 : 1; i < kL2Size; i++) {
+ new (batch + i) T;
+ c->cache[c->pos++] = i + fillpos * kL2Size;
+ if (c->pos == Cache::kSize)
+ Drain(c);
}
+ atomic_store_relaxed(&fillpos_, fillpos + 1);
+ CHECK(c->pos);
}
};
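
The freelist rewrite above replaces a spin lock with a single 64-bit atomic that packs a 32-bit node index and a 32-bit ABA counter. A standalone sketch of the same trick, using std::atomic in place of the sanitizer atomics (the names and the next[] array are illustrative, not runtime code):

#include <atomic>
#include <cstdint>

struct PackedFreelist {
  static constexpr uint64_t kCounterInc = 1ull << 32;
  static constexpr uint64_t kCounterMask = ~(kCounterInc - 1);

  std::atomic<uint64_t> head{0};  // low 32 bits: index; high 32 bits: counter
  uint32_t *next;                 // next[i] is the successor of node i

  void Push(uint32_t idx) {
    uint64_t cmp = head.load(std::memory_order_acquire);
    uint64_t xchg;
    do {
      next[idx] = static_cast<uint32_t>(cmp);
      // Bumping the counter makes a concurrent Pop that read the old head
      // fail its CAS even if the same index reappears (the ABA problem).
      xchg = idx | ((cmp & kCounterMask) + kCounterInc);
    } while (!head.compare_exchange_weak(cmp, xchg,
                                         std::memory_order_acq_rel));
  }

  uint32_t Pop() {  // returns 0 if empty; 0 is the reserved invalid index
    uint64_t cmp = head.load(std::memory_order_acquire);
    uint64_t xchg;
    uint32_t idx;
    do {
      idx = static_cast<uint32_t>(cmp);
      if (!idx)
        return 0;
      xchg = next[idx] | (cmp & kCounterMask);
    } while (!head.compare_exchange_weak(cmp, xchg,
                                         std::memory_order_acq_rel));
    return idx;
  }
};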
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_dispatch_defs.h b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_dispatch_defs.h
index 94e0b50fed36..8d38beb0b0a2 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_dispatch_defs.h
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_dispatch_defs.h
@@ -56,13 +56,6 @@ extern const dispatch_block_t _dispatch_data_destructor_munmap;
# define DISPATCH_NOESCAPE
#endif
-#if SANITIZER_MAC
-# define SANITIZER_WEAK_IMPORT extern "C" __attribute((weak_import))
-#else
-# define SANITIZER_WEAK_IMPORT extern "C" __attribute((weak))
-#endif
-
-
// Data types used in dispatch APIs
typedef unsigned long size_t;
typedef unsigned long uintptr_t;
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_external.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_external.cpp
index a87e12f2936f..98abff54e2b2 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_external.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_external.cpp
@@ -10,9 +10,12 @@
//
//===----------------------------------------------------------------------===//
#include "tsan_rtl.h"
-#include "tsan_interceptors.h"
#include "sanitizer_common/sanitizer_ptrauth.h"
+#if !SANITIZER_GO
+# include "tsan_interceptors.h"
+#endif
+
namespace __tsan {
#define CALLERPC ((uptr)__builtin_return_address(0))
@@ -43,10 +46,6 @@ const char *GetReportHeaderFromTag(uptr tag) {
return tag_data ? tag_data->header : nullptr;
}
-void InsertShadowStackFrameForTag(ThreadState *thr, uptr tag) {
- FuncEntry(thr, (uptr)&registered_tags[tag]);
-}
-
uptr TagFromShadowStackFrame(uptr pc) {
uptr tag_count = atomic_load(&used_tags, memory_order_relaxed);
void *pc_ptr = (void *)pc;
@@ -57,17 +56,26 @@ uptr TagFromShadowStackFrame(uptr pc) {
#if !SANITIZER_GO
-typedef void(*AccessFunc)(ThreadState *, uptr, uptr, int);
-void ExternalAccess(void *addr, uptr caller_pc, void *tag, AccessFunc access) {
+// We need to track tags for individual memory accesses, but there is no space
+// in the shadow cells for them. Instead, we push/pop them onto the thread
+// traces and ignore the extra tag frames when printing reports.
+static void PushTag(ThreadState *thr, uptr tag) {
+ FuncEntry(thr, (uptr)&registered_tags[tag]);
+}
+static void PopTag(ThreadState *thr) { FuncExit(thr); }
+
+static void ExternalAccess(void *addr, uptr caller_pc, uptr tsan_caller_pc,
+ void *tag, AccessType typ) {
CHECK_LT(tag, atomic_load(&used_tags, memory_order_relaxed));
+ bool in_ignored_lib;
+ if (caller_pc && libignore()->IsIgnored(caller_pc, &in_ignored_lib))
+ return;
+
ThreadState *thr = cur_thread();
if (caller_pc) FuncEntry(thr, caller_pc);
- InsertShadowStackFrameForTag(thr, (uptr)tag);
- bool in_ignored_lib;
- if (!caller_pc || !libignore()->IsIgnored(caller_pc, &in_ignored_lib)) {
- access(thr, CALLERPC, (uptr)addr, kSizeLog1);
- }
- FuncExit(thr);
+ PushTag(thr, (uptr)tag);
+ MemoryAccess(thr, tsan_caller_pc, (uptr)addr, 1, typ);
+ PopTag(thr);
if (caller_pc) FuncExit(thr);
}
@@ -92,7 +100,7 @@ void __tsan_external_register_header(void *tag, const char *header) {
header = internal_strdup(header);
char *old_header =
(char *)atomic_exchange(header_ptr, (uptr)header, memory_order_seq_cst);
- if (old_header) internal_free(old_header);
+ Free(old_header);
}
SANITIZER_INTERFACE_ATTRIBUTE
@@ -111,12 +119,12 @@ void __tsan_external_assign_tag(void *addr, void *tag) {
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_external_read(void *addr, void *caller_pc, void *tag) {
- ExternalAccess(addr, STRIP_PAC_PC(caller_pc), tag, MemoryRead);
+ ExternalAccess(addr, STRIP_PAC_PC(caller_pc), CALLERPC, tag, kAccessRead);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_external_write(void *addr, void *caller_pc, void *tag) {
- ExternalAccess(addr, STRIP_PAC_PC(caller_pc), tag, MemoryWrite);
+ ExternalAccess(addr, STRIP_PAC_PC(caller_pc), CALLERPC, tag, kAccessWrite);
}
} // extern "C"
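
For context, the sketch below shows how a library might drive this interface. The declarations match tsan_interface.h; the container names are hypothetical:

extern "C" {
void *__tsan_external_register_tag(const char *object_type);
void __tsan_external_read(void *addr, void *caller_pc, void *tag);
void __tsan_external_write(void *addr, void *caller_pc, void *tag);
}

// Hypothetical container instrumenting its logical accesses.
static void *kTableTag = __tsan_external_register_tag("HashTable");

int TableGet(void *table, int key) {
  __tsan_external_read(table, __builtin_return_address(0), kTableTag);
  return /* ... the actual lookup ... */ 0;
}

void TablePut(void *table, int key, int value) {
  __tsan_external_write(table, __builtin_return_address(0), kTableTag);
  // ... the actual insertion ...
}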
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_fd.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_fd.cpp
index 50a6b56916aa..ab295a69dce1 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_fd.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_fd.cpp
@@ -11,9 +11,12 @@
//===----------------------------------------------------------------------===//
#include "tsan_fd.h"
-#include "tsan_rtl.h"
+
#include <sanitizer_common/sanitizer_atomic.h>
+#include "tsan_interceptors.h"
+#include "tsan_rtl.h"
+
namespace __tsan {
const int kTableSizeL1 = 1024;
@@ -26,8 +29,12 @@ struct FdSync {
struct FdDesc {
FdSync *sync;
- int creation_tid;
- u32 creation_stack;
+ // This is used to establish write -> epoll_wait synchronization
+ // where epoll_wait receives notification about the write.
+ atomic_uintptr_t aux_sync; // FdSync*
+ Tid creation_tid;
+ StackID creation_stack;
+ bool closed;
};
struct FdContext {
@@ -100,6 +107,10 @@ static void init(ThreadState *thr, uptr pc, int fd, FdSync *s,
unref(thr, pc, d->sync);
d->sync = 0;
}
+ unref(thr, pc,
+ reinterpret_cast<FdSync *>(
+ atomic_load(&d->aux_sync, memory_order_relaxed)));
+ atomic_store(&d->aux_sync, 0, memory_order_relaxed);
if (flags()->io_sync == 0) {
unref(thr, pc, s);
} else if (flags()->io_sync == 1) {
@@ -110,12 +121,18 @@ static void init(ThreadState *thr, uptr pc, int fd, FdSync *s,
}
d->creation_tid = thr->tid;
d->creation_stack = CurrentStackId(thr, pc);
+ d->closed = false;
+  // This prevents false positives in the fd_close_norace3.cpp test.
+ // The mechanics of the false positive are not completely clear,
+ // but it happens only if global reset is enabled (flush_memory_ms=1)
+ // and may be related to lost writes during asynchronous MADV_DONTNEED.
+ SlotLocker locker(thr);
if (write) {
// To catch races between fd usage and open.
MemoryRangeImitateWrite(thr, pc, (uptr)d, 8);
} else {
// See the dup-related comment in FdClose.
- MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+ MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead | kAccessSlotLocked);
}
}
@@ -140,7 +157,7 @@ void FdOnFork(ThreadState *thr, uptr pc) {
}
}
-bool FdLocation(uptr addr, int *fd, int *tid, u32 *stack) {
+bool FdLocation(uptr addr, int *fd, Tid *tid, StackID *stack, bool *closed) {
for (int l1 = 0; l1 < kTableSizeL1; l1++) {
FdDesc *tab = (FdDesc*)atomic_load(&fdctx.tab[l1], memory_order_relaxed);
if (tab == 0)
@@ -151,6 +168,7 @@ bool FdLocation(uptr addr, int *fd, int *tid, u32 *stack) {
*fd = l1 * kTableSizeL1 + l2;
*tid = d->creation_tid;
*stack = d->creation_stack;
+ *closed = d->closed;
return true;
}
}
@@ -163,7 +181,7 @@ void FdAcquire(ThreadState *thr, uptr pc, int fd) {
FdDesc *d = fddesc(thr, pc, fd);
FdSync *s = d->sync;
DPrintf("#%d: FdAcquire(%d) -> %p\n", thr->tid, fd, s);
- MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+ MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead);
if (s)
Acquire(thr, pc, (uptr)s);
}
@@ -174,9 +192,11 @@ void FdRelease(ThreadState *thr, uptr pc, int fd) {
FdDesc *d = fddesc(thr, pc, fd);
FdSync *s = d->sync;
DPrintf("#%d: FdRelease(%d) -> %p\n", thr->tid, fd, s);
- MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+ MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead);
if (s)
Release(thr, pc, (uptr)s);
+ if (uptr aux_sync = atomic_load(&d->aux_sync, memory_order_acquire))
+ Release(thr, pc, aux_sync);
}
void FdAccess(ThreadState *thr, uptr pc, int fd) {
@@ -184,7 +204,7 @@ void FdAccess(ThreadState *thr, uptr pc, int fd) {
if (bogusfd(fd))
return;
FdDesc *d = fddesc(thr, pc, fd);
- MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+ MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead);
}
void FdClose(ThreadState *thr, uptr pc, int fd, bool write) {
@@ -192,27 +212,42 @@ void FdClose(ThreadState *thr, uptr pc, int fd, bool write) {
if (bogusfd(fd))
return;
FdDesc *d = fddesc(thr, pc, fd);
- if (write) {
- // To catch races between fd usage and close.
- MemoryWrite(thr, pc, (uptr)d, kSizeLog8);
- } else {
- // This path is used only by dup2/dup3 calls.
- // We do read instead of write because there is a number of legitimate
- // cases where write would lead to false positives:
- // 1. Some software dups a closed pipe in place of a socket before closing
- // the socket (to prevent races actually).
- // 2. Some daemons dup /dev/null in place of stdin/stdout.
- // On the other hand we have not seen cases when write here catches real
- // bugs.
- MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+ {
+ // Need to lock the slot to make MemoryAccess and MemoryResetRange atomic
+ // with respect to global reset. See the comment in MemoryRangeFreed.
+ SlotLocker locker(thr);
+ if (!MustIgnoreInterceptor(thr)) {
+ if (write) {
+ // To catch races between fd usage and close.
+ MemoryAccess(thr, pc, (uptr)d, 8,
+ kAccessWrite | kAccessCheckOnly | kAccessSlotLocked);
+ } else {
+ // This path is used only by dup2/dup3 calls.
+        // We do a read instead of a write because there are a number of
+        // legitimate cases where a write would lead to false positives:
+        // 1. Some software dups a closed pipe in place of a socket before
+        //    closing the socket (to prevent races actually).
+        // 2. Some daemons dup /dev/null in place of stdin/stdout.
+        // On the other hand, we have not seen cases where a write here
+        // catches real bugs.
+ MemoryAccess(thr, pc, (uptr)d, 8,
+ kAccessRead | kAccessCheckOnly | kAccessSlotLocked);
+ }
+ }
+    // We need to clear it, because if we do not intercept any call out there
+    // that creates an fd, we will hit false positives.
+ MemoryResetRange(thr, pc, (uptr)d, 8);
}
- // We need to clear it, because if we do not intercept any call out there
- // that creates fd, we will hit false postives.
- MemoryResetRange(thr, pc, (uptr)d, 8);
unref(thr, pc, d->sync);
d->sync = 0;
- d->creation_tid = 0;
- d->creation_stack = 0;
+ unref(thr, pc,
+ reinterpret_cast<FdSync *>(
+ atomic_load(&d->aux_sync, memory_order_relaxed)));
+ atomic_store(&d->aux_sync, 0, memory_order_relaxed);
+ d->closed = true;
+ d->creation_tid = thr->tid;
+ d->creation_stack = CurrentStackId(thr, pc);
}
void FdFileCreate(ThreadState *thr, uptr pc, int fd) {
@@ -228,7 +263,7 @@ void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd, bool write) {
return;
// Ignore the case when user dups not yet connected socket.
FdDesc *od = fddesc(thr, pc, oldfd);
- MemoryRead(thr, pc, (uptr)od, kSizeLog8);
+ MemoryAccess(thr, pc, (uptr)od, 8, kAccessRead);
FdClose(thr, pc, newfd, write);
init(thr, pc, newfd, ref(od->sync), write);
}
@@ -269,6 +304,30 @@ void FdPollCreate(ThreadState *thr, uptr pc, int fd) {
init(thr, pc, fd, allocsync(thr, pc));
}
+void FdPollAdd(ThreadState *thr, uptr pc, int epfd, int fd) {
+ DPrintf("#%d: FdPollAdd(%d, %d)\n", thr->tid, epfd, fd);
+ if (bogusfd(epfd) || bogusfd(fd))
+ return;
+ FdDesc *d = fddesc(thr, pc, fd);
+ // Associate fd with epoll fd only once.
+ // While an fd can be associated with multiple epolls at the same time,
+ // or with different epolls during different phases of lifetime,
+ // synchronization semantics (and examples) of this are unclear.
+ // So we don't support this for now.
+ // If we change the association, it will also create lifetime management
+ // problem for FdRelease which accesses the aux_sync.
+ if (atomic_load(&d->aux_sync, memory_order_relaxed))
+ return;
+ FdDesc *epd = fddesc(thr, pc, epfd);
+ FdSync *s = epd->sync;
+ if (!s)
+ return;
+ uptr cmp = 0;
+ if (atomic_compare_exchange_strong(
+ &d->aux_sync, &cmp, reinterpret_cast<uptr>(s), memory_order_release))
+ ref(s);
+}
+
void FdSocketCreate(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdSocketCreate(%d)\n", thr->tid, fd);
if (bogusfd(fd))
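
The user-level pattern the new aux_sync field supports looks roughly like the sketch below (using an eventfd so the written fd and the watched fd coincide; the comments describe what the interceptors, not all shown here, are assumed to do on each call):

#include <pthread.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <unistd.h>
#include <cstdint>

static int efd, epfd, data;

static void *writer(void *) {
  data = 42;                     // plain store, no explicit synchronization
  uint64_t one = 1;
  write(efd, &one, sizeof one);  // FdRelease: releases efd's aux_sync
  return nullptr;
}

int main() {
  efd = eventfd(0, 0);
  epfd = epoll_create1(0);
  epoll_event ev{};
  ev.events = EPOLLIN;
  ev.data.fd = efd;
  epoll_ctl(epfd, EPOLL_CTL_ADD, efd, &ev);  // FdPollAdd: aux_sync = epfd sync
  pthread_t t;
  pthread_create(&t, nullptr, writer, nullptr);
  epoll_event out;
  epoll_wait(epfd, &out, 1, -1);  // FdAcquire: acquires the same FdSync
  int v = data;                   // ordered after the store in writer
  pthread_join(t, nullptr);
  return v == 42 ? 0 : 1;
}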
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_fd.h b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_fd.h
index ce4f2f73bac6..dddc1d2ab24b 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_fd.h
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_fd.h
@@ -49,11 +49,12 @@ void FdEventCreate(ThreadState *thr, uptr pc, int fd);
void FdSignalCreate(ThreadState *thr, uptr pc, int fd);
void FdInotifyCreate(ThreadState *thr, uptr pc, int fd);
void FdPollCreate(ThreadState *thr, uptr pc, int fd);
+void FdPollAdd(ThreadState *thr, uptr pc, int epfd, int fd);
void FdSocketCreate(ThreadState *thr, uptr pc, int fd);
void FdSocketAccept(ThreadState *thr, uptr pc, int fd, int newfd);
void FdSocketConnecting(ThreadState *thr, uptr pc, int fd);
void FdSocketConnect(ThreadState *thr, uptr pc, int fd);
-bool FdLocation(uptr addr, int *fd, int *tid, u32 *stack);
+bool FdLocation(uptr addr, int *fd, Tid *tid, StackID *stack, bool *closed);
void FdOnFork(ThreadState *thr, uptr pc);
uptr File2addr(const char *path);
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_flags.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_flags.cpp
index 49e4a9c21da9..3fd58f46983f 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_flags.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_flags.cpp
@@ -10,19 +10,21 @@
//
//===----------------------------------------------------------------------===//
-#include "sanitizer_common/sanitizer_flags.h"
+#include "tsan_flags.h"
+
#include "sanitizer_common/sanitizer_flag_parser.h"
+#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_libc.h"
-#include "tsan_flags.h"
-#include "tsan_rtl.h"
+#include "tsan_interface.h"
#include "tsan_mman.h"
+#include "tsan_rtl.h"
#include "ubsan/ubsan_flags.h"
namespace __tsan {
// Can be overriden in frontend.
#ifdef TSAN_EXTERNAL_HOOKS
-extern "C" const char* __tsan_default_options();
+extern "C" const char *__tsan_default_options();
#else
SANITIZER_WEAK_DEFAULT_IMPL
const char *__tsan_default_options() {
@@ -55,6 +57,7 @@ void InitializeFlags(Flags *f, const char *env, const char *env_option_name) {
// Override some common flags defaults.
CommonFlags cf;
cf.CopyFrom(*common_flags());
+ cf.external_symbolizer_path = GetEnv("TSAN_SYMBOLIZER_PATH");
cf.allow_addr2line = true;
if (SANITIZER_GO) {
// Does not work as expected for Go: runtime handles SIGABRT and crashes.
@@ -96,7 +99,7 @@ void InitializeFlags(Flags *f, const char *env, const char *env_option_name) {
ubsan_parser.ParseStringFromEnv("UBSAN_OPTIONS");
#endif
- // Sanity check.
+ // Check flags.
if (!f->report_bugs) {
f->report_thread_leaks = false;
f->report_destroy_locked = false;
@@ -109,12 +112,6 @@ void InitializeFlags(Flags *f, const char *env, const char *env_option_name) {
if (common_flags()->help) parser.PrintFlagDescriptions();
- if (f->history_size < 0 || f->history_size > 7) {
- Printf("ThreadSanitizer: incorrect value for history_size"
- " (must be [0..7])\n");
- Die();
- }
-
if (f->io_sync < 0 || f->io_sync > 2) {
Printf("ThreadSanitizer: incorrect value for io_sync"
" (must be [0..2])\n");
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_flags.inc b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_flags.inc
index 2105c754486f..731d776cc893 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_flags.inc
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_flags.inc
@@ -23,10 +23,6 @@ TSAN_FLAG(bool, enable_annotations, true,
TSAN_FLAG(bool, suppress_equal_stacks, true,
"Suppress a race report if we've already output another race report "
"with the same stack.")
-TSAN_FLAG(bool, suppress_equal_addresses, true,
- "Suppress a race report if we've already output another race report "
- "on the same address.")
-
TSAN_FLAG(bool, report_bugs, true,
"Turns off bug reporting entirely (useful for benchmarking).")
TSAN_FLAG(bool, report_thread_leaks, true, "Report thread leaks at exit?")
@@ -43,7 +39,9 @@ TSAN_FLAG(
bool, force_seq_cst_atomics, false,
"If set, all atomics are effectively sequentially consistent (seq_cst), "
"regardless of what user actually specified.")
-TSAN_FLAG(bool, print_benign, false, "Print matched \"benign\" races at exit.")
+TSAN_FLAG(bool, force_background_thread, false,
+ "If set, eagerly launch a background thread for memory reclamation "
+ "instead of waiting for a user call to pthread_create.")
TSAN_FLAG(bool, halt_on_error, false, "Exit after first reported error.")
TSAN_FLAG(int, atexit_sleep_ms, 1000,
"Sleep in main thread before exiting for that many ms "
@@ -60,14 +58,10 @@ TSAN_FLAG(bool, stop_on_start, false,
"Stops on start until __tsan_resume() is called (for debugging).")
TSAN_FLAG(bool, running_on_valgrind, false,
"Controls whether RunningOnValgrind() returns true or false.")
-// There are a lot of goroutines in Go, so we use smaller history.
TSAN_FLAG(
- int, history_size, SANITIZER_GO ? 1 : 3,
- "Per-thread history size, controls how many previous memory accesses "
- "are remembered per thread. Possible values are [0..7]. "
- "history_size=0 amounts to 32K memory accesses. Each next value doubles "
- "the amount of memory accesses, up to history_size=7 that amounts to "
- "4M memory accesses. The default value is 2 (128K memory accesses).")
+    uptr, history_size, 0,
+    "Per-thread history size, controls how many extra previous memory "
+    "accesses are remembered per thread.")
TSAN_FLAG(int, io_sync, 1,
"Controls level of synchronization implied by IO operations. "
"0 - no synchronization "
@@ -76,10 +70,13 @@ TSAN_FLAG(int, io_sync, 1,
TSAN_FLAG(bool, die_after_fork, true,
"Die after multi-threaded fork if the child creates new threads.")
TSAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
-TSAN_FLAG(bool, ignore_interceptors_accesses, SANITIZER_MAC ? true : false,
+TSAN_FLAG(bool, ignore_interceptors_accesses, SANITIZER_APPLE ? true : false,
"Ignore reads and writes from all interceptors.")
-TSAN_FLAG(bool, ignore_noninstrumented_modules, SANITIZER_MAC ? true : false,
+TSAN_FLAG(bool, ignore_noninstrumented_modules, SANITIZER_APPLE ? true : false,
"Interceptors should only detect races when called from instrumented "
"modules.")
TSAN_FLAG(bool, shared_ptr_interceptor, true,
"Track atomic reference counting in libc++ shared_ptr and weak_ptr.")
+TSAN_FLAG(bool, print_full_thread_history, false,
+ "If set, prints thread creation stacks for the threads involved in "
+ "the report and their ancestors up to the main thread.")
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_ignoreset.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_ignoreset.cpp
index f6e41f668618..1fca1cf4f9fc 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_ignoreset.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_ignoreset.cpp
@@ -19,7 +19,7 @@ IgnoreSet::IgnoreSet()
: size_() {
}
-void IgnoreSet::Add(u32 stack_id) {
+void IgnoreSet::Add(StackID stack_id) {
if (size_ == kMaxSize)
return;
for (uptr i = 0; i < size_; i++) {
@@ -29,15 +29,7 @@ void IgnoreSet::Add(u32 stack_id) {
stacks_[size_++] = stack_id;
}
-void IgnoreSet::Reset() {
- size_ = 0;
-}
-
-uptr IgnoreSet::Size() const {
- return size_;
-}
-
-u32 IgnoreSet::At(uptr i) const {
+StackID IgnoreSet::At(uptr i) const {
CHECK_LT(i, size_);
CHECK_LE(size_, kMaxSize);
return stacks_[i];
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_ignoreset.h b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_ignoreset.h
index 3e318bd674d9..4e2511291ce4 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_ignoreset.h
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_ignoreset.h
@@ -19,17 +19,16 @@ namespace __tsan {
class IgnoreSet {
public:
- static const uptr kMaxSize = 16;
-
IgnoreSet();
- void Add(u32 stack_id);
- void Reset();
- uptr Size() const;
- u32 At(uptr i) const;
+ void Add(StackID stack_id);
+ void Reset() { size_ = 0; }
+ uptr Size() const { return size_; }
+ StackID At(uptr i) const;
private:
+ static constexpr uptr kMaxSize = 16;
uptr size_;
- u32 stacks_[kMaxSize];
+ StackID stacks_[kMaxSize];
};
} // namespace __tsan
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_ilist.h b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_ilist.h
new file mode 100644
index 000000000000..d7d8be219dbe
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_ilist.h
@@ -0,0 +1,189 @@
+//===-- tsan_ilist.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_ILIST_H
+#define TSAN_ILIST_H
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+namespace __tsan {
+
+class INode {
+ public:
+ INode() = default;
+
+ private:
+ INode* next_ = nullptr;
+ INode* prev_ = nullptr;
+
+ template <typename Base, INode Base::*Node, typename Elem>
+ friend class IList;
+ INode(const INode&) = delete;
+ void operator=(const INode&) = delete;
+};
+
+// Intrusive doubly-linked list.
+//
+// The node class (MyNode) needs to include an "INode foo" field;
+// then the list can be declared as IList<MyNode, &MyNode::foo>.
+// This design allows MyNode to be linked into multiple lists using
+// different INode fields.
+// The optional Elem template argument allows specifying the node's MDT
+// (most derived type) if it's different from MyNode.
+template <typename Base, INode Base::*Node, typename Elem = Base>
+class IList {
+ public:
+ IList();
+
+ void PushFront(Elem* e);
+ void PushBack(Elem* e);
+ void Remove(Elem* e);
+
+ Elem* PopFront();
+ Elem* PopBack();
+ Elem* Front();
+ Elem* Back();
+
+ // Prev links point towards front of the queue.
+ Elem* Prev(Elem* e);
+ // Next links point towards back of the queue.
+ Elem* Next(Elem* e);
+
+ uptr Size() const;
+ bool Empty() const;
+ bool Queued(Elem* e) const;
+
+ private:
+ INode node_;
+ uptr size_ = 0;
+
+ void Push(Elem* e, INode* after);
+ static INode* ToNode(Elem* e);
+ static Elem* ToElem(INode* n);
+
+ IList(const IList&) = delete;
+ void operator=(const IList&) = delete;
+};
+
+template <typename Base, INode Base::*Node, typename Elem>
+IList<Base, Node, Elem>::IList() {
+ node_.next_ = node_.prev_ = &node_;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+void IList<Base, Node, Elem>::PushFront(Elem* e) {
+ Push(e, &node_);
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+void IList<Base, Node, Elem>::PushBack(Elem* e) {
+ Push(e, node_.prev_);
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+void IList<Base, Node, Elem>::Push(Elem* e, INode* after) {
+ INode* n = ToNode(e);
+ DCHECK_EQ(n->next_, nullptr);
+ DCHECK_EQ(n->prev_, nullptr);
+ INode* next = after->next_;
+ n->next_ = next;
+ n->prev_ = after;
+ next->prev_ = n;
+ after->next_ = n;
+ size_++;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+void IList<Base, Node, Elem>::Remove(Elem* e) {
+ INode* n = ToNode(e);
+ INode* next = n->next_;
+ INode* prev = n->prev_;
+ DCHECK(next);
+ DCHECK(prev);
+ DCHECK(size_);
+ next->prev_ = prev;
+ prev->next_ = next;
+ n->prev_ = n->next_ = nullptr;
+ size_--;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+Elem* IList<Base, Node, Elem>::PopFront() {
+ Elem* e = Front();
+ if (e)
+ Remove(e);
+ return e;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+Elem* IList<Base, Node, Elem>::PopBack() {
+ Elem* e = Back();
+ if (e)
+ Remove(e);
+ return e;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+Elem* IList<Base, Node, Elem>::Front() {
+ return size_ ? ToElem(node_.next_) : nullptr;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+Elem* IList<Base, Node, Elem>::Back() {
+ return size_ ? ToElem(node_.prev_) : nullptr;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+Elem* IList<Base, Node, Elem>::Prev(Elem* e) {
+ INode* n = ToNode(e);
+ DCHECK(n->prev_);
+ return n->prev_ != &node_ ? ToElem(n->prev_) : nullptr;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+Elem* IList<Base, Node, Elem>::Next(Elem* e) {
+ INode* n = ToNode(e);
+ DCHECK(n->next_);
+ return n->next_ != &node_ ? ToElem(n->next_) : nullptr;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+uptr IList<Base, Node, Elem>::Size() const {
+ return size_;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+bool IList<Base, Node, Elem>::Empty() const {
+ return size_ == 0;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+bool IList<Base, Node, Elem>::Queued(Elem* e) const {
+ INode* n = ToNode(e);
+ DCHECK_EQ(!n->next_, !n->prev_);
+ return n->next_;
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+INode* IList<Base, Node, Elem>::ToNode(Elem* e) {
+ return &(e->*Node);
+}
+
+template <typename Base, INode Base::*Node, typename Elem>
+Elem* IList<Base, Node, Elem>::ToElem(INode* n) {
+ return static_cast<Elem*>(reinterpret_cast<Base*>(
+ reinterpret_cast<uptr>(n) -
+ reinterpret_cast<uptr>(&(reinterpret_cast<Elem*>(0)->*Node))));
+}
+
+} // namespace __tsan
+
+#endif
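
Typical usage of the new intrusive list, assuming tsan_ilist.h is included and the code sits in namespace __tsan; Task and its fields are illustrative:

struct Task {
  int id;
  INode ready_node;    // linkage for the ready list
  INode blocked_node;  // independent linkage for a second list
};

IList<Task, &Task::ready_node> ready;
IList<Task, &Task::blocked_node> blocked;

void Demo(Task *t) {
  ready.PushBack(t);
  blocked.PushFront(t);             // same Task, different links: no conflict
  if (ready.Queued(t))              // true: t is linked via ready_node
    ready.Remove(t);
  Task *front = blocked.PopFront(); // returns t
  (void)front;
}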
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors.h b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors.h
index c5716f53a323..a357a870fdf8 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors.h
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors.h
@@ -10,44 +10,71 @@ class ScopedInterceptor {
public:
ScopedInterceptor(ThreadState *thr, const char *fname, uptr pc);
~ScopedInterceptor();
- void DisableIgnores();
- void EnableIgnores();
+ void DisableIgnores() {
+ if (UNLIKELY(ignoring_))
+ DisableIgnoresImpl();
+ }
+ void EnableIgnores() {
+ if (UNLIKELY(ignoring_))
+ EnableIgnoresImpl();
+ }
+
private:
ThreadState *const thr_;
- const uptr pc_;
- bool in_ignored_lib_;
- bool ignoring_;
+ bool in_ignored_lib_ = false;
+ bool in_blocking_func_ = false;
+ bool ignoring_ = false;
+
+ void DisableIgnoresImpl();
+ void EnableIgnoresImpl();
+};
+
+struct TsanInterceptorContext {
+ ThreadState *thr;
+ const uptr pc;
};
LibIgnore *libignore();
#if !SANITIZER_GO
inline bool in_symbolizer() {
- cur_thread_init();
- return UNLIKELY(cur_thread()->in_symbolizer);
+ return UNLIKELY(cur_thread_init()->in_symbolizer);
}
#endif
+inline bool MustIgnoreInterceptor(ThreadState *thr) {
+ return !thr->is_inited || thr->ignore_interceptors || thr->in_ignored_lib;
+}
+
} // namespace __tsan
-#define SCOPED_INTERCEPTOR_RAW(func, ...) \
- cur_thread_init(); \
- ThreadState *thr = cur_thread(); \
- const uptr caller_pc = GET_CALLER_PC(); \
- ScopedInterceptor si(thr, #func, caller_pc); \
- const uptr pc = GET_CURRENT_PC(); \
- (void)pc; \
- /**/
-
-#define SCOPED_TSAN_INTERCEPTOR(func, ...) \
- SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
- if (REAL(func) == 0) { \
+#define SCOPED_INTERCEPTOR_RAW(func, ...) \
+ ThreadState *thr = cur_thread_init(); \
+ ScopedInterceptor si(thr, #func, GET_CALLER_PC()); \
+ UNUSED const uptr pc = GET_CURRENT_PC();
+
+#ifdef __powerpc64__
+// Debugging of crashes on powerpc after commit:
+// c80604f7a3 ("tsan: remove real func check from interceptors")
+// Somehow replacing if with DCHECK leads to strange failures in:
+// SanitizerCommon-tsan-powerpc64le-Linux :: Linux/ptrace.cpp
+// https://lab.llvm.org/buildbot/#/builders/105
+// https://lab.llvm.org/buildbot/#/builders/121
+// https://lab.llvm.org/buildbot/#/builders/57
+# define CHECK_REAL_FUNC(func) \
+ if (REAL(func) == 0) { \
Report("FATAL: ThreadSanitizer: failed to intercept %s\n", #func); \
- Die(); \
- } \
- if (!thr->is_inited || thr->ignore_interceptors || thr->in_ignored_lib) \
- return REAL(func)(__VA_ARGS__); \
-/**/
+ Die(); \
+ }
+#else
+# define CHECK_REAL_FUNC(func) DCHECK(REAL(func))
+#endif
+
+#define SCOPED_TSAN_INTERCEPTOR(func, ...) \
+ SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
+ CHECK_REAL_FUNC(func); \
+ if (MustIgnoreInterceptor(thr)) \
+ return REAL(func)(__VA_ARGS__);
#define SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START() \
si.DisableIgnores();
@@ -57,20 +84,49 @@ inline bool in_symbolizer() {
#define TSAN_INTERCEPTOR(ret, func, ...) INTERCEPTOR(ret, func, __VA_ARGS__)
+#if SANITIZER_FREEBSD
+# define TSAN_INTERCEPTOR_FREEBSD_ALIAS(ret, func, ...) \
+ TSAN_INTERCEPTOR(ret, _pthread_##func, __VA_ARGS__) \
+ ALIAS(WRAP(pthread_##func));
+#else
+# define TSAN_INTERCEPTOR_FREEBSD_ALIAS(ret, func, ...)
+#endif
+
#if SANITIZER_NETBSD
# define TSAN_INTERCEPTOR_NETBSD_ALIAS(ret, func, ...) \
TSAN_INTERCEPTOR(ret, __libc_##func, __VA_ARGS__) \
- ALIAS(WRAPPER_NAME(pthread_##func));
+ ALIAS(WRAP(pthread_##func));
# define TSAN_INTERCEPTOR_NETBSD_ALIAS_THR(ret, func, ...) \
TSAN_INTERCEPTOR(ret, __libc_thr_##func, __VA_ARGS__) \
- ALIAS(WRAPPER_NAME(pthread_##func));
+ ALIAS(WRAP(pthread_##func));
# define TSAN_INTERCEPTOR_NETBSD_ALIAS_THR2(ret, func, func2, ...) \
TSAN_INTERCEPTOR(ret, __libc_thr_##func, __VA_ARGS__) \
- ALIAS(WRAPPER_NAME(pthread_##func2));
+ ALIAS(WRAP(pthread_##func2));
#else
# define TSAN_INTERCEPTOR_NETBSD_ALIAS(ret, func, ...)
# define TSAN_INTERCEPTOR_NETBSD_ALIAS_THR(ret, func, ...)
# define TSAN_INTERCEPTOR_NETBSD_ALIAS_THR2(ret, func, func2, ...)
#endif
+#define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)
+
+#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED \
+ (!cur_thread_init()->is_inited)
+
+#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
+ MemoryAccessRange(((TsanInterceptorContext *)ctx)->thr, \
+ ((TsanInterceptorContext *)ctx)->pc, (uptr)ptr, size, \
+ true)
+
+#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
+  MemoryAccessRange(((TsanInterceptorContext *)ctx)->thr,                 \
+                    ((TsanInterceptorContext *)ctx)->pc, (uptr)ptr, size, \
+                    false)
+
+#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
+ SCOPED_TSAN_INTERCEPTOR(func, __VA_ARGS__); \
+ TsanInterceptorContext _ctx = {thr, pc}; \
+ ctx = (void *)&_ctx; \
+ (void)ctx;
+
#endif // TSAN_INTERCEPTORS_H
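
A typical interceptor body after this change looks like the sketch below (modeled on the real puts interceptor in tsan_interceptors_posix.cpp): SCOPED_TSAN_INTERCEPTOR now expands to CHECK_REAL_FUNC plus the MustIgnoreInterceptor early return defined above:

TSAN_INTERCEPTOR(int, puts, const char *s) {
  SCOPED_TSAN_INTERCEPTOR(puts, s);  // declares thr/pc, may tail-call REAL
  READ_STRING(thr, pc, s, internal_strlen(s) + 1);
  return REAL(puts)(s);
}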
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_libdispatch.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_libdispatch.cpp
index cbbb7ecb2397..2104fe7fd059 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_libdispatch.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_libdispatch.cpp
@@ -19,7 +19,7 @@
#include "BlocksRuntime/Block.h"
#include "tsan_dispatch_defs.h"
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
# include <Availability.h>
#endif
@@ -225,7 +225,7 @@ DISPATCH_INTERCEPT(dispatch_barrier, true)
// dispatch_async_and_wait() and friends were introduced in macOS 10.14.
// Linking of these interceptors fails when using an older SDK.
-#if !SANITIZER_MAC || defined(__MAC_10_14)
+#if !SANITIZER_APPLE || defined(__MAC_10_14)
// macOS 10.14 is greater than our minimal deployment target. To ensure we
// generate a weak reference so the TSan dylib continues to work on older
// systems, we need to forward declare the intercepted functions as "weak
@@ -558,7 +558,7 @@ TSAN_INTERCEPTOR(void, dispatch_apply_f, size_t iterations,
}
DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
-DECLARE_REAL_AND_INTERCEPTOR(int, munmap, void *addr, long_t sz)
+DECLARE_REAL_AND_INTERCEPTOR(int, munmap, void *addr, SIZE_T sz)
TSAN_INTERCEPTOR(dispatch_data_t, dispatch_data_create, const void *buffer,
size_t size, dispatch_queue_t q, dispatch_block_t destructor) {
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp
index 2d400c7e7098..e4f9e2915ced 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_mac.cpp
@@ -12,12 +12,13 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
#include "interception/interception.h"
#include "tsan_interceptors.h"
#include "tsan_interface.h"
#include "tsan_interface_ann.h"
+#include "tsan_spinlock_defs_mac.h"
#include "sanitizer_common/sanitizer_addrhashmap.h"
#include <errno.h>
@@ -365,7 +366,7 @@ static uptr GetOrCreateSyncAddress(uptr addr, ThreadState *thr, uptr pc) {
if (h.created()) {
ThreadIgnoreBegin(thr, pc);
*h = (uptr) user_alloc(thr, pc, /*size=*/1);
- ThreadIgnoreEnd(thr, pc);
+ ThreadIgnoreEnd(thr);
}
return *h;
}
@@ -405,8 +406,8 @@ TSAN_INTERCEPTOR(int, swapcontext, ucontext_t *oucp, const ucontext_t *ucp) {
{
SCOPED_INTERCEPTOR_RAW(swapcontext, oucp, ucp);
}
- // Bacause of swapcontext() semantics we have no option but to copy its
- // impementation here
+ // Because of swapcontext() semantics we have no option but to copy its
+ // implementation here
if (!oucp || !ucp) {
errno = EINVAL;
return -1;
@@ -518,4 +519,4 @@ STDCXX_INTERCEPTOR(void, _ZNSt3__111__call_onceERVmPvPFvS2_E, void *flag,
} // namespace __tsan
-#endif // SANITIZER_MAC
+#endif // SANITIZER_APPLE
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_memintrinsics.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_memintrinsics.cpp
new file mode 100644
index 000000000000..c8b6b2ef1948
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_memintrinsics.cpp
@@ -0,0 +1,43 @@
+//===-- tsan_interceptors_memintrinsics.cpp -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#define SANITIZER_COMMON_NO_REDEFINE_BUILTINS
+
+#include "tsan_interceptors.h"
+#include "tsan_interface.h"
+
+using namespace __tsan;
+
+#include "sanitizer_common/sanitizer_common_interceptors_memintrinsics.inc"
+
+extern "C" {
+
+void *__tsan_memcpy(void *dst, const void *src, uptr size) {
+ void *ctx;
+#if PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE
+ COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size);
+#else
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
+#endif
+}
+
+void *__tsan_memset(void *dst, int c, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, c, size);
+}
+
+void *__tsan_memmove(void *dst, const void *src, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
+}
+
+} // extern "C"
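
These entry points matter because, with -fsanitize=thread, the compiler lowers the memory builtins to the runtime; conceptually (a sketch of the transformation, not literal compiler output):

void CopyPacket(char *dst, const char *src, unsigned long n) {
  // Under -fsanitize=thread the call below is lowered to
  // __tsan_memcpy(dst, src, n), which reports races on both buffers
  // before delegating to the real copy.
  __builtin_memcpy(dst, src, n);
}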
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
index dd2442842795..d0282c270431 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
@@ -14,6 +14,7 @@
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_errno.h"
+#include "sanitizer_common/sanitizer_glibc_version.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
@@ -35,7 +36,10 @@
using namespace __tsan;
-#if SANITIZER_FREEBSD || SANITIZER_MAC
+DECLARE_REAL(void *, memcpy, void *to, const void *from, SIZE_T size)
+DECLARE_REAL(void *, memset, void *block, int c, SIZE_T size)
+
+#if SANITIZER_FREEBSD || SANITIZER_APPLE
#define stdout __stdoutp
#define stderr __stderrp
#endif
@@ -76,6 +80,10 @@ struct ucontext_t {
#define PTHREAD_ABI_BASE "GLIBC_2.3.2"
#elif defined(__aarch64__) || SANITIZER_PPC64V2
#define PTHREAD_ABI_BASE "GLIBC_2.17"
+#elif SANITIZER_LOONGARCH64
+#define PTHREAD_ABI_BASE "GLIBC_2.36"
+#elif SANITIZER_RISCV64
+# define PTHREAD_ABI_BASE "GLIBC_2.27"
#endif
extern "C" int pthread_attr_init(void *attr);
@@ -90,28 +98,26 @@ DECLARE_REAL(int, pthread_mutexattr_gettype, void *, void *)
DECLARE_REAL(int, fflush, __sanitizer_FILE *fp)
DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr size)
DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
+extern "C" int pthread_equal(void *t1, void *t2);
extern "C" void *pthread_self();
extern "C" void _exit(int status);
#if !SANITIZER_NETBSD
extern "C" int fileno_unlocked(void *stream);
extern "C" int dirfd(void *dirp);
#endif
-#if SANITIZER_GLIBC
-extern "C" int mallopt(int param, int value);
-#endif
#if SANITIZER_NETBSD
extern __sanitizer_FILE __sF[];
#else
extern __sanitizer_FILE *stdout, *stderr;
#endif
-#if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
+#if !SANITIZER_FREEBSD && !SANITIZER_APPLE && !SANITIZER_NETBSD
const int PTHREAD_MUTEX_RECURSIVE = 1;
const int PTHREAD_MUTEX_RECURSIVE_NP = 1;
#else
const int PTHREAD_MUTEX_RECURSIVE = 2;
const int PTHREAD_MUTEX_RECURSIVE_NP = 2;
#endif
-#if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
+#if !SANITIZER_FREEBSD && !SANITIZER_APPLE && !SANITIZER_NETBSD
const int EPOLL_CTL_ADD = 1;
#endif
const int SIGILL = 4;
@@ -121,17 +127,20 @@ const int SIGFPE = 8;
const int SIGSEGV = 11;
const int SIGPIPE = 13;
const int SIGTERM = 15;
-#if defined(__mips__) || SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD
+#if defined(__mips__) || SANITIZER_FREEBSD || SANITIZER_APPLE || SANITIZER_NETBSD
const int SIGBUS = 10;
const int SIGSYS = 12;
#else
const int SIGBUS = 7;
const int SIGSYS = 31;
#endif
+#if SANITIZER_HAS_SIGINFO
+const int SI_TIMER = -2;
+#endif
void *const MAP_FAILED = (void*)-1;
#if SANITIZER_NETBSD
const int PTHREAD_BARRIER_SERIAL_THREAD = 1234567;
-#elif !SANITIZER_MAC
+#elif !SANITIZER_APPLE
const int PTHREAD_BARRIER_SERIAL_THREAD = -1;
#endif
const int MAP_FIXED = 0x10;
@@ -144,7 +153,7 @@ typedef __sanitizer::u16 mode_t;
# define F_TLOCK 2 /* Test and lock a region for exclusive use. */
# define F_TEST 3 /* Test a region for other processes locks. */
-#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD
+#if SANITIZER_FREEBSD || SANITIZER_APPLE || SANITIZER_NETBSD
const int SA_SIGINFO = 0x40;
const int SIG_SETMASK = 3;
#elif defined(__mips__)
@@ -155,32 +164,41 @@ const int SA_SIGINFO = 4;
const int SIG_SETMASK = 2;
#endif
-#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED \
- (cur_thread_init(), !cur_thread()->is_inited)
-
namespace __tsan {
struct SignalDesc {
bool armed;
- bool sigaction;
__sanitizer_siginfo siginfo;
ucontext_t ctx;
};
struct ThreadSignalContext {
int int_signal_send;
- atomic_uintptr_t in_blocking_func;
- atomic_uintptr_t have_pending_signals;
SignalDesc pending_signals[kSigCount];
// emptyset and oldset are too big for stack.
__sanitizer_sigset_t emptyset;
__sanitizer_sigset_t oldset;
};
+void EnterBlockingFunc(ThreadState *thr) {
+ for (;;) {
+    // The order is important so as not to delay a signal indefinitely if it's
+    // delivered right before we set in_blocking_func. Note: we can't call
+    // ProcessPendingSignals while in_blocking_func is set, or we might handle
+    // a signal synchronously when we are already handling a signal.
+ atomic_store(&thr->in_blocking_func, 1, memory_order_relaxed);
+ if (atomic_load(&thr->pending_signals, memory_order_relaxed) == 0)
+ break;
+ atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
+ ProcessPendingSignals(thr);
+ }
+}
+
// The sole reason tsan wraps atexit callbacks is to establish synchronization
// between callback setup and callback execution.
struct AtExitCtx {
void (*f)();
void *arg;
+ uptr pc;
};
// InterceptorContext holds all global data required for interceptors.
@@ -192,7 +210,7 @@ struct InterceptorContext {
// in a single cache line if possible (it's accessed in every interceptor).
ALIGNED(64) LibIgnore libignore;
__sanitizer_sigaction sigactions[kSigCount];
-#if !SANITIZER_MAC && !SANITIZER_NETBSD
+#if !SANITIZER_APPLE && !SANITIZER_NETBSD
unsigned finalize_key;
#endif
@@ -237,19 +255,37 @@ SANITIZER_WEAK_CXX_DEFAULT_IMPL void OnPotentiallyBlockingRegionEnd() {}
} // namespace __tsan
static ThreadSignalContext *SigCtx(ThreadState *thr) {
- ThreadSignalContext *ctx = (ThreadSignalContext*)thr->signal_ctx;
+ // This function may be called reentrantly if it is interrupted by a signal
+ // handler. Use CAS to handle the race.
+ uptr ctx = atomic_load(&thr->signal_ctx, memory_order_relaxed);
if (ctx == 0 && !thr->is_dead) {
- ctx = (ThreadSignalContext*)MmapOrDie(sizeof(*ctx), "ThreadSignalContext");
- MemoryResetRange(thr, (uptr)&SigCtx, (uptr)ctx, sizeof(*ctx));
- thr->signal_ctx = ctx;
+ uptr pctx =
+ (uptr)MmapOrDie(sizeof(ThreadSignalContext), "ThreadSignalContext");
+ MemoryResetRange(thr, (uptr)&SigCtx, pctx, sizeof(ThreadSignalContext));
+ if (atomic_compare_exchange_strong(&thr->signal_ctx, &ctx, pctx,
+ memory_order_relaxed)) {
+ ctx = pctx;
+ } else {
+ UnmapOrDie((ThreadSignalContext *)pctx, sizeof(ThreadSignalContext));
+ }
}
- return ctx;
+ return (ThreadSignalContext *)ctx;
}
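// (Sketch, not part of the diff: the CAS-based lazy init above in miniature,
// using std::atomic and new/delete for brevity where the runtime uses
// MmapOrDie/UnmapOrDie. If a signal handler interrupts the slow path and
// installs its own copy, the interrupted caller's CAS fails and it frees
// the duplicate.)
#include <atomic>

template <typename T>
static T *LazyInitRacy(std::atomic<T *> &slot) {
  T *ctx = slot.load(std::memory_order_relaxed);
  if (!ctx) {
    T *fresh = new T();
    if (slot.compare_exchange_strong(ctx, fresh, std::memory_order_relaxed))
      ctx = fresh;   // we installed our copy
    else
      delete fresh;  // a reentrant handler won the race; use its copy
  }
  return ctx;
}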
ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
uptr pc)
- : thr_(thr), pc_(pc), in_ignored_lib_(false), ignoring_(false) {
- Initialize(thr);
+ : thr_(thr) {
+ LazyInitialize(thr);
+ if (UNLIKELY(atomic_load(&thr->in_blocking_func, memory_order_relaxed))) {
+ // pthread_join is marked as blocking, but it's also known to call other
+    // intercepted functions (mmap, free). If we don't reset in_blocking_func,
+    // we can get deadlocks and memory corruption if we deliver a synchronous
+    // signal inside an mmap/free interceptor.
+ // So reset it and restore it back in the destructor.
+ // See https://github.com/google/sanitizers/issues/1540
+ atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
+ in_blocking_func_ = true;
+ }
if (!thr_->is_inited) return;
if (!thr_->ignore_interceptors) FuncEntry(thr, pc);
DPrintf("#%d: intercept %s()\n", thr_->tid, fname);
@@ -262,6 +298,8 @@ ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
ScopedInterceptor::~ScopedInterceptor() {
if (!thr_->is_inited) return;
DisableIgnores();
+ if (UNLIKELY(in_blocking_func_))
+ EnterBlockingFunc(thr_);
if (!thr_->ignore_interceptors) {
ProcessPendingSignals(thr_);
FuncExit(thr_);
@@ -269,43 +307,48 @@ ScopedInterceptor::~ScopedInterceptor() {
}
}
-void ScopedInterceptor::EnableIgnores() {
- if (ignoring_) {
- ThreadIgnoreBegin(thr_, pc_, /*save_stack=*/false);
- if (flags()->ignore_noninstrumented_modules) thr_->suppress_reports++;
- if (in_ignored_lib_) {
- DCHECK(!thr_->in_ignored_lib);
- thr_->in_ignored_lib = true;
- }
+NOINLINE
+void ScopedInterceptor::EnableIgnoresImpl() {
+ ThreadIgnoreBegin(thr_, 0);
+ if (flags()->ignore_noninstrumented_modules)
+ thr_->suppress_reports++;
+ if (in_ignored_lib_) {
+ DCHECK(!thr_->in_ignored_lib);
+ thr_->in_ignored_lib = true;
}
}
-void ScopedInterceptor::DisableIgnores() {
- if (ignoring_) {
- ThreadIgnoreEnd(thr_, pc_);
- if (flags()->ignore_noninstrumented_modules) thr_->suppress_reports--;
- if (in_ignored_lib_) {
- DCHECK(thr_->in_ignored_lib);
- thr_->in_ignored_lib = false;
- }
+NOINLINE
+void ScopedInterceptor::DisableIgnoresImpl() {
+ ThreadIgnoreEnd(thr_);
+ if (flags()->ignore_noninstrumented_modules)
+ thr_->suppress_reports--;
+ if (in_ignored_lib_) {
+ DCHECK(thr_->in_ignored_lib);
+ thr_->in_ignored_lib = false;
}
}
#define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)
+#if SANITIZER_FREEBSD || SANITIZER_NETBSD
+# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
+#else
+# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION_VER(func, ver)
+#endif
#if SANITIZER_FREEBSD
-# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
-# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func)
-# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func)
-#elif SANITIZER_NETBSD
-# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
-# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func) \
- INTERCEPT_FUNCTION(__libc_##func)
-# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func) \
- INTERCEPT_FUNCTION(__libc_thr_##func)
+# define TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(func) \
+ INTERCEPT_FUNCTION(_pthread_##func)
+#else
+# define TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(func)
+#endif
+#if SANITIZER_NETBSD
+# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func) \
+ INTERCEPT_FUNCTION(__libc_##func)
+# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func) \
+ INTERCEPT_FUNCTION(__libc_thr_##func)
#else
-# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION_VER(func, ver)
-# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func)
-# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func)
+# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func)
+# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func)
#endif
#define READ_STRING_OF_LEN(thr, pc, s, len, n) \
@@ -319,15 +362,8 @@ void ScopedInterceptor::DisableIgnores() {
struct BlockingCall {
explicit BlockingCall(ThreadState *thr)
- : thr(thr)
- , ctx(SigCtx(thr)) {
- for (;;) {
- atomic_store(&ctx->in_blocking_func, 1, memory_order_relaxed);
- if (atomic_load(&ctx->have_pending_signals, memory_order_relaxed) == 0)
- break;
- atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
- ProcessPendingSignals(thr);
- }
+ : thr(thr) {
+ EnterBlockingFunc(thr);
// When we are in a "blocking call", we process signals asynchronously
// (right when they arrive). In this context we do not expect to be
// executing any user/runtime code. The known interceptor sequence when
@@ -338,11 +374,10 @@ struct BlockingCall {
~BlockingCall() {
thr->ignore_interceptors--;
- atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
+ atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
}
ThreadState *thr;
- ThreadSignalContext *ctx;
};
TSAN_INTERCEPTOR(unsigned, sleep, unsigned sec) {
@@ -371,7 +406,10 @@ TSAN_INTERCEPTOR(int, pause, int fake) {
return BLOCK_REAL(pause)(fake);
}
-static void at_exit_wrapper() {
+// Note: we deliberately give the function this strange "installed_at" name
+// because in reports it will appear between the callback frames and the
+// frame that installed the callback.
+static void at_exit_callback_installed_at() {
AtExitCtx *ctx;
{
// Ensure thread-safety.
@@ -383,16 +421,22 @@ static void at_exit_wrapper() {
interceptor_ctx()->AtExitStack.PopBack();
}
- Acquire(cur_thread(), (uptr)0, (uptr)ctx);
+ ThreadState *thr = cur_thread();
+ Acquire(thr, ctx->pc, (uptr)ctx);
+ FuncEntry(thr, ctx->pc);
((void(*)())ctx->f)();
- InternalFree(ctx);
+ FuncExit(thr);
+ Free(ctx);
}
-static void cxa_at_exit_wrapper(void *arg) {
- Acquire(cur_thread(), 0, (uptr)arg);
+static void cxa_at_exit_callback_installed_at(void *arg) {
+ ThreadState *thr = cur_thread();
AtExitCtx *ctx = (AtExitCtx*)arg;
+ Acquire(thr, ctx->pc, (uptr)arg);
+ FuncEntry(thr, ctx->pc);
((void(*)(void *arg))ctx->f)(ctx->arg);
- InternalFree(ctx);
+ FuncExit(thr);
+ Free(ctx);
}
static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
@@ -405,7 +449,7 @@ TSAN_INTERCEPTOR(int, atexit, void (*f)()) {
// We want to setup the atexit callback even if we are in ignored lib
// or after fork.
SCOPED_INTERCEPTOR_RAW(atexit, f);
- return setup_at_exit_wrapper(thr, pc, (void(*)())f, 0, 0);
+ return setup_at_exit_wrapper(thr, GET_CALLER_PC(), (void (*)())f, 0, 0);
}
#endif
@@ -413,14 +457,15 @@ TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) {
if (in_symbolizer())
return 0;
SCOPED_TSAN_INTERCEPTOR(__cxa_atexit, f, arg, dso);
- return setup_at_exit_wrapper(thr, pc, (void(*)())f, arg, dso);
+ return setup_at_exit_wrapper(thr, GET_CALLER_PC(), (void (*)())f, arg, dso);
}
static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
void *arg, void *dso) {
- AtExitCtx *ctx = (AtExitCtx*)InternalAlloc(sizeof(AtExitCtx));
+ auto *ctx = New<AtExitCtx>();
ctx->f = f;
ctx->arg = arg;
+ ctx->pc = pc;
Release(thr, pc, (uptr)ctx);
// Memory allocation in __cxa_atexit will race with free during exit,
// because we do not see synchronization around atexit callback list.
@@ -436,41 +481,44 @@ static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
// due to atexit_mu held on exit from the calloc interceptor.
ScopedIgnoreInterceptors ignore;
- res = REAL(__cxa_atexit)((void (*)(void *a))at_exit_wrapper, 0, 0);
+ res = REAL(__cxa_atexit)((void (*)(void *a))at_exit_callback_installed_at,
+ 0, 0);
// Push AtExitCtx on the top of the stack of callback functions
if (!res) {
interceptor_ctx()->AtExitStack.PushBack(ctx);
}
} else {
- res = REAL(__cxa_atexit)(cxa_at_exit_wrapper, ctx, dso);
+ res = REAL(__cxa_atexit)(cxa_at_exit_callback_installed_at, ctx, dso);
}
- ThreadIgnoreEnd(thr, pc);
+ ThreadIgnoreEnd(thr);
return res;
}
-#if !SANITIZER_MAC && !SANITIZER_NETBSD
-static void on_exit_wrapper(int status, void *arg) {
+#if !SANITIZER_APPLE && !SANITIZER_NETBSD
+static void on_exit_callback_installed_at(int status, void *arg) {
ThreadState *thr = cur_thread();
- uptr pc = 0;
- Acquire(thr, pc, (uptr)arg);
AtExitCtx *ctx = (AtExitCtx*)arg;
+ Acquire(thr, ctx->pc, (uptr)arg);
+ FuncEntry(thr, ctx->pc);
((void(*)(int status, void *arg))ctx->f)(status, ctx->arg);
- InternalFree(ctx);
+ FuncExit(thr);
+ Free(ctx);
}
TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) {
if (in_symbolizer())
return 0;
SCOPED_TSAN_INTERCEPTOR(on_exit, f, arg);
- AtExitCtx *ctx = (AtExitCtx*)InternalAlloc(sizeof(AtExitCtx));
+ auto *ctx = New<AtExitCtx>();
ctx->f = (void(*)())f;
ctx->arg = arg;
+ ctx->pc = GET_CALLER_PC();
Release(thr, pc, (uptr)ctx);
// Memory allocation in __cxa_atexit will race with free during exit,
// because we do not see synchronization around atexit callback list.
ThreadIgnoreBegin(thr, pc);
- int res = REAL(on_exit)(on_exit_wrapper, ctx);
- ThreadIgnoreEnd(thr, pc);
+ int res = REAL(on_exit)(on_exit_callback_installed_at, ctx);
+ ThreadIgnoreEnd(thr);
return res;
}
#define TSAN_MAYBE_INTERCEPT_ON_EXIT TSAN_INTERCEPT(on_exit)
@@ -502,9 +550,7 @@ static void SetJmp(ThreadState *thr, uptr sp) {
buf->shadow_stack_pos = thr->shadow_stack_pos;
ThreadSignalContext *sctx = SigCtx(thr);
buf->int_signal_send = sctx ? sctx->int_signal_send : 0;
- buf->in_blocking_func = sctx ?
- atomic_load(&sctx->in_blocking_func, memory_order_relaxed) :
- false;
+ buf->in_blocking_func = atomic_load(&thr->in_blocking_func, memory_order_relaxed);
buf->in_signal_handler = atomic_load(&thr->in_signal_handler,
memory_order_relaxed);
}
@@ -520,11 +566,10 @@ static void LongJmp(ThreadState *thr, uptr *env) {
while (thr->shadow_stack_pos > buf->shadow_stack_pos)
FuncExit(thr);
ThreadSignalContext *sctx = SigCtx(thr);
- if (sctx) {
+ if (sctx)
sctx->int_signal_send = buf->int_signal_send;
- atomic_store(&sctx->in_blocking_func, buf->in_blocking_func,
- memory_order_relaxed);
- }
+ atomic_store(&thr->in_blocking_func, buf->in_blocking_func,
+ memory_order_relaxed);
atomic_store(&thr->in_signal_handler, buf->in_signal_handler,
memory_order_relaxed);
JmpBufGarbageCollect(thr, buf->sp - 1); // do not collect buf->sp
@@ -536,16 +581,13 @@ static void LongJmp(ThreadState *thr, uptr *env) {
}
// FIXME: put everything below into a common extern "C" block?
-extern "C" void __tsan_setjmp(uptr sp) {
- cur_thread_init();
- SetJmp(cur_thread(), sp);
-}
+extern "C" void __tsan_setjmp(uptr sp) { SetJmp(cur_thread_init(), sp); }
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
TSAN_INTERCEPTOR(int, setjmp, void *env);
TSAN_INTERCEPTOR(int, _setjmp, void *env);
TSAN_INTERCEPTOR(int, sigsetjmp, void *env);
-#else // SANITIZER_MAC
+#else // SANITIZER_APPLE
#if SANITIZER_NETBSD
#define setjmp_symname __setjmp14
@@ -555,59 +597,28 @@ TSAN_INTERCEPTOR(int, sigsetjmp, void *env);
#define sigsetjmp_symname sigsetjmp
#endif
-#define TSAN_INTERCEPTOR_SETJMP_(x) __interceptor_ ## x
-#define TSAN_INTERCEPTOR_SETJMP__(x) TSAN_INTERCEPTOR_SETJMP_(x)
-#define TSAN_INTERCEPTOR_SETJMP TSAN_INTERCEPTOR_SETJMP__(setjmp_symname)
-#define TSAN_INTERCEPTOR_SIGSETJMP TSAN_INTERCEPTOR_SETJMP__(sigsetjmp_symname)
-
-#define TSAN_STRING_SETJMP SANITIZER_STRINGIFY(setjmp_symname)
-#define TSAN_STRING_SIGSETJMP SANITIZER_STRINGIFY(sigsetjmp_symname)
-
-// Not called. Merely to satisfy TSAN_INTERCEPT().
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE
-int TSAN_INTERCEPTOR_SETJMP(void *env);
-extern "C" int TSAN_INTERCEPTOR_SETJMP(void *env) {
- CHECK(0);
- return 0;
-}
-
-// FIXME: any reason to have a separate declaration?
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE
-int __interceptor__setjmp(void *env);
-extern "C" int __interceptor__setjmp(void *env) {
- CHECK(0);
- return 0;
-}
-
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE
-int TSAN_INTERCEPTOR_SIGSETJMP(void *env);
-extern "C" int TSAN_INTERCEPTOR_SIGSETJMP(void *env) {
- CHECK(0);
- return 0;
-}
-
-#if !SANITIZER_NETBSD
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE
-int __interceptor___sigsetjmp(void *env);
-extern "C" int __interceptor___sigsetjmp(void *env) {
- CHECK(0);
- return 0;
-}
-#endif
-
-extern "C" int setjmp_symname(void *env);
-extern "C" int _setjmp(void *env);
-extern "C" int sigsetjmp_symname(void *env);
-#if !SANITIZER_NETBSD
-extern "C" int __sigsetjmp(void *env);
-#endif
DEFINE_REAL(int, setjmp_symname, void *env)
DEFINE_REAL(int, _setjmp, void *env)
DEFINE_REAL(int, sigsetjmp_symname, void *env)
#if !SANITIZER_NETBSD
DEFINE_REAL(int, __sigsetjmp, void *env)
#endif
-#endif // SANITIZER_MAC
+
+// The real interceptor for setjmp is special and implemented in pure asm. We
+// just need to initialize the REAL functions so that they can be used in asm.
+static void InitializeSetjmpInterceptors() {
+  // We cannot use TSAN_INTERCEPT to get the setjmp address, because it takes
+  // &setjmp, and setjmp is not present in some versions of libc.
+  using __interception::InterceptFunction;
+  InterceptFunction(SANITIZER_STRINGIFY(setjmp_symname),
+                    (uptr *)&REAL(setjmp_symname), 0, 0);
+  InterceptFunction("_setjmp", (uptr *)&REAL(_setjmp), 0, 0);
+  InterceptFunction(SANITIZER_STRINGIFY(sigsetjmp_symname),
+                    (uptr *)&REAL(sigsetjmp_symname), 0, 0);
+#if !SANITIZER_NETBSD
+ InterceptFunction("__sigsetjmp", (uptr*)&REAL(__sigsetjmp), 0, 0);
+#endif
+}
+#endif // SANITIZER_APPLE
#if SANITIZER_NETBSD
#define longjmp_symname __longjmp14
@@ -646,7 +657,7 @@ TSAN_INTERCEPTOR(void, _longjmp, uptr *env, int val) {
}
#endif
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
TSAN_INTERCEPTOR(void*, malloc, uptr size) {
if (in_symbolizer())
return InternalAlloc(size);
@@ -787,10 +798,11 @@ static void *mmap_interceptor(ThreadState *thr, uptr pc, Mmap real_mmap,
return res;
}
-TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) {
- SCOPED_TSAN_INTERCEPTOR(munmap, addr, sz);
+template <class Munmap>
+static int munmap_interceptor(ThreadState *thr, uptr pc, Munmap real_munmap,
+ void *addr, SIZE_T sz) {
UnmapShadow(thr, (uptr)addr, sz);
- int res = REAL(munmap)(addr, sz);
+ int res = real_munmap(addr, sz);
return res;
}
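// (Sketch, not part of the diff: the platform wrappers defined later in this
// file are assumed to delegate to the template roughly as follows; the exact
// interceptor bodies are not shown in this hunk.)
TSAN_INTERCEPTOR(int, munmap, void *addr, SIZE_T sz) {
  SCOPED_TSAN_INTERCEPTOR(munmap, addr, sz);
  return munmap_interceptor(thr, pc, REAL(munmap), addr, sz);
}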
@@ -804,7 +816,7 @@ TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
#define TSAN_MAYBE_INTERCEPT_MEMALIGN
#endif
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
TSAN_INTERCEPTOR(void*, aligned_alloc, uptr align, uptr sz) {
if (in_symbolizer())
return InternalAlloc(sz, nullptr, align);
@@ -835,7 +847,7 @@ TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
#define TSAN_MAYBE_INTERCEPT_PVALLOC
#endif
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
if (in_symbolizer()) {
void *p = InternalAlloc(sz, nullptr, align);
@@ -849,6 +861,54 @@ TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
}
#endif
+// Both __cxa_guard_acquire and pthread_once operate on an
+// object that is initially 0-initialized. pthread_once does not have any
+// other ABI requirements. __cxa_guard_acquire assumes
+// that any non-0 value in the first byte means that
+// initialization is completed. Contents of the remaining
+// bytes are up to us.
+constexpr u32 kGuardInit = 0;
+constexpr u32 kGuardDone = 1;
+constexpr u32 kGuardRunning = 1 << 16;
+constexpr u32 kGuardWaiter = 1 << 17;
+
+static int guard_acquire(ThreadState *thr, uptr pc, atomic_uint32_t *g,
+ bool blocking_hooks = true) {
+ if (blocking_hooks)
+ OnPotentiallyBlockingRegionBegin();
+ auto on_exit = at_scope_exit([blocking_hooks] {
+ if (blocking_hooks)
+ OnPotentiallyBlockingRegionEnd();
+ });
+
+ for (;;) {
+ u32 cmp = atomic_load(g, memory_order_acquire);
+ if (cmp == kGuardInit) {
+ if (atomic_compare_exchange_strong(g, &cmp, kGuardRunning,
+ memory_order_relaxed))
+ return 1;
+ } else if (cmp == kGuardDone) {
+ if (!thr->in_ignored_lib)
+ Acquire(thr, pc, (uptr)g);
+ return 0;
+ } else {
+ if ((cmp & kGuardWaiter) ||
+ atomic_compare_exchange_strong(g, &cmp, cmp | kGuardWaiter,
+ memory_order_relaxed))
+ FutexWait(g, cmp | kGuardWaiter);
+ }
+ }
+}
+
+static void guard_release(ThreadState *thr, uptr pc, atomic_uint32_t *g,
+ u32 v) {
+ if (!thr->in_ignored_lib)
+ Release(thr, pc, (uptr)g);
+ u32 old = atomic_exchange(g, v, memory_order_release);
+ if (old & kGuardWaiter)
+ FutexWake(g, 1 << 30);
+}
+
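// For illustration only: a standalone sketch of the guard state machine
// above, using std::atomic and std::this_thread::yield in place of the
// runtime's FutexWait/FutexWake; all names here are illustrative, not tsan's.
#include <atomic>
#include <cstdint>
#include <thread>

namespace guard_sketch {
constexpr uint32_t kInit = 0;           // not initialized yet
constexpr uint32_t kDone = 1;           // initialization finished
constexpr uint32_t kRunning = 1 << 16;  // some thread runs the initializer
constexpr uint32_t kWaiter = 1 << 17;   // at least one thread is blocked

// Returns true if the caller won the race and must run the initializer.
inline bool acquire(std::atomic<uint32_t> &g) {
  for (;;) {
    uint32_t cmp = g.load(std::memory_order_acquire);
    if (cmp == kInit) {
      if (g.compare_exchange_strong(cmp, kRunning, std::memory_order_relaxed))
        return true;
    } else if (cmp == kDone) {
      return false;  // someone else already initialized
    } else {
      // Publish that a waiter exists, then block (the runtime FutexWaits).
      if ((cmp & kWaiter) ||
          g.compare_exchange_strong(cmp, cmp | kWaiter,
                                    std::memory_order_relaxed))
        std::this_thread::yield();
    }
  }
}

inline void release(std::atomic<uint32_t> &g, uint32_t v) {
  uint32_t old = g.exchange(v, std::memory_order_release);
  (void)old;  // the runtime FutexWakes all waiters if (old & kWaiter)
}
}  // namespace guard_sketch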
// __cxa_guard_acquire and friends need to be intercepted in a special way -
// regular interceptors will break statically-linked libstdc++. Linux
// interceptors are specially defined as weak functions (so that they don't
@@ -859,7 +919,7 @@ TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
// these interceptors with INTERFACE_ATTRIBUTE.
// On OS X, we don't support static linking, so we just use a regular
// interceptor.
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
#define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR
#else
#define STDCXX_INTERCEPTOR(rettype, name, ...) \
@@ -869,31 +929,17 @@ TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
// Used in thread-safe function static initialization.
STDCXX_INTERCEPTOR(int, __cxa_guard_acquire, atomic_uint32_t *g) {
SCOPED_INTERCEPTOR_RAW(__cxa_guard_acquire, g);
- OnPotentiallyBlockingRegionBegin();
- auto on_exit = at_scope_exit(&OnPotentiallyBlockingRegionEnd);
- for (;;) {
- u32 cmp = atomic_load(g, memory_order_acquire);
- if (cmp == 0) {
- if (atomic_compare_exchange_strong(g, &cmp, 1<<16, memory_order_relaxed))
- return 1;
- } else if (cmp == 1) {
- Acquire(thr, pc, (uptr)g);
- return 0;
- } else {
- internal_sched_yield();
- }
- }
+ return guard_acquire(thr, pc, g);
}
STDCXX_INTERCEPTOR(void, __cxa_guard_release, atomic_uint32_t *g) {
SCOPED_INTERCEPTOR_RAW(__cxa_guard_release, g);
- Release(thr, pc, (uptr)g);
- atomic_store(g, 1, memory_order_release);
+ guard_release(thr, pc, g, kGuardDone);
}
STDCXX_INTERCEPTOR(void, __cxa_guard_abort, atomic_uint32_t *g) {
SCOPED_INTERCEPTOR_RAW(__cxa_guard_abort, g);
- atomic_store(g, 0, memory_order_relaxed);
+ guard_release(thr, pc, g, kGuardInit);
}
namespace __tsan {
@@ -908,15 +954,16 @@ void DestroyThreadState() {
}
void PlatformCleanUpThreadState(ThreadState *thr) {
- ThreadSignalContext *sctx = thr->signal_ctx;
+ ThreadSignalContext *sctx = (ThreadSignalContext *)atomic_load(
+ &thr->signal_ctx, memory_order_relaxed);
if (sctx) {
- thr->signal_ctx = 0;
+ atomic_store(&thr->signal_ctx, 0, memory_order_relaxed);
UnmapOrDie(sctx, sizeof(*sctx));
}
}
} // namespace __tsan
-#if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
+#if !SANITIZER_APPLE && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
static void thread_finalize(void *v) {
uptr iter = (uptr)v;
if (iter > 1) {
@@ -935,34 +982,33 @@ static void thread_finalize(void *v) {
struct ThreadParam {
void* (*callback)(void *arg);
void *param;
- atomic_uintptr_t tid;
+ Tid tid;
+ Semaphore created;
+ Semaphore started;
};
extern "C" void *__tsan_thread_start_func(void *arg) {
ThreadParam *p = (ThreadParam*)arg;
void* (*callback)(void *arg) = p->callback;
void *param = p->param;
- int tid = 0;
{
- cur_thread_init();
- ThreadState *thr = cur_thread();
+ ThreadState *thr = cur_thread_init();
// Thread-local state is not initialized yet.
ScopedIgnoreInterceptors ignore;
-#if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
+#if !SANITIZER_APPLE && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
ThreadIgnoreBegin(thr, 0);
if (pthread_setspecific(interceptor_ctx()->finalize_key,
(void *)GetPthreadDestructorIterations())) {
Printf("ThreadSanitizer: failed to set thread key\n");
Die();
}
- ThreadIgnoreEnd(thr, 0);
+ ThreadIgnoreEnd(thr);
#endif
- while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0)
- internal_sched_yield();
+ p->created.Wait();
Processor *proc = ProcCreate();
ProcWire(proc, thr);
- ThreadStart(thr, tid, GetTid(), ThreadType::Regular);
- atomic_store(&p->tid, 0, memory_order_release);
+ ThreadStart(thr, p->tid, GetTid(), ThreadType::Regular);
+ p->started.Post();
}
void *res = callback(param);
// Prevent the callback from being tail called,
@@ -984,9 +1030,11 @@ TSAN_INTERCEPTOR(int, pthread_create,
"fork is not supported. Dying (set die_after_fork=0 to override)\n");
Die();
} else {
- VPrintf(1, "ThreadSanitizer: starting new threads after multi-threaded "
- "fork is not supported (pid %d). Continuing because of "
- "die_after_fork=0, but you are on your own\n", internal_getpid());
+ VPrintf(1,
+ "ThreadSanitizer: starting new threads after multi-threaded "
+ "fork is not supported (pid %lu). Continuing because of "
+ "die_after_fork=0, but you are on your own\n",
+ internal_getpid());
}
}
__sanitizer_pthread_attr_t myattr;
@@ -1001,18 +1049,18 @@ TSAN_INTERCEPTOR(int, pthread_create,
ThreadParam p;
p.callback = callback;
p.param = param;
- atomic_store(&p.tid, 0, memory_order_relaxed);
+ p.tid = kMainTid;
int res = -1;
{
// Otherwise we see false positives in pthread stack manipulation.
ScopedIgnoreInterceptors ignore;
ThreadIgnoreBegin(thr, pc);
res = REAL(pthread_create)(th, attr, __tsan_thread_start_func, &p);
- ThreadIgnoreEnd(thr, pc);
+ ThreadIgnoreEnd(thr);
}
if (res == 0) {
- int tid = ThreadCreate(thr, pc, *(uptr*)th, IsStateDetached(detached));
- CHECK_NE(tid, 0);
+ p.tid = ThreadCreate(thr, pc, *(uptr *)th, IsStateDetached(detached));
+ CHECK_NE(p.tid, kMainTid);
// Synchronization on p.tid serves two purposes:
// 1. ThreadCreate must finish before the new thread starts.
// Otherwise the new thread can call pthread_detach, but the pthread_t
@@ -1020,9 +1068,8 @@ TSAN_INTERCEPTOR(int, pthread_create,
// 2. ThreadStart must finish before this thread continues.
// Otherwise, this thread can call pthread_detach and reset thr->sync
// before the new thread got a chance to acquire from it in ThreadStart.
- atomic_store(&p.tid, tid, memory_order_release);
- while (atomic_load(&p.tid, memory_order_acquire) != 0)
- internal_sched_yield();
+ p.created.Post();
+ p.started.Wait();
}
if (attr == &myattr)
pthread_attr_destroy(&myattr);
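// For illustration only: a reduced model of the created/started handshake
// above, assuming C++20 std::binary_semaphore in place of the runtime's
// Semaphore; the tid plumbing is simplified to a plain int.
#include <semaphore>
#include <thread>

struct HandshakeParam {
  int tid = 0;
  std::binary_semaphore created{0};
  std::binary_semaphore started{0};
};

void child_thread(HandshakeParam *p) {
  p->created.acquire();  // purpose 1: ThreadCreate finished, tid is valid
  // ... ThreadStart(p->tid) would run here ...
  p->started.release();  // purpose 2: let the creator continue
}

int main() {
  HandshakeParam p;
  std::thread t(child_thread, &p);
  p.tid = 7;            // creator registers the thread, then
  p.created.release();  // unblocks the child,
  p.started.acquire();  // and waits until ThreadStart completed
  t.join();
}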
@@ -1031,10 +1078,10 @@ TSAN_INTERCEPTOR(int, pthread_create,
TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) {
SCOPED_INTERCEPTOR_RAW(pthread_join, th, ret);
- int tid = ThreadConsumeTid(thr, pc, (uptr)th);
+ Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
ThreadIgnoreBegin(thr, pc);
int res = BLOCK_REAL(pthread_join)(th, ret);
- ThreadIgnoreEnd(thr, pc);
+ ThreadIgnoreEnd(thr);
if (res == 0) {
ThreadJoin(thr, pc, tid);
}
@@ -1045,7 +1092,7 @@ DEFINE_REAL_PTHREAD_FUNCTIONS
TSAN_INTERCEPTOR(int, pthread_detach, void *th) {
SCOPED_INTERCEPTOR_RAW(pthread_detach, th);
- int tid = ThreadConsumeTid(thr, pc, (uptr)th);
+ Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
int res = REAL(pthread_detach)(th);
if (res == 0) {
ThreadDetach(thr, pc, tid);
@@ -1056,7 +1103,7 @@ TSAN_INTERCEPTOR(int, pthread_detach, void *th) {
TSAN_INTERCEPTOR(void, pthread_exit, void *retval) {
{
SCOPED_INTERCEPTOR_RAW(pthread_exit, retval);
-#if !SANITIZER_MAC && !SANITIZER_ANDROID
+#if !SANITIZER_APPLE && !SANITIZER_ANDROID
CHECK_EQ(thr, &cur_thread_placeholder);
#endif
}
@@ -1066,10 +1113,10 @@ TSAN_INTERCEPTOR(void, pthread_exit, void *retval) {
#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, pthread_tryjoin_np, void *th, void **ret) {
SCOPED_INTERCEPTOR_RAW(pthread_tryjoin_np, th, ret);
- int tid = ThreadConsumeTid(thr, pc, (uptr)th);
+ Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
ThreadIgnoreBegin(thr, pc);
int res = REAL(pthread_tryjoin_np)(th, ret);
- ThreadIgnoreEnd(thr, pc);
+ ThreadIgnoreEnd(thr);
if (res == 0)
ThreadJoin(thr, pc, tid);
else
@@ -1080,10 +1127,10 @@ TSAN_INTERCEPTOR(int, pthread_tryjoin_np, void *th, void **ret) {
TSAN_INTERCEPTOR(int, pthread_timedjoin_np, void *th, void **ret,
const struct timespec *abstime) {
SCOPED_INTERCEPTOR_RAW(pthread_timedjoin_np, th, ret, abstime);
- int tid = ThreadConsumeTid(thr, pc, (uptr)th);
+ Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
ThreadIgnoreBegin(thr, pc);
int res = BLOCK_REAL(pthread_timedjoin_np)(th, ret, abstime);
- ThreadIgnoreEnd(thr, pc);
+ ThreadIgnoreEnd(thr);
if (res == 0)
ThreadJoin(thr, pc, tid);
else
@@ -1152,9 +1199,8 @@ void CondMutexUnlockCtx<Fn>::Unlock() const {
// tsan code. Also ScopedInterceptor and BlockingCall destructors won't run
// since the thread is cancelled, so we have to manually execute them
// (the thread still can run some user code due to pthread_cleanup_push).
- ThreadSignalContext *ctx = SigCtx(thr);
- CHECK_EQ(atomic_load(&ctx->in_blocking_func, memory_order_relaxed), 1);
- atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
+ CHECK_EQ(atomic_load(&thr->in_blocking_func, memory_order_relaxed), 1);
+ atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
MutexPostLock(thr, pc, (uptr)m, MutexFlagDoPreLockOnPostLock);
// Undo BlockingCall ctor effects.
thr->ignore_interceptors--;
@@ -1225,7 +1271,7 @@ INTERCEPTOR(int, pthread_cond_clockwait, void *c, void *m,
#define TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT
#endif
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
INTERCEPTOR(int, pthread_cond_timedwait_relative_np, void *c, void *m,
void *reltime) {
void *cond = init_cond(c);
@@ -1292,6 +1338,19 @@ TSAN_INTERCEPTOR(int, pthread_mutex_destroy, void *m) {
return res;
}
+TSAN_INTERCEPTOR(int, pthread_mutex_lock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_mutex_lock, m);
+ MutexPreLock(thr, pc, (uptr)m);
+ int res = REAL(pthread_mutex_lock)(m);
+ if (res == errno_EOWNERDEAD)
+ MutexRepair(thr, pc, (uptr)m);
+ if (res == 0 || res == errno_EOWNERDEAD)
+ MutexPostLock(thr, pc, (uptr)m);
+ if (res == errno_EINVAL)
+ MutexInvalidAccess(thr, pc, (uptr)m);
+ return res;
+}
+
TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_mutex_trylock, m);
int res = REAL(pthread_mutex_trylock)(m);
@@ -1302,7 +1361,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
return res;
}
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) {
SCOPED_TSAN_INTERCEPTOR(pthread_mutex_timedlock, m, abstime);
int res = REAL(pthread_mutex_timedlock)(m, abstime);
@@ -1313,7 +1372,60 @@ TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) {
}
#endif
-#if !SANITIZER_MAC
+TSAN_INTERCEPTOR(int, pthread_mutex_unlock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_mutex_unlock, m);
+ MutexUnlock(thr, pc, (uptr)m);
+ int res = REAL(pthread_mutex_unlock)(m);
+ if (res == errno_EINVAL)
+ MutexInvalidAccess(thr, pc, (uptr)m);
+ return res;
+}
+
+#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(int, pthread_mutex_clocklock, void *m,
+ __sanitizer_clockid_t clock, void *abstime) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_mutex_clocklock, m, clock, abstime);
+ MutexPreLock(thr, pc, (uptr)m);
+ int res = REAL(pthread_mutex_clocklock)(m, clock, abstime);
+ if (res == errno_EOWNERDEAD)
+ MutexRepair(thr, pc, (uptr)m);
+ if (res == 0 || res == errno_EOWNERDEAD)
+ MutexPostLock(thr, pc, (uptr)m);
+ if (res == errno_EINVAL)
+ MutexInvalidAccess(thr, pc, (uptr)m);
+ return res;
+}
+#endif
+
+#if SANITIZER_GLIBC
+# if !__GLIBC_PREREQ(2, 34)
+// Since glibc 2.34, these two functions are given a non-default symbol
+// version; they are no longer expected to be intercepted by programs.
+TSAN_INTERCEPTOR(int, __pthread_mutex_lock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(__pthread_mutex_lock, m);
+ MutexPreLock(thr, pc, (uptr)m);
+ int res = REAL(__pthread_mutex_lock)(m);
+ if (res == errno_EOWNERDEAD)
+ MutexRepair(thr, pc, (uptr)m);
+ if (res == 0 || res == errno_EOWNERDEAD)
+ MutexPostLock(thr, pc, (uptr)m);
+ if (res == errno_EINVAL)
+ MutexInvalidAccess(thr, pc, (uptr)m);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, __pthread_mutex_unlock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(__pthread_mutex_unlock, m);
+ MutexUnlock(thr, pc, (uptr)m);
+ int res = REAL(__pthread_mutex_unlock)(m);
+ if (res == errno_EINVAL)
+ MutexInvalidAccess(thr, pc, (uptr)m);
+ return res;
+}
+# endif
+#endif
+
+#if !SANITIZER_APPLE
TSAN_INTERCEPTOR(int, pthread_spin_init, void *m, int pshared) {
SCOPED_TSAN_INTERCEPTOR(pthread_spin_init, m, pshared);
int res = REAL(pthread_spin_init)(m, pshared);
@@ -1396,7 +1508,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) {
return res;
}
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedrdlock, m, abstime);
int res = REAL(pthread_rwlock_timedrdlock)(m, abstime);
@@ -1426,7 +1538,7 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) {
return res;
}
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedwrlock, m, abstime);
int res = REAL(pthread_rwlock_timedwrlock)(m, abstime);
@@ -1444,17 +1556,17 @@ TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) {
return res;
}
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) {
SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count);
- MemoryWrite(thr, pc, (uptr)b, kSizeLog1);
+ MemoryAccess(thr, pc, (uptr)b, 1, kAccessWrite);
int res = REAL(pthread_barrier_init)(b, a, count);
return res;
}
TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) {
SCOPED_TSAN_INTERCEPTOR(pthread_barrier_destroy, b);
- MemoryWrite(thr, pc, (uptr)b, kSizeLog1);
+ MemoryAccess(thr, pc, (uptr)b, 1, kAccessWrite);
int res = REAL(pthread_barrier_destroy)(b);
return res;
}
@@ -1462,9 +1574,9 @@ TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) {
TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) {
SCOPED_TSAN_INTERCEPTOR(pthread_barrier_wait, b);
Release(thr, pc, (uptr)b);
- MemoryRead(thr, pc, (uptr)b, kSizeLog1);
+ MemoryAccess(thr, pc, (uptr)b, 1, kAccessRead);
int res = REAL(pthread_barrier_wait)(b);
- MemoryRead(thr, pc, (uptr)b, kSizeLog1);
+ MemoryAccess(thr, pc, (uptr)b, 1, kAccessRead);
if (res == 0 || res == PTHREAD_BARRIER_SERIAL_THREAD) {
Acquire(thr, pc, (uptr)b);
}
@@ -1478,7 +1590,7 @@ TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
return errno_EINVAL;
atomic_uint32_t *a;
- if (SANITIZER_MAC)
+ if (SANITIZER_APPLE)
a = static_cast<atomic_uint32_t*>((void *)((char *)o + sizeof(long_t)));
else if (SANITIZER_NETBSD)
a = static_cast<atomic_uint32_t*>
@@ -1486,72 +1598,56 @@ TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
else
a = static_cast<atomic_uint32_t*>(o);
- u32 v = atomic_load(a, memory_order_acquire);
- if (v == 0 && atomic_compare_exchange_strong(a, &v, 1,
- memory_order_relaxed)) {
+ // Mac OS X appears to use pthread_once() in contexts where calling the
+ // BlockingRegion hooks results in crashes due to too little stack space.
+ if (guard_acquire(thr, pc, a, !SANITIZER_APPLE)) {
(*f)();
- if (!thr->in_ignored_lib)
- Release(thr, pc, (uptr)o);
- atomic_store(a, 2, memory_order_release);
- } else {
- while (v != 2) {
- internal_sched_yield();
- v = atomic_load(a, memory_order_acquire);
- }
- if (!thr->in_ignored_lib)
- Acquire(thr, pc, (uptr)o);
+ guard_release(thr, pc, a, kGuardDone);
}
return 0;
}
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#if SANITIZER_GLIBC
TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__fxstat, version, fd, buf);
if (fd > 0)
FdAccess(thr, pc, fd);
return REAL(__fxstat)(version, fd, buf);
}
-#define TSAN_MAYBE_INTERCEPT___FXSTAT TSAN_INTERCEPT(__fxstat)
+
+TSAN_INTERCEPTOR(int, __fxstat64, int version, int fd, void *buf) {
+ SCOPED_TSAN_INTERCEPTOR(__fxstat64, version, fd, buf);
+ if (fd > 0)
+ FdAccess(thr, pc, fd);
+ return REAL(__fxstat64)(version, fd, buf);
+}
+#define TSAN_MAYBE_INTERCEPT___FXSTAT TSAN_INTERCEPT(__fxstat); TSAN_INTERCEPT(__fxstat64)
#else
#define TSAN_MAYBE_INTERCEPT___FXSTAT
#endif
+#if !SANITIZER_GLIBC || __GLIBC_PREREQ(2, 33)
TSAN_INTERCEPTOR(int, fstat, int fd, void *buf) {
-#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_ANDROID || SANITIZER_NETBSD
SCOPED_TSAN_INTERCEPTOR(fstat, fd, buf);
if (fd > 0)
FdAccess(thr, pc, fd);
return REAL(fstat)(fd, buf);
-#else
- SCOPED_TSAN_INTERCEPTOR(__fxstat, 0, fd, buf);
- if (fd > 0)
- FdAccess(thr, pc, fd);
- return REAL(__fxstat)(0, fd, buf);
-#endif
-}
-
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
-TSAN_INTERCEPTOR(int, __fxstat64, int version, int fd, void *buf) {
- SCOPED_TSAN_INTERCEPTOR(__fxstat64, version, fd, buf);
- if (fd > 0)
- FdAccess(thr, pc, fd);
- return REAL(__fxstat64)(version, fd, buf);
}
-#define TSAN_MAYBE_INTERCEPT___FXSTAT64 TSAN_INTERCEPT(__fxstat64)
+# define TSAN_MAYBE_INTERCEPT_FSTAT TSAN_INTERCEPT(fstat)
#else
-#define TSAN_MAYBE_INTERCEPT___FXSTAT64
+# define TSAN_MAYBE_INTERCEPT_FSTAT
#endif
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#if __GLIBC_PREREQ(2, 33)
TSAN_INTERCEPTOR(int, fstat64, int fd, void *buf) {
- SCOPED_TSAN_INTERCEPTOR(__fxstat64, 0, fd, buf);
+ SCOPED_TSAN_INTERCEPTOR(fstat64, fd, buf);
if (fd > 0)
FdAccess(thr, pc, fd);
- return REAL(__fxstat64)(0, fd, buf);
+ return REAL(fstat64)(fd, buf);
}
-#define TSAN_MAYBE_INTERCEPT_FSTAT64 TSAN_INTERCEPT(fstat64)
+# define TSAN_MAYBE_INTERCEPT_FSTAT64 TSAN_INTERCEPT(fstat64)
#else
-#define TSAN_MAYBE_INTERCEPT_FSTAT64
+# define TSAN_MAYBE_INTERCEPT_FSTAT64
#endif
TSAN_INTERCEPTOR(int, open, const char *name, int oflag, ...) {
@@ -1624,7 +1720,7 @@ TSAN_INTERCEPTOR(int, dup2, int oldfd, int newfd) {
return newfd2;
}
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
TSAN_INTERCEPTOR(int, dup3, int oldfd, int newfd, int flags) {
SCOPED_TSAN_INTERCEPTOR(dup3, oldfd, newfd, flags);
int newfd2 = REAL(dup3)(oldfd, newfd, flags);
@@ -1649,11 +1745,10 @@ TSAN_INTERCEPTOR(int, eventfd, unsigned initval, int flags) {
#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, signalfd, int fd, void *mask, int flags) {
- SCOPED_TSAN_INTERCEPTOR(signalfd, fd, mask, flags);
- if (fd >= 0)
- FdClose(thr, pc, fd);
+ SCOPED_INTERCEPTOR_RAW(signalfd, fd, mask, flags);
+ FdClose(thr, pc, fd);
fd = REAL(signalfd)(fd, mask, flags);
- if (fd >= 0)
+ if (!MustIgnoreInterceptor(thr))
FdSignalCreate(thr, pc, fd);
return fd;
}
@@ -1730,17 +1825,16 @@ TSAN_INTERCEPTOR(int, listen, int fd, int backlog) {
}
TSAN_INTERCEPTOR(int, close, int fd) {
- SCOPED_TSAN_INTERCEPTOR(close, fd);
- if (fd >= 0)
+ SCOPED_INTERCEPTOR_RAW(close, fd);
+ if (!in_symbolizer())
FdClose(thr, pc, fd);
return REAL(close)(fd);
}
#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, __close, int fd) {
- SCOPED_TSAN_INTERCEPTOR(__close, fd);
- if (fd >= 0)
- FdClose(thr, pc, fd);
+ SCOPED_INTERCEPTOR_RAW(__close, fd);
+ FdClose(thr, pc, fd);
return REAL(__close)(fd);
}
#define TSAN_MAYBE_INTERCEPT___CLOSE TSAN_INTERCEPT(__close)
@@ -1751,13 +1845,10 @@ TSAN_INTERCEPTOR(int, __close, int fd) {
// glibc guts
#if SANITIZER_LINUX && !SANITIZER_ANDROID
TSAN_INTERCEPTOR(void, __res_iclose, void *state, bool free_addr) {
- SCOPED_TSAN_INTERCEPTOR(__res_iclose, state, free_addr);
+ SCOPED_INTERCEPTOR_RAW(__res_iclose, state, free_addr);
int fds[64];
int cnt = ExtractResolvFDs(state, fds, ARRAY_SIZE(fds));
- for (int i = 0; i < cnt; i++) {
- if (fds[i] > 0)
- FdClose(thr, pc, fds[i]);
- }
+ for (int i = 0; i < cnt; i++) FdClose(thr, pc, fds[i]);
REAL(__res_iclose)(state, free_addr);
}
#define TSAN_MAYBE_INTERCEPT___RES_ICLOSE TSAN_INTERCEPT(__res_iclose)
@@ -1773,7 +1864,7 @@ TSAN_INTERCEPTOR(int, pipe, int *pipefd) {
return res;
}
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
TSAN_INTERCEPTOR(int, pipe2, int *pipefd, int flags) {
SCOPED_TSAN_INTERCEPTOR(pipe2, pipefd, flags);
int res = REAL(pipe2)(pipefd, flags);
@@ -1838,7 +1929,7 @@ TSAN_INTERCEPTOR(int, rmdir, char *path) {
}
TSAN_INTERCEPTOR(int, closedir, void *dirp) {
- SCOPED_TSAN_INTERCEPTOR(closedir, dirp);
+ SCOPED_INTERCEPTOR_RAW(closedir, dirp);
if (dirp) {
int fd = dirfd(dirp);
FdClose(thr, pc, fd);
@@ -1869,8 +1960,10 @@ TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) {
FdAccess(thr, pc, epfd);
if (epfd >= 0 && fd >= 0)
FdAccess(thr, pc, fd);
- if (op == EPOLL_CTL_ADD && epfd >= 0)
+ if (op == EPOLL_CTL_ADD && epfd >= 0) {
+ FdPollAdd(thr, pc, epfd, fd);
FdRelease(thr, pc, epfd);
+ }
int res = REAL(epoll_ctl)(epfd, op, fd, ev);
return res;
}
@@ -1896,12 +1989,34 @@ TSAN_INTERCEPTOR(int, epoll_pwait, int epfd, void *ev, int cnt, int timeout,
return res;
}
-#define TSAN_MAYBE_INTERCEPT_EPOLL \
- TSAN_INTERCEPT(epoll_create); \
- TSAN_INTERCEPT(epoll_create1); \
- TSAN_INTERCEPT(epoll_ctl); \
- TSAN_INTERCEPT(epoll_wait); \
- TSAN_INTERCEPT(epoll_pwait)
+TSAN_INTERCEPTOR(int, epoll_pwait2, int epfd, void *ev, int cnt, void *timeout,
+ void *sigmask) {
+ SCOPED_INTERCEPTOR_RAW(epoll_pwait2, epfd, ev, cnt, timeout, sigmask);
+ // This function is new and may not be present in libc and/or kernel.
+ // Since we effectively add it to libc (as it will be probed by the program
+ // using dlsym or a weak function pointer) we need to handle the case
+ // when it's not present in the actual libc.
+ if (!REAL(epoll_pwait2)) {
+ errno = errno_ENOSYS;
+ return -1;
+ }
+ if (MustIgnoreInterceptor(thr))
+ return REAL(epoll_pwait2)(epfd, ev, cnt, timeout, sigmask);
+ if (epfd >= 0)
+ FdAccess(thr, pc, epfd);
+ int res = BLOCK_REAL(epoll_pwait2)(epfd, ev, cnt, timeout, sigmask);
+ if (res > 0 && epfd >= 0)
+ FdAcquire(thr, pc, epfd);
+ return res;
+}
+
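// For illustration only: the null-REAL() guard above follows a common
// pattern for optional libc entry points; a minimal standalone version using
// dlsym looks like this (the wrapper name and void* parameter types are
// illustrative simplifications, not the real epoll_pwait2 prototype).
#ifndef _GNU_SOURCE
#define _GNU_SOURCE  // for RTLD_NEXT
#endif
#include <cerrno>
#include <dlfcn.h>

using pwait2_fn = int (*)(int, void *, int, const void *, const void *);

int my_epoll_pwait2(int epfd, void *ev, int cnt, const void *timeout,
                    const void *sigmask) {
  static pwait2_fn real =
      reinterpret_cast<pwait2_fn>(dlsym(RTLD_NEXT, "epoll_pwait2"));
  if (!real) {  // libc is too old: fail the way a missing syscall would
    errno = ENOSYS;
    return -1;
  }
  return real(epfd, ev, cnt, timeout, sigmask);
}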
+# define TSAN_MAYBE_INTERCEPT_EPOLL \
+ TSAN_INTERCEPT(epoll_create); \
+ TSAN_INTERCEPT(epoll_create1); \
+ TSAN_INTERCEPT(epoll_ctl); \
+ TSAN_INTERCEPT(epoll_wait); \
+ TSAN_INTERCEPT(epoll_pwait); \
+ TSAN_INTERCEPT(epoll_pwait2)
#else
#define TSAN_MAYBE_INTERCEPT_EPOLL
#endif
@@ -1933,24 +2048,47 @@ TSAN_INTERCEPTOR(int, pthread_sigmask, int how, const __sanitizer_sigset_t *set,
namespace __tsan {
+static void ReportErrnoSpoiling(ThreadState *thr, uptr pc, int sig) {
+ VarSizeStackTrace stack;
+ // StackTrace::GetNextInstructionPc(pc) is used because a return address is
+ // expected; OutputReport() will undo this.
+ ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack);
+ ThreadRegistryLock l(&ctx->thread_registry);
+ ScopedReport rep(ReportTypeErrnoInSignal);
+ rep.SetSigNum(sig);
+ if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) {
+ rep.AddStack(stack, true);
+ OutputReport(thr, rep);
+ }
+}
+
static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
- bool sigact, int sig,
- __sanitizer_siginfo *info, void *uctx) {
+ int sig, __sanitizer_siginfo *info,
+ void *uctx) {
+ CHECK(thr->slot);
__sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
if (acquire)
Acquire(thr, 0, (uptr)&sigactions[sig]);
// Signals are generally asynchronous, so if we receive a signals when
// ignores are enabled we should disable ignores. This is critical for sync
- // and interceptors, because otherwise we can miss syncronization and report
+ // and interceptors, because otherwise we can miss synchronization and report
// false races.
int ignore_reads_and_writes = thr->ignore_reads_and_writes;
int ignore_interceptors = thr->ignore_interceptors;
int ignore_sync = thr->ignore_sync;
+ // While in the symbolizer we only process SIGSEGVs synchronously
+ // (such a signal indicates a bug in the symbolizer or in tsan). But we
+ // want to reset in_symbolizer to fail gracefully. The symbolizer and user
+ // code use different memory allocators, so if we don't reset in_symbolizer
+ // we can get memory allocated with one allocator freed with the other,
+ // which can cause further crashes.
+ int in_symbolizer = thr->in_symbolizer;
if (!ctx->after_multithreaded_fork) {
thr->ignore_reads_and_writes = 0;
thr->fast_state.ClearIgnoreBit();
thr->ignore_interceptors = 0;
thr->ignore_sync = 0;
+ thr->in_symbolizer = 0;
}
// Ensure that the handler does not spoil errno.
const int saved_errno = errno;
@@ -1958,13 +2096,14 @@ static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
// This code races with sigaction. Be careful to not read sa_sigaction twice.
// Also need to remember pc for reporting before the call,
// because the handler can reset it.
- volatile uptr pc =
- sigact ? (uptr)sigactions[sig].sigaction : (uptr)sigactions[sig].handler;
+ volatile uptr pc = (sigactions[sig].sa_flags & SA_SIGINFO)
+ ? (uptr)sigactions[sig].sigaction
+ : (uptr)sigactions[sig].handler;
if (pc != sig_dfl && pc != sig_ign) {
- if (sigact)
- ((__sanitizer_sigactionhandler_ptr)pc)(sig, info, uctx);
- else
- ((__sanitizer_sighandler_ptr)pc)(sig);
+ // The callback can be either sa_handler or sa_sigaction.
+ // They have different signatures, but we assume that passing
+ // additional arguments to sa_handler works and is harmless.
+ ((__sanitizer_sigactionhandler_ptr)pc)(sig, info, uctx);
}
if (!ctx->after_multithreaded_fork) {
thr->ignore_reads_and_writes = ignore_reads_and_writes;
@@ -1972,6 +2111,7 @@ static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
thr->fast_state.SetIgnoreBit();
thr->ignore_interceptors = ignore_interceptors;
thr->ignore_sync = ignore_sync;
+ thr->in_symbolizer = in_symbolizer;
}
// We do not detect errno spoiling for SIGTERM,
// because some SIGTERM handlers do spoil errno but reraise SIGTERM,
@@ -1981,27 +2121,16 @@ static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
// from rtl_generic_sighandler) we have not yet received the reraised
// signal; and it looks too fragile to intercept all ways to reraise a signal.
if (ShouldReport(thr, ReportTypeErrnoInSignal) && !sync && sig != SIGTERM &&
- errno != 99) {
- VarSizeStackTrace stack;
- // StackTrace::GetNestInstructionPc(pc) is used because return address is
- // expected, OutputReport() will undo this.
- ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack);
- ThreadRegistryLock l(ctx->thread_registry);
- ScopedReport rep(ReportTypeErrnoInSignal);
- if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) {
- rep.AddStack(stack, true);
- OutputReport(thr, rep);
- }
- }
+ errno != 99)
+ ReportErrnoSpoiling(thr, pc, sig);
errno = saved_errno;
}
-void ProcessPendingSignals(ThreadState *thr) {
+void ProcessPendingSignalsImpl(ThreadState *thr) {
+ atomic_store(&thr->pending_signals, 0, memory_order_relaxed);
ThreadSignalContext *sctx = SigCtx(thr);
- if (sctx == 0 ||
- atomic_load(&sctx->have_pending_signals, memory_order_relaxed) == 0)
+ if (sctx == 0)
return;
- atomic_store(&sctx->have_pending_signals, 0, memory_order_relaxed);
atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
internal_sigfillset(&sctx->emptyset);
int res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->emptyset, &sctx->oldset);
@@ -2010,8 +2139,8 @@ void ProcessPendingSignals(ThreadState *thr) {
SignalDesc *signal = &sctx->pending_signals[sig];
if (signal->armed) {
signal->armed = false;
- CallUserSignalHandler(thr, false, true, signal->sigaction, sig,
- &signal->siginfo, &signal->ctx);
+ CallUserSignalHandler(thr, false, true, sig, &signal->siginfo,
+ &signal->ctx);
}
}
res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->oldset, 0);
@@ -2021,35 +2150,40 @@ void ProcessPendingSignals(ThreadState *thr) {
} // namespace __tsan
-static bool is_sync_signal(ThreadSignalContext *sctx, int sig) {
+static bool is_sync_signal(ThreadSignalContext *sctx, int sig,
+ __sanitizer_siginfo *info) {
+ // If we are sending a signal to ourselves, we must process it now.
+ if (sctx && sig == sctx->int_signal_send)
+ return true;
+#if SANITIZER_HAS_SIGINFO
+ // POSIX timers can be configured to send any kind of signal; however, it
+ // doesn't make any sense to consider a timer signal as synchronous!
+ if (info->si_code == SI_TIMER)
+ return false;
+#endif
return sig == SIGSEGV || sig == SIGBUS || sig == SIGILL || sig == SIGTRAP ||
- sig == SIGABRT || sig == SIGFPE || sig == SIGPIPE || sig == SIGSYS ||
- // If we are sending signal to ourselves, we must process it now.
- (sctx && sig == sctx->int_signal_send);
+ sig == SIGABRT || sig == SIGFPE || sig == SIGPIPE || sig == SIGSYS;
}
-void ALWAYS_INLINE rtl_generic_sighandler(bool sigact, int sig,
- __sanitizer_siginfo *info,
- void *ctx) {
- cur_thread_init();
- ThreadState *thr = cur_thread();
+void sighandler(int sig, __sanitizer_siginfo *info, void *ctx) {
+ ThreadState *thr = cur_thread_init();
ThreadSignalContext *sctx = SigCtx(thr);
if (sig < 0 || sig >= kSigCount) {
VPrintf(1, "ThreadSanitizer: ignoring signal %d\n", sig);
return;
}
// Don't mess with synchronous signals.
- const bool sync = is_sync_signal(sctx, sig);
+ const bool sync = is_sync_signal(sctx, sig, info);
if (sync ||
// If we are in blocking function, we can safely process it now
// (but check if we are in a recursive interceptor,
// i.e. pthread_join()->munmap()).
- (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed))) {
+ atomic_load(&thr->in_blocking_func, memory_order_relaxed)) {
atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
- if (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed)) {
- atomic_store(&sctx->in_blocking_func, 0, memory_order_relaxed);
- CallUserSignalHandler(thr, sync, true, sigact, sig, info, ctx);
- atomic_store(&sctx->in_blocking_func, 1, memory_order_relaxed);
+ if (atomic_load(&thr->in_blocking_func, memory_order_relaxed)) {
+ atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
+ CallUserSignalHandler(thr, sync, true, sig, info, ctx);
+ atomic_store(&thr->in_blocking_func, 1, memory_order_relaxed);
} else {
// Be very conservative with when we do acquire in this case.
// It's unsafe to do acquire in async handlers, because ThreadState
@@ -2057,7 +2191,7 @@ void ALWAYS_INLINE rtl_generic_sighandler(bool sigact, int sig,
// SIGSYS looks relatively safe -- it's synchronous and can actually
// need some global state.
bool acq = (sig == SIGSYS);
- CallUserSignalHandler(thr, sync, acq, sigact, sig, info, ctx);
+ CallUserSignalHandler(thr, sync, acq, sig, info, ctx);
}
atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
return;
@@ -2068,23 +2202,12 @@ void ALWAYS_INLINE rtl_generic_sighandler(bool sigact, int sig,
SignalDesc *signal = &sctx->pending_signals[sig];
if (signal->armed == false) {
signal->armed = true;
- signal->sigaction = sigact;
- if (info)
- internal_memcpy(&signal->siginfo, info, sizeof(*info));
- if (ctx)
- internal_memcpy(&signal->ctx, ctx, sizeof(signal->ctx));
- atomic_store(&sctx->have_pending_signals, 1, memory_order_relaxed);
+ internal_memcpy(&signal->siginfo, info, sizeof(*info));
+ internal_memcpy(&signal->ctx, ctx, sizeof(signal->ctx));
+ atomic_store(&thr->pending_signals, 1, memory_order_relaxed);
}
}
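// For illustration only: a minimal model of the defer-and-replay scheme
// above. An async signal arriving at an unsafe point is only recorded, then
// replayed later from a safe interception point. Fixed-size globals stand in
// for the runtime's per-thread structures, and std::memcpy stands in for
// internal_memcpy.
#include <atomic>
#include <signal.h>
#include <cstring>

constexpr int kNumSigs = 65;

struct Pending {
  std::atomic<bool> armed{false};
  siginfo_t siginfo{};
};

Pending g_pending[kNumSigs];
std::atomic<int> g_have_pending{0};

void defer_signal(int sig, const siginfo_t *info) {  // called from handler
  if (sig < 0 || sig >= kNumSigs) return;
  if (!g_pending[sig].armed.load(std::memory_order_relaxed)) {
    std::memcpy(&g_pending[sig].siginfo, info, sizeof(*info));
    g_pending[sig].armed.store(true, std::memory_order_relaxed);
    g_have_pending.store(1, std::memory_order_relaxed);
  }
}

void replay_pending(void (*user)(int, siginfo_t *)) {  // at a safe point
  if (!g_have_pending.exchange(0, std::memory_order_relaxed)) return;
  for (int sig = 0; sig < kNumSigs; sig++)
    if (g_pending[sig].armed.exchange(false, std::memory_order_relaxed))
      user(sig, &g_pending[sig].siginfo);
}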
-static void rtl_sighandler(int sig) {
- rtl_generic_sighandler(false, sig, 0, 0);
-}
-
-static void rtl_sigaction(int sig, __sanitizer_siginfo *info, void *ctx) {
- rtl_generic_sighandler(true, sig, info, ctx);
-}
-
TSAN_INTERCEPTOR(int, raise, int sig) {
SCOPED_TSAN_INTERCEPTOR(raise, sig);
ThreadSignalContext *sctx = SigCtx(thr);
@@ -2118,11 +2241,11 @@ TSAN_INTERCEPTOR(int, pthread_kill, void *tid, int sig) {
ThreadSignalContext *sctx = SigCtx(thr);
CHECK_NE(sctx, 0);
int prev = sctx->int_signal_send;
- if (tid == pthread_self()) {
+ bool self = pthread_equal(tid, pthread_self());
+ if (self)
sctx->int_signal_send = sig;
- }
int res = REAL(pthread_kill)(tid, sig);
- if (tid == pthread_self()) {
+ if (self) {
CHECK_EQ(sctx->int_signal_send, sig);
sctx->int_signal_send = prev;
}
@@ -2143,7 +2266,7 @@ TSAN_INTERCEPTOR(int, getaddrinfo, void *node, void *service,
// inside of getaddrinfo. So ignore memory accesses.
ThreadIgnoreBegin(thr, pc);
int res = REAL(getaddrinfo)(node, service, hints, rv);
- ThreadIgnoreEnd(thr, pc);
+ ThreadIgnoreEnd(thr);
return res;
}
@@ -2175,10 +2298,11 @@ void atfork_child() {
return;
ThreadState *thr = cur_thread();
const uptr pc = StackTrace::GetCurrentPc();
- ForkChildAfter(thr, pc);
+ ForkChildAfter(thr, pc, true);
FdOnFork(thr, pc);
}
+#if !SANITIZER_IOS
TSAN_INTERCEPTOR(int, vfork, int fake) {
// Some programs (e.g. openjdk) call close for all file descriptors
// in the child process. Under tsan it leads to false positives, because
@@ -2195,8 +2319,40 @@ TSAN_INTERCEPTOR(int, vfork, int fake) {
// Instead we simply turn vfork into fork.
return WRAP(fork)(fake);
}
+#endif
-#if !SANITIZER_MAC && !SANITIZER_ANDROID
+#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(int, clone, int (*fn)(void *), void *stack, int flags,
+ void *arg, int *parent_tid, void *tls, pid_t *child_tid) {
+ SCOPED_INTERCEPTOR_RAW(clone, fn, stack, flags, arg, parent_tid, tls,
+ child_tid);
+ struct Arg {
+ int (*fn)(void *);
+ void *arg;
+ };
+ auto wrapper = +[](void *p) -> int {
+ auto *thr = cur_thread();
+ uptr pc = GET_CURRENT_PC();
+ // Start the background thread for fork, but not for clone.
+ // For fork we have always done this and it's known to work (or user code
+ // has adapted). But if we do this for the new clone interceptor, some code
+ // (sandbox2) fails. So keep the model we used for years and don't start
+ // the background thread after clone.
+ ForkChildAfter(thr, pc, false);
+ FdOnFork(thr, pc);
+ auto *arg = static_cast<Arg *>(p);
+ return arg->fn(arg->arg);
+ };
+ ForkBefore(thr, pc);
+ Arg arg_wrapper = {fn, arg};
+ int pid = REAL(clone)(wrapper, stack, flags, &arg_wrapper, parent_tid, tls,
+ child_tid);
+ ForkParentAfter(thr, pc);
+ return pid;
+}
+#endif
+
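// For illustration only: the trampoline pattern used by the clone
// interceptor above, reduced to its core. Linux-only, error handling
// omitted, and the wrapper argument must stay alive until the child reads it
// (the interceptor relies on the same property); names are illustrative.
#include <sched.h>

struct CloneArg {
  int (*fn)(void *);
  void *arg;
};

static int clone_trampoline(void *p) {
  // Child-side bookkeeping runs first (ForkChildAfter/FdOnFork upstream),
  // then control passes to the user callback.
  auto *a = static_cast<CloneArg *>(p);
  return a->fn(a->arg);
}

int my_clone(int (*fn)(void *), void *stack, int flags, void *arg) {
  CloneArg wrapper = {fn, arg};
  // Parent-side hooks (ForkBefore/ForkParentAfter) bracket the real call.
  return clone(clone_trampoline, stack, flags, &wrapper);
}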
+#if !SANITIZER_APPLE && !SANITIZER_ANDROID
typedef int (*dl_iterate_phdr_cb_t)(__sanitizer_dl_phdr_info *info, SIZE_T size,
void *data);
struct dl_iterate_phdr_data {
@@ -2207,7 +2363,7 @@ struct dl_iterate_phdr_data {
};
static bool IsAppNotRodata(uptr addr) {
- return IsAppMem(addr) && *(u64*)MemToShadow(addr) != kShadowRodata;
+ return IsAppMem(addr) && *MemToShadow(addr) != Shadow::kRodata;
}
static int dl_iterate_phdr_cb(__sanitizer_dl_phdr_info *info, SIZE_T size,
@@ -2248,13 +2404,7 @@ static int OnExit(ThreadState *thr) {
return status;
}
-struct TsanInterceptorContext {
- ThreadState *thr;
- const uptr caller_pc;
- const uptr pc;
-};
-
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
static void HandleRecvmsg(ThreadState *thr, uptr pc,
__sanitizer_msghdr *msg) {
int fds[64];
@@ -2275,33 +2425,16 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc,
#define SANITIZER_INTERCEPT_TLS_GET_OFFSET 1
#undef SANITIZER_INTERCEPT_PTHREAD_SIGMASK
-#define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)
#define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \
INTERCEPT_FUNCTION_VER(name, ver)
#define COMMON_INTERCEPT_FUNCTION_VER_UNVERSIONED_FALLBACK(name, ver) \
(INTERCEPT_FUNCTION_VER(name, ver) || INTERCEPT_FUNCTION(name))
-#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
- MemoryAccessRange(((TsanInterceptorContext *)ctx)->thr, \
- ((TsanInterceptorContext *)ctx)->pc, (uptr)ptr, size, \
- true)
-
-#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
- MemoryAccessRange(((TsanInterceptorContext *) ctx)->thr, \
- ((TsanInterceptorContext *) ctx)->pc, (uptr) ptr, size, \
- false)
-
-#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
- SCOPED_TSAN_INTERCEPTOR(func, __VA_ARGS__); \
- TsanInterceptorContext _ctx = {thr, caller_pc, pc}; \
- ctx = (void *)&_ctx; \
- (void) ctx;
-
#define COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, func, ...) \
SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
- TsanInterceptorContext _ctx = {thr, caller_pc, pc}; \
+ TsanInterceptorContext _ctx = {thr, pc}; \
ctx = (void *)&_ctx; \
- (void) ctx;
+ (void)ctx;
#define COMMON_INTERCEPTOR_FILE_OPEN(ctx, file, path) \
if (path) \
@@ -2314,14 +2447,33 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc,
#define COMMON_INTERCEPTOR_FILE_CLOSE(ctx, file) \
if (file) { \
int fd = fileno_unlocked(file); \
- if (fd >= 0) FdClose(thr, pc, fd); \
- }
-
+ FdClose(thr, pc, fd); \
+ }
+
+#define COMMON_INTERCEPTOR_DLOPEN(filename, flag) \
+ ({ \
+ CheckNoDeepBind(filename, flag); \
+ ThreadIgnoreBegin(thr, 0); \
+ void *res = REAL(dlopen)(filename, flag); \
+ ThreadIgnoreEnd(thr); \
+ res; \
+ })
+
+// Ignore interceptors in OnLibraryLoaded()/Unloaded(). These hooks use code
+// (ListOfModules::init, MemoryMappingLayout::DumpListOfModules) that makes
+// intercepted calls, which can cause deadlocks with ReportRace(), which also
+// uses this code.
#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \
- libignore()->OnLibraryLoaded(filename)
+ ({ \
+ ScopedIgnoreInterceptors ignore_interceptors; \
+ libignore()->OnLibraryLoaded(filename); \
+ })
-#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() \
- libignore()->OnLibraryUnloaded()
+#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() \
+ ({ \
+ ScopedIgnoreInterceptors ignore_interceptors; \
+ libignore()->OnLibraryUnloaded(); \
+ })
#define COMMON_INTERCEPTOR_ACQUIRE(ctx, u) \
Acquire(((TsanInterceptorContext *) ctx)->thr, pc, u)
@@ -2347,34 +2499,17 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc,
#define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \
ThreadSetName(((TsanInterceptorContext *) ctx)->thr, name)
-#define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \
- __tsan::ctx->thread_registry->SetThreadNameByUserId(thread, name)
+#define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \
+ if (pthread_equal(pthread_self(), reinterpret_cast<void *>(thread))) \
+ COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name); \
+ else \
+ __tsan::ctx->thread_registry.SetThreadNameByUserId(thread, name)
#define COMMON_INTERCEPTOR_BLOCK_REAL(name) BLOCK_REAL(name)
#define COMMON_INTERCEPTOR_ON_EXIT(ctx) \
OnExit(((TsanInterceptorContext *) ctx)->thr)
-#define COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m) \
- MutexPreLock(((TsanInterceptorContext *)ctx)->thr, \
- ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
-
-#define COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m) \
- MutexPostLock(((TsanInterceptorContext *)ctx)->thr, \
- ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
-
-#define COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m) \
- MutexUnlock(((TsanInterceptorContext *)ctx)->thr, \
- ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
-
-#define COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m) \
- MutexRepair(((TsanInterceptorContext *)ctx)->thr, \
- ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
-
-#define COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m) \
- MutexInvalidAccess(((TsanInterceptorContext *)ctx)->thr, \
- ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
-
#define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, sz, prot, flags, fd, \
off) \
do { \
@@ -2382,7 +2517,12 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc,
off); \
} while (false)
-#if !SANITIZER_MAC
+#define COMMON_INTERCEPTOR_MUNMAP_IMPL(ctx, addr, sz) \
+ do { \
+ return munmap_interceptor(thr, pc, REAL(munmap), addr, sz); \
+ } while (false)
+
+#if !SANITIZER_APPLE
#define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) \
HandleRecvmsg(((TsanInterceptorContext *)ctx)->thr, \
((TsanInterceptorContext *)ctx)->pc, msg)
@@ -2415,12 +2555,14 @@ static __sanitizer_sighandler_ptr signal_impl(int sig,
#define SIGNAL_INTERCEPTOR_SIGNAL_IMPL(func, signo, handler) \
{ return (uptr)signal_impl(signo, (__sanitizer_sighandler_ptr)handler); }
+#define SIGNAL_INTERCEPTOR_ENTER() LazyInitialize(cur_thread_init())
+
#include "sanitizer_common/sanitizer_signal_interceptors.inc"
int sigaction_impl(int sig, const __sanitizer_sigaction *act,
__sanitizer_sigaction *old) {
// Note: if we call REAL(sigaction) directly for any reason without proxying
- // the signal handler through rtl_sigaction, very bad things will happen.
+ // the signal handler through sighandler, very bad things will happen.
// The handler will run synchronously and corrupt tsan per-thread state.
SCOPED_INTERCEPTOR_RAW(sigaction, sig, act, old);
if (sig <= 0 || sig >= kSigCount) {
@@ -2435,7 +2577,7 @@ int sigaction_impl(int sig, const __sanitizer_sigaction *act,
// Copy act into sigactions[sig].
// Can't use struct copy, because compiler can emit call to memcpy.
// Can't use internal_memcpy, because it copies byte-by-byte,
- // and signal handler reads the handler concurrently. It it can read
+ // and signal handler reads the handler concurrently. It can read
// some bytes from old value and some bytes from new value.
// Use volatile to prevent insertion of memcpy.
sigactions[sig].handler =
@@ -2443,27 +2585,22 @@ int sigaction_impl(int sig, const __sanitizer_sigaction *act,
sigactions[sig].sa_flags = *(volatile int const *)&act->sa_flags;
internal_memcpy(&sigactions[sig].sa_mask, &act->sa_mask,
sizeof(sigactions[sig].sa_mask));
-#if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
+#if !SANITIZER_FREEBSD && !SANITIZER_APPLE && !SANITIZER_NETBSD
sigactions[sig].sa_restorer = act->sa_restorer;
#endif
internal_memcpy(&newact, act, sizeof(newact));
internal_sigfillset(&newact.sa_mask);
- if ((uptr)act->handler != sig_ign && (uptr)act->handler != sig_dfl) {
- if (newact.sa_flags & SA_SIGINFO)
- newact.sigaction = rtl_sigaction;
- else
- newact.handler = rtl_sighandler;
+ if ((act->sa_flags & SA_SIGINFO) ||
+ ((uptr)act->handler != sig_ign && (uptr)act->handler != sig_dfl)) {
+ newact.sa_flags |= SA_SIGINFO;
+ newact.sigaction = sighandler;
}
ReleaseStore(thr, pc, (uptr)&sigactions[sig]);
act = &newact;
}
int res = REAL(sigaction)(sig, act, old);
- if (res == 0 && old) {
- uptr cb = (uptr)old->sigaction;
- if (cb == (uptr)rtl_sigaction || cb == (uptr)rtl_sighandler) {
- internal_memcpy(old, &old_stored, sizeof(*old));
- }
- }
+ if (res == 0 && old && old->sigaction == sighandler)
+ internal_memcpy(old, &old_stored, sizeof(*old));
return res;
}
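// For illustration only: the torn-copy hazard that the volatile field copy
// above avoids, in isolation. Each field is read and written as a single
// volatile access, so the compiler can neither emit memcpy (byte-wise,
// tearable) nor split the function-pointer store that a concurrent signal
// handler may read. Types and names are simplified.
#include <signal.h>

using Handler = void (*)(int);

struct Slot {
  Handler handler;
  int sa_flags;
};

Slot g_slots[65];

void store_handler(int sig, const struct sigaction *act) {
  g_slots[sig].handler =
      *reinterpret_cast<Handler const volatile *>(&act->sa_handler);
  g_slots[sig].sa_flags =
      *reinterpret_cast<const volatile int *>(&act->sa_flags);
}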
@@ -2479,27 +2616,23 @@ static __sanitizer_sighandler_ptr signal_impl(int sig,
return old.handler;
}
-#define TSAN_SYSCALL() \
+#define TSAN_SYSCALL() \
ThreadState *thr = cur_thread(); \
- if (thr->ignore_interceptors) \
- return; \
- ScopedSyscall scoped_syscall(thr) \
-/**/
+ if (thr->ignore_interceptors) \
+ return; \
+ ScopedSyscall scoped_syscall(thr)
struct ScopedSyscall {
ThreadState *thr;
- explicit ScopedSyscall(ThreadState *thr)
- : thr(thr) {
- Initialize(thr);
- }
+ explicit ScopedSyscall(ThreadState *thr) : thr(thr) { LazyInitialize(thr); }
~ScopedSyscall() {
ProcessPendingSignals(thr);
}
};
-#if !SANITIZER_FREEBSD && !SANITIZER_MAC
+#if !SANITIZER_FREEBSD && !SANITIZER_APPLE
static void syscall_access_range(uptr pc, uptr p, uptr s, bool write) {
TSAN_SYSCALL();
MemoryAccessRange(thr, pc, p, s, write);
@@ -2508,29 +2641,29 @@ static void syscall_access_range(uptr pc, uptr p, uptr s, bool write) {
static USED void syscall_acquire(uptr pc, uptr addr) {
TSAN_SYSCALL();
Acquire(thr, pc, addr);
- DPrintf("syscall_acquire(%p)\n", addr);
+ DPrintf("syscall_acquire(0x%zx))\n", addr);
}
static USED void syscall_release(uptr pc, uptr addr) {
TSAN_SYSCALL();
- DPrintf("syscall_release(%p)\n", addr);
+ DPrintf("syscall_release(0x%zx)\n", addr);
Release(thr, pc, addr);
}
static void syscall_fd_close(uptr pc, int fd) {
- TSAN_SYSCALL();
+ auto *thr = cur_thread();
FdClose(thr, pc, fd);
}
static USED void syscall_fd_acquire(uptr pc, int fd) {
TSAN_SYSCALL();
FdAcquire(thr, pc, fd);
- DPrintf("syscall_fd_acquire(%p)\n", fd);
+ DPrintf("syscall_fd_acquire(%d)\n", fd);
}
static USED void syscall_fd_release(uptr pc, int fd) {
TSAN_SYSCALL();
- DPrintf("syscall_fd_release(%p)\n", fd);
+ DPrintf("syscall_fd_release(%d)\n", fd);
FdRelease(thr, pc, fd);
}
@@ -2540,7 +2673,7 @@ static void syscall_post_fork(uptr pc, int pid) {
ThreadState *thr = cur_thread();
if (pid == 0) {
// child
- ForkChildAfter(thr, pc);
+ ForkChildAfter(thr, pc, true);
FdOnFork(thr, pc);
} else if (pid > 0) {
// parent
@@ -2653,6 +2786,26 @@ TSAN_INTERCEPTOR(void, thr_exit, tid_t *state) {
#define TSAN_MAYBE_INTERCEPT_THR_EXIT
#endif
+TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_init, void *c, void *a)
+TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_destroy, void *c)
+TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_signal, void *c)
+TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_broadcast, void *c)
+TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_wait, void *c, void *m)
+TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_init, void *m, void *a)
+TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_destroy, void *m)
+TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_lock, void *m)
+TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_trylock, void *m)
+TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_unlock, void *m)
+TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_init, void *l, void *a)
+TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_destroy, void *l)
+TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_rdlock, void *l)
+TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_tryrdlock, void *l)
+TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_wrlock, void *l)
+TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_trywrlock, void *l)
+TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_unlock, void *l)
+TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, once, void *o, void (*i)())
+TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, sigmask, int f, void *n, void *o)
+
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_init, void *c, void *a)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_signal, void *c)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_broadcast, void *c)
@@ -2660,7 +2813,9 @@ TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_wait, void *c, void *m)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_destroy, void *c)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_init, void *m, void *a)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_destroy, void *m)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_lock, void *m)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_trylock, void *m)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_unlock, void *m)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_init, void *m, void *a)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_destroy, void *m)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_rdlock, void *m)
@@ -2683,7 +2838,7 @@ static void finalize(void *arg) {
Die();
}
-#if !SANITIZER_MAC && !SANITIZER_ANDROID
+#if !SANITIZER_APPLE && !SANITIZER_ANDROID
static void unreachable() {
Report("FATAL: ThreadSanitizer: unreachable called\n");
Die();
@@ -2694,35 +2849,20 @@ static void unreachable() {
SANITIZER_WEAK_ATTRIBUTE void InitializeLibdispatchInterceptors() {}
void InitializeInterceptors() {
-#if !SANITIZER_MAC
+#if !SANITIZER_APPLE
// We need to set this up early, because functions like dlsym() can call it.
REAL(memset) = internal_memset;
REAL(memcpy) = internal_memcpy;
#endif
- // Instruct libc malloc to consume less memory.
-#if SANITIZER_GLIBC
- mallopt(1, 0); // M_MXFAST
- mallopt(-3, 32*1024); // M_MMAP_THRESHOLD
-#endif
-
new(interceptor_ctx()) InterceptorContext();
InitializeCommonInterceptors();
InitializeSignalInterceptors();
InitializeLibdispatchInterceptors();
-#if !SANITIZER_MAC
- // We can not use TSAN_INTERCEPT to get setjmp addr,
- // because it does &setjmp and setjmp is not present in some versions of libc.
- using __interception::InterceptFunction;
- InterceptFunction(TSAN_STRING_SETJMP, (uptr*)&REAL(setjmp_symname), 0, 0);
- InterceptFunction("_setjmp", (uptr*)&REAL(_setjmp), 0, 0);
- InterceptFunction(TSAN_STRING_SIGSETJMP, (uptr*)&REAL(sigsetjmp_symname), 0,
- 0);
-#if !SANITIZER_NETBSD
- InterceptFunction("__sigsetjmp", (uptr*)&REAL(__sigsetjmp), 0, 0);
-#endif
+#if !SANITIZER_APPLE
+ InitializeSetjmpInterceptors();
#endif
TSAN_INTERCEPT(longjmp_symname);
@@ -2768,8 +2908,19 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(pthread_mutex_init);
TSAN_INTERCEPT(pthread_mutex_destroy);
+ TSAN_INTERCEPT(pthread_mutex_lock);
TSAN_INTERCEPT(pthread_mutex_trylock);
TSAN_INTERCEPT(pthread_mutex_timedlock);
+ TSAN_INTERCEPT(pthread_mutex_unlock);
+#if SANITIZER_LINUX
+ TSAN_INTERCEPT(pthread_mutex_clocklock);
+#endif
+#if SANITIZER_GLIBC
+# if !__GLIBC_PREREQ(2, 34)
+ TSAN_INTERCEPT(__pthread_mutex_lock);
+ TSAN_INTERCEPT(__pthread_mutex_unlock);
+# endif
+#endif
TSAN_INTERCEPT(pthread_spin_init);
TSAN_INTERCEPT(pthread_spin_destroy);
@@ -2793,10 +2944,9 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(pthread_once);
- TSAN_INTERCEPT(fstat);
TSAN_MAYBE_INTERCEPT___FXSTAT;
+ TSAN_MAYBE_INTERCEPT_FSTAT;
TSAN_MAYBE_INTERCEPT_FSTAT64;
- TSAN_MAYBE_INTERCEPT___FXSTAT64;
TSAN_INTERCEPT(open);
TSAN_MAYBE_INTERCEPT_OPEN64;
TSAN_INTERCEPT(creat);
@@ -2843,6 +2993,9 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(fork);
TSAN_INTERCEPT(vfork);
+#if SANITIZER_LINUX
+ TSAN_INTERCEPT(clone);
+#endif
#if !SANITIZER_ANDROID
TSAN_INTERCEPT(dl_iterate_phdr);
#endif
@@ -2862,7 +3015,7 @@ void InitializeInterceptors() {
TSAN_MAYBE_INTERCEPT__LWP_EXIT;
TSAN_MAYBE_INTERCEPT_THR_EXIT;
-#if !SANITIZER_MAC && !SANITIZER_ANDROID
+#if !SANITIZER_APPLE && !SANITIZER_ANDROID
// Need to set it up, because interceptors check that the function is resolved.
// But atexit is emitted directly into the module, so it can't be resolved.
REAL(atexit) = (int(*)(void(*)()))unreachable;
@@ -2877,13 +3030,33 @@ void InitializeInterceptors() {
Die();
}
-#if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
+#if !SANITIZER_APPLE && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
if (pthread_key_create(&interceptor_ctx()->finalize_key, &thread_finalize)) {
Printf("ThreadSanitizer: failed to create thread key\n");
Die();
}
#endif
+ TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_init);
+ TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_destroy);
+ TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_signal);
+ TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_broadcast);
+ TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_wait);
+ TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_init);
+ TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_destroy);
+ TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_lock);
+ TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_trylock);
+ TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_unlock);
+ TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_init);
+ TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_destroy);
+ TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_rdlock);
+ TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_tryrdlock);
+ TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_wrlock);
+ TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_trywrlock);
+ TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_unlock);
+ TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(once);
+ TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(sigmask);
+
TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_init);
TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_signal);
TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_broadcast);
@@ -2891,7 +3064,9 @@ void InitializeInterceptors() {
TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_destroy);
TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_init);
TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_destroy);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_lock);
TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_trylock);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_unlock);
TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_init);
TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_destroy);
TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_rdlock);
@@ -2920,25 +3095,40 @@ void InitializeInterceptors() {
// Note that no_sanitize_thread attribute does not turn off atomic interception
// so attaching it to the function defined in user code does not help.
// That's why we now have what we have.
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_testonly_barrier_init(u64 *barrier, u32 count) {
- if (count >= (1 << 8)) {
- Printf("barrier_init: count is too large (%d)\n", count);
- Die();
+constexpr u32 kBarrierThreadBits = 10;
+constexpr u32 kBarrierThreads = 1 << kBarrierThreadBits;
+
+extern "C" {
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_init(
+ atomic_uint32_t *barrier, u32 num_threads) {
+ if (num_threads >= kBarrierThreads) {
+ Printf("barrier_init: count is too large (%d)\n", num_threads);
+ Die();
}
- // 8 lsb is thread count, the remaining are count of entered threads.
- *barrier = count;
+ // kBarrierThreadBits lsb is thread count,
+ // the remaining are count of entered threads.
+ atomic_store(barrier, num_threads, memory_order_relaxed);
+}
+
+static u32 barrier_epoch(u32 value) {
+ return (value >> kBarrierThreadBits) / (value & (kBarrierThreads - 1));
}
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_testonly_barrier_wait(u64 *barrier) {
- unsigned old = __atomic_fetch_add(barrier, 1 << 8, __ATOMIC_RELAXED);
- unsigned old_epoch = (old >> 8) / (old & 0xff);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_wait(
+ atomic_uint32_t *barrier) {
+ u32 old = atomic_fetch_add(barrier, kBarrierThreads, memory_order_relaxed);
+ u32 old_epoch = barrier_epoch(old);
+ if (barrier_epoch(old + kBarrierThreads) != old_epoch) {
+ FutexWake(barrier, (1 << 30));
+ return;
+ }
for (;;) {
- unsigned cur = __atomic_load_n(barrier, __ATOMIC_RELAXED);
- unsigned cur_epoch = (cur >> 8) / (cur & 0xff);
- if (cur_epoch != old_epoch)
+ u32 cur = atomic_load(barrier, memory_order_relaxed);
+ if (barrier_epoch(cur) != old_epoch)
return;
- internal_sched_yield();
+ FutexWait(barrier, cur);
}
}
+
+} // extern "C"
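// For illustration only: a worked model of the barrier encoding above. The
// low kBarrierThreadBits bits hold the expected thread count, the high bits
// count arrivals, and the epoch is arrivals divided by the count; std::atomic
// and yield stand in for the runtime's FutexWait/FutexWake.
#include <atomic>
#include <cstdint>
#include <thread>

constexpr uint32_t kThreadBits = 10;
constexpr uint32_t kMaxThreads = 1 << kThreadBits;

uint32_t epoch(uint32_t v) {
  return (v >> kThreadBits) / (v & (kMaxThreads - 1));
}

void barrier_init(std::atomic<uint32_t> &b, uint32_t n) { b.store(n); }

void barrier_wait(std::atomic<uint32_t> &b) {
  uint32_t old = b.fetch_add(kMaxThreads, std::memory_order_relaxed);
  uint32_t old_epoch = epoch(old);
  if (epoch(old + kMaxThreads) != old_epoch)
    return;  // our arrival completed the epoch; everyone else may proceed
  while (epoch(b.load(std::memory_order_relaxed)) == old_epoch)
    std::this_thread::yield();  // the runtime FutexWaits here
}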
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface.cpp
index 9bd0e8580b17..e6c4bf2e60a7 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface.cpp
@@ -20,109 +20,44 @@
using namespace __tsan;
-void __tsan_init() {
- cur_thread_init();
- Initialize(cur_thread());
-}
+void __tsan_init() { Initialize(cur_thread_init()); }
void __tsan_flush_memory() {
FlushShadowMemory();
}
-void __tsan_read16(void *addr) {
- MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
- MemoryRead(cur_thread(), CALLERPC, (uptr)addr + 8, kSizeLog8);
-}
-
-void __tsan_write16(void *addr) {
- MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
- MemoryWrite(cur_thread(), CALLERPC, (uptr)addr + 8, kSizeLog8);
-}
-
void __tsan_read16_pc(void *addr, void *pc) {
- MemoryRead(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog8);
- MemoryRead(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr + 8, kSizeLog8);
+ uptr pc_no_pac = STRIP_PAC_PC(pc);
+ ThreadState *thr = cur_thread();
+ MemoryAccess(thr, pc_no_pac, (uptr)addr, 8, kAccessRead);
+ MemoryAccess(thr, pc_no_pac, (uptr)addr + 8, 8, kAccessRead);
}
void __tsan_write16_pc(void *addr, void *pc) {
- MemoryWrite(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog8);
- MemoryWrite(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr + 8, kSizeLog8);
+ uptr pc_no_pac = STRIP_PAC_PC(pc);
+ ThreadState *thr = cur_thread();
+ MemoryAccess(thr, pc_no_pac, (uptr)addr, 8, kAccessWrite);
+ MemoryAccess(thr, pc_no_pac, (uptr)addr + 8, 8, kAccessWrite);
}
// __tsan_unaligned_read/write calls are emitted by compiler.
-void __tsan_unaligned_read2(const void *addr) {
- UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, false, false);
-}
-
-void __tsan_unaligned_read4(const void *addr) {
- UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, false, false);
-}
-
-void __tsan_unaligned_read8(const void *addr) {
- UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, false, false);
-}
-
void __tsan_unaligned_read16(const void *addr) {
- UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 16, false, false);
-}
-
-void __tsan_unaligned_write2(void *addr) {
- UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, true, false);
-}
-
-void __tsan_unaligned_write4(void *addr) {
- UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, true, false);
-}
-
-void __tsan_unaligned_write8(void *addr) {
- UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, true, false);
+ uptr pc = CALLERPC;
+ ThreadState *thr = cur_thread();
+ UnalignedMemoryAccess(thr, pc, (uptr)addr, 8, kAccessRead);
+ UnalignedMemoryAccess(thr, pc, (uptr)addr + 8, 8, kAccessRead);
}
void __tsan_unaligned_write16(void *addr) {
- UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 16, true, false);
+ uptr pc = CALLERPC;
+ ThreadState *thr = cur_thread();
+ UnalignedMemoryAccess(thr, pc, (uptr)addr, 8, kAccessWrite);
+ UnalignedMemoryAccess(thr, pc, (uptr)addr + 8, 8, kAccessWrite);
}
-// __sanitizer_unaligned_load/store are for user instrumentation.
-
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
-u16 __sanitizer_unaligned_load16(const uu16 *addr) {
- __tsan_unaligned_read2(addr);
- return *addr;
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
-u32 __sanitizer_unaligned_load32(const uu32 *addr) {
- __tsan_unaligned_read4(addr);
- return *addr;
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
-u64 __sanitizer_unaligned_load64(const uu64 *addr) {
- __tsan_unaligned_read8(addr);
- return *addr;
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void __sanitizer_unaligned_store16(uu16 *addr, u16 v) {
- __tsan_unaligned_write2(addr);
- *addr = v;
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void __sanitizer_unaligned_store32(uu32 *addr, u32 v) {
- __tsan_unaligned_write4(addr);
- *addr = v;
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void __sanitizer_unaligned_store64(uu64 *addr, u64 v) {
- __tsan_unaligned_write8(addr);
- *addr = v;
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
void *__tsan_get_current_fiber() {
return cur_thread();
}
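
The rewritten 16-byte entry points above hoist cur_thread() and the stripped
PC into locals so the thread-local lookup happens once per call rather than
once per 8-byte half. A minimal self-contained sketch of that shape, with
stub types standing in for the runtime internals (ThreadState, MemoryAccess,
and the access-type constants here are placeholders, not the real API):

    #include <cstdint>
    using uptr = uintptr_t;
    struct ThreadState { int tid; };                    // stub
    enum AccessType { kAccessRead, kAccessWrite };      // stub
    static thread_local ThreadState tls_state;
    ThreadState *cur_thread() { return &tls_state; }    // one TLS lookup
    void MemoryAccess(ThreadState *, uptr, uptr, uptr, AccessType) {}  // stub

    void read16_sketch(void *addr, uptr pc) {
      ThreadState *thr = cur_thread();  // hoisted: reused for both halves
      MemoryAccess(thr, pc, (uptr)addr, 8, kAccessRead);
      MemoryAccess(thr, pc, (uptr)addr + 8, 8, kAccessRead);
    }
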
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface.h b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface.h
index 124aa2fd2143..3731c90d4591 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface.h
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface.h
@@ -32,6 +32,9 @@ extern "C" {
// before any instrumented code is executed and before any call to malloc.
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_init();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE const char *
+__tsan_default_options();
+
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_flush_memory();
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read1(void *addr);
@@ -72,12 +75,21 @@ SANITIZER_INTERFACE_ATTRIBUTE void __tsan_vptr_read(void **vptr_p);
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_vptr_update(void **vptr_p, void *new_val);
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__tsan_memcpy(void *dest, const void *src, uptr count);
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__tsan_memset(void *dest, int ch, uptr count);
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__tsan_memmove(void *dest, const void *src, uptr count);
+
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_func_entry(void *call_pc);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_func_exit();
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_ignore_thread_begin();
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_ignore_thread_end();
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_on_thread_idle();
+
SANITIZER_INTERFACE_ATTRIBUTE
void *__tsan_external_register_tag(const char *object_type);
SANITIZER_INTERFACE_ATTRIBUTE
@@ -95,9 +107,9 @@ SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_write_range(void *addr, unsigned long size);
SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_read_range_pc(void *addr, unsigned long size, void *pc); // NOLINT
+void __tsan_read_range_pc(void *addr, unsigned long size, void *pc);
SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_write_range_pc(void *addr, unsigned long size, void *pc); // NOLINT
+void __tsan_write_range_pc(void *addr, unsigned long size, void *pc);
// A user may provide a function that will be called right when TSan detects
// an error. The argument 'report' is an opaque pointer that can be used to
@@ -407,6 +419,14 @@ void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_fetch_and(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_fetch_and(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_fetch_or(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_fetch_or(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
@@ -417,12 +437,6 @@ SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_compare_exchange(ThreadState *thr, uptr cpc, uptr pc,
u8 *a);
-SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_on_initialize();
-
-SANITIZER_INTERFACE_ATTRIBUTE
-int __tsan_on_finalize(int failed);
-
} // extern "C"
} // namespace __tsan
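
The new weak __tsan_default_options() declaration above lets an instrumented
program bake in default runtime flags, which the TSAN_OPTIONS environment
variable can still override. A usage sketch; the flag string is illustrative
only:

    extern "C" const char *__tsan_default_options() {
      return "halt_on_error=1:second_deadlock_stack=1";
    }
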
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface.inc b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface.inc
new file mode 100644
index 000000000000..b0a424ff9c25
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface.inc
@@ -0,0 +1,190 @@
+//===-- tsan_interface.inc --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_ptrauth.h"
+#include "tsan_interface.h"
+#include "tsan_rtl.h"
+
+#define CALLERPC ((uptr)__builtin_return_address(0))
+
+using namespace __tsan;
+
+void __tsan_read1(void *addr) {
+ MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 1, kAccessRead);
+}
+
+void __tsan_read2(void *addr) {
+ MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, kAccessRead);
+}
+
+void __tsan_read4(void *addr) {
+ MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, kAccessRead);
+}
+
+void __tsan_read8(void *addr) {
+ MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, kAccessRead);
+}
+
+void __tsan_read16(void *addr) {
+ MemoryAccess16(cur_thread(), CALLERPC, (uptr)addr, kAccessRead);
+}
+
+void __tsan_write1(void *addr) {
+ MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 1, kAccessWrite);
+}
+
+void __tsan_write2(void *addr) {
+ MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, kAccessWrite);
+}
+
+void __tsan_write4(void *addr) {
+ MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, kAccessWrite);
+}
+
+void __tsan_write8(void *addr) {
+ MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, kAccessWrite);
+}
+
+void __tsan_write16(void *addr) {
+ MemoryAccess16(cur_thread(), CALLERPC, (uptr)addr, kAccessWrite);
+}
+
+void __tsan_read1_pc(void *addr, void *pc) {
+ MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 1, kAccessRead | kAccessExternalPC);
+}
+
+void __tsan_read2_pc(void *addr, void *pc) {
+ MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 2, kAccessRead | kAccessExternalPC);
+}
+
+void __tsan_read4_pc(void *addr, void *pc) {
+ MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 4, kAccessRead | kAccessExternalPC);
+}
+
+void __tsan_read8_pc(void *addr, void *pc) {
+ MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 8, kAccessRead | kAccessExternalPC);
+}
+
+void __tsan_write1_pc(void *addr, void *pc) {
+ MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 1, kAccessWrite | kAccessExternalPC);
+}
+
+void __tsan_write2_pc(void *addr, void *pc) {
+ MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 2, kAccessWrite | kAccessExternalPC);
+}
+
+void __tsan_write4_pc(void *addr, void *pc) {
+ MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 4, kAccessWrite | kAccessExternalPC);
+}
+
+void __tsan_write8_pc(void *addr, void *pc) {
+ MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 8, kAccessWrite | kAccessExternalPC);
+}
+
+ALWAYS_INLINE USED void __tsan_unaligned_read2(const void *addr) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, kAccessRead);
+}
+
+ALWAYS_INLINE USED void __tsan_unaligned_read4(const void *addr) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, kAccessRead);
+}
+
+ALWAYS_INLINE USED void __tsan_unaligned_read8(const void *addr) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, kAccessRead);
+}
+
+ALWAYS_INLINE USED void __tsan_unaligned_write2(void *addr) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, kAccessWrite);
+}
+
+ALWAYS_INLINE USED void __tsan_unaligned_write4(void *addr) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, kAccessWrite);
+}
+
+ALWAYS_INLINE USED void __tsan_unaligned_write8(void *addr) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, kAccessWrite);
+}
+
+extern "C" {
+// __sanitizer_unaligned_load/store are for user instrumentation.
+SANITIZER_INTERFACE_ATTRIBUTE
+u16 __sanitizer_unaligned_load16(const uu16 *addr) {
+ __tsan_unaligned_read2(addr);
+ return *addr;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+u32 __sanitizer_unaligned_load32(const uu32 *addr) {
+ __tsan_unaligned_read4(addr);
+ return *addr;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+u64 __sanitizer_unaligned_load64(const uu64 *addr) {
+ __tsan_unaligned_read8(addr);
+ return *addr;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_unaligned_store16(uu16 *addr, u16 v) {
+ *addr = v;
+ __tsan_unaligned_write2(addr);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_unaligned_store32(uu32 *addr, u32 v) {
+ *addr = v;
+ __tsan_unaligned_write4(addr);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_unaligned_store64(uu64 *addr, u64 v) {
+ *addr = v;
+ __tsan_unaligned_write8(addr);
+}
+}
+
+void __tsan_vptr_update(void **vptr_p, void *new_val) {
+ if (*vptr_p == new_val)
+ return;
+ MemoryAccess(cur_thread(), CALLERPC, (uptr)vptr_p, sizeof(*vptr_p),
+ kAccessWrite | kAccessVptr);
+}
+
+void __tsan_vptr_read(void **vptr_p) {
+ MemoryAccess(cur_thread(), CALLERPC, (uptr)vptr_p, sizeof(*vptr_p),
+ kAccessRead | kAccessVptr);
+}
+
+void __tsan_func_entry(void *pc) { FuncEntry(cur_thread(), STRIP_PAC_PC(pc)); }
+
+void __tsan_func_exit() { FuncExit(cur_thread()); }
+
+void __tsan_ignore_thread_begin() { ThreadIgnoreBegin(cur_thread(), CALLERPC); }
+
+void __tsan_ignore_thread_end() { ThreadIgnoreEnd(cur_thread()); }
+
+void __tsan_read_range(void *addr, uptr size) {
+ MemoryAccessRange(cur_thread(), CALLERPC, (uptr)addr, size, false);
+}
+
+void __tsan_write_range(void *addr, uptr size) {
+ MemoryAccessRange(cur_thread(), CALLERPC, (uptr)addr, size, true);
+}
+
+void __tsan_read_range_pc(void *addr, uptr size, void *pc) {
+ MemoryAccessRange(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, size, false);
+}
+
+void __tsan_write_range_pc(void *addr, uptr size, void *pc) {
+ MemoryAccessRange(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, size, true);
+}
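
The __sanitizer_unaligned_load/store helpers defined above are the
user-facing way to keep hand-written unaligned accesses visible to the race
detector: each call both performs the access and reports it to TSan. A usage
sketch against the public sanitizer header, which declares them with void
pointer parameters (the packed-buffer layout is invented for illustration):

    #include <sanitizer/common_interface_defs.h>
    #include <cstddef>
    #include <cstdint>

    uint32_t read_packed(const char *buf, size_t off) {
      // Reported to TSan, then read.
      return __sanitizer_unaligned_load32(buf + off);
    }

    void write_packed(char *buf, size_t off, uint32_t v) {
      // Written, then reported to TSan.
      __sanitizer_unaligned_store32(buf + off, v);
    }
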
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp
index 47314f5ad812..5154662034c5 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp
@@ -43,15 +43,14 @@ class ScopedAnnotation {
ThreadState *const thr_;
};
-#define SCOPED_ANNOTATION_RET(typ, ret) \
- if (!flags()->enable_annotations) \
- return ret; \
- ThreadState *thr = cur_thread(); \
- const uptr caller_pc = (uptr)__builtin_return_address(0); \
- ScopedAnnotation sa(thr, __func__, caller_pc); \
- const uptr pc = StackTrace::GetCurrentPc(); \
- (void)pc; \
-/**/
+#define SCOPED_ANNOTATION_RET(typ, ret) \
+ if (!flags()->enable_annotations) \
+ return ret; \
+ ThreadState *thr = cur_thread(); \
+ const uptr caller_pc = (uptr)__builtin_return_address(0); \
+ ScopedAnnotation sa(thr, __func__, caller_pc); \
+ const uptr pc = StackTrace::GetCurrentPc(); \
+ (void)pc;
#define SCOPED_ANNOTATION(typ) SCOPED_ANNOTATION_RET(typ, )
@@ -71,7 +70,6 @@ struct ExpectRace {
struct DynamicAnnContext {
Mutex mtx;
- ExpectRace expect;
ExpectRace benign;
DynamicAnnContext() : mtx(MutexTypeAnnotations) {}
@@ -90,7 +88,7 @@ static void AddExpectRace(ExpectRace *list,
return;
}
}
- race = (ExpectRace*)internal_alloc(MBlockExpectRace, sizeof(ExpectRace));
+ race = static_cast<ExpectRace *>(Alloc(sizeof(ExpectRace)));
race->addr = addr;
race->size = size;
race->file = f;
@@ -137,81 +135,12 @@ static void InitList(ExpectRace *list) {
void InitializeDynamicAnnotations() {
dyn_ann_ctx = new(dyn_ann_ctx_placeholder) DynamicAnnContext;
- InitList(&dyn_ann_ctx->expect);
InitList(&dyn_ann_ctx->benign);
}
bool IsExpectedReport(uptr addr, uptr size) {
ReadLock lock(&dyn_ann_ctx->mtx);
- if (CheckContains(&dyn_ann_ctx->expect, addr, size))
- return true;
- if (CheckContains(&dyn_ann_ctx->benign, addr, size))
- return true;
- return false;
-}
-
-static void CollectMatchedBenignRaces(Vector<ExpectRace> *matched,
- int *unique_count, int *hit_count, atomic_uintptr_t ExpectRace::*counter) {
- ExpectRace *list = &dyn_ann_ctx->benign;
- for (ExpectRace *race = list->next; race != list; race = race->next) {
- (*unique_count)++;
- const uptr cnt = atomic_load_relaxed(&(race->*counter));
- if (cnt == 0)
- continue;
- *hit_count += cnt;
- uptr i = 0;
- for (; i < matched->Size(); i++) {
- ExpectRace *race0 = &(*matched)[i];
- if (race->line == race0->line
- && internal_strcmp(race->file, race0->file) == 0
- && internal_strcmp(race->desc, race0->desc) == 0) {
- atomic_fetch_add(&(race0->*counter), cnt, memory_order_relaxed);
- break;
- }
- }
- if (i == matched->Size())
- matched->PushBack(*race);
- }
-}
-
-void PrintMatchedBenignRaces() {
- Lock lock(&dyn_ann_ctx->mtx);
- int unique_count = 0;
- int hit_count = 0;
- int add_count = 0;
- Vector<ExpectRace> hit_matched;
- CollectMatchedBenignRaces(&hit_matched, &unique_count, &hit_count,
- &ExpectRace::hitcount);
- Vector<ExpectRace> add_matched;
- CollectMatchedBenignRaces(&add_matched, &unique_count, &add_count,
- &ExpectRace::addcount);
- if (hit_matched.Size()) {
- Printf("ThreadSanitizer: Matched %d \"benign\" races (pid=%d):\n",
- hit_count, (int)internal_getpid());
- for (uptr i = 0; i < hit_matched.Size(); i++) {
- Printf("%d %s:%d %s\n",
- atomic_load_relaxed(&hit_matched[i].hitcount),
- hit_matched[i].file, hit_matched[i].line, hit_matched[i].desc);
- }
- }
- if (hit_matched.Size()) {
- Printf("ThreadSanitizer: Annotated %d \"benign\" races, %d unique"
- " (pid=%d):\n",
- add_count, unique_count, (int)internal_getpid());
- for (uptr i = 0; i < add_matched.Size(); i++) {
- Printf("%d %s:%d %s\n",
- atomic_load_relaxed(&add_matched[i].addcount),
- add_matched[i].file, add_matched[i].line, add_matched[i].desc);
- }
- }
-}
-
-static void ReportMissedExpectedRace(ExpectRace *race) {
- Printf("==================\n");
- Printf("WARNING: ThreadSanitizer: missed expected data race\n");
- Printf(" %s addr=%zx %s:%d\n",
- race->desc, race->addr, race->file, race->line);
- Printf("==================\n");
+ return CheckContains(&dyn_ann_ctx->benign, addr, size);
}
} // namespace __tsan
@@ -229,20 +158,16 @@ void INTERFACE_ATTRIBUTE AnnotateHappensAfter(char *f, int l, uptr addr) {
}
void INTERFACE_ATTRIBUTE AnnotateCondVarSignal(char *f, int l, uptr cv) {
- SCOPED_ANNOTATION(AnnotateCondVarSignal);
}
void INTERFACE_ATTRIBUTE AnnotateCondVarSignalAll(char *f, int l, uptr cv) {
- SCOPED_ANNOTATION(AnnotateCondVarSignalAll);
}
void INTERFACE_ATTRIBUTE AnnotateMutexIsNotPHB(char *f, int l, uptr mu) {
- SCOPED_ANNOTATION(AnnotateMutexIsNotPHB);
}
void INTERFACE_ATTRIBUTE AnnotateCondVarWait(char *f, int l, uptr cv,
uptr lock) {
- SCOPED_ANNOTATION(AnnotateCondVarWait);
}
void INTERFACE_ATTRIBUTE AnnotateRWLockCreate(char *f, int l, uptr m) {
@@ -279,86 +204,56 @@ void INTERFACE_ATTRIBUTE AnnotateRWLockReleased(char *f, int l, uptr m,
}
void INTERFACE_ATTRIBUTE AnnotateTraceMemory(char *f, int l, uptr mem) {
- SCOPED_ANNOTATION(AnnotateTraceMemory);
}
void INTERFACE_ATTRIBUTE AnnotateFlushState(char *f, int l) {
- SCOPED_ANNOTATION(AnnotateFlushState);
}
void INTERFACE_ATTRIBUTE AnnotateNewMemory(char *f, int l, uptr mem,
uptr size) {
- SCOPED_ANNOTATION(AnnotateNewMemory);
}
void INTERFACE_ATTRIBUTE AnnotateNoOp(char *f, int l, uptr mem) {
- SCOPED_ANNOTATION(AnnotateNoOp);
}
void INTERFACE_ATTRIBUTE AnnotateFlushExpectedRaces(char *f, int l) {
- SCOPED_ANNOTATION(AnnotateFlushExpectedRaces);
- Lock lock(&dyn_ann_ctx->mtx);
- while (dyn_ann_ctx->expect.next != &dyn_ann_ctx->expect) {
- ExpectRace *race = dyn_ann_ctx->expect.next;
- if (atomic_load_relaxed(&race->hitcount) == 0) {
- ctx->nmissed_expected++;
- ReportMissedExpectedRace(race);
- }
- race->prev->next = race->next;
- race->next->prev = race->prev;
- internal_free(race);
- }
}
void INTERFACE_ATTRIBUTE AnnotateEnableRaceDetection(
char *f, int l, int enable) {
- SCOPED_ANNOTATION(AnnotateEnableRaceDetection);
- // FIXME: Reconsider this functionality later. It may be irrelevant.
}
void INTERFACE_ATTRIBUTE AnnotateMutexIsUsedAsCondVar(
char *f, int l, uptr mu) {
- SCOPED_ANNOTATION(AnnotateMutexIsUsedAsCondVar);
}
void INTERFACE_ATTRIBUTE AnnotatePCQGet(
char *f, int l, uptr pcq) {
- SCOPED_ANNOTATION(AnnotatePCQGet);
}
void INTERFACE_ATTRIBUTE AnnotatePCQPut(
char *f, int l, uptr pcq) {
- SCOPED_ANNOTATION(AnnotatePCQPut);
}
void INTERFACE_ATTRIBUTE AnnotatePCQDestroy(
char *f, int l, uptr pcq) {
- SCOPED_ANNOTATION(AnnotatePCQDestroy);
}
void INTERFACE_ATTRIBUTE AnnotatePCQCreate(
char *f, int l, uptr pcq) {
- SCOPED_ANNOTATION(AnnotatePCQCreate);
}
void INTERFACE_ATTRIBUTE AnnotateExpectRace(
char *f, int l, uptr mem, char *desc) {
- SCOPED_ANNOTATION(AnnotateExpectRace);
- Lock lock(&dyn_ann_ctx->mtx);
- AddExpectRace(&dyn_ann_ctx->expect,
- f, l, mem, 1, desc);
- DPrintf("Add expected race: %s addr=%zx %s:%d\n", desc, mem, f, l);
}
-static void BenignRaceImpl(
- char *f, int l, uptr mem, uptr size, char *desc) {
+static void BenignRaceImpl(char *f, int l, uptr mem, uptr size, char *desc) {
Lock lock(&dyn_ann_ctx->mtx);
AddExpectRace(&dyn_ann_ctx->benign,
f, l, mem, size, desc);
DPrintf("Add benign race: %s addr=%zx %s:%d\n", desc, mem, f, l);
}
-// FIXME: Turn it off later. WTF is benign race?1?? Go talk to Hans Boehm.
void INTERFACE_ATTRIBUTE AnnotateBenignRaceSized(
char *f, int l, uptr mem, uptr size, char *desc) {
SCOPED_ANNOTATION(AnnotateBenignRaceSized);
@@ -378,7 +273,7 @@ void INTERFACE_ATTRIBUTE AnnotateIgnoreReadsBegin(char *f, int l) {
void INTERFACE_ATTRIBUTE AnnotateIgnoreReadsEnd(char *f, int l) {
SCOPED_ANNOTATION(AnnotateIgnoreReadsEnd);
- ThreadIgnoreEnd(thr, pc);
+ ThreadIgnoreEnd(thr);
}
void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesBegin(char *f, int l) {
@@ -388,7 +283,7 @@ void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesBegin(char *f, int l) {
void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesEnd(char *f, int l) {
SCOPED_ANNOTATION(AnnotateIgnoreWritesEnd);
- ThreadIgnoreEnd(thr, pc);
+ ThreadIgnoreEnd(thr);
}
void INTERFACE_ATTRIBUTE AnnotateIgnoreSyncBegin(char *f, int l) {
@@ -398,17 +293,15 @@ void INTERFACE_ATTRIBUTE AnnotateIgnoreSyncBegin(char *f, int l) {
void INTERFACE_ATTRIBUTE AnnotateIgnoreSyncEnd(char *f, int l) {
SCOPED_ANNOTATION(AnnotateIgnoreSyncEnd);
- ThreadIgnoreSyncEnd(thr, pc);
+ ThreadIgnoreSyncEnd(thr);
}
void INTERFACE_ATTRIBUTE AnnotatePublishMemoryRange(
char *f, int l, uptr addr, uptr size) {
- SCOPED_ANNOTATION(AnnotatePublishMemoryRange);
}
void INTERFACE_ATTRIBUTE AnnotateUnpublishMemoryRange(
char *f, int l, uptr addr, uptr size) {
- SCOPED_ANNOTATION(AnnotateUnpublishMemoryRange);
}
void INTERFACE_ATTRIBUTE AnnotateThreadName(
@@ -421,11 +314,9 @@ void INTERFACE_ATTRIBUTE AnnotateThreadName(
// WTFAnnotateHappensAfter(). Those are used by WebKit to annotate
// atomic operations, which should be handled by ThreadSanitizer correctly.
void INTERFACE_ATTRIBUTE WTFAnnotateHappensBefore(char *f, int l, uptr addr) {
- SCOPED_ANNOTATION(AnnotateHappensBefore);
}
void INTERFACE_ATTRIBUTE WTFAnnotateHappensAfter(char *f, int l, uptr addr) {
- SCOPED_ANNOTATION(AnnotateHappensAfter);
}
void INTERFACE_ATTRIBUTE WTFAnnotateBenignRaceSized(
@@ -477,15 +368,15 @@ void __tsan_mutex_pre_lock(void *m, unsigned flagz) {
else
MutexPreLock(thr, pc, (uptr)m);
}
- ThreadIgnoreBegin(thr, pc, /*save_stack=*/false);
- ThreadIgnoreSyncBegin(thr, pc, /*save_stack=*/false);
+ ThreadIgnoreBegin(thr, 0);
+ ThreadIgnoreSyncBegin(thr, 0);
}
INTERFACE_ATTRIBUTE
void __tsan_mutex_post_lock(void *m, unsigned flagz, int rec) {
SCOPED_ANNOTATION(__tsan_mutex_post_lock);
- ThreadIgnoreSyncEnd(thr, pc);
- ThreadIgnoreEnd(thr, pc);
+ ThreadIgnoreSyncEnd(thr);
+ ThreadIgnoreEnd(thr);
if (!(flagz & MutexFlagTryLockFailed)) {
if (flagz & MutexFlagReadLock)
MutexPostReadLock(thr, pc, (uptr)m, flagz);
@@ -504,44 +395,66 @@ int __tsan_mutex_pre_unlock(void *m, unsigned flagz) {
} else {
ret = MutexUnlock(thr, pc, (uptr)m, flagz);
}
- ThreadIgnoreBegin(thr, pc, /*save_stack=*/false);
- ThreadIgnoreSyncBegin(thr, pc, /*save_stack=*/false);
+ ThreadIgnoreBegin(thr, 0);
+ ThreadIgnoreSyncBegin(thr, 0);
return ret;
}
INTERFACE_ATTRIBUTE
void __tsan_mutex_post_unlock(void *m, unsigned flagz) {
SCOPED_ANNOTATION(__tsan_mutex_post_unlock);
- ThreadIgnoreSyncEnd(thr, pc);
- ThreadIgnoreEnd(thr, pc);
+ ThreadIgnoreSyncEnd(thr);
+ ThreadIgnoreEnd(thr);
}
INTERFACE_ATTRIBUTE
void __tsan_mutex_pre_signal(void *addr, unsigned flagz) {
SCOPED_ANNOTATION(__tsan_mutex_pre_signal);
- ThreadIgnoreBegin(thr, pc, /*save_stack=*/false);
- ThreadIgnoreSyncBegin(thr, pc, /*save_stack=*/false);
+ ThreadIgnoreBegin(thr, 0);
+ ThreadIgnoreSyncBegin(thr, 0);
}
INTERFACE_ATTRIBUTE
void __tsan_mutex_post_signal(void *addr, unsigned flagz) {
SCOPED_ANNOTATION(__tsan_mutex_post_signal);
- ThreadIgnoreSyncEnd(thr, pc);
- ThreadIgnoreEnd(thr, pc);
+ ThreadIgnoreSyncEnd(thr);
+ ThreadIgnoreEnd(thr);
}
INTERFACE_ATTRIBUTE
void __tsan_mutex_pre_divert(void *addr, unsigned flagz) {
SCOPED_ANNOTATION(__tsan_mutex_pre_divert);
// Exit from ignore region started in __tsan_mutex_pre_lock/unlock/signal.
- ThreadIgnoreSyncEnd(thr, pc);
- ThreadIgnoreEnd(thr, pc);
+ ThreadIgnoreSyncEnd(thr);
+ ThreadIgnoreEnd(thr);
}
INTERFACE_ATTRIBUTE
void __tsan_mutex_post_divert(void *addr, unsigned flagz) {
SCOPED_ANNOTATION(__tsan_mutex_post_divert);
- ThreadIgnoreBegin(thr, pc, /*save_stack=*/false);
- ThreadIgnoreSyncBegin(thr, pc, /*save_stack=*/false);
+ ThreadIgnoreBegin(thr, 0);
+ ThreadIgnoreSyncBegin(thr, 0);
+}
+
+static void ReportMutexHeldWrongContext(ThreadState *thr, uptr pc) {
+ ThreadRegistryLock l(&ctx->thread_registry);
+ ScopedReport rep(ReportTypeMutexHeldWrongContext);
+ for (uptr i = 0; i < thr->mset.Size(); ++i) {
+ MutexSet::Desc desc = thr->mset.Get(i);
+ rep.AddMutex(desc.addr, desc.stack_id);
+ }
+ VarSizeStackTrace trace;
+ ObtainCurrentStack(thr, pc, &trace);
+ rep.AddStack(trace, true);
+ OutputReport(thr, rep);
+}
+
+INTERFACE_ATTRIBUTE
+void __tsan_check_no_mutexes_held() {
+ SCOPED_ANNOTATION(__tsan_check_no_mutexes_held);
+ if (thr->mset.Size() == 0) {
+ return;
+ }
+ ReportMutexHeldWrongContext(thr, pc);
}
} // extern "C"
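
__tsan_check_no_mutexes_held() above gives code a way to assert, at run
time, that the calling thread holds no TSan-tracked mutex; when the mutex
set is non-empty it files a ReportTypeMutexHeldWrongContext report listing
the held mutexes and the current stack. A usage sketch (park_worker() is
invented for illustration):

    extern "C" void __tsan_check_no_mutexes_held();

    void park_worker() {
      // Raises a report if this thread still holds any tracked mutex.
      __tsan_check_no_mutexes_held();
      // ... block until new work arrives ...
    }
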
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
index 89bb75394553..2b5a2c6ef79b 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
@@ -32,6 +32,7 @@ using namespace __tsan;
static StaticSpinMutex mutex128;
#endif
+#if SANITIZER_DEBUG
static bool IsLoadOrder(morder mo) {
return mo == mo_relaxed || mo == mo_consume
|| mo == mo_acquire || mo == mo_seq_cst;
@@ -40,6 +41,7 @@ static bool IsLoadOrder(morder mo) {
static bool IsStoreOrder(morder mo) {
return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}
+#endif
static bool IsReleaseOrder(morder mo) {
return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
@@ -161,16 +163,16 @@ a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
}
#endif
-template<typename T>
-static int SizeLog() {
+template <typename T>
+static int AccessSize() {
if (sizeof(T) <= 1)
- return kSizeLog1;
+ return 1;
else if (sizeof(T) <= 2)
- return kSizeLog2;
+ return 2;
else if (sizeof(T) <= 4)
- return kSizeLog4;
+ return 4;
else
- return kSizeLog8;
+ return 8;
  // For 16-byte atomics we also use 8-byte memory access;
  // this leads to false negatives only in very obscure cases.
}
@@ -202,7 +204,7 @@ static memory_order to_mo(morder mo) {
case mo_acq_rel: return memory_order_acq_rel;
case mo_seq_cst: return memory_order_seq_cst;
}
- CHECK(0);
+ DCHECK(0);
return memory_order_seq_cst;
}
@@ -219,27 +221,28 @@ static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
#endif
template <typename T>
-static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
- morder mo) NO_THREAD_SAFETY_ANALYSIS {
- CHECK(IsLoadOrder(mo));
+static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) {
+ DCHECK(IsLoadOrder(mo));
  // This fast path is critical for performance.
// Assume the access is atomic.
if (!IsAcquireOrder(mo)) {
- MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
+ MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
+ kAccessRead | kAccessAtomic);
return NoTsanAtomicLoad(a, mo);
}
  // Don't create a sync object if it does not exist yet. For example, an atomic
// pointer is initialized to nullptr and then periodically acquire-loaded.
T v = NoTsanAtomicLoad(a, mo);
- SyncVar *s = ctx->metamap.GetIfExistsAndLock((uptr)a, false);
+ SyncVar *s = ctx->metamap.GetSyncIfExists((uptr)a);
if (s) {
- AcquireImpl(thr, pc, &s->clock);
+ SlotLocker locker(thr);
+ ReadLock lock(&s->mtx);
+ thr->clock.Acquire(s->clock);
// Re-read under sync mutex because we need a consistent snapshot
// of the value and the clock we acquire.
v = NoTsanAtomicLoad(a, mo);
- s->mtx.ReadUnlock();
}
- MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
+ MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessRead | kAccessAtomic);
return v;
}
@@ -257,9 +260,9 @@ static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
template <typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
- morder mo) NO_THREAD_SAFETY_ANALYSIS {
- CHECK(IsStoreOrder(mo));
- MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
+ morder mo) {
+ DCHECK(IsStoreOrder(mo));
+ MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
  // This fast path is critical for performance.
  // Assume the access is atomic.
  // Strictly speaking, even a relaxed store cuts off a release sequence,
@@ -268,36 +271,35 @@ static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
NoTsanAtomicStore(a, v, mo);
return;
}
- __sync_synchronize();
- SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
- thr->fast_state.IncrementEpoch();
- // Can't increment epoch w/o writing to the trace as well.
- TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
- ReleaseStoreImpl(thr, pc, &s->clock);
- NoTsanAtomicStore(a, v, mo);
- s->mtx.Unlock();
+ SlotLocker locker(thr);
+ {
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
+ Lock lock(&s->mtx);
+ thr->clock.ReleaseStore(&s->clock);
+ NoTsanAtomicStore(a, v, mo);
+ }
+ IncrementEpoch(thr);
}
template <typename T, T (*F)(volatile T *v, T op)>
-static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v,
- morder mo) NO_THREAD_SAFETY_ANALYSIS {
- MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
- SyncVar *s = 0;
- if (mo != mo_relaxed) {
- s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
- thr->fast_state.IncrementEpoch();
- // Can't increment epoch w/o writing to the trace as well.
- TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
+ MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
+ if (LIKELY(mo == mo_relaxed))
+ return F(a, v);
+ SlotLocker locker(thr);
+ {
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
+ RWLock lock(&s->mtx, IsReleaseOrder(mo));
if (IsAcqRelOrder(mo))
- AcquireReleaseImpl(thr, pc, &s->clock);
+ thr->clock.ReleaseAcquire(&s->clock);
else if (IsReleaseOrder(mo))
- ReleaseImpl(thr, pc, &s->clock);
+ thr->clock.Release(&s->clock);
else if (IsAcquireOrder(mo))
- AcquireImpl(thr, pc, &s->clock);
+ thr->clock.Acquire(s->clock);
+ v = F(a, v);
}
- v = F(a, v);
- if (s)
- s->mtx.Unlock();
+ if (IsReleaseOrder(mo))
+ IncrementEpoch(thr);
return v;
}
@@ -402,46 +404,44 @@ static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
}
template <typename T>
-static bool AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T *c, T v, morder mo,
- morder fmo) NO_THREAD_SAFETY_ANALYSIS {
+static bool AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T *c, T v,
+ morder mo, morder fmo) {
// 31.7.2.18: "The failure argument shall not be memory_order_release
  // nor memory_order_acq_rel". LLVM (2021-05) falls back to Monotonic
// (mo_relaxed) when those are used.
- CHECK(IsLoadOrder(fmo));
-
- MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
- SyncVar *s = 0;
- bool write_lock = IsReleaseOrder(mo);
-
- if (mo != mo_relaxed || fmo != mo_relaxed)
- s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, write_lock);
-
- T cc = *c;
- T pr = func_cas(a, cc, v);
- bool success = pr == cc;
- if (!success) {
+ DCHECK(IsLoadOrder(fmo));
+
+ MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
+ if (LIKELY(mo == mo_relaxed && fmo == mo_relaxed)) {
+ T cc = *c;
+ T pr = func_cas(a, cc, v);
+ if (pr == cc)
+ return true;
*c = pr;
- mo = fmo;
+ return false;
}
-
- if (s) {
- thr->fast_state.IncrementEpoch();
- // Can't increment epoch w/o writing to the trace as well.
- TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
-
+ SlotLocker locker(thr);
+ bool release = IsReleaseOrder(mo);
+ bool success;
+ {
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
+ RWLock lock(&s->mtx, release);
+ T cc = *c;
+ T pr = func_cas(a, cc, v);
+ success = pr == cc;
+ if (!success) {
+ *c = pr;
+ mo = fmo;
+ }
if (success && IsAcqRelOrder(mo))
- AcquireReleaseImpl(thr, pc, &s->clock);
+ thr->clock.ReleaseAcquire(&s->clock);
else if (success && IsReleaseOrder(mo))
- ReleaseImpl(thr, pc, &s->clock);
+ thr->clock.Release(&s->clock);
else if (IsAcquireOrder(mo))
- AcquireImpl(thr, pc, &s->clock);
-
- if (write_lock)
- s->mtx.Unlock();
- else
- s->mtx.ReadUnlock();
+ thr->clock.Acquire(s->clock);
}
-
+ if (success && release)
+ IncrementEpoch(thr);
return success;
}
@@ -485,380 +485,356 @@ static morder convert_morder(morder mo) {
return (morder)(mo & 0x7fff);
}
-#define SCOPED_ATOMIC(func, ...) \
- ThreadState *const thr = cur_thread(); \
- if (UNLIKELY(thr->ignore_sync || thr->ignore_interceptors)) { \
- ProcessPendingSignals(thr); \
- return NoTsanAtomic##func(__VA_ARGS__); \
- } \
- const uptr callpc = (uptr)__builtin_return_address(0); \
- uptr pc = StackTrace::GetCurrentPc(); \
- mo = convert_morder(mo); \
- ScopedAtomic sa(thr, callpc, a, mo, __func__); \
- return Atomic##func(thr, pc, __VA_ARGS__); \
-/**/
-
-class ScopedAtomic {
- public:
- ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a,
- morder mo, const char *func)
- : thr_(thr) {
- FuncEntry(thr_, pc);
- DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo);
- }
- ~ScopedAtomic() {
- ProcessPendingSignals(thr_);
- FuncExit(thr_);
- }
- private:
- ThreadState *thr_;
-};
+# define ATOMIC_IMPL(func, ...) \
+ ThreadState *const thr = cur_thread(); \
+ ProcessPendingSignals(thr); \
+ if (UNLIKELY(thr->ignore_sync || thr->ignore_interceptors)) \
+ return NoTsanAtomic##func(__VA_ARGS__); \
+ mo = convert_morder(mo); \
+ return Atomic##func(thr, GET_CALLER_PC(), __VA_ARGS__);
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
- SCOPED_ATOMIC(Load, a, mo);
+ ATOMIC_IMPL(Load, a, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
- SCOPED_ATOMIC(Load, a, mo);
+ ATOMIC_IMPL(Load, a, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
- SCOPED_ATOMIC(Load, a, mo);
+ ATOMIC_IMPL(Load, a, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
- SCOPED_ATOMIC(Load, a, mo);
+ ATOMIC_IMPL(Load, a, mo);
}
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
- SCOPED_ATOMIC(Load, a, mo);
+ ATOMIC_IMPL(Load, a, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
- SCOPED_ATOMIC(Store, a, v, mo);
+ ATOMIC_IMPL(Store, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
- SCOPED_ATOMIC(Store, a, v, mo);
+ ATOMIC_IMPL(Store, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
- SCOPED_ATOMIC(Store, a, v, mo);
+ ATOMIC_IMPL(Store, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
- SCOPED_ATOMIC(Store, a, v, mo);
+ ATOMIC_IMPL(Store, a, v, mo);
}
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
- SCOPED_ATOMIC(Store, a, v, mo);
+ ATOMIC_IMPL(Store, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
- SCOPED_ATOMIC(Exchange, a, v, mo);
+ ATOMIC_IMPL(Exchange, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
- SCOPED_ATOMIC(Exchange, a, v, mo);
+ ATOMIC_IMPL(Exchange, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
- SCOPED_ATOMIC(Exchange, a, v, mo);
+ ATOMIC_IMPL(Exchange, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
- SCOPED_ATOMIC(Exchange, a, v, mo);
+ ATOMIC_IMPL(Exchange, a, v, mo);
}
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
- SCOPED_ATOMIC(Exchange, a, v, mo);
+ ATOMIC_IMPL(Exchange, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
- SCOPED_ATOMIC(FetchAdd, a, v, mo);
+ ATOMIC_IMPL(FetchAdd, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
- SCOPED_ATOMIC(FetchAdd, a, v, mo);
+ ATOMIC_IMPL(FetchAdd, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
- SCOPED_ATOMIC(FetchAdd, a, v, mo);
+ ATOMIC_IMPL(FetchAdd, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
- SCOPED_ATOMIC(FetchAdd, a, v, mo);
+ ATOMIC_IMPL(FetchAdd, a, v, mo);
}
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
- SCOPED_ATOMIC(FetchAdd, a, v, mo);
+ ATOMIC_IMPL(FetchAdd, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
- SCOPED_ATOMIC(FetchSub, a, v, mo);
+ ATOMIC_IMPL(FetchSub, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
- SCOPED_ATOMIC(FetchSub, a, v, mo);
+ ATOMIC_IMPL(FetchSub, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
- SCOPED_ATOMIC(FetchSub, a, v, mo);
+ ATOMIC_IMPL(FetchSub, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
- SCOPED_ATOMIC(FetchSub, a, v, mo);
+ ATOMIC_IMPL(FetchSub, a, v, mo);
}
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
- SCOPED_ATOMIC(FetchSub, a, v, mo);
+ ATOMIC_IMPL(FetchSub, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
- SCOPED_ATOMIC(FetchAnd, a, v, mo);
+ ATOMIC_IMPL(FetchAnd, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
- SCOPED_ATOMIC(FetchAnd, a, v, mo);
+ ATOMIC_IMPL(FetchAnd, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
- SCOPED_ATOMIC(FetchAnd, a, v, mo);
+ ATOMIC_IMPL(FetchAnd, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
- SCOPED_ATOMIC(FetchAnd, a, v, mo);
+ ATOMIC_IMPL(FetchAnd, a, v, mo);
}
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
- SCOPED_ATOMIC(FetchAnd, a, v, mo);
+ ATOMIC_IMPL(FetchAnd, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
- SCOPED_ATOMIC(FetchOr, a, v, mo);
+ ATOMIC_IMPL(FetchOr, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
- SCOPED_ATOMIC(FetchOr, a, v, mo);
+ ATOMIC_IMPL(FetchOr, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
- SCOPED_ATOMIC(FetchOr, a, v, mo);
+ ATOMIC_IMPL(FetchOr, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
- SCOPED_ATOMIC(FetchOr, a, v, mo);
+ ATOMIC_IMPL(FetchOr, a, v, mo);
}
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
- SCOPED_ATOMIC(FetchOr, a, v, mo);
+ ATOMIC_IMPL(FetchOr, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
- SCOPED_ATOMIC(FetchXor, a, v, mo);
+ ATOMIC_IMPL(FetchXor, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
- SCOPED_ATOMIC(FetchXor, a, v, mo);
+ ATOMIC_IMPL(FetchXor, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
- SCOPED_ATOMIC(FetchXor, a, v, mo);
+ ATOMIC_IMPL(FetchXor, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
- SCOPED_ATOMIC(FetchXor, a, v, mo);
+ ATOMIC_IMPL(FetchXor, a, v, mo);
}
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
- SCOPED_ATOMIC(FetchXor, a, v, mo);
+ ATOMIC_IMPL(FetchXor, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
- SCOPED_ATOMIC(FetchNand, a, v, mo);
+ ATOMIC_IMPL(FetchNand, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
- SCOPED_ATOMIC(FetchNand, a, v, mo);
+ ATOMIC_IMPL(FetchNand, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
- SCOPED_ATOMIC(FetchNand, a, v, mo);
+ ATOMIC_IMPL(FetchNand, a, v, mo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
- SCOPED_ATOMIC(FetchNand, a, v, mo);
+ ATOMIC_IMPL(FetchNand, a, v, mo);
}
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
- SCOPED_ATOMIC(FetchNand, a, v, mo);
+ ATOMIC_IMPL(FetchNand, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
morder mo, morder fmo) {
- SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
morder mo, morder fmo) {
- SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
morder mo, morder fmo) {
- SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
morder mo, morder fmo) {
- SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
morder mo, morder fmo) {
- SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
morder mo, morder fmo) {
- SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
morder mo, morder fmo) {
- SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
morder mo, morder fmo) {
- SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
morder mo, morder fmo) {
- SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
morder mo, morder fmo) {
- SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
morder mo, morder fmo) {
- SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
morder mo, morder fmo) {
- SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
morder mo, morder fmo) {
- SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
morder mo, morder fmo) {
- SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
morder mo, morder fmo) {
- SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+ ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
-void __tsan_atomic_thread_fence(morder mo) {
- char* a = 0;
- SCOPED_ATOMIC(Fence, mo);
-}
+void __tsan_atomic_thread_fence(morder mo) { ATOMIC_IMPL(Fence, mo); }
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_signal_fence(morder mo) {
@@ -869,25 +845,23 @@ void __tsan_atomic_signal_fence(morder mo) {
// Go
-#define ATOMIC(func, ...) \
- if (thr->ignore_sync) { \
- NoTsanAtomic##func(__VA_ARGS__); \
- } else { \
- FuncEntry(thr, cpc); \
+# define ATOMIC(func, ...) \
+ if (thr->ignore_sync) { \
+ NoTsanAtomic##func(__VA_ARGS__); \
+ } else { \
+ FuncEntry(thr, cpc); \
Atomic##func(thr, pc, __VA_ARGS__); \
- FuncExit(thr); \
- } \
-/**/
-
-#define ATOMIC_RET(func, ret, ...) \
- if (thr->ignore_sync) { \
- (ret) = NoTsanAtomic##func(__VA_ARGS__); \
- } else { \
- FuncEntry(thr, cpc); \
+ FuncExit(thr); \
+ }
+
+# define ATOMIC_RET(func, ret, ...) \
+ if (thr->ignore_sync) { \
+ (ret) = NoTsanAtomic##func(__VA_ARGS__); \
+ } else { \
+ FuncEntry(thr, cpc); \
(ret) = Atomic##func(thr, pc, __VA_ARGS__); \
- FuncExit(thr); \
- } \
-/**/
+ FuncExit(thr); \
+ }
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
@@ -921,6 +895,30 @@ void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
}
SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_fetch_and(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(FetchAnd, *(a32 *)(a + 16), *(a32 **)a, *(a32 *)(a + 8),
+ mo_acq_rel);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_fetch_and(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(FetchAnd, *(a64 *)(a + 16), *(a64 **)a, *(a64 *)(a + 8),
+ mo_acq_rel);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_fetch_or(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(FetchOr, *(a32 *)(a + 16), *(a32 **)a, *(a32 *)(a + 8),
+ mo_acq_rel);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_fetch_or(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(FetchOr, *(a64 *)(a + 16), *(a64 **)a, *(a64 *)(a + 8),
+ mo_acq_rel);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
ATOMIC_RET(Exchange, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}
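
The Go entry points above, including the new fetch_and/fetch_or pairs, share
one packed-argument convention: 'a' points at a frame holding the target
address at offset 0, the operand at offset 8, and the result slot at offset
16, which is what the *(a32 **)a, *(a32 *)(a + 8), and *(a32 *)(a + 16)
expressions unpack. A self-contained sketch of that unpacking, with a
compiler builtin standing in for the runtime's atomic implementation:

    #include <cstdint>
    using a32 = uint32_t;

    void go_atomic32_fetch_or_sketch(uint8_t *a) {
      a32 *addr = *reinterpret_cast<a32 **>(a);    // offset 0: address
      a32 op = *reinterpret_cast<a32 *>(a + 8);    // offset 8: operand
      // Stand-in for the runtime's FetchOr with mo_acq_rel.
      a32 old = __atomic_fetch_or(addr, op, __ATOMIC_ACQ_REL);
      *reinterpret_cast<a32 *>(a + 16) = old;      // offset 16: result
    }
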
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface_inl.h b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface_inl.h
deleted file mode 100644
index 5e77d4d3d288..000000000000
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface_inl.h
+++ /dev/null
@@ -1,133 +0,0 @@
-//===-- tsan_interface_inl.h ------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of ThreadSanitizer (TSan), a race detector.
-//
-//===----------------------------------------------------------------------===//
-
-#include "tsan_interface.h"
-#include "tsan_rtl.h"
-#include "sanitizer_common/sanitizer_ptrauth.h"
-
-#define CALLERPC ((uptr)__builtin_return_address(0))
-
-using namespace __tsan;
-
-void __tsan_read1(void *addr) {
- MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog1);
-}
-
-void __tsan_read2(void *addr) {
- MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog2);
-}
-
-void __tsan_read4(void *addr) {
- MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog4);
-}
-
-void __tsan_read8(void *addr) {
- MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
-}
-
-void __tsan_write1(void *addr) {
- MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog1);
-}
-
-void __tsan_write2(void *addr) {
- MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog2);
-}
-
-void __tsan_write4(void *addr) {
- MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog4);
-}
-
-void __tsan_write8(void *addr) {
- MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
-}
-
-void __tsan_read1_pc(void *addr, void *pc) {
- MemoryRead(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog1);
-}
-
-void __tsan_read2_pc(void *addr, void *pc) {
- MemoryRead(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog2);
-}
-
-void __tsan_read4_pc(void *addr, void *pc) {
- MemoryRead(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog4);
-}
-
-void __tsan_read8_pc(void *addr, void *pc) {
- MemoryRead(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog8);
-}
-
-void __tsan_write1_pc(void *addr, void *pc) {
- MemoryWrite(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog1);
-}
-
-void __tsan_write2_pc(void *addr, void *pc) {
- MemoryWrite(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog2);
-}
-
-void __tsan_write4_pc(void *addr, void *pc) {
- MemoryWrite(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog4);
-}
-
-void __tsan_write8_pc(void *addr, void *pc) {
- MemoryWrite(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog8);
-}
-
-void __tsan_vptr_update(void **vptr_p, void *new_val) {
- CHECK_EQ(sizeof(vptr_p), 8);
- if (*vptr_p != new_val) {
- ThreadState *thr = cur_thread();
- thr->is_vptr_access = true;
- MemoryWrite(thr, CALLERPC, (uptr)vptr_p, kSizeLog8);
- thr->is_vptr_access = false;
- }
-}
-
-void __tsan_vptr_read(void **vptr_p) {
- CHECK_EQ(sizeof(vptr_p), 8);
- ThreadState *thr = cur_thread();
- thr->is_vptr_access = true;
- MemoryRead(thr, CALLERPC, (uptr)vptr_p, kSizeLog8);
- thr->is_vptr_access = false;
-}
-
-void __tsan_func_entry(void *pc) {
- FuncEntry(cur_thread(), STRIP_PAC_PC(pc));
-}
-
-void __tsan_func_exit() {
- FuncExit(cur_thread());
-}
-
-void __tsan_ignore_thread_begin() {
- ThreadIgnoreBegin(cur_thread(), CALLERPC);
-}
-
-void __tsan_ignore_thread_end() {
- ThreadIgnoreEnd(cur_thread(), CALLERPC);
-}
-
-void __tsan_read_range(void *addr, uptr size) {
- MemoryAccessRange(cur_thread(), CALLERPC, (uptr)addr, size, false);
-}
-
-void __tsan_write_range(void *addr, uptr size) {
- MemoryAccessRange(cur_thread(), CALLERPC, (uptr)addr, size, true);
-}
-
-void __tsan_read_range_pc(void *addr, uptr size, void *pc) {
- MemoryAccessRange(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, size, false);
-}
-
-void __tsan_write_range_pc(void *addr, uptr size, void *pc) {
- MemoryAccessRange(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, size, true);
-}
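
The deleted header passed access sizes as log2 constants (kSizeLog1 through
kSizeLog8); its replacement, tsan_interface.inc above, passes byte counts
directly. For reference, a sketch of the retired encoding, assuming the
usual values kSizeLog1 = 0 through kSizeLog8 = 3:

    constexpr int kSizeLog1 = 0, kSizeLog2 = 1, kSizeLog4 = 2, kSizeLog8 = 3;
    constexpr int SizeLogToBytes(int size_log) { return 1 << size_log; }
    static_assert(SizeLogToBytes(kSizeLog8) == 8,
                  "kSizeLog8 encoded an 8-byte access");
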
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface_java.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface_java.cpp
index 6aa8a7b1d6a7..7c15a1638826 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface_java.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface_java.cpp
@@ -34,52 +34,49 @@ struct JavaContext {
}
};
-class ScopedJavaFunc {
- public:
- ScopedJavaFunc(ThreadState *thr, uptr pc)
- : thr_(thr) {
- Initialize(thr_);
- FuncEntry(thr, pc);
- }
-
- ~ScopedJavaFunc() {
- FuncExit(thr_);
- // FIXME(dvyukov): process pending signals.
- }
-
- private:
- ThreadState *thr_;
-};
-
static u64 jctx_buf[sizeof(JavaContext) / sizeof(u64) + 1];
static JavaContext *jctx;
+MBlock *JavaHeapBlock(uptr addr, uptr *start) {
+ if (!jctx || addr < jctx->heap_begin ||
+ addr >= jctx->heap_begin + jctx->heap_size)
+ return nullptr;
+ for (uptr p = RoundDown(addr, kMetaShadowCell); p >= jctx->heap_begin;
+ p -= kMetaShadowCell) {
+ MBlock *b = ctx->metamap.GetBlock(p);
+ if (!b)
+ continue;
+ if (p + b->siz <= addr)
+ return nullptr;
+ *start = p;
+ return b;
+ }
+ return nullptr;
+}
+
} // namespace __tsan
-#define SCOPED_JAVA_FUNC(func) \
+#define JAVA_FUNC_ENTER(func) \
ThreadState *thr = cur_thread(); \
- const uptr caller_pc = GET_CALLER_PC(); \
- const uptr pc = StackTrace::GetCurrentPc(); \
- (void)pc; \
- ScopedJavaFunc scoped(thr, caller_pc); \
-/**/
+ (void)thr;
void __tsan_java_init(jptr heap_begin, jptr heap_size) {
- SCOPED_JAVA_FUNC(__tsan_java_init);
- DPrintf("#%d: java_init(%p, %p)\n", thr->tid, heap_begin, heap_size);
- CHECK_EQ(jctx, 0);
- CHECK_GT(heap_begin, 0);
- CHECK_GT(heap_size, 0);
- CHECK_EQ(heap_begin % kHeapAlignment, 0);
- CHECK_EQ(heap_size % kHeapAlignment, 0);
- CHECK_LT(heap_begin, heap_begin + heap_size);
+ JAVA_FUNC_ENTER(__tsan_java_init);
+ Initialize(thr);
+ DPrintf("#%d: java_init(0x%zx, 0x%zx)\n", thr->tid, heap_begin, heap_size);
+ DCHECK_EQ(jctx, 0);
+ DCHECK_GT(heap_begin, 0);
+ DCHECK_GT(heap_size, 0);
+ DCHECK_EQ(heap_begin % kHeapAlignment, 0);
+ DCHECK_EQ(heap_size % kHeapAlignment, 0);
+ DCHECK_LT(heap_begin, heap_begin + heap_size);
jctx = new(jctx_buf) JavaContext(heap_begin, heap_size);
}
int __tsan_java_fini() {
- SCOPED_JAVA_FUNC(__tsan_java_fini);
+ JAVA_FUNC_ENTER(__tsan_java_fini);
DPrintf("#%d: java_fini()\n", thr->tid);
- CHECK_NE(jctx, 0);
+ DCHECK_NE(jctx, 0);
// FIXME(dvyukov): this does not call atexit() callbacks.
int status = Finalize(thr);
DPrintf("#%d: java_fini() = %d\n", thr->tid, status);
@@ -87,74 +84,65 @@ int __tsan_java_fini() {
}
void __tsan_java_alloc(jptr ptr, jptr size) {
- SCOPED_JAVA_FUNC(__tsan_java_alloc);
- DPrintf("#%d: java_alloc(%p, %p)\n", thr->tid, ptr, size);
- CHECK_NE(jctx, 0);
- CHECK_NE(size, 0);
- CHECK_EQ(ptr % kHeapAlignment, 0);
- CHECK_EQ(size % kHeapAlignment, 0);
- CHECK_GE(ptr, jctx->heap_begin);
- CHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);
-
- OnUserAlloc(thr, pc, ptr, size, false);
+ JAVA_FUNC_ENTER(__tsan_java_alloc);
+ DPrintf("#%d: java_alloc(0x%zx, 0x%zx)\n", thr->tid, ptr, size);
+ DCHECK_NE(jctx, 0);
+ DCHECK_NE(size, 0);
+ DCHECK_EQ(ptr % kHeapAlignment, 0);
+ DCHECK_EQ(size % kHeapAlignment, 0);
+ DCHECK_GE(ptr, jctx->heap_begin);
+ DCHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);
+
+ OnUserAlloc(thr, 0, ptr, size, false);
}
void __tsan_java_free(jptr ptr, jptr size) {
- SCOPED_JAVA_FUNC(__tsan_java_free);
- DPrintf("#%d: java_free(%p, %p)\n", thr->tid, ptr, size);
- CHECK_NE(jctx, 0);
- CHECK_NE(size, 0);
- CHECK_EQ(ptr % kHeapAlignment, 0);
- CHECK_EQ(size % kHeapAlignment, 0);
- CHECK_GE(ptr, jctx->heap_begin);
- CHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);
-
- ctx->metamap.FreeRange(thr->proc(), ptr, size);
+ JAVA_FUNC_ENTER(__tsan_java_free);
+ DPrintf("#%d: java_free(0x%zx, 0x%zx)\n", thr->tid, ptr, size);
+ DCHECK_NE(jctx, 0);
+ DCHECK_NE(size, 0);
+ DCHECK_EQ(ptr % kHeapAlignment, 0);
+ DCHECK_EQ(size % kHeapAlignment, 0);
+ DCHECK_GE(ptr, jctx->heap_begin);
+ DCHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);
+
+ ctx->metamap.FreeRange(thr->proc(), ptr, size, false);
}
void __tsan_java_move(jptr src, jptr dst, jptr size) {
- SCOPED_JAVA_FUNC(__tsan_java_move);
- DPrintf("#%d: java_move(%p, %p, %p)\n", thr->tid, src, dst, size);
- CHECK_NE(jctx, 0);
- CHECK_NE(size, 0);
- CHECK_EQ(src % kHeapAlignment, 0);
- CHECK_EQ(dst % kHeapAlignment, 0);
- CHECK_EQ(size % kHeapAlignment, 0);
- CHECK_GE(src, jctx->heap_begin);
- CHECK_LE(src + size, jctx->heap_begin + jctx->heap_size);
- CHECK_GE(dst, jctx->heap_begin);
- CHECK_LE(dst + size, jctx->heap_begin + jctx->heap_size);
- CHECK_NE(dst, src);
- CHECK_NE(size, 0);
+ JAVA_FUNC_ENTER(__tsan_java_move);
+ DPrintf("#%d: java_move(0x%zx, 0x%zx, 0x%zx)\n", thr->tid, src, dst, size);
+ DCHECK_NE(jctx, 0);
+ DCHECK_NE(size, 0);
+ DCHECK_EQ(src % kHeapAlignment, 0);
+ DCHECK_EQ(dst % kHeapAlignment, 0);
+ DCHECK_EQ(size % kHeapAlignment, 0);
+ DCHECK_GE(src, jctx->heap_begin);
+ DCHECK_LE(src + size, jctx->heap_begin + jctx->heap_size);
+ DCHECK_GE(dst, jctx->heap_begin);
+ DCHECK_LE(dst + size, jctx->heap_begin + jctx->heap_size);
+ DCHECK_NE(dst, src);
+ DCHECK_NE(size, 0);
// Assuming it's not running concurrently with threads that do
// memory accesses and mutex operations (stop-the-world phase).
ctx->metamap.MoveMemory(src, dst, size);
- // Move shadow.
- u64 *s = (u64*)MemToShadow(src);
- u64 *d = (u64*)MemToShadow(dst);
- u64 *send = (u64*)MemToShadow(src + size);
- uptr inc = 1;
- if (dst > src) {
- s = (u64*)MemToShadow(src + size) - 1;
- d = (u64*)MemToShadow(dst + size) - 1;
- send = (u64*)MemToShadow(src) - 1;
- inc = -1;
- }
- for (; s != send; s += inc, d += inc) {
- *d = *s;
- *s = 0;
- }
+ // Clear the destination shadow range.
+ // We used to move shadow from src to dst, but the trace format does not
+ // support that anymore as it contains addresses of accesses.
+ RawShadow *d = MemToShadow(dst);
+ RawShadow *dend = MemToShadow(dst + size);
+ ShadowSet(d, dend, Shadow::kEmpty);
}
jptr __tsan_java_find(jptr *from_ptr, jptr to) {
- SCOPED_JAVA_FUNC(__tsan_java_find);
- DPrintf("#%d: java_find(&%p, %p)\n", *from_ptr, to);
- CHECK_EQ((*from_ptr) % kHeapAlignment, 0);
- CHECK_EQ(to % kHeapAlignment, 0);
- CHECK_GE(*from_ptr, jctx->heap_begin);
- CHECK_LE(to, jctx->heap_begin + jctx->heap_size);
+ JAVA_FUNC_ENTER(__tsan_java_find);
+ DPrintf("#%d: java_find(&0x%zx, 0x%zx)\n", thr->tid, *from_ptr, to);
+ DCHECK_EQ((*from_ptr) % kHeapAlignment, 0);
+ DCHECK_EQ(to % kHeapAlignment, 0);
+ DCHECK_GE(*from_ptr, jctx->heap_begin);
+ DCHECK_LE(to, jctx->heap_begin + jctx->heap_size);
for (uptr from = *from_ptr; from < to; from += kHeapAlignment) {
MBlock *b = ctx->metamap.GetBlock(from);
if (b) {
@@ -166,101 +154,105 @@ jptr __tsan_java_find(jptr *from_ptr, jptr to) {
}
void __tsan_java_finalize() {
- SCOPED_JAVA_FUNC(__tsan_java_finalize);
- DPrintf("#%d: java_mutex_finalize()\n", thr->tid);
- AcquireGlobal(thr, 0);
+ JAVA_FUNC_ENTER(__tsan_java_finalize);
+ DPrintf("#%d: java_finalize()\n", thr->tid);
+ AcquireGlobal(thr);
}
void __tsan_java_mutex_lock(jptr addr) {
- SCOPED_JAVA_FUNC(__tsan_java_mutex_lock);
- DPrintf("#%d: java_mutex_lock(%p)\n", thr->tid, addr);
- CHECK_NE(jctx, 0);
- CHECK_GE(addr, jctx->heap_begin);
- CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
-
- MutexPostLock(thr, pc, addr, MutexFlagLinkerInit | MutexFlagWriteReentrant |
- MutexFlagDoPreLockOnPostLock);
+ JAVA_FUNC_ENTER(__tsan_java_mutex_lock);
+ DPrintf("#%d: java_mutex_lock(0x%zx)\n", thr->tid, addr);
+ DCHECK_NE(jctx, 0);
+ DCHECK_GE(addr, jctx->heap_begin);
+ DCHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+ MutexPostLock(thr, 0, addr,
+ MutexFlagLinkerInit | MutexFlagWriteReentrant |
+ MutexFlagDoPreLockOnPostLock);
}
void __tsan_java_mutex_unlock(jptr addr) {
- SCOPED_JAVA_FUNC(__tsan_java_mutex_unlock);
- DPrintf("#%d: java_mutex_unlock(%p)\n", thr->tid, addr);
- CHECK_NE(jctx, 0);
- CHECK_GE(addr, jctx->heap_begin);
- CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+ JAVA_FUNC_ENTER(__tsan_java_mutex_unlock);
+ DPrintf("#%d: java_mutex_unlock(0x%zx)\n", thr->tid, addr);
+ DCHECK_NE(jctx, 0);
+ DCHECK_GE(addr, jctx->heap_begin);
+ DCHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
- MutexUnlock(thr, pc, addr);
+ MutexUnlock(thr, 0, addr);
}
void __tsan_java_mutex_read_lock(jptr addr) {
- SCOPED_JAVA_FUNC(__tsan_java_mutex_read_lock);
- DPrintf("#%d: java_mutex_read_lock(%p)\n", thr->tid, addr);
- CHECK_NE(jctx, 0);
- CHECK_GE(addr, jctx->heap_begin);
- CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
-
- MutexPostReadLock(thr, pc, addr, MutexFlagLinkerInit |
- MutexFlagWriteReentrant | MutexFlagDoPreLockOnPostLock);
+ JAVA_FUNC_ENTER(__tsan_java_mutex_read_lock);
+ DPrintf("#%d: java_mutex_read_lock(0x%zx)\n", thr->tid, addr);
+ DCHECK_NE(jctx, 0);
+ DCHECK_GE(addr, jctx->heap_begin);
+ DCHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+ MutexPostReadLock(thr, 0, addr,
+ MutexFlagLinkerInit | MutexFlagWriteReentrant |
+ MutexFlagDoPreLockOnPostLock);
}
void __tsan_java_mutex_read_unlock(jptr addr) {
- SCOPED_JAVA_FUNC(__tsan_java_mutex_read_unlock);
- DPrintf("#%d: java_mutex_read_unlock(%p)\n", thr->tid, addr);
- CHECK_NE(jctx, 0);
- CHECK_GE(addr, jctx->heap_begin);
- CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+ JAVA_FUNC_ENTER(__tsan_java_mutex_read_unlock);
+ DPrintf("#%d: java_mutex_read_unlock(0x%zx)\n", thr->tid, addr);
+ DCHECK_NE(jctx, 0);
+ DCHECK_GE(addr, jctx->heap_begin);
+ DCHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
- MutexReadUnlock(thr, pc, addr);
+ MutexReadUnlock(thr, 0, addr);
}
void __tsan_java_mutex_lock_rec(jptr addr, int rec) {
- SCOPED_JAVA_FUNC(__tsan_java_mutex_lock_rec);
- DPrintf("#%d: java_mutex_lock_rec(%p, %d)\n", thr->tid, addr, rec);
- CHECK_NE(jctx, 0);
- CHECK_GE(addr, jctx->heap_begin);
- CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
- CHECK_GT(rec, 0);
-
- MutexPostLock(thr, pc, addr, MutexFlagLinkerInit | MutexFlagWriteReentrant |
- MutexFlagDoPreLockOnPostLock | MutexFlagRecursiveLock, rec);
+ JAVA_FUNC_ENTER(__tsan_java_mutex_lock_rec);
+ DPrintf("#%d: java_mutex_lock_rec(0x%zx, %d)\n", thr->tid, addr, rec);
+ DCHECK_NE(jctx, 0);
+ DCHECK_GE(addr, jctx->heap_begin);
+ DCHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+ DCHECK_GT(rec, 0);
+
+ MutexPostLock(thr, 0, addr,
+ MutexFlagLinkerInit | MutexFlagWriteReentrant |
+ MutexFlagDoPreLockOnPostLock | MutexFlagRecursiveLock,
+ rec);
}
int __tsan_java_mutex_unlock_rec(jptr addr) {
- SCOPED_JAVA_FUNC(__tsan_java_mutex_unlock_rec);
- DPrintf("#%d: java_mutex_unlock_rec(%p)\n", thr->tid, addr);
- CHECK_NE(jctx, 0);
- CHECK_GE(addr, jctx->heap_begin);
- CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+ JAVA_FUNC_ENTER(__tsan_java_mutex_unlock_rec);
+ DPrintf("#%d: java_mutex_unlock_rec(0x%zx)\n", thr->tid, addr);
+ DCHECK_NE(jctx, 0);
+ DCHECK_GE(addr, jctx->heap_begin);
+ DCHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
- return MutexUnlock(thr, pc, addr, MutexFlagRecursiveUnlock);
+ return MutexUnlock(thr, 0, addr, MutexFlagRecursiveUnlock);
}
void __tsan_java_acquire(jptr addr) {
- SCOPED_JAVA_FUNC(__tsan_java_acquire);
- DPrintf("#%d: java_acquire(%p)\n", thr->tid, addr);
- CHECK_NE(jctx, 0);
- CHECK_GE(addr, jctx->heap_begin);
- CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+ JAVA_FUNC_ENTER(__tsan_java_acquire);
+ DPrintf("#%d: java_acquire(0x%zx)\n", thr->tid, addr);
+ DCHECK_NE(jctx, 0);
+ DCHECK_GE(addr, jctx->heap_begin);
+ DCHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
- Acquire(thr, caller_pc, addr);
+ Acquire(thr, 0, addr);
}
void __tsan_java_release(jptr addr) {
- SCOPED_JAVA_FUNC(__tsan_java_release);
- DPrintf("#%d: java_release(%p)\n", thr->tid, addr);
- CHECK_NE(jctx, 0);
- CHECK_GE(addr, jctx->heap_begin);
- CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+ JAVA_FUNC_ENTER(__tsan_java_release);
+ DPrintf("#%d: java_release(0x%zx)\n", thr->tid, addr);
+ DCHECK_NE(jctx, 0);
+ DCHECK_GE(addr, jctx->heap_begin);
+ DCHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
- Release(thr, caller_pc, addr);
+ Release(thr, 0, addr);
}
void __tsan_java_release_store(jptr addr) {
- SCOPED_JAVA_FUNC(__tsan_java_release);
- DPrintf("#%d: java_release_store(%p)\n", thr->tid, addr);
- CHECK_NE(jctx, 0);
- CHECK_GE(addr, jctx->heap_begin);
- CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+ JAVA_FUNC_ENTER(__tsan_java_release);
+ DPrintf("#%d: java_release_store(0x%zx)\n", thr->tid, addr);
+ DCHECK_NE(jctx, 0);
+ DCHECK_GE(addr, jctx->heap_begin);
+ DCHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
- ReleaseStore(thr, caller_pc, addr);
+ ReleaseStore(thr, 0, addr);
}
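
For readers following the __tsan_java_move change above: shadow is now cleared
at the destination rather than copied, because the new trace format embeds
access addresses that would go stale after a move. A minimal sketch of what
the ShadowSet(d, dend, Shadow::kEmpty) call amounts to, with stand-in types
(the real RawShadow width and empty value are runtime-internal; the ones here
are assumptions):

#include <cstdint>

using RawShadow = uint32_t;            // stand-in; real type is runtime-internal
constexpr RawShadow kEmptyShadow = 0;  // stand-in for Shadow::kEmpty

// Fill [begin, end) with the "no access recorded" value; later accesses
// to the moved object repopulate the shadow from scratch.
static void ClearShadowRange(RawShadow *begin, RawShadow *end) {
  for (RawShadow *p = begin; p != end; ++p)
    *p = kEmptyShadow;
}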
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_malloc_mac.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_malloc_mac.cpp
index 0e861bf1f962..e973be963e57 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_malloc_mac.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_malloc_mac.cpp
@@ -12,11 +12,12 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
#include "sanitizer_common/sanitizer_errno.h"
#include "tsan_interceptors.h"
#include "tsan_stack_trace.h"
+#include "tsan_mman.h"
using namespace __tsan;
#define COMMON_MALLOC_ZONE_NAME "tsan"
@@ -29,16 +30,30 @@ using namespace __tsan;
user_memalign(cur_thread(), StackTrace::GetCurrentPc(), alignment, size)
#define COMMON_MALLOC_MALLOC(size) \
if (in_symbolizer()) return InternalAlloc(size); \
- SCOPED_INTERCEPTOR_RAW(malloc, size); \
- void *p = user_alloc(thr, pc, size)
+ void *p = 0; \
+ { \
+ SCOPED_INTERCEPTOR_RAW(malloc, size); \
+ p = user_alloc(thr, pc, size); \
+ } \
+ invoke_malloc_hook(p, size)
#define COMMON_MALLOC_REALLOC(ptr, size) \
if (in_symbolizer()) return InternalRealloc(ptr, size); \
- SCOPED_INTERCEPTOR_RAW(realloc, ptr, size); \
- void *p = user_realloc(thr, pc, ptr, size)
+ if (ptr) \
+ invoke_free_hook(ptr); \
+ void *p = 0; \
+ { \
+ SCOPED_INTERCEPTOR_RAW(realloc, ptr, size); \
+ p = user_realloc(thr, pc, ptr, size); \
+ } \
+ invoke_malloc_hook(p, size)
#define COMMON_MALLOC_CALLOC(count, size) \
if (in_symbolizer()) return InternalCalloc(count, size); \
- SCOPED_INTERCEPTOR_RAW(calloc, size, count); \
- void *p = user_calloc(thr, pc, size, count)
+ void *p = 0; \
+ { \
+ SCOPED_INTERCEPTOR_RAW(calloc, size, count); \
+ p = user_calloc(thr, pc, size, count); \
+ } \
+ invoke_malloc_hook(p, size * count)
#define COMMON_MALLOC_POSIX_MEMALIGN(memptr, alignment, size) \
if (in_symbolizer()) { \
void *p = InternalAlloc(size, nullptr, alignment); \
@@ -55,6 +70,7 @@ using namespace __tsan;
void *p = user_valloc(thr, pc, size)
#define COMMON_MALLOC_FREE(ptr) \
if (in_symbolizer()) return InternalFree(ptr); \
+ invoke_free_hook(ptr); \
SCOPED_INTERCEPTOR_RAW(free, ptr); \
user_free(thr, pc, ptr)
#define COMMON_MALLOC_SIZE(ptr) uptr size = user_alloc_usable_size(ptr);
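
The reshuffled zone macros above pin down an ordering: the allocation runs
inside a narrow SCOPED_INTERCEPTOR_RAW scope, invoke_malloc_hook fires only
after that scope ends, and invoke_free_hook fires before the deallocation
while the pointer is still valid. A sketch of the same shape with stand-in
names (hooks may call back into user code, so they must not run under the
interceptor scope):

#include <cstddef>
#include <cstdlib>

struct InterceptorScope {};                    // stand-in for SCOPED_INTERCEPTOR_RAW
static void on_malloc_hook(void *, size_t) {}  // stand-in for invoke_malloc_hook
static void on_free_hook(void *) {}            // stand-in for invoke_free_hook

void *wrapped_malloc(size_t size) {
  void *p = nullptr;
  {
    InterceptorScope scope;  // allocation bookkeeping happens in here
    p = std::malloc(size);   // stand-in for user_alloc(thr, pc, size)
  }                          // scope ends before the hook runs
  on_malloc_hook(p, size);
  return p;
}

void wrapped_free(void *p) {
  on_free_hook(p);           // free hook first, while p is still valid
  InterceptorScope scope;
  std::free(p);              // stand-in for user_free(thr, pc, ptr)
}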
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_mman.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
index 7765bc070522..6f118e0979e2 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
@@ -15,27 +15,18 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_placement_new.h"
+#include "tsan_interface.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
#include "tsan_flags.h"
-// May be overriden by front-end.
-SANITIZER_WEAK_DEFAULT_IMPL
-void __sanitizer_malloc_hook(void *ptr, uptr size) {
- (void)ptr;
- (void)size;
-}
-
-SANITIZER_WEAK_DEFAULT_IMPL
-void __sanitizer_free_hook(void *ptr) {
- (void)ptr;
-}
-
namespace __tsan {
struct MapUnmapCallback {
void OnMap(uptr p, uptr size) const { }
+ void OnMapSecondary(uptr p, uptr size, uptr user_begin,
+ uptr user_size) const {};
void OnUnmap(uptr p, uptr size) const {
// We are about to unmap a chunk of user memory.
// Mark the corresponding shadow memory as not needed.
@@ -69,8 +60,17 @@ Allocator *allocator() {
struct GlobalProc {
Mutex mtx;
Processor *proc;
-
- GlobalProc() : mtx(MutexTypeGlobalProc), proc(ProcCreate()) {}
+  // This mutex stands in for all of the internal allocator's mutexes,
+  // combined, for the purposes of deadlock detection. The internal
+  // allocator uses multiple mutexes; they are locked only occasionally,
+  // and they are spin mutexes, which don't support deadlock detection.
+  // So we use this fake mutex as a substitute for them.
+ CheckedMutex internal_alloc_mtx;
+
+ GlobalProc()
+ : mtx(MutexTypeGlobalProc),
+ proc(ProcCreate()),
+ internal_alloc_mtx(MutexTypeInternalAlloc) {}
};
static char global_proc_placeholder[sizeof(GlobalProc)] ALIGNED(64);
@@ -78,6 +78,11 @@ GlobalProc *global_proc() {
return reinterpret_cast<GlobalProc*>(&global_proc_placeholder);
}
+static void InternalAllocAccess() {
+ global_proc()->internal_alloc_mtx.Lock();
+ global_proc()->internal_alloc_mtx.Unlock();
+}
+
ScopedGlobalProcessor::ScopedGlobalProcessor() {
GlobalProc *gp = global_proc();
ThreadState *thr = cur_thread();
@@ -110,6 +115,24 @@ ScopedGlobalProcessor::~ScopedGlobalProcessor() {
gp->mtx.Unlock();
}
+void AllocatorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
+ global_proc()->internal_alloc_mtx.Lock();
+ InternalAllocatorLock();
+}
+
+void AllocatorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
+ InternalAllocatorUnlock();
+ global_proc()->internal_alloc_mtx.Unlock();
+}
+
+void GlobalProcessorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
+ global_proc()->mtx.Lock();
+}
+
+void GlobalProcessorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
+ global_proc()->mtx.Unlock();
+}
+
static constexpr uptr kMaxAllowedMallocSize = 1ull << 40;
static uptr max_user_defined_malloc_size;
@@ -148,7 +171,7 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
ObtainCurrentStack(thr, pc, &stack);
if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
return;
- ThreadRegistryLock l(ctx->thread_registry);
+ ThreadRegistryLock l(&ctx->thread_registry);
ScopedReport rep(ReportTypeSignalUnsafe);
rep.AddStack(stack, true);
OutputReport(thr, rep);
@@ -166,6 +189,12 @@ void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align,
GET_STACK_TRACE_FATAL(thr, pc);
ReportAllocationSizeTooBig(sz, malloc_limit, &stack);
}
+ if (UNLIKELY(IsRssLimitExceeded())) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ GET_STACK_TRACE_FATAL(thr, pc);
+ ReportRssLimitExceeded(&stack);
+ }
void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
if (UNLIKELY(!p)) {
SetAllocatorOutOfMemory();
@@ -218,9 +247,18 @@ void *user_reallocarray(ThreadState *thr, uptr pc, void *p, uptr size, uptr n) {
}
void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
- DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
+ DPrintf("#%d: alloc(%zu) = 0x%zx\n", thr->tid, sz, p);
+ // Note: this can run before thread initialization/after finalization.
+ // As a result this is not necessarily synchronized with DoReset,
+ // which iterates over and resets all sync objects,
+ // but it is fine to create new MBlocks in this context.
ctx->metamap.AllocBlock(thr, pc, p, sz);
- if (write && thr->ignore_reads_and_writes == 0)
+  // If this runs before thread initialization/after finalization
+  // and the trace is not initialized, we can't imitate writes.
+  // In that case just reset the shadow range; this is fine since
+  // it affects only a small fraction of special objects.
+ if (write && thr->ignore_reads_and_writes == 0 &&
+ atomic_load_relaxed(&thr->trace_pos))
MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
else
MemoryResetRange(thr, pc, (uptr)p, sz);
@@ -228,8 +266,15 @@ void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
CHECK_NE(p, (void*)0);
- uptr sz = ctx->metamap.FreeBlock(thr->proc(), p);
- DPrintf("#%d: free(%p, %zu)\n", thr->tid, p, sz);
+ if (!thr->slot) {
+ // Very early/late in thread lifetime, or during fork.
+ UNUSED uptr sz = ctx->metamap.FreeBlock(thr->proc(), p, false);
+ DPrintf("#%d: free(0x%zx, %zu) (no slot)\n", thr->tid, p, sz);
+ return;
+ }
+ SlotLocker locker(thr);
+ uptr sz = ctx->metamap.FreeBlock(thr->proc(), p, true);
+ DPrintf("#%d: free(0x%zx, %zu)\n", thr->tid, p, sz);
if (write && thr->ignore_reads_and_writes == 0)
MemoryRangeFreed(thr, pc, (uptr)p, sz);
}
@@ -309,8 +354,22 @@ void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) {
return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, PageSize));
}
+static const void *user_alloc_begin(const void *p) {
+ if (p == nullptr || !IsAppMem((uptr)p))
+ return nullptr;
+ void *beg = allocator()->GetBlockBegin(p);
+ if (!beg)
+ return nullptr;
+
+ MBlock *b = ctx->metamap.GetBlock((uptr)beg);
+ if (!b)
+ return nullptr; // Not a valid pointer.
+
+ return (const void *)beg;
+}
+
uptr user_alloc_usable_size(const void *p) {
- if (p == 0)
+ if (p == 0 || !IsAppMem((uptr)p))
return 0;
MBlock *b = ctx->metamap.GetBlock((uptr)p);
if (!b)
@@ -320,11 +379,21 @@ uptr user_alloc_usable_size(const void *p) {
return b->siz;
}
+uptr user_alloc_usable_size_fast(const void *p) {
+ MBlock *b = ctx->metamap.GetBlock((uptr)p);
+ // Static objects may have malloc'd before tsan completes
+ // initialization, and may believe returned ptrs to be valid.
+ if (!b)
+ return 0; // Not a valid pointer.
+ if (b->siz == 0)
+ return 1; // Zero-sized allocations are actually 1 byte.
+ return b->siz;
+}
+
void invoke_malloc_hook(void *ptr, uptr size) {
ThreadState *thr = cur_thread();
if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
return;
- __sanitizer_malloc_hook(ptr, size);
RunMallocHooks(ptr, size);
}
@@ -332,25 +401,26 @@ void invoke_free_hook(void *ptr) {
ThreadState *thr = cur_thread();
if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
return;
- __sanitizer_free_hook(ptr);
RunFreeHooks(ptr);
}
-void *internal_alloc(MBlockType typ, uptr sz) {
+void *Alloc(uptr sz) {
ThreadState *thr = cur_thread();
if (thr->nomalloc) {
thr->nomalloc = 0; // CHECK calls internal_malloc().
CHECK(0);
}
+ InternalAllocAccess();
return InternalAlloc(sz, &thr->proc()->internal_alloc_cache);
}
-void internal_free(void *p) {
+void FreeImpl(void *p) {
ThreadState *thr = cur_thread();
if (thr->nomalloc) {
thr->nomalloc = 0; // CHECK calls internal_malloc().
CHECK(0);
}
+ InternalAllocAccess();
InternalFree(p, &thr->proc()->internal_alloc_cache);
}
@@ -387,14 +457,27 @@ int __sanitizer_get_ownership(const void *p) {
return allocator()->GetBlockBegin(p) != 0;
}
+const void *__sanitizer_get_allocated_begin(const void *p) {
+ return user_alloc_begin(p);
+}
+
uptr __sanitizer_get_allocated_size(const void *p) {
return user_alloc_usable_size(p);
}
+uptr __sanitizer_get_allocated_size_fast(const void *p) {
+ DCHECK_EQ(p, __sanitizer_get_allocated_begin(p));
+ uptr ret = user_alloc_usable_size_fast(p);
+ DCHECK_EQ(ret, __sanitizer_get_allocated_size(p));
+ return ret;
+}
+
+void __sanitizer_purge_allocator() {
+ allocator()->ForceReleaseToOS();
+}
+
void __tsan_on_thread_idle() {
ThreadState *thr = cur_thread();
- thr->clock.ResetCached(&thr->proc()->clock_cache);
- thr->last_sleep_clock.ResetCached(&thr->proc()->clock_cache);
allocator()->SwallowCache(&thr->proc()->alloc_cache);
internal_allocator()->SwallowCache(&thr->proc()->internal_alloc_cache);
ctx->metamap.OnProcIdle(thr->proc());
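
The GlobalProc change above deserves a gloss: internal_alloc_mtx is a
CheckedMutex that only feeds the deadlock detector, standing in for the
internal allocator's many occasionally-locked spin mutexes, which the
detector cannot see. InternalAllocAccess records a lock/unlock pair on every
internal allocation, and AllocatorLock/AllocatorUnlock hold the same mutex
for real around fork, so bad lock orders become visible. A sketch of the
pattern (std::mutex as a stand-in; the real CheckedMutex does no actual
serialization):

#include <mutex>

std::mutex g_internal_alloc_mtx;  // stand-in for GlobalProc::internal_alloc_mtx

// An empty lock/unlock pair is enough to tell a lock-order checker
// "this code path may take the allocator mutexes", without holding
// anything across the allocation itself.
static void InternalAllocAccessSketch() {
  g_internal_alloc_mtx.lock();
  g_internal_alloc_mtx.unlock();
}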
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_mman.h b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_mman.h
index a5280d4472c9..2095f28c0253 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_mman.h
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_mman.h
@@ -24,6 +24,10 @@ void ReplaceSystemMalloc();
void AllocatorProcStart(Processor *proc);
void AllocatorProcFinish(Processor *proc);
void AllocatorPrintStats();
+void AllocatorLock();
+void AllocatorUnlock();
+void GlobalProcessorLock();
+void GlobalProcessorUnlock();
// For user allocations.
void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz,
@@ -47,42 +51,29 @@ uptr user_alloc_usable_size(const void *p);
void invoke_malloc_hook(void *ptr, uptr size);
void invoke_free_hook(void *ptr);
-enum MBlockType {
- MBlockScopedBuf,
- MBlockString,
- MBlockStackTrace,
- MBlockShadowStack,
- MBlockSync,
- MBlockClock,
- MBlockThreadContex,
- MBlockDeadInfo,
- MBlockRacyStacks,
- MBlockRacyAddresses,
- MBlockAtExit,
- MBlockFlag,
- MBlockReport,
- MBlockReportMop,
- MBlockReportThread,
- MBlockReportMutex,
- MBlockReportLoc,
- MBlockReportStack,
- MBlockSuppression,
- MBlockExpectRace,
- MBlockSignal,
- MBlockJmpBuf,
+// For internal data structures.
+void *Alloc(uptr sz);
+void FreeImpl(void *p);
- // This must be the last.
- MBlockTypeCount
-};
+template <typename T, typename... Args>
+T *New(Args &&...args) {
+ return new (Alloc(sizeof(T))) T(static_cast<Args &&>(args)...);
+}
-// For internal data structures.
-void *internal_alloc(MBlockType typ, uptr sz);
-void internal_free(void *p);
+template <typename T>
+void Free(T *&p) {
+ if (p == nullptr)
+ return;
+ FreeImpl(p);
+ p = nullptr;
+}
template <typename T>
-void DestroyAndFree(T *p) {
+void DestroyAndFree(T *&p) {
+ if (p == nullptr)
+ return;
p->~T();
- internal_free(p);
+ Free(p);
}
} // namespace __tsan
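
With the MBlockType enum gone, internal allocations above are untyped
(Alloc/FreeImpl) behind thin typed wrappers, and Free/DestroyAndFree now take
T*& so they can null the caller's pointer. The same pattern over plain
malloc/free, as a self-contained illustration (error handling elided):

#include <cstdlib>
#include <new>
#include <utility>

template <typename T, typename... Args>
T *NewSketch(Args &&...args) {  // placement-new over a raw allocation
  return new (std::malloc(sizeof(T))) T(std::forward<Args>(args)...);
}

template <typename T>
void DestroyAndFreeSketch(T *&p) {
  if (p == nullptr)
    return;
  p->~T();       // run the destructor explicitly
  std::free(p);
  p = nullptr;   // nulling the caller's pointer guards against reuse
}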
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_mutexset.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_mutexset.cpp
index 813fa3bca936..3a75b80ac30f 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_mutexset.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_mutexset.cpp
@@ -10,66 +10,55 @@
//
//===----------------------------------------------------------------------===//
#include "tsan_mutexset.h"
+
+#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_rtl.h"
namespace __tsan {
-const uptr MutexSet::kMaxSize;
-
MutexSet::MutexSet() {
- size_ = 0;
- internal_memset(&descs_, 0, sizeof(descs_));
}
-void MutexSet::Add(u64 id, bool write, u64 epoch) {
+void MutexSet::Reset() { internal_memset(this, 0, sizeof(*this)); }
+
+void MutexSet::AddAddr(uptr addr, StackID stack_id, bool write) {
// Look up existing mutex with the same id.
for (uptr i = 0; i < size_; i++) {
- if (descs_[i].id == id) {
+ if (descs_[i].addr == addr) {
descs_[i].count++;
- descs_[i].epoch = epoch;
+ descs_[i].seq = seq_++;
return;
}
}
// On overflow, find the oldest mutex and drop it.
if (size_ == kMaxSize) {
- u64 minepoch = (u64)-1;
- u64 mini = (u64)-1;
+ uptr min = 0;
for (uptr i = 0; i < size_; i++) {
- if (descs_[i].epoch < minepoch) {
- minepoch = descs_[i].epoch;
- mini = i;
- }
+ if (descs_[i].seq < descs_[min].seq)
+ min = i;
}
- RemovePos(mini);
+ RemovePos(min);
CHECK_EQ(size_, kMaxSize - 1);
}
// Add new mutex descriptor.
- descs_[size_].id = id;
+ descs_[size_].addr = addr;
+ descs_[size_].stack_id = stack_id;
descs_[size_].write = write;
- descs_[size_].epoch = epoch;
+ descs_[size_].seq = seq_++;
descs_[size_].count = 1;
size_++;
}
-void MutexSet::Del(u64 id, bool write) {
+void MutexSet::DelAddr(uptr addr, bool destroy) {
for (uptr i = 0; i < size_; i++) {
- if (descs_[i].id == id) {
- if (--descs_[i].count == 0)
+ if (descs_[i].addr == addr) {
+ if (destroy || --descs_[i].count == 0)
RemovePos(i);
return;
}
}
}
-void MutexSet::Remove(u64 id) {
- for (uptr i = 0; i < size_; i++) {
- if (descs_[i].id == id) {
- RemovePos(i);
- return;
- }
- }
-}
-
void MutexSet::RemovePos(uptr i) {
CHECK_LT(i, size_);
descs_[i] = descs_[size_ - 1];
@@ -85,4 +74,7 @@ MutexSet::Desc MutexSet::Get(uptr i) const {
return descs_[i];
}
+DynamicMutexSet::DynamicMutexSet() : ptr_(New<MutexSet>()) {}
+DynamicMutexSet::~DynamicMutexSet() { DestroyAndFree(ptr_); }
+
} // namespace __tsan
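
The rewritten AddAddr above replaces per-mutex epochs with a set-local seq
counter: every touch refreshes a descriptor's seq, and on overflow the entry
with the smallest seq (the least recently touched mutex) is evicted via the
same swap-with-last trick RemovePos uses. A stand-alone analogue of that
policy:

#include <cstdint>

struct Entry {
  uintptr_t addr;
  uint32_t seq;
};

struct SmallSet {
  static constexpr int kMax = 16;  // mirrors MutexSet::kMaxSize
  Entry e[kMax];
  int size = 0;
  uint32_t seq = 0;

  void Add(uintptr_t addr) {
    for (int i = 0; i < size; i++) {
      if (e[i].addr == addr) {
        e[i].seq = seq++;  // refresh the entry's age
        return;
      }
    }
    if (size == kMax) {    // full: evict the oldest entry
      int min = 0;
      for (int i = 1; i < size; i++)
        if (e[i].seq < e[min].seq)
          min = i;
      e[min] = e[--size];  // swap-with-last removal, as in RemovePos
    }
    e[size++] = Entry{addr, seq++};
  }
};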
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_mutexset.h b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_mutexset.h
index d63881f40290..aabd361e6afd 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_mutexset.h
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_mutexset.h
@@ -21,34 +21,55 @@ class MutexSet {
public:
// Holds limited number of mutexes.
// The oldest mutexes are discarded on overflow.
- static const uptr kMaxSize = 16;
+ static constexpr uptr kMaxSize = 16;
struct Desc {
- u64 id;
- u64 epoch;
- int count;
+ uptr addr;
+ StackID stack_id;
+ u32 seq;
+ u32 count;
bool write;
+
+ Desc() { internal_memset(this, 0, sizeof(*this)); }
+ Desc(const Desc& other) { *this = other; }
+ Desc& operator=(const MutexSet::Desc& other) {
+ internal_memcpy(this, &other, sizeof(*this));
+ return *this;
+ }
};
MutexSet();
- // The 'id' is obtained from SyncVar::GetId().
- void Add(u64 id, bool write, u64 epoch);
- void Del(u64 id, bool write);
- void Remove(u64 id); // Removes the mutex completely (if it's destroyed).
+ void Reset();
+ void AddAddr(uptr addr, StackID stack_id, bool write);
+ void DelAddr(uptr addr, bool destroy = false);
uptr Size() const;
Desc Get(uptr i) const;
- void operator=(const MutexSet &other) {
- internal_memcpy(this, &other, sizeof(*this));
- }
-
private:
#if !SANITIZER_GO
- uptr size_;
+ u32 seq_ = 0;
+ uptr size_ = 0;
Desc descs_[kMaxSize];
-#endif
void RemovePos(uptr i);
- MutexSet(const MutexSet&);
+#endif
+};
+
+// MutexSet is too large to live on the stack.
+// DynamicMutexSet can be used to create local MutexSets.
+class DynamicMutexSet {
+ public:
+ DynamicMutexSet();
+ ~DynamicMutexSet();
+ MutexSet* operator->() { return ptr_; }
+ operator MutexSet*() { return ptr_; }
+ DynamicMutexSet(const DynamicMutexSet&) = delete;
+ DynamicMutexSet& operator=(const DynamicMutexSet&) = delete;
+
+ private:
+ MutexSet* ptr_;
+#if SANITIZER_GO
+ MutexSet set_;
+#endif
};
// Go does not have mutexes, so do not spend memory and time.
@@ -56,12 +77,13 @@ class MutexSet {
// in different goroutine).
#if SANITIZER_GO
MutexSet::MutexSet() {}
-void MutexSet::Add(u64 id, bool write, u64 epoch) {}
-void MutexSet::Del(u64 id, bool write) {}
-void MutexSet::Remove(u64 id) {}
-void MutexSet::RemovePos(uptr i) {}
+void MutexSet::Reset() {}
+void MutexSet::AddAddr(uptr addr, StackID stack_id, bool write) {}
+void MutexSet::DelAddr(uptr addr, bool destroy) {}
uptr MutexSet::Size() const { return 0; }
MutexSet::Desc MutexSet::Get(uptr i) const { return Desc(); }
+DynamicMutexSet::DynamicMutexSet() : ptr_(&set_) {}
+DynamicMutexSet::~DynamicMutexSet() {}
#endif
} // namespace __tsan
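
DynamicMutexSet above packages a recurring need: an object too large for the
stack, but with stack-scoped lifetime (and, in the Go build, an embedded
member instead of a heap allocation). A generic, self-contained version of
the same RAII shape, with malloc/placement-new standing in for the runtime's
New/DestroyAndFree:

#include <cstdlib>
#include <new>

struct Big { char buf[1 << 14]; };  // stand-in for MutexSet

class DynamicBig {
 public:
  DynamicBig() : ptr_(new (std::malloc(sizeof(Big))) Big()) {}
  ~DynamicBig() {
    ptr_->~Big();
    std::free(ptr_);
  }
  Big *operator->() { return ptr_; }
  operator Big *() { return ptr_; }
  DynamicBig(const DynamicBig &) = delete;
  DynamicBig &operator=(const DynamicBig &) = delete;

 private:
  Big *ptr_;
};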
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform.h b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform.h
index 8bd218e25fd6..377f8aeb8d66 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform.h
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform.h
@@ -18,41 +18,44 @@
# error "Only 64-bit is supported"
#endif
+#include "sanitizer_common/sanitizer_common.h"
#include "tsan_defs.h"
-#include "tsan_trace.h"
namespace __tsan {
-#if defined(__x86_64__)
-#define HAS_48_BIT_ADDRESS_SPACE 1
-#elif SANITIZER_IOSSIM // arm64 iOS simulators (order of #if matters)
-#define HAS_48_BIT_ADDRESS_SPACE 1
-#elif SANITIZER_IOS // arm64 iOS devices (order of #if matters)
-#define HAS_48_BIT_ADDRESS_SPACE 0
-#elif SANITIZER_MAC // arm64 macOS (order of #if matters)
-#define HAS_48_BIT_ADDRESS_SPACE 1
-#else
-#define HAS_48_BIT_ADDRESS_SPACE 0
-#endif
-
-#if !SANITIZER_GO
+enum {
+ // App memory is not mapped onto shadow memory range.
+ kBrokenMapping = 1 << 0,
+ // Mapping app memory and back does not produce the same address,
+ // this can lead to wrong addresses in reports and potentially
+ // other bad consequences.
+ kBrokenReverseMapping = 1 << 1,
+ // Mapping is non-linear for linear user range.
+  // This is bad and can lead to unpredictable memory corruptions, etc.,
+  // because range access functions assume linearity.
+ kBrokenLinearity = 1 << 2,
+ // Meta for an app region overlaps with the meta of another app region.
+ // This is determined by recomputing the individual meta regions for
+ // each app region.
+ //
+ // N.B. There is no "kBrokenReverseMetaMapping" constant because there
+ // is no MetaToMem function. However, note that (!kBrokenLinearity
+ // && !kBrokenAliasedMetas) implies that MemToMeta is invertible.
+ kBrokenAliasedMetas = 1 << 3,
+};
-#if HAS_48_BIT_ADDRESS_SPACE
/*
C/C++ on linux/x86_64 and freebsd/x86_64
-0000 0000 1000 - 0080 0000 0000: main binary and/or MAP_32BIT mappings (512GB)
-0040 0000 0000 - 0100 0000 0000: -
-0100 0000 0000 - 2000 0000 0000: shadow
-2000 0000 0000 - 3000 0000 0000: -
-3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
-4000 0000 0000 - 5500 0000 0000: -
-5500 0000 0000 - 5680 0000 0000: pie binaries without ASLR or on 4.1+ kernels
-5680 0000 0000 - 6000 0000 0000: -
-6000 0000 0000 - 6200 0000 0000: traces
-6200 0000 0000 - 7d00 0000 0000: -
-7b00 0000 0000 - 7c00 0000 0000: heap
-7c00 0000 0000 - 7e80 0000 0000: -
-7e80 0000 0000 - 8000 0000 0000: modules and main thread stack
+0000 0000 1000 - 0200 0000 0000: main binary and/or MAP_32BIT mappings (2TB)
+0200 0000 0000 - 1000 0000 0000: -
+1000 0000 0000 - 3000 0000 0000: shadow (32TB)
+3000 0000 0000 - 3800 0000 0000: metainfo (memory blocks and sync objects; 8TB)
+3800 0000 0000 - 5500 0000 0000: -
+5500 0000 0000 - 5a00 0000 0000: pie binaries without ASLR or on 4.1+ kernels
+5a00 0000 0000 - 7200 0000 0000: -
+7200 0000 0000 - 7300 0000 0000: heap (1TB)
+7300 0000 0000 - 7a00 0000 0000: -
+7a00 0000 0000 - 8000 0000 0000: modules and main thread stack (6TB)
C/C++ on netbsd/amd64 can reuse the same mapping:
* The address space starts from 0x1000 (option with 0x0) and ends with
@@ -65,53 +68,46 @@ C/C++ on netbsd/amd64 can reuse the same mapping:
* Stack on NetBSD/amd64 has prereserved 128MB.
* Heap grows downwards (top-down).
* ASLR must be disabled per-process or globally.
-
*/
-struct Mapping {
+struct Mapping48AddressSpace {
static const uptr kMetaShadowBeg = 0x300000000000ull;
- static const uptr kMetaShadowEnd = 0x340000000000ull;
- static const uptr kTraceMemBeg = 0x600000000000ull;
- static const uptr kTraceMemEnd = 0x620000000000ull;
- static const uptr kShadowBeg = 0x010000000000ull;
- static const uptr kShadowEnd = 0x200000000000ull;
- static const uptr kHeapMemBeg = 0x7b0000000000ull;
- static const uptr kHeapMemEnd = 0x7c0000000000ull;
+ static const uptr kMetaShadowEnd = 0x380000000000ull;
+ static const uptr kShadowBeg = 0x100000000000ull;
+ static const uptr kShadowEnd = 0x300000000000ull;
+ static const uptr kHeapMemBeg = 0x720000000000ull;
+ static const uptr kHeapMemEnd = 0x730000000000ull;
static const uptr kLoAppMemBeg = 0x000000001000ull;
- static const uptr kLoAppMemEnd = 0x008000000000ull;
+ static const uptr kLoAppMemEnd = 0x020000000000ull;
static const uptr kMidAppMemBeg = 0x550000000000ull;
- static const uptr kMidAppMemEnd = 0x568000000000ull;
- static const uptr kHiAppMemBeg = 0x7e8000000000ull;
+ static const uptr kMidAppMemEnd = 0x5a0000000000ull;
+ static const uptr kHiAppMemBeg = 0x7a0000000000ull;
static const uptr kHiAppMemEnd = 0x800000000000ull;
- static const uptr kAppMemMsk = 0x780000000000ull;
- static const uptr kAppMemXor = 0x040000000000ull;
+ static const uptr kShadowMsk = 0x700000000000ull;
+ static const uptr kShadowXor = 0x000000000000ull;
+ static const uptr kShadowAdd = 0x100000000000ull;
static const uptr kVdsoBeg = 0xf000000000000000ull;
};
-#define TSAN_MID_APP_RANGE 1
-#elif defined(__mips64)
/*
C/C++ on linux/mips64 (40-bit VMA)
0000 0000 00 - 0100 0000 00: - (4 GB)
0100 0000 00 - 0200 0000 00: main binary (4 GB)
-0200 0000 00 - 2000 0000 00: - (120 GB)
-2000 0000 00 - 4000 0000 00: shadow (128 GB)
+0200 0000 00 - 1200 0000 00: - (64 GB)
+1200 0000 00 - 2200 0000 00: shadow (64 GB)
+2200 0000 00 - 4000 0000 00: - (120 GB)
4000 0000 00 - 5000 0000 00: metainfo (memory blocks and sync objects) (64 GB)
5000 0000 00 - aa00 0000 00: - (360 GB)
aa00 0000 00 - ab00 0000 00: main binary (PIE) (4 GB)
-ab00 0000 00 - b000 0000 00: - (20 GB)
-b000 0000 00 - b200 0000 00: traces (8 GB)
-b200 0000 00 - fe00 0000 00: - (304 GB)
+ab00 0000 00 - fe00 0000 00: - (332 GB)
fe00 0000 00 - ff00 0000 00: heap (4 GB)
ff00 0000 00 - ff80 0000 00: - (2 GB)
ff80 0000 00 - ffff ffff ff: modules and main thread stack (<2 GB)
*/
-struct Mapping40 {
+struct MappingMips64_40 {
static const uptr kMetaShadowBeg = 0x4000000000ull;
static const uptr kMetaShadowEnd = 0x5000000000ull;
- static const uptr kTraceMemBeg = 0xb000000000ull;
- static const uptr kTraceMemEnd = 0xb200000000ull;
- static const uptr kShadowBeg = 0x2000000000ull;
- static const uptr kShadowEnd = 0x4000000000ull;
+ static const uptr kShadowBeg = 0x1200000000ull;
+ static const uptr kShadowEnd = 0x2200000000ull;
static const uptr kHeapMemBeg = 0xfe00000000ull;
static const uptr kHeapMemEnd = 0xff00000000ull;
static const uptr kLoAppMemBeg = 0x0100000000ull;
@@ -120,152 +116,170 @@ struct Mapping40 {
static const uptr kMidAppMemEnd = 0xab00000000ull;
static const uptr kHiAppMemBeg = 0xff80000000ull;
static const uptr kHiAppMemEnd = 0xffffffffffull;
- static const uptr kAppMemMsk = 0xf800000000ull;
- static const uptr kAppMemXor = 0x0800000000ull;
+ static const uptr kShadowMsk = 0xf800000000ull;
+ static const uptr kShadowXor = 0x0800000000ull;
+ static const uptr kShadowAdd = 0x0000000000ull;
static const uptr kVdsoBeg = 0xfffff00000ull;
};
-#define TSAN_MID_APP_RANGE 1
-#define TSAN_RUNTIME_VMA 1
-#elif defined(__aarch64__) && defined(__APPLE__)
/*
C/C++ on Darwin/iOS/ARM64 (36-bit VMA, 64 GB VM)
0000 0000 00 - 0100 0000 00: - (4 GB)
0100 0000 00 - 0200 0000 00: main binary, modules, thread stacks (4 GB)
0200 0000 00 - 0300 0000 00: heap (4 GB)
0300 0000 00 - 0400 0000 00: - (4 GB)
-0400 0000 00 - 0c00 0000 00: shadow memory (32 GB)
-0c00 0000 00 - 0d00 0000 00: - (4 GB)
+0400 0000 00 - 0800 0000 00: shadow memory (16 GB)
+0800 0000 00 - 0d00 0000 00: - (20 GB)
0d00 0000 00 - 0e00 0000 00: metainfo (4 GB)
-0e00 0000 00 - 0f00 0000 00: - (4 GB)
-0f00 0000 00 - 0fc0 0000 00: traces (3 GB)
-0fc0 0000 00 - 1000 0000 00: -
+0e00 0000 00 - 1000 0000 00: -
*/
-struct Mapping {
+struct MappingAppleAarch64 {
static const uptr kLoAppMemBeg = 0x0100000000ull;
static const uptr kLoAppMemEnd = 0x0200000000ull;
static const uptr kHeapMemBeg = 0x0200000000ull;
static const uptr kHeapMemEnd = 0x0300000000ull;
static const uptr kShadowBeg = 0x0400000000ull;
- static const uptr kShadowEnd = 0x0c00000000ull;
+ static const uptr kShadowEnd = 0x0800000000ull;
static const uptr kMetaShadowBeg = 0x0d00000000ull;
static const uptr kMetaShadowEnd = 0x0e00000000ull;
- static const uptr kTraceMemBeg = 0x0f00000000ull;
- static const uptr kTraceMemEnd = 0x0fc0000000ull;
static const uptr kHiAppMemBeg = 0x0fc0000000ull;
static const uptr kHiAppMemEnd = 0x0fc0000000ull;
- static const uptr kAppMemMsk = 0x0ull;
- static const uptr kAppMemXor = 0x0ull;
+ static const uptr kShadowMsk = 0x0ull;
+ static const uptr kShadowXor = 0x0ull;
+ static const uptr kShadowAdd = 0x0200000000ull;
static const uptr kVdsoBeg = 0x7000000000000000ull;
+ static const uptr kMidAppMemBeg = 0;
+ static const uptr kMidAppMemEnd = 0;
};
-#elif defined(__aarch64__) && !defined(__APPLE__)
-// AArch64 supports multiple VMA which leads to multiple address transformation
-// functions. To support these multiple VMAS transformations and mappings TSAN
-// runtime for AArch64 uses an external memory read (vmaSize) to select which
-// mapping to use. Although slower, it make a same instrumented binary run on
-// multiple kernels.
-
/*
C/C++ on linux/aarch64 (39-bit VMA)
-0000 0010 00 - 0100 0000 00: main binary
-0100 0000 00 - 0800 0000 00: -
-0800 0000 00 - 2000 0000 00: shadow memory
-2000 0000 00 - 3100 0000 00: -
-3100 0000 00 - 3400 0000 00: metainfo
-3400 0000 00 - 5500 0000 00: -
-5500 0000 00 - 5600 0000 00: main binary (PIE)
-5600 0000 00 - 6000 0000 00: -
-6000 0000 00 - 6200 0000 00: traces
-6200 0000 00 - 7d00 0000 00: -
-7c00 0000 00 - 7d00 0000 00: heap
-7d00 0000 00 - 7fff ffff ff: modules and main thread stack
+0000 0010 00 - 0500 0000 00: main binary (20 GB)
+0100 0000 00 - 2000 0000 00: -
+2000 0000 00 - 4000 0000 00: shadow memory (128 GB)
+4000 0000 00 - 4800 0000 00: metainfo (32 GB)
+4800 0000 00 - 5500 0000 00: -
+5500 0000 00 - 5a00 0000 00: main binary (PIE) (20 GB)
+5600 0000 00 - 7c00 0000 00: -
+7a00 0000 00 - 7d00 0000 00: heap (12 GB)
+7d00 0000 00 - 7fff ffff ff: modules and main thread stack (12 GB)
*/
-struct Mapping39 {
+struct MappingAarch64_39 {
static const uptr kLoAppMemBeg = 0x0000001000ull;
- static const uptr kLoAppMemEnd = 0x0100000000ull;
- static const uptr kShadowBeg = 0x0800000000ull;
- static const uptr kShadowEnd = 0x2000000000ull;
- static const uptr kMetaShadowBeg = 0x3100000000ull;
- static const uptr kMetaShadowEnd = 0x3400000000ull;
+ static const uptr kLoAppMemEnd = 0x0500000000ull;
+ static const uptr kShadowBeg = 0x2000000000ull;
+ static const uptr kShadowEnd = 0x4000000000ull;
+ static const uptr kMetaShadowBeg = 0x4000000000ull;
+ static const uptr kMetaShadowEnd = 0x4800000000ull;
static const uptr kMidAppMemBeg = 0x5500000000ull;
- static const uptr kMidAppMemEnd = 0x5600000000ull;
- static const uptr kTraceMemBeg = 0x6000000000ull;
- static const uptr kTraceMemEnd = 0x6200000000ull;
- static const uptr kHeapMemBeg = 0x7c00000000ull;
+ static const uptr kMidAppMemEnd = 0x5a00000000ull;
+ static const uptr kHeapMemBeg = 0x7a00000000ull;
static const uptr kHeapMemEnd = 0x7d00000000ull;
- static const uptr kHiAppMemBeg = 0x7e00000000ull;
+ static const uptr kHiAppMemBeg = 0x7d00000000ull;
static const uptr kHiAppMemEnd = 0x7fffffffffull;
- static const uptr kAppMemMsk = 0x7800000000ull;
- static const uptr kAppMemXor = 0x0200000000ull;
+ static const uptr kShadowMsk = 0x7000000000ull;
+ static const uptr kShadowXor = 0x1000000000ull;
+ static const uptr kShadowAdd = 0x0000000000ull;
static const uptr kVdsoBeg = 0x7f00000000ull;
};
/*
C/C++ on linux/aarch64 (42-bit VMA)
-00000 0010 00 - 01000 0000 00: main binary
-01000 0000 00 - 10000 0000 00: -
-10000 0000 00 - 20000 0000 00: shadow memory
-20000 0000 00 - 26000 0000 00: -
-26000 0000 00 - 28000 0000 00: metainfo
-28000 0000 00 - 2aa00 0000 00: -
-2aa00 0000 00 - 2ab00 0000 00: main binary (PIE)
-2ab00 0000 00 - 36200 0000 00: -
-36200 0000 00 - 36240 0000 00: traces
-36240 0000 00 - 3e000 0000 00: -
-3e000 0000 00 - 3f000 0000 00: heap
-3f000 0000 00 - 3ffff ffff ff: modules and main thread stack
+00000 0010 00 - 02000 0000 00: main binary (128 GB)
+02000 0000 00 - 08000 0000 00: -
+10000 0000 00 - 20000 0000 00: shadow memory (1024 GB)
+20000 0000 00 - 24000 0000 00: metainfo (256 GB)
+24000 0000 00 - 2aa00 0000 00: -
+2aa00 0000 00 - 2c000 0000 00: main binary (PIE) (88 GB)
+2c000 0000 00 - 3c000 0000 00: -
+3c000 0000 00 - 3f000 0000 00: heap (192 GB)
+3f000 0000 00 - 3ffff ffff ff: modules and main thread stack (64 GB)
*/
-struct Mapping42 {
+struct MappingAarch64_42 {
static const uptr kLoAppMemBeg = 0x00000001000ull;
- static const uptr kLoAppMemEnd = 0x01000000000ull;
+ static const uptr kLoAppMemEnd = 0x02000000000ull;
static const uptr kShadowBeg = 0x10000000000ull;
static const uptr kShadowEnd = 0x20000000000ull;
- static const uptr kMetaShadowBeg = 0x26000000000ull;
- static const uptr kMetaShadowEnd = 0x28000000000ull;
+ static const uptr kMetaShadowBeg = 0x20000000000ull;
+ static const uptr kMetaShadowEnd = 0x24000000000ull;
static const uptr kMidAppMemBeg = 0x2aa00000000ull;
- static const uptr kMidAppMemEnd = 0x2ab00000000ull;
- static const uptr kTraceMemBeg = 0x36200000000ull;
- static const uptr kTraceMemEnd = 0x36400000000ull;
- static const uptr kHeapMemBeg = 0x3e000000000ull;
+ static const uptr kMidAppMemEnd = 0x2c000000000ull;
+ static const uptr kHeapMemBeg = 0x3c000000000ull;
static const uptr kHeapMemEnd = 0x3f000000000ull;
static const uptr kHiAppMemBeg = 0x3f000000000ull;
static const uptr kHiAppMemEnd = 0x3ffffffffffull;
- static const uptr kAppMemMsk = 0x3c000000000ull;
- static const uptr kAppMemXor = 0x04000000000ull;
+ static const uptr kShadowMsk = 0x38000000000ull;
+ static const uptr kShadowXor = 0x08000000000ull;
+ static const uptr kShadowAdd = 0x00000000000ull;
static const uptr kVdsoBeg = 0x37f00000000ull;
};
-struct Mapping48 {
+/*
+C/C++ on linux/aarch64 (48-bit VMA)
+0000 0000 1000 - 0a00 0000 0000: main binary (10240 GB)
+0a00 0000 1000 - 1554 0000 0000: -
+1554 0000 1000 - 5400 0000 0000: shadow memory (64176 GB)
+5400 0000 1000 - 8000 0000 0000: -
+8000 0000 1000 - a000 0000 0000: metainfo (32768 GB)
+a000 0000 1000 - aaaa 0000 0000: -
+aaaa 0000 1000 - ac00 0000 0000: main binary (PIE) (1368 GB)
+ac00 0000 1000 - fc00 0000 0000: -
+fc00 0000 1000 - ffff ffff ffff: modules and main thread stack (4096 GB)
+
+N.B. the shadow memory region has a strange start address, because it
+contains the shadows for the mid, high and low app regions (in this
+unusual order).
+*/
+struct MappingAarch64_48 {
static const uptr kLoAppMemBeg = 0x0000000001000ull;
- static const uptr kLoAppMemEnd = 0x0000200000000ull;
- static const uptr kShadowBeg = 0x0002000000000ull;
- static const uptr kShadowEnd = 0x0004000000000ull;
- static const uptr kMetaShadowBeg = 0x0005000000000ull;
- static const uptr kMetaShadowEnd = 0x0006000000000ull;
+ static const uptr kLoAppMemEnd = 0x00a0000000000ull;
+ static const uptr kShadowBeg = 0x0155400000000ull;
+ static const uptr kShadowEnd = 0x0540000000000ull;
+ static const uptr kMetaShadowBeg = 0x0800000000000ull;
+ static const uptr kMetaShadowEnd = 0x0a00000000000ull;
static const uptr kMidAppMemBeg = 0x0aaaa00000000ull;
- static const uptr kMidAppMemEnd = 0x0aaaf00000000ull;
- static const uptr kTraceMemBeg = 0x0f06000000000ull;
- static const uptr kTraceMemEnd = 0x0f06200000000ull;
- static const uptr kHeapMemBeg = 0x0ffff00000000ull;
- static const uptr kHeapMemEnd = 0x0ffff00000000ull;
- static const uptr kHiAppMemBeg = 0x0ffff00000000ull;
+ static const uptr kMidAppMemEnd = 0x0ac0000000000ull;
+ static const uptr kHiAppMemBeg = 0x0fc0000000000ull;
static const uptr kHiAppMemEnd = 0x1000000000000ull;
- static const uptr kAppMemMsk = 0x0fff800000000ull;
- static const uptr kAppMemXor = 0x0000800000000ull;
+ static const uptr kHeapMemBeg = 0x0fc0000000000ull;
+ static const uptr kHeapMemEnd = 0x0fc0000000000ull;
+ static const uptr kShadowMsk = 0x0c00000000000ull;
+ static const uptr kShadowXor = 0x0200000000000ull;
+ static const uptr kShadowAdd = 0x0000000000000ull;
static const uptr kVdsoBeg = 0xffff000000000ull;
};
-// Indicates the runtime will define the memory regions at runtime.
-#define TSAN_RUNTIME_VMA 1
-// Indicates that mapping defines a mid range memory segment.
-#define TSAN_MID_APP_RANGE 1
-#elif defined(__powerpc64__)
-// PPC64 supports multiple VMA which leads to multiple address transformation
-// functions. To support these multiple VMAS transformations and mappings TSAN
-// runtime for PPC64 uses an external memory read (vmaSize) to select which
-// mapping to use. Although slower, it make a same instrumented binary run on
-// multiple kernels.
+/* C/C++ on linux/loongarch64 (47-bit VMA)
+0000 0000 4000 - 0080 0000 0000: main binary
+0080 0000 0000 - 0100 0000 0000: -
+0100 0000 0000 - 1000 0000 0000: shadow memory
+1000 0000 0000 - 3000 0000 0000: -
+3000 0000 0000 - 3400 0000 0000: metainfo
+3400 0000 0000 - 5555 0000 0000: -
+5555 0000 0000 - 5556 0000 0000: main binary (PIE)
+5556 0000 0000 - 7ffe 0000 0000: -
+7ffe 0000 0000 - 7fff 0000 0000: heap
+7fff 0000 0000 - 7fff 8000 0000: -
+7fff 8000 0000 - 8000 0000 0000: modules and main thread stack
+*/
+struct MappingLoongArch64_47 {
+ static const uptr kMetaShadowBeg = 0x300000000000ull;
+ static const uptr kMetaShadowEnd = 0x340000000000ull;
+ static const uptr kShadowBeg = 0x010000000000ull;
+ static const uptr kShadowEnd = 0x100000000000ull;
+ static const uptr kHeapMemBeg = 0x7ffe00000000ull;
+ static const uptr kHeapMemEnd = 0x7fff00000000ull;
+ static const uptr kLoAppMemBeg = 0x000000004000ull;
+ static const uptr kLoAppMemEnd = 0x008000000000ull;
+ static const uptr kMidAppMemBeg = 0x555500000000ull;
+ static const uptr kMidAppMemEnd = 0x555600000000ull;
+ static const uptr kHiAppMemBeg = 0x7fff80000000ull;
+ static const uptr kHiAppMemEnd = 0x800000000000ull;
+ static const uptr kShadowMsk = 0x780000000000ull;
+ static const uptr kShadowXor = 0x040000000000ull;
+ static const uptr kShadowAdd = 0x000000000000ull;
+ static const uptr kVdsoBeg = 0x7fffffffc000ull;
+};
/*
C/C++ on linux/powerpc64 (44-bit VMA)
@@ -274,18 +288,16 @@ C/C++ on linux/powerpc64 (44-bit VMA)
0001 0000 0000 - 0b00 0000 0000: shadow
0b00 0000 0000 - 0b00 0000 0000: -
0b00 0000 0000 - 0d00 0000 0000: metainfo (memory blocks and sync objects)
-0d00 0000 0000 - 0d00 0000 0000: -
-0d00 0000 0000 - 0f00 0000 0000: traces
-0f00 0000 0000 - 0f00 0000 0000: -
+0d00 0000 0000 - 0f00 0000 0000: -
0f00 0000 0000 - 0f50 0000 0000: heap
0f50 0000 0000 - 0f60 0000 0000: -
0f60 0000 0000 - 1000 0000 0000: modules and main thread stack
*/
-struct Mapping44 {
+struct MappingPPC64_44 {
+ static const uptr kBroken = kBrokenMapping | kBrokenReverseMapping |
+ kBrokenLinearity | kBrokenAliasedMetas;
static const uptr kMetaShadowBeg = 0x0b0000000000ull;
static const uptr kMetaShadowEnd = 0x0d0000000000ull;
- static const uptr kTraceMemBeg = 0x0d0000000000ull;
- static const uptr kTraceMemEnd = 0x0f0000000000ull;
static const uptr kShadowBeg = 0x000100000000ull;
static const uptr kShadowEnd = 0x0b0000000000ull;
static const uptr kLoAppMemBeg = 0x000000000100ull;
@@ -294,188 +306,261 @@ struct Mapping44 {
static const uptr kHeapMemEnd = 0x0f5000000000ull;
static const uptr kHiAppMemBeg = 0x0f6000000000ull;
static const uptr kHiAppMemEnd = 0x100000000000ull; // 44 bits
- static const uptr kAppMemMsk = 0x0f0000000000ull;
- static const uptr kAppMemXor = 0x002100000000ull;
+ static const uptr kShadowMsk = 0x0f0000000000ull;
+ static const uptr kShadowXor = 0x002100000000ull;
+ static const uptr kShadowAdd = 0x000000000000ull;
static const uptr kVdsoBeg = 0x3c0000000000000ull;
+ static const uptr kMidAppMemBeg = 0;
+ static const uptr kMidAppMemEnd = 0;
};
/*
C/C++ on linux/powerpc64 (46-bit VMA)
0000 0000 1000 - 0100 0000 0000: main binary
0100 0000 0000 - 0200 0000 0000: -
-0100 0000 0000 - 1000 0000 0000: shadow
-1000 0000 0000 - 1000 0000 0000: -
-1000 0000 0000 - 2000 0000 0000: metainfo (memory blocks and sync objects)
-2000 0000 0000 - 2000 0000 0000: -
-2000 0000 0000 - 2200 0000 0000: traces
-2200 0000 0000 - 3d00 0000 0000: -
+0100 0000 0000 - 0800 0000 0000: shadow
+0800 0000 0000 - 1000 0000 0000: -
+1000 0000 0000 - 1200 0000 0000: metainfo (memory blocks and sync objects)
+1200 0000 0000 - 3d00 0000 0000: -
3d00 0000 0000 - 3e00 0000 0000: heap
3e00 0000 0000 - 3e80 0000 0000: -
3e80 0000 0000 - 4000 0000 0000: modules and main thread stack
*/
-struct Mapping46 {
+struct MappingPPC64_46 {
static const uptr kMetaShadowBeg = 0x100000000000ull;
- static const uptr kMetaShadowEnd = 0x200000000000ull;
- static const uptr kTraceMemBeg = 0x200000000000ull;
- static const uptr kTraceMemEnd = 0x220000000000ull;
+ static const uptr kMetaShadowEnd = 0x120000000000ull;
static const uptr kShadowBeg = 0x010000000000ull;
- static const uptr kShadowEnd = 0x100000000000ull;
+ static const uptr kShadowEnd = 0x080000000000ull;
static const uptr kHeapMemBeg = 0x3d0000000000ull;
static const uptr kHeapMemEnd = 0x3e0000000000ull;
static const uptr kLoAppMemBeg = 0x000000001000ull;
static const uptr kLoAppMemEnd = 0x010000000000ull;
static const uptr kHiAppMemBeg = 0x3e8000000000ull;
static const uptr kHiAppMemEnd = 0x400000000000ull; // 46 bits
- static const uptr kAppMemMsk = 0x3c0000000000ull;
- static const uptr kAppMemXor = 0x020000000000ull;
+ static const uptr kShadowMsk = 0x3c0000000000ull;
+ static const uptr kShadowXor = 0x020000000000ull;
+ static const uptr kShadowAdd = 0x000000000000ull;
static const uptr kVdsoBeg = 0x7800000000000000ull;
+ static const uptr kMidAppMemBeg = 0;
+ static const uptr kMidAppMemEnd = 0;
};
/*
C/C++ on linux/powerpc64 (47-bit VMA)
0000 0000 1000 - 0100 0000 0000: main binary
0100 0000 0000 - 0200 0000 0000: -
-0100 0000 0000 - 1000 0000 0000: shadow
-1000 0000 0000 - 1000 0000 0000: -
-1000 0000 0000 - 2000 0000 0000: metainfo (memory blocks and sync objects)
-2000 0000 0000 - 2000 0000 0000: -
-2000 0000 0000 - 2200 0000 0000: traces
-2200 0000 0000 - 7d00 0000 0000: -
+0100 0000 0000 - 0800 0000 0000: shadow
+0800 0000 0000 - 1000 0000 0000: -
+1000 0000 0000 - 1200 0000 0000: metainfo (memory blocks and sync objects)
+1200 0000 0000 - 7d00 0000 0000: -
7d00 0000 0000 - 7e00 0000 0000: heap
7e00 0000 0000 - 7e80 0000 0000: -
7e80 0000 0000 - 8000 0000 0000: modules and main thread stack
*/
-struct Mapping47 {
+struct MappingPPC64_47 {
static const uptr kMetaShadowBeg = 0x100000000000ull;
- static const uptr kMetaShadowEnd = 0x200000000000ull;
- static const uptr kTraceMemBeg = 0x200000000000ull;
- static const uptr kTraceMemEnd = 0x220000000000ull;
+ static const uptr kMetaShadowEnd = 0x120000000000ull;
static const uptr kShadowBeg = 0x010000000000ull;
- static const uptr kShadowEnd = 0x100000000000ull;
+ static const uptr kShadowEnd = 0x080000000000ull;
static const uptr kHeapMemBeg = 0x7d0000000000ull;
static const uptr kHeapMemEnd = 0x7e0000000000ull;
static const uptr kLoAppMemBeg = 0x000000001000ull;
static const uptr kLoAppMemEnd = 0x010000000000ull;
static const uptr kHiAppMemBeg = 0x7e8000000000ull;
static const uptr kHiAppMemEnd = 0x800000000000ull; // 47 bits
- static const uptr kAppMemMsk = 0x7c0000000000ull;
- static const uptr kAppMemXor = 0x020000000000ull;
+ static const uptr kShadowMsk = 0x7c0000000000ull;
+ static const uptr kShadowXor = 0x020000000000ull;
+ static const uptr kShadowAdd = 0x000000000000ull;
static const uptr kVdsoBeg = 0x7800000000000000ull;
+ static const uptr kMidAppMemBeg = 0;
+ static const uptr kMidAppMemEnd = 0;
+};
+
+/*
+C/C++ on linux/riscv64 (39-bit VMA)
+0000 0010 00 - 0200 0000 00: main binary ( 8 GB)
+0200 0000 00 - 1000 0000 00: -
+1000 0000 00 - 2000 0000 00: shadow memory (64 GB)
+2000 0000 00 - 2400 0000 00: metainfo (16 GB)
+2400 0000 00 - 2aaa aaa0 00: -
+2aaa aaa0 00 - 2c00 0000 00: main binary (PIE) (~5 GB)
+2c00 0000 00 - 3c00 0000 00: -
+3c00 0000 00 - 4000 0000 00: libraries and main thread stack (16 GB)
+
+mmap by default allocates from top downwards.
+VDSO sits below the loader and above dynamic libraries, within the HiApp region.
+Heap starts after the program region, whose position depends on PIE vs. non-PIE.
+Tracking of these is disabled since their locations are not fixed.
+*/
+struct MappingRiscv64_39 {
+ static const uptr kLoAppMemBeg = 0x0000001000ull;
+ static const uptr kLoAppMemEnd = 0x0200000000ull;
+ static const uptr kShadowBeg = 0x1000000000ull;
+ static const uptr kShadowEnd = 0x2000000000ull;
+ static const uptr kMetaShadowBeg = 0x2000000000ull;
+ static const uptr kMetaShadowEnd = 0x2400000000ull;
+ static const uptr kMidAppMemBeg = 0x2aaaaaa000ull;
+ static const uptr kMidAppMemEnd = 0x2c00000000ull;
+ static const uptr kHeapMemBeg = 0x2c00000000ull;
+ static const uptr kHeapMemEnd = 0x2c00000000ull;
+ static const uptr kHiAppMemBeg = 0x3c00000000ull;
+ static const uptr kHiAppMemEnd = 0x3fffffffffull;
+ static const uptr kShadowMsk = 0x3800000000ull;
+ static const uptr kShadowXor = 0x0800000000ull;
+ static const uptr kShadowAdd = 0x0000000000ull;
+ static const uptr kVdsoBeg = 0x4000000000ull;
+};
+
+/*
+C/C++ on linux/riscv64 (48-bit VMA)
+0000 0000 1000 - 0400 0000 0000: main binary ( 4 TB)
+0500 0000 0000 - 2000 0000 0000: -
+2000 0000 0000 - 4000 0000 0000: shadow memory (32 TB)
+4000 0000 0000 - 4800 0000 0000: metainfo ( 8 TB)
+4800 0000 0000 - 5555 5555 5000: -
+5555 5555 5000 - 5a00 0000 0000: main binary (PIE) (~5 TB)
+5a00 0000 0000 - 7a00 0000 0000: -
+7a00 0000 0000 - 7fff ffff ffff: libraries and main thread stack ( 6 TB)
+*/
+struct MappingRiscv64_48 {
+ static const uptr kLoAppMemBeg = 0x000000001000ull;
+ static const uptr kLoAppMemEnd = 0x040000000000ull;
+ static const uptr kShadowBeg = 0x200000000000ull;
+ static const uptr kShadowEnd = 0x400000000000ull;
+ static const uptr kMetaShadowBeg = 0x400000000000ull;
+ static const uptr kMetaShadowEnd = 0x480000000000ull;
+ static const uptr kMidAppMemBeg = 0x555555555000ull;
+ static const uptr kMidAppMemEnd = 0x5a0000000000ull;
+ static const uptr kHeapMemBeg = 0x5a0000000000ull;
+ static const uptr kHeapMemEnd = 0x5a0000000000ull;
+ static const uptr kHiAppMemBeg = 0x7a0000000000ull;
+ static const uptr kHiAppMemEnd = 0x7fffffffffffull;
+ static const uptr kShadowMsk = 0x700000000000ull;
+ static const uptr kShadowXor = 0x100000000000ull;
+ static const uptr kShadowAdd = 0x000000000000ull;
+ static const uptr kVdsoBeg = 0x800000000000ull;
};
-// Indicates the runtime will define the memory regions at runtime.
-#define TSAN_RUNTIME_VMA 1
-#elif defined(__s390x__)
/*
C/C++ on linux/s390x
While the kernel provides a 64-bit address space, we have to restrict ourselves
to 48 bits due to how e.g. SyncVar::GetId() works.
0000 0000 1000 - 0e00 0000 0000: binary, modules, stacks - 14 TiB
-0e00 0000 0000 - 4000 0000 0000: -
-4000 0000 0000 - 8000 0000 0000: shadow - 64TiB (4 * app)
-8000 0000 0000 - 9000 0000 0000: -
+0e00 0000 0000 - 2000 0000 0000: -
+2000 0000 0000 - 4000 0000 0000: shadow - 32TiB (2 * app)
+4000 0000 0000 - 9000 0000 0000: -
9000 0000 0000 - 9800 0000 0000: metainfo - 8TiB (0.5 * app)
-9800 0000 0000 - a000 0000 0000: -
-a000 0000 0000 - b000 0000 0000: traces - 16TiB (max history * 128k threads)
-b000 0000 0000 - be00 0000 0000: -
+9800 0000 0000 - be00 0000 0000: -
be00 0000 0000 - c000 0000 0000: heap - 2TiB (max supported by the allocator)
*/
-struct Mapping {
+struct MappingS390x {
static const uptr kMetaShadowBeg = 0x900000000000ull;
static const uptr kMetaShadowEnd = 0x980000000000ull;
- static const uptr kTraceMemBeg = 0xa00000000000ull;
- static const uptr kTraceMemEnd = 0xb00000000000ull;
- static const uptr kShadowBeg = 0x400000000000ull;
- static const uptr kShadowEnd = 0x800000000000ull;
+ static const uptr kShadowBeg = 0x200000000000ull;
+ static const uptr kShadowEnd = 0x400000000000ull;
static const uptr kHeapMemBeg = 0xbe0000000000ull;
static const uptr kHeapMemEnd = 0xc00000000000ull;
static const uptr kLoAppMemBeg = 0x000000001000ull;
static const uptr kLoAppMemEnd = 0x0e0000000000ull;
static const uptr kHiAppMemBeg = 0xc00000004000ull;
static const uptr kHiAppMemEnd = 0xc00000004000ull;
- static const uptr kAppMemMsk = 0xb00000000000ull;
- static const uptr kAppMemXor = 0x100000000000ull;
+ static const uptr kShadowMsk = 0xb00000000000ull;
+ static const uptr kShadowXor = 0x100000000000ull;
+ static const uptr kShadowAdd = 0x000000000000ull;
static const uptr kVdsoBeg = 0xfffffffff000ull;
+ static const uptr kMidAppMemBeg = 0;
+ static const uptr kMidAppMemEnd = 0;
};
-#endif
-
-#elif SANITIZER_GO && !SANITIZER_WINDOWS && HAS_48_BIT_ADDRESS_SPACE
/* Go on linux, darwin and freebsd on x86_64
0000 0000 1000 - 0000 1000 0000: executable
0000 1000 0000 - 00c0 0000 0000: -
00c0 0000 0000 - 00e0 0000 0000: heap
00e0 0000 0000 - 2000 0000 0000: -
-2000 0000 0000 - 2380 0000 0000: shadow
-2380 0000 0000 - 3000 0000 0000: -
+2000 0000 0000 - 21c0 0000 0000: shadow
+21c0 0000 0000 - 3000 0000 0000: -
3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
-4000 0000 0000 - 6000 0000 0000: -
-6000 0000 0000 - 6200 0000 0000: traces
-6200 0000 0000 - 8000 0000 0000: -
+4000 0000 0000 - 8000 0000 0000: -
*/
-struct Mapping {
+struct MappingGo48 {
static const uptr kMetaShadowBeg = 0x300000000000ull;
static const uptr kMetaShadowEnd = 0x400000000000ull;
- static const uptr kTraceMemBeg = 0x600000000000ull;
- static const uptr kTraceMemEnd = 0x620000000000ull;
static const uptr kShadowBeg = 0x200000000000ull;
- static const uptr kShadowEnd = 0x238000000000ull;
- static const uptr kAppMemBeg = 0x000000001000ull;
- static const uptr kAppMemEnd = 0x00e000000000ull;
+ static const uptr kShadowEnd = 0x21c000000000ull;
+ static const uptr kLoAppMemBeg = 0x000000001000ull;
+ static const uptr kLoAppMemEnd = 0x00e000000000ull;
+ static const uptr kMidAppMemBeg = 0;
+ static const uptr kMidAppMemEnd = 0;
+ static const uptr kHiAppMemBeg = 0;
+ static const uptr kHiAppMemEnd = 0;
+ static const uptr kHeapMemBeg = 0;
+ static const uptr kHeapMemEnd = 0;
+ static const uptr kVdsoBeg = 0;
+ static const uptr kShadowMsk = 0;
+ static const uptr kShadowXor = 0;
+ static const uptr kShadowAdd = 0x200000000000ull;
};
-#elif SANITIZER_GO && SANITIZER_WINDOWS
-
/* Go on windows
0000 0000 1000 - 0000 1000 0000: executable
0000 1000 0000 - 00f8 0000 0000: -
00c0 0000 0000 - 00e0 0000 0000: heap
00e0 0000 0000 - 0100 0000 0000: -
-0100 0000 0000 - 0500 0000 0000: shadow
-0500 0000 0000 - 0560 0000 0000: -
-0560 0000 0000 - 0760 0000 0000: traces
-0760 0000 0000 - 07d0 0000 0000: metainfo (memory blocks and sync objects)
+0100 0000 0000 - 0300 0000 0000: shadow
+0300 0000 0000 - 0700 0000 0000: -
+0700 0000 0000 - 0770 0000 0000: metainfo (memory blocks and sync objects)
07d0 0000 0000 - 8000 0000 0000: -
+PIE binaries currently not supported, but it should be theoretically possible.
*/
-struct Mapping {
- static const uptr kMetaShadowBeg = 0x076000000000ull;
- static const uptr kMetaShadowEnd = 0x07d000000000ull;
- static const uptr kTraceMemBeg = 0x056000000000ull;
- static const uptr kTraceMemEnd = 0x076000000000ull;
+struct MappingGoWindows {
+ static const uptr kMetaShadowBeg = 0x070000000000ull;
+ static const uptr kMetaShadowEnd = 0x077000000000ull;
static const uptr kShadowBeg = 0x010000000000ull;
- static const uptr kShadowEnd = 0x050000000000ull;
- static const uptr kAppMemBeg = 0x000000001000ull;
- static const uptr kAppMemEnd = 0x00e000000000ull;
+ static const uptr kShadowEnd = 0x030000000000ull;
+ static const uptr kLoAppMemBeg = 0x000000001000ull;
+ static const uptr kLoAppMemEnd = 0x00e000000000ull;
+ static const uptr kMidAppMemBeg = 0;
+ static const uptr kMidAppMemEnd = 0;
+ static const uptr kHiAppMemBeg = 0;
+ static const uptr kHiAppMemEnd = 0;
+ static const uptr kHeapMemBeg = 0;
+ static const uptr kHeapMemEnd = 0;
+ static const uptr kVdsoBeg = 0;
+ static const uptr kShadowMsk = 0;
+ static const uptr kShadowXor = 0;
+ static const uptr kShadowAdd = 0x010000000000ull;
};
-#elif SANITIZER_GO && defined(__powerpc64__)
-
-/* Only Mapping46 and Mapping47 are currently supported for powercp64 on Go. */
-
/* Go on linux/powerpc64 (46-bit VMA)
0000 0000 1000 - 0000 1000 0000: executable
0000 1000 0000 - 00c0 0000 0000: -
00c0 0000 0000 - 00e0 0000 0000: heap
00e0 0000 0000 - 2000 0000 0000: -
-2000 0000 0000 - 2380 0000 0000: shadow
-2380 0000 0000 - 2400 0000 0000: -
-2400 0000 0000 - 3400 0000 0000: metainfo (memory blocks and sync objects)
-3400 0000 0000 - 3600 0000 0000: -
-3600 0000 0000 - 3800 0000 0000: traces
-3800 0000 0000 - 4000 0000 0000: -
+2000 0000 0000 - 21c0 0000 0000: shadow
+21c0 0000 0000 - 2400 0000 0000: -
+2400 0000 0000 - 2470 0000 0000: metainfo (memory blocks and sync objects)
+2470 0000 0000 - 4000 0000 0000: -
*/
-struct Mapping46 {
+struct MappingGoPPC64_46 {
static const uptr kMetaShadowBeg = 0x240000000000ull;
- static const uptr kMetaShadowEnd = 0x340000000000ull;
- static const uptr kTraceMemBeg = 0x360000000000ull;
- static const uptr kTraceMemEnd = 0x380000000000ull;
+ static const uptr kMetaShadowEnd = 0x247000000000ull;
static const uptr kShadowBeg = 0x200000000000ull;
- static const uptr kShadowEnd = 0x238000000000ull;
- static const uptr kAppMemBeg = 0x000000001000ull;
- static const uptr kAppMemEnd = 0x00e000000000ull;
+ static const uptr kShadowEnd = 0x21c000000000ull;
+ static const uptr kLoAppMemBeg = 0x000000001000ull;
+ static const uptr kLoAppMemEnd = 0x00e000000000ull;
+ static const uptr kMidAppMemBeg = 0;
+ static const uptr kMidAppMemEnd = 0;
+ static const uptr kHiAppMemBeg = 0;
+ static const uptr kHiAppMemEnd = 0;
+ static const uptr kHeapMemBeg = 0;
+ static const uptr kHeapMemEnd = 0;
+ static const uptr kVdsoBeg = 0;
+ static const uptr kShadowMsk = 0;
+ static const uptr kShadowXor = 0;
+ static const uptr kShadowAdd = 0x200000000000ull;
};
/* Go on linux/powerpc64 (47-bit VMA)
@@ -483,718 +568,465 @@ struct Mapping46 {
0000 1000 0000 - 00c0 0000 0000: -
00c0 0000 0000 - 00e0 0000 0000: heap
00e0 0000 0000 - 2000 0000 0000: -
-2000 0000 0000 - 3000 0000 0000: shadow
-3000 0000 0000 - 3000 0000 0000: -
-3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
-4000 0000 0000 - 6000 0000 0000: -
-6000 0000 0000 - 6200 0000 0000: traces
-6200 0000 0000 - 8000 0000 0000: -
+2000 0000 0000 - 2800 0000 0000: shadow
+2800 0000 0000 - 3000 0000 0000: -
+3000 0000 0000 - 3200 0000 0000: metainfo (memory blocks and sync objects)
+3200 0000 0000 - 8000 0000 0000: -
*/
-struct Mapping47 {
+struct MappingGoPPC64_47 {
static const uptr kMetaShadowBeg = 0x300000000000ull;
- static const uptr kMetaShadowEnd = 0x400000000000ull;
- static const uptr kTraceMemBeg = 0x600000000000ull;
- static const uptr kTraceMemEnd = 0x620000000000ull;
+ static const uptr kMetaShadowEnd = 0x320000000000ull;
static const uptr kShadowBeg = 0x200000000000ull;
- static const uptr kShadowEnd = 0x300000000000ull;
- static const uptr kAppMemBeg = 0x000000001000ull;
- static const uptr kAppMemEnd = 0x00e000000000ull;
+ static const uptr kShadowEnd = 0x280000000000ull;
+ static const uptr kLoAppMemBeg = 0x000000001000ull;
+ static const uptr kLoAppMemEnd = 0x00e000000000ull;
+ static const uptr kMidAppMemBeg = 0;
+ static const uptr kMidAppMemEnd = 0;
+ static const uptr kHiAppMemBeg = 0;
+ static const uptr kHiAppMemEnd = 0;
+ static const uptr kHeapMemBeg = 0;
+ static const uptr kHeapMemEnd = 0;
+ static const uptr kVdsoBeg = 0;
+ static const uptr kShadowMsk = 0;
+ static const uptr kShadowXor = 0;
+ static const uptr kShadowAdd = 0x200000000000ull;
};
-#define TSAN_RUNTIME_VMA 1
-
-#elif SANITIZER_GO && defined(__aarch64__)
-
/* Go on linux/aarch64 (48-bit VMA) and darwin/aarch64 (47-bit VMA)
0000 0000 1000 - 0000 1000 0000: executable
0000 1000 0000 - 00c0 0000 0000: -
00c0 0000 0000 - 00e0 0000 0000: heap
00e0 0000 0000 - 2000 0000 0000: -
-2000 0000 0000 - 3000 0000 0000: shadow
-3000 0000 0000 - 3000 0000 0000: -
-3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
-4000 0000 0000 - 6000 0000 0000: -
-6000 0000 0000 - 6200 0000 0000: traces
-6200 0000 0000 - 8000 0000 0000: -
+2000 0000 0000 - 2800 0000 0000: shadow
+2800 0000 0000 - 3000 0000 0000: -
+3000 0000 0000 - 3200 0000 0000: metainfo (memory blocks and sync objects)
+3200 0000 0000 - 8000 0000 0000: -
*/
-
-struct Mapping {
+struct MappingGoAarch64 {
static const uptr kMetaShadowBeg = 0x300000000000ull;
- static const uptr kMetaShadowEnd = 0x400000000000ull;
- static const uptr kTraceMemBeg = 0x600000000000ull;
- static const uptr kTraceMemEnd = 0x620000000000ull;
+ static const uptr kMetaShadowEnd = 0x320000000000ull;
static const uptr kShadowBeg = 0x200000000000ull;
- static const uptr kShadowEnd = 0x300000000000ull;
- static const uptr kAppMemBeg = 0x000000001000ull;
- static const uptr kAppMemEnd = 0x00e000000000ull;
+ static const uptr kShadowEnd = 0x280000000000ull;
+ static const uptr kLoAppMemBeg = 0x000000001000ull;
+ static const uptr kLoAppMemEnd = 0x00e000000000ull;
+ static const uptr kMidAppMemBeg = 0;
+ static const uptr kMidAppMemEnd = 0;
+ static const uptr kHiAppMemBeg = 0;
+ static const uptr kHiAppMemEnd = 0;
+ static const uptr kHeapMemBeg = 0;
+ static const uptr kHeapMemEnd = 0;
+ static const uptr kVdsoBeg = 0;
+ static const uptr kShadowMsk = 0;
+ static const uptr kShadowXor = 0;
+ static const uptr kShadowAdd = 0x200000000000ull;
};
-// Indicates the runtime will define the memory regions at runtime.
-#define TSAN_RUNTIME_VMA 1
+/* Go on linux/loongarch64 (47-bit VMA)
+0000 0000 1000 - 0000 1000 0000: executable
+0000 1000 0000 - 00c0 0000 0000: -
+00c0 0000 0000 - 00e0 0000 0000: heap
+00e0 0000 0000 - 2000 0000 0000: -
+2000 0000 0000 - 2800 0000 0000: shadow
+2800 0000 0000 - 3000 0000 0000: -
+3000 0000 0000 - 3200 0000 0000: metainfo (memory blocks and sync objects)
+3200 0000 0000 - 8000 0000 0000: -
+*/
+struct MappingGoLoongArch64_47 {
+ static const uptr kMetaShadowBeg = 0x300000000000ull;
+ static const uptr kMetaShadowEnd = 0x320000000000ull;
+ static const uptr kShadowBeg = 0x200000000000ull;
+ static const uptr kShadowEnd = 0x280000000000ull;
+ static const uptr kLoAppMemBeg = 0x000000001000ull;
+ static const uptr kLoAppMemEnd = 0x00e000000000ull;
+ static const uptr kMidAppMemBeg = 0;
+ static const uptr kMidAppMemEnd = 0;
+ static const uptr kHiAppMemBeg = 0;
+ static const uptr kHiAppMemEnd = 0;
+ static const uptr kHeapMemBeg = 0;
+ static const uptr kHeapMemEnd = 0;
+ static const uptr kVdsoBeg = 0;
+ static const uptr kShadowMsk = 0;
+ static const uptr kShadowXor = 0;
+ static const uptr kShadowAdd = 0x200000000000ull;
+};
-#elif SANITIZER_GO && defined(__mips64)
/*
Go on linux/mips64 (47-bit VMA)
0000 0000 1000 - 0000 1000 0000: executable
0000 1000 0000 - 00c0 0000 0000: -
00c0 0000 0000 - 00e0 0000 0000: heap
00e0 0000 0000 - 2000 0000 0000: -
-2000 0000 0000 - 3000 0000 0000: shadow
-3000 0000 0000 - 3000 0000 0000: -
-3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
-4000 0000 0000 - 6000 0000 0000: -
-6000 0000 0000 - 6200 0000 0000: traces
-6200 0000 0000 - 8000 0000 0000: -
+2000 0000 0000 - 2800 0000 0000: shadow
+2800 0000 0000 - 3000 0000 0000: -
+3000 0000 0000 - 3200 0000 0000: metainfo (memory blocks and sync objects)
+3200 0000 0000 - 8000 0000 0000: -
*/
-struct Mapping47 {
+struct MappingGoMips64_47 {
static const uptr kMetaShadowBeg = 0x300000000000ull;
- static const uptr kMetaShadowEnd = 0x400000000000ull;
- static const uptr kTraceMemBeg = 0x600000000000ull;
- static const uptr kTraceMemEnd = 0x620000000000ull;
+ static const uptr kMetaShadowEnd = 0x320000000000ull;
static const uptr kShadowBeg = 0x200000000000ull;
- static const uptr kShadowEnd = 0x300000000000ull;
- static const uptr kAppMemBeg = 0x000000001000ull;
- static const uptr kAppMemEnd = 0x00e000000000ull;
+ static const uptr kShadowEnd = 0x280000000000ull;
+ static const uptr kLoAppMemBeg = 0x000000001000ull;
+ static const uptr kLoAppMemEnd = 0x00e000000000ull;
+ static const uptr kMidAppMemBeg = 0;
+ static const uptr kMidAppMemEnd = 0;
+ static const uptr kHiAppMemBeg = 0;
+ static const uptr kHiAppMemEnd = 0;
+ static const uptr kHeapMemBeg = 0;
+ static const uptr kHeapMemEnd = 0;
+ static const uptr kVdsoBeg = 0;
+ static const uptr kShadowMsk = 0;
+ static const uptr kShadowXor = 0;
+ static const uptr kShadowAdd = 0x200000000000ull;
};
-#define TSAN_RUNTIME_VMA 1
-
-#elif SANITIZER_GO && defined(__s390x__)
/*
Go on linux/s390x
0000 0000 1000 - 1000 0000 0000: executable and heap - 16 TiB
1000 0000 0000 - 4000 0000 0000: -
-4000 0000 0000 - 8000 0000 0000: shadow - 64TiB (4 * app)
-8000 0000 0000 - 9000 0000 0000: -
+4000 0000 0000 - 6000 0000 0000: shadow - 32TiB (2 * app)
+6000 0000 0000 - 9000 0000 0000: -
9000 0000 0000 - 9800 0000 0000: metainfo - 8TiB (0.5 * app)
-9800 0000 0000 - a000 0000 0000: -
-a000 0000 0000 - b000 0000 0000: traces - 16TiB (max history * 128k threads)
*/
-struct Mapping {
+struct MappingGoS390x {
static const uptr kMetaShadowBeg = 0x900000000000ull;
static const uptr kMetaShadowEnd = 0x980000000000ull;
- static const uptr kTraceMemBeg = 0xa00000000000ull;
- static const uptr kTraceMemEnd = 0xb00000000000ull;
static const uptr kShadowBeg = 0x400000000000ull;
- static const uptr kShadowEnd = 0x800000000000ull;
- static const uptr kAppMemBeg = 0x000000001000ull;
- static const uptr kAppMemEnd = 0x100000000000ull;
+ static const uptr kShadowEnd = 0x600000000000ull;
+ static const uptr kLoAppMemBeg = 0x000000001000ull;
+ static const uptr kLoAppMemEnd = 0x100000000000ull;
+ static const uptr kMidAppMemBeg = 0;
+ static const uptr kMidAppMemEnd = 0;
+ static const uptr kHiAppMemBeg = 0;
+ static const uptr kHiAppMemEnd = 0;
+ static const uptr kHeapMemBeg = 0;
+ static const uptr kHeapMemEnd = 0;
+ static const uptr kVdsoBeg = 0;
+ static const uptr kShadowMsk = 0;
+ static const uptr kShadowXor = 0;
+ static const uptr kShadowAdd = 0x400000000000ull;
};
-#else
-# error "Unknown platform"
-#endif
-
-
-#ifdef TSAN_RUNTIME_VMA
extern uptr vmaSize;
-#endif
-
-
-enum MappingType {
- MAPPING_LO_APP_BEG,
- MAPPING_LO_APP_END,
- MAPPING_HI_APP_BEG,
- MAPPING_HI_APP_END,
-#ifdef TSAN_MID_APP_RANGE
- MAPPING_MID_APP_BEG,
- MAPPING_MID_APP_END,
-#endif
- MAPPING_HEAP_BEG,
- MAPPING_HEAP_END,
- MAPPING_APP_BEG,
- MAPPING_APP_END,
- MAPPING_SHADOW_BEG,
- MAPPING_SHADOW_END,
- MAPPING_META_SHADOW_BEG,
- MAPPING_META_SHADOW_END,
- MAPPING_TRACE_BEG,
- MAPPING_TRACE_END,
- MAPPING_VDSO_BEG,
-};
-template<typename Mapping, int Type>
-uptr MappingImpl(void) {
- switch (Type) {
-#if !SANITIZER_GO
- case MAPPING_LO_APP_BEG: return Mapping::kLoAppMemBeg;
- case MAPPING_LO_APP_END: return Mapping::kLoAppMemEnd;
-# ifdef TSAN_MID_APP_RANGE
- case MAPPING_MID_APP_BEG: return Mapping::kMidAppMemBeg;
- case MAPPING_MID_APP_END: return Mapping::kMidAppMemEnd;
-# endif
- case MAPPING_HI_APP_BEG: return Mapping::kHiAppMemBeg;
- case MAPPING_HI_APP_END: return Mapping::kHiAppMemEnd;
- case MAPPING_HEAP_BEG: return Mapping::kHeapMemBeg;
- case MAPPING_HEAP_END: return Mapping::kHeapMemEnd;
- case MAPPING_VDSO_BEG: return Mapping::kVdsoBeg;
-#else
- case MAPPING_APP_BEG: return Mapping::kAppMemBeg;
- case MAPPING_APP_END: return Mapping::kAppMemEnd;
-#endif
- case MAPPING_SHADOW_BEG: return Mapping::kShadowBeg;
- case MAPPING_SHADOW_END: return Mapping::kShadowEnd;
- case MAPPING_META_SHADOW_BEG: return Mapping::kMetaShadowBeg;
- case MAPPING_META_SHADOW_END: return Mapping::kMetaShadowEnd;
- case MAPPING_TRACE_BEG: return Mapping::kTraceMemBeg;
- case MAPPING_TRACE_END: return Mapping::kTraceMemEnd;
+template <typename Func, typename Arg>
+ALWAYS_INLINE auto SelectMapping(Arg arg) {
+#if SANITIZER_GO
+# if defined(__powerpc64__)
+ switch (vmaSize) {
+ case 46:
+ return Func::template Apply<MappingGoPPC64_46>(arg);
+ case 47:
+ return Func::template Apply<MappingGoPPC64_47>(arg);
}
-}
-
-template<int Type>
-uptr MappingArchImpl(void) {
-#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
+# elif defined(__mips64)
+ return Func::template Apply<MappingGoMips64_47>(arg);
+# elif defined(__s390x__)
+ return Func::template Apply<MappingGoS390x>(arg);
+# elif defined(__aarch64__)
+ return Func::template Apply<MappingGoAarch64>(arg);
+# elif defined(__loongarch_lp64)
+ return Func::template Apply<MappingGoLoongArch64_47>(arg);
+# elif SANITIZER_WINDOWS
+ return Func::template Apply<MappingGoWindows>(arg);
+# else
+ return Func::template Apply<MappingGo48>(arg);
+# endif
+#else // SANITIZER_GO
+# if SANITIZER_IOS && !SANITIZER_IOSSIM
+ return Func::template Apply<MappingAppleAarch64>(arg);
+# elif defined(__x86_64__) || SANITIZER_APPLE
+ return Func::template Apply<Mapping48AddressSpace>(arg);
+# elif defined(__aarch64__)
switch (vmaSize) {
- case 39: return MappingImpl<Mapping39, Type>();
- case 42: return MappingImpl<Mapping42, Type>();
- case 48: return MappingImpl<Mapping48, Type>();
+ case 39:
+ return Func::template Apply<MappingAarch64_39>(arg);
+ case 42:
+ return Func::template Apply<MappingAarch64_42>(arg);
+ case 48:
+ return Func::template Apply<MappingAarch64_48>(arg);
}
- DCHECK(0);
- return 0;
-#elif defined(__powerpc64__)
+# elif SANITIZER_LOONGARCH64
+ return Func::template Apply<MappingLoongArch64_47>(arg);
+# elif defined(__powerpc64__)
switch (vmaSize) {
-#if !SANITIZER_GO
- case 44: return MappingImpl<Mapping44, Type>();
-#endif
- case 46: return MappingImpl<Mapping46, Type>();
- case 47: return MappingImpl<Mapping47, Type>();
+ case 44:
+ return Func::template Apply<MappingPPC64_44>(arg);
+ case 46:
+ return Func::template Apply<MappingPPC64_46>(arg);
+ case 47:
+ return Func::template Apply<MappingPPC64_47>(arg);
}
- DCHECK(0);
- return 0;
-#elif defined(__mips64)
+# elif defined(__mips64)
+ return Func::template Apply<MappingMips64_40>(arg);
+# elif SANITIZER_RISCV64
switch (vmaSize) {
-#if !SANITIZER_GO
- case 40: return MappingImpl<Mapping40, Type>();
-#else
- case 47: return MappingImpl<Mapping47, Type>();
-#endif
+ case 39:
+ return Func::template Apply<MappingRiscv64_39>(arg);
+ case 48:
+ return Func::template Apply<MappingRiscv64_48>(arg);
}
- DCHECK(0);
- return 0;
-#else
- return MappingImpl<Mapping, Type>();
+# elif defined(__s390x__)
+ return Func::template Apply<MappingS390x>(arg);
+# else
+# error "unsupported platform"
+# endif
#endif
+ Die();
}
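SelectMapping dispatches to the mapping chosen at runtime by instantiating Func::Apply<Mapping>; any struct exposing such a templated static Apply works as Func. A minimal sketch of a caller-defined probe (the names ShadowSizeImpl and ShadowSize are illustrative, not part of the runtime):

struct ShadowSizeImpl {
  template <typename Mapping>
  static uptr Apply(uptr) {
    // Size of the shadow region under the mapping picked by SelectMapping.
    return Mapping::kShadowEnd - Mapping::kShadowBeg;
  }
};

ALWAYS_INLINE uptr ShadowSize() { return SelectMapping<ShadowSizeImpl>(uptr(0)); }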
-#if !SANITIZER_GO
-ALWAYS_INLINE
-uptr LoAppMemBeg(void) {
- return MappingArchImpl<MAPPING_LO_APP_BEG>();
-}
-ALWAYS_INLINE
-uptr LoAppMemEnd(void) {
- return MappingArchImpl<MAPPING_LO_APP_END>();
+template <typename Func>
+void ForEachMapping() {
+ Func::template Apply<Mapping48AddressSpace>();
+ Func::template Apply<MappingMips64_40>();
+ Func::template Apply<MappingAppleAarch64>();
+ Func::template Apply<MappingAarch64_39>();
+ Func::template Apply<MappingAarch64_42>();
+ Func::template Apply<MappingAarch64_48>();
+ Func::template Apply<MappingLoongArch64_47>();
+ Func::template Apply<MappingPPC64_44>();
+ Func::template Apply<MappingPPC64_46>();
+ Func::template Apply<MappingPPC64_47>();
+ Func::template Apply<MappingRiscv64_39>();
+ Func::template Apply<MappingRiscv64_48>();
+ Func::template Apply<MappingS390x>();
+ Func::template Apply<MappingGo48>();
+ Func::template Apply<MappingGoWindows>();
+ Func::template Apply<MappingGoPPC64_46>();
+ Func::template Apply<MappingGoPPC64_47>();
+ Func::template Apply<MappingGoAarch64>();
+ Func::template Apply<MappingGoLoongArch64_47>();
+ Func::template Apply<MappingGoMips64_47>();
+ Func::template Apply<MappingGoS390x>();
}
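ForEachMapping instantiates a nullary Apply for every known layout, which is useful for compile-time checks across all mappings. A hypothetical debug printer, for illustration only:

struct PrintShadowRange {
  template <typename Mapping>
  static void Apply() {
    // Dump the shadow range of one candidate mapping.
    Printf("shadow: 0x%zx-0x%zx\n", Mapping::kShadowBeg, Mapping::kShadowEnd);
  }
};
// Usage: ForEachMapping<PrintShadowRange>();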
-#ifdef TSAN_MID_APP_RANGE
-ALWAYS_INLINE
-uptr MidAppMemBeg(void) {
- return MappingArchImpl<MAPPING_MID_APP_BEG>();
-}
-ALWAYS_INLINE
-uptr MidAppMemEnd(void) {
- return MappingArchImpl<MAPPING_MID_APP_END>();
-}
-#endif
+enum MappingType {
+ kLoAppMemBeg,
+ kLoAppMemEnd,
+ kHiAppMemBeg,
+ kHiAppMemEnd,
+ kMidAppMemBeg,
+ kMidAppMemEnd,
+ kHeapMemBeg,
+ kHeapMemEnd,
+ kShadowBeg,
+ kShadowEnd,
+ kMetaShadowBeg,
+ kMetaShadowEnd,
+ kVdsoBeg,
+};
+
+struct MappingField {
+ template <typename Mapping>
+ static uptr Apply(MappingType type) {
+ switch (type) {
+ case kLoAppMemBeg:
+ return Mapping::kLoAppMemBeg;
+ case kLoAppMemEnd:
+ return Mapping::kLoAppMemEnd;
+ case kMidAppMemBeg:
+ return Mapping::kMidAppMemBeg;
+ case kMidAppMemEnd:
+ return Mapping::kMidAppMemEnd;
+ case kHiAppMemBeg:
+ return Mapping::kHiAppMemBeg;
+ case kHiAppMemEnd:
+ return Mapping::kHiAppMemEnd;
+ case kHeapMemBeg:
+ return Mapping::kHeapMemBeg;
+ case kHeapMemEnd:
+ return Mapping::kHeapMemEnd;
+ case kVdsoBeg:
+ return Mapping::kVdsoBeg;
+ case kShadowBeg:
+ return Mapping::kShadowBeg;
+ case kShadowEnd:
+ return Mapping::kShadowEnd;
+ case kMetaShadowBeg:
+ return Mapping::kMetaShadowBeg;
+ case kMetaShadowEnd:
+ return Mapping::kMetaShadowEnd;
+ }
+ Die();
+ }
+};
ALWAYS_INLINE
-uptr HeapMemBeg(void) {
- return MappingArchImpl<MAPPING_HEAP_BEG>();
-}
+uptr LoAppMemBeg(void) { return SelectMapping<MappingField>(kLoAppMemBeg); }
ALWAYS_INLINE
-uptr HeapMemEnd(void) {
- return MappingArchImpl<MAPPING_HEAP_END>();
-}
+uptr LoAppMemEnd(void) { return SelectMapping<MappingField>(kLoAppMemEnd); }
ALWAYS_INLINE
-uptr HiAppMemBeg(void) {
- return MappingArchImpl<MAPPING_HI_APP_BEG>();
-}
+uptr MidAppMemBeg(void) { return SelectMapping<MappingField>(kMidAppMemBeg); }
ALWAYS_INLINE
-uptr HiAppMemEnd(void) {
- return MappingArchImpl<MAPPING_HI_APP_END>();
-}
+uptr MidAppMemEnd(void) { return SelectMapping<MappingField>(kMidAppMemEnd); }
ALWAYS_INLINE
-uptr VdsoBeg(void) {
- return MappingArchImpl<MAPPING_VDSO_BEG>();
-}
-
-#else
+uptr HeapMemBeg(void) { return SelectMapping<MappingField>(kHeapMemBeg); }
+ALWAYS_INLINE
+uptr HeapMemEnd(void) { return SelectMapping<MappingField>(kHeapMemEnd); }
ALWAYS_INLINE
-uptr AppMemBeg(void) {
- return MappingArchImpl<MAPPING_APP_BEG>();
-}
+uptr HiAppMemBeg(void) { return SelectMapping<MappingField>(kHiAppMemBeg); }
ALWAYS_INLINE
-uptr AppMemEnd(void) {
- return MappingArchImpl<MAPPING_APP_END>();
-}
-
-#endif
-
-static inline
-bool GetUserRegion(int i, uptr *start, uptr *end) {
- switch (i) {
- default:
- return false;
-#if !SANITIZER_GO
- case 0:
- *start = LoAppMemBeg();
- *end = LoAppMemEnd();
- return true;
- case 1:
- *start = HiAppMemBeg();
- *end = HiAppMemEnd();
- return true;
- case 2:
- *start = HeapMemBeg();
- *end = HeapMemEnd();
- return true;
-# ifdef TSAN_MID_APP_RANGE
- case 3:
- *start = MidAppMemBeg();
- *end = MidAppMemEnd();
- return true;
-# endif
-#else
- case 0:
- *start = AppMemBeg();
- *end = AppMemEnd();
- return true;
-#endif
- }
-}
+uptr HiAppMemEnd(void) { return SelectMapping<MappingField>(kHiAppMemEnd); }
ALWAYS_INLINE
-uptr ShadowBeg(void) {
- return MappingArchImpl<MAPPING_SHADOW_BEG>();
-}
-ALWAYS_INLINE
-uptr ShadowEnd(void) {
- return MappingArchImpl<MAPPING_SHADOW_END>();
-}
+uptr VdsoBeg(void) { return SelectMapping<MappingField>(kVdsoBeg); }
ALWAYS_INLINE
-uptr MetaShadowBeg(void) {
- return MappingArchImpl<MAPPING_META_SHADOW_BEG>();
-}
+uptr ShadowBeg(void) { return SelectMapping<MappingField>(kShadowBeg); }
ALWAYS_INLINE
-uptr MetaShadowEnd(void) {
- return MappingArchImpl<MAPPING_META_SHADOW_END>();
-}
+uptr ShadowEnd(void) { return SelectMapping<MappingField>(kShadowEnd); }
ALWAYS_INLINE
-uptr TraceMemBeg(void) {
- return MappingArchImpl<MAPPING_TRACE_BEG>();
-}
+uptr MetaShadowBeg(void) { return SelectMapping<MappingField>(kMetaShadowBeg); }
ALWAYS_INLINE
-uptr TraceMemEnd(void) {
- return MappingArchImpl<MAPPING_TRACE_END>();
-}
+uptr MetaShadowEnd(void) { return SelectMapping<MappingField>(kMetaShadowEnd); }
-
-template<typename Mapping>
-bool IsAppMemImpl(uptr mem) {
-#if !SANITIZER_GO
+struct IsAppMemImpl {
+ template <typename Mapping>
+ static bool Apply(uptr mem) {
return (mem >= Mapping::kHeapMemBeg && mem < Mapping::kHeapMemEnd) ||
-# ifdef TSAN_MID_APP_RANGE
(mem >= Mapping::kMidAppMemBeg && mem < Mapping::kMidAppMemEnd) ||
-# endif
(mem >= Mapping::kLoAppMemBeg && mem < Mapping::kLoAppMemEnd) ||
(mem >= Mapping::kHiAppMemBeg && mem < Mapping::kHiAppMemEnd);
-#else
- return mem >= Mapping::kAppMemBeg && mem < Mapping::kAppMemEnd;
-#endif
-}
-
-ALWAYS_INLINE
-bool IsAppMem(uptr mem) {
-#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
- switch (vmaSize) {
- case 39: return IsAppMemImpl<Mapping39>(mem);
- case 42: return IsAppMemImpl<Mapping42>(mem);
- case 48: return IsAppMemImpl<Mapping48>(mem);
- }
- DCHECK(0);
- return false;
-#elif defined(__powerpc64__)
- switch (vmaSize) {
-#if !SANITIZER_GO
- case 44: return IsAppMemImpl<Mapping44>(mem);
-#endif
- case 46: return IsAppMemImpl<Mapping46>(mem);
- case 47: return IsAppMemImpl<Mapping47>(mem);
- }
- DCHECK(0);
- return false;
-#elif defined(__mips64)
- switch (vmaSize) {
-#if !SANITIZER_GO
- case 40: return IsAppMemImpl<Mapping40>(mem);
-#else
- case 47: return IsAppMemImpl<Mapping47>(mem);
-#endif
}
- DCHECK(0);
- return false;
-#else
- return IsAppMemImpl<Mapping>(mem);
-#endif
-}
-
-
-template<typename Mapping>
-bool IsShadowMemImpl(uptr mem) {
- return mem >= Mapping::kShadowBeg && mem <= Mapping::kShadowEnd;
-}
+};
ALWAYS_INLINE
-bool IsShadowMem(uptr mem) {
-#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
- switch (vmaSize) {
- case 39: return IsShadowMemImpl<Mapping39>(mem);
- case 42: return IsShadowMemImpl<Mapping42>(mem);
- case 48: return IsShadowMemImpl<Mapping48>(mem);
- }
- DCHECK(0);
- return false;
-#elif defined(__powerpc64__)
- switch (vmaSize) {
-#if !SANITIZER_GO
- case 44: return IsShadowMemImpl<Mapping44>(mem);
-#endif
- case 46: return IsShadowMemImpl<Mapping46>(mem);
- case 47: return IsShadowMemImpl<Mapping47>(mem);
- }
- DCHECK(0);
- return false;
-#elif defined(__mips64)
- switch (vmaSize) {
-#if !SANITIZER_GO
- case 40: return IsShadowMemImpl<Mapping40>(mem);
-#else
- case 47: return IsShadowMemImpl<Mapping47>(mem);
-#endif
- }
- DCHECK(0);
- return false;
-#else
- return IsShadowMemImpl<Mapping>(mem);
-#endif
-}
+bool IsAppMem(uptr mem) { return SelectMapping<IsAppMemImpl>(mem); }
-
-template<typename Mapping>
-bool IsMetaMemImpl(uptr mem) {
- return mem >= Mapping::kMetaShadowBeg && mem <= Mapping::kMetaShadowEnd;
-}
+struct IsShadowMemImpl {
+ template <typename Mapping>
+ static bool Apply(uptr mem) {
+ return mem >= Mapping::kShadowBeg && mem <= Mapping::kShadowEnd;
+ }
+};
ALWAYS_INLINE
-bool IsMetaMem(uptr mem) {
-#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
- switch (vmaSize) {
- case 39: return IsMetaMemImpl<Mapping39>(mem);
- case 42: return IsMetaMemImpl<Mapping42>(mem);
- case 48: return IsMetaMemImpl<Mapping48>(mem);
- }
- DCHECK(0);
- return false;
-#elif defined(__powerpc64__)
- switch (vmaSize) {
-#if !SANITIZER_GO
- case 44: return IsMetaMemImpl<Mapping44>(mem);
-#endif
- case 46: return IsMetaMemImpl<Mapping46>(mem);
- case 47: return IsMetaMemImpl<Mapping47>(mem);
- }
- DCHECK(0);
- return false;
-#elif defined(__mips64)
- switch (vmaSize) {
-#if !SANITIZER_GO
- case 40: return IsMetaMemImpl<Mapping40>(mem);
-#else
- case 47: return IsMetaMemImpl<Mapping47>(mem);
-#endif
- }
- DCHECK(0);
- return false;
-#else
- return IsMetaMemImpl<Mapping>(mem);
-#endif
+bool IsShadowMem(RawShadow *p) {
+ return SelectMapping<IsShadowMemImpl>(reinterpret_cast<uptr>(p));
}
-
-template<typename Mapping>
-uptr MemToShadowImpl(uptr x) {
- DCHECK(IsAppMem(x));
-#if !SANITIZER_GO
- return (((x) & ~(Mapping::kAppMemMsk | (kShadowCell - 1)))
- ^ Mapping::kAppMemXor) * kShadowCnt;
-#else
-# ifndef SANITIZER_WINDOWS
- return ((x & ~(kShadowCell - 1)) * kShadowCnt) | Mapping::kShadowBeg;
-# else
- return ((x & ~(kShadowCell - 1)) * kShadowCnt) + Mapping::kShadowBeg;
-# endif
-#endif
-}
+struct IsMetaMemImpl {
+ template <typename Mapping>
+ static bool Apply(uptr mem) {
+ return mem >= Mapping::kMetaShadowBeg && mem <= Mapping::kMetaShadowEnd;
+ }
+};
ALWAYS_INLINE
-uptr MemToShadow(uptr x) {
-#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
- switch (vmaSize) {
- case 39: return MemToShadowImpl<Mapping39>(x);
- case 42: return MemToShadowImpl<Mapping42>(x);
- case 48: return MemToShadowImpl<Mapping48>(x);
- }
- DCHECK(0);
- return 0;
-#elif defined(__powerpc64__)
- switch (vmaSize) {
-#if !SANITIZER_GO
- case 44: return MemToShadowImpl<Mapping44>(x);
-#endif
- case 46: return MemToShadowImpl<Mapping46>(x);
- case 47: return MemToShadowImpl<Mapping47>(x);
- }
- DCHECK(0);
- return 0;
-#elif defined(__mips64)
- switch (vmaSize) {
-#if !SANITIZER_GO
- case 40: return MemToShadowImpl<Mapping40>(x);
-#else
- case 47: return MemToShadowImpl<Mapping47>(x);
-#endif
- }
- DCHECK(0);
- return 0;
-#else
- return MemToShadowImpl<Mapping>(x);
-#endif
+bool IsMetaMem(const u32 *p) {
+ return SelectMapping<IsMetaMemImpl>(reinterpret_cast<uptr>(p));
}
-
-template<typename Mapping>
-u32 *MemToMetaImpl(uptr x) {
- DCHECK(IsAppMem(x));
-#if !SANITIZER_GO
- return (u32*)(((((x) & ~(Mapping::kAppMemMsk | (kMetaShadowCell - 1)))) /
- kMetaShadowCell * kMetaShadowSize) | Mapping::kMetaShadowBeg);
-#else
-# ifndef SANITIZER_WINDOWS
- return (u32*)(((x & ~(kMetaShadowCell - 1)) / \
- kMetaShadowCell * kMetaShadowSize) | Mapping::kMetaShadowBeg);
-# else
- return (u32*)(((x & ~(kMetaShadowCell - 1)) / \
- kMetaShadowCell * kMetaShadowSize) + Mapping::kMetaShadowBeg);
-# endif
-#endif
-}
+struct MemToShadowImpl {
+ template <typename Mapping>
+ static uptr Apply(uptr x) {
+ DCHECK(IsAppMemImpl::Apply<Mapping>(x));
+ return (((x) & ~(Mapping::kShadowMsk | (kShadowCell - 1))) ^
+ Mapping::kShadowXor) *
+ kShadowMultiplier +
+ Mapping::kShadowAdd;
+ }
+};
ALWAYS_INLINE
-u32 *MemToMeta(uptr x) {
-#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
- switch (vmaSize) {
- case 39: return MemToMetaImpl<Mapping39>(x);
- case 42: return MemToMetaImpl<Mapping42>(x);
- case 48: return MemToMetaImpl<Mapping48>(x);
- }
- DCHECK(0);
- return 0;
-#elif defined(__powerpc64__)
- switch (vmaSize) {
-#if !SANITIZER_GO
- case 44: return MemToMetaImpl<Mapping44>(x);
-#endif
- case 46: return MemToMetaImpl<Mapping46>(x);
- case 47: return MemToMetaImpl<Mapping47>(x);
- }
- DCHECK(0);
- return 0;
-#elif defined(__mips64)
- switch (vmaSize) {
-#if !SANITIZER_GO
- case 40: return MemToMetaImpl<Mapping40>(x);
-#else
- case 47: return MemToMetaImpl<Mapping47>(x);
-#endif
- }
- DCHECK(0);
- return 0;
-#else
- return MemToMetaImpl<Mapping>(x);
-#endif
+RawShadow *MemToShadow(uptr x) {
+ return reinterpret_cast<RawShadow *>(SelectMapping<MemToShadowImpl>(x));
}
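As a worked example, assuming kShadowCell = 8 and kShadowMultiplier = 2 (their values in this version of tsan_defs.h), MemToShadow under MappingGoWindows (kShadowMsk = kShadowXor = 0, kShadowAdd = 0x010000000000) computes:

// x = 0x00c000001000 (Go heap):
//   ((x & ~7) ^ 0) * 2 + 0x010000000000
// = 0x018000002000 + 0x010000000000
// = 0x028000002000, inside [kShadowBeg, kShadowEnd) =
//   [0x010000000000, 0x030000000000).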
-
-template<typename Mapping>
-uptr ShadowToMemImpl(uptr s) {
- DCHECK(IsShadowMem(s));
-#if !SANITIZER_GO
- // The shadow mapping is non-linear and we've lost some bits, so we don't have
- // an easy way to restore the original app address. But the mapping is a
- // bijection, so we try to restore the address as belonging to low/mid/high
- // range consecutively and see if shadow->app->shadow mapping gives us the
- // same address.
- uptr p = (s / kShadowCnt) ^ Mapping::kAppMemXor;
- if (p >= Mapping::kLoAppMemBeg && p < Mapping::kLoAppMemEnd &&
- MemToShadow(p) == s)
- return p;
-# ifdef TSAN_MID_APP_RANGE
- p = ((s / kShadowCnt) ^ Mapping::kAppMemXor) +
- (Mapping::kMidAppMemBeg & Mapping::kAppMemMsk);
- if (p >= Mapping::kMidAppMemBeg && p < Mapping::kMidAppMemEnd &&
- MemToShadow(p) == s)
- return p;
-# endif
- return ((s / kShadowCnt) ^ Mapping::kAppMemXor) | Mapping::kAppMemMsk;
-#else // #if !SANITIZER_GO
-# ifndef SANITIZER_WINDOWS
- return (s & ~Mapping::kShadowBeg) / kShadowCnt;
-# else
- return (s - Mapping::kShadowBeg) / kShadowCnt;
-# endif // SANITIZER_WINDOWS
-#endif
-}
+struct MemToMetaImpl {
+ template <typename Mapping>
+ static u32 *Apply(uptr x) {
+ DCHECK(IsAppMemImpl::Apply<Mapping>(x));
+ return (u32 *)(((((x) & ~(Mapping::kShadowMsk | (kMetaShadowCell - 1)))) /
+ kMetaShadowCell * kMetaShadowSize) |
+ Mapping::kMetaShadowBeg);
+ }
+};
ALWAYS_INLINE
-uptr ShadowToMem(uptr s) {
-#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
- switch (vmaSize) {
- case 39: return ShadowToMemImpl<Mapping39>(s);
- case 42: return ShadowToMemImpl<Mapping42>(s);
- case 48: return ShadowToMemImpl<Mapping48>(s);
+u32 *MemToMeta(uptr x) { return SelectMapping<MemToMetaImpl>(x); }
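The meta mapping is linear in the same way; assuming kMetaShadowCell = 8 and kMetaShadowSize = 4 (so meta shadow is half the app size), the same MappingGoWindows address maps as:

// x = 0x00c000001000:
//   ((x & ~7) / 8 * 4) | 0x070000000000
// = 0x006000000800 | 0x070000000000
// = 0x076000000800, inside [kMetaShadowBeg, kMetaShadowEnd) =
//   [0x070000000000, 0x077000000000).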
+
+struct ShadowToMemImpl {
+ template <typename Mapping>
+ static uptr Apply(uptr sp) {
+ if (!IsShadowMemImpl::Apply<Mapping>(sp))
+ return 0;
+ // The shadow mapping is non-linear and we've lost some bits, so we don't
+ // have an easy way to restore the original app address. But the mapping is
+ // a bijection, so we try to restore the address as belonging to
+ // low/mid/high range consecutively and see if shadow->app->shadow mapping
+ // gives us the same address.
+ uptr p =
+ ((sp - Mapping::kShadowAdd) / kShadowMultiplier) ^ Mapping::kShadowXor;
+ if (p >= Mapping::kLoAppMemBeg && p < Mapping::kLoAppMemEnd &&
+ MemToShadowImpl::Apply<Mapping>(p) == sp)
+ return p;
+ if (Mapping::kMidAppMemBeg) {
+ uptr p_mid = p + (Mapping::kMidAppMemBeg & Mapping::kShadowMsk);
+ if (p_mid >= Mapping::kMidAppMemBeg && p_mid < Mapping::kMidAppMemEnd &&
+ MemToShadowImpl::Apply<Mapping>(p_mid) == sp)
+ return p_mid;
+ }
+ return p | Mapping::kShadowMsk;
}
- DCHECK(0);
- return 0;
-#elif defined(__powerpc64__)
- switch (vmaSize) {
-#if !SANITIZER_GO
- case 44: return ShadowToMemImpl<Mapping44>(s);
-#endif
- case 46: return ShadowToMemImpl<Mapping46>(s);
- case 47: return ShadowToMemImpl<Mapping47>(s);
- }
- DCHECK(0);
- return 0;
-#elif defined(__mips64)
- switch (vmaSize) {
-#if !SANITIZER_GO
- case 40: return ShadowToMemImpl<Mapping40>(s);
-#else
- case 47: return ShadowToMemImpl<Mapping47>(s);
-#endif
- }
- DCHECK(0);
- return 0;
-#else
- return ShadowToMemImpl<Mapping>(s);
-#endif
-}
-
-
-
-// The additional page is to catch shadow stack overflow as paging fault.
-// Windows wants 64K alignment for mmaps.
-const uptr kTotalTraceSize = (kTraceSize * sizeof(Event) + sizeof(Trace)
- + (64 << 10) + (64 << 10) - 1) & ~((64 << 10) - 1);
-
-template<typename Mapping>
-uptr GetThreadTraceImpl(int tid) {
- uptr p = Mapping::kTraceMemBeg + (uptr)tid * kTotalTraceSize;
- DCHECK_LT(p, Mapping::kTraceMemEnd);
- return p;
-}
+};
ALWAYS_INLINE
-uptr GetThreadTrace(int tid) {
-#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
- switch (vmaSize) {
- case 39: return GetThreadTraceImpl<Mapping39>(tid);
- case 42: return GetThreadTraceImpl<Mapping42>(tid);
- case 48: return GetThreadTraceImpl<Mapping48>(tid);
- }
- DCHECK(0);
- return 0;
-#elif defined(__powerpc64__)
- switch (vmaSize) {
-#if !SANITIZER_GO
- case 44: return GetThreadTraceImpl<Mapping44>(tid);
-#endif
- case 46: return GetThreadTraceImpl<Mapping46>(tid);
- case 47: return GetThreadTraceImpl<Mapping47>(tid);
- }
- DCHECK(0);
- return 0;
-#elif defined(__mips64)
- switch (vmaSize) {
-#if !SANITIZER_GO
- case 40: return GetThreadTraceImpl<Mapping40>(tid);
-#else
- case 47: return GetThreadTraceImpl<Mapping47>(tid);
-#endif
- }
- DCHECK(0);
- return 0;
-#else
- return GetThreadTraceImpl<Mapping>(tid);
-#endif
+uptr ShadowToMem(RawShadow *s) {
+ return SelectMapping<ShadowToMemImpl>(reinterpret_cast<uptr>(s));
}
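A minimal sketch of the bijection property the comment above relies on, written as a hypothetical test helper (not part of the runtime):

static void CheckShadowRoundTrip(uptr p) {
  DCHECK(IsAppMem(p));
  RawShadow *s = MemToShadow(p);
  DCHECK(IsShadowMem(s));
  // MemToShadow drops the low bits of the kShadowCell-sized cell, so the
  // round trip recovers the cell-aligned address rather than p itself.
  DCHECK_EQ(ShadowToMem(s), RoundDown(p, kShadowCell));
}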
-
-template<typename Mapping>
-uptr GetThreadTraceHeaderImpl(int tid) {
- uptr p = Mapping::kTraceMemBeg + (uptr)tid * kTotalTraceSize
- + kTraceSize * sizeof(Event);
- DCHECK_LT(p, Mapping::kTraceMemEnd);
- return p;
+// Compresses addr to kCompressedAddrBits stored in least significant bits.
+ALWAYS_INLINE uptr CompressAddr(uptr addr) {
+ return addr & ((1ull << kCompressedAddrBits) - 1);
}
-ALWAYS_INLINE
-uptr GetThreadTraceHeader(int tid) {
-#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
- switch (vmaSize) {
- case 39: return GetThreadTraceHeaderImpl<Mapping39>(tid);
- case 42: return GetThreadTraceHeaderImpl<Mapping42>(tid);
- case 48: return GetThreadTraceHeaderImpl<Mapping48>(tid);
- }
- DCHECK(0);
- return 0;
-#elif defined(__powerpc64__)
- switch (vmaSize) {
-#if !SANITIZER_GO
- case 44: return GetThreadTraceHeaderImpl<Mapping44>(tid);
-#endif
- case 46: return GetThreadTraceHeaderImpl<Mapping46>(tid);
- case 47: return GetThreadTraceHeaderImpl<Mapping47>(tid);
- }
- DCHECK(0);
- return 0;
-#elif defined(__mips64)
- switch (vmaSize) {
-#if !SANITIZER_GO
- case 40: return GetThreadTraceHeaderImpl<Mapping40>(tid);
-#else
- case 47: return GetThreadTraceHeaderImpl<Mapping47>(tid);
-#endif
+struct RestoreAddrImpl {
+ typedef uptr Result;
+ template <typename Mapping>
+ static Result Apply(uptr addr) {
+    // To restore the address we go over all app memory ranges and check if
+    // the top 3 bits of the compressed addr match those of the app range. If
+    // they do, we assume that the compressed address comes from that range
+    // and restore the missing top bits to match the app range address.
+ const uptr ranges[] = {
+ Mapping::kLoAppMemBeg, Mapping::kLoAppMemEnd, Mapping::kMidAppMemBeg,
+ Mapping::kMidAppMemEnd, Mapping::kHiAppMemBeg, Mapping::kHiAppMemEnd,
+ Mapping::kHeapMemBeg, Mapping::kHeapMemEnd,
+ };
+ const uptr indicator = 0x0e0000000000ull;
+ const uptr ind_lsb = 1ull << LeastSignificantSetBitIndex(indicator);
+ for (uptr i = 0; i < ARRAY_SIZE(ranges); i += 2) {
+ uptr beg = ranges[i];
+ uptr end = ranges[i + 1];
+ if (beg == end)
+ continue;
+ for (uptr p = beg; p < end; p = RoundDown(p + ind_lsb, ind_lsb)) {
+ if ((addr & indicator) == (p & indicator))
+ return addr | (p & ~(ind_lsb - 1));
+ }
+ }
+ Printf("ThreadSanitizer: failed to restore address 0x%zx\n", addr);
+ Die();
}
- DCHECK(0);
- return 0;
-#else
- return GetThreadTraceHeaderImpl<Mapping>(tid);
-#endif
+};
+
+// Restores compressed addr from kCompressedAddrBits to full representation.
+// This is called only during reporting and is not performance-critical.
+inline uptr RestoreAddr(uptr addr) {
+ return SelectMapping<RestoreAddrImpl>(addr);
}
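A hedged sketch of the intended round trip; it holds only for addresses inside one of the active mapping's app ranges (kCompressedAddrBits is assumed to be 44 here, per this version of tsan_defs.h):

static void CheckAddrRoundTrip(uptr addr) {
  DCHECK(IsAppMem(addr));
  // Bits at and above kCompressedAddrBits are dropped by CompressAddr and
  // recovered by RestoreAddr from the known per-mapping app ranges.
  DCHECK_EQ(RestoreAddr(CompressAddr(addr)), addr);
}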
void InitializePlatform();
void InitializePlatformEarly();
-void CheckAndProtect();
+bool CheckAndProtect(bool protect, bool ignore_heap, bool print_warnings);
void InitializeShadowMemoryPlatform();
-void FlushShadowMemory();
-void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive);
+void WriteMemoryProfile(char *buf, uptr buf_size, u64 uptime_ns);
int ExtractResolvFDs(void *state, int *fds, int nfd);
int ExtractRecvmsgFDs(void *msg, int *fds, int nfd);
uptr ExtractLongJmpSp(uptr *env);
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp
index cfe597e5380e..0d0b1aba1f85 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp
@@ -66,7 +66,8 @@ extern "C" void *__libc_stack_end;
void *__libc_stack_end = 0;
#endif
-#if SANITIZER_LINUX && defined(__aarch64__) && !SANITIZER_GO
+#if SANITIZER_LINUX && (defined(__aarch64__) || defined(__loongarch_lp64)) && \
+ !SANITIZER_GO
# define INIT_LONGJMP_XOR_KEY 1
#else
# define INIT_LONGJMP_XOR_KEY 0
@@ -85,80 +86,73 @@ static void InitializeLongjmpXorKey();
static uptr longjmp_xor_key;
#endif
-#ifdef TSAN_RUNTIME_VMA
// Runtime detected VMA size.
uptr vmaSize;
-#endif
enum {
- MemTotal = 0,
- MemShadow = 1,
- MemMeta = 2,
- MemFile = 3,
- MemMmap = 4,
- MemTrace = 5,
- MemHeap = 6,
- MemOther = 7,
- MemCount = 8,
+ MemTotal,
+ MemShadow,
+ MemMeta,
+ MemFile,
+ MemMmap,
+ MemHeap,
+ MemOther,
+ MemCount,
};
-void FillProfileCallback(uptr p, uptr rss, bool file,
- uptr *mem, uptr stats_size) {
+void FillProfileCallback(uptr p, uptr rss, bool file, uptr *mem) {
mem[MemTotal] += rss;
if (p >= ShadowBeg() && p < ShadowEnd())
mem[MemShadow] += rss;
else if (p >= MetaShadowBeg() && p < MetaShadowEnd())
mem[MemMeta] += rss;
-#if !SANITIZER_GO
+ else if ((p >= LoAppMemBeg() && p < LoAppMemEnd()) ||
+ (p >= MidAppMemBeg() && p < MidAppMemEnd()) ||
+ (p >= HiAppMemBeg() && p < HiAppMemEnd()))
+ mem[file ? MemFile : MemMmap] += rss;
else if (p >= HeapMemBeg() && p < HeapMemEnd())
mem[MemHeap] += rss;
- else if (p >= LoAppMemBeg() && p < LoAppMemEnd())
- mem[file ? MemFile : MemMmap] += rss;
- else if (p >= HiAppMemBeg() && p < HiAppMemEnd())
- mem[file ? MemFile : MemMmap] += rss;
-#else
- else if (p >= AppMemBeg() && p < AppMemEnd())
- mem[file ? MemFile : MemMmap] += rss;
-#endif
- else if (p >= TraceMemBeg() && p < TraceMemEnd())
- mem[MemTrace] += rss;
else
mem[MemOther] += rss;
}
-void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
+void WriteMemoryProfile(char *buf, uptr buf_size, u64 uptime_ns) {
uptr mem[MemCount];
- internal_memset(mem, 0, sizeof(mem[0]) * MemCount);
- __sanitizer::GetMemoryProfile(FillProfileCallback, mem, 7);
- StackDepotStats *stacks = StackDepotGetStats();
- internal_snprintf(buf, buf_size,
- "RSS %zd MB: shadow:%zd meta:%zd file:%zd mmap:%zd"
- " trace:%zd heap:%zd other:%zd stacks=%zd[%zd] nthr=%zd/%zd\n",
+ internal_memset(mem, 0, sizeof(mem));
+ GetMemoryProfile(FillProfileCallback, mem);
+ auto meta = ctx->metamap.GetMemoryStats();
+ StackDepotStats stacks = StackDepotGetStats();
+ uptr nthread, nlive;
+ ctx->thread_registry.GetNumberOfThreads(&nthread, &nlive);
+ uptr trace_mem;
+ {
+ Lock l(&ctx->slot_mtx);
+ trace_mem = ctx->trace_part_total_allocated * sizeof(TracePart);
+ }
+ uptr internal_stats[AllocatorStatCount];
+ internal_allocator()->GetStats(internal_stats);
+ // All these are allocated from the common mmap region.
+ mem[MemMmap] -= meta.mem_block + meta.sync_obj + trace_mem +
+ stacks.allocated + internal_stats[AllocatorStatMapped];
+ if (s64(mem[MemMmap]) < 0)
+ mem[MemMmap] = 0;
+ internal_snprintf(
+ buf, buf_size,
+ "==%zu== %llus [%zu]: RSS %zd MB: shadow:%zd meta:%zd file:%zd"
+ " mmap:%zd heap:%zd other:%zd intalloc:%zd memblocks:%zd syncobj:%zu"
+ " trace:%zu stacks=%zd threads=%zu/%zu\n",
+ internal_getpid(), uptime_ns / (1000 * 1000 * 1000), ctx->global_epoch,
mem[MemTotal] >> 20, mem[MemShadow] >> 20, mem[MemMeta] >> 20,
- mem[MemFile] >> 20, mem[MemMmap] >> 20, mem[MemTrace] >> 20,
- mem[MemHeap] >> 20, mem[MemOther] >> 20,
- stacks->allocated >> 20, stacks->n_uniq_ids,
- nlive, nthread);
-}
-
-#if SANITIZER_LINUX
-void FlushShadowMemoryCallback(
- const SuspendedThreadsList &suspended_threads_list,
- void *argument) {
- ReleaseMemoryPagesToOS(ShadowBeg(), ShadowEnd());
-}
-#endif
-
-void FlushShadowMemory() {
-#if SANITIZER_LINUX
- StopTheWorld(FlushShadowMemoryCallback, 0);
-#endif
+ mem[MemFile] >> 20, mem[MemMmap] >> 20, mem[MemHeap] >> 20,
+ mem[MemOther] >> 20, internal_stats[AllocatorStatMapped] >> 20,
+ meta.mem_block >> 20, meta.sync_obj >> 20, trace_mem >> 20,
+ stacks.allocated >> 20, nlive, nthread);
}
#if !SANITIZER_GO
-// Mark shadow for .rodata sections with the special kShadowRodata marker.
+// Mark shadow for .rodata sections with the special Shadow::kRodata marker.
// Accesses to .rodata can't race, so this saves time, memory and trace space.
-static void MapRodata() {
+static NOINLINE void MapRodata(char* buffer, uptr size) {
// First create temp file.
const char *tmpdir = GetEnv("TMPDIR");
if (tmpdir == 0)
@@ -169,21 +163,21 @@ static void MapRodata() {
#endif
if (tmpdir == 0)
return;
- char name[256];
- internal_snprintf(name, sizeof(name), "%s/tsan.rodata.%d",
+ internal_snprintf(buffer, size, "%s/tsan.rodata.%d",
tmpdir, (int)internal_getpid());
- uptr openrv = internal_open(name, O_RDWR | O_CREAT | O_EXCL, 0600);
+ uptr openrv = internal_open(buffer, O_RDWR | O_CREAT | O_EXCL, 0600);
if (internal_iserror(openrv))
return;
- internal_unlink(name); // Unlink it now, so that we can reuse the buffer.
+ internal_unlink(buffer); // Unlink it now, so that we can reuse the buffer.
fd_t fd = openrv;
- // Fill the file with kShadowRodata.
- const uptr kMarkerSize = 512 * 1024 / sizeof(u64);
- InternalMmapVector<u64> marker(kMarkerSize);
+ // Fill the file with Shadow::kRodata.
+ const uptr kMarkerSize = 512 * 1024 / sizeof(RawShadow);
+ InternalMmapVector<RawShadow> marker(kMarkerSize);
// volatile to prevent insertion of memset
- for (volatile u64 *p = marker.data(); p < marker.data() + kMarkerSize; p++)
- *p = kShadowRodata;
- internal_write(fd, marker.data(), marker.size() * sizeof(u64));
+ for (volatile RawShadow *p = marker.data(); p < marker.data() + kMarkerSize;
+ p++)
+ *p = Shadow::kRodata;
+ internal_write(fd, marker.data(), marker.size() * sizeof(RawShadow));
// Map the file into memory.
uptr page = internal_mmap(0, GetPageSizeCached(), PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, fd, 0);
@@ -193,8 +187,8 @@ static void MapRodata() {
}
// Map the file into shadow of .rodata sections.
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
- // Reusing the buffer 'name'.
- MemoryMappedSegment segment(name, ARRAY_SIZE(name));
+ // Reusing the buffer 'buffer'.
+ MemoryMappedSegment segment(buffer, size);
while (proc_maps.Next(&segment)) {
if (segment.filename[0] != 0 && segment.filename[0] != '[' &&
segment.IsReadable() && segment.IsExecutable() &&
@@ -203,9 +197,10 @@ static void MapRodata() {
char *shadow_start = (char *)MemToShadow(segment.start);
char *shadow_end = (char *)MemToShadow(segment.end);
for (char *p = shadow_start; p < shadow_end;
- p += marker.size() * sizeof(u64)) {
- internal_mmap(p, Min<uptr>(marker.size() * sizeof(u64), shadow_end - p),
- PROT_READ, MAP_PRIVATE | MAP_FIXED, fd, 0);
+ p += marker.size() * sizeof(RawShadow)) {
+ internal_mmap(
+ p, Min<uptr>(marker.size() * sizeof(RawShadow), shadow_end - p),
+ PROT_READ, MAP_PRIVATE | MAP_FIXED, fd, 0);
}
}
}
@@ -213,13 +208,95 @@ static void MapRodata() {
}
void InitializeShadowMemoryPlatform() {
- MapRodata();
+ char buffer[256]; // Keep in a different frame.
+ MapRodata(buffer, sizeof(buffer));
}
#endif // #if !SANITIZER_GO
+# if !SANITIZER_GO
+static void ReExecIfNeeded() {
+ // Go maps shadow memory lazily and works fine with limited address space.
+  // Unlimited stack is not a problem either, because the executable
+ // is not compiled with -pie.
+ bool reexec = false;
+ // TSan doesn't play well with unlimited stack size (as stack
+ // overlaps with shadow memory). If we detect unlimited stack size,
+ // we re-exec the program with limited stack size as a best effort.
+ if (StackSizeIsUnlimited()) {
+ const uptr kMaxStackSize = 32 * 1024 * 1024;
+ VReport(1,
+ "Program is run with unlimited stack size, which wouldn't "
+ "work with ThreadSanitizer.\n"
+ "Re-execing with stack size limited to %zd bytes.\n",
+ kMaxStackSize);
+ SetStackSizeLimitInBytes(kMaxStackSize);
+ reexec = true;
+ }
+
+ if (!AddressSpaceIsUnlimited()) {
+ Report(
+ "WARNING: Program is run with limited virtual address space,"
+ " which wouldn't work with ThreadSanitizer.\n");
+ Report("Re-execing with unlimited virtual address space.\n");
+ SetAddressSpaceUnlimited();
+ reexec = true;
+ }
+
+# if SANITIZER_LINUX
+ // ASLR personality check.
+ int old_personality = personality(0xffffffff);
+ bool aslr_on =
+ (old_personality != -1) && ((old_personality & ADDR_NO_RANDOMIZE) == 0);
+
+# if SANITIZER_ANDROID && (defined(__aarch64__) || defined(__x86_64__))
+ // After patch "arm64: mm: support ARCH_MMAP_RND_BITS." is introduced in
+ // linux kernel, the random gap between stack and mapped area is increased
+ // from 128M to 36G on 39-bit aarch64. As it is almost impossible to cover
+ // this big range, we should disable randomized virtual space on aarch64.
+ if (aslr_on) {
+ VReport(1,
+ "WARNING: Program is run with randomized virtual address "
+ "space, which wouldn't work with ThreadSanitizer on Android.\n"
+ "Re-execing with fixed virtual address space.\n");
+ CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1);
+ reexec = true;
+ }
+# endif
+
+ if (reexec) {
+ // Don't check the address space since we're going to re-exec anyway.
+ } else if (!CheckAndProtect(false, false, false)) {
+ if (aslr_on) {
+ // Disable ASLR if the memory layout was incompatible.
+ // Alternatively, we could just keep re-execing until we get lucky
+ // with a compatible randomized layout, but the risk is that if it's
+ // not an ASLR-related issue, we will be stuck in an infinite loop of
+      // re-execing (unless we change ReExec to take a parameter limiting
+      // the number of retries).
+ VReport(1,
+ "WARNING: ThreadSanitizer: memory layout is incompatible, "
+ "possibly due to high-entropy ASLR.\n"
+ "Re-execing with fixed virtual address space.\n"
+ "N.B. reducing ASLR entropy is preferable.\n");
+ CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1);
+ reexec = true;
+ } else {
+ VReport(1,
+ "FATAL: ThreadSanitizer: memory layout is incompatible, "
+ "even though ASLR is disabled.\n"
+ "Please file a bug.\n");
+ Die();
+ }
+ }
+# endif // SANITIZER_LINUX
+
+ if (reexec)
+ ReExec();
+}
+# endif
+
void InitializePlatformEarly() {
-#ifdef TSAN_RUNTIME_VMA
vmaSize =
(MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
#if defined(__aarch64__)
@@ -236,6 +313,20 @@ void InitializePlatformEarly() {
Die();
}
#endif
+#elif SANITIZER_LOONGARCH64
+  // Both Go and C/C++ modes support only a 47-bit VMA on loongarch64.
+  if (vmaSize != 47) {
+    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
+    Printf("FATAL: Found %zd - Supported 47\n", vmaSize);
+    Die();
+  }
#elif defined(__powerpc64__)
# if !SANITIZER_GO
if (vmaSize != 44 && vmaSize != 46 && vmaSize != 47) {
@@ -264,8 +355,21 @@ void InitializePlatformEarly() {
Die();
}
# endif
-#endif
-#endif
+# elif SANITIZER_RISCV64
+  // The bottom half of the VMA is allocated for userspace.
+ vmaSize = vmaSize + 1;
+# if !SANITIZER_GO
+ if (vmaSize != 39 && vmaSize != 48) {
+ Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
+ Printf("FATAL: Found %zd - Supported 39 and 48\n", vmaSize);
+ Die();
+ }
+# endif
+# endif
+
+# if !SANITIZER_GO
+ ReExecIfNeeded();
+# endif
}
void InitializePlatform() {
@@ -276,48 +380,22 @@ void InitializePlatform() {
// is not compiled with -pie.
#if !SANITIZER_GO
{
- bool reexec = false;
- // TSan doesn't play well with unlimited stack size (as stack
- // overlaps with shadow memory). If we detect unlimited stack size,
- // we re-exec the program with limited stack size as a best effort.
- if (StackSizeIsUnlimited()) {
- const uptr kMaxStackSize = 32 * 1024 * 1024;
- VReport(1, "Program is run with unlimited stack size, which wouldn't "
- "work with ThreadSanitizer.\n"
- "Re-execing with stack size limited to %zd bytes.\n",
- kMaxStackSize);
- SetStackSizeLimitInBytes(kMaxStackSize);
- reexec = true;
- }
-
- if (!AddressSpaceIsUnlimited()) {
- Report("WARNING: Program is run with limited virtual address space,"
- " which wouldn't work with ThreadSanitizer.\n");
- Report("Re-execing with unlimited virtual address space.\n");
- SetAddressSpaceUnlimited();
- reexec = true;
- }
-#if SANITIZER_LINUX && defined(__aarch64__)
- // After patch "arm64: mm: support ARCH_MMAP_RND_BITS." is introduced in
- // linux kernel, the random gap between stack and mapped area is increased
- // from 128M to 36G on 39-bit aarch64. As it is almost impossible to cover
- // this big range, we should disable randomized virtual space on aarch64.
- int old_personality = personality(0xffffffff);
- if (old_personality != -1 && (old_personality & ADDR_NO_RANDOMIZE) == 0) {
- VReport(1, "WARNING: Program is run with randomized virtual address "
- "space, which wouldn't work with ThreadSanitizer.\n"
- "Re-execing with fixed virtual address space.\n");
- CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1);
- reexec = true;
- }
+# if SANITIZER_LINUX && (defined(__aarch64__) || defined(__loongarch_lp64))
// Initialize the xor key used in {sig}{set,long}jump.
InitializeLongjmpXorKey();
-#endif
- if (reexec)
- ReExec();
+# endif
+ }
+
+ // Earlier initialization steps already re-exec'ed until we got a compatible
+ // memory layout, so we don't expect any more issues here.
+ if (!CheckAndProtect(true, true, true)) {
+ Printf(
+ "FATAL: ThreadSanitizer: unexpectedly found incompatible memory "
+ "layout.\n");
+ Printf("FATAL: Please file a bug.\n");
+ Die();
}
- CheckAndProtect();
InitTlsSize();
#endif // !SANITIZER_GO
}
@@ -341,7 +419,7 @@ int ExtractResolvFDs(void *state, int *fds, int nfd) {
}
// Extract file descriptors passed via UNIX domain sockets.
-// This is requried to properly handle "open" of these fds.
+// This is required to properly handle "open" of these fds.
// see 'man recvmsg' and 'man 3 cmsg'.
int ExtractRecvmsgFDs(void *msgp, int *fds, int nfd) {
int res = 0;
@@ -382,6 +460,8 @@ static uptr UnmangleLongJmpSp(uptr mangled_sp) {
# else
return mangled_sp;
# endif
+#elif defined(__loongarch_lp64)
+ return mangled_sp ^ longjmp_xor_key;
#elif defined(__powerpc64__)
// Reverse of:
// ld r4, -28696(r13)
@@ -391,13 +471,15 @@ static uptr UnmangleLongJmpSp(uptr mangled_sp) {
return mangled_sp ^ xor_key;
#elif defined(__mips__)
return mangled_sp;
-#elif defined(__s390x__)
+# elif SANITIZER_RISCV64
+ return mangled_sp;
+# elif defined(__s390x__)
// tcbhead_t.stack_guard
uptr xor_key = ((uptr *)__builtin_thread_pointer())[5];
return mangled_sp ^ xor_key;
-#else
- #error "Unknown platform"
-#endif
+# else
+# error "Unknown platform"
+# endif
}
#if SANITIZER_NETBSD
@@ -409,17 +491,25 @@ static uptr UnmangleLongJmpSp(uptr mangled_sp) {
#elif defined(__powerpc__)
# define LONG_JMP_SP_ENV_SLOT 0
#elif SANITIZER_FREEBSD
-# define LONG_JMP_SP_ENV_SLOT 2
+# ifdef __aarch64__
+# define LONG_JMP_SP_ENV_SLOT 1
+# else
+# define LONG_JMP_SP_ENV_SLOT 2
+# endif
#elif SANITIZER_LINUX
# ifdef __aarch64__
# define LONG_JMP_SP_ENV_SLOT 13
+# elif defined(__loongarch__)
+# define LONG_JMP_SP_ENV_SLOT 1
# elif defined(__mips64)
# define LONG_JMP_SP_ENV_SLOT 1
-# elif defined(__s390x__)
-# define LONG_JMP_SP_ENV_SLOT 9
-# else
-# define LONG_JMP_SP_ENV_SLOT 6
-# endif
+# elif SANITIZER_RISCV64
+# define LONG_JMP_SP_ENV_SLOT 13
+# elif defined(__s390x__)
+# define LONG_JMP_SP_ENV_SLOT 9
+# else
+# define LONG_JMP_SP_ENV_SLOT 6
+# endif
#endif
uptr ExtractLongJmpSp(uptr *env) {
@@ -439,7 +529,11 @@ static void InitializeLongjmpXorKey() {
// 2. Retrieve vanilla/mangled SP.
uptr sp;
+#ifdef __loongarch__
+ asm("move %0, $sp" : "=r" (sp));
+#else
asm("mov %0, sp" : "=r" (sp));
+#endif
uptr mangled_sp = ((uptr *)&env)[LONG_JMP_SP_ENV_SLOT];
// 3. xor SPs to obtain key.
@@ -447,6 +541,8 @@ static void InitializeLongjmpXorKey() {
}
#endif
+extern "C" void __tsan_tls_initialization() {}
+
void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size) {
// Check that the thr object is in tls;
const uptr thr_beg = (uptr)thr;
@@ -456,9 +552,10 @@ void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size) {
CHECK_GE(thr_end, tls_addr);
CHECK_LE(thr_end, tls_addr + tls_size);
// Since the thr object is huge, skip it.
- MemoryRangeImitateWrite(thr, /*pc=*/2, tls_addr, thr_beg - tls_addr);
- MemoryRangeImitateWrite(thr, /*pc=*/2, thr_end,
- tls_addr + tls_size - thr_end);
+ const uptr pc = StackTrace::GetNextInstructionPc(
+ reinterpret_cast<uptr>(__tsan_tls_initialization));
+ MemoryRangeImitateWrite(thr, pc, tls_addr, thr_beg - tls_addr);
+ MemoryRangeImitateWrite(thr, pc, thr_end, tls_addr + tls_size - thr_end);
}
// Note: this function runs with async signals enabled,
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp
index d9719a136b21..07d83e1a9a9f 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp
@@ -12,7 +12,7 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
@@ -25,6 +25,7 @@
#include "tsan_rtl.h"
#include "tsan_flags.h"
+#include <limits.h>
#include <mach/mach.h>
#include <pthread.h>
#include <signal.h>
@@ -45,76 +46,86 @@
namespace __tsan {
#if !SANITIZER_GO
-static void *SignalSafeGetOrAllocate(uptr *dst, uptr size) {
- atomic_uintptr_t *a = (atomic_uintptr_t *)dst;
- void *val = (void *)atomic_load_relaxed(a);
- atomic_signal_fence(memory_order_acquire); // Turns the previous load into
- // acquire wrt signals.
- if (UNLIKELY(val == nullptr)) {
- val = (void *)internal_mmap(nullptr, size, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANON, -1, 0);
- CHECK(val);
- void *cmp = nullptr;
- if (!atomic_compare_exchange_strong(a, (uintptr_t *)&cmp, (uintptr_t)val,
- memory_order_acq_rel)) {
- internal_munmap(val, size);
- val = cmp;
- }
- }
- return val;
+static char main_thread_state[sizeof(ThreadState)] ALIGNED(
+ SANITIZER_CACHE_LINE_SIZE);
+static ThreadState *dead_thread_state;
+static pthread_key_t thread_state_key;
+
+// We rely on the following documented, but Darwin-specific, behavior to keep
+// the reference to the ThreadState object alive in TLS:
+// pthread_key_create man page:
+// If, after all the destructors have been called for all non-NULL values with
+// associated destructors, there are still some non-NULL values with
+// associated destructors, then the process is repeated. If, after at least
+// [PTHREAD_DESTRUCTOR_ITERATIONS] iterations of destructor calls for
+// outstanding non-NULL values, there are still some non-NULL values with
+// associated destructors, the implementation stops calling destructors.
+static_assert(PTHREAD_DESTRUCTOR_ITERATIONS == 4, "Small number of iterations");
+static void ThreadStateDestructor(void *thr) {
+ int res = pthread_setspecific(thread_state_key, thr);
+ CHECK_EQ(res, 0);
}
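The same keep-alive idiom in isolation (hypothetical names; it relies on the destructor-iteration behavior quoted above):

#include <pthread.h>

static pthread_key_t key;

// Re-installing the value from its own destructor keeps the slot non-NULL,
// so pthread re-runs the destructor on the next iteration and the value
// stays readable by late TLS users until the iteration limit is reached.
static void KeepAlive(void *value) { pthread_setspecific(key, value); }
// Setup: pthread_key_create(&key, KeepAlive);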
-// On OS X, accessing TLVs via __thread or manually by using pthread_key_* is
-// problematic, because there are several places where interceptors are called
-// when TLVs are not accessible (early process startup, thread cleanup, ...).
-// The following provides a "poor man's TLV" implementation, where we use the
-// shadow memory of the pointer returned by pthread_self() to store a pointer to
-// the ThreadState object. The main thread's ThreadState is stored separately
-// in a static variable, because we need to access it even before the
-// shadow memory is set up.
-static uptr main_thread_identity = 0;
-ALIGNED(64) static char main_thread_state[sizeof(ThreadState)];
-static ThreadState *main_thread_state_loc = (ThreadState *)main_thread_state;
-
-// We cannot use pthread_self() before libpthread has been initialized. Our
-// current heuristic for guarding this is checking `main_thread_identity` which
-// is only assigned in `__tsan::InitializePlatform`.
-static ThreadState **cur_thread_location() {
- if (main_thread_identity == 0)
- return &main_thread_state_loc;
- uptr thread_identity = (uptr)pthread_self();
- if (thread_identity == main_thread_identity)
- return &main_thread_state_loc;
- return (ThreadState **)MemToShadow(thread_identity);
+static void InitializeThreadStateStorage() {
+ int res;
+ CHECK_EQ(thread_state_key, 0);
+ res = pthread_key_create(&thread_state_key, ThreadStateDestructor);
+ CHECK_EQ(res, 0);
+ res = pthread_setspecific(thread_state_key, main_thread_state);
+ CHECK_EQ(res, 0);
+
+ auto dts = (ThreadState *)MmapOrDie(sizeof(ThreadState), "ThreadState");
+ dts->fast_state.SetIgnoreBit();
+ dts->ignore_interceptors = 1;
+ dts->is_dead = true;
+ const_cast<Tid &>(dts->tid) = kInvalidTid;
+ res = internal_mprotect(dts, sizeof(ThreadState), PROT_READ); // immutable
+ CHECK_EQ(res, 0);
+ dead_thread_state = dts;
}
ThreadState *cur_thread() {
- return (ThreadState *)SignalSafeGetOrAllocate(
- (uptr *)cur_thread_location(), sizeof(ThreadState));
+ // Some interceptors get called before libpthread has been initialized and in
+ // these cases we must avoid calling any pthread APIs.
+ if (UNLIKELY(!thread_state_key)) {
+ return (ThreadState *)main_thread_state;
+ }
+
+ // We only reach this line after InitializeThreadStateStorage() ran, i.e,
+ // after TSan (and therefore libpthread) have been initialized.
+ ThreadState *thr = (ThreadState *)pthread_getspecific(thread_state_key);
+ if (UNLIKELY(!thr)) {
+ thr = (ThreadState *)MmapOrDie(sizeof(ThreadState), "ThreadState");
+ int res = pthread_setspecific(thread_state_key, thr);
+ CHECK_EQ(res, 0);
+ }
+ return thr;
}
void set_cur_thread(ThreadState *thr) {
- *cur_thread_location() = thr;
+ int res = pthread_setspecific(thread_state_key, thr);
+ CHECK_EQ(res, 0);
}
-// TODO(kuba.brecka): This is not async-signal-safe. In particular, we call
-// munmap first and then clear `fake_tls`; if we receive a signal in between,
-// handler will try to access the unmapped ThreadState.
void cur_thread_finalize() {
- ThreadState **thr_state_loc = cur_thread_location();
- if (thr_state_loc == &main_thread_state_loc) {
+ ThreadState *thr = (ThreadState *)pthread_getspecific(thread_state_key);
+ CHECK(thr);
+ if (thr == (ThreadState *)main_thread_state) {
// Calling dispatch_main() or xpc_main() actually invokes pthread_exit to
// exit the main thread. Let's keep the main thread's ThreadState.
return;
}
- internal_munmap(*thr_state_loc, sizeof(ThreadState));
- *thr_state_loc = nullptr;
+ // Intercepted functions can still get called after cur_thread_finalize()
+ // (called from DestroyThreadState()), so put a fake thread state for "dead"
+ // threads. An alternative solution would be to release the ThreadState
+ // object from THREAD_DESTROY (which is delivered later and on the parent
+ // thread) instead of THREAD_TERMINATE.
+ int res = pthread_setspecific(thread_state_key, dead_thread_state);
+ CHECK_EQ(res, 0);
+ UnmapOrDie(thr, sizeof(ThreadState));
}
#endif
-void FlushShadowMemory() {
-}
-
static void RegionMemUsage(uptr start, uptr end, uptr *res, uptr *dirty) {
vm_address_t address = start;
vm_address_t end_address = end;
@@ -139,15 +150,13 @@ static void RegionMemUsage(uptr start, uptr end, uptr *res, uptr *dirty) {
*dirty = dirty_pages * GetPageSizeCached();
}
-void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
+void WriteMemoryProfile(char *buf, uptr buf_size, u64 uptime_ns) {
uptr shadow_res, shadow_dirty;
uptr meta_res, meta_dirty;
- uptr trace_res, trace_dirty;
RegionMemUsage(ShadowBeg(), ShadowEnd(), &shadow_res, &shadow_dirty);
RegionMemUsage(MetaShadowBeg(), MetaShadowEnd(), &meta_res, &meta_dirty);
- RegionMemUsage(TraceMemBeg(), TraceMemEnd(), &trace_res, &trace_dirty);
-#if !SANITIZER_GO
+# if !SANITIZER_GO
uptr low_res, low_dirty;
uptr high_res, high_dirty;
uptr heap_res, heap_dirty;
@@ -156,89 +165,70 @@ void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
RegionMemUsage(HeapMemBeg(), HeapMemEnd(), &heap_res, &heap_dirty);
#else // !SANITIZER_GO
uptr app_res, app_dirty;
- RegionMemUsage(AppMemBeg(), AppMemEnd(), &app_res, &app_dirty);
+ RegionMemUsage(LoAppMemBeg(), LoAppMemEnd(), &app_res, &app_dirty);
#endif
- StackDepotStats *stacks = StackDepotGetStats();
- internal_snprintf(buf, buf_size,
- "shadow (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
- "meta (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
- "traces (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
-#if !SANITIZER_GO
- "low app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
- "high app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
- "heap (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
-#else // !SANITIZER_GO
- "app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
-#endif
- "stacks: %zd unique IDs, %zd kB allocated\n"
- "threads: %zd total, %zd live\n"
- "------------------------------\n",
- ShadowBeg(), ShadowEnd(), shadow_res / 1024, shadow_dirty / 1024,
- MetaShadowBeg(), MetaShadowEnd(), meta_res / 1024, meta_dirty / 1024,
- TraceMemBeg(), TraceMemEnd(), trace_res / 1024, trace_dirty / 1024,
-#if !SANITIZER_GO
- LoAppMemBeg(), LoAppMemEnd(), low_res / 1024, low_dirty / 1024,
- HiAppMemBeg(), HiAppMemEnd(), high_res / 1024, high_dirty / 1024,
- HeapMemBeg(), HeapMemEnd(), heap_res / 1024, heap_dirty / 1024,
-#else // !SANITIZER_GO
- AppMemBeg(), AppMemEnd(), app_res / 1024, app_dirty / 1024,
-#endif
- stacks->n_uniq_ids, stacks->allocated / 1024,
- nthread, nlive);
+ StackDepotStats stacks = StackDepotGetStats();
+ uptr nthread, nlive;
+ ctx->thread_registry.GetNumberOfThreads(&nthread, &nlive);
+ internal_snprintf(
+ buf, buf_size,
+ "shadow (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+ "meta (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+# if !SANITIZER_GO
+ "low app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+ "high app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+ "heap (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+# else // !SANITIZER_GO
+ "app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+# endif
+ "stacks: %zd unique IDs, %zd kB allocated\n"
+ "threads: %zd total, %zd live\n"
+ "------------------------------\n",
+ ShadowBeg(), ShadowEnd(), shadow_res / 1024, shadow_dirty / 1024,
+ MetaShadowBeg(), MetaShadowEnd(), meta_res / 1024, meta_dirty / 1024,
+# if !SANITIZER_GO
+ LoAppMemBeg(), LoAppMemEnd(), low_res / 1024, low_dirty / 1024,
+ HiAppMemBeg(), HiAppMemEnd(), high_res / 1024, high_dirty / 1024,
+ HeapMemBeg(), HeapMemEnd(), heap_res / 1024, heap_dirty / 1024,
+# else // !SANITIZER_GO
+ LoAppMemBeg(), LoAppMemEnd(), app_res / 1024, app_dirty / 1024,
+# endif
+ stacks.n_uniq_ids, stacks.allocated / 1024, nthread, nlive);
}
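
For reference, each call to the new WriteMemoryProfile() renders one block in the shape dictated by the format string above; the addresses and sizes below are illustrative only:

    shadow   (0x0000008000000000-0x0000100000000000): resident 5124 kB, dirty 2048 kB
    meta     (0x0000001000000000-0x0000008000000000): resident 640 kB, dirty 320 kB
    low app  (0x0000000000000000-0x0000000100000000): resident 96 kB, dirty 48 kB
    high app (0x0000700000000000-0x0000800000000000): resident 1024 kB, dirty 512 kB
    heap     (0x0000600000000000-0x0000700000000000): resident 8192 kB, dirty 4096 kB
    stacks: 311 unique IDs, 112 kB allocated
    threads: 8 total, 5 live
    ------------------------------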
-#if !SANITIZER_GO
+# if !SANITIZER_GO
void InitializeShadowMemoryPlatform() { }
-// On OS X, GCD worker threads are created without a call to pthread_create. We
-// need to properly register these threads with ThreadCreate and ThreadStart.
-// These threads don't have a parent thread, as they are created "spuriously".
-// We're using a libpthread API that notifies us about a newly created thread.
-// The `thread == pthread_self()` check indicates this is actually a worker
-// thread. If it's just a regular thread, this hook is called on the parent
-// thread.
-typedef void (*pthread_introspection_hook_t)(unsigned int event,
- pthread_t thread, void *addr,
- size_t size);
-extern "C" pthread_introspection_hook_t pthread_introspection_hook_install(
- pthread_introspection_hook_t hook);
-static const uptr PTHREAD_INTROSPECTION_THREAD_CREATE = 1;
-static const uptr PTHREAD_INTROSPECTION_THREAD_TERMINATE = 3;
-static pthread_introspection_hook_t prev_pthread_introspection_hook;
-static void my_pthread_introspection_hook(unsigned int event, pthread_t thread,
- void *addr, size_t size) {
- if (event == PTHREAD_INTROSPECTION_THREAD_CREATE) {
- if (thread == pthread_self()) {
- // The current thread is a newly created GCD worker thread.
- ThreadState *thr = cur_thread();
- Processor *proc = ProcCreate();
- ProcWire(proc, thr);
- ThreadState *parent_thread_state = nullptr; // No parent.
- int tid = ThreadCreate(parent_thread_state, 0, (uptr)thread, true);
- CHECK_NE(tid, 0);
- ThreadStart(thr, tid, GetTid(), ThreadType::Worker);
- }
- } else if (event == PTHREAD_INTROSPECTION_THREAD_TERMINATE) {
- if (thread == pthread_self()) {
- ThreadState *thr = cur_thread();
- if (thr->tctx) {
- DestroyThreadState();
- }
- }
+// Register GCD worker threads, which are created without an observable call to
+// pthread_create().
+static void ThreadCreateCallback(uptr thread, bool gcd_worker) {
+ if (gcd_worker) {
+ ThreadState *thr = cur_thread();
+ Processor *proc = ProcCreate();
+ ProcWire(proc, thr);
+ ThreadState *parent_thread_state = nullptr; // No parent.
+ Tid tid = ThreadCreate(parent_thread_state, 0, (uptr)thread, true);
+ CHECK_NE(tid, kMainTid);
+ ThreadStart(thr, tid, GetTid(), ThreadType::Worker);
}
+}
- if (prev_pthread_introspection_hook != nullptr)
- prev_pthread_introspection_hook(event, thread, addr, size);
+// Destroy thread state for *all* threads.
+static void ThreadTerminateCallback(uptr thread) {
+ ThreadState *thr = cur_thread();
+ if (thr->tctx) {
+ DestroyThreadState();
+ }
}
#endif
void InitializePlatformEarly() {
-#if !SANITIZER_GO && !HAS_48_BIT_ADDRESS_SPACE
+# if !SANITIZER_GO && SANITIZER_IOS
uptr max_vm = GetMaxUserVirtualAddress() + 1;
- if (max_vm != Mapping::kHiAppMemEnd) {
+ if (max_vm != HiAppMemEnd()) {
Printf("ThreadSanitizer: unsupported vm address limit %p, expected %p.\n",
- max_vm, Mapping::kHiAppMemEnd);
+ (void *)max_vm, (void *)HiAppMemEnd());
Die();
}
#endif
@@ -249,13 +239,18 @@ static uptr longjmp_xor_key = 0;
void InitializePlatform() {
DisableCoreDumperIfNecessary();
#if !SANITIZER_GO
- CheckAndProtect();
+ if (!CheckAndProtect(true, true, true)) {
+ Printf("FATAL: ThreadSanitizer: found incompatible memory layout.\n");
+ Die();
+ }
- CHECK_EQ(main_thread_identity, 0);
- main_thread_identity = (uptr)pthread_self();
+ InitializeThreadStateStorage();
- prev_pthread_introspection_hook =
- pthread_introspection_hook_install(&my_pthread_introspection_hook);
+ ThreadEventCallbacks callbacks = {
+ .create = ThreadCreateCallback,
+ .terminate = ThreadTerminateCallback,
+ };
+ InstallPthreadIntrospectionHook(callbacks);
#endif
if (GetMacosAlignedVersion() >= MacosVersion(10, 14)) {
@@ -281,25 +276,14 @@ uptr ExtractLongJmpSp(uptr *env) {
}
#if !SANITIZER_GO
+extern "C" void __tsan_tls_initialization() {}
+
void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size) {
- // The pointer to the ThreadState object is stored in the shadow memory
- // of the tls.
- uptr tls_end = tls_addr + tls_size;
- uptr thread_identity = (uptr)pthread_self();
- if (thread_identity == main_thread_identity) {
- MemoryRangeImitateWrite(thr, /*pc=*/2, tls_addr, tls_size);
- } else {
- uptr thr_state_start = thread_identity;
- uptr thr_state_end = thr_state_start + sizeof(uptr);
- CHECK_GE(thr_state_start, tls_addr);
- CHECK_LE(thr_state_start, tls_addr + tls_size);
- CHECK_GE(thr_state_end, tls_addr);
- CHECK_LE(thr_state_end, tls_addr + tls_size);
- MemoryRangeImitateWrite(thr, /*pc=*/2, tls_addr,
- thr_state_start - tls_addr);
- MemoryRangeImitateWrite(thr, /*pc=*/2, thr_state_end,
- tls_end - thr_state_end);
- }
+ const uptr pc = StackTrace::GetNextInstructionPc(
+ reinterpret_cast<uptr>(__tsan_tls_initialization));
+ // Unlike Linux, we only store a pointer to the ThreadState object in TLS;
+ // just mark the entire range as written to.
+ MemoryRangeImitateWrite(thr, pc, tls_addr, tls_size);
}
#endif
@@ -320,4 +304,4 @@ int call_pthread_cancel_with_cleanup(int (*fn)(void *arg),
} // namespace __tsan
-#endif // SANITIZER_MAC
+#endif // SANITIZER_APPLE
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform_posix.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform_posix.cpp
index 1c6198cefcd7..7d5a992dc665 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform_posix.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform_posix.cpp
@@ -14,12 +14,14 @@
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_POSIX
-#include "sanitizer_common/sanitizer_common.h"
-#include "sanitizer_common/sanitizer_errno.h"
-#include "sanitizer_common/sanitizer_libc.h"
-#include "sanitizer_common/sanitizer_procmaps.h"
-#include "tsan_platform.h"
-#include "tsan_rtl.h"
+# include <dlfcn.h>
+
+# include "sanitizer_common/sanitizer_common.h"
+# include "sanitizer_common/sanitizer_errno.h"
+# include "sanitizer_common/sanitizer_libc.h"
+# include "sanitizer_common/sanitizer_procmaps.h"
+# include "tsan_platform.h"
+# include "tsan_rtl.h"
namespace __tsan {
@@ -29,7 +31,8 @@ static const char kShadowMemoryMappingHint[] =
"HINT: if %s is not supported in your environment, you may set "
"TSAN_OPTIONS=%s=0\n";
-static void DontDumpShadow(uptr addr, uptr size) {
+# if !SANITIZER_GO
+void DontDumpShadow(uptr addr, uptr size) {
if (common_flags()->use_madv_dontdump)
if (!DontDumpShadowMemory(addr, size)) {
Printf(kShadowMemoryMappingWarning, SanitizerToolName, addr, addr + size,
@@ -39,7 +42,6 @@ static void DontDumpShadow(uptr addr, uptr size) {
}
}
-#if !SANITIZER_GO
void InitializeShadowMemory() {
// Map memory shadow.
if (!MmapFixedSuperNoReserve(ShadowBeg(), ShadowEnd() - ShadowBeg(),
@@ -70,6 +72,11 @@ void InitializeShadowMemory() {
meta, meta + meta_size, meta_size >> 30);
InitializeShadowMemoryPlatform();
+
+ on_initialize = reinterpret_cast<void (*)(void)>(
+ dlsym(RTLD_DEFAULT, "__tsan_on_initialize"));
+ on_finalize =
+ reinterpret_cast<int (*)(int)>(dlsym(RTLD_DEFAULT, "__tsan_on_finalize"));
}
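
The two dlsym() lookups cache the optional user hooks once at startup; a binary opts in simply by defining them. A minimal sketch of a client translation unit (the hook names are the real interface; the bodies are placeholders):

    // Resolved by the runtime via dlsym(RTLD_DEFAULT, ...) during
    // InitializeShadowMemory(), then invoked from OnInitialize/OnFinalize.
    extern "C" void __tsan_on_initialize() {
      // Runs once when the runtime finishes initializing.
    }

    extern "C" int __tsan_on_finalize(int failed) {
      // Returning `failed` unchanged keeps the default exit status.
      return failed;
    }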
static bool TryProtectRange(uptr beg, uptr end) {
@@ -87,43 +94,68 @@ static void ProtectRange(uptr beg, uptr end) {
}
}
-void CheckAndProtect() {
+// CheckAndProtect checks whether the memory layout is compatible with TSan.
+// Optionally (if 'protect' is true), it marks the gaps between the app
+// memory regions as inaccessible.
+// 'ignore_heap' means heap memory allocations are not treated as a
+// conflict; set it based on whether CheckAndProtect runs before or after
+// the allocator has initialized the heap.
+bool CheckAndProtect(bool protect, bool ignore_heap, bool print_warnings) {
// Ensure that the binary is indeed compiled with -pie.
MemoryMappingLayout proc_maps(true);
MemoryMappedSegment segment;
while (proc_maps.Next(&segment)) {
- if (IsAppMem(segment.start)) continue;
+ if (segment.start >= HeapMemBeg() && segment.end <= HeapEnd()) {
+ if (ignore_heap) {
+ continue;
+ } else {
+ return false;
+ }
+ }
+
+ // Note: IsAppMem also returns true for heap memory, hence this check
+ // must come after the heap bounds check.
+ if (IsAppMem(segment.start) && IsAppMem(segment.end - 1))
+ continue;
+
+ // Guard page after the heap end
if (segment.start >= HeapMemEnd() && segment.start < HeapEnd()) continue;
+
if (segment.protection == 0) // Zero page or mprotected.
continue;
+
if (segment.start >= VdsoBeg()) // vdso
break;
- Printf("FATAL: ThreadSanitizer: unexpected memory mapping %p-%p\n",
- segment.start, segment.end);
- Die();
+
+ // Debug output can break tests. Suppress this message in most cases.
+ if (print_warnings)
+ Printf(
+ "WARNING: ThreadSanitizer: unexpected memory mapping 0x%zx-0x%zx\n",
+ segment.start, segment.end);
+
+ return false;
}
-#if defined(__aarch64__) && defined(__APPLE__) && !HAS_48_BIT_ADDRESS_SPACE
+ if (!protect)
+ return true;
+
+# if SANITIZER_IOS && !SANITIZER_IOSSIM
ProtectRange(HeapMemEnd(), ShadowBeg());
ProtectRange(ShadowEnd(), MetaShadowBeg());
- ProtectRange(MetaShadowEnd(), TraceMemBeg());
-#else
+ ProtectRange(MetaShadowEnd(), HiAppMemBeg());
+# else
ProtectRange(LoAppMemEnd(), ShadowBeg());
ProtectRange(ShadowEnd(), MetaShadowBeg());
-#ifdef TSAN_MID_APP_RANGE
- ProtectRange(MetaShadowEnd(), MidAppMemBeg());
- ProtectRange(MidAppMemEnd(), TraceMemBeg());
-#else
- ProtectRange(MetaShadowEnd(), TraceMemBeg());
-#endif
- // Memory for traces is mapped lazily in MapThreadTrace.
- // Protect the whole range for now, so that user does not map something here.
- ProtectRange(TraceMemBeg(), TraceMemEnd());
- ProtectRange(TraceMemEnd(), HeapMemBeg());
+ if (MidAppMemBeg()) {
+ ProtectRange(MetaShadowEnd(), MidAppMemBeg());
+ ProtectRange(MidAppMemEnd(), HeapMemBeg());
+ } else {
+ ProtectRange(MetaShadowEnd(), HeapMemBeg());
+ }
ProtectRange(HeapEnd(), HiAppMemBeg());
-#endif
+# endif
-#if defined(__s390x__)
+# if defined(__s390x__)
// Protect the rest of the address space.
const uptr user_addr_max_l4 = 0x0020000000000000ull;
const uptr user_addr_max_l5 = 0xfffffffffffff000ull;
@@ -132,8 +164,10 @@ void CheckAndProtect() {
// Older s390x kernels may not support 5-level page tables.
TryProtectRange(user_addr_max_l4, user_addr_max_l5);
#endif
+
+ return true;
}
-#endif
+# endif
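
For intuition, the ProtectRange() calls above pin the gaps between app regions with inaccessible reservations, so a stray mapping or access there fails fast. A rough portable sketch of the TryProtectRange() idea, assuming Linux's MAP_FIXED_NOREPLACE (the runtime uses its own MmapFixedNoAccess wrapper instead):

    #include <sys/mman.h>
    #include <cstdint>

    // Reserve [beg, end) as a PROT_NONE guard hole; report a conflict
    // instead of dying, like TryProtectRange() above.
    static bool try_protect_range(uintptr_t beg, uintptr_t end) {
      if (beg >= end)
        return true;
      void *p = mmap(reinterpret_cast<void *>(beg), end - beg, PROT_NONE,
                     MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);
      // On a conflict (or an old kernel ignoring NOREPLACE), the result
      // is MAP_FAILED or a different address.
      return p == reinterpret_cast<void *>(beg);
    }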
} // namespace __tsan
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform_windows.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform_windows.cpp
index 19437879a41c..eb8f354742f4 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform_windows.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_platform_windows.cpp
@@ -20,11 +20,7 @@
namespace __tsan {
-void FlushShadowMemory() {
-}
-
-void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
-}
+void WriteMemoryProfile(char *buf, uptr buf_size, u64 uptime_ns) {}
void InitializePlatformEarly() {
}
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_report.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_report.cpp
index 8ef9f0cd4fe8..22ba428cc58b 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_report.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_report.cpp
@@ -19,22 +19,6 @@
namespace __tsan {
-ReportStack::ReportStack() : frames(nullptr), suppressable(false) {}
-
-ReportStack *ReportStack::New() {
- void *mem = internal_alloc(MBlockReportStack, sizeof(ReportStack));
- return new(mem) ReportStack();
-}
-
-ReportLocation::ReportLocation(ReportLocationType type)
- : type(type), global(), heap_chunk_start(0), heap_chunk_size(0), tid(0),
- fd(0), suppressable(false), stack(nullptr) {}
-
-ReportLocation *ReportLocation::New(ReportLocationType type) {
- void *mem = internal_alloc(MBlockReportStack, sizeof(ReportLocation));
- return new(mem) ReportLocation(type);
-}
-
class Decorator: public __sanitizer::SanitizerCommonDecorator {
public:
Decorator() : SanitizerCommonDecorator() { }
@@ -68,7 +52,7 @@ ReportDesc::~ReportDesc() {
#if !SANITIZER_GO
const int kThreadBufSize = 32;
-const char *thread_name(char *buf, int tid) {
+const char *thread_name(char *buf, Tid tid) {
if (tid == kMainTid)
return "main thread";
internal_snprintf(buf, kThreadBufSize, "thread T%d", tid);
@@ -109,17 +93,13 @@ static const char *ReportTypeString(ReportType typ, uptr tag) {
return "signal handler spoils errno";
case ReportTypeDeadlock:
return "lock-order-inversion (potential deadlock)";
- // No default case so compiler warns us if we miss one
+ case ReportTypeMutexHeldWrongContext:
+ return "mutex held in the wrong context";
+ // No default case so compiler warns us if we miss one
}
UNREACHABLE("missing case");
}
-#if SANITIZER_MAC
-static const char *const kInterposedFunctionPrefix = "wrap_";
-#else
-static const char *const kInterposedFunctionPrefix = "__interceptor_";
-#endif
-
void PrintStack(const ReportStack *ent) {
if (ent == 0 || ent->frames == 0) {
Printf(" [failed to restore the stack]\n\n");
@@ -128,10 +108,10 @@ void PrintStack(const ReportStack *ent) {
SymbolizedStack *frame = ent->frames;
for (int i = 0; frame && frame->info.address; frame = frame->next, i++) {
InternalScopedString res;
- RenderFrame(&res, common_flags()->stack_trace_format, i,
- frame->info.address, &frame->info,
- common_flags()->symbolize_vs_style,
- common_flags()->strip_path_prefix, kInterposedFunctionPrefix);
+ StackTracePrinter::GetOrInit()->RenderFrame(
+ &res, common_flags()->stack_trace_format, i, frame->info.address,
+ &frame->info, common_flags()->symbolize_vs_style,
+ common_flags()->strip_path_prefix);
Printf("%s\n", res.data());
}
Printf("\n");
@@ -142,7 +122,7 @@ static void PrintMutexSet(Vector<ReportMopMutex> const& mset) {
if (i == 0)
Printf(" (mutexes:");
const ReportMopMutex m = mset[i];
- Printf(" %s M%llu", m.write ? "write" : "read", m.id);
+ Printf(" %s M%u", m.write ? "write" : "read", m.id);
Printf(i == mset.Size() - 1 ? ")" : ",");
}
}
@@ -189,23 +169,25 @@ static void PrintLocation(const ReportLocation *loc) {
if (loc->type == ReportLocationGlobal) {
const DataInfo &global = loc->global;
if (global.size != 0)
- Printf(" Location is global '%s' of size %zu at %p (%s+%p)\n\n",
- global.name, global.size, global.start,
+ Printf(" Location is global '%s' of size %zu at %p (%s+0x%zx)\n\n",
+ global.name, global.size, reinterpret_cast<void *>(global.start),
StripModuleName(global.module), global.module_offset);
else
- Printf(" Location is global '%s' at %p (%s+%p)\n\n", global.name,
- global.start, StripModuleName(global.module),
- global.module_offset);
+ Printf(" Location is global '%s' at %p (%s+0x%zx)\n\n", global.name,
+ reinterpret_cast<void *>(global.start),
+ StripModuleName(global.module), global.module_offset);
} else if (loc->type == ReportLocationHeap) {
char thrbuf[kThreadBufSize];
const char *object_type = GetObjectTypeFromTag(loc->external_tag);
if (!object_type) {
Printf(" Location is heap block of size %zu at %p allocated by %s:\n",
- loc->heap_chunk_size, loc->heap_chunk_start,
+ loc->heap_chunk_size,
+ reinterpret_cast<void *>(loc->heap_chunk_start),
thread_name(thrbuf, loc->tid));
} else {
Printf(" Location is %s of size %zu at %p allocated by %s:\n",
- object_type, loc->heap_chunk_size, loc->heap_chunk_start,
+ object_type, loc->heap_chunk_size,
+ reinterpret_cast<void *>(loc->heap_chunk_start),
thread_name(thrbuf, loc->tid));
}
print_stack = true;
@@ -214,8 +196,9 @@ static void PrintLocation(const ReportLocation *loc) {
} else if (loc->type == ReportLocationTLS) {
Printf(" Location is TLS of %s.\n\n", thread_name(thrbuf, loc->tid));
} else if (loc->type == ReportLocationFD) {
- Printf(" Location is file descriptor %d created by %s at:\n",
- loc->fd, thread_name(thrbuf, loc->tid));
+ Printf(" Location is file descriptor %d %s by %s at:\n", loc->fd,
+ loc->fd_closed ? "destroyed" : "created",
+ thread_name(thrbuf, loc->tid));
print_stack = true;
}
Printf("%s", d.Default());
@@ -225,27 +208,23 @@ static void PrintLocation(const ReportLocation *loc) {
static void PrintMutexShort(const ReportMutex *rm, const char *after) {
Decorator d;
- Printf("%sM%zd%s%s", d.Mutex(), rm->id, d.Default(), after);
+ Printf("%sM%d%s%s", d.Mutex(), rm->id, d.Default(), after);
}
static void PrintMutexShortWithAddress(const ReportMutex *rm,
const char *after) {
Decorator d;
- Printf("%sM%zd (%p)%s%s", d.Mutex(), rm->id, rm->addr, d.Default(), after);
+ Printf("%sM%d (%p)%s%s", d.Mutex(), rm->id,
+ reinterpret_cast<void *>(rm->addr), d.Default(), after);
}
static void PrintMutex(const ReportMutex *rm) {
Decorator d;
- if (rm->destroyed) {
- Printf("%s", d.Mutex());
- Printf(" Mutex M%llu is already destroyed.\n\n", rm->id);
- Printf("%s", d.Default());
- } else {
- Printf("%s", d.Mutex());
- Printf(" Mutex M%llu (%p) created at:\n", rm->id, rm->addr);
- Printf("%s", d.Default());
- PrintStack(rm->stack);
- }
+ Printf("%s", d.Mutex());
+ Printf(" Mutex M%u (%p) created at:\n", rm->id,
+ reinterpret_cast<void *>(rm->addr));
+ Printf("%s", d.Default());
+ PrintStack(rm->stack);
}
static void PrintThread(const ReportThread *rt) {
@@ -259,12 +238,13 @@ static void PrintThread(const ReportThread *rt) {
char thrbuf[kThreadBufSize];
const char *thread_status = rt->running ? "running" : "finished";
if (rt->thread_type == ThreadType::Worker) {
- Printf(" (tid=%zu, %s) is a GCD worker thread\n", rt->os_id, thread_status);
+ Printf(" (tid=%llu, %s) is a GCD worker thread\n", rt->os_id,
+ thread_status);
Printf("\n");
Printf("%s", d.Default());
return;
}
- Printf(" (tid=%zu, %s) created by %s", rt->os_id, thread_status,
+ Printf(" (tid=%llu, %s) created by %s", rt->os_id, thread_status,
thread_name(thrbuf, rt->parent_tid));
if (rt->stack)
Printf(" at:");
@@ -293,25 +273,10 @@ static ReportStack *ChooseSummaryStack(const ReportDesc *rep) {
return 0;
}
-static bool FrameIsInternal(const SymbolizedStack *frame) {
- if (frame == 0)
- return false;
- const char *file = frame->info.file;
- const char *module = frame->info.module;
- if (file != 0 &&
- (internal_strstr(file, "tsan_interceptors_posix.cpp") ||
- internal_strstr(file, "sanitizer_common_interceptors.inc") ||
- internal_strstr(file, "tsan_interface_")))
- return true;
- if (module != 0 && (internal_strstr(module, "libclang_rt.tsan_")))
- return true;
- return false;
-}
-
-static SymbolizedStack *SkipTsanInternalFrames(SymbolizedStack *frames) {
- while (FrameIsInternal(frames) && frames->next)
- frames = frames->next;
- return frames;
+static const SymbolizedStack *SkipTsanInternalFrames(SymbolizedStack *frames) {
+ if (const SymbolizedStack *f = SkipInternalFrames(frames))
+ return f;
+ return frames; // Fallback to the top frame.
}
void PrintReport(const ReportDesc *rep) {
@@ -323,6 +288,9 @@ void PrintReport(const ReportDesc *rep) {
(int)internal_getpid());
Printf("%s", d.Default());
+ if (rep->typ == ReportTypeErrnoInSignal)
+ Printf(" Signal %u handler invoked at:\n", rep->signum);
+
if (rep->typ == ReportTypeDeadlock) {
char thrbuf[kThreadBufSize];
Printf(" Cycle in lock order graph: ");
@@ -382,7 +350,7 @@ void PrintReport(const ReportDesc *rep) {
Printf(" And %d more similar thread leaks.\n\n", rep->count - 1);
if (ReportStack *stack = ChooseSummaryStack(rep)) {
- if (SymbolizedStack *frame = SkipTsanInternalFrames(stack->frames))
+ if (const SymbolizedStack *frame = SkipTsanInternalFrames(stack->frames))
ReportErrorSummary(rep_typ_str, frame->info);
}
@@ -394,7 +362,7 @@ void PrintReport(const ReportDesc *rep) {
#else // #if !SANITIZER_GO
-const u32 kMainGoroutineId = 1;
+const Tid kMainGoroutineId = 1;
void PrintStack(const ReportStack *ent) {
if (ent == 0 || ent->frames == 0) {
@@ -405,16 +373,17 @@ void PrintStack(const ReportStack *ent) {
for (int i = 0; frame; frame = frame->next, i++) {
const AddressInfo &info = frame->info;
Printf(" %s()\n %s:%d +0x%zx\n", info.function,
- StripPathPrefix(info.file, common_flags()->strip_path_prefix),
- info.line, (void *)info.module_offset);
+ StripPathPrefix(info.file, common_flags()->strip_path_prefix),
+ info.line, info.module_offset);
}
}
static void PrintMop(const ReportMop *mop, bool first) {
Printf("\n");
Printf("%s at %p by ",
- (first ? (mop->write ? "Write" : "Read")
- : (mop->write ? "Previous write" : "Previous read")), mop->addr);
+ (first ? (mop->write ? "Write" : "Read")
+ : (mop->write ? "Previous write" : "Previous read")),
+ reinterpret_cast<void *>(mop->addr));
if (mop->tid == kMainGoroutineId)
Printf("main goroutine:\n");
else
@@ -426,8 +395,8 @@ static void PrintLocation(const ReportLocation *loc) {
switch (loc->type) {
case ReportLocationHeap: {
Printf("\n");
- Printf("Heap block of size %zu at %p allocated by ",
- loc->heap_chunk_size, loc->heap_chunk_start);
+ Printf("Heap block of size %zu at %p allocated by ", loc->heap_chunk_size,
+ reinterpret_cast<void *>(loc->heap_chunk_start));
if (loc->tid == kMainGoroutineId)
Printf("main goroutine:\n");
else
@@ -438,8 +407,9 @@ static void PrintLocation(const ReportLocation *loc) {
case ReportLocationGlobal: {
Printf("\n");
Printf("Global var %s of size %zu at %p declared at %s:%zu\n",
- loc->global.name, loc->global.size, loc->global.start,
- loc->global.file, loc->global.line);
+ loc->global.name, loc->global.size,
+ reinterpret_cast<void *>(loc->global.start), loc->global.file,
+ loc->global.line);
break;
}
default:
@@ -469,13 +439,13 @@ void PrintReport(const ReportDesc *rep) {
} else if (rep->typ == ReportTypeDeadlock) {
Printf("WARNING: DEADLOCK\n");
for (uptr i = 0; i < rep->mutexes.Size(); i++) {
- Printf("Goroutine %d lock mutex %d while holding mutex %d:\n",
- 999, rep->mutexes[i]->id,
- rep->mutexes[(i+1) % rep->mutexes.Size()]->id);
+ Printf("Goroutine %d lock mutex %u while holding mutex %u:\n", 999,
+ rep->mutexes[i]->id,
+ rep->mutexes[(i + 1) % rep->mutexes.Size()]->id);
PrintStack(rep->stacks[2*i]);
Printf("\n");
- Printf("Mutex %d was previously locked here:\n",
- rep->mutexes[(i+1) % rep->mutexes.Size()]->id);
+ Printf("Mutex %u was previously locked here:\n",
+ rep->mutexes[(i + 1) % rep->mutexes.Size()]->id);
PrintStack(rep->stacks[2*i + 1]);
Printf("\n");
}
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_report.h b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_report.h
index b4e4d8989379..bfe470797f8f 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_report.h
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_report.h
@@ -34,20 +34,17 @@ enum ReportType {
ReportTypeMutexBadReadUnlock,
ReportTypeSignalUnsafe,
ReportTypeErrnoInSignal,
- ReportTypeDeadlock
+ ReportTypeDeadlock,
+ ReportTypeMutexHeldWrongContext
};
struct ReportStack {
- SymbolizedStack *frames;
- bool suppressable;
- static ReportStack *New();
-
- private:
- ReportStack();
+ SymbolizedStack *frames = nullptr;
+ bool suppressable = false;
};
struct ReportMopMutex {
- u64 id;
+ int id;
bool write;
};
@@ -73,35 +70,31 @@ enum ReportLocationType {
};
struct ReportLocation {
- ReportLocationType type;
- DataInfo global;
- uptr heap_chunk_start;
- uptr heap_chunk_size;
- uptr external_tag;
- int tid;
- int fd;
- bool suppressable;
- ReportStack *stack;
-
- static ReportLocation *New(ReportLocationType type);
- private:
- explicit ReportLocation(ReportLocationType type);
+ ReportLocationType type = ReportLocationGlobal;
+ DataInfo global = {};
+ uptr heap_chunk_start = 0;
+ uptr heap_chunk_size = 0;
+ uptr external_tag = 0;
+ Tid tid = kInvalidTid;
+ int fd = 0;
+ bool fd_closed = false;
+ bool suppressable = false;
+ ReportStack *stack = nullptr;
};
struct ReportThread {
- int id;
+ Tid id;
tid_t os_id;
bool running;
ThreadType thread_type;
char *name;
- u32 parent_tid;
+ Tid parent_tid;
ReportStack *stack;
};
struct ReportMutex {
- u64 id;
+ int id;
uptr addr;
- bool destroyed;
ReportStack *stack;
};
@@ -114,9 +107,10 @@ class ReportDesc {
Vector<ReportLocation*> locs;
Vector<ReportMutex*> mutexes;
Vector<ReportThread*> threads;
- Vector<int> unique_tids;
+ Vector<Tid> unique_tids;
ReportStack *sleep;
int count;
+ int signum = 0;
ReportDesc();
~ReportDesc();
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
index a21da9c81c6f..fd9441dfcb53 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
@@ -16,6 +16,7 @@
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_file.h"
+#include "sanitizer_common/sanitizer_interface_internal.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
@@ -28,29 +29,28 @@
#include "tsan_symbolize.h"
#include "ubsan/ubsan_init.h"
-#ifdef __SSE3__
-// <emmintrin.h> transitively includes <stdlib.h>,
-// and it's prohibited to include std headers into tsan runtime.
-// So we do this dirty trick.
-#define _MM_MALLOC_H_INCLUDED
-#define __MM_MALLOC_H
-#include <emmintrin.h>
-typedef __m128i m128;
-#endif
-
volatile int __tsan_resumed = 0;
extern "C" void __tsan_resume() {
__tsan_resumed = 1;
}
+SANITIZER_WEAK_DEFAULT_IMPL
+void __tsan_test_only_on_fork() {}
+
namespace __tsan {
-#if !SANITIZER_GO && !SANITIZER_MAC
+#if !SANITIZER_GO
+void (*on_initialize)(void);
+int (*on_finalize)(int);
+#endif
+
+#if !SANITIZER_GO && !SANITIZER_APPLE
__attribute__((tls_model("initial-exec")))
-THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
+THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(
+ SANITIZER_CACHE_LINE_SIZE);
#endif
-static char ctx_placeholder[sizeof(Context)] ALIGNED(64);
+static char ctx_placeholder[sizeof(Context)] ALIGNED(SANITIZER_CACHE_LINE_SIZE);
Context *ctx;
// Can be overridden by a front-end.
@@ -58,113 +58,404 @@ Context *ctx;
bool OnFinalize(bool failed);
void OnInitialize();
#else
-#include <dlfcn.h>
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnFinalize(bool failed) {
-#if !SANITIZER_GO
- if (auto *ptr = dlsym(RTLD_DEFAULT, "__tsan_on_finalize"))
- return reinterpret_cast<decltype(&__tsan_on_finalize)>(ptr)(failed);
-#endif
+# if !SANITIZER_GO
+ if (on_finalize)
+ return on_finalize(failed);
+# endif
return failed;
}
+
SANITIZER_WEAK_CXX_DEFAULT_IMPL
void OnInitialize() {
-#if !SANITIZER_GO
- if (auto *ptr = dlsym(RTLD_DEFAULT, "__tsan_on_initialize")) {
- return reinterpret_cast<decltype(&__tsan_on_initialize)>(ptr)();
- }
+# if !SANITIZER_GO
+ if (on_initialize)
+ on_initialize();
+# endif
+}
#endif
+
+static TracePart* TracePartAlloc(ThreadState* thr) {
+ TracePart* part = nullptr;
+ {
+ Lock lock(&ctx->slot_mtx);
+ uptr max_parts = Trace::kMinParts + flags()->history_size;
+ Trace* trace = &thr->tctx->trace;
+ if (trace->parts_allocated == max_parts ||
+ ctx->trace_part_finished_excess) {
+ part = ctx->trace_part_recycle.PopFront();
+ DPrintf("#%d: TracePartAlloc: part=%p\n", thr->tid, part);
+ if (part && part->trace) {
+ Trace* trace1 = part->trace;
+ Lock trace_lock(&trace1->mtx);
+ part->trace = nullptr;
+ TracePart* part1 = trace1->parts.PopFront();
+ CHECK_EQ(part, part1);
+ if (trace1->parts_allocated > trace1->parts.Size()) {
+ ctx->trace_part_finished_excess +=
+ trace1->parts_allocated - trace1->parts.Size();
+ trace1->parts_allocated = trace1->parts.Size();
+ }
+ }
+ }
+ if (trace->parts_allocated < max_parts) {
+ trace->parts_allocated++;
+ if (ctx->trace_part_finished_excess)
+ ctx->trace_part_finished_excess--;
+ }
+ if (!part)
+ ctx->trace_part_total_allocated++;
+ else if (ctx->trace_part_recycle_finished)
+ ctx->trace_part_recycle_finished--;
+ }
+ if (!part)
+ part = new (MmapOrDie(sizeof(*part), "TracePart")) TracePart();
+ return part;
+}
+
+static void TracePartFree(TracePart* part) SANITIZER_REQUIRES(ctx->slot_mtx) {
+ DCHECK(part->trace);
+ part->trace = nullptr;
+ ctx->trace_part_recycle.PushFront(part);
+}
+
+void TraceResetForTesting() {
+ Lock lock(&ctx->slot_mtx);
+ while (auto* part = ctx->trace_part_recycle.PopFront()) {
+ if (auto trace = part->trace)
+ CHECK_EQ(trace->parts.PopFront(), part);
+ UnmapOrDie(part, sizeof(*part));
+ }
+ ctx->trace_part_total_allocated = 0;
+ ctx->trace_part_recycle_finished = 0;
+ ctx->trace_part_finished_excess = 0;
}
+
+static void DoResetImpl(uptr epoch) {
+ ThreadRegistryLock lock0(&ctx->thread_registry);
+ Lock lock1(&ctx->slot_mtx);
+ CHECK_EQ(ctx->global_epoch, epoch);
+ ctx->global_epoch++;
+ CHECK(!ctx->resetting);
+ ctx->resetting = true;
+ for (u32 i = ctx->thread_registry.NumThreadsLocked(); i--;) {
+ ThreadContext* tctx = (ThreadContext*)ctx->thread_registry.GetThreadLocked(
+ static_cast<Tid>(i));
+ // Potentially we could purge all ThreadStatusDead threads from the
+ // registry. Since we reset all shadow, they can't race with anything
+ // anymore. However, their tids can still be stored in some aux places
+ // (e.g. tid of thread that created something).
+ auto trace = &tctx->trace;
+ Lock lock(&trace->mtx);
+ bool attached = tctx->thr && tctx->thr->slot;
+ auto parts = &trace->parts;
+ bool local = false;
+ while (!parts->Empty()) {
+ auto part = parts->Front();
+ local = local || part == trace->local_head;
+ if (local)
+ CHECK(!ctx->trace_part_recycle.Queued(part));
+ else
+ ctx->trace_part_recycle.Remove(part);
+ if (attached && parts->Size() == 1) {
+ // The thread is running and this is the last/current part.
+ // Set the trace position to the end of the current part
+ // to force the thread to call SwitchTracePart and re-attach
+ // to a new slot and allocate a new trace part.
+ // Note: the thread is concurrently modifying the position as well,
+ // so this is only best-effort. The thread can only modify position
+ // within this part, because switching parts is protected by
+ // slot/trace mutexes that we hold here.
+ atomic_store_relaxed(
+ &tctx->thr->trace_pos,
+ reinterpret_cast<uptr>(&part->events[TracePart::kSize]));
+ break;
+ }
+ parts->Remove(part);
+ TracePartFree(part);
+ }
+ CHECK_LE(parts->Size(), 1);
+ trace->local_head = parts->Front();
+ if (tctx->thr && !tctx->thr->slot) {
+ atomic_store_relaxed(&tctx->thr->trace_pos, 0);
+ tctx->thr->trace_prev_pc = 0;
+ }
+ if (trace->parts_allocated > trace->parts.Size()) {
+ ctx->trace_part_finished_excess +=
+ trace->parts_allocated - trace->parts.Size();
+ trace->parts_allocated = trace->parts.Size();
+ }
+ }
+ while (ctx->slot_queue.PopFront()) {
+ }
+ for (auto& slot : ctx->slots) {
+ slot.SetEpoch(kEpochZero);
+ slot.journal.Reset();
+ slot.thr = nullptr;
+ ctx->slot_queue.PushBack(&slot);
+ }
+
+ DPrintf("Resetting shadow...\n");
+ auto shadow_begin = ShadowBeg();
+ auto shadow_end = ShadowEnd();
+#if SANITIZER_GO
+ CHECK_NE(0, ctx->mapped_shadow_begin);
+ shadow_begin = ctx->mapped_shadow_begin;
+ shadow_end = ctx->mapped_shadow_end;
+ VPrintf(2, "shadow_begin-shadow_end: (0x%zx-0x%zx)\n",
+ shadow_begin, shadow_end);
#endif
-static ALIGNED(64) char thread_registry_placeholder[sizeof(ThreadRegistry)];
-
-static ThreadContextBase *CreateThreadContext(u32 tid) {
- // Map thread trace when context is created.
- char name[50];
- internal_snprintf(name, sizeof(name), "trace %u", tid);
- MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event), name);
- const uptr hdr = GetThreadTraceHeader(tid);
- internal_snprintf(name, sizeof(name), "trace header %u", tid);
- MapThreadTrace(hdr, sizeof(Trace), name);
- new((void*)hdr) Trace();
- // We are going to use only a small part of the trace with the default
- // value of history_size. However, the constructor writes to the whole trace.
- // Release the unused part.
- uptr hdr_end = hdr + sizeof(Trace);
- hdr_end -= sizeof(TraceHeader) * (kTraceParts - TraceParts());
- hdr_end = RoundUp(hdr_end, GetPageSizeCached());
- if (hdr_end < hdr + sizeof(Trace)) {
- ReleaseMemoryPagesToOS(hdr_end, hdr + sizeof(Trace));
- uptr unused = hdr + sizeof(Trace) - hdr_end;
- if (hdr_end != (uptr)MmapFixedNoAccess(hdr_end, unused)) {
- Report("ThreadSanitizer: failed to mprotect(%p, %p)\n",
- hdr_end, unused);
- CHECK("unable to mprotect" && 0);
+#if SANITIZER_WINDOWS
+ auto resetFailed =
+ !ZeroMmapFixedRegion(shadow_begin, shadow_end - shadow_begin);
+#else
+ auto resetFailed =
+ !MmapFixedSuperNoReserve(shadow_begin, shadow_end-shadow_begin, "shadow");
+# if !SANITIZER_GO
+ DontDumpShadow(shadow_begin, shadow_end - shadow_begin);
+# endif
+#endif
+ if (resetFailed) {
+ Printf("failed to reset shadow memory\n");
+ Die();
+ }
+ DPrintf("Resetting meta shadow...\n");
+ ctx->metamap.ResetClocks();
+ StoreShadow(&ctx->last_spurious_race, Shadow::kEmpty);
+ ctx->resetting = false;
+}
+
+// Clang does not understand locking all slots in the loop:
+// error: expecting mutex 'slot.mtx' to be held at start of each loop
+void DoReset(ThreadState* thr, uptr epoch) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
+ for (auto& slot : ctx->slots) {
+ slot.mtx.Lock();
+ if (UNLIKELY(epoch == 0))
+ epoch = ctx->global_epoch;
+ if (UNLIKELY(epoch != ctx->global_epoch)) {
+ // Epoch can't change once we've locked the first slot.
+ CHECK_EQ(slot.sid, 0);
+ slot.mtx.Unlock();
+ return;
+ }
+ }
+ DPrintf("#%d: DoReset epoch=%lu\n", thr ? thr->tid : -1, epoch);
+ DoResetImpl(epoch);
+ for (auto& slot : ctx->slots) slot.mtx.Unlock();
+}
+
+void FlushShadowMemory() { DoReset(nullptr, 0); }
+
+static TidSlot* FindSlotAndLock(ThreadState* thr)
+ SANITIZER_ACQUIRE(thr->slot->mtx) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
+ CHECK(!thr->slot);
+ TidSlot* slot = nullptr;
+ for (;;) {
+ uptr epoch;
+ {
+ Lock lock(&ctx->slot_mtx);
+ epoch = ctx->global_epoch;
+ if (slot) {
+ // This is an exhausted slot from the previous iteration.
+ if (ctx->slot_queue.Queued(slot))
+ ctx->slot_queue.Remove(slot);
+ thr->slot_locked = false;
+ slot->mtx.Unlock();
+ }
+ for (;;) {
+ slot = ctx->slot_queue.PopFront();
+ if (!slot)
+ break;
+ if (slot->epoch() != kEpochLast) {
+ ctx->slot_queue.PushBack(slot);
+ break;
+ }
+ }
+ }
+ if (!slot) {
+ DoReset(thr, epoch);
+ continue;
}
+ slot->mtx.Lock();
+ CHECK(!thr->slot_locked);
+ thr->slot_locked = true;
+ if (slot->thr) {
+ DPrintf("#%d: preempting sid=%d tid=%d\n", thr->tid, (u32)slot->sid,
+ slot->thr->tid);
+ slot->SetEpoch(slot->thr->fast_state.epoch());
+ slot->thr = nullptr;
+ }
+ if (slot->epoch() != kEpochLast)
+ return slot;
}
- void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
- return new(mem) ThreadContext(tid);
}
+void SlotAttachAndLock(ThreadState* thr) {
+ TidSlot* slot = FindSlotAndLock(thr);
+ DPrintf("#%d: SlotAttach: slot=%u\n", thr->tid, static_cast<int>(slot->sid));
+ CHECK(!slot->thr);
+ CHECK(!thr->slot);
+ slot->thr = thr;
+ thr->slot = slot;
+ Epoch epoch = EpochInc(slot->epoch());
+ CHECK(!EpochOverflow(epoch));
+ slot->SetEpoch(epoch);
+ thr->fast_state.SetSid(slot->sid);
+ thr->fast_state.SetEpoch(epoch);
+ if (thr->slot_epoch != ctx->global_epoch) {
+ thr->slot_epoch = ctx->global_epoch;
+ thr->clock.Reset();
#if !SANITIZER_GO
-static const u32 kThreadQuarantineSize = 16;
-#else
-static const u32 kThreadQuarantineSize = 64;
+ thr->last_sleep_stack_id = kInvalidStackID;
+ thr->last_sleep_clock.Reset();
+#endif
+ }
+ thr->clock.Set(slot->sid, epoch);
+ slot->journal.PushBack({thr->tid, epoch});
+}
+
+static void SlotDetachImpl(ThreadState* thr, bool exiting) {
+ TidSlot* slot = thr->slot;
+ thr->slot = nullptr;
+ if (thr != slot->thr) {
+ slot = nullptr; // we don't own the slot anymore
+ if (thr->slot_epoch != ctx->global_epoch) {
+ TracePart* part = nullptr;
+ auto* trace = &thr->tctx->trace;
+ {
+ Lock l(&trace->mtx);
+ auto* parts = &trace->parts;
+ // The trace can be completely empty in the unlikely event that
+ // the thread is preempted right after it acquired the slot
+ // in ThreadStart and has not traced any events yet.
+ CHECK_LE(parts->Size(), 1);
+ part = parts->PopFront();
+ thr->tctx->trace.local_head = nullptr;
+ atomic_store_relaxed(&thr->trace_pos, 0);
+ thr->trace_prev_pc = 0;
+ }
+ if (part) {
+ Lock l(&ctx->slot_mtx);
+ TracePartFree(part);
+ }
+ }
+ return;
+ }
+ CHECK(exiting || thr->fast_state.epoch() == kEpochLast);
+ slot->SetEpoch(thr->fast_state.epoch());
+ slot->thr = nullptr;
+}
+
+void SlotDetach(ThreadState* thr) {
+ Lock lock(&thr->slot->mtx);
+ SlotDetachImpl(thr, true);
+}
+
+void SlotLock(ThreadState* thr) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
+ DCHECK(!thr->slot_locked);
+#if SANITIZER_DEBUG
+ // Check these mutexes are not locked.
+ // We can call DoReset from SlotAttachAndLock, which will lock
+ // these mutexes, but it happens only every once in a while.
+ { ThreadRegistryLock lock(&ctx->thread_registry); }
+ { Lock lock(&ctx->slot_mtx); }
#endif
+ TidSlot* slot = thr->slot;
+ slot->mtx.Lock();
+ thr->slot_locked = true;
+ if (LIKELY(thr == slot->thr && thr->fast_state.epoch() != kEpochLast))
+ return;
+ SlotDetachImpl(thr, false);
+ thr->slot_locked = false;
+ slot->mtx.Unlock();
+ SlotAttachAndLock(thr);
+}
+
+void SlotUnlock(ThreadState* thr) {
+ DCHECK(thr->slot_locked);
+ thr->slot_locked = false;
+ thr->slot->mtx.Unlock();
+}
Context::Context()
: initialized(),
report_mtx(MutexTypeReport),
nreported(),
- nmissed_expected(),
- thread_registry(new (thread_registry_placeholder) ThreadRegistry(
- CreateThreadContext, kMaxTid, kThreadQuarantineSize, kMaxTidReuse)),
+ thread_registry([](Tid tid) -> ThreadContextBase* {
+ return new (Alloc(sizeof(ThreadContext))) ThreadContext(tid);
+ }),
racy_mtx(MutexTypeRacy),
racy_stacks(),
- racy_addresses(),
fired_suppressions_mtx(MutexTypeFired),
- clock_alloc(LINKER_INITIALIZED, "clock allocator") {
+ slot_mtx(MutexTypeSlots),
+ resetting() {
fired_suppressions.reserve(8);
+ for (uptr i = 0; i < ARRAY_SIZE(slots); i++) {
+ TidSlot* slot = &slots[i];
+ slot->sid = static_cast<Sid>(i);
+ slot_queue.PushBack(slot);
+ }
+ global_epoch = 1;
}
+TidSlot::TidSlot() : mtx(MutexTypeSlot) {}
+
// The objects are allocated in TLS, so one may rely on zero-initialization.
-ThreadState::ThreadState(Context *ctx, u32 tid, int unique_id, u64 epoch,
- unsigned reuse_count, uptr stk_addr, uptr stk_size,
- uptr tls_addr, uptr tls_size)
- : fast_state(tid, epoch)
- // Do not touch these, rely on zero initialization,
- // they may be accessed before the ctor.
- // , ignore_reads_and_writes()
- // , ignore_interceptors()
- ,
- clock(tid, reuse_count)
-#if !SANITIZER_GO
- ,
- jmp_bufs()
-#endif
- ,
- tid(tid),
- unique_id(unique_id),
- stk_addr(stk_addr),
- stk_size(stk_size),
- tls_addr(tls_addr),
- tls_size(tls_size)
+ThreadState::ThreadState(Tid tid)
+ // Do not touch these, rely on zero initialization,
+ // they may be accessed before the ctor.
+ // ignore_reads_and_writes()
+ // ignore_interceptors()
+ : tid(tid) {
+ CHECK_EQ(reinterpret_cast<uptr>(this) % SANITIZER_CACHE_LINE_SIZE, 0);
#if !SANITIZER_GO
- ,
- last_sleep_clock(tid)
+ // C/C++ uses fixed size shadow stack.
+ const int kInitStackSize = kShadowStackSize;
+ shadow_stack = static_cast<uptr*>(
+ MmapNoReserveOrDie(kInitStackSize * sizeof(uptr), "shadow stack"));
+ SetShadowRegionHugePageMode(reinterpret_cast<uptr>(shadow_stack),
+ kInitStackSize * sizeof(uptr));
+#else
+ // Go uses malloc-allocated shadow stack with dynamic size.
+ const int kInitStackSize = 8;
+ shadow_stack = static_cast<uptr*>(Alloc(kInitStackSize * sizeof(uptr)));
#endif
-{
+ shadow_stack_pos = shadow_stack;
+ shadow_stack_end = shadow_stack + kInitStackSize;
}
#if !SANITIZER_GO
-static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
- uptr n_threads;
- uptr n_running_threads;
- ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads);
+void MemoryProfiler(u64 uptime) {
+ if (ctx->memprof_fd == kInvalidFd)
+ return;
InternalMmapVector<char> buf(4096);
- WriteMemoryProfile(buf.data(), buf.size(), n_threads, n_running_threads);
- WriteToFile(fd, buf.data(), internal_strlen(buf.data()));
+ WriteMemoryProfile(buf.data(), buf.size(), uptime);
+ WriteToFile(ctx->memprof_fd, buf.data(), internal_strlen(buf.data()));
+}
+
+static bool InitializeMemoryProfiler() {
+ ctx->memprof_fd = kInvalidFd;
+ const char *fname = flags()->profile_memory;
+ if (!fname || !fname[0])
+ return false;
+ if (internal_strcmp(fname, "stdout") == 0) {
+ ctx->memprof_fd = 1;
+ } else if (internal_strcmp(fname, "stderr") == 0) {
+ ctx->memprof_fd = 2;
+ } else {
+ InternalScopedString filename;
+ filename.AppendF("%s.%d", fname, (int)internal_getpid());
+ ctx->memprof_fd = OpenFile(filename.data(), WrOnly);
+ if (ctx->memprof_fd == kInvalidFd) {
+ Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
+ filename.data());
+ return false;
+ }
+ }
+ MemoryProfiler(0);
+ return true;
}
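
The profiler is driven by the existing profile_memory flag; for example

    TSAN_OPTIONS=profile_memory=stdout ./a.out

emits a WriteMemoryProfile() block on every background-thread tick, while profile_memory=/tmp/tsan.prof appends to /tmp/tsan.prof.<pid> (the AppendF above adds the pid suffix).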
static void *BackgroundThread(void *arg) {
@@ -172,64 +463,43 @@ static void *BackgroundThread(void *arg) {
// We don't use ScopedIgnoreInterceptors, because we want ignores to be
// enabled even when the thread function exits (e.g. during pthread thread
// shutdown code).
- cur_thread_init();
- cur_thread()->ignore_interceptors++;
+ cur_thread_init()->ignore_interceptors++;
const u64 kMs2Ns = 1000 * 1000;
+ const u64 start = NanoTime();
- fd_t mprof_fd = kInvalidFd;
- if (flags()->profile_memory && flags()->profile_memory[0]) {
- if (internal_strcmp(flags()->profile_memory, "stdout") == 0) {
- mprof_fd = 1;
- } else if (internal_strcmp(flags()->profile_memory, "stderr") == 0) {
- mprof_fd = 2;
- } else {
- InternalScopedString filename;
- filename.append("%s.%d", flags()->profile_memory, (int)internal_getpid());
- fd_t fd = OpenFile(filename.data(), WrOnly);
- if (fd == kInvalidFd) {
- Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
- filename.data());
- } else {
- mprof_fd = fd;
- }
- }
- }
-
- u64 last_flush = NanoTime();
+ u64 last_flush = start;
uptr last_rss = 0;
- for (int i = 0;
- atomic_load(&ctx->stop_background_thread, memory_order_relaxed) == 0;
- i++) {
+ while (!atomic_load_relaxed(&ctx->stop_background_thread)) {
SleepForMillis(100);
u64 now = NanoTime();
// Flush memory if requested.
if (flags()->flush_memory_ms > 0) {
if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
- VPrintf(1, "ThreadSanitizer: periodic memory flush\n");
+ VReport(1, "ThreadSanitizer: periodic memory flush\n");
FlushShadowMemory();
- last_flush = NanoTime();
+ now = last_flush = NanoTime();
}
}
- // GetRSS can be expensive on huge programs, so don't do it every 100ms.
if (flags()->memory_limit_mb > 0) {
uptr rss = GetRSS();
uptr limit = uptr(flags()->memory_limit_mb) << 20;
- VPrintf(1, "ThreadSanitizer: memory flush check"
- " RSS=%llu LAST=%llu LIMIT=%llu\n",
+ VReport(1,
+ "ThreadSanitizer: memory flush check"
+ " RSS=%llu LAST=%llu LIMIT=%llu\n",
(u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20);
if (2 * rss > limit + last_rss) {
- VPrintf(1, "ThreadSanitizer: flushing memory due to RSS\n");
+ VReport(1, "ThreadSanitizer: flushing memory due to RSS\n");
FlushShadowMemory();
rss = GetRSS();
- VPrintf(1, "ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss>>20);
+ now = NanoTime();
+ VReport(1, "ThreadSanitizer: memory flushed RSS=%llu\n",
+ (u64)rss >> 20);
}
last_rss = rss;
}
- // Write memory profile if requested.
- if (mprof_fd != kInvalidFd)
- MemoryProfiler(ctx, mprof_fd, i);
+ MemoryProfiler(now - start);
// Flush symbolizer cache if requested.
if (flags()->flush_symbolizer_ms > 0) {
@@ -260,31 +530,96 @@ static void StopBackgroundThread() {
#endif
void DontNeedShadowFor(uptr addr, uptr size) {
- ReleaseMemoryPagesToOS(MemToShadow(addr), MemToShadow(addr + size));
+ ReleaseMemoryPagesToOS(reinterpret_cast<uptr>(MemToShadow(addr)),
+ reinterpret_cast<uptr>(MemToShadow(addr + size)));
}
#if !SANITIZER_GO
+// We call UnmapShadow before the actual munmap; at that point we don't yet
+// know if the provided address/size are sane. We can't call UnmapShadow
+// after the actual munmap because at that point the memory range can
+// already be reused for something else, so we can't rely on the munmap
+// return value to understand whether the values are sane.
+// While calling munmap with insane values (non-canonical address, negative
+// size, etc.) is an error, the kernel won't crash. We must also try not to
+// crash, as the failure mode is very confusing (a page fault inside the
+// runtime on some derived shadow address).
+static bool IsValidMmapRange(uptr addr, uptr size) {
+ if (size == 0)
+ return true;
+ if (static_cast<sptr>(size) < 0)
+ return false;
+ if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
+ return false;
+ // Check that if the start of the region belongs to one of the app
+ // ranges, the end of the region belongs to the same range.
+ const uptr ranges[][2] = {
+ {LoAppMemBeg(), LoAppMemEnd()},
+ {MidAppMemBeg(), MidAppMemEnd()},
+ {HiAppMemBeg(), HiAppMemEnd()},
+ };
+ for (auto range : ranges) {
+ if (addr >= range[0] && addr < range[1])
+ return addr + size <= range[1];
+ }
+ return false;
+}
+
void UnmapShadow(ThreadState *thr, uptr addr, uptr size) {
- if (size == 0) return;
+ if (size == 0 || !IsValidMmapRange(addr, size))
+ return;
DontNeedShadowFor(addr, size);
ScopedGlobalProcessor sgp;
- ctx->metamap.ResetRange(thr->proc(), addr, size);
+ SlotLocker locker(thr, true);
+ ctx->metamap.ResetRange(thr->proc(), addr, size, true);
}
#endif
void MapShadow(uptr addr, uptr size) {
+ // Ensure the thread registry lock is held, so as to synchronize
+ // with DoReset, which also accesses the mapped_shadow_* ctx fields.
+ ThreadRegistryLock lock0(&ctx->thread_registry);
+ static bool data_mapped = false;
+
+#if !SANITIZER_GO
// Global data is not 64K aligned, but there are no adjacent mappings,
// so we can get away with unaligned mapping.
// CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment
const uptr kPageSize = GetPageSizeCached();
uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize);
uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize);
- if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin,
- "shadow"))
+ if (!MmapFixedNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow"))
Die();
+#else
+ uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), (64 << 10));
+ uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), (64 << 10));
+ VPrintf(2, "MapShadow for (0x%zx-0x%zx), begin/end: (0x%zx-0x%zx)\n",
+ addr, addr + size, shadow_begin, shadow_end);
+
+ if (!data_mapped) {
+ // First call maps data+bss.
+ if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow"))
+ Die();
+ } else {
+ VPrintf(2, "ctx->mapped_shadow_{begin,end} = (0x%zx-0x%zx)\n",
+ ctx->mapped_shadow_begin, ctx->mapped_shadow_end);
+ // Second and subsequent calls map heap.
+ if (shadow_end <= ctx->mapped_shadow_end)
+ return;
+ if (!ctx->mapped_shadow_begin || ctx->mapped_shadow_begin > shadow_begin)
+ ctx->mapped_shadow_begin = shadow_begin;
+ if (shadow_begin < ctx->mapped_shadow_end)
+ shadow_begin = ctx->mapped_shadow_end;
+ VPrintf(2, "MapShadow begin/end = (0x%zx-0x%zx)\n",
+ shadow_begin, shadow_end);
+ if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin,
+ "shadow"))
+ Die();
+ ctx->mapped_shadow_end = shadow_end;
+ }
+#endif
// Meta shadow is 2:1, so tread carefully.
- static bool data_mapped = false;
static uptr mapped_meta_end = 0;
uptr meta_begin = (uptr)MemToMeta(addr);
uptr meta_end = (uptr)MemToMeta(addr + size);
@@ -297,12 +632,11 @@ void MapShadow(uptr addr, uptr size) {
"meta shadow"))
Die();
} else {
- // Mapping continous heap.
+ // Mapping continuous heap.
// Windows wants 64K alignment.
meta_begin = RoundDownTo(meta_begin, 64 << 10);
meta_end = RoundUpTo(meta_end, 64 << 10);
- if (meta_end <= mapped_meta_end)
- return;
+ CHECK_GT(meta_end, mapped_meta_end);
if (meta_begin < mapped_meta_end)
meta_begin = mapped_meta_end;
if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
@@ -310,56 +644,8 @@ void MapShadow(uptr addr, uptr size) {
Die();
mapped_meta_end = meta_end;
}
- VPrintf(2, "mapped meta shadow for (%p-%p) at (%p-%p)\n",
- addr, addr+size, meta_begin, meta_end);
-}
-
-void MapThreadTrace(uptr addr, uptr size, const char *name) {
- DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
- CHECK_GE(addr, TraceMemBeg());
- CHECK_LE(addr + size, TraceMemEnd());
- CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment
- if (!MmapFixedSuperNoReserve(addr, size, name)) {
- Printf("FATAL: ThreadSanitizer can not mmap thread trace (%p/%p)\n",
- addr, size);
- Die();
- }
-}
-
-static void CheckShadowMapping() {
- uptr beg, end;
- for (int i = 0; GetUserRegion(i, &beg, &end); i++) {
- // Skip cases for empty regions (heap definition for architectures that
- // do not use 64-bit allocator).
- if (beg == end)
- continue;
- VPrintf(3, "checking shadow region %p-%p\n", beg, end);
- uptr prev = 0;
- for (uptr p0 = beg; p0 <= end; p0 += (end - beg) / 4) {
- for (int x = -(int)kShadowCell; x <= (int)kShadowCell; x += kShadowCell) {
- const uptr p = RoundDown(p0 + x, kShadowCell);
- if (p < beg || p >= end)
- continue;
- const uptr s = MemToShadow(p);
- const uptr m = (uptr)MemToMeta(p);
- VPrintf(3, " checking pointer %p: shadow=%p meta=%p\n", p, s, m);
- CHECK(IsAppMem(p));
- CHECK(IsShadowMem(s));
- CHECK_EQ(p, ShadowToMem(s));
- CHECK(IsMetaMem(m));
- if (prev) {
- // Ensure that shadow and meta mappings are linear within a single
- // user range. Lots of code that processes memory ranges assumes it.
- const uptr prev_s = MemToShadow(prev);
- const uptr prev_m = (uptr)MemToMeta(prev);
- CHECK_EQ(s - prev_s, (p - prev) * kShadowMultiplier);
- CHECK_EQ((m - prev_m) / kMetaShadowSize,
- (p - prev) / kMetaShadowCell);
- }
- prev = p;
- }
- }
- }
+ VPrintf(2, "mapped meta shadow for (0x%zx-0x%zx) at (0x%zx-0x%zx)\n", addr,
+ addr + size, meta_begin, meta_end);
}
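
The 64K rounding used for the meta shadow is ordinary power-of-two arithmetic; a self-contained check of the two helpers' behavior (reimplemented here for illustration):

    #include <cstdint>
    #include <cstdio>

    static uint64_t RoundDownTo(uint64_t x, uint64_t b) { return x & ~(b - 1); }
    static uint64_t RoundUpTo(uint64_t x, uint64_t b) {
      return (x + b - 1) & ~(b - 1);
    }

    int main() {
      const uint64_t k64K = 64 << 10;  // 0x10000
      // 0x12345678 rounds down to 0x12340000 and up to 0x12350000.
      std::printf("0x%llx 0x%llx\n",
                  (unsigned long long)RoundDownTo(0x12345678, k64K),
                  (unsigned long long)RoundUpTo(0x12345678, k64K));
      return 0;
    }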
#if !SANITIZER_GO
@@ -380,15 +666,19 @@ void CheckUnwind() {
// since we are going to die soon.
ScopedIgnoreInterceptors ignore;
#if !SANITIZER_GO
- cur_thread()->ignore_sync++;
- cur_thread()->ignore_reads_and_writes++;
+ ThreadState* thr = cur_thread();
+ thr->nomalloc = false;
+ thr->ignore_sync++;
+ thr->ignore_reads_and_writes++;
+ atomic_store_relaxed(&thr->in_signal_handler, 0);
#endif
PrintCurrentStackSlow(StackTrace::GetCurrentPc());
}
+bool is_initialized;
+
void Initialize(ThreadState *thr) {
// Thread safe because done before all threads exist.
- static bool is_initialized = false;
if (is_initialized)
return;
is_initialized = true;
@@ -409,9 +699,6 @@ void Initialize(ThreadState *thr) {
__tsan::InitializePlatformEarly();
#if !SANITIZER_GO
- // Re-exec ourselves if we need to set additional env or command line args.
- MaybeReexec();
-
InitializeAllocator();
ReplaceSystemMalloc();
#endif
@@ -420,7 +707,6 @@ void Initialize(ThreadState *thr) {
Processor *proc = ProcCreate();
ProcWire(proc, thr);
InitializeInterceptors();
- CheckShadowMapping();
InitializePlatform();
InitializeDynamicAnnotations();
#if !SANITIZER_GO
@@ -436,21 +722,23 @@ void Initialize(ThreadState *thr) {
Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
#endif
- VPrintf(1, "***** Running under ThreadSanitizer v2 (pid %d) *****\n",
+ VPrintf(1, "***** Running under ThreadSanitizer v3 (pid %d) *****\n",
(int)internal_getpid());
// Initialize thread 0.
- int tid = ThreadCreate(thr, 0, 0, true);
- CHECK_EQ(tid, 0);
+ Tid tid = ThreadCreate(nullptr, 0, 0, true);
+ CHECK_EQ(tid, kMainTid);
ThreadStart(thr, tid, GetTid(), ThreadType::Regular);
#if TSAN_CONTAINS_UBSAN
__ubsan::InitAsPlugin();
#endif
- ctx->initialized = true;
#if !SANITIZER_GO
Symbolizer::LateInitialize();
+ if (InitializeMemoryProfiler() || flags()->force_background_thread)
+ MaybeSpawnBackgroundThread();
#endif
+ ctx->initialized = true;
if (flags()->stop_on_start) {
Printf("ThreadSanitizer is suspended at startup (pid %d)."
@@ -476,20 +764,21 @@ void MaybeSpawnBackgroundThread() {
#endif
}
-
int Finalize(ThreadState *thr) {
bool failed = false;
+#if !SANITIZER_GO
if (common_flags()->print_module_map == 1)
DumpProcessMap();
+#endif
if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
- SleepForMillis(flags()->atexit_sleep_ms);
+ internal_usleep(u64(flags()->atexit_sleep_ms) * 1000);
- // Wait for pending reports.
- ctx->report_mtx.Lock();
- { ScopedErrorReportLock l; }
- ctx->report_mtx.Unlock();
+ {
+ // Wait for pending reports.
+ ScopedErrorReportLock lock;
+ }
#if !SANITIZER_GO
if (Verbosity()) AllocatorPrintStats();
@@ -506,18 +795,8 @@ int Finalize(ThreadState *thr) {
#endif
}
- if (ctx->nmissed_expected) {
- failed = true;
- Printf("ThreadSanitizer: missed %d expected races\n",
- ctx->nmissed_expected);
- }
-
if (common_flags()->print_suppressions)
PrintMatchedSuppressions();
-#if !SANITIZER_GO
- if (flags()->print_benign)
- PrintMatchedBenignRaces();
-#endif
failed = OnFinalize(failed);
@@ -525,10 +804,16 @@ int Finalize(ThreadState *thr) {
}
#if !SANITIZER_GO
-void ForkBefore(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
- ctx->thread_registry->Lock();
- ctx->report_mtx.Lock();
+void ForkBefore(ThreadState* thr, uptr pc) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
+ GlobalProcessorLock();
+ // Detaching from the slot makes OnUserFree skip writing to the shadow.
+ // The slot will be locked so any attempts to use it will deadlock anyway.
+ SlotDetach(thr);
+ for (auto& slot : ctx->slots) slot.mtx.Lock();
+ ctx->thread_registry.Lock();
+ ctx->slot_mtx.Lock();
ScopedErrorReportLock::Lock();
+ AllocatorLock();
// Suppress all reports in the pthread_atfork callbacks.
// Reports will deadlock on the report_mtx.
// We could ignore sync operations as well,
@@ -537,36 +822,48 @@ void ForkBefore(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
thr->suppress_reports++;
// On OS X, REAL(fork) can call intercepted functions (OSSpinLockLock), and
// we'll assert in CheckNoLocks() unless we ignore interceptors.
+ // On OS X libSystem_atfork_prepare/parent/child callbacks are called
+ // after/before our callbacks and they call free.
thr->ignore_interceptors++;
+ // Disables memory writes in OnUserAlloc/Free.
+ thr->ignore_reads_and_writes++;
+
+ __tsan_test_only_on_fork();
}
-void ForkParentAfter(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
+static void ForkAfter(ThreadState* thr) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
thr->suppress_reports--; // Enabled in ForkBefore.
thr->ignore_interceptors--;
+ thr->ignore_reads_and_writes--;
+ AllocatorUnlock();
ScopedErrorReportLock::Unlock();
- ctx->report_mtx.Unlock();
- ctx->thread_registry->Unlock();
+ ctx->slot_mtx.Unlock();
+ ctx->thread_registry.Unlock();
+ for (auto& slot : ctx->slots) slot.mtx.Unlock();
+ SlotAttachAndLock(thr);
+ SlotUnlock(thr);
+ GlobalProcessorUnlock();
}
-void ForkChildAfter(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
- thr->suppress_reports--; // Enabled in ForkBefore.
- thr->ignore_interceptors--;
- ScopedErrorReportLock::Unlock();
- ctx->report_mtx.Unlock();
- ctx->thread_registry->Unlock();
+void ForkParentAfter(ThreadState* thr, uptr pc) { ForkAfter(thr); }
- uptr nthread = 0;
- ctx->thread_registry->GetNumberOfThreads(0, 0, &nthread /* alive threads */);
- VPrintf(1, "ThreadSanitizer: forked new process with pid %d,"
- " parent had %d threads\n", (int)internal_getpid(), (int)nthread);
+void ForkChildAfter(ThreadState* thr, uptr pc, bool start_thread) {
+ ForkAfter(thr);
+ u32 nthread = ctx->thread_registry.OnFork(thr->tid);
+ VPrintf(1,
+ "ThreadSanitizer: forked new process with pid %d,"
+ " parent had %d threads\n",
+ (int)internal_getpid(), (int)nthread);
if (nthread == 1) {
- StartBackgroundThread();
+ if (start_thread)
+ StartBackgroundThread();
} else {
// We've just forked a multi-threaded process. We cannot reasonably function
// after that (some mutexes may be locked before fork). So just enable
// ignores for everything in the hope that we will exec soon.
ctx->after_multithreaded_fork = true;
thr->ignore_interceptors++;
+ thr->suppress_reports++;
ThreadIgnoreBegin(thr, pc);
ThreadIgnoreSyncBegin(thr, pc);
}
@@ -578,19 +875,20 @@ NOINLINE
void GrowShadowStack(ThreadState *thr) {
const int sz = thr->shadow_stack_end - thr->shadow_stack;
const int newsz = 2 * sz;
- uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
- newsz * sizeof(uptr));
+ auto *newstack = (uptr *)Alloc(newsz * sizeof(uptr));
internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
- internal_free(thr->shadow_stack);
+ Free(thr->shadow_stack);
thr->shadow_stack = newstack;
thr->shadow_stack_pos = newstack + sz;
thr->shadow_stack_end = newstack + newsz;
}
#endif
-u32 CurrentStackId(ThreadState *thr, uptr pc) {
+StackID CurrentStackId(ThreadState *thr, uptr pc) {
+#if !SANITIZER_GO
if (!thr->is_inited) // May happen during bootstrap.
- return 0;
+ return kInvalidStackID;
+#endif
if (pc != 0) {
#if !SANITIZER_GO
DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
@@ -601,486 +899,149 @@ u32 CurrentStackId(ThreadState *thr, uptr pc) {
thr->shadow_stack_pos[0] = pc;
thr->shadow_stack_pos++;
}
- u32 id = StackDepotPut(
+ StackID id = StackDepotPut(
StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
if (pc != 0)
thr->shadow_stack_pos--;
return id;
}
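
CurrentStackId temporarily pushes the current pc onto the shadow stack so that StackDepotPut interns the full trace as a small integer handle. The real depot is a lock-free structure, but the interning idea can be sketched with an ordinary map (DepotPut and StackId below are illustrative names, not the real API):

#include <cstddef>
#include <cstdint>
#include <map>
#include <vector>

using StackId = uint32_t;  // 0 is reserved as the invalid ID

static std::map<std::vector<uintptr_t>, StackId> g_depot;

// Intern a stack trace: identical PC sequences get identical IDs.
StackId DepotPut(const uintptr_t *pcs, size_t n) {
  std::vector<uintptr_t> key(pcs, pcs + n);
  auto it = g_depot.find(key);
  if (it != g_depot.end())
    return it->second;
  StackId id = static_cast<StackId>(g_depot.size() + 1);
  g_depot.emplace(std::move(key), id);
  return id;
}
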
-void TraceSwitch(ThreadState *thr) {
-#if !SANITIZER_GO
- if (ctx->after_multithreaded_fork)
- return;
-#endif
- thr->nomalloc++;
- Trace *thr_trace = ThreadTrace(thr->tid);
- Lock l(&thr_trace->mtx);
- unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
- TraceHeader *hdr = &thr_trace->headers[trace];
- hdr->epoch0 = thr->fast_state.epoch();
- ObtainCurrentStack(thr, 0, &hdr->stack0);
- hdr->mset0 = thr->mset;
- thr->nomalloc--;
-}
-
-Trace *ThreadTrace(int tid) {
- return (Trace*)GetThreadTraceHeader(tid);
-}
-
-uptr TraceTopPC(ThreadState *thr) {
- Event *events = (Event*)GetThreadTrace(thr->tid);
- uptr pc = events[thr->fast_state.GetTracePos()];
- return pc;
-}
-
-uptr TraceSize() {
- return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
-}
-
-uptr TraceParts() {
- return TraceSize() / kTracePartSize;
-}
-
-#if !SANITIZER_GO
-extern "C" void __tsan_trace_switch() {
- TraceSwitch(cur_thread());
-}
-
-extern "C" void __tsan_report_race() {
- ReportRace(cur_thread());
-}
-#endif
-
-ALWAYS_INLINE
-Shadow LoadShadow(u64 *p) {
- u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
- return Shadow(raw);
-}
-
-ALWAYS_INLINE
-void StoreShadow(u64 *sp, u64 s) {
- atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
-}
-
-ALWAYS_INLINE
-void StoreIfNotYetStored(u64 *sp, u64 *s) {
- StoreShadow(sp, *s);
- *s = 0;
-}
-
-ALWAYS_INLINE
-void HandleRace(ThreadState *thr, u64 *shadow_mem,
- Shadow cur, Shadow old) {
- thr->racy_state[0] = cur.raw();
- thr->racy_state[1] = old.raw();
- thr->racy_shadow_addr = shadow_mem;
-#if !SANITIZER_GO
- HACKY_CALL(__tsan_report_race);
-#else
- ReportRace(thr);
-#endif
-}
-
-static inline bool HappensBefore(Shadow old, ThreadState *thr) {
- return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
-}
-
-ALWAYS_INLINE
-void MemoryAccessImpl1(ThreadState *thr, uptr addr,
- int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
- u64 *shadow_mem, Shadow cur) {
-
- // This potentially can live in an MMX/SSE scratch register.
- // The required intrinsics are:
- // __m128i _mm_move_epi64(__m128i*);
- // _mm_storel_epi64(u64*, __m128i);
- u64 store_word = cur.raw();
- bool stored = false;
-
- // scan all the shadow values and dispatch to 4 categories:
- // same, replace, candidate and race (see comments below).
- // we consider only 3 cases regarding access sizes:
-  // equal, intersect and not intersect. Initially I considered
-  // larger and smaller as well; that allowed us to replace some
-  // 'candidates' with 'same' or 'replace', but I think
- // it's just not worth it (performance- and complexity-wise).
-
- Shadow old(0);
-
-  // In release mode we manually unroll the loop,
- // because empirically gcc generates better code this way.
- // However, we can't afford unrolling in debug mode, because the function
- // consumes almost 4K of stack. Gtest gives only 4K of stack to death test
- // threads, which is not enough for the unrolled loop.
-#if SANITIZER_DEBUG
- for (int idx = 0; idx < 4; idx++) {
-#include "tsan_update_shadow_word_inl.h"
- }
-#else
- int idx = 0;
-#include "tsan_update_shadow_word_inl.h"
- idx = 1;
- if (stored) {
-#include "tsan_update_shadow_word_inl.h"
- } else {
-#include "tsan_update_shadow_word_inl.h"
- }
- idx = 2;
- if (stored) {
-#include "tsan_update_shadow_word_inl.h"
- } else {
-#include "tsan_update_shadow_word_inl.h"
- }
- idx = 3;
- if (stored) {
-#include "tsan_update_shadow_word_inl.h"
- } else {
-#include "tsan_update_shadow_word_inl.h"
- }
-#endif
-
- // we did not find any races and had already stored
- // the current access info, so we are done
- if (LIKELY(stored))
- return;
- // choose a random candidate slot and replace it
- StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
- return;
- RACE:
- HandleRace(thr, shadow_mem, cur, old);
- return;
-}
-
-void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
- int size, bool kAccessIsWrite, bool kIsAtomic) {
- while (size) {
- int size1 = 1;
- int kAccessSizeLog = kSizeLog1;
- if (size >= 8 && (addr & ~7) == ((addr + 7) & ~7)) {
- size1 = 8;
- kAccessSizeLog = kSizeLog8;
- } else if (size >= 4 && (addr & ~7) == ((addr + 3) & ~7)) {
- size1 = 4;
- kAccessSizeLog = kSizeLog4;
- } else if (size >= 2 && (addr & ~7) == ((addr + 1) & ~7)) {
- size1 = 2;
- kAccessSizeLog = kSizeLog2;
- }
- MemoryAccess(thr, pc, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic);
- addr += size1;
- size -= size1;
- }
-}
-
-ALWAYS_INLINE
-bool ContainsSameAccessSlow(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
- Shadow cur(a);
- for (uptr i = 0; i < kShadowCnt; i++) {
- Shadow old(LoadShadow(&s[i]));
- if (Shadow::Addr0AndSizeAreEqual(cur, old) &&
- old.TidWithIgnore() == cur.TidWithIgnore() &&
- old.epoch() > sync_epoch &&
- old.IsAtomic() == cur.IsAtomic() &&
- old.IsRead() <= cur.IsRead())
- return true;
+static bool TraceSkipGap(ThreadState* thr) {
+ Trace *trace = &thr->tctx->trace;
+ Event *pos = reinterpret_cast<Event *>(atomic_load_relaxed(&thr->trace_pos));
+ DCHECK_EQ(reinterpret_cast<uptr>(pos + 1) & TracePart::kAlignment, 0);
+ auto *part = trace->parts.Back();
+ DPrintf("#%d: TraceSwitchPart enter trace=%p parts=%p-%p pos=%p\n", thr->tid,
+ trace, trace->parts.Front(), part, pos);
+ if (!part)
+ return false;
+ // We can get here when we still have space in the current trace part.
+ // The fast-path check in TraceAcquire has false positives in the middle of
+ // the part. Check if we are indeed at the end of the current part or not,
+  // and fill any gaps with NopEvents.
+ Event* end = &part->events[TracePart::kSize];
+ DCHECK_GE(pos, &part->events[0]);
+ DCHECK_LE(pos, end);
+ if (pos + 1 < end) {
+ if ((reinterpret_cast<uptr>(pos) & TracePart::kAlignment) ==
+ TracePart::kAlignment)
+ *pos++ = NopEvent;
+ *pos++ = NopEvent;
+ DCHECK_LE(pos + 2, end);
+ atomic_store_relaxed(&thr->trace_pos, reinterpret_cast<uptr>(pos));
+ return true;
}
+ // We are indeed at the end.
+ for (; pos < end; pos++) *pos = NopEvent;
return false;
}
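
TraceSkipGap leans on the fact that TracePart is mmap-allocated with power-of-two alignment, so "pos + 1 crosses a part boundary" reduces to a single mask test on the pointer bits, with no end pointer loaded at all. A standalone sketch of the trick, assuming a 4K part size:

#include <cstddef>
#include <cstdint>
#include <cstdlib>

constexpr size_t kPartSize = 4096;  // bytes; must be a power of two

// True when the slot after pos is the first byte of the next part,
// i.e. pos is the last event slot of the current part.
bool AtPartEnd(const uint64_t *pos) {
  return (reinterpret_cast<uintptr_t>(pos + 1) & (kPartSize - 1)) == 0;
}

uint64_t *AllocPart() {
  // aligned_alloc guarantees the alignment the mask test relies on.
  return static_cast<uint64_t *>(std::aligned_alloc(kPartSize, kPartSize));
}
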
-#if defined(__SSE3__)
-#define SHUF(v0, v1, i0, i1, i2, i3) _mm_castps_si128(_mm_shuffle_ps( \
- _mm_castsi128_ps(v0), _mm_castsi128_ps(v1), \
- (i0)*1 + (i1)*4 + (i2)*16 + (i3)*64))
-ALWAYS_INLINE
-bool ContainsSameAccessFast(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
- // This is an optimized version of ContainsSameAccessSlow.
- // load current access into access[0:63]
- const m128 access = _mm_cvtsi64_si128(a);
- // duplicate high part of access in addr0:
- // addr0[0:31] = access[32:63]
- // addr0[32:63] = access[32:63]
- // addr0[64:95] = access[32:63]
- // addr0[96:127] = access[32:63]
- const m128 addr0 = SHUF(access, access, 1, 1, 1, 1);
- // load 4 shadow slots
- const m128 shadow0 = _mm_load_si128((__m128i*)s);
- const m128 shadow1 = _mm_load_si128((__m128i*)s + 1);
- // load high parts of 4 shadow slots into addr_vect:
- // addr_vect[0:31] = shadow0[32:63]
- // addr_vect[32:63] = shadow0[96:127]
- // addr_vect[64:95] = shadow1[32:63]
- // addr_vect[96:127] = shadow1[96:127]
- m128 addr_vect = SHUF(shadow0, shadow1, 1, 3, 1, 3);
- if (!is_write) {
- // set IsRead bit in addr_vect
- const m128 rw_mask1 = _mm_cvtsi64_si128(1<<15);
- const m128 rw_mask = SHUF(rw_mask1, rw_mask1, 0, 0, 0, 0);
- addr_vect = _mm_or_si128(addr_vect, rw_mask);
- }
- // addr0 == addr_vect?
- const m128 addr_res = _mm_cmpeq_epi32(addr0, addr_vect);
- // epoch1[0:63] = sync_epoch
- const m128 epoch1 = _mm_cvtsi64_si128(sync_epoch);
- // epoch[0:31] = sync_epoch[0:31]
- // epoch[32:63] = sync_epoch[0:31]
- // epoch[64:95] = sync_epoch[0:31]
- // epoch[96:127] = sync_epoch[0:31]
- const m128 epoch = SHUF(epoch1, epoch1, 0, 0, 0, 0);
- // load low parts of shadow cell epochs into epoch_vect:
- // epoch_vect[0:31] = shadow0[0:31]
- // epoch_vect[32:63] = shadow0[64:95]
- // epoch_vect[64:95] = shadow1[0:31]
- // epoch_vect[96:127] = shadow1[64:95]
- const m128 epoch_vect = SHUF(shadow0, shadow1, 0, 2, 0, 2);
- // epoch_vect >= sync_epoch?
- const m128 epoch_res = _mm_cmpgt_epi32(epoch_vect, epoch);
- // addr_res & epoch_res
- const m128 res = _mm_and_si128(addr_res, epoch_res);
- // mask[0] = res[7]
- // mask[1] = res[15]
- // ...
- // mask[15] = res[127]
- const int mask = _mm_movemask_epi8(res);
- return mask != 0;
-}
-#endif
-
-ALWAYS_INLINE
-bool ContainsSameAccess(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
-#if defined(__SSE3__)
- bool res = ContainsSameAccessFast(s, a, sync_epoch, is_write);
- // NOTE: this check can fail if the shadow is concurrently mutated
- // by other threads. But it still can be useful if you modify
- // ContainsSameAccessFast and want to ensure that it's not completely broken.
- // DCHECK_EQ(res, ContainsSameAccessSlow(s, a, sync_epoch, is_write));
- return res;
-#else
- return ContainsSameAccessSlow(s, a, sync_epoch, is_write);
-#endif
-}
-
-ALWAYS_INLINE USED
-void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
- int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) {
- u64 *shadow_mem = (u64*)MemToShadow(addr);
- DPrintf2("#%d: MemoryAccess: @%p %p size=%d"
- " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
- (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
- (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
- (uptr)shadow_mem[0], (uptr)shadow_mem[1],
- (uptr)shadow_mem[2], (uptr)shadow_mem[3]);
-#if SANITIZER_DEBUG
- if (!IsAppMem(addr)) {
- Printf("Access to non app mem %zx\n", addr);
- DCHECK(IsAppMem(addr));
- }
- if (!IsShadowMem((uptr)shadow_mem)) {
- Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
- DCHECK(IsShadowMem((uptr)shadow_mem));
- }
-#endif
-
- if (!SANITIZER_GO && !kAccessIsWrite && *shadow_mem == kShadowRodata) {
- // Access to .rodata section, no races here.
- // Measurements show that it can be 10-20% of all memory accesses.
- return;
- }
-
- FastState fast_state = thr->fast_state;
- if (UNLIKELY(fast_state.GetIgnoreBit())) {
- return;
- }
-
- Shadow cur(fast_state);
- cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
- cur.SetWrite(kAccessIsWrite);
- cur.SetAtomic(kIsAtomic);
-
- if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
- thr->fast_synch_epoch, kAccessIsWrite))) {
- return;
- }
-
- if (kCollectHistory) {
- fast_state.IncrementEpoch();
- thr->fast_state = fast_state;
- TraceAddEvent(thr, fast_state, EventTypeMop, pc);
- cur.IncrementEpoch();
- }
-
- MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
- shadow_mem, cur);
-}
-
-// Called by MemoryAccessRange in tsan_rtl_thread.cpp
-ALWAYS_INLINE USED
-void MemoryAccessImpl(ThreadState *thr, uptr addr,
- int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
- u64 *shadow_mem, Shadow cur) {
- if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
- thr->fast_synch_epoch, kAccessIsWrite))) {
- return;
- }
-
- MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
- shadow_mem, cur);
-}
-
-static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
- u64 val) {
- (void)thr;
- (void)pc;
- if (size == 0)
+NOINLINE
+void TraceSwitchPart(ThreadState* thr) {
+ if (TraceSkipGap(thr))
return;
- // FIXME: fix me.
- uptr offset = addr % kShadowCell;
- if (offset) {
- offset = kShadowCell - offset;
- if (size <= offset)
+#if !SANITIZER_GO
+ if (ctx->after_multithreaded_fork) {
+ // We just need to survive till exec.
+ TracePart* part = thr->tctx->trace.parts.Back();
+ if (part) {
+ atomic_store_relaxed(&thr->trace_pos,
+ reinterpret_cast<uptr>(&part->events[0]));
return;
- addr += offset;
- size -= offset;
- }
- DCHECK_EQ(addr % 8, 0);
- // If a user passes some insane arguments (memset(0)),
- // let it just crash as usual.
- if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
- return;
- // Don't want to touch lots of shadow memory.
-  // If a program maps a 10MB stack, there is no need to reset the whole range.
- size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
- // UnmapOrDie/MmapFixedNoReserve does not work on Windows.
- if (SANITIZER_WINDOWS || size < common_flags()->clear_shadow_mmap_threshold) {
- u64 *p = (u64*)MemToShadow(addr);
- CHECK(IsShadowMem((uptr)p));
- CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
- // FIXME: may overwrite a part outside the region
- for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) {
- p[i++] = val;
- for (uptr j = 1; j < kShadowCnt; j++)
- p[i++] = 0;
- }
- } else {
- // The region is big, reset only beginning and end.
- const uptr kPageSize = GetPageSizeCached();
- u64 *begin = (u64*)MemToShadow(addr);
- u64 *end = begin + size / kShadowCell * kShadowCnt;
- u64 *p = begin;
-  // Set at least the first kPageSize/2 and continue to a page boundary.
- while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) {
- *p++ = val;
- for (uptr j = 1; j < kShadowCnt; j++)
- *p++ = 0;
- }
- // Reset middle part.
- u64 *p1 = p;
- p = RoundDown(end, kPageSize);
- if (!MmapFixedSuperNoReserve((uptr)p1, (uptr)p - (uptr)p1))
- Die();
- // Set the ending.
- while (p < end) {
- *p++ = val;
- for (uptr j = 1; j < kShadowCnt; j++)
- *p++ = 0;
}
}
+#endif
+ TraceSwitchPartImpl(thr);
}
-void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
- MemoryRangeSet(thr, pc, addr, size, 0);
-}
-
-void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
-  // Processing more than 1k (4k of shadow) is expensive,
-  // can cause excessive memory consumption (the user does not necessarily
-  // touch the whole range), and is most likely unnecessary.
- if (size > 1024)
- size = 1024;
- CHECK_EQ(thr->is_freeing, false);
- thr->is_freeing = true;
- MemoryAccessRange(thr, pc, addr, size, true);
- thr->is_freeing = false;
- if (kCollectHistory) {
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
+void TraceSwitchPartImpl(ThreadState* thr) {
+ SlotLocker locker(thr, true);
+ Trace* trace = &thr->tctx->trace;
+ TracePart* part = TracePartAlloc(thr);
+ part->trace = trace;
+ thr->trace_prev_pc = 0;
+ TracePart* recycle = nullptr;
+  // Keep roughly half of the parts local to the thread
+ // (not queued into the recycle queue).
+ uptr local_parts = (Trace::kMinParts + flags()->history_size + 1) / 2;
+ {
+ Lock lock(&trace->mtx);
+ if (trace->parts.Empty())
+ trace->local_head = part;
+ if (trace->parts.Size() >= local_parts) {
+ recycle = trace->local_head;
+ trace->local_head = trace->parts.Next(recycle);
+ }
+ trace->parts.PushBack(part);
+ atomic_store_relaxed(&thr->trace_pos,
+ reinterpret_cast<uptr>(&part->events[0]));
}
- Shadow s(thr->fast_state);
- s.ClearIgnoreBit();
- s.MarkAsFreed();
- s.SetWrite(true);
- s.SetAddr0AndSizeLog(0, 3);
- MemoryRangeSet(thr, pc, addr, size, s.raw());
-}
-
-void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
- if (kCollectHistory) {
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
+ // Make this part self-sufficient by restoring the current stack
+  // and mutex set at the beginning of the trace.
+ TraceTime(thr);
+ {
+ // Pathologically large stacks may not fit into the part.
+    // In these cases we log only a fixed number of top frames.
+ const uptr kMaxFrames = 1000;
+ // Check that kMaxFrames won't consume the whole part.
+ static_assert(kMaxFrames < TracePart::kSize / 2, "kMaxFrames is too big");
+ uptr* pos = Max(&thr->shadow_stack[0], thr->shadow_stack_pos - kMaxFrames);
+ for (; pos < thr->shadow_stack_pos; pos++) {
+ if (TryTraceFunc(thr, *pos))
+ continue;
+ CHECK(TraceSkipGap(thr));
+ CHECK(TryTraceFunc(thr, *pos));
+ }
}
- Shadow s(thr->fast_state);
- s.ClearIgnoreBit();
- s.SetWrite(true);
- s.SetAddr0AndSizeLog(0, 3);
- MemoryRangeSet(thr, pc, addr, size, s.raw());
-}
-
-void MemoryRangeImitateWriteOrResetRange(ThreadState *thr, uptr pc, uptr addr,
- uptr size) {
- if (thr->ignore_reads_and_writes == 0)
- MemoryRangeImitateWrite(thr, pc, addr, size);
- else
- MemoryResetRange(thr, pc, addr, size);
-}
-
-ALWAYS_INLINE USED
-void FuncEntry(ThreadState *thr, uptr pc) {
- DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
- if (kCollectHistory) {
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);
+ for (uptr i = 0; i < thr->mset.Size(); i++) {
+ MutexSet::Desc d = thr->mset.Get(i);
+ for (uptr i = 0; i < d.count; i++)
+ TraceMutexLock(thr, d.write ? EventType::kLock : EventType::kRLock, 0,
+ d.addr, d.stack_id);
}
-
- // Shadow stack maintenance can be replaced with
- // stack unwinding during trace switch (which presumably must be faster).
- DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
-#if !SANITIZER_GO
- DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
-#else
- if (thr->shadow_stack_pos == thr->shadow_stack_end)
- GrowShadowStack(thr);
-#endif
- thr->shadow_stack_pos[0] = pc;
- thr->shadow_stack_pos++;
-}
-
-ALWAYS_INLINE USED
-void FuncExit(ThreadState *thr) {
- DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
- if (kCollectHistory) {
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);
+ // Callers of TraceSwitchPart expect that TraceAcquire will always succeed
+ // after the call. It's possible that TryTraceFunc/TraceMutexLock above
+ // filled the trace part exactly up to the TracePart::kAlignment gap
+ // and the next TraceAcquire won't succeed. Skip the gap to avoid that.
+ EventFunc *ev;
+ if (!TraceAcquire(thr, &ev)) {
+ CHECK(TraceSkipGap(thr));
+ CHECK(TraceAcquire(thr, &ev));
}
-
- DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
-#if !SANITIZER_GO
- DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
-#endif
- thr->shadow_stack_pos--;
+ {
+ Lock lock(&ctx->slot_mtx);
+    // There is a small chance that the slot may not be queued at this point.
+ // This can happen if the slot has kEpochLast epoch and another thread
+ // in FindSlotAndLock discovered that it's exhausted and removed it from
+ // the slot queue. kEpochLast can happen in 2 cases: (1) if TraceSwitchPart
+ // was called with the slot locked and epoch already at kEpochLast,
+ // or (2) if we've acquired a new slot in SlotLock in the beginning
+ // of the function and the slot was at kEpochLast - 1, so after increment
+    // in SlotAttachAndLock it becomes kEpochLast.
+ if (ctx->slot_queue.Queued(thr->slot)) {
+ ctx->slot_queue.Remove(thr->slot);
+ ctx->slot_queue.PushBack(thr->slot);
+ }
+ if (recycle)
+ ctx->trace_part_recycle.PushBack(recycle);
+ }
+ DPrintf("#%d: TraceSwitchPart exit parts=%p-%p pos=0x%zx\n", thr->tid,
+ trace->parts.Front(), trace->parts.Back(),
+ atomic_load_relaxed(&thr->trace_pos));
}
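
The key property established here is that each part is self-sufficient: it begins with a timestamp, the live call stack, and the held mutex set, so report reconstruction never needs an earlier part. A compact sketch of a segmented log with that property and a bounded part count (all names are illustrative, not TSan's):

#include <cstddef>
#include <cstdint>
#include <deque>
#include <vector>

struct Segment {
  std::vector<uintptr_t> events;  // stack snapshot first, then regular events
};

struct SegmentedLog {
  static constexpr size_t kMaxSegments = 8;  // bounds total memory
  std::deque<Segment> segments;
  std::vector<uintptr_t> live_stack;  // mirrors the shadow stack

  void SwitchSegment() {
    if (segments.size() == kMaxSegments)
      segments.pop_front();  // recycle the oldest part, like trace_part_recycle
    segments.emplace_back();
    // Replay the live stack so this segment decodes on its own.
    segments.back().events = live_stack;
  }
};
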
-void ThreadIgnoreBegin(ThreadState *thr, uptr pc, bool save_stack) {
+void ThreadIgnoreBegin(ThreadState* thr, uptr pc) {
DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
thr->ignore_reads_and_writes++;
CHECK_GT(thr->ignore_reads_and_writes, 0);
thr->fast_state.SetIgnoreBit();
#if !SANITIZER_GO
- if (save_stack && !ctx->after_multithreaded_fork)
+ if (pc && !ctx->after_multithreaded_fork)
thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}
-void ThreadIgnoreEnd(ThreadState *thr, uptr pc) {
+void ThreadIgnoreEnd(ThreadState *thr) {
DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
CHECK_GT(thr->ignore_reads_and_writes, 0);
thr->ignore_reads_and_writes--;
@@ -1100,17 +1061,17 @@ uptr __tsan_testonly_shadow_stack_current_size() {
}
#endif
-void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc, bool save_stack) {
+void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) {
DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
thr->ignore_sync++;
CHECK_GT(thr->ignore_sync, 0);
#if !SANITIZER_GO
- if (save_stack && !ctx->after_multithreaded_fork)
+ if (pc && !ctx->after_multithreaded_fork)
thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}
-void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc) {
+void ThreadIgnoreSyncEnd(ThreadState *thr) {
DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
CHECK_GT(thr->ignore_sync, 0);
thr->ignore_sync--;
@@ -1129,7 +1090,6 @@ void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif
-
} // namespace __tsan
#if SANITIZER_CHECK_DEADLOCKS
@@ -1137,23 +1097,27 @@ namespace __sanitizer {
using namespace __tsan;
MutexMeta mutex_meta[] = {
{MutexInvalid, "Invalid", {}},
- {MutexThreadRegistry, "ThreadRegistry", {}},
- {MutexTypeTrace, "Trace", {MutexLeaf}},
- {MutexTypeReport, "Report", {MutexTypeSyncVar}},
- {MutexTypeSyncVar, "SyncVar", {}},
+ {MutexThreadRegistry,
+ "ThreadRegistry",
+ {MutexTypeSlots, MutexTypeTrace, MutexTypeReport}},
+ {MutexTypeReport, "Report", {MutexTypeTrace}},
+ {MutexTypeSyncVar, "SyncVar", {MutexTypeReport, MutexTypeTrace}},
{MutexTypeAnnotations, "Annotations", {}},
- {MutexTypeAtExit, "AtExit", {MutexTypeSyncVar}},
+ {MutexTypeAtExit, "AtExit", {}},
{MutexTypeFired, "Fired", {MutexLeaf}},
{MutexTypeRacy, "Racy", {MutexLeaf}},
- {MutexTypeGlobalProc, "GlobalProc", {}},
+ {MutexTypeGlobalProc, "GlobalProc", {MutexTypeSlot, MutexTypeSlots}},
+ {MutexTypeInternalAlloc, "InternalAlloc", {MutexLeaf}},
+ {MutexTypeTrace, "Trace", {}},
+ {MutexTypeSlot,
+ "Slot",
+ {MutexMulti, MutexTypeTrace, MutexTypeSyncVar, MutexThreadRegistry,
+ MutexTypeSlots}},
+ {MutexTypeSlots, "Slots", {MutexTypeTrace, MutexTypeReport}},
{},
};
void PrintMutexPC(uptr pc) { StackTrace(&pc, 1).Print(); }
-} // namespace __sanitizer
-#endif
-#if !SANITIZER_GO
-// Must be included in this file to make sure everything is inlined.
-# include "tsan_interface_inl.h"
+} // namespace __sanitizer
#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl.h b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl.h
index 8567d0ade877..de4ea0bb5f48 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl.h
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl.h
@@ -34,17 +34,19 @@
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "sanitizer_common/sanitizer_vector.h"
-#include "tsan_clock.h"
#include "tsan_defs.h"
#include "tsan_flags.h"
+#include "tsan_ignoreset.h"
+#include "tsan_ilist.h"
#include "tsan_mman.h"
-#include "tsan_sync.h"
-#include "tsan_trace.h"
-#include "tsan_report.h"
-#include "tsan_platform.h"
#include "tsan_mutexset.h"
-#include "tsan_ignoreset.h"
+#include "tsan_platform.h"
+#include "tsan_report.h"
+#include "tsan_shadow.h"
#include "tsan_stack_trace.h"
+#include "tsan_sync.h"
+#include "tsan_trace.h"
+#include "tsan_vector_clock.h"
#if SANITIZER_WORDSIZE != 64
# error "ThreadSanitizer is supported only on 64-bit platforms"
@@ -54,7 +56,8 @@ namespace __tsan {
#if !SANITIZER_GO
struct MapUnmapCallback;
-#if defined(__mips64) || defined(__aarch64__) || defined(__powerpc__)
+# if defined(__mips64) || defined(__aarch64__) || defined(__loongarch__) || \
+ defined(__powerpc__) || SANITIZER_RISCV64
struct AP32 {
static const uptr kSpaceBeg = 0;
@@ -69,6 +72,11 @@ struct AP32 {
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#else
struct AP64 { // Allocator64 parameters. Deliberately using a short name.
+# if defined(__s390x__)
+ typedef MappingS390x Mapping;
+# else
+ typedef Mapping48AddressSpace Mapping;
+# endif
static const uptr kSpaceBeg = Mapping::kHeapMemBeg;
static const uptr kSpaceSize = Mapping::kHeapMemEnd - Mapping::kHeapMemBeg;
static const uptr kMetadataSize = 0;
@@ -84,240 +92,6 @@ typedef Allocator::AllocatorCache AllocatorCache;
Allocator *allocator();
#endif
-const u64 kShadowRodata = (u64)-1; // .rodata shadow marker
-
-// FastState (from most significant bit):
-// ignore : 1
-// tid : kTidBits
-// unused : -
-// history_size : 3
-// epoch : kClkBits
-class FastState {
- public:
- FastState(u64 tid, u64 epoch) {
- x_ = tid << kTidShift;
- x_ |= epoch;
- DCHECK_EQ(tid, this->tid());
- DCHECK_EQ(epoch, this->epoch());
- DCHECK_EQ(GetIgnoreBit(), false);
- }
-
- explicit FastState(u64 x)
- : x_(x) {
- }
-
- u64 raw() const {
- return x_;
- }
-
- u64 tid() const {
- u64 res = (x_ & ~kIgnoreBit) >> kTidShift;
- return res;
- }
-
- u64 TidWithIgnore() const {
- u64 res = x_ >> kTidShift;
- return res;
- }
-
- u64 epoch() const {
- u64 res = x_ & ((1ull << kClkBits) - 1);
- return res;
- }
-
- void IncrementEpoch() {
- u64 old_epoch = epoch();
- x_ += 1;
- DCHECK_EQ(old_epoch + 1, epoch());
- (void)old_epoch;
- }
-
- void SetIgnoreBit() { x_ |= kIgnoreBit; }
- void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
- bool GetIgnoreBit() const { return (s64)x_ < 0; }
-
- void SetHistorySize(int hs) {
- CHECK_GE(hs, 0);
- CHECK_LE(hs, 7);
- x_ = (x_ & ~(kHistoryMask << kHistoryShift)) | (u64(hs) << kHistoryShift);
- }
-
- ALWAYS_INLINE
- int GetHistorySize() const {
- return (int)((x_ >> kHistoryShift) & kHistoryMask);
- }
-
- void ClearHistorySize() {
- SetHistorySize(0);
- }
-
- ALWAYS_INLINE
- u64 GetTracePos() const {
- const int hs = GetHistorySize();
- // When hs == 0, the trace consists of 2 parts.
- const u64 mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1;
- return epoch() & mask;
- }
-
- private:
- friend class Shadow;
- static const int kTidShift = 64 - kTidBits - 1;
- static const u64 kIgnoreBit = 1ull << 63;
- static const u64 kFreedBit = 1ull << 63;
- static const u64 kHistoryShift = kClkBits;
- static const u64 kHistoryMask = 7;
- u64 x_;
-};
-
-// Shadow (from most significant bit):
-// freed : 1
-// tid : kTidBits
-// is_atomic : 1
-// is_read : 1
-// size_log : 2
-// addr0 : 3
-// epoch : kClkBits
-class Shadow : public FastState {
- public:
- explicit Shadow(u64 x)
- : FastState(x) {
- }
-
- explicit Shadow(const FastState &s)
- : FastState(s.x_) {
- ClearHistorySize();
- }
-
- void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
- DCHECK_EQ((x_ >> kClkBits) & 31, 0);
- DCHECK_LE(addr0, 7);
- DCHECK_LE(kAccessSizeLog, 3);
- x_ |= ((kAccessSizeLog << 3) | addr0) << kClkBits;
- DCHECK_EQ(kAccessSizeLog, size_log());
- DCHECK_EQ(addr0, this->addr0());
- }
-
- void SetWrite(unsigned kAccessIsWrite) {
- DCHECK_EQ(x_ & kReadBit, 0);
- if (!kAccessIsWrite)
- x_ |= kReadBit;
- DCHECK_EQ(kAccessIsWrite, IsWrite());
- }
-
- void SetAtomic(bool kIsAtomic) {
- DCHECK(!IsAtomic());
- if (kIsAtomic)
- x_ |= kAtomicBit;
- DCHECK_EQ(IsAtomic(), kIsAtomic);
- }
-
- bool IsAtomic() const {
- return x_ & kAtomicBit;
- }
-
- bool IsZero() const {
- return x_ == 0;
- }
-
- static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
- u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
- DCHECK_EQ(shifted_xor == 0, s1.TidWithIgnore() == s2.TidWithIgnore());
- return shifted_xor == 0;
- }
-
- static ALWAYS_INLINE
- bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
- u64 masked_xor = ((s1.x_ ^ s2.x_) >> kClkBits) & 31;
- return masked_xor == 0;
- }
-
- static ALWAYS_INLINE bool TwoRangesIntersect(Shadow s1, Shadow s2,
- unsigned kS2AccessSize) {
- bool res = false;
- u64 diff = s1.addr0() - s2.addr0();
- if ((s64)diff < 0) { // s1.addr0 < s2.addr0
- // if (s1.addr0() + size1) > s2.addr0()) return true;
- if (s1.size() > -diff)
- res = true;
- } else {
- // if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
- if (kS2AccessSize > diff)
- res = true;
- }
- DCHECK_EQ(res, TwoRangesIntersectSlow(s1, s2));
- DCHECK_EQ(res, TwoRangesIntersectSlow(s2, s1));
- return res;
- }
-
- u64 ALWAYS_INLINE addr0() const { return (x_ >> kClkBits) & 7; }
- u64 ALWAYS_INLINE size() const { return 1ull << size_log(); }
- bool ALWAYS_INLINE IsWrite() const { return !IsRead(); }
- bool ALWAYS_INLINE IsRead() const { return x_ & kReadBit; }
-
- // The idea behind the freed bit is as follows.
-  // When the memory is freed (or otherwise inaccessible) we write to the shadow
- // values with tid/epoch related to the free and the freed bit set.
-  // During memory access processing the freed bit is treated as the msb
-  // of the tid, so any access races with a shadow value that has the freed
-  // bit set (as if it were a write from a thread we never synchronized with).
- // This allows us to detect accesses to freed memory w/o additional
- // overheads in memory access processing and at the same time restore
- // tid/epoch of free.
- void MarkAsFreed() {
- x_ |= kFreedBit;
- }
-
- bool IsFreed() const {
- return x_ & kFreedBit;
- }
-
- bool GetFreedAndReset() {
- bool res = x_ & kFreedBit;
- x_ &= ~kFreedBit;
- return res;
- }
-
- bool ALWAYS_INLINE IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
- bool v = x_ & ((u64(kIsWrite ^ 1) << kReadShift)
- | (u64(kIsAtomic) << kAtomicShift));
- DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic));
- return v;
- }
-
- bool ALWAYS_INLINE IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
- bool v = ((x_ >> kReadShift) & 3)
- <= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
- DCHECK_EQ(v, (IsAtomic() < kIsAtomic) ||
- (IsAtomic() == kIsAtomic && !IsWrite() <= !kIsWrite));
- return v;
- }
-
- bool ALWAYS_INLINE IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
- bool v = ((x_ >> kReadShift) & 3)
- >= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
- DCHECK_EQ(v, (IsAtomic() > kIsAtomic) ||
- (IsAtomic() == kIsAtomic && !IsWrite() >= !kIsWrite));
- return v;
- }
-
- private:
- static const u64 kReadShift = 5 + kClkBits;
- static const u64 kReadBit = 1ull << kReadShift;
- static const u64 kAtomicShift = 6 + kClkBits;
- static const u64 kAtomicBit = 1ull << kAtomicShift;
-
- u64 size_log() const { return (x_ >> (3 + kClkBits)) & 3; }
-
- static bool TwoRangesIntersectSlow(const Shadow s1, const Shadow s2) {
- if (s1.addr0() == s2.addr0()) return true;
- if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
- return true;
- if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0())
- return true;
- return false;
- }
-};
-
struct ThreadSignalContext;
struct JmpBuf {
@@ -344,7 +118,6 @@ struct Processor {
#endif
DenseSlabAllocCache block_cache;
DenseSlabAllocCache sync_cache;
- DenseSlabAllocCache clock_cache;
DDPhysicalThread *dd_pt;
};
@@ -358,64 +131,86 @@ struct ScopedGlobalProcessor {
};
#endif
+struct TidEpoch {
+ Tid tid;
+ Epoch epoch;
+};
+
+struct TidSlot {
+ Mutex mtx;
+ Sid sid;
+ atomic_uint32_t raw_epoch;
+ ThreadState *thr;
+ Vector<TidEpoch> journal;
+ INode node;
+
+ Epoch epoch() const {
+ return static_cast<Epoch>(atomic_load(&raw_epoch, memory_order_relaxed));
+ }
+
+ void SetEpoch(Epoch v) {
+ atomic_store(&raw_epoch, static_cast<u32>(v), memory_order_relaxed);
+ }
+
+ TidSlot();
+} ALIGNED(SANITIZER_CACHE_LINE_SIZE);
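
TidSlot keeps its epoch in a relaxed atomic behind typed accessors, so concurrent readers are well-defined while callers never touch the raw u32. The same pattern in portable C++ (EpochCell is an illustrative name):

#include <atomic>
#include <cstdint>

enum class Epoch : uint32_t {};  // strongly typed, like Sid/Epoch in tsan_defs.h

class EpochCell {
 public:
  Epoch load() const {
    // Relaxed is enough: readers tolerate slightly stale values by design.
    return static_cast<Epoch>(raw_.load(std::memory_order_relaxed));
  }
  void store(Epoch v) {
    raw_.store(static_cast<uint32_t>(v), std::memory_order_relaxed);
  }

 private:
  std::atomic<uint32_t> raw_{0};
};
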
+
// This struct is stored in TLS.
struct ThreadState {
FastState fast_state;
-  // Synch epoch represents the thread's epoch before the last synchronization
-  // action. It allows us to reduce the number of shadow state updates.
-  // For example, if fast_synch_epoch=100 and the last write to addr X was at
-  // epoch=150, then when we process a write to X from the same thread at
-  // epoch=200 we do nothing, because both writes happen in the same 'synch epoch'.
- // That is, if another memory access does not race with the former write,
- // it does not race with the latter as well.
-  // QUESTION: can we squeeze this into ThreadState::Fast?
-  // E.g. ThreadState::Fast is 44 bits: 32 are taken by synch_epoch and 12 are
-  // taken by the epoch between synchs.
- // This way we can save one load from tls.
- u64 fast_synch_epoch;
+ int ignore_sync;
+#if !SANITIZER_GO
+ int ignore_interceptors;
+#endif
+ uptr *shadow_stack_pos;
+
+ // Current position in tctx->trace.Back()->events (Event*).
+ atomic_uintptr_t trace_pos;
+ // PC of the last memory access, used to compute PC deltas in the trace.
+ uptr trace_prev_pc;
+
// Technically `current` should be a separate THREADLOCAL variable;
// but it is placed here in order to share cache line with previous fields.
ThreadState* current;
+
+ atomic_sint32_t pending_signals;
+
+ VectorClock clock;
+
// This is a slow path flag. On fast path, fast_state.GetIgnoreBit() is read.
  // We do not distinguish between ignoring reads and writes
// for better performance.
int ignore_reads_and_writes;
- int ignore_sync;
int suppress_reports;
// Go does not support ignores.
#if !SANITIZER_GO
IgnoreSet mop_ignore_set;
IgnoreSet sync_ignore_set;
#endif
-  // C/C++ uses a fixed-size shadow stack embedded into Trace.
-  // Go uses a malloc-allocated shadow stack with dynamic size.
uptr *shadow_stack;
uptr *shadow_stack_end;
- uptr *shadow_stack_pos;
- u64 *racy_shadow_addr;
- u64 racy_state[2];
- MutexSet mset;
- ThreadClock clock;
#if !SANITIZER_GO
Vector<JmpBuf> jmp_bufs;
- int ignore_interceptors;
-#endif
- const u32 tid;
- const int unique_id;
- bool in_symbolizer;
+ int in_symbolizer;
+ atomic_uintptr_t in_blocking_func;
bool in_ignored_lib;
bool is_inited;
+#endif
+ MutexSet mset;
bool is_dead;
- bool is_freeing;
- bool is_vptr_access;
- const uptr stk_addr;
- const uptr stk_size;
- const uptr tls_addr;
- const uptr tls_size;
+ const Tid tid;
+ uptr stk_addr;
+ uptr stk_size;
+ uptr tls_addr;
+ uptr tls_size;
ThreadContext *tctx;
DDLogicalThread *dd_lt;
+ TidSlot *slot;
+ uptr slot_epoch;
+ bool slot_locked;
+
// Current wired Processor, or nullptr. Required to handle any events.
Processor *proc1;
#if !SANITIZER_GO
@@ -425,11 +220,11 @@ struct ThreadState {
#endif
atomic_uintptr_t in_signal_handler;
- ThreadSignalContext *signal_ctx;
+ atomic_uintptr_t signal_ctx;
#if !SANITIZER_GO
- u32 last_sleep_stack_id;
- ThreadClock last_sleep_clock;
+ StackID last_sleep_stack_id;
+ VectorClock last_sleep_clock;
#endif
// Set in regions of runtime that must be signal-safe and fork-safe.
@@ -438,47 +233,43 @@ struct ThreadState {
const ReportDesc *current_report;
- explicit ThreadState(Context *ctx, u32 tid, int unique_id, u64 epoch,
- unsigned reuse_count, uptr stk_addr, uptr stk_size,
- uptr tls_addr, uptr tls_size);
-};
+ explicit ThreadState(Tid tid);
+} ALIGNED(SANITIZER_CACHE_LINE_SIZE);
#if !SANITIZER_GO
-#if SANITIZER_MAC || SANITIZER_ANDROID
+#if SANITIZER_APPLE || SANITIZER_ANDROID
ThreadState *cur_thread();
void set_cur_thread(ThreadState *thr);
void cur_thread_finalize();
-inline void cur_thread_init() { }
-#else
+inline ThreadState *cur_thread_init() { return cur_thread(); }
+# else
__attribute__((tls_model("initial-exec")))
extern THREADLOCAL char cur_thread_placeholder[];
inline ThreadState *cur_thread() {
return reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current;
}
-inline void cur_thread_init() {
+inline ThreadState *cur_thread_init() {
ThreadState *thr = reinterpret_cast<ThreadState *>(cur_thread_placeholder);
if (UNLIKELY(!thr->current))
thr->current = thr;
+ return thr->current;
}
inline void set_cur_thread(ThreadState *thr) {
reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current = thr;
}
inline void cur_thread_finalize() { }
-#endif // SANITIZER_MAC || SANITIZER_ANDROID
+# endif // SANITIZER_APPLE || SANITIZER_ANDROID
#endif // SANITIZER_GO
class ThreadContext final : public ThreadContextBase {
public:
- explicit ThreadContext(int tid);
+ explicit ThreadContext(Tid tid);
~ThreadContext();
ThreadState *thr;
- u32 creation_stack_id;
- SyncClock sync;
- // Epoch at which the thread had started.
- // If we see an event from the thread stamped by an older epoch,
- // the event is from a dead thread that shared tid with this thread.
- u64 epoch0;
- u64 epoch1;
+ StackID creation_stack_id;
+ VectorClock *sync;
+ uptr sync_epoch;
+ Trace trace;
// Override superclass callbacks.
void OnDead() override;
@@ -492,13 +283,7 @@ class ThreadContext final : public ThreadContextBase {
struct RacyStacks {
MD5Hash hash[2];
- bool operator==(const RacyStacks &other) const {
- if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
- return true;
- if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
- return true;
- return false;
- }
+ bool operator==(const RacyStacks &other) const;
};
struct RacyAddress {
@@ -524,28 +309,75 @@ struct Context {
Mutex report_mtx;
int nreported;
- int nmissed_expected;
atomic_uint64_t last_symbolize_time_ns;
void *background_thread;
atomic_uint32_t stop_background_thread;
- ThreadRegistry *thread_registry;
+ ThreadRegistry thread_registry;
+
+ // This is used to prevent a very unlikely but very pathological behavior.
+ // Since memory access handling is not synchronized with DoReset,
+ // a thread running concurrently with DoReset can leave a bogus shadow value
+ // that will be later falsely detected as a race. For such false races
+ // RestoreStack will return false and we will not report it.
+ // However, consider that a thread leaves a whole lot of such bogus values
+ // and these values are later read by a whole lot of threads.
+ // This will cause massive amounts of ReportRace calls and lots of
+ // serialization. In very pathological cases the resulting slowdown
+ // can be >100x. This is very unlikely, but it was presumably observed
+ // in practice: https://github.com/google/sanitizers/issues/1552
+  // If this happens, the previous access sid+epoch will be the same for all of
+  // these false races, because if the thread tries to increment the epoch, it
+  // will notice that DoReset has happened and will stop producing bogus shadow
+ // values. So, last_spurious_race is used to remember the last sid+epoch
+ // for which RestoreStack returned false. Then it is used to filter out
+ // races with the same sid+epoch very early and quickly.
+ // It is of course possible that multiple threads left multiple bogus shadow
+ // values and all of them are read by lots of threads at the same time.
+ // In such case last_spurious_race will only be able to deduplicate a few
+ // races from one thread, then few from another and so on. An alternative
+ // would be to hold an array of such sid+epoch, but we consider such scenario
+ // as even less likely.
+ // Note: this can lead to some rare false negatives as well:
+ // 1. When a legit access with the same sid+epoch participates in a race
+ // as the "previous" memory access, it will be wrongly filtered out.
+ // 2. When RestoreStack returns false for a legit memory access because it
+ // was already evicted from the thread trace, we will still remember it in
+ // last_spurious_race. Then if there is another racing memory access from
+ // the same thread that happened in the same epoch, but was stored in the
+ // next thread trace part (which is still preserved in the thread trace),
+ // we will also wrongly filter it out while RestoreStack would actually
+ // succeed for that second memory access.
+ RawShadow last_spurious_race;
Mutex racy_mtx;
Vector<RacyStacks> racy_stacks;
- Vector<RacyAddress> racy_addresses;
// Number of fired suppressions may be large enough.
Mutex fired_suppressions_mtx;
InternalMmapVector<FiredSuppression> fired_suppressions;
DDetector *dd;
- ClockAlloc clock_alloc;
-
Flags flags;
-
- u64 int_alloc_cnt[MBlockTypeCount];
- u64 int_alloc_siz[MBlockTypeCount];
+ fd_t memprof_fd;
+
+ // The last slot index (kFreeSid) is used to denote freed memory.
+ TidSlot slots[kThreadSlotCount - 1];
+
+ // Protects global_epoch, slot_queue, trace_part_recycle.
+ Mutex slot_mtx;
+ uptr global_epoch; // guarded by slot_mtx and by all slot mutexes
+ bool resetting; // global reset is in progress
+ IList<TidSlot, &TidSlot::node> slot_queue SANITIZER_GUARDED_BY(slot_mtx);
+ IList<TraceHeader, &TraceHeader::global, TracePart> trace_part_recycle
+ SANITIZER_GUARDED_BY(slot_mtx);
+ uptr trace_part_total_allocated SANITIZER_GUARDED_BY(slot_mtx);
+ uptr trace_part_recycle_finished SANITIZER_GUARDED_BY(slot_mtx);
+ uptr trace_part_finished_excess SANITIZER_GUARDED_BY(slot_mtx);
+#if SANITIZER_GO
+ uptr mapped_shadow_begin;
+ uptr mapped_shadow_end;
+#endif
};
extern Context *ctx; // The one and the only global runtime context.
@@ -574,17 +406,17 @@ uptr TagFromShadowStackFrame(uptr pc);
class ScopedReportBase {
public:
- void AddMemoryAccess(uptr addr, uptr external_tag, Shadow s, StackTrace stack,
- const MutexSet *mset);
+ void AddMemoryAccess(uptr addr, uptr external_tag, Shadow s, Tid tid,
+ StackTrace stack, const MutexSet *mset);
void AddStack(StackTrace stack, bool suppressable = false);
void AddThread(const ThreadContext *tctx, bool suppressable = false);
- void AddThread(int unique_tid, bool suppressable = false);
- void AddUniqueTid(int unique_tid);
- void AddMutex(const SyncVar *s);
- u64 AddMutex(u64 id);
+ void AddThread(Tid tid, bool suppressable = false);
+ void AddUniqueTid(Tid unique_tid);
+ int AddMutex(uptr addr, StackID creation_stack_id);
void AddLocation(uptr addr, uptr size);
- void AddSleep(u32 stack_id);
+ void AddSleep(StackID stack_id);
void SetCount(int count);
+ void SetSigNum(int sig);
const ReportDesc *GetReport() const;
@@ -598,8 +430,6 @@ class ScopedReportBase {
// at best it will cause deadlocks on internal mutexes.
ScopedIgnoreInterceptors ignore_interceptors_;
- void AddDeadMutex(u64 id);
-
ScopedReportBase(const ScopedReportBase &) = delete;
void operator=(const ScopedReportBase &) = delete;
};
@@ -615,8 +445,6 @@ class ScopedReport : public ScopedReportBase {
bool ShouldReport(ThreadState *thr, ReportType typ);
ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack);
-void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
- MutexSet *mset, uptr *tag = nullptr);
// The stack could look like:
// <start> | <main> | <foo> | tag | <bar>
@@ -656,19 +484,20 @@ void MapThreadTrace(uptr addr, uptr size, const char *name);
void DontNeedShadowFor(uptr addr, uptr size);
void UnmapShadow(ThreadState *thr, uptr addr, uptr size);
void InitializeShadowMemory();
+void DontDumpShadow(uptr addr, uptr size);
void InitializeInterceptors();
void InitializeLibIgnore();
void InitializeDynamicAnnotations();
void ForkBefore(ThreadState *thr, uptr pc);
void ForkParentAfter(ThreadState *thr, uptr pc);
-void ForkChildAfter(ThreadState *thr, uptr pc);
+void ForkChildAfter(ThreadState *thr, uptr pc, bool start_thread);
-void ReportRace(ThreadState *thr);
+void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old,
+ AccessType typ);
bool OutputReport(ThreadState *thr, const ScopedReport &srep);
bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace);
bool IsExpectedReport(uptr addr, uptr size);
-void PrintMatchedBenignRaces();
#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
# define DPrintf Printf
@@ -682,10 +511,11 @@ void PrintMatchedBenignRaces();
# define DPrintf2(...)
#endif
-u32 CurrentStackId(ThreadState *thr, uptr pc);
-ReportStack *SymbolizeStackId(u32 stack_id);
+StackID CurrentStackId(ThreadState *thr, uptr pc);
+ReportStack *SymbolizeStackId(StackID stack_id);
void PrintCurrentStack(ThreadState *thr, uptr pc);
void PrintCurrentStackSlow(uptr pc); // uses libunwind
+MBlock *JavaHeapBlock(uptr addr, uptr *start);
void Initialize(ThreadState *thr);
void MaybeSpawnBackgroundThread();
@@ -694,69 +524,49 @@ int Finalize(ThreadState *thr);
void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write);
void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write);
-void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
- int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic);
-void MemoryAccessImpl(ThreadState *thr, uptr addr,
- int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
- u64 *shadow_mem, Shadow cur);
-void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
- uptr size, bool is_write);
-void MemoryAccessRangeStep(ThreadState *thr, uptr pc, uptr addr,
- uptr size, uptr step, bool is_write);
-void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
- int size, bool kAccessIsWrite, bool kIsAtomic);
-
-const int kSizeLog1 = 0;
-const int kSizeLog2 = 1;
-const int kSizeLog4 = 2;
-const int kSizeLog8 = 3;
-
-void ALWAYS_INLINE MemoryRead(ThreadState *thr, uptr pc,
- uptr addr, int kAccessSizeLog) {
- MemoryAccess(thr, pc, addr, kAccessSizeLog, false, false);
-}
-
-void ALWAYS_INLINE MemoryWrite(ThreadState *thr, uptr pc,
- uptr addr, int kAccessSizeLog) {
- MemoryAccess(thr, pc, addr, kAccessSizeLog, true, false);
-}
-
-void ALWAYS_INLINE MemoryReadAtomic(ThreadState *thr, uptr pc,
- uptr addr, int kAccessSizeLog) {
- MemoryAccess(thr, pc, addr, kAccessSizeLog, false, true);
-}
-
-void ALWAYS_INLINE MemoryWriteAtomic(ThreadState *thr, uptr pc,
- uptr addr, int kAccessSizeLog) {
- MemoryAccess(thr, pc, addr, kAccessSizeLog, true, true);
+void MemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
+ AccessType typ);
+void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
+ AccessType typ);
+// This creates 2 non-inlined specialized versions of MemoryAccessRange.
+template <bool is_read>
+void MemoryAccessRangeT(ThreadState *thr, uptr pc, uptr addr, uptr size);
+
+ALWAYS_INLINE
+void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
+ bool is_write) {
+ if (size == 0)
+ return;
+ if (is_write)
+ MemoryAccessRangeT<false>(thr, pc, addr, size);
+ else
+ MemoryAccessRangeT<true>(thr, pc, addr, size);
}
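
The wrapper above turns one runtime flag into a compile-time constant: each MemoryAccessRangeT instantiation is a separate out-of-line body with the is_read branches folded away, while the always-inline dispatcher costs one predictable branch. The same shape in isolation (AccessRangeT is an illustrative stand-in, not the real function):

#include <cstddef>
#include <cstdio>

template <bool kIsRead>
__attribute__((noinline)) void AccessRangeT(const void *addr, size_t size) {
  // kIsRead is a compile-time constant here, so this branch costs nothing
  // inside each of the two generated specializations.
  std::printf("%s %zu bytes at %p\n", kIsRead ? "read" : "write", size, addr);
}

inline void AccessRange(const void *addr, size_t size, bool is_write) {
  if (size == 0)
    return;
  if (is_write)
    AccessRangeT<false>(addr, size);  // is_read = false
  else
    AccessRangeT<true>(addr, size);
}
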
-void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
+void ShadowSet(RawShadow *p, RawShadow *end, RawShadow v);
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
+void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWriteOrResetRange(ThreadState *thr, uptr pc, uptr addr,
uptr size);
-void ThreadIgnoreBegin(ThreadState *thr, uptr pc, bool save_stack = true);
-void ThreadIgnoreEnd(ThreadState *thr, uptr pc);
-void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc, bool save_stack = true);
-void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc);
-
-void FuncEntry(ThreadState *thr, uptr pc);
-void FuncExit(ThreadState *thr);
+void ThreadIgnoreBegin(ThreadState *thr, uptr pc);
+void ThreadIgnoreEnd(ThreadState *thr);
+void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc);
+void ThreadIgnoreSyncEnd(ThreadState *thr);
-int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
-void ThreadStart(ThreadState *thr, int tid, tid_t os_id,
+Tid ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
+void ThreadStart(ThreadState *thr, Tid tid, tid_t os_id,
ThreadType thread_type);
void ThreadFinish(ThreadState *thr);
-int ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid);
-void ThreadJoin(ThreadState *thr, uptr pc, int tid);
-void ThreadDetach(ThreadState *thr, uptr pc, int tid);
+Tid ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid);
+void ThreadJoin(ThreadState *thr, uptr pc, Tid tid);
+void ThreadDetach(ThreadState *thr, uptr pc, Tid tid);
void ThreadFinalize(ThreadState *thr);
void ThreadSetName(ThreadState *thr, const char *name);
int ThreadCount(ThreadState *thr);
-void ProcessPendingSignals(ThreadState *thr);
-void ThreadNotJoined(ThreadState *thr, uptr pc, int tid, uptr uid);
+void ProcessPendingSignalsImpl(ThreadState *thr);
+void ThreadNotJoined(ThreadState *thr, uptr pc, Tid tid, uptr uid);
Processor *ProcCreate();
void ProcDestroy(Processor *proc);
@@ -785,65 +595,12 @@ void Acquire(ThreadState *thr, uptr pc, uptr addr);
// handle Go finalizers. Namely, finalizer goroutine executes AcquireGlobal
// right before executing finalizers. This provides a coarse, but simple
// approximation of the actual required synchronization.
-void AcquireGlobal(ThreadState *thr, uptr pc);
+void AcquireGlobal(ThreadState *thr);
void Release(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
void AfterSleep(ThreadState *thr, uptr pc);
-void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c);
-void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
-void ReleaseStoreAcquireImpl(ThreadState *thr, uptr pc, SyncClock *c);
-void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c);
-void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
-
-// The hacky call uses custom calling convention and an assembly thunk.
-// It is considerably faster than a normal call for the caller
-// if it is not executed (it is intended for slow paths from hot functions).
-// The trick is that the call preserves all registers and the compiler
-// does not treat it as a call.
-// If it does not work for you, use normal call.
-#if !SANITIZER_DEBUG && defined(__x86_64__) && !SANITIZER_MAC
-// The caller may not create the stack frame for itself at all,
-// so we create a reserve stack frame for it (1024b must be enough).
-#define HACKY_CALL(f) \
- __asm__ __volatile__("sub $1024, %%rsp;" \
- CFI_INL_ADJUST_CFA_OFFSET(1024) \
- ".hidden " #f "_thunk;" \
- "call " #f "_thunk;" \
- "add $1024, %%rsp;" \
- CFI_INL_ADJUST_CFA_OFFSET(-1024) \
- ::: "memory", "cc");
-#else
-#define HACKY_CALL(f) f()
-#endif
-
-void TraceSwitch(ThreadState *thr);
-uptr TraceTopPC(ThreadState *thr);
-uptr TraceSize();
-uptr TraceParts();
-Trace *ThreadTrace(int tid);
-
-extern "C" void __tsan_trace_switch();
-void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
- EventType typ, u64 addr) {
- if (!kCollectHistory)
- return;
- DCHECK_GE((int)typ, 0);
- DCHECK_LE((int)typ, 7);
- DCHECK_EQ(GetLsb(addr, kEventPCBits), addr);
- u64 pos = fs.GetTracePos();
- if (UNLIKELY((pos % kTracePartSize) == 0)) {
-#if !SANITIZER_GO
- HACKY_CALL(__tsan_trace_switch);
-#else
- TraceSwitch(thr);
-#endif
- }
- Event *trace = (Event*)GetThreadTrace(fs.tid());
- Event *evp = &trace[pos];
- Event ev = (u64)addr | ((u64)typ << kEventPCBits);
- *evp = ev;
-}
+void IncrementEpoch(ThreadState *thr);
#if !SANITIZER_GO
uptr ALWAYS_INLINE HeapEnd() {
@@ -851,6 +608,13 @@ uptr ALWAYS_INLINE HeapEnd() {
}
#endif
+void SlotAttachAndLock(ThreadState *thr) SANITIZER_ACQUIRE(thr->slot->mtx);
+void SlotDetach(ThreadState *thr);
+void SlotLock(ThreadState *thr) SANITIZER_ACQUIRE(thr->slot->mtx);
+void SlotUnlock(ThreadState *thr) SANITIZER_RELEASE(thr->slot->mtx);
+void DoReset(ThreadState *thr, uptr epoch);
+void FlushShadowMemory();
+
ThreadState *FiberCreate(ThreadState *thr, uptr pc, unsigned flags);
void FiberDestroy(ThreadState *thr, uptr pc, ThreadState *fiber);
void FiberSwitch(ThreadState *thr, uptr pc, ThreadState *fiber, unsigned flags);
@@ -861,6 +625,189 @@ enum FiberSwitchFlags {
FiberSwitchFlagNoSync = 1 << 0, // __tsan_switch_to_fiber_no_sync
};
+class SlotLocker {
+ public:
+ ALWAYS_INLINE
+ SlotLocker(ThreadState *thr, bool recursive = false)
+ : thr_(thr), locked_(recursive ? thr->slot_locked : false) {
+#if !SANITIZER_GO
+ // We are in trouble if we are here with in_blocking_func set.
+ // If in_blocking_func is set, all signals will be delivered synchronously,
+ // which means we can't lock slots since the signal handler will try
+  // to lock them recursively and deadlock.
+ DCHECK(!atomic_load(&thr->in_blocking_func, memory_order_relaxed));
+#endif
+ if (!locked_)
+ SlotLock(thr_);
+ }
+
+ ALWAYS_INLINE
+ ~SlotLocker() {
+ if (!locked_)
+ SlotUnlock(thr_);
+ }
+
+ private:
+ ThreadState *thr_;
+ bool locked_;
+};
+
+class SlotUnlocker {
+ public:
+ SlotUnlocker(ThreadState *thr) : thr_(thr), locked_(thr->slot_locked) {
+ if (locked_)
+ SlotUnlock(thr_);
+ }
+
+ ~SlotUnlocker() {
+ if (locked_)
+ SlotLock(thr_);
+ }
+
+ private:
+ ThreadState *thr_;
+ bool locked_;
+};
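
SlotLocker and SlotUnlocker are conditional RAII guards: each records in its constructor whether it actually changed the lock state and undoes only that in its destructor, which lets scopes nest under callers that already hold the slot. A distilled version of the pattern with std::mutex:

#include <mutex>

class CondLocker {
 public:
  CondLocker(std::mutex &m, bool already_held)
      : m_(m), did_lock_(!already_held) {
    if (did_lock_)
      m_.lock();
  }
  ~CondLocker() {
    if (did_lock_)
      m_.unlock();  // undo only what this guard did
  }

 private:
  std::mutex &m_;
  bool did_lock_;
};

void leaf(std::mutex &m, bool caller_holds) {
  CondLocker guard(m, caller_holds);  // no-op when the caller already locked
  // ... work under m ...
}
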
+
+ALWAYS_INLINE void ProcessPendingSignals(ThreadState *thr) {
+ if (UNLIKELY(atomic_load_relaxed(&thr->pending_signals)))
+ ProcessPendingSignalsImpl(thr);
+}
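
This is the canonical fast-path/slow-path split: the inlined check is a single relaxed load predicted not-taken, and all real work lives in the out-of-line Impl function. A generic sketch of the same structure:

#include <atomic>

static std::atomic<int> g_pending{0};

__attribute__((noinline)) static void DrainPendingImpl() {
  // Cold path: claim everything queued so far and handle it.
  int n = g_pending.exchange(0, std::memory_order_acq_rel);
  (void)n;  // ... handle n pending items ...
}

inline void DrainPending() {
  // Hot path: one relaxed load, kept small enough to inline everywhere.
  if (__builtin_expect(g_pending.load(std::memory_order_relaxed) != 0, 0))
    DrainPendingImpl();
}
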
+
+extern bool is_initialized;
+
+ALWAYS_INLINE
+void LazyInitialize(ThreadState *thr) {
+ // If we can use .preinit_array, assume that __tsan_init
+  // called from .preinit_array initializes the runtime before
+ // any instrumented code except when tsan is used as a
+ // shared library.
+#if (!SANITIZER_CAN_USE_PREINIT_ARRAY || defined(SANITIZER_SHARED))
+ if (UNLIKELY(!is_initialized))
+ Initialize(thr);
+#endif
+}
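
On ELF targets the lazy check compiles away because an entry in .preinit_array runs before any constructor or instrumented code. A sketch of registering such an entry (Linux/ELF only; the section attribute is the mechanism, the names here are illustrative):

// The loader invokes every pointer in .preinit_array before constructors
// run, so early_init executes ahead of all instrumented code and no
// per-call init check is needed.
extern "C" void early_init(void) {
  // runtime bootstrap would go here
}

__attribute__((section(".preinit_array"), used))
static void (*preinit_entry)(void) = early_init;

int main() { return 0; }  // early_init already ran by this point
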
+
+void TraceResetForTesting();
+void TraceSwitchPart(ThreadState *thr);
+void TraceSwitchPartImpl(ThreadState *thr);
+bool RestoreStack(EventType type, Sid sid, Epoch epoch, uptr addr, uptr size,
+ AccessType typ, Tid *ptid, VarSizeStackTrace *pstk,
+ MutexSet *pmset, uptr *ptag);
+
+template <typename EventT>
+ALWAYS_INLINE WARN_UNUSED_RESULT bool TraceAcquire(ThreadState *thr,
+ EventT **ev) {
+ // TraceSwitchPart accesses shadow_stack, but it's called infrequently,
+ // so we check it here proactively.
+ DCHECK(thr->shadow_stack);
+ Event *pos = reinterpret_cast<Event *>(atomic_load_relaxed(&thr->trace_pos));
+#if SANITIZER_DEBUG
+ // TraceSwitch acquires these mutexes,
+ // so we lock them here to detect deadlocks more reliably.
+ { Lock lock(&ctx->slot_mtx); }
+ { Lock lock(&thr->tctx->trace.mtx); }
+ TracePart *current = thr->tctx->trace.parts.Back();
+ if (current) {
+ DCHECK_GE(pos, &current->events[0]);
+ DCHECK_LE(pos, &current->events[TracePart::kSize]);
+ } else {
+ DCHECK_EQ(pos, nullptr);
+ }
+#endif
+ // TracePart is allocated with mmap and is at least 4K aligned.
+ // So the following check is a faster way to check for part end.
+  // It may have false positives in the middle of the trace;
+ // they are filtered out in TraceSwitch.
+ if (UNLIKELY(((uptr)(pos + 1) & TracePart::kAlignment) == 0))
+ return false;
+ *ev = reinterpret_cast<EventT *>(pos);
+ return true;
+}
+
+template <typename EventT>
+ALWAYS_INLINE void TraceRelease(ThreadState *thr, EventT *evp) {
+ DCHECK_LE(evp + 1, &thr->tctx->trace.parts.Back()->events[TracePart::kSize]);
+ atomic_store_relaxed(&thr->trace_pos, (uptr)(evp + 1));
+}
+
+template <typename EventT>
+void TraceEvent(ThreadState *thr, EventT ev) {
+ EventT *evp;
+ if (!TraceAcquire(thr, &evp)) {
+ TraceSwitchPart(thr);
+ UNUSED bool res = TraceAcquire(thr, &evp);
+ DCHECK(res);
+ }
+ *evp = ev;
+ TraceRelease(thr, evp);
+}
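
TraceEvent encodes the contract the functions above establish: the fast path may fail only at a part boundary, the switch runs at most once, and the retry is guaranteed to succeed. The same try/refill/retry-once shape in miniature (all names are illustrative):

#include <cassert>
#include <cstddef>
#include <cstdint>

struct Buf {
  uint64_t *pos = nullptr, *end = nullptr;
};

bool TryAppend(Buf &b, uint64_t v) {
  if (b.pos == b.end)
    return false;  // boundary: caller must refill first
  *b.pos++ = v;
  return true;
}

void Refill(Buf &b) {
  static uint64_t storage[2][1024];
  static int which = 0;
  which ^= 1;  // flip to the other backing part
  b.pos = storage[which];
  b.end = storage[which] + 1024;
}

void Append(Buf &b, uint64_t v) {
  if (TryAppend(b, v))
    return;        // fast path
  Refill(b);       // slow path, at most once per boundary
  bool ok = TryAppend(b, v);
  assert(ok && "refill must leave room");
  (void)ok;
}
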
+
+ALWAYS_INLINE WARN_UNUSED_RESULT bool TryTraceFunc(ThreadState *thr,
+ uptr pc = 0) {
+ if (!kCollectHistory)
+ return true;
+ EventFunc *ev;
+ if (UNLIKELY(!TraceAcquire(thr, &ev)))
+ return false;
+ ev->is_access = 0;
+ ev->is_func = 1;
+ ev->pc = pc;
+ TraceRelease(thr, ev);
+ return true;
+}
+
+WARN_UNUSED_RESULT
+bool TryTraceMemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
+ AccessType typ);
+WARN_UNUSED_RESULT
+bool TryTraceMemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
+ AccessType typ);
+void TraceMemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
+ AccessType typ);
+void TraceFunc(ThreadState *thr, uptr pc = 0);
+void TraceMutexLock(ThreadState *thr, EventType type, uptr pc, uptr addr,
+ StackID stk);
+void TraceMutexUnlock(ThreadState *thr, uptr addr);
+void TraceTime(ThreadState *thr);
+
+void TraceRestartFuncExit(ThreadState *thr);
+void TraceRestartFuncEntry(ThreadState *thr, uptr pc);
+
+void GrowShadowStack(ThreadState *thr);
+
+ALWAYS_INLINE
+void FuncEntry(ThreadState *thr, uptr pc) {
+ DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.sid(), (void *)pc);
+ if (UNLIKELY(!TryTraceFunc(thr, pc)))
+ return TraceRestartFuncEntry(thr, pc);
+ DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
+#if !SANITIZER_GO
+ DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
+#else
+ if (thr->shadow_stack_pos == thr->shadow_stack_end)
+ GrowShadowStack(thr);
+#endif
+ thr->shadow_stack_pos[0] = pc;
+ thr->shadow_stack_pos++;
+}
+
+ALWAYS_INLINE
+void FuncExit(ThreadState *thr) {
+ DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.sid());
+ if (UNLIKELY(!TryTraceFunc(thr, 0)))
+ return TraceRestartFuncExit(thr);
+ DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
+#if !SANITIZER_GO
+ DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
+#endif
+ thr->shadow_stack_pos--;
+}
+
+#if !SANITIZER_GO
+extern void (*on_initialize)(void);
+extern int (*on_finalize)(int);
+#endif
} // namespace __tsan
#endif // TSAN_RTL_H
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_aarch64.S b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_aarch64.S
index e0b4c71dfed9..c6162659b876 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_aarch64.S
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_aarch64.S
@@ -3,28 +3,6 @@
#include "sanitizer_common/sanitizer_asm.h"
-#if defined(__APPLE__)
-.align 2
-
-.section __DATA,__nl_symbol_ptr,non_lazy_symbol_pointers
-.long _setjmp$non_lazy_ptr
-_setjmp$non_lazy_ptr:
-.indirect_symbol _setjmp
-.long 0
-
-.section __DATA,__nl_symbol_ptr,non_lazy_symbol_pointers
-.long __setjmp$non_lazy_ptr
-__setjmp$non_lazy_ptr:
-.indirect_symbol __setjmp
-.long 0
-
-.section __DATA,__nl_symbol_ptr,non_lazy_symbol_pointers
-.long _sigsetjmp$non_lazy_ptr
-_sigsetjmp$non_lazy_ptr:
-.indirect_symbol _sigsetjmp
-.long 0
-#endif
-
#if !defined(__APPLE__)
.section .text
#else
@@ -75,9 +53,8 @@ ASM_SYMBOL_INTERCEPTOR(setjmp):
ldr x1, [x1, #:got_lo12:_ZN14__interception11real_setjmpE]
ldr x1, [x1]
#else
- adrp x1, _setjmp$non_lazy_ptr@page
- add x1, x1, _setjmp$non_lazy_ptr@pageoff
- ldr x1, [x1]
+ adrp x1, _setjmp@GOTPAGE
+ ldr x1, [x1, _setjmp@GOTPAGEOFF]
#endif
br x1
@@ -126,9 +103,8 @@ ASM_SYMBOL_INTERCEPTOR(_setjmp):
ldr x1, [x1, #:got_lo12:_ZN14__interception12real__setjmpE]
ldr x1, [x1]
#else
- adrp x1, __setjmp$non_lazy_ptr@page
- add x1, x1, __setjmp$non_lazy_ptr@pageoff
- ldr x1, [x1]
+ adrp x1, __setjmp@GOTPAGE
+ ldr x1, [x1, __setjmp@GOTPAGEOFF]
#endif
br x1
@@ -179,9 +155,8 @@ ASM_SYMBOL_INTERCEPTOR(sigsetjmp):
ldr x2, [x2, #:got_lo12:_ZN14__interception14real_sigsetjmpE]
ldr x2, [x2]
#else
- adrp x2, _sigsetjmp$non_lazy_ptr@page
- add x2, x2, _sigsetjmp$non_lazy_ptr@pageoff
- ldr x2, [x2]
+ adrp x2, _sigsetjmp@GOTPAGE
+ ldr x2, [x2, _sigsetjmp@GOTPAGEOFF]
#endif
br x2
CFI_ENDPROC
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp
new file mode 100644
index 000000000000..8b20984a0100
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp
@@ -0,0 +1,744 @@
+//===-- tsan_rtl_access.cpp -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Definitions of memory access and function entry/exit entry points.
+//===----------------------------------------------------------------------===//
+
+#include "tsan_rtl.h"
+
+namespace __tsan {
+
+ALWAYS_INLINE USED bool TryTraceMemoryAccess(ThreadState* thr, uptr pc,
+ uptr addr, uptr size,
+ AccessType typ) {
+ DCHECK(size == 1 || size == 2 || size == 4 || size == 8);
+ if (!kCollectHistory)
+ return true;
+ EventAccess* ev;
+ if (UNLIKELY(!TraceAcquire(thr, &ev)))
+ return false;
+ u64 size_log = size == 1 ? 0 : size == 2 ? 1 : size == 4 ? 2 : 3;
+ uptr pc_delta = pc - thr->trace_prev_pc + (1 << (EventAccess::kPCBits - 1));
+ thr->trace_prev_pc = pc;
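+  // The (1 << (kPCBits - 1)) bias maps the signed delta from the previous
+  // PC into an unsigned range, so the single bound check below accepts
+  // deltas in [-2^(kPCBits-1), 2^(kPCBits-1)); anything larger falls
+  // through to the extended event below.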
+ if (LIKELY(pc_delta < (1 << EventAccess::kPCBits))) {
+ ev->is_access = 1;
+ ev->is_read = !!(typ & kAccessRead);
+ ev->is_atomic = !!(typ & kAccessAtomic);
+ ev->size_log = size_log;
+ ev->pc_delta = pc_delta;
+ DCHECK_EQ(ev->pc_delta, pc_delta);
+ ev->addr = CompressAddr(addr);
+ TraceRelease(thr, ev);
+ return true;
+ }
+ auto* evex = reinterpret_cast<EventAccessExt*>(ev);
+ evex->is_access = 0;
+ evex->is_func = 0;
+ evex->type = EventType::kAccessExt;
+ evex->is_read = !!(typ & kAccessRead);
+ evex->is_atomic = !!(typ & kAccessAtomic);
+ evex->size_log = size_log;
+ // Note: this is important, see comment in EventAccessExt.
+ evex->_ = 0;
+ evex->addr = CompressAddr(addr);
+ evex->pc = pc;
+ TraceRelease(thr, evex);
+ return true;
+}
+
+ALWAYS_INLINE
+bool TryTraceMemoryAccessRange(ThreadState* thr, uptr pc, uptr addr, uptr size,
+ AccessType typ) {
+ if (!kCollectHistory)
+ return true;
+ EventAccessRange* ev;
+ if (UNLIKELY(!TraceAcquire(thr, &ev)))
+ return false;
+ thr->trace_prev_pc = pc;
+ ev->is_access = 0;
+ ev->is_func = 0;
+ ev->type = EventType::kAccessRange;
+ ev->is_read = !!(typ & kAccessRead);
+ ev->is_free = !!(typ & kAccessFree);
+ ev->size_lo = size;
+ ev->pc = CompressAddr(pc);
+ ev->addr = CompressAddr(addr);
+ ev->size_hi = size >> EventAccessRange::kSizeLoBits;
+ TraceRelease(thr, ev);
+ return true;
+}
+
+void TraceMemoryAccessRange(ThreadState* thr, uptr pc, uptr addr, uptr size,
+ AccessType typ) {
+ if (LIKELY(TryTraceMemoryAccessRange(thr, pc, addr, size, typ)))
+ return;
+ TraceSwitchPart(thr);
+ UNUSED bool res = TryTraceMemoryAccessRange(thr, pc, addr, size, typ);
+ DCHECK(res);
+}
+
+void TraceFunc(ThreadState* thr, uptr pc) {
+ if (LIKELY(TryTraceFunc(thr, pc)))
+ return;
+ TraceSwitchPart(thr);
+ UNUSED bool res = TryTraceFunc(thr, pc);
+ DCHECK(res);
+}
+
+NOINLINE void TraceRestartFuncEntry(ThreadState* thr, uptr pc) {
+ TraceSwitchPart(thr);
+ FuncEntry(thr, pc);
+}
+
+NOINLINE void TraceRestartFuncExit(ThreadState* thr) {
+ TraceSwitchPart(thr);
+ FuncExit(thr);
+}
+
+void TraceMutexLock(ThreadState* thr, EventType type, uptr pc, uptr addr,
+ StackID stk) {
+ DCHECK(type == EventType::kLock || type == EventType::kRLock);
+ if (!kCollectHistory)
+ return;
+ EventLock ev;
+ ev.is_access = 0;
+ ev.is_func = 0;
+ ev.type = type;
+ ev.pc = CompressAddr(pc);
+ ev.stack_lo = stk;
+ ev.stack_hi = stk >> EventLock::kStackIDLoBits;
+ ev._ = 0;
+ ev.addr = CompressAddr(addr);
+ TraceEvent(thr, ev);
+}
+
+void TraceMutexUnlock(ThreadState* thr, uptr addr) {
+ if (!kCollectHistory)
+ return;
+ EventUnlock ev;
+ ev.is_access = 0;
+ ev.is_func = 0;
+ ev.type = EventType::kUnlock;
+ ev._ = 0;
+ ev.addr = CompressAddr(addr);
+ TraceEvent(thr, ev);
+}
+
+void TraceTime(ThreadState* thr) {
+ if (!kCollectHistory)
+ return;
+ FastState fast_state = thr->fast_state;
+ EventTime ev;
+ ev.is_access = 0;
+ ev.is_func = 0;
+ ev.type = EventType::kTime;
+ ev.sid = static_cast<u64>(fast_state.sid());
+ ev.epoch = static_cast<u64>(fast_state.epoch());
+ ev._ = 0;
+ TraceEvent(thr, ev);
+}
+
+NOINLINE void DoReportRace(ThreadState* thr, RawShadow* shadow_mem, Shadow cur,
+ Shadow old,
+ AccessType typ) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
+  // For the free shadow markers the first element (the one that contains
+  // kFreeSid) triggers the race, but the second element contains info about
+  // the freeing thread, so take that one instead.
+ if (old.sid() == kFreeSid)
+ old = Shadow(LoadShadow(&shadow_mem[1]));
+  // This prevents trapping on this address in the future.
+ for (uptr i = 0; i < kShadowCnt; i++)
+ StoreShadow(&shadow_mem[i], i == 0 ? Shadow::kRodata : Shadow::kEmpty);
+ // See the comment in MemoryRangeFreed as to why the slot is locked
+ // for free memory accesses. ReportRace must not be called with
+ // the slot locked because of the fork. But MemoryRangeFreed is not
+ // called during fork because fork sets ignore_reads_and_writes,
+ // so simply unlocking the slot should be fine.
+ if (typ & kAccessSlotLocked)
+ SlotUnlock(thr);
+ ReportRace(thr, shadow_mem, cur, Shadow(old), typ);
+ if (typ & kAccessSlotLocked)
+ SlotLock(thr);
+}
+
+#if !TSAN_VECTORIZE
+ALWAYS_INLINE
+bool ContainsSameAccess(RawShadow* s, Shadow cur, int unused0, int unused1,
+ AccessType typ) {
+ for (uptr i = 0; i < kShadowCnt; i++) {
+ auto old = LoadShadow(&s[i]);
+ if (!(typ & kAccessRead)) {
+ if (old == cur.raw())
+ return true;
+ continue;
+ }
+ auto masked = static_cast<RawShadow>(static_cast<u32>(old) |
+ static_cast<u32>(Shadow::kRodata));
+ if (masked == cur.raw())
+ return true;
+ if (!(typ & kAccessNoRodata) && !SANITIZER_GO) {
+ if (old == Shadow::kRodata)
+ return true;
+ }
+ }
+ return false;
+}
+
+ALWAYS_INLINE
+bool CheckRaces(ThreadState* thr, RawShadow* shadow_mem, Shadow cur,
+ int unused0, int unused1, AccessType typ) {
+ bool stored = false;
+ for (uptr idx = 0; idx < kShadowCnt; idx++) {
+ RawShadow* sp = &shadow_mem[idx];
+ Shadow old(LoadShadow(sp));
+ if (LIKELY(old.raw() == Shadow::kEmpty)) {
+ if (!(typ & kAccessCheckOnly) && !stored)
+ StoreShadow(sp, cur.raw());
+ return false;
+ }
+ if (LIKELY(!(cur.access() & old.access())))
+ continue;
+ if (LIKELY(cur.sid() == old.sid())) {
+ if (!(typ & kAccessCheckOnly) &&
+ LIKELY(cur.access() == old.access() && old.IsRWWeakerOrEqual(typ))) {
+ StoreShadow(sp, cur.raw());
+ stored = true;
+ }
+ continue;
+ }
+ if (LIKELY(old.IsBothReadsOrAtomic(typ)))
+ continue;
+ if (LIKELY(thr->clock.Get(old.sid()) >= old.epoch()))
+ continue;
+ DoReportRace(thr, shadow_mem, cur, old, typ);
+ return true;
+ }
+  // We did not find any races and have already stored
+  // the current access info, so we are done.
+ if (LIKELY(stored))
+ return false;
+ // Choose a random candidate slot and replace it.
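+  // The trace position advances with every traced event, so it serves as a
+  // cheap pseudo-random source without dedicated RNG state.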
+ uptr index =
+ atomic_load_relaxed(&thr->trace_pos) / sizeof(Event) % kShadowCnt;
+ StoreShadow(&shadow_mem[index], cur.raw());
+ return false;
+}
+
+# define LOAD_CURRENT_SHADOW(cur, shadow_mem) UNUSED int access = 0, shadow = 0
+
+#else /* !TSAN_VECTORIZE */
+
+ALWAYS_INLINE
+bool ContainsSameAccess(RawShadow* unused0, Shadow unused1, m128 shadow,
+ m128 access, AccessType typ) {
+  // Note: we could check if there is a larger access of the same type,
+  // e.g. we just allocated/memset-ed a block (so it contains 8-byte writes)
+  // and now do smaller reads/writes; these could also be considered the
+  // "same access". However, it would make the check more expensive, so it's
+  // unclear whether it's worth it. It would also conserve trace space,
+  // which is useful beyond the potential speedup.
+ if (!(typ & kAccessRead)) {
+ const m128 same = _mm_cmpeq_epi32(shadow, access);
+ return _mm_movemask_epi8(same);
+ }
+ // For reads we need to reset read bit in the shadow,
+ // because we need to match read with both reads and writes.
+ // Shadow::kRodata has only read bit set, so it does what we want.
+  // We also abuse it for the rodata check to save a few cycles
+ // since we already loaded Shadow::kRodata into a register.
+ // Reads from rodata can't race.
+ // Measurements show that they can be 10-20% of all memory accesses.
+ // Shadow::kRodata has epoch 0 which cannot appear in shadow normally
+ // (thread epochs start from 1). So the same read bit mask
+ // serves as rodata indicator.
+ const m128 read_mask = _mm_set1_epi32(static_cast<u32>(Shadow::kRodata));
+ const m128 masked_shadow = _mm_or_si128(shadow, read_mask);
+ m128 same = _mm_cmpeq_epi32(masked_shadow, access);
+  // Range memory accesses check Shadow::kRodata before calling this;
+  // Shadow::kRodata is not possible for free memory accesses,
+  // and Go does not use Shadow::kRodata.
+ if (!(typ & kAccessNoRodata) && !SANITIZER_GO) {
+ const m128 ro = _mm_cmpeq_epi32(shadow, read_mask);
+ same = _mm_or_si128(ro, same);
+ }
+ return _mm_movemask_epi8(same);
+}
+
+NOINLINE void DoReportRaceV(ThreadState* thr, RawShadow* shadow_mem, Shadow cur,
+ u32 race_mask, m128 shadow, AccessType typ) {
+  // race_mask indicates which of the shadow elements raced with the current
+  // access. Extract that element.
+ CHECK_NE(race_mask, 0);
+ u32 old;
+ // Note: _mm_extract_epi32 index must be a constant value.
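+  // _mm_cmpeq_epi32 sets all four bytes of a matching lane, so over the
+  // byte-granular race_mask __builtin_ffs returns 1, 5, 9 or 13, and
+  // dividing by 4 yields the 32-bit lane index 0-3.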
+ switch (__builtin_ffs(race_mask) / 4) {
+ case 0:
+ old = _mm_extract_epi32(shadow, 0);
+ break;
+ case 1:
+ old = _mm_extract_epi32(shadow, 1);
+ break;
+ case 2:
+ old = _mm_extract_epi32(shadow, 2);
+ break;
+ case 3:
+ old = _mm_extract_epi32(shadow, 3);
+ break;
+ }
+ Shadow prev(static_cast<RawShadow>(old));
+  // For the free shadow markers the first element (the one that contains
+  // kFreeSid) triggers the race, but the second element contains info about
+  // the freeing thread, so take that one instead.
+ if (prev.sid() == kFreeSid)
+ prev = Shadow(static_cast<RawShadow>(_mm_extract_epi32(shadow, 1)));
+ DoReportRace(thr, shadow_mem, cur, prev, typ);
+}
+
+ALWAYS_INLINE
+bool CheckRaces(ThreadState* thr, RawShadow* shadow_mem, Shadow cur,
+ m128 shadow, m128 access, AccessType typ) {
+ // Note: empty/zero slots don't intersect with any access.
+ const m128 zero = _mm_setzero_si128();
+ const m128 mask_access = _mm_set1_epi32(0x000000ff);
+ const m128 mask_sid = _mm_set1_epi32(0x0000ff00);
+ const m128 mask_read_atomic = _mm_set1_epi32(0xc0000000);
+ const m128 access_and = _mm_and_si128(access, shadow);
+ const m128 access_xor = _mm_xor_si128(access, shadow);
+ const m128 intersect = _mm_and_si128(access_and, mask_access);
+ const m128 not_intersect = _mm_cmpeq_epi32(intersect, zero);
+ const m128 not_same_sid = _mm_and_si128(access_xor, mask_sid);
+ const m128 same_sid = _mm_cmpeq_epi32(not_same_sid, zero);
+ const m128 both_read_or_atomic = _mm_and_si128(access_and, mask_read_atomic);
+ const m128 no_race =
+ _mm_or_si128(_mm_or_si128(not_intersect, same_sid), both_read_or_atomic);
+ const int race_mask = _mm_movemask_epi8(_mm_cmpeq_epi32(no_race, zero));
+ if (UNLIKELY(race_mask))
+ goto SHARED;
+
+STORE: {
+ if (typ & kAccessCheckOnly)
+ return false;
+  // We could also replace different sids if the access is the same,
+  // rw is weaker and happens-before. However, just checking access below
+ // is not enough because we also need to check that !both_read_or_atomic
+ // (reads from different sids can be concurrent).
+ // Theoretically we could replace smaller accesses with larger accesses,
+ // but it's unclear if it's worth doing.
+ const m128 mask_access_sid = _mm_set1_epi32(0x0000ffff);
+ const m128 not_same_sid_access = _mm_and_si128(access_xor, mask_access_sid);
+ const m128 same_sid_access = _mm_cmpeq_epi32(not_same_sid_access, zero);
+ const m128 access_read_atomic =
+ _mm_set1_epi32((typ & (kAccessRead | kAccessAtomic)) << 30);
+ const m128 rw_weaker =
+ _mm_cmpeq_epi32(_mm_max_epu32(shadow, access_read_atomic), shadow);
+ const m128 rewrite = _mm_and_si128(same_sid_access, rw_weaker);
+ const int rewrite_mask = _mm_movemask_epi8(rewrite);
+ int index = __builtin_ffs(rewrite_mask);
+ if (UNLIKELY(index == 0)) {
+ const m128 empty = _mm_cmpeq_epi32(shadow, zero);
+ const int empty_mask = _mm_movemask_epi8(empty);
+ index = __builtin_ffs(empty_mask);
+ if (UNLIKELY(index == 0))
+ index = (atomic_load_relaxed(&thr->trace_pos) / 2) % 16;
+ }
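+  // index is a 1-based byte position from ffs (or a 0-15 byte offset from
+  // the fallback above); index / 4 converts it to a 32-bit slot index.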
+ StoreShadow(&shadow_mem[index / 4], cur.raw());
+ // We could zero other slots determined by rewrite_mask.
+ // That would help other threads to evict better slots,
+ // but it's unclear if it's worth it.
+ return false;
+}
+
+SHARED:
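+  // 0x7fffffff is a sentinel: lanes that did not race keep this maximal
+  // value, so the epoch comparison below can never mark them as concurrent.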
+ m128 thread_epochs = _mm_set1_epi32(0x7fffffff);
+  // Need to unroll this manually because _mm_extract_epi8/_mm_insert_epi32
+  // indexes must be constants.
+# define LOAD_EPOCH(idx) \
+ if (LIKELY(race_mask & (1 << (idx * 4)))) { \
+ u8 sid = _mm_extract_epi8(shadow, idx * 4 + 1); \
+ u16 epoch = static_cast<u16>(thr->clock.Get(static_cast<Sid>(sid))); \
+ thread_epochs = _mm_insert_epi32(thread_epochs, u32(epoch) << 16, idx); \
+ }
+ LOAD_EPOCH(0);
+ LOAD_EPOCH(1);
+ LOAD_EPOCH(2);
+ LOAD_EPOCH(3);
+# undef LOAD_EPOCH
+ const m128 mask_epoch = _mm_set1_epi32(0x3fff0000);
+ const m128 shadow_epochs = _mm_and_si128(shadow, mask_epoch);
+ const m128 concurrent = _mm_cmplt_epi32(thread_epochs, shadow_epochs);
+ const int concurrent_mask = _mm_movemask_epi8(concurrent);
+ if (LIKELY(concurrent_mask == 0))
+ goto STORE;
+
+ DoReportRaceV(thr, shadow_mem, cur, concurrent_mask, shadow, typ);
+ return true;
+}
+
+# define LOAD_CURRENT_SHADOW(cur, shadow_mem) \
+ const m128 access = _mm_set1_epi32(static_cast<u32>((cur).raw())); \
+ const m128 shadow = _mm_load_si128(reinterpret_cast<m128*>(shadow_mem))
+#endif
+
+char* DumpShadow(char* buf, RawShadow raw) {
+ if (raw == Shadow::kEmpty) {
+ internal_snprintf(buf, 64, "0");
+ return buf;
+ }
+ Shadow s(raw);
+ AccessType typ;
+ s.GetAccess(nullptr, nullptr, &typ);
+ internal_snprintf(buf, 64, "{tid=%u@%u access=0x%x typ=%x}",
+ static_cast<u32>(s.sid()), static_cast<u32>(s.epoch()),
+ s.access(), static_cast<u32>(typ));
+ return buf;
+}
+
+// TryTrace* and TraceRestart* functions allow turning memory access and func
+// entry/exit callbacks into leaf functions with all associated performance
+// benefits. These hottest callbacks do only 2 slow path calls: reporting a
+// race and trace part switching. Race reporting is easy to turn into a tail
+// call: we just always return from the runtime after reporting a race. But
+// trace part switching is harder because it needs to happen in the middle of
+// callbacks. To turn it into a tail call we immediately return after
+// TraceRestart* functions, but TraceRestart* functions themselves recurse
+// into the callback after switching the trace part. As a result the hottest
+// callbacks contain only tail calls, which effectively makes them leaf
+// functions (can use all registers, no frame setup, etc).
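+//
+// The resulting shape (an illustrative sketch, not one of the real
+// callbacks below):
+//
+//   ALWAYS_INLINE void Callback(ThreadState* thr, ...) {
+//     ... fast path ...
+//     if (UNLIKELY(!TryTraceSomething(thr, ...)))
+//       return TraceRestartCallback(thr, ...);  // tail call into slow path
+//     ... rest of the fast path ...
+//   }
+//   NOINLINE void TraceRestartCallback(ThreadState* thr, ...) {
+//     TraceSwitchPart(thr);
+//     Callback(thr, ...);  // recurse once after switching the trace part
+//   }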
+NOINLINE void TraceRestartMemoryAccess(ThreadState* thr, uptr pc, uptr addr,
+ uptr size, AccessType typ) {
+ TraceSwitchPart(thr);
+ MemoryAccess(thr, pc, addr, size, typ);
+}
+
+ALWAYS_INLINE USED void MemoryAccess(ThreadState* thr, uptr pc, uptr addr,
+ uptr size, AccessType typ) {
+ RawShadow* shadow_mem = MemToShadow(addr);
+ UNUSED char memBuf[4][64];
+ DPrintf2("#%d: Access: %d@%d %p/%zd typ=0x%x {%s, %s, %s, %s}\n", thr->tid,
+ static_cast<int>(thr->fast_state.sid()),
+ static_cast<int>(thr->fast_state.epoch()), (void*)addr, size,
+ static_cast<int>(typ), DumpShadow(memBuf[0], shadow_mem[0]),
+ DumpShadow(memBuf[1], shadow_mem[1]),
+ DumpShadow(memBuf[2], shadow_mem[2]),
+ DumpShadow(memBuf[3], shadow_mem[3]));
+
+ FastState fast_state = thr->fast_state;
+ Shadow cur(fast_state, addr, size, typ);
+
+ LOAD_CURRENT_SHADOW(cur, shadow_mem);
+ if (LIKELY(ContainsSameAccess(shadow_mem, cur, shadow, access, typ)))
+ return;
+ if (UNLIKELY(fast_state.GetIgnoreBit()))
+ return;
+ if (!TryTraceMemoryAccess(thr, pc, addr, size, typ))
+ return TraceRestartMemoryAccess(thr, pc, addr, size, typ);
+ CheckRaces(thr, shadow_mem, cur, shadow, access, typ);
+}
+
+void MemoryAccess16(ThreadState* thr, uptr pc, uptr addr, AccessType typ);
+
+NOINLINE
+void RestartMemoryAccess16(ThreadState* thr, uptr pc, uptr addr,
+ AccessType typ) {
+ TraceSwitchPart(thr);
+ MemoryAccess16(thr, pc, addr, typ);
+}
+
+ALWAYS_INLINE USED void MemoryAccess16(ThreadState* thr, uptr pc, uptr addr,
+ AccessType typ) {
+ const uptr size = 16;
+ FastState fast_state = thr->fast_state;
+ if (UNLIKELY(fast_state.GetIgnoreBit()))
+ return;
+ Shadow cur(fast_state, 0, 8, typ);
+ RawShadow* shadow_mem = MemToShadow(addr);
+ bool traced = false;
+ {
+ LOAD_CURRENT_SHADOW(cur, shadow_mem);
+ if (LIKELY(ContainsSameAccess(shadow_mem, cur, shadow, access, typ)))
+ goto SECOND;
+ if (!TryTraceMemoryAccessRange(thr, pc, addr, size, typ))
+ return RestartMemoryAccess16(thr, pc, addr, typ);
+ traced = true;
+ if (UNLIKELY(CheckRaces(thr, shadow_mem, cur, shadow, access, typ)))
+ return;
+ }
+SECOND:
+ shadow_mem += kShadowCnt;
+ LOAD_CURRENT_SHADOW(cur, shadow_mem);
+ if (LIKELY(ContainsSameAccess(shadow_mem, cur, shadow, access, typ)))
+ return;
+ if (!traced && !TryTraceMemoryAccessRange(thr, pc, addr, size, typ))
+ return RestartMemoryAccess16(thr, pc, addr, typ);
+ CheckRaces(thr, shadow_mem, cur, shadow, access, typ);
+}
+
+NOINLINE
+void RestartUnalignedMemoryAccess(ThreadState* thr, uptr pc, uptr addr,
+ uptr size, AccessType typ) {
+ TraceSwitchPart(thr);
+ UnalignedMemoryAccess(thr, pc, addr, size, typ);
+}
+
+ALWAYS_INLINE USED void UnalignedMemoryAccess(ThreadState* thr, uptr pc,
+ uptr addr, uptr size,
+ AccessType typ) {
+ DCHECK_LE(size, 8);
+ FastState fast_state = thr->fast_state;
+ if (UNLIKELY(fast_state.GetIgnoreBit()))
+ return;
+ RawShadow* shadow_mem = MemToShadow(addr);
+ bool traced = false;
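+  // Split the access at the next shadow cell boundary: size1 covers the
+  // bytes up to the boundary, size2 (below) covers the rest. For example,
+  // assuming 8-byte cells, a 4-byte access at addr % 8 == 6 splits into
+  // size1 = 2 and size2 = 2.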
+ uptr size1 = Min<uptr>(size, RoundUp(addr + 1, kShadowCell) - addr);
+ {
+ Shadow cur(fast_state, addr, size1, typ);
+ LOAD_CURRENT_SHADOW(cur, shadow_mem);
+ if (LIKELY(ContainsSameAccess(shadow_mem, cur, shadow, access, typ)))
+ goto SECOND;
+ if (!TryTraceMemoryAccessRange(thr, pc, addr, size, typ))
+ return RestartUnalignedMemoryAccess(thr, pc, addr, size, typ);
+ traced = true;
+ if (UNLIKELY(CheckRaces(thr, shadow_mem, cur, shadow, access, typ)))
+ return;
+ }
+SECOND:
+ uptr size2 = size - size1;
+ if (LIKELY(size2 == 0))
+ return;
+ shadow_mem += kShadowCnt;
+ Shadow cur(fast_state, 0, size2, typ);
+ LOAD_CURRENT_SHADOW(cur, shadow_mem);
+ if (LIKELY(ContainsSameAccess(shadow_mem, cur, shadow, access, typ)))
+ return;
+ if (!traced && !TryTraceMemoryAccessRange(thr, pc, addr, size, typ))
+ return RestartUnalignedMemoryAccess(thr, pc, addr, size, typ);
+ CheckRaces(thr, shadow_mem, cur, shadow, access, typ);
+}
+
+void ShadowSet(RawShadow* p, RawShadow* end, RawShadow v) {
+ DCHECK_LE(p, end);
+ DCHECK(IsShadowMem(p));
+ DCHECK(IsShadowMem(end));
+ UNUSED const uptr kAlign = kShadowCnt * kShadowSize;
+ DCHECK_EQ(reinterpret_cast<uptr>(p) % kAlign, 0);
+ DCHECK_EQ(reinterpret_cast<uptr>(end) % kAlign, 0);
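+  // Write v into the first slot of each shadow cell and mark the remaining
+  // slots of the cell as empty.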
+#if !TSAN_VECTORIZE
+ for (; p < end; p += kShadowCnt) {
+ p[0] = v;
+ for (uptr i = 1; i < kShadowCnt; i++) p[i] = Shadow::kEmpty;
+ }
+#else
+ m128 vv = _mm_setr_epi32(
+ static_cast<u32>(v), static_cast<u32>(Shadow::kEmpty),
+ static_cast<u32>(Shadow::kEmpty), static_cast<u32>(Shadow::kEmpty));
+ m128* vp = reinterpret_cast<m128*>(p);
+ m128* vend = reinterpret_cast<m128*>(end);
+ for (; vp < vend; vp++) _mm_store_si128(vp, vv);
+#endif
+}
+
+static void MemoryRangeSet(uptr addr, uptr size, RawShadow val) {
+ if (size == 0)
+ return;
+ DCHECK_EQ(addr % kShadowCell, 0);
+ DCHECK_EQ(size % kShadowCell, 0);
+ // If a user passes some insane arguments (memset(0)),
+ // let it just crash as usual.
+ if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
+ return;
+ RawShadow* begin = MemToShadow(addr);
+ RawShadow* end = begin + size / kShadowCell * kShadowCnt;
+ // Don't want to touch lots of shadow memory.
+  // If a program maps a 10MB stack, there is no need to reset the whole range.
+ // UnmapOrDie/MmapFixedNoReserve does not work on Windows.
+ if (SANITIZER_WINDOWS ||
+ size <= common_flags()->clear_shadow_mmap_threshold) {
+ ShadowSet(begin, end, val);
+ return;
+ }
+  // The region is big, so reset only the beginning and the end.
+ const uptr kPageSize = GetPageSizeCached();
+  // Set at least the first kPageSize/2, rounded up to a page boundary.
+ RawShadow* mid1 =
+ Min(end, reinterpret_cast<RawShadow*>(RoundUp(
+ reinterpret_cast<uptr>(begin) + kPageSize / 2, kPageSize)));
+ ShadowSet(begin, mid1, val);
+ // Reset middle part.
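+  // Remapping works as a reset here because Shadow::kEmpty is zero and
+  // freshly mapped pages read as zeros.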
+ RawShadow* mid2 = RoundDown(end, kPageSize);
+ if (mid2 > mid1) {
+ if (!MmapFixedSuperNoReserve((uptr)mid1, (uptr)mid2 - (uptr)mid1))
+ Die();
+ }
+ // Set the ending.
+ ShadowSet(mid2, end, val);
+}
+
+void MemoryResetRange(ThreadState* thr, uptr pc, uptr addr, uptr size) {
+ uptr addr1 = RoundDown(addr, kShadowCell);
+ uptr size1 = RoundUp(size + addr - addr1, kShadowCell);
+ MemoryRangeSet(addr1, size1, Shadow::kEmpty);
+}
+
+void MemoryRangeFreed(ThreadState* thr, uptr pc, uptr addr, uptr size) {
+ // Callers must lock the slot to ensure synchronization with the reset.
+ // The problem with "freed" memory is that it's not "monotonic"
+ // with respect to bug detection: freed memory is bad to access,
+ // but then if the heap block is reallocated later, it's good to access.
+  // As a result, a garbage "freed" shadow can lead to a false positive
+  // if it happens to match a real free in the thread trace,
+  // but the heap block was reallocated before the current memory access,
+  // so it's still good to access. This is not the case with data races.
+ DCHECK(thr->slot_locked);
+ DCHECK_EQ(addr % kShadowCell, 0);
+ size = RoundUp(size, kShadowCell);
+  // Processing more than 1k (2k of shadow) is expensive, can cause
+  // excessive memory consumption (the user does not necessarily touch
+  // the whole range) and is most likely unnecessary.
+ size = Min<uptr>(size, 1024);
+ const AccessType typ = kAccessWrite | kAccessFree | kAccessSlotLocked |
+ kAccessCheckOnly | kAccessNoRodata;
+ TraceMemoryAccessRange(thr, pc, addr, size, typ);
+ RawShadow* shadow_mem = MemToShadow(addr);
+ Shadow cur(thr->fast_state, 0, kShadowCell, typ);
+#if TSAN_VECTORIZE
+ const m128 access = _mm_set1_epi32(static_cast<u32>(cur.raw()));
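+  // freed mirrors the scalar layout below: FreedMarker in slot 0, FreedInfo
+  // in slot 1, and the remaining two slots left empty.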
+ const m128 freed = _mm_setr_epi32(
+ static_cast<u32>(Shadow::FreedMarker()),
+ static_cast<u32>(Shadow::FreedInfo(cur.sid(), cur.epoch())), 0, 0);
+ for (; size; size -= kShadowCell, shadow_mem += kShadowCnt) {
+ const m128 shadow = _mm_load_si128((m128*)shadow_mem);
+ if (UNLIKELY(CheckRaces(thr, shadow_mem, cur, shadow, access, typ)))
+ return;
+ _mm_store_si128((m128*)shadow_mem, freed);
+ }
+#else
+ for (; size; size -= kShadowCell, shadow_mem += kShadowCnt) {
+ if (UNLIKELY(CheckRaces(thr, shadow_mem, cur, 0, 0, typ)))
+ return;
+ StoreShadow(&shadow_mem[0], Shadow::FreedMarker());
+ StoreShadow(&shadow_mem[1], Shadow::FreedInfo(cur.sid(), cur.epoch()));
+ StoreShadow(&shadow_mem[2], Shadow::kEmpty);
+ StoreShadow(&shadow_mem[3], Shadow::kEmpty);
+ }
+#endif
+}
+
+void MemoryRangeImitateWrite(ThreadState* thr, uptr pc, uptr addr, uptr size) {
+ DCHECK_EQ(addr % kShadowCell, 0);
+ size = RoundUp(size, kShadowCell);
+ TraceMemoryAccessRange(thr, pc, addr, size, kAccessWrite);
+ Shadow cur(thr->fast_state, 0, 8, kAccessWrite);
+ MemoryRangeSet(addr, size, cur.raw());
+}
+
+void MemoryRangeImitateWriteOrResetRange(ThreadState* thr, uptr pc, uptr addr,
+ uptr size) {
+ if (thr->ignore_reads_and_writes == 0)
+ MemoryRangeImitateWrite(thr, pc, addr, size);
+ else
+ MemoryResetRange(thr, pc, addr, size);
+}
+
+ALWAYS_INLINE
+bool MemoryAccessRangeOne(ThreadState* thr, RawShadow* shadow_mem, Shadow cur,
+ AccessType typ) {
+ LOAD_CURRENT_SHADOW(cur, shadow_mem);
+ if (LIKELY(ContainsSameAccess(shadow_mem, cur, shadow, access, typ)))
+ return false;
+ return CheckRaces(thr, shadow_mem, cur, shadow, access, typ);
+}
+
+template <bool is_read>
+NOINLINE void RestartMemoryAccessRange(ThreadState* thr, uptr pc, uptr addr,
+ uptr size) {
+ TraceSwitchPart(thr);
+ MemoryAccessRangeT<is_read>(thr, pc, addr, size);
+}
+
+template <bool is_read>
+void MemoryAccessRangeT(ThreadState* thr, uptr pc, uptr addr, uptr size) {
+ const AccessType typ =
+ (is_read ? kAccessRead : kAccessWrite) | kAccessNoRodata;
+ RawShadow* shadow_mem = MemToShadow(addr);
+ DPrintf2("#%d: MemoryAccessRange: @%p %p size=%d is_read=%d\n", thr->tid,
+ (void*)pc, (void*)addr, (int)size, is_read);
+
+#if SANITIZER_DEBUG
+ if (!IsAppMem(addr)) {
+ Printf("Access to non app mem %zx\n", addr);
+ DCHECK(IsAppMem(addr));
+ }
+ if (!IsAppMem(addr + size - 1)) {
+ Printf("Access to non app mem %zx\n", addr + size - 1);
+ DCHECK(IsAppMem(addr + size - 1));
+ }
+ if (!IsShadowMem(shadow_mem)) {
+ Printf("Bad shadow addr %p (%zx)\n", static_cast<void*>(shadow_mem), addr);
+ DCHECK(IsShadowMem(shadow_mem));
+ }
+ if (!IsShadowMem(shadow_mem + size * kShadowCnt - 1)) {
+ Printf("Bad shadow addr %p (%zx)\n",
+ static_cast<void*>(shadow_mem + size * kShadowCnt - 1),
+ addr + size - 1);
+ DCHECK(IsShadowMem(shadow_mem + size * kShadowCnt - 1));
+ }
+#endif
+
+ // Access to .rodata section, no races here.
+ // Measurements show that it can be 10-20% of all memory accesses.
+  // Check once here to avoid checking every access separately.
+ // Note: we could (and should) do this only for the is_read case
+ // (writes shouldn't go to .rodata). But it happens in Chromium tests:
+ // https://bugs.chromium.org/p/chromium/issues/detail?id=1275581#c19
+ // Details are unknown since it happens only on CI machines.
+ if (*shadow_mem == Shadow::kRodata)
+ return;
+
+ FastState fast_state = thr->fast_state;
+ if (UNLIKELY(fast_state.GetIgnoreBit()))
+ return;
+
+ if (!TryTraceMemoryAccessRange(thr, pc, addr, size, typ))
+ return RestartMemoryAccessRange<is_read>(thr, pc, addr, size);
+
+ if (UNLIKELY(addr % kShadowCell)) {
+ // Handle unaligned beginning, if any.
+ uptr size1 = Min(size, RoundUp(addr, kShadowCell) - addr);
+ size -= size1;
+ Shadow cur(fast_state, addr, size1, typ);
+ if (UNLIKELY(MemoryAccessRangeOne(thr, shadow_mem, cur, typ)))
+ return;
+ shadow_mem += kShadowCnt;
+ }
+ // Handle middle part, if any.
+ Shadow cur(fast_state, 0, kShadowCell, typ);
+ for (; size >= kShadowCell; size -= kShadowCell, shadow_mem += kShadowCnt) {
+ if (UNLIKELY(MemoryAccessRangeOne(thr, shadow_mem, cur, typ)))
+ return;
+ }
+ // Handle ending, if any.
+ if (UNLIKELY(size)) {
+ Shadow cur(fast_state, 0, size, typ);
+ if (UNLIKELY(MemoryAccessRangeOne(thr, shadow_mem, cur, typ)))
+ return;
+ }
+}
+
+template void MemoryAccessRangeT<true>(ThreadState* thr, uptr pc, uptr addr,
+ uptr size);
+template void MemoryAccessRangeT<false>(ThreadState* thr, uptr pc, uptr addr,
+ uptr size);
+
+} // namespace __tsan
+
+#if !SANITIZER_GO
+// Must be included in this file to make sure everything is inlined.
+# include "tsan_interface.inc"
+#endif
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_amd64.S b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_amd64.S
index 5913aa360c5d..f848be9dd46c 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_amd64.S
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_amd64.S
@@ -9,166 +9,6 @@
.section __TEXT,__text
#endif
-ASM_HIDDEN(__tsan_trace_switch)
-.globl ASM_SYMBOL(__tsan_trace_switch_thunk)
-ASM_SYMBOL(__tsan_trace_switch_thunk):
- CFI_STARTPROC
- # Save scratch registers.
- push %rax
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%rax, 0)
- push %rcx
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%rcx, 0)
- push %rdx
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%rdx, 0)
- push %rsi
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%rsi, 0)
- push %rdi
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%rdi, 0)
- push %r8
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%r8, 0)
- push %r9
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%r9, 0)
- push %r10
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%r10, 0)
- push %r11
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%r11, 0)
- # Align stack frame.
- push %rbx # non-scratch
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%rbx, 0)
- mov %rsp, %rbx # save current rsp
- CFI_DEF_CFA_REGISTER(%rbx)
- shr $4, %rsp # clear 4 lsb, align to 16
- shl $4, %rsp
-
- call ASM_SYMBOL(__tsan_trace_switch)
-
- # Unalign stack frame back.
- mov %rbx, %rsp # restore the original rsp
- CFI_DEF_CFA_REGISTER(%rsp)
- pop %rbx
- CFI_ADJUST_CFA_OFFSET(-8)
- # Restore scratch registers.
- pop %r11
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %r10
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %r9
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %r8
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %rdi
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %rsi
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %rdx
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %rcx
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %rax
- CFI_ADJUST_CFA_OFFSET(-8)
- CFI_RESTORE(%rax)
- CFI_RESTORE(%rbx)
- CFI_RESTORE(%rcx)
- CFI_RESTORE(%rdx)
- CFI_RESTORE(%rsi)
- CFI_RESTORE(%rdi)
- CFI_RESTORE(%r8)
- CFI_RESTORE(%r9)
- CFI_RESTORE(%r10)
- CFI_RESTORE(%r11)
- ret
- CFI_ENDPROC
-
-ASM_HIDDEN(__tsan_report_race)
-.globl ASM_SYMBOL(__tsan_report_race_thunk)
-ASM_SYMBOL(__tsan_report_race_thunk):
- CFI_STARTPROC
- # Save scratch registers.
- push %rax
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%rax, 0)
- push %rcx
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%rcx, 0)
- push %rdx
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%rdx, 0)
- push %rsi
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%rsi, 0)
- push %rdi
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%rdi, 0)
- push %r8
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%r8, 0)
- push %r9
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%r9, 0)
- push %r10
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%r10, 0)
- push %r11
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%r11, 0)
- # Align stack frame.
- push %rbx # non-scratch
- CFI_ADJUST_CFA_OFFSET(8)
- CFI_REL_OFFSET(%rbx, 0)
- mov %rsp, %rbx # save current rsp
- CFI_DEF_CFA_REGISTER(%rbx)
- shr $4, %rsp # clear 4 lsb, align to 16
- shl $4, %rsp
-
- call ASM_SYMBOL(__tsan_report_race)
-
- # Unalign stack frame back.
- mov %rbx, %rsp # restore the original rsp
- CFI_DEF_CFA_REGISTER(%rsp)
- pop %rbx
- CFI_ADJUST_CFA_OFFSET(-8)
- # Restore scratch registers.
- pop %r11
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %r10
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %r9
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %r8
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %rdi
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %rsi
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %rdx
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %rcx
- CFI_ADJUST_CFA_OFFSET(-8)
- pop %rax
- CFI_ADJUST_CFA_OFFSET(-8)
- CFI_RESTORE(%rax)
- CFI_RESTORE(%rbx)
- CFI_RESTORE(%rcx)
- CFI_RESTORE(%rdx)
- CFI_RESTORE(%rsi)
- CFI_RESTORE(%rdi)
- CFI_RESTORE(%r8)
- CFI_RESTORE(%r9)
- CFI_RESTORE(%r10)
- CFI_RESTORE(%r11)
- ret
- CFI_ENDPROC
-
ASM_HIDDEN(__tsan_setjmp)
#if defined(__NetBSD__)
.comm _ZN14__interception15real___setjmp14E,8,8
@@ -185,6 +25,7 @@ ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(setjmp))
ASM_SYMBOL_INTERCEPTOR(setjmp):
#endif
CFI_STARTPROC
+ _CET_ENDBR
// save env parameter
push %rdi
CFI_ADJUST_CFA_OFFSET(8)
@@ -226,6 +67,7 @@ ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(setjmp))
ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(_setjmp))
ASM_SYMBOL_INTERCEPTOR(_setjmp):
CFI_STARTPROC
+ _CET_ENDBR
// save env parameter
push %rdi
CFI_ADJUST_CFA_OFFSET(8)
@@ -267,6 +109,7 @@ ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(sigsetjmp))
ASM_SYMBOL_INTERCEPTOR(sigsetjmp):
#endif
CFI_STARTPROC
+ _CET_ENDBR
// save env parameter
push %rdi
CFI_ADJUST_CFA_OFFSET(8)
@@ -323,6 +166,7 @@ ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(sigsetjmp))
ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(__sigsetjmp))
ASM_SYMBOL_INTERCEPTOR(__sigsetjmp):
CFI_STARTPROC
+ _CET_ENDBR
// save env parameter
push %rdi
CFI_ADJUST_CFA_OFFSET(8)
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_loongarch64.S b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_loongarch64.S
new file mode 100644
index 000000000000..12856bd110cd
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_loongarch64.S
@@ -0,0 +1,196 @@
+#include "sanitizer_common/sanitizer_asm.h"
+
+.section .text
+
+ASM_HIDDEN(__tsan_setjmp)
+.comm _ZN14__interception11real_setjmpE,8,8
+.globl ASM_SYMBOL_INTERCEPTOR(setjmp)
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(setjmp))
+ASM_SYMBOL_INTERCEPTOR(setjmp):
+ CFI_STARTPROC
+
+ // Save frame pointer and return address register
+ addi.d $sp, $sp, -32
+ st.d $ra, $sp, 24
+ st.d $fp, $sp, 16
+ CFI_DEF_CFA_OFFSET (32)
+ CFI_OFFSET (1, -8)
+ CFI_OFFSET (22, -16)
+
+  // Set FP to point at the entry SP (the previous frame)
+ addi.d $fp, $sp, 32
+ CFI_DEF_CFA_REGISTER (22)
+
+ // Save env parameter
+ st.d $a0, $sp, 8
+ CFI_OFFSET (4, -24)
+
+ // Obtain SP, first argument to `void __tsan_setjmp(uptr sp)`
+ addi.d $a0, $fp, 0
+
+ // call tsan interceptor
+ bl ASM_SYMBOL(__tsan_setjmp)
+
+ // Restore env parameter
+ ld.d $a0, $sp, 8
+ CFI_RESTORE (4)
+
+ // Restore frame/link register
+ ld.d $fp, $sp, 16
+ ld.d $ra, $sp, 24
+ addi.d $sp, $sp, 32
+ CFI_RESTORE (22)
+ CFI_RESTORE (1)
+ CFI_DEF_CFA (3, 0)
+
+ // tail jump to libc setjmp
+ la.local $a1, _ZN14__interception11real_setjmpE
+ ld.d $a1, $a1, 0
+ jr $a1
+
+ CFI_ENDPROC
+ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(setjmp))
+
+.comm _ZN14__interception12real__setjmpE,8,8
+.globl ASM_SYMBOL_INTERCEPTOR(_setjmp)
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(_setjmp))
+ASM_SYMBOL_INTERCEPTOR(_setjmp):
+ CFI_STARTPROC
+
+ // Save frame pointer and return address register
+ addi.d $sp, $sp, -32
+ st.d $ra, $sp, 24
+ st.d $fp, $sp, 16
+ CFI_DEF_CFA_OFFSET (32)
+ CFI_OFFSET (1, -8)
+ CFI_OFFSET (22, -16)
+
+  // Set FP to point at the entry SP (the previous frame)
+ addi.d $fp, $sp, 32
+ CFI_DEF_CFA_REGISTER (22)
+
+ // Save env parameter
+ st.d $a0, $sp, 8
+ CFI_OFFSET (4, -24)
+
+ // Obtain SP, first argument to `void __tsan_setjmp(uptr sp)`
+ addi.d $a0, $fp, 0
+
+ // call tsan interceptor
+ bl ASM_SYMBOL(__tsan_setjmp)
+
+ // Restore env parameter
+ ld.d $a0, $sp, 8
+ CFI_RESTORE (4)
+
+ // Restore frame/link register
+ ld.d $fp, $sp, 16
+ ld.d $ra, $sp, 24
+ addi.d $sp, $sp, 32
+ CFI_RESTORE (22)
+ CFI_RESTORE (1)
+ CFI_DEF_CFA (3, 0)
+
+ // tail jump to libc setjmp
+ la.local $a1, _ZN14__interception12real__setjmpE
+ ld.d $a1, $a1, 0
+ jr $a1
+
+ CFI_ENDPROC
+ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(_setjmp))
+
+.comm _ZN14__interception14real_sigsetjmpE,8,8
+.globl ASM_SYMBOL_INTERCEPTOR(sigsetjmp)
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(sigsetjmp))
+ASM_SYMBOL_INTERCEPTOR(sigsetjmp):
+ CFI_STARTPROC
+
+ // Save frame pointer and return address register
+ addi.d $sp, $sp, -32
+ st.d $ra, $sp, 24
+ st.d $fp, $sp, 16
+ CFI_DEF_CFA_OFFSET (32)
+ CFI_OFFSET (1, -8)
+ CFI_OFFSET (22, -16)
+
+  // Set FP to point at the entry SP (the previous frame)
+ addi.d $fp, $sp, 32
+ CFI_DEF_CFA_REGISTER (22)
+
+ // Save env parameter
+ st.d $a0, $sp, 8
+ CFI_OFFSET (4, -24)
+
+ // Obtain SP, first argument to `void __tsan_setjmp(uptr sp)`
+ addi.d $a0, $fp, 0
+
+ // call tsan interceptor
+ bl ASM_SYMBOL(__tsan_setjmp)
+
+ // Restore env parameter
+ ld.d $a0, $sp, 8
+ CFI_RESTORE (4)
+
+ // Restore frame/link register
+ ld.d $fp, $sp, 16
+ ld.d $ra, $sp, 24
+ addi.d $sp, $sp, 32
+ CFI_RESTORE (22)
+ CFI_RESTORE (1)
+ CFI_DEF_CFA (3, 0)
+
+ // tail jump to libc setjmp
+ la.local $a1, _ZN14__interception14real_sigsetjmpE
+ ld.d $a1, $a1, 0
+ jr $a1
+
+ CFI_ENDPROC
+ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(sigsetjmp))
+
+.comm _ZN14__interception16real___sigsetjmpE,8,8
+.globl ASM_SYMBOL_INTERCEPTOR(__sigsetjmp)
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(__sigsetjmp))
+ASM_SYMBOL_INTERCEPTOR(__sigsetjmp):
+ CFI_STARTPROC
+
+ // Save frame pointer and return address register
+ addi.d $sp, $sp, -32
+ st.d $ra, $sp, 24
+ st.d $fp, $sp, 16
+ CFI_DEF_CFA_OFFSET (32)
+ CFI_OFFSET (1, -8)
+ CFI_OFFSET (22, -16)
+
+  // Set FP to point at the entry SP (the previous frame)
+ addi.d $fp, $sp, 32
+ CFI_DEF_CFA_REGISTER (22)
+
+ // Save env parameter
+ st.d $a0, $sp, 8
+ CFI_OFFSET (4, -24)
+
+ // Obtain SP, first argument to `void __tsan_setjmp(uptr sp)`
+ addi.d $a0, $fp, 0
+
+ // call tsan interceptor
+ bl ASM_SYMBOL(__tsan_setjmp)
+
+ // Restore env parameter
+ ld.d $a0, $sp, 8
+ CFI_RESTORE (4)
+
+ // Restore frame/link register
+ ld.d $fp, $sp, 16
+ ld.d $ra, $sp, 24
+ addi.d $sp, $sp, 32
+ CFI_RESTORE (22)
+ CFI_RESTORE (1)
+ CFI_DEF_CFA (3, 0)
+
+ // tail jump to libc setjmp
+ la.local $a1, _ZN14__interception16real___sigsetjmpE
+ ld.d $a1, $a1, 0
+ jr $a1
+
+ CFI_ENDPROC
+ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(__sigsetjmp))
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
index 27ae279d6304..2e978852ea7d 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
@@ -23,6 +23,8 @@
namespace __tsan {
void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);
+void ReportDestroyLocked(ThreadState *thr, uptr pc, uptr addr,
+ FastState last_lock, StackID creation_stack_id);
struct Callback final : public DDCallback {
ThreadState *thr;
@@ -35,27 +37,27 @@ struct Callback final : public DDCallback {
DDCallback::lt = thr->dd_lt;
}
- u32 Unwind() override { return CurrentStackId(thr, pc); }
- int UniqueTid() override { return thr->unique_id; }
+ StackID Unwind() override { return CurrentStackId(thr, pc); }
+ int UniqueTid() override { return thr->tid; }
};
void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) {
Callback cb(thr, pc);
ctx->dd->MutexInit(&cb, &s->dd);
- s->dd.ctx = s->GetId();
+ s->dd.ctx = s->addr;
}
static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
- uptr addr, u64 mid) {
+ uptr addr, StackID creation_stack_id) {
// In Go, these misuses are either impossible, or detected by std lib,
// or false positives (e.g. unlock in a different thread).
if (SANITIZER_GO)
return;
if (!ShouldReport(thr, typ))
return;
- ThreadRegistryLock l(ctx->thread_registry);
+ ThreadRegistryLock l(&ctx->thread_registry);
ScopedReport rep(typ);
- rep.AddMutex(mid);
+ rep.AddMutex(addr, creation_stack_id);
VarSizeStackTrace trace;
ObtainCurrentStack(thr, pc, &trace);
rep.AddStack(trace, true);
@@ -63,185 +65,197 @@ static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
OutputReport(thr, rep);
}
-void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) NO_THREAD_SAFETY_ANALYSIS {
+static void RecordMutexLock(ThreadState *thr, uptr pc, uptr addr,
+ StackID stack_id, bool write) {
+ auto typ = write ? EventType::kLock : EventType::kRLock;
+  // Note: it's important to trace before modifying the mutex set
+  // because tracing can switch the trace part, and we write the current
+  // mutex set at the beginning of each part.
+  // If we did it in the opposite order, we would write an already reduced
+  // mutex set at the beginning of the part and then trace the unlock again.
+ TraceMutexLock(thr, typ, pc, addr, stack_id);
+ thr->mset.AddAddr(addr, stack_id, write);
+}
+
+static void RecordMutexUnlock(ThreadState *thr, uptr addr) {
+  // See the comment in RecordMutexLock regarding the order of operations.
+ TraceMutexUnlock(thr, addr);
+ thr->mset.DelAddr(addr);
+}
+
+void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
DPrintf("#%d: MutexCreate %zx flagz=0x%x\n", thr->tid, addr, flagz);
- if (!(flagz & MutexFlagLinkerInit) && IsAppMem(addr)) {
- CHECK(!thr->is_freeing);
- thr->is_freeing = true;
- MemoryWrite(thr, pc, addr, kSizeLog1);
- thr->is_freeing = false;
- }
- SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
+ if (!(flagz & MutexFlagLinkerInit) && pc && IsAppMem(addr))
+ MemoryAccess(thr, pc, addr, 1, kAccessWrite);
+ SlotLocker locker(thr);
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
s->SetFlags(flagz & MutexCreationFlagMask);
- if (!SANITIZER_GO && s->creation_stack_id == 0)
+ // Save stack in the case the sync object was created before as atomic.
+ if (!SANITIZER_GO && s->creation_stack_id == kInvalidStackID)
s->creation_stack_id = CurrentStackId(thr, pc);
- s->mtx.Unlock();
}
-void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) NO_THREAD_SAFETY_ANALYSIS {
+void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
- SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
- if (s == 0)
- return;
- if ((flagz & MutexFlagLinkerInit)
- || s->IsFlagSet(MutexFlagLinkerInit)
- || ((flagz & MutexFlagNotStatic) && !s->IsFlagSet(MutexFlagNotStatic))) {
- // Destroy is no-op for linker-initialized mutexes.
- s->mtx.Unlock();
- return;
- }
- if (common_flags()->detect_deadlocks) {
- Callback cb(thr, pc);
- ctx->dd->MutexDestroy(&cb, &s->dd);
- ctx->dd->MutexInit(&cb, &s->dd);
- }
bool unlock_locked = false;
- if (flags()->report_destroy_locked && s->owner_tid != kInvalidTid &&
- !s->IsFlagSet(MutexFlagBroken)) {
- s->SetFlags(MutexFlagBroken);
- unlock_locked = true;
- }
- u64 mid = s->GetId();
- u64 last_lock = s->last_lock;
- if (!unlock_locked)
- s->Reset(thr->proc()); // must not reset it before the report is printed
- s->mtx.Unlock();
- if (unlock_locked && ShouldReport(thr, ReportTypeMutexDestroyLocked)) {
- ThreadRegistryLock l(ctx->thread_registry);
- ScopedReport rep(ReportTypeMutexDestroyLocked);
- rep.AddMutex(mid);
- VarSizeStackTrace trace;
- ObtainCurrentStack(thr, pc, &trace);
- rep.AddStack(trace, true);
- FastState last(last_lock);
- RestoreStack(last.tid(), last.epoch(), &trace, 0);
- rep.AddStack(trace, true);
- rep.AddLocation(addr, 1);
- OutputReport(thr, rep);
-
- SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
- if (s != 0) {
- s->Reset(thr->proc());
- s->mtx.Unlock();
+ StackID creation_stack_id;
+ FastState last_lock;
+ {
+ auto s = ctx->metamap.GetSyncIfExists(addr);
+ if (!s)
+ return;
+ SlotLocker locker(thr);
+ {
+ Lock lock(&s->mtx);
+ creation_stack_id = s->creation_stack_id;
+ last_lock = s->last_lock;
+ if ((flagz & MutexFlagLinkerInit) || s->IsFlagSet(MutexFlagLinkerInit) ||
+ ((flagz & MutexFlagNotStatic) && !s->IsFlagSet(MutexFlagNotStatic))) {
+ // Destroy is no-op for linker-initialized mutexes.
+ return;
+ }
+ if (common_flags()->detect_deadlocks) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexDestroy(&cb, &s->dd);
+ ctx->dd->MutexInit(&cb, &s->dd);
+ }
+ if (flags()->report_destroy_locked && s->owner_tid != kInvalidTid &&
+ !s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
+ unlock_locked = true;
+ }
+ s->Reset();
}
+ // Imitate a memory write to catch unlock-destroy races.
+ if (pc && IsAppMem(addr))
+ MemoryAccess(thr, pc, addr, 1,
+ kAccessWrite | kAccessFree | kAccessSlotLocked);
}
- thr->mset.Remove(mid);
- // Imitate a memory write to catch unlock-destroy races.
- // Do this outside of sync mutex, because it can report a race which locks
- // sync mutexes.
- if (IsAppMem(addr)) {
- CHECK(!thr->is_freeing);
- thr->is_freeing = true;
- MemoryWrite(thr, pc, addr, kSizeLog1);
- thr->is_freeing = false;
- }
+ if (unlock_locked && ShouldReport(thr, ReportTypeMutexDestroyLocked))
+ ReportDestroyLocked(thr, pc, addr, last_lock, creation_stack_id);
+ thr->mset.DelAddr(addr, true);
// s will be destroyed and freed in MetaMap::FreeBlock.
}
-void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) NO_THREAD_SAFETY_ANALYSIS {
+void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
DPrintf("#%d: MutexPreLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
- if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
- SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
+ if (flagz & MutexFlagTryLock)
+ return;
+ if (!common_flags()->detect_deadlocks)
+ return;
+ Callback cb(thr, pc);
+ {
+ SlotLocker locker(thr);
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+ ReadLock lock(&s->mtx);
s->UpdateFlags(flagz);
- if (s->owner_tid != thr->tid) {
- Callback cb(thr, pc);
+ if (s->owner_tid != thr->tid)
ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
- s->mtx.ReadUnlock();
- ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
- } else {
- s->mtx.ReadUnlock();
- }
}
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
}
-void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz,
- int rec) NO_THREAD_SAFETY_ANALYSIS {
+void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) {
DPrintf("#%d: MutexPostLock %zx flag=0x%x rec=%d\n",
thr->tid, addr, flagz, rec);
if (flagz & MutexFlagRecursiveLock)
CHECK_GT(rec, 0);
else
rec = 1;
- if (IsAppMem(addr))
- MemoryReadAtomic(thr, pc, addr, kSizeLog1);
- SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
- s->UpdateFlags(flagz);
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
+ if (pc && IsAppMem(addr))
+ MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
bool report_double_lock = false;
- if (s->owner_tid == kInvalidTid) {
- CHECK_EQ(s->recursion, 0);
- s->owner_tid = thr->tid;
- s->last_lock = thr->fast_state.raw();
- } else if (s->owner_tid == thr->tid) {
- CHECK_GT(s->recursion, 0);
- } else if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
- s->SetFlags(MutexFlagBroken);
- report_double_lock = true;
- }
- const bool first = s->recursion == 0;
- s->recursion += rec;
- if (first) {
- AcquireImpl(thr, pc, &s->clock);
- AcquireImpl(thr, pc, &s->read_clock);
- } else if (!s->IsFlagSet(MutexFlagWriteReentrant)) {
- }
- thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
bool pre_lock = false;
- if (first && common_flags()->detect_deadlocks) {
- pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
- !(flagz & MutexFlagTryLock);
- Callback cb(thr, pc);
- if (pre_lock)
- ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
- ctx->dd->MutexAfterLock(&cb, &s->dd, true, flagz & MutexFlagTryLock);
+ bool first = false;
+ StackID creation_stack_id = kInvalidStackID;
+ {
+ SlotLocker locker(thr);
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+ creation_stack_id = s->creation_stack_id;
+ RecordMutexLock(thr, pc, addr, creation_stack_id, true);
+ {
+ Lock lock(&s->mtx);
+ first = s->recursion == 0;
+ s->UpdateFlags(flagz);
+ if (s->owner_tid == kInvalidTid) {
+ CHECK_EQ(s->recursion, 0);
+ s->owner_tid = thr->tid;
+ s->last_lock = thr->fast_state;
+ } else if (s->owner_tid == thr->tid) {
+ CHECK_GT(s->recursion, 0);
+ } else if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
+ report_double_lock = true;
+ }
+ s->recursion += rec;
+ if (first) {
+ if (!thr->ignore_sync) {
+ thr->clock.Acquire(s->clock);
+ thr->clock.Acquire(s->read_clock);
+ }
+ }
+ if (first && common_flags()->detect_deadlocks) {
+ pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
+ !(flagz & MutexFlagTryLock);
+ Callback cb(thr, pc);
+ if (pre_lock)
+ ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
+ ctx->dd->MutexAfterLock(&cb, &s->dd, true, flagz & MutexFlagTryLock);
+ }
+ }
}
- u64 mid = s->GetId();
- s->mtx.Unlock();
- // Can't touch s after this point.
- s = 0;
if (report_double_lock)
- ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid);
+ ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr,
+ creation_stack_id);
if (first && pre_lock && common_flags()->detect_deadlocks) {
Callback cb(thr, pc);
ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
}
}
-int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) NO_THREAD_SAFETY_ANALYSIS {
+int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
DPrintf("#%d: MutexUnlock %zx flagz=0x%x\n", thr->tid, addr, flagz);
- if (IsAppMem(addr))
- MemoryReadAtomic(thr, pc, addr, kSizeLog1);
- SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
- int rec = 0;
+ if (pc && IsAppMem(addr))
+ MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
+ StackID creation_stack_id;
+ RecordMutexUnlock(thr, addr);
bool report_bad_unlock = false;
- if (!SANITIZER_GO && (s->recursion == 0 || s->owner_tid != thr->tid)) {
- if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
- s->SetFlags(MutexFlagBroken);
- report_bad_unlock = true;
- }
- } else {
- rec = (flagz & MutexFlagRecursiveUnlock) ? s->recursion : 1;
- s->recursion -= rec;
- if (s->recursion == 0) {
- s->owner_tid = kInvalidTid;
- ReleaseStoreImpl(thr, pc, &s->clock);
- } else {
+ int rec = 0;
+ {
+ SlotLocker locker(thr);
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+ bool released = false;
+ {
+ Lock lock(&s->mtx);
+ creation_stack_id = s->creation_stack_id;
+ if (!SANITIZER_GO && (s->recursion == 0 || s->owner_tid != thr->tid)) {
+ if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
+ report_bad_unlock = true;
+ }
+ } else {
+ rec = (flagz & MutexFlagRecursiveUnlock) ? s->recursion : 1;
+ s->recursion -= rec;
+ if (s->recursion == 0) {
+ s->owner_tid = kInvalidTid;
+ if (!thr->ignore_sync) {
+ thr->clock.ReleaseStore(&s->clock);
+ released = true;
+ }
+ }
+ }
+ if (common_flags()->detect_deadlocks && s->recursion == 0 &&
+ !report_bad_unlock) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
+ }
}
+ if (released)
+ IncrementEpoch(thr);
}
- thr->mset.Del(s->GetId(), true);
- if (common_flags()->detect_deadlocks && s->recursion == 0 &&
- !report_bad_unlock) {
- Callback cb(thr, pc);
- ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
- }
- u64 mid = s->GetId();
- s->mtx.Unlock();
- // Can't touch s after this point.
if (report_bad_unlock)
- ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
+ ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr,
+ creation_stack_id);
if (common_flags()->detect_deadlocks && !report_bad_unlock) {
Callback cb(thr, pc);
ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
@@ -249,282 +263,275 @@ int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) NO_THREAD_SAFET
return rec;
}
-void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) NO_THREAD_SAFETY_ANALYSIS {
+void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
DPrintf("#%d: MutexPreReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
- if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
- SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
+ if ((flagz & MutexFlagTryLock) || !common_flags()->detect_deadlocks)
+ return;
+ Callback cb(thr, pc);
+ {
+ SlotLocker locker(thr);
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+ ReadLock lock(&s->mtx);
s->UpdateFlags(flagz);
- Callback cb(thr, pc);
ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
- s->mtx.ReadUnlock();
- ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
}
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
}
-void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) NO_THREAD_SAFETY_ANALYSIS {
+void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
DPrintf("#%d: MutexPostReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
- if (IsAppMem(addr))
- MemoryReadAtomic(thr, pc, addr, kSizeLog1);
- SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
- s->UpdateFlags(flagz);
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
+ if (pc && IsAppMem(addr))
+ MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
bool report_bad_lock = false;
- if (s->owner_tid != kInvalidTid) {
- if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
- s->SetFlags(MutexFlagBroken);
- report_bad_lock = true;
- }
- }
- AcquireImpl(thr, pc, &s->clock);
- s->last_lock = thr->fast_state.raw();
- thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
bool pre_lock = false;
- if (common_flags()->detect_deadlocks) {
- pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
- !(flagz & MutexFlagTryLock);
- Callback cb(thr, pc);
- if (pre_lock)
- ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
- ctx->dd->MutexAfterLock(&cb, &s->dd, false, flagz & MutexFlagTryLock);
+ StackID creation_stack_id = kInvalidStackID;
+ {
+ SlotLocker locker(thr);
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+ creation_stack_id = s->creation_stack_id;
+ RecordMutexLock(thr, pc, addr, creation_stack_id, false);
+ {
+ ReadLock lock(&s->mtx);
+ s->UpdateFlags(flagz);
+ if (s->owner_tid != kInvalidTid) {
+ if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
+ report_bad_lock = true;
+ }
+ }
+ if (!thr->ignore_sync)
+ thr->clock.Acquire(s->clock);
+ s->last_lock = thr->fast_state;
+ if (common_flags()->detect_deadlocks) {
+ pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
+ !(flagz & MutexFlagTryLock);
+ Callback cb(thr, pc);
+ if (pre_lock)
+ ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
+ ctx->dd->MutexAfterLock(&cb, &s->dd, false, flagz & MutexFlagTryLock);
+ }
+ }
}
- u64 mid = s->GetId();
- s->mtx.ReadUnlock();
- // Can't touch s after this point.
- s = 0;
if (report_bad_lock)
- ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid);
+ ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr,
+ creation_stack_id);
if (pre_lock && common_flags()->detect_deadlocks) {
Callback cb(thr, pc);
ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
}
}
-void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
+void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
- if (IsAppMem(addr))
- MemoryReadAtomic(thr, pc, addr, kSizeLog1);
- SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
+ if (pc && IsAppMem(addr))
+ MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
+ RecordMutexUnlock(thr, addr);
+ StackID creation_stack_id;
bool report_bad_unlock = false;
- if (s->owner_tid != kInvalidTid) {
- if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
- s->SetFlags(MutexFlagBroken);
- report_bad_unlock = true;
+ {
+ SlotLocker locker(thr);
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+ bool released = false;
+ {
+ Lock lock(&s->mtx);
+ creation_stack_id = s->creation_stack_id;
+ if (s->owner_tid != kInvalidTid) {
+ if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
+ report_bad_unlock = true;
+ }
+ }
+ if (!thr->ignore_sync) {
+ thr->clock.Release(&s->read_clock);
+ released = true;
+ }
+ if (common_flags()->detect_deadlocks && s->recursion == 0) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
+ }
}
+ if (released)
+ IncrementEpoch(thr);
}
- ReleaseImpl(thr, pc, &s->read_clock);
- if (common_flags()->detect_deadlocks && s->recursion == 0) {
- Callback cb(thr, pc);
- ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
- }
- u64 mid = s->GetId();
- s->mtx.Unlock();
- // Can't touch s after this point.
- thr->mset.Del(mid, false);
if (report_bad_unlock)
- ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr, mid);
+ ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr,
+ creation_stack_id);
if (common_flags()->detect_deadlocks) {
Callback cb(thr, pc);
ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
}
}
-void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
+void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
- if (IsAppMem(addr))
- MemoryReadAtomic(thr, pc, addr, kSizeLog1);
- SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
- bool write = true;
+ if (pc && IsAppMem(addr))
+ MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
+ RecordMutexUnlock(thr, addr);
+ StackID creation_stack_id;
bool report_bad_unlock = false;
- if (s->owner_tid == kInvalidTid) {
- // Seems to be read unlock.
- write = false;
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
- ReleaseImpl(thr, pc, &s->read_clock);
- } else if (s->owner_tid == thr->tid) {
- // Seems to be write unlock.
- thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
- CHECK_GT(s->recursion, 0);
- s->recursion--;
- if (s->recursion == 0) {
- s->owner_tid = kInvalidTid;
- ReleaseStoreImpl(thr, pc, &s->clock);
- } else {
+ bool write = true;
+ {
+ SlotLocker locker(thr);
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+ bool released = false;
+ {
+ Lock lock(&s->mtx);
+ creation_stack_id = s->creation_stack_id;
+ if (s->owner_tid == kInvalidTid) {
+ // Seems to be read unlock.
+ write = false;
+ if (!thr->ignore_sync) {
+ thr->clock.Release(&s->read_clock);
+ released = true;
+ }
+ } else if (s->owner_tid == thr->tid) {
+ // Seems to be write unlock.
+ CHECK_GT(s->recursion, 0);
+ s->recursion--;
+ if (s->recursion == 0) {
+ s->owner_tid = kInvalidTid;
+ if (!thr->ignore_sync) {
+ thr->clock.ReleaseStore(&s->clock);
+ released = true;
+ }
+ }
+ } else if (!s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
+ report_bad_unlock = true;
+ }
+ if (common_flags()->detect_deadlocks && s->recursion == 0) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
+ }
}
- } else if (!s->IsFlagSet(MutexFlagBroken)) {
- s->SetFlags(MutexFlagBroken);
- report_bad_unlock = true;
+ if (released)
+ IncrementEpoch(thr);
}
- thr->mset.Del(s->GetId(), write);
- if (common_flags()->detect_deadlocks && s->recursion == 0) {
- Callback cb(thr, pc);
- ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
- }
- u64 mid = s->GetId();
- s->mtx.Unlock();
- // Can't touch s after this point.
if (report_bad_unlock)
- ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
+ ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr,
+ creation_stack_id);
if (common_flags()->detect_deadlocks) {
Callback cb(thr, pc);
ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
}
}
-void MutexRepair(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
+void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
- SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
+ SlotLocker locker(thr);
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+ Lock lock(&s->mtx);
s->owner_tid = kInvalidTid;
s->recursion = 0;
- s->mtx.Unlock();
}
-void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
+void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) {
DPrintf("#%d: MutexInvalidAccess %zx\n", thr->tid, addr);
- SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
- u64 mid = s->GetId();
- s->mtx.Unlock();
- ReportMutexMisuse(thr, pc, ReportTypeMutexInvalidAccess, addr, mid);
+ StackID creation_stack_id = kInvalidStackID;
+ {
+ SlotLocker locker(thr);
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
+ if (s)
+ creation_stack_id = s->creation_stack_id;
+ }
+ ReportMutexMisuse(thr, pc, ReportTypeMutexInvalidAccess, addr,
+ creation_stack_id);
}
-void Acquire(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
+void Acquire(ThreadState *thr, uptr pc, uptr addr) {
DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
if (thr->ignore_sync)
return;
- SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, false);
+ auto s = ctx->metamap.GetSyncIfExists(addr);
if (!s)
return;
- AcquireImpl(thr, pc, &s->clock);
- s->mtx.ReadUnlock();
-}
-
-static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
- ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
- ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
- u64 epoch = tctx->epoch1;
- if (tctx->status == ThreadStatusRunning) {
- epoch = tctx->thr->fast_state.epoch();
- tctx->thr->clock.NoteGlobalAcquire(epoch);
- }
- thr->clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
+ SlotLocker locker(thr);
+ if (!s->clock)
+ return;
+ ReadLock lock(&s->mtx);
+ thr->clock.Acquire(s->clock);
}
-void AcquireGlobal(ThreadState *thr, uptr pc) {
+void AcquireGlobal(ThreadState *thr) {
DPrintf("#%d: AcquireGlobal\n", thr->tid);
if (thr->ignore_sync)
return;
- ThreadRegistryLock l(ctx->thread_registry);
- ctx->thread_registry->RunCallbackForEachThreadLocked(
- UpdateClockCallback, thr);
+ SlotLocker locker(thr);
+ for (auto &slot : ctx->slots) thr->clock.Set(slot.sid, slot.epoch());
}
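
AcquireGlobal above replaces the old per-thread callback walk: it folds the current epoch of every slot into the calling thread's vector clock, making the thread happen-after everything published so far. A minimal standalone sketch of that fold, using illustrative stand-in types rather than the real tsan interfaces:

#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>

constexpr std::size_t kSlotCnt = 256;  // assumed slot count
using Epoch = std::uint16_t;
using VectorClock = std::array<Epoch, kSlotCnt>;

// Component-wise max safely models thr->clock.Set(sid, slot.epoch()),
// since slot epochs only grow.
void AcquireGlobal(VectorClock &thr_clock, const VectorClock &slot_epochs) {
  for (std::size_t sid = 0; sid < kSlotCnt; sid++)
    thr_clock[sid] = std::max(thr_clock[sid], slot_epochs[sid]);
}
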
-void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
- DPrintf("#%d: ReleaseStoreAcquire %zx\n", thr->tid, addr);
+void Release(ThreadState *thr, uptr pc, uptr addr) {
+ DPrintf("#%d: Release %zx\n", thr->tid, addr);
if (thr->ignore_sync)
return;
- SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
- thr->fast_state.IncrementEpoch();
- // Can't increment epoch w/o writing to the trace as well.
- TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
- ReleaseStoreAcquireImpl(thr, pc, &s->clock);
- s->mtx.Unlock();
+ SlotLocker locker(thr);
+ {
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false);
+ Lock lock(&s->mtx);
+ thr->clock.Release(&s->clock);
+ }
+ IncrementEpoch(thr);
}
-void Release(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
- DPrintf("#%d: Release %zx\n", thr->tid, addr);
+void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
+ DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
if (thr->ignore_sync)
return;
- SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
- thr->fast_state.IncrementEpoch();
- // Can't increment epoch w/o writing to the trace as well.
- TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
- ReleaseImpl(thr, pc, &s->clock);
- s->mtx.Unlock();
+ SlotLocker locker(thr);
+ {
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false);
+ Lock lock(&s->mtx);
+ thr->clock.ReleaseStore(&s->clock);
+ }
+ IncrementEpoch(thr);
}
-void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
- DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
+void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr) {
+ DPrintf("#%d: ReleaseStoreAcquire %zx\n", thr->tid, addr);
if (thr->ignore_sync)
return;
- SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
- thr->fast_state.IncrementEpoch();
- // Can't increment epoch w/o writing to the trace as well.
- TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
- ReleaseStoreImpl(thr, pc, &s->clock);
- s->mtx.Unlock();
+ SlotLocker locker(thr);
+ {
+ auto s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false);
+ Lock lock(&s->mtx);
+ thr->clock.ReleaseStoreAcquire(&s->clock);
+ }
+ IncrementEpoch(thr);
}
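
The three entry points above differ only in how the thread clock is combined with the sync object's clock. A toy model on fixed-size vector clocks (illustrative only; the real VectorClock is heap-allocated and more compact):

#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>

constexpr std::size_t kSlots = 8;
using Clock = std::array<std::uint16_t, kSlots>;

// Release: join the thread clock into the sync clock (per-component max).
void Release(const Clock &thr, Clock &sync) {
  for (std::size_t i = 0; i < kSlots; i++) sync[i] = std::max(sync[i], thr[i]);
}

// ReleaseStore: overwrite the sync clock with the thread clock.
void ReleaseStore(const Clock &thr, Clock &sync) { sync = thr; }

// ReleaseStoreAcquire: the thread acquires the old sync clock while the
// sync clock is replaced by the thread clock.
void ReleaseStoreAcquire(Clock &thr, Clock &sync) {
  Clock old = sync;
  sync = thr;
  for (std::size_t i = 0; i < kSlots; i++) thr[i] = std::max(thr[i], old[i]);
}
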
-#if !SANITIZER_GO
-static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
- ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
- ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
- u64 epoch = tctx->epoch1;
- if (tctx->status == ThreadStatusRunning)
- epoch = tctx->thr->fast_state.epoch();
- thr->last_sleep_clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
+void IncrementEpoch(ThreadState *thr) {
+ DCHECK(!thr->ignore_sync);
+ DCHECK(thr->slot_locked);
+ Epoch epoch = EpochInc(thr->fast_state.epoch());
+ if (!EpochOverflow(epoch)) {
+ Sid sid = thr->fast_state.sid();
+ thr->clock.Set(sid, epoch);
+ thr->fast_state.SetEpoch(epoch);
+ thr->slot->SetEpoch(epoch);
+ TraceTime(thr);
+ }
}
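
IncrementEpoch is the single place where a thread advances its slot's time: it bumps the epoch, mirrors it into the thread's own clock and fast state, and emits a time event; on overflow nothing advances, which eventually forces a switch to a fresh slot. A hedged sketch of that guard (kEpochLast and the field layout are assumptions, not the runtime's real representation):

#include <cstdint>

using Epoch = std::uint16_t;
constexpr Epoch kEpochLast = 0x3fff;  // assumed maximum representable epoch

struct SlotClock {
  Epoch clock[256];   // per-sid vector clock
  std::uint8_t sid;   // slot this thread currently owns
  Epoch epoch;        // cached epoch of that slot

  // Mirrors IncrementEpoch: on overflow nothing advances and the caller
  // must eventually detach and attach a fresh slot.
  bool Increment() {
    Epoch next = static_cast<Epoch>(epoch + 1);
    if (next > kEpochLast)
      return false;
    clock[sid] = next;
    epoch = next;
    return true;
  }
};
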
+#if !SANITIZER_GO
void AfterSleep(ThreadState *thr, uptr pc) {
- DPrintf("#%d: AfterSleep %zx\n", thr->tid);
+ DPrintf("#%d: AfterSleep\n", thr->tid);
if (thr->ignore_sync)
return;
thr->last_sleep_stack_id = CurrentStackId(thr, pc);
- ThreadRegistryLock l(ctx->thread_registry);
- ctx->thread_registry->RunCallbackForEachThreadLocked(
- UpdateSleepClockCallback, thr);
+ thr->last_sleep_clock.Reset();
+ SlotLocker locker(thr);
+ for (auto &slot : ctx->slots)
+ thr->last_sleep_clock.Set(slot.sid, slot.epoch());
}
#endif
-void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
- if (thr->ignore_sync)
- return;
- thr->clock.set(thr->fast_state.epoch());
- thr->clock.acquire(&thr->proc()->clock_cache, c);
-}
-
-void ReleaseStoreAcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
- if (thr->ignore_sync)
- return;
- thr->clock.set(thr->fast_state.epoch());
- thr->fast_synch_epoch = thr->fast_state.epoch();
- thr->clock.releaseStoreAcquire(&thr->proc()->clock_cache, c);
-}
-
-void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
- if (thr->ignore_sync)
- return;
- thr->clock.set(thr->fast_state.epoch());
- thr->fast_synch_epoch = thr->fast_state.epoch();
- thr->clock.release(&thr->proc()->clock_cache, c);
-}
-
-void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
- if (thr->ignore_sync)
- return;
- thr->clock.set(thr->fast_state.epoch());
- thr->fast_synch_epoch = thr->fast_state.epoch();
- thr->clock.ReleaseStore(&thr->proc()->clock_cache, c);
-}
-
-void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
- if (thr->ignore_sync)
- return;
- thr->clock.set(thr->fast_state.epoch());
- thr->fast_synch_epoch = thr->fast_state.epoch();
- thr->clock.acq_rel(&thr->proc()->clock_cache, c);
-}
-
void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
if (r == 0 || !ShouldReport(thr, ReportTypeDeadlock))
return;
- ThreadRegistryLock l(ctx->thread_registry);
+ ThreadRegistryLock l(&ctx->thread_registry);
ScopedReport rep(ReportTypeDeadlock);
for (int i = 0; i < r->n; i++) {
- rep.AddMutex(r->loop[i].mtx_ctx0);
+ rep.AddMutex(r->loop[i].mtx_ctx0, r->loop[i].stk[0]);
rep.AddUniqueTid((int)r->loop[i].thr_ctx);
rep.AddThread((int)r->loop[i].thr_ctx);
}
@@ -532,7 +539,7 @@ void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
for (int i = 0; i < r->n; i++) {
for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
u32 stk = r->loop[i].stk[j];
- if (stk && stk != 0xffffffff) {
+ if (stk && stk != kInvalidStackID) {
rep.AddStack(StackDepotGet(stk), true);
} else {
// Sometimes we fail to extract the stack trace (FIXME: investigate),
@@ -544,4 +551,28 @@ void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
OutputReport(thr, rep);
}
+void ReportDestroyLocked(ThreadState *thr, uptr pc, uptr addr,
+ FastState last_lock, StackID creation_stack_id) {
+ // We need to lock the slot during RestoreStack because it protects
+ // the slot journal.
+ Lock slot_lock(&ctx->slots[static_cast<uptr>(last_lock.sid())].mtx);
+ ThreadRegistryLock l0(&ctx->thread_registry);
+ Lock slots_lock(&ctx->slot_mtx);
+ ScopedReport rep(ReportTypeMutexDestroyLocked);
+ rep.AddMutex(addr, creation_stack_id);
+ VarSizeStackTrace trace;
+ ObtainCurrentStack(thr, pc, &trace);
+ rep.AddStack(trace, true);
+
+ Tid tid;
+ DynamicMutexSet mset;
+ uptr tag;
+ if (!RestoreStack(EventType::kLock, last_lock.sid(), last_lock.epoch(), addr,
+ 0, kAccessWrite, &tid, &trace, mset, &tag))
+ return;
+ rep.AddStack(trace, true);
+ rep.AddLocation(addr, 1);
+ OutputReport(thr, rep);
+}
+
} // namespace __tsan
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_proc.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_proc.cpp
index def61cca14d5..5acc3967208e 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_proc.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_proc.cpp
@@ -35,7 +35,6 @@ void ProcDestroy(Processor *proc) {
#if !SANITIZER_GO
AllocatorProcFinish(proc);
#endif
- ctx->clock_alloc.FlushCache(&proc->clock_cache);
ctx->metamap.OnProcIdle(proc);
if (common_flags()->detect_deadlocks)
ctx->dd->DestroyPhysicalThread(proc->dd_pt);
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
index 3e809e653c70..0311df553fdd 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
@@ -10,20 +10,20 @@
//
//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
-#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "tsan_fd.h"
+#include "tsan_flags.h"
+#include "tsan_mman.h"
#include "tsan_platform.h"
+#include "tsan_report.h"
#include "tsan_rtl.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
-#include "tsan_report.h"
#include "tsan_sync.h"
-#include "tsan_mman.h"
-#include "tsan_flags.h"
-#include "tsan_fd.h"
namespace __tsan {
@@ -68,8 +68,10 @@ static void StackStripMain(SymbolizedStack *frames) {
} else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
last_frame->ClearAll();
last_frame2->next = nullptr;
- // Strip global ctors init.
- } else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) {
+ // Strip global ctors init, .preinit_array and main caller.
+ } else if (last && (0 == internal_strcmp(last, "__do_global_ctors_aux") ||
+ 0 == internal_strcmp(last, "__libc_csu_init") ||
+ 0 == internal_strcmp(last, "__libc_start_main"))) {
last_frame->ClearAll();
last_frame2->next = nullptr;
// If both are 0, then we probably just failed to symbolize.
@@ -120,7 +122,7 @@ static ReportStack *SymbolizeStack(StackTrace trace) {
}
StackStripMain(top);
- ReportStack *stack = ReportStack::New();
+ auto *stack = New<ReportStack>();
stack->frames = top;
return stack;
}
@@ -132,7 +134,7 @@ bool ShouldReport(ThreadState *thr, ReportType typ) {
CheckedMutex::CheckNoLocks();
// For the same reason check we didn't lock thread_registry yet.
if (SANITIZER_DEBUG)
- ThreadRegistryLock l(ctx->thread_registry);
+ ThreadRegistryLock l(&ctx->thread_registry);
if (!flags()->report_bugs || thr->suppress_reports)
return false;
switch (typ) {
@@ -154,9 +156,8 @@ bool ShouldReport(ThreadState *thr, ReportType typ) {
}
ScopedReportBase::ScopedReportBase(ReportType typ, uptr tag) {
- ctx->thread_registry->CheckLocked();
- void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
- rep_ = new(mem) ReportDesc;
+ ctx->thread_registry.CheckLocked();
+ rep_ = New<ReportDesc>();
rep_->typ = typ;
rep_->tag = tag;
ctx->report_mtx.Lock();
@@ -165,7 +166,6 @@ ScopedReportBase::ScopedReportBase(ReportType typ, uptr tag) {
ScopedReportBase::~ScopedReportBase() {
ctx->report_mtx.Unlock();
DestroyAndFree(rep_);
- rep_ = nullptr;
}
void ScopedReportBase::AddStack(StackTrace stack, bool suppressable) {
@@ -175,28 +175,31 @@ void ScopedReportBase::AddStack(StackTrace stack, bool suppressable) {
}
void ScopedReportBase::AddMemoryAccess(uptr addr, uptr external_tag, Shadow s,
- StackTrace stack, const MutexSet *mset) {
- void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
- ReportMop *mop = new(mem) ReportMop;
+ Tid tid, StackTrace stack,
+ const MutexSet *mset) {
+ uptr addr0, size;
+ AccessType typ;
+ s.GetAccess(&addr0, &size, &typ);
+ auto *mop = New<ReportMop>();
rep_->mops.PushBack(mop);
- mop->tid = s.tid();
- mop->addr = addr + s.addr0();
- mop->size = s.size();
- mop->write = s.IsWrite();
- mop->atomic = s.IsAtomic();
+ mop->tid = tid;
+ mop->addr = addr + addr0;
+ mop->size = size;
+ mop->write = !(typ & kAccessRead);
+ mop->atomic = typ & kAccessAtomic;
mop->stack = SymbolizeStack(stack);
mop->external_tag = external_tag;
if (mop->stack)
mop->stack->suppressable = true;
for (uptr i = 0; i < mset->Size(); i++) {
MutexSet::Desc d = mset->Get(i);
- u64 mid = this->AddMutex(d.id);
- ReportMopMutex mtx = {mid, d.write};
+ int id = this->AddMutex(d.addr, d.stack_id);
+ ReportMopMutex mtx = {id, d.write};
mop->mset.PushBack(mtx);
}
}
-void ScopedReportBase::AddUniqueTid(int unique_tid) {
+void ScopedReportBase::AddUniqueTid(Tid unique_tid) {
rep_->unique_tids.PushBack(unique_tid);
}
@@ -205,8 +208,7 @@ void ScopedReportBase::AddThread(const ThreadContext *tctx, bool suppressable) {
if ((u32)rep_->threads[i]->id == tctx->tid)
return;
}
- void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread));
- ReportThread *rt = new(mem) ReportThread;
+ auto *rt = New<ReportThread>();
rep_->threads.PushBack(rt);
rt->id = tctx->tid;
rt->os_id = tctx->os_id;
@@ -221,22 +223,10 @@ void ScopedReportBase::AddThread(const ThreadContext *tctx, bool suppressable) {
}
#if !SANITIZER_GO
-static bool FindThreadByUidLockedCallback(ThreadContextBase *tctx, void *arg) {
- int unique_id = *(int *)arg;
- return tctx->unique_id == (u32)unique_id;
-}
-
-static ThreadContext *FindThreadByUidLocked(int unique_id) {
- ctx->thread_registry->CheckLocked();
+static ThreadContext *FindThreadByTidLocked(Tid tid) {
+ ctx->thread_registry.CheckLocked();
return static_cast<ThreadContext *>(
- ctx->thread_registry->FindThreadContextLocked(
- FindThreadByUidLockedCallback, &unique_id));
-}
-
-static ThreadContext *FindThreadByTidLocked(int tid) {
- ctx->thread_registry->CheckLocked();
- return static_cast<ThreadContext*>(
- ctx->thread_registry->GetThreadLocked(tid));
+ ctx->thread_registry.GetThreadLocked(tid));
}
static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
@@ -251,10 +241,10 @@ static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
}
ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
- ctx->thread_registry->CheckLocked();
- ThreadContext *tctx = static_cast<ThreadContext*>(
- ctx->thread_registry->FindThreadContextLocked(IsInStackOrTls,
- (void*)addr));
+ ctx->thread_registry.CheckLocked();
+ ThreadContext *tctx =
+ static_cast<ThreadContext *>(ctx->thread_registry.FindThreadContextLocked(
+ IsInStackOrTls, (void *)addr));
if (!tctx)
return 0;
ThreadState *thr = tctx->thr;
@@ -264,58 +254,24 @@ ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
}
#endif
-void ScopedReportBase::AddThread(int unique_tid, bool suppressable) {
+void ScopedReportBase::AddThread(Tid tid, bool suppressable) {
#if !SANITIZER_GO
- if (const ThreadContext *tctx = FindThreadByUidLocked(unique_tid))
+ if (const ThreadContext *tctx = FindThreadByTidLocked(tid))
AddThread(tctx, suppressable);
#endif
}
-void ScopedReportBase::AddMutex(const SyncVar *s) {
- for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
- if (rep_->mutexes[i]->id == s->uid)
- return;
- }
- void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
- ReportMutex *rm = new(mem) ReportMutex;
- rep_->mutexes.PushBack(rm);
- rm->id = s->uid;
- rm->addr = s->addr;
- rm->destroyed = false;
- rm->stack = SymbolizeStackId(s->creation_stack_id);
-}
-
-u64 ScopedReportBase::AddMutex(u64 id) NO_THREAD_SAFETY_ANALYSIS {
- u64 uid = 0;
- u64 mid = id;
- uptr addr = SyncVar::SplitId(id, &uid);
- SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
- // Check that the mutex is still alive.
- // Another mutex can be created at the same address,
- // so check uid as well.
- if (s && s->CheckId(uid)) {
- mid = s->uid;
- AddMutex(s);
- } else {
- AddDeadMutex(id);
- }
- if (s)
- s->mtx.Unlock();
- return mid;
-}
-
-void ScopedReportBase::AddDeadMutex(u64 id) {
+int ScopedReportBase::AddMutex(uptr addr, StackID creation_stack_id) {
for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
- if (rep_->mutexes[i]->id == id)
- return;
+ if (rep_->mutexes[i]->addr == addr)
+ return rep_->mutexes[i]->id;
}
- void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
- ReportMutex *rm = new(mem) ReportMutex;
+ auto *rm = New<ReportMutex>();
rep_->mutexes.PushBack(rm);
- rm->id = id;
- rm->addr = 0;
- rm->destroyed = true;
- rm->stack = 0;
+ rm->id = rep_->mutexes.Size() - 1;
+ rm->addr = addr;
+ rm->stack = SymbolizeStackId(creation_stack_id);
+ return rm->id;
}
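
The rewritten AddMutex deduplicates by address and hands out the vector index as the report-local id, replacing the old global mutex ids. The same pattern in standalone form (ReportMutex here is a stand-in struct, not the report type above):

#include <cstddef>
#include <cstdint>
#include <vector>

struct ReportMutex { int id; std::uintptr_t addr; };

int AddMutex(std::vector<ReportMutex> &mutexes, std::uintptr_t addr) {
  for (std::size_t i = 0; i < mutexes.size(); i++)
    if (mutexes[i].addr == addr) return mutexes[i].id;  // already recorded
  mutexes.push_back({static_cast<int>(mutexes.size()), addr});
  return mutexes.back().id;
}
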
void ScopedReportBase::AddLocation(uptr addr, uptr size) {
@@ -323,43 +279,46 @@ void ScopedReportBase::AddLocation(uptr addr, uptr size) {
return;
#if !SANITIZER_GO
int fd = -1;
- int creat_tid = kInvalidTid;
- u32 creat_stack = 0;
- if (FdLocation(addr, &fd, &creat_tid, &creat_stack)) {
- ReportLocation *loc = ReportLocation::New(ReportLocationFD);
+ Tid creat_tid = kInvalidTid;
+ StackID creat_stack = 0;
+ bool closed = false;
+ if (FdLocation(addr, &fd, &creat_tid, &creat_stack, &closed)) {
+ auto *loc = New<ReportLocation>();
+ loc->type = ReportLocationFD;
+ loc->fd_closed = closed;
loc->fd = fd;
loc->tid = creat_tid;
loc->stack = SymbolizeStackId(creat_stack);
rep_->locs.PushBack(loc);
- ThreadContext *tctx = FindThreadByUidLocked(creat_tid);
- if (tctx)
- AddThread(tctx);
+ AddThread(creat_tid);
return;
}
MBlock *b = 0;
+ uptr block_begin = 0;
Allocator *a = allocator();
if (a->PointerIsMine((void*)addr)) {
- void *block_begin = a->GetBlockBegin((void*)addr);
+ block_begin = (uptr)a->GetBlockBegin((void *)addr);
if (block_begin)
- b = ctx->metamap.GetBlock((uptr)block_begin);
+ b = ctx->metamap.GetBlock(block_begin);
}
+ if (!b)
+ b = JavaHeapBlock(addr, &block_begin);
if (b != 0) {
- ThreadContext *tctx = FindThreadByTidLocked(b->tid);
- ReportLocation *loc = ReportLocation::New(ReportLocationHeap);
- loc->heap_chunk_start = (uptr)allocator()->GetBlockBegin((void *)addr);
+ auto *loc = New<ReportLocation>();
+ loc->type = ReportLocationHeap;
+ loc->heap_chunk_start = block_begin;
loc->heap_chunk_size = b->siz;
loc->external_tag = b->tag;
- loc->tid = tctx ? tctx->tid : b->tid;
+ loc->tid = b->tid;
loc->stack = SymbolizeStackId(b->stk);
rep_->locs.PushBack(loc);
- if (tctx)
- AddThread(tctx);
+ AddThread(b->tid);
return;
}
bool is_stack = false;
if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
- ReportLocation *loc =
- ReportLocation::New(is_stack ? ReportLocationStack : ReportLocationTLS);
+ auto *loc = New<ReportLocation>();
+ loc->type = is_stack ? ReportLocationStack : ReportLocationTLS;
loc->tid = tctx->tid;
rep_->locs.PushBack(loc);
AddThread(tctx);
@@ -373,13 +332,15 @@ void ScopedReportBase::AddLocation(uptr addr, uptr size) {
}
#if !SANITIZER_GO
-void ScopedReportBase::AddSleep(u32 stack_id) {
+void ScopedReportBase::AddSleep(StackID stack_id) {
rep_->sleep = SymbolizeStackId(stack_id);
}
#endif
void ScopedReportBase::SetCount(int count) { rep_->count = count; }
+void ScopedReportBase::SetSigNum(int sig) { rep_->signum = sig; }
+
const ReportDesc *ScopedReportBase::GetReport() const { return rep_; }
ScopedReport::ScopedReport(ReportType typ, uptr tag)
@@ -387,67 +348,256 @@ ScopedReport::ScopedReport(ReportType typ, uptr tag)
ScopedReport::~ScopedReport() {}
-void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
- MutexSet *mset, uptr *tag) {
+// Replays the trace up to the last_pos position in the last part,
+// or up to the provided epoch/sid (whichever is earlier),
+// and calls the provided function f for each event.
+template <typename Func>
+void TraceReplay(Trace *trace, TracePart *last, Event *last_pos, Sid sid,
+ Epoch epoch, Func f) {
+ TracePart *part = trace->parts.Front();
+ Sid ev_sid = kFreeSid;
+ Epoch ev_epoch = kEpochOver;
+ for (;;) {
+ DCHECK_EQ(part->trace, trace);
+ // Note: an event can't start in the last element.
+ // Since an event can take up to 2 elements,
+ // we ensure we have at least 2 before adding an event.
+ Event *end = &part->events[TracePart::kSize - 1];
+ if (part == last)
+ end = last_pos;
+ f(kFreeSid, kEpochOver, nullptr); // notify about part start
+ for (Event *evp = &part->events[0]; evp < end; evp++) {
+ Event *evp0 = evp;
+ if (!evp->is_access && !evp->is_func) {
+ switch (evp->type) {
+ case EventType::kTime: {
+ auto *ev = reinterpret_cast<EventTime *>(evp);
+ ev_sid = static_cast<Sid>(ev->sid);
+ ev_epoch = static_cast<Epoch>(ev->epoch);
+ if (ev_sid == sid && ev_epoch > epoch)
+ return;
+ break;
+ }
+ case EventType::kAccessExt:
+ FALLTHROUGH;
+ case EventType::kAccessRange:
+ FALLTHROUGH;
+ case EventType::kLock:
+ FALLTHROUGH;
+ case EventType::kRLock:
+ // These take 2 Event elements.
+ evp++;
+ break;
+ case EventType::kUnlock:
+ // This takes 1 Event element.
+ break;
+ }
+ }
+ CHECK_NE(ev_sid, kFreeSid);
+ CHECK_NE(ev_epoch, kEpochOver);
+ f(ev_sid, ev_epoch, evp0);
+ }
+ if (part == last)
+ return;
+ part = trace->parts.Next(part);
+ CHECK(part);
+ }
+ CHECK(0);
+}
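
TraceReplay walks the linked list of trace parts from the front, decoding one- and two-element events, and stops either at last_pos in the final part or once the target sid/epoch is passed. A simplified skeleton of that walk over toy types (not the real tsan trace encoding):

#include <cstddef>
#include <vector>

struct Event { bool two_slots; int payload; };
struct Part { std::vector<Event> events; Part *next = nullptr; };

template <typename Func>
void Replay(Part *first, Part *last, std::size_t last_pos, Func f) {
  for (Part *p = first;; p = p->next) {
    std::size_t end = (p == last) ? last_pos : p->events.size();
    for (std::size_t i = 0; i < end; i++) {
      f(p->events[i]);
      if (p->events[i].two_slots) i++;  // two-element events occupy 2 slots
    }
    if (p == last) return;
  }
}
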
+
+static void RestoreStackMatch(VarSizeStackTrace *pstk, MutexSet *pmset,
+ Vector<uptr> *stack, MutexSet *mset, uptr pc,
+ bool *found) {
+ DPrintf2(" MATCHED\n");
+ *pmset = *mset;
+ stack->PushBack(pc);
+ pstk->Init(&(*stack)[0], stack->Size());
+ stack->PopBack();
+ *found = true;
+}
+
+// Checks if addr1|size1 is fully contained in addr2|size2.
+// We check for fully contained instead of just overlapping
+// because a memory access is always traced once, but can be
+// split into multiple accesses in the shadow.
+static constexpr bool IsWithinAccess(uptr addr1, uptr size1, uptr addr2,
+ uptr size2) {
+ return addr1 >= addr2 && addr1 + size1 <= addr2 + size2;
+}
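
As a quick check of the predicate (valid if dropped into the same translation unit, since the function is constexpr): an access matches only when it is fully inside the traced one; mere overlap is not enough.

static_assert(IsWithinAccess(0x1004, 4, 0x1000, 8), "contained: matches");
static_assert(!IsWithinAccess(0x0ffe, 4, 0x1000, 8), "overlap only: rejected");
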
+
+// Replays the trace of slot sid up to the target event identified
+// by epoch/addr/size/typ and restores and returns tid, stack, mutex set
+// and tag for that event. If there are multiple such events, it returns
+// the last one. Returns false if the event is not present in the trace.
+bool RestoreStack(EventType type, Sid sid, Epoch epoch, uptr addr, uptr size,
+ AccessType typ, Tid *ptid, VarSizeStackTrace *pstk,
+ MutexSet *pmset, uptr *ptag) {
// This function restores stack trace and mutex set for the thread/epoch.
// It does so by getting stack trace and mutex set at the beginning of
// trace part, and then replaying the trace till the given epoch.
- Trace* trace = ThreadTrace(tid);
- ReadLock l(&trace->mtx);
- const int partidx = (epoch / kTracePartSize) % TraceParts();
- TraceHeader* hdr = &trace->headers[partidx];
- if (epoch < hdr->epoch0 || epoch >= hdr->epoch0 + kTracePartSize)
- return;
- CHECK_EQ(RoundDown(epoch, kTracePartSize), hdr->epoch0);
- const u64 epoch0 = RoundDown(epoch, TraceSize());
- const u64 eend = epoch % TraceSize();
- const u64 ebegin = RoundDown(eend, kTracePartSize);
- DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
- tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
- Vector<uptr> stack;
- stack.Resize(hdr->stack0.size + 64);
- for (uptr i = 0; i < hdr->stack0.size; i++) {
- stack[i] = hdr->stack0.trace[i];
- DPrintf2(" #%02zu: pc=%zx\n", i, stack[i]);
- }
- if (mset)
- *mset = hdr->mset0;
- uptr pos = hdr->stack0.size;
- Event *events = (Event*)GetThreadTrace(tid);
- for (uptr i = ebegin; i <= eend; i++) {
- Event ev = events[i];
- EventType typ = (EventType)(ev >> kEventPCBits);
- uptr pc = (uptr)(ev & ((1ull << kEventPCBits) - 1));
- DPrintf2(" %zu typ=%d pc=%zx\n", i, typ, pc);
- if (typ == EventTypeMop) {
- stack[pos] = pc;
- } else if (typ == EventTypeFuncEnter) {
- if (stack.Size() < pos + 2)
- stack.Resize(pos + 2);
- stack[pos++] = pc;
- } else if (typ == EventTypeFuncExit) {
- if (pos > 0)
- pos--;
+ DPrintf2("RestoreStack: sid=%u@%u addr=0x%zx/%zu typ=%x\n",
+ static_cast<int>(sid), static_cast<int>(epoch), addr, size,
+ static_cast<int>(typ));
+ ctx->slot_mtx.CheckLocked(); // needed to prevent trace part recycling
+ ctx->thread_registry.CheckLocked();
+ TidSlot *slot = &ctx->slots[static_cast<uptr>(sid)];
+ Tid tid = kInvalidTid;
+ // Need to lock the slot mutex as it protects slot->journal.
+ slot->mtx.CheckLocked();
+ for (uptr i = 0; i < slot->journal.Size(); i++) {
+ DPrintf2(" journal: epoch=%d tid=%d\n",
+ static_cast<int>(slot->journal[i].epoch), slot->journal[i].tid);
+ if (i == slot->journal.Size() - 1 || slot->journal[i + 1].epoch > epoch) {
+ tid = slot->journal[i].tid;
+ break;
}
- if (mset) {
- if (typ == EventTypeLock) {
- mset->Add(pc, true, epoch0 + i);
- } else if (typ == EventTypeUnlock) {
- mset->Del(pc, true);
- } else if (typ == EventTypeRLock) {
- mset->Add(pc, false, epoch0 + i);
- } else if (typ == EventTypeRUnlock) {
- mset->Del(pc, false);
- }
+ }
+ if (tid == kInvalidTid)
+ return false;
+ *ptid = tid;
+ ThreadContext *tctx =
+ static_cast<ThreadContext *>(ctx->thread_registry.GetThreadLocked(tid));
+ Trace *trace = &tctx->trace;
+ // Snapshot first/last parts and the current position in the last part.
+ TracePart *first_part;
+ TracePart *last_part;
+ Event *last_pos;
+ {
+ Lock lock(&trace->mtx);
+ first_part = trace->parts.Front();
+ if (!first_part) {
+ DPrintf2("RestoreStack: tid=%d trace=%p no trace parts\n", tid, trace);
+ return false;
}
- for (uptr j = 0; j <= pos; j++)
- DPrintf2(" #%zu: %zx\n", j, stack[j]);
+ last_part = trace->parts.Back();
+ last_pos = trace->final_pos;
+ if (tctx->thr)
+ last_pos = (Event *)atomic_load_relaxed(&tctx->thr->trace_pos);
}
- if (pos == 0 && stack[0] == 0)
- return;
- pos++;
- stk->Init(&stack[0], pos);
- ExtractTagFromStack(stk, tag);
+ DynamicMutexSet mset;
+ Vector<uptr> stack;
+ uptr prev_pc = 0;
+ bool found = false;
+ bool is_read = typ & kAccessRead;
+ bool is_atomic = typ & kAccessAtomic;
+ bool is_free = typ & kAccessFree;
+ DPrintf2("RestoreStack: tid=%d parts=[%p-%p] last_pos=%p\n", tid,
+ trace->parts.Front(), last_part, last_pos);
+ TraceReplay(
+ trace, last_part, last_pos, sid, epoch,
+ [&](Sid ev_sid, Epoch ev_epoch, Event *evp) {
+ if (evp == nullptr) {
+ // Each trace part is self-consistent, so we reset state.
+ stack.Resize(0);
+ mset->Reset();
+ prev_pc = 0;
+ return;
+ }
+ bool match = ev_sid == sid && ev_epoch == epoch;
+ if (evp->is_access) {
+ if (evp->is_func == 0 && evp->type == EventType::kAccessExt &&
+ evp->_ == 0) // NopEvent
+ return;
+ auto *ev = reinterpret_cast<EventAccess *>(evp);
+ uptr ev_addr = RestoreAddr(ev->addr);
+ uptr ev_size = 1 << ev->size_log;
+ uptr ev_pc =
+ prev_pc + ev->pc_delta - (1 << (EventAccess::kPCBits - 1));
+ prev_pc = ev_pc;
+ DPrintf2(" Access: pc=0x%zx addr=0x%zx/%zu type=%u/%u\n", ev_pc,
+ ev_addr, ev_size, ev->is_read, ev->is_atomic);
+ if (match && type == EventType::kAccessExt &&
+ IsWithinAccess(addr, size, ev_addr, ev_size) &&
+ is_read == ev->is_read && is_atomic == ev->is_atomic && !is_free)
+ RestoreStackMatch(pstk, pmset, &stack, mset, ev_pc, &found);
+ return;
+ }
+ if (evp->is_func) {
+ auto *ev = reinterpret_cast<EventFunc *>(evp);
+ if (ev->pc) {
+ DPrintf2(" FuncEnter: pc=0x%llx\n", ev->pc);
+ stack.PushBack(ev->pc);
+ } else {
+ DPrintf2(" FuncExit\n");
+            // We don't log pathologically large stacks in each part;
+            // if the stack was truncated, we can have more func exits
+            // than entries.
+ if (stack.Size())
+ stack.PopBack();
+ }
+ return;
+ }
+ switch (evp->type) {
+ case EventType::kAccessExt: {
+ auto *ev = reinterpret_cast<EventAccessExt *>(evp);
+ uptr ev_addr = RestoreAddr(ev->addr);
+ uptr ev_size = 1 << ev->size_log;
+ prev_pc = ev->pc;
+ DPrintf2(" AccessExt: pc=0x%llx addr=0x%zx/%zu type=%u/%u\n",
+ ev->pc, ev_addr, ev_size, ev->is_read, ev->is_atomic);
+ if (match && type == EventType::kAccessExt &&
+ IsWithinAccess(addr, size, ev_addr, ev_size) &&
+ is_read == ev->is_read && is_atomic == ev->is_atomic &&
+ !is_free)
+ RestoreStackMatch(pstk, pmset, &stack, mset, ev->pc, &found);
+ break;
+ }
+ case EventType::kAccessRange: {
+ auto *ev = reinterpret_cast<EventAccessRange *>(evp);
+ uptr ev_addr = RestoreAddr(ev->addr);
+ uptr ev_size =
+ (ev->size_hi << EventAccessRange::kSizeLoBits) + ev->size_lo;
+ uptr ev_pc = RestoreAddr(ev->pc);
+ prev_pc = ev_pc;
+ DPrintf2(" Range: pc=0x%zx addr=0x%zx/%zu type=%u/%u\n", ev_pc,
+ ev_addr, ev_size, ev->is_read, ev->is_free);
+ if (match && type == EventType::kAccessExt &&
+ IsWithinAccess(addr, size, ev_addr, ev_size) &&
+ is_read == ev->is_read && !is_atomic && is_free == ev->is_free)
+ RestoreStackMatch(pstk, pmset, &stack, mset, ev_pc, &found);
+ break;
+ }
+ case EventType::kLock:
+ FALLTHROUGH;
+ case EventType::kRLock: {
+ auto *ev = reinterpret_cast<EventLock *>(evp);
+ bool is_write = ev->type == EventType::kLock;
+ uptr ev_addr = RestoreAddr(ev->addr);
+ uptr ev_pc = RestoreAddr(ev->pc);
+ StackID stack_id =
+ (ev->stack_hi << EventLock::kStackIDLoBits) + ev->stack_lo;
+ DPrintf2(" Lock: pc=0x%zx addr=0x%zx stack=%u write=%d\n", ev_pc,
+ ev_addr, stack_id, is_write);
+ mset->AddAddr(ev_addr, stack_id, is_write);
+            // Events with ev_pc == 0 are written to the beginning of the
+            // trace part as the initial mutex set (they are not real locks).
+ if (match && type == EventType::kLock && addr == ev_addr && ev_pc)
+ RestoreStackMatch(pstk, pmset, &stack, mset, ev_pc, &found);
+ break;
+ }
+ case EventType::kUnlock: {
+ auto *ev = reinterpret_cast<EventUnlock *>(evp);
+ uptr ev_addr = RestoreAddr(ev->addr);
+ DPrintf2(" Unlock: addr=0x%zx\n", ev_addr);
+ mset->DelAddr(ev_addr);
+ break;
+ }
+ case EventType::kTime:
+ // TraceReplay already extracted sid/epoch from it,
+ // nothing else to do here.
+ break;
+ }
+ });
+ ExtractTagFromStack(pstk, ptag);
+ return found;
+}
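
The tid recovery at the top of RestoreStack scans the slot journal for the last thread that owned the slot at or before the target epoch. The same lookup as a standalone sketch (JournalEntry is a stand-in for the runtime's journal record):

#include <cstddef>
#include <cstdint>
#include <vector>

struct JournalEntry { std::uint16_t epoch; int tid; };

// Entries are ordered by epoch; the owner is the last entry whose
// successor (if any) starts after the target epoch.
int FindOwner(const std::vector<JournalEntry> &journal, std::uint16_t epoch) {
  for (std::size_t i = 0; i < journal.size(); i++) {
    if (i == journal.size() - 1 || journal[i + 1].epoch > epoch)
      return journal[i].tid;
  }
  return -1;  // journal empty: no owner, akin to kInvalidTid
}
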
+
+bool RacyStacks::operator==(const RacyStacks &other) const {
+ if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
+ return true;
+ if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
+ return true;
+ return false;
}
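
Equality here treats the two stack hashes as an unordered pair, so a race already reported as (A, B) also suppresses the mirror-image (B, A) report. The same idea in isolation:

// Unordered-pair equality: {h0, h1} == {h1, h0}, as in RacyStacks above.
struct Pair { unsigned a, b; };
inline bool SamePair(Pair x, Pair y) {
  return (x.a == y.a && x.b == y.b) || (x.a == y.b && x.b == y.a);
}
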
static bool FindRacyStacks(const RacyStacks &hash) {
@@ -478,35 +628,6 @@ static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2]) {
return false;
}
-static bool FindRacyAddress(const RacyAddress &ra0) {
- for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
- RacyAddress ra2 = ctx->racy_addresses[i];
- uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
- uptr minend = min(ra0.addr_max, ra2.addr_max);
- if (maxbeg < minend) {
- VPrintf(2, "ThreadSanitizer: suppressing report as doubled (addr)\n");
- return true;
- }
- }
- return false;
-}
-
-static bool HandleRacyAddress(ThreadState *thr, uptr addr_min, uptr addr_max) {
- if (!flags()->suppress_equal_addresses)
- return false;
- RacyAddress ra0 = {addr_min, addr_max};
- {
- ReadLock lock(&ctx->racy_mtx);
- if (FindRacyAddress(ra0))
- return true;
- }
- Lock lock(&ctx->racy_mtx);
- if (FindRacyAddress(ra0))
- return true;
- ctx->racy_addresses.PushBack(ra0);
- return false;
-}
-
bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
// These should have been checked in ShouldReport.
// It's too late to check them here, we have already taken locks.
@@ -532,10 +653,7 @@ bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
ctx->fired_suppressions.push_back(s);
}
{
- bool old_is_freeing = thr->is_freeing;
- thr->is_freeing = false;
bool suppressed = OnReport(rep, pc_or_addr != 0);
- thr->is_freeing = old_is_freeing;
if (suppressed) {
thr->current_report = nullptr;
return false;
@@ -582,101 +700,81 @@ static bool IsFiredSuppression(Context *ctx, ReportType type, uptr addr) {
return false;
}
-static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
- Shadow s0(thr->racy_state[0]);
- Shadow s1(thr->racy_state[1]);
- CHECK(!(s0.IsAtomic() && s1.IsAtomic()));
- if (!s0.IsAtomic() && !s1.IsAtomic())
- return true;
- if (s0.IsAtomic() && s1.IsFreed())
- return true;
- if (s1.IsAtomic() && thr->is_freeing)
- return true;
- return false;
+static bool SpuriousRace(Shadow old) {
+ Shadow last(LoadShadow(&ctx->last_spurious_race));
+ return last.sid() == old.sid() && last.epoch() == old.epoch();
}
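
When RestoreStack later fails to find the old access in the trace, ReportRace stores the offending shadow in last_spurious_race so the identical stale shadow does not trigger repeated, expensive restore attempts. A sketch of that cache-and-skip pattern (the packed key is an assumption standing in for the raw shadow word):

#include <atomic>
#include <cstdint>

std::atomic<std::uint64_t> last_spurious{0};  // packed (sid, epoch) key

bool IsSpurious(std::uint64_t key) {
  return last_spurious.load(std::memory_order_relaxed) == key;
}
void MarkSpurious(std::uint64_t key) {
  last_spurious.store(key, std::memory_order_relaxed);
}
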
-void ReportRace(ThreadState *thr) {
+void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old,
+ AccessType typ0) {
CheckedMutex::CheckNoLocks();
// Symbolizer makes lots of intercepted calls. If we try to process them,
// at best it will cause deadlocks on internal mutexes.
ScopedIgnoreInterceptors ignore;
+ uptr addr = ShadowToMem(shadow_mem);
+ DPrintf("#%d: ReportRace %p\n", thr->tid, (void *)addr);
if (!ShouldReport(thr, ReportTypeRace))
return;
- if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
+ uptr addr_off0, size0;
+ cur.GetAccess(&addr_off0, &size0, nullptr);
+ uptr addr_off1, size1, typ1;
+ old.GetAccess(&addr_off1, &size1, &typ1);
+ if (!flags()->report_atomic_races &&
+ ((typ0 & kAccessAtomic) || (typ1 & kAccessAtomic)) &&
+ !(typ0 & kAccessFree) && !(typ1 & kAccessFree))
+ return;
+ if (SpuriousRace(old))
return;
- bool freed = false;
- {
- Shadow s(thr->racy_state[1]);
- freed = s.GetFreedAndReset();
- thr->racy_state[1] = s.raw();
- }
-
- uptr addr = ShadowToMem((uptr)thr->racy_shadow_addr);
- uptr addr_min = 0;
- uptr addr_max = 0;
- {
- uptr a0 = addr + Shadow(thr->racy_state[0]).addr0();
- uptr a1 = addr + Shadow(thr->racy_state[1]).addr0();
- uptr e0 = a0 + Shadow(thr->racy_state[0]).size();
- uptr e1 = a1 + Shadow(thr->racy_state[1]).size();
- addr_min = min(a0, a1);
- addr_max = max(e0, e1);
- if (IsExpectedReport(addr_min, addr_max - addr_min))
- return;
- }
- if (HandleRacyAddress(thr, addr_min, addr_max))
+ const uptr kMop = 2;
+ Shadow s[kMop] = {cur, old};
+ uptr addr0 = addr + addr_off0;
+ uptr addr1 = addr + addr_off1;
+ uptr end0 = addr0 + size0;
+ uptr end1 = addr1 + size1;
+ uptr addr_min = min(addr0, addr1);
+ uptr addr_max = max(end0, end1);
+ if (IsExpectedReport(addr_min, addr_max - addr_min))
return;
- ReportType typ = ReportTypeRace;
- if (thr->is_vptr_access && freed)
- typ = ReportTypeVptrUseAfterFree;
- else if (thr->is_vptr_access)
- typ = ReportTypeVptrRace;
- else if (freed)
- typ = ReportTypeUseAfterFree;
+ ReportType rep_typ = ReportTypeRace;
+ if ((typ0 & kAccessVptr) && (typ1 & kAccessFree))
+ rep_typ = ReportTypeVptrUseAfterFree;
+ else if (typ0 & kAccessVptr)
+ rep_typ = ReportTypeVptrRace;
+ else if (typ1 & kAccessFree)
+ rep_typ = ReportTypeUseAfterFree;
- if (IsFiredSuppression(ctx, typ, addr))
+ if (IsFiredSuppression(ctx, rep_typ, addr))
return;
- const uptr kMop = 2;
VarSizeStackTrace traces[kMop];
- uptr tags[kMop] = {kExternalTagNone};
- uptr toppc = TraceTopPC(thr);
- if (toppc >> kEventPCBits) {
- // This is a work-around for a known issue.
- // The scenario where this happens is rather elaborate and requires
- // an instrumented __sanitizer_report_error_summary callback and
- // a __tsan_symbolize_external callback and a race during a range memory
- // access larger than 8 bytes. MemoryAccessRange adds the current PC to
- // the trace and starts processing memory accesses. A first memory access
- // triggers a race, we report it and call the instrumented
- // __sanitizer_report_error_summary, which adds more stuff to the trace
- // since it is intrumented. Then a second memory access in MemoryAccessRange
- // also triggers a race and we get here and call TraceTopPC to get the
- // current PC, however now it contains some unrelated events from the
- // callback. Most likely, TraceTopPC will now return a EventTypeFuncExit
- // event. Later we subtract -1 from it (in GetPreviousInstructionPc)
- // and the resulting PC has kExternalPCBit set, so we pass it to
- // __tsan_symbolize_external_ex. __tsan_symbolize_external_ex is within its
- // rights to crash since the PC is completely bogus.
- // test/tsan/double_race.cpp contains a test case for this.
- toppc = 0;
- }
- ObtainCurrentStack(thr, toppc, &traces[0], &tags[0]);
- if (IsFiredSuppression(ctx, typ, traces[0]))
+ Tid tids[kMop] = {thr->tid, kInvalidTid};
+ uptr tags[kMop] = {kExternalTagNone, kExternalTagNone};
+
+ ObtainCurrentStack(thr, thr->trace_prev_pc, &traces[0], &tags[0]);
+ if (IsFiredSuppression(ctx, rep_typ, traces[0]))
return;
- // MutexSet is too large to live on stack.
- Vector<u64> mset_buffer;
- mset_buffer.Resize(sizeof(MutexSet) / sizeof(u64) + 1);
- MutexSet *mset2 = new(&mset_buffer[0]) MutexSet();
+ DynamicMutexSet mset1;
+ MutexSet *mset[kMop] = {&thr->mset, mset1};
- Shadow s2(thr->racy_state[1]);
- RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2, &tags[1]);
- if (IsFiredSuppression(ctx, typ, traces[1]))
+ // We need to lock the slot during RestoreStack because it protects
+ // the slot journal.
+ Lock slot_lock(&ctx->slots[static_cast<uptr>(s[1].sid())].mtx);
+ ThreadRegistryLock l0(&ctx->thread_registry);
+ Lock slots_lock(&ctx->slot_mtx);
+ if (SpuriousRace(old))
+ return;
+ if (!RestoreStack(EventType::kAccessExt, s[1].sid(), s[1].epoch(), addr1,
+ size1, typ1, &tids[1], &traces[1], mset[1], &tags[1])) {
+ StoreShadow(&ctx->last_spurious_race, old.raw());
+ return;
+ }
+
+ if (IsFiredSuppression(ctx, rep_typ, traces[1]))
return;
if (HandleRacyStacks(thr, traces))
@@ -686,39 +784,41 @@ void ReportRace(ThreadState *thr) {
uptr tag = kExternalTagNone;
for (uptr i = 0; i < kMop; i++) {
if (tags[i] != kExternalTagNone) {
- typ = ReportTypeExternalRace;
+ rep_typ = ReportTypeExternalRace;
tag = tags[i];
break;
}
}
- ThreadRegistryLock l0(ctx->thread_registry);
- ScopedReport rep(typ, tag);
- for (uptr i = 0; i < kMop; i++) {
- Shadow s(thr->racy_state[i]);
- rep.AddMemoryAccess(addr, tags[i], s, traces[i],
- i == 0 ? &thr->mset : mset2);
- }
+ ScopedReport rep(rep_typ, tag);
+ for (uptr i = 0; i < kMop; i++)
+ rep.AddMemoryAccess(addr, tags[i], s[i], tids[i], traces[i], mset[i]);
for (uptr i = 0; i < kMop; i++) {
- FastState s(thr->racy_state[i]);
- ThreadContext *tctx = static_cast<ThreadContext*>(
- ctx->thread_registry->GetThreadLocked(s.tid()));
- if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
- continue;
+ ThreadContext *tctx = static_cast<ThreadContext *>(
+ ctx->thread_registry.GetThreadLocked(tids[i]));
rep.AddThread(tctx);
}
rep.AddLocation(addr_min, addr_max - addr_min);
-#if !SANITIZER_GO
- {
- Shadow s(thr->racy_state[1]);
- if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
- rep.AddSleep(thr->last_sleep_stack_id);
+ if (flags()->print_full_thread_history) {
+ const ReportDesc *rep_desc = rep.GetReport();
+ for (uptr i = 0; i < rep_desc->threads.Size(); i++) {
+ Tid parent_tid = rep_desc->threads[i]->parent_tid;
+ if (parent_tid == kMainTid || parent_tid == kInvalidTid)
+ continue;
+ ThreadContext *parent_tctx = static_cast<ThreadContext *>(
+ ctx->thread_registry.GetThreadLocked(parent_tid));
+ rep.AddThread(parent_tctx);
+ }
}
-#endif
+#if !SANITIZER_GO
+ if (!((typ0 | typ1) & kAccessFree) &&
+ s[1].epoch() <= thr->last_sleep_clock.Get(s[1].sid()))
+ rep.AddSleep(thr->last_sleep_stack_id);
+#endif
OutputReport(thr, rep);
}
@@ -738,9 +838,7 @@ void PrintCurrentStack(ThreadState *thr, uptr pc) {
ALWAYS_INLINE USED void PrintCurrentStackSlow(uptr pc) {
#if !SANITIZER_GO
uptr bp = GET_CURRENT_FRAME();
- BufferedStackTrace *ptrace =
- new(internal_alloc(MBlockStackTrace, sizeof(BufferedStackTrace)))
- BufferedStackTrace();
+ auto *ptrace = New<BufferedStackTrace>();
ptrace->Unwind(pc, bp, nullptr, false);
for (uptr i = 0; i < ptrace->size / 2; i++) {
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_riscv64.S b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_riscv64.S
new file mode 100644
index 000000000000..8e6b9b9432ef
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_riscv64.S
@@ -0,0 +1,203 @@
+#include "sanitizer_common/sanitizer_asm.h"
+
+.section .text
+
+.comm _ZN14__interception11real_setjmpE,8,8
+.globl ASM_SYMBOL_INTERCEPTOR(setjmp)
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(setjmp))
+ASM_SYMBOL_INTERCEPTOR(setjmp):
+ CFI_STARTPROC
+
+ // Save frame pointer and return address register
+ addi sp, sp, -32
+ sd ra, 24(sp)
+ sd s0, 16(sp)
+ CFI_DEF_CFA_OFFSET (32)
+ CFI_OFFSET (1, -8)
+ CFI_OFFSET (8, -16)
+
+ // Adjust the SP for previous frame
+ addi s0, sp, 32
+ CFI_DEF_CFA_REGISTER (8)
+
+ // Save env parameter
+ sd a0, 8(sp)
+ CFI_OFFSET (10, -24)
+
+ // Obtain SP, first argument to `void __tsan_setjmp(uptr sp)`
+ addi a0, s0, 0
+
+ // call tsan interceptor
+ call ASM_SYMBOL(__tsan_setjmp)
+
+ // Restore env parameter
+ ld a0, 8(sp)
+ CFI_RESTORE (10)
+
+ // Restore frame/link register
+ ld s0, 16(sp)
+ ld ra, 24(sp)
+ addi sp, sp, 32
+ CFI_RESTORE (8)
+ CFI_RESTORE (1)
+ CFI_DEF_CFA (2, 0)
+
+ // tail jump to libc setjmp
+ la t1, _ZN14__interception11real_setjmpE
+ ld t1, 0(t1)
+ jr t1
+
+ CFI_ENDPROC
+ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(setjmp))
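
All four interceptors in this file follow the same shape; in C-like terms it is roughly the sketch below. This must be assembly in reality, because setjmp captures the caller's frame, which an ordinary C wrapper would disturb; real_setjmp stands in for the interception slot the code loads through t1.

// C-level shape of the asm interceptors; illustrative only.
extern "C" void __tsan_setjmp(unsigned long sp);
extern int (*real_setjmp)(void *env);

static inline int setjmp_shape(void *env, unsigned long caller_sp) {
  __tsan_setjmp(caller_sp);  // let tsan record the jump buffer setup
  return real_setjmp(env);   // the asm performs this as a tail jump
}
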
+
+.comm _ZN14__interception12real__setjmpE,8,8
+.globl ASM_SYMBOL_INTERCEPTOR(_setjmp)
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(_setjmp))
+ASM_SYMBOL_INTERCEPTOR(_setjmp):
+ CFI_STARTPROC
+
+ // Save frame pointer and return address register
+ addi sp, sp, -32
+ sd ra, 24(sp)
+ sd s0, 16(sp)
+ CFI_DEF_CFA_OFFSET (32)
+ CFI_OFFSET (1, -8)
+ CFI_OFFSET (8, -16)
+
+ // Adjust the SP for previous frame
+ addi s0, sp, 32
+ CFI_DEF_CFA_REGISTER (8)
+
+ // Save env parameter
+ sd a0, 8(sp)
+ CFI_OFFSET (10, -24)
+
+ // Obtain SP, first argument to `void __tsan_setjmp(uptr sp)`
+ addi a0, s0, 0
+
+ // call tsan interceptor
+ call ASM_SYMBOL(__tsan_setjmp)
+
+ // Restore env parameter
+ ld a0, 8(sp)
+ CFI_RESTORE (10)
+
+ // Restore frame/link register
+ ld s0, 16(sp)
+ ld ra, 24(sp)
+ addi sp, sp, 32
+ CFI_RESTORE (8)
+ CFI_RESTORE (1)
+ CFI_DEF_CFA (2, 0)
+
+  // tail jump to libc _setjmp
+ la t1, _ZN14__interception12real__setjmpE
+ ld t1, 0(t1)
+ jr t1
+
+ CFI_ENDPROC
+ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(_setjmp))
+
+.comm _ZN14__interception14real_sigsetjmpE,8,8
+.globl ASM_SYMBOL_INTERCEPTOR(sigsetjmp)
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(sigsetjmp))
+ASM_SYMBOL_INTERCEPTOR(sigsetjmp):
+ CFI_STARTPROC
+
+ // Save frame pointer and return address register
+ addi sp, sp, -32
+ sd ra, 24(sp)
+ sd s0, 16(sp)
+ CFI_DEF_CFA_OFFSET (32)
+ CFI_OFFSET (1, -8)
+ CFI_OFFSET (8, -16)
+
+ // Adjust the SP for previous frame
+ addi s0, sp, 32
+ CFI_DEF_CFA_REGISTER (8)
+
+ // Save env parameter
+ sd a0, 8(sp)
+ sd a1, 0(sp)
+ CFI_OFFSET (10, -24)
+ CFI_OFFSET (11, -32)
+
+ // Obtain SP, first argument to `void __tsan_setjmp(uptr sp)`
+ addi a0, s0, 0
+
+ // call tsan interceptor
+ call ASM_SYMBOL(__tsan_setjmp)
+
+ // Restore env parameter
+ ld a0, 8(sp)
+ ld a1, 0(sp)
+ CFI_RESTORE (10)
+ CFI_RESTORE (11)
+
+ // Restore frame/link register
+ ld s0, 16(sp)
+ ld ra, 24(sp)
+ addi sp, sp, 32
+ CFI_RESTORE (8)
+ CFI_RESTORE (1)
+ CFI_DEF_CFA (2, 0)
+
+  // tail jump to libc sigsetjmp
+ la t1, _ZN14__interception14real_sigsetjmpE
+ ld t1, 0(t1)
+ jr t1
+
+ CFI_ENDPROC
+ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(sigsetjmp))
+
+.comm _ZN14__interception16real___sigsetjmpE,8,8
+.globl ASM_SYMBOL_INTERCEPTOR(__sigsetjmp)
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(__sigsetjmp))
+ASM_SYMBOL_INTERCEPTOR(__sigsetjmp):
+ CFI_STARTPROC
+
+ // Save frame pointer and return address register
+ addi sp, sp, -32
+ sd ra, 24(sp)
+ sd s0, 16(sp)
+ CFI_DEF_CFA_OFFSET (32)
+ CFI_OFFSET (1, -8)
+ CFI_OFFSET (8, -16)
+
+ // Adjust the SP for previous frame
+ addi s0, sp, 32
+ CFI_DEF_CFA_REGISTER (8)
+
+ // Save env parameter
+ sd a0, 8(sp)
+ sd a1, 0(sp)
+ CFI_OFFSET (10, -24)
+ CFI_OFFSET (11, -32)
+
+ // Obtain SP, first argument to `void __tsan_setjmp(uptr sp)`
+ addi a0, s0, 0
+
+ // call tsan interceptor
+ call ASM_SYMBOL(__tsan_setjmp)
+
+ // Restore env parameter
+ ld a0, 8(sp)
+ ld a1, 0(sp)
+ CFI_RESTORE (10)
+ CFI_RESTORE (11)
+
+ // Restore frame/link register
+ ld s0, 16(sp)
+ ld ra, 24(sp)
+ addi sp, sp, 32
+ CFI_RESTORE (8)
+ CFI_RESTORE (1)
+ CFI_DEF_CFA (2, 0)
+
+  // tail jump to libc __sigsetjmp
+ la t1, _ZN14__interception16real___sigsetjmpE
+ ld t1, 0(t1)
+ jr t1
+
+ CFI_ENDPROC
+ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(__sigsetjmp))
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_s390x.S b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_s390x.S
index fcff35fbc7e0..2f445e8f1b20 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_s390x.S
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_s390x.S
@@ -45,3 +45,5 @@ intercept setjmp, _ZN14__interception11real_setjmpE
intercept _setjmp, _ZN14__interception12real__setjmpE
intercept sigsetjmp, _ZN14__interception14real_sigsetjmpE
intercept __sigsetjmp, _ZN14__interception16real___sigsetjmpE
+
+NO_EXEC_STACK_DIRECTIVE
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp
index cdb6e60ebbd0..77488f843285 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp
@@ -21,133 +21,14 @@ namespace __tsan {
// ThreadContext implementation.
-ThreadContext::ThreadContext(int tid)
- : ThreadContextBase(tid)
- , thr()
- , sync()
- , epoch0()
- , epoch1() {
-}
+ThreadContext::ThreadContext(Tid tid) : ThreadContextBase(tid), thr(), sync() {}
#if !SANITIZER_GO
ThreadContext::~ThreadContext() {
}
#endif
-void ThreadContext::OnDead() {
- CHECK_EQ(sync.size(), 0);
-}
-
-void ThreadContext::OnJoined(void *arg) {
- ThreadState *caller_thr = static_cast<ThreadState *>(arg);
- AcquireImpl(caller_thr, 0, &sync);
- sync.Reset(&caller_thr->proc()->clock_cache);
-}
-
-struct OnCreatedArgs {
- ThreadState *thr;
- uptr pc;
-};
-
-void ThreadContext::OnCreated(void *arg) {
- thr = 0;
- if (tid == kMainTid)
- return;
- OnCreatedArgs *args = static_cast<OnCreatedArgs *>(arg);
- if (!args->thr) // GCD workers don't have a parent thread.
- return;
- args->thr->fast_state.IncrementEpoch();
- // Can't increment epoch w/o writing to the trace as well.
- TraceAddEvent(args->thr, args->thr->fast_state, EventTypeMop, 0);
- ReleaseImpl(args->thr, 0, &sync);
- creation_stack_id = CurrentStackId(args->thr, args->pc);
-}
-
-void ThreadContext::OnReset() {
- CHECK_EQ(sync.size(), 0);
- uptr trace_p = GetThreadTrace(tid);
- ReleaseMemoryPagesToOS(trace_p, trace_p + TraceSize() * sizeof(Event));
- //!!! ReleaseMemoryToOS(GetThreadTraceHeader(tid), sizeof(Trace));
-}
-
-void ThreadContext::OnDetached(void *arg) {
- ThreadState *thr1 = static_cast<ThreadState*>(arg);
- sync.Reset(&thr1->proc()->clock_cache);
-}
-
-struct OnStartedArgs {
- ThreadState *thr;
- uptr stk_addr;
- uptr stk_size;
- uptr tls_addr;
- uptr tls_size;
-};
-
-void ThreadContext::OnStarted(void *arg) {
- OnStartedArgs *args = static_cast<OnStartedArgs*>(arg);
- thr = args->thr;
- // RoundUp so that one trace part does not contain events
- // from different threads.
- epoch0 = RoundUp(epoch1 + 1, kTracePartSize);
- epoch1 = (u64)-1;
- new(thr) ThreadState(ctx, tid, unique_id, epoch0, reuse_count,
- args->stk_addr, args->stk_size, args->tls_addr, args->tls_size);
-#if !SANITIZER_GO
- thr->shadow_stack = &ThreadTrace(thr->tid)->shadow_stack[0];
- thr->shadow_stack_pos = thr->shadow_stack;
- thr->shadow_stack_end = thr->shadow_stack + kShadowStackSize;
-#else
- // Setup dynamic shadow stack.
- const int kInitStackSize = 8;
- thr->shadow_stack = (uptr*)internal_alloc(MBlockShadowStack,
- kInitStackSize * sizeof(uptr));
- thr->shadow_stack_pos = thr->shadow_stack;
- thr->shadow_stack_end = thr->shadow_stack + kInitStackSize;
-#endif
- if (common_flags()->detect_deadlocks)
- thr->dd_lt = ctx->dd->CreateLogicalThread(unique_id);
- thr->fast_state.SetHistorySize(flags()->history_size);
- // Commit switch to the new part of the trace.
- // TraceAddEvent will reset stack0/mset0 in the new part for us.
- TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
-
- thr->fast_synch_epoch = epoch0;
- AcquireImpl(thr, 0, &sync);
- sync.Reset(&thr->proc()->clock_cache);
- thr->is_inited = true;
- DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
- "tls_addr=%zx tls_size=%zx\n",
- tid, (uptr)epoch0, args->stk_addr, args->stk_size,
- args->tls_addr, args->tls_size);
-}
-
-void ThreadContext::OnFinished() {
-#if SANITIZER_GO
- internal_free(thr->shadow_stack);
- thr->shadow_stack = nullptr;
- thr->shadow_stack_pos = nullptr;
- thr->shadow_stack_end = nullptr;
-#endif
- if (!detached) {
- thr->fast_state.IncrementEpoch();
- // Can't increment epoch w/o writing to the trace as well.
- TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
- ReleaseImpl(thr, 0, &sync);
- }
- epoch1 = thr->fast_state.epoch();
-
- if (common_flags()->detect_deadlocks)
- ctx->dd->DestroyLogicalThread(thr->dd_lt);
- thr->clock.ResetCached(&thr->proc()->clock_cache);
-#if !SANITIZER_GO
- thr->last_sleep_clock.ResetCached(&thr->proc()->clock_cache);
-#endif
-#if !SANITIZER_GO
- PlatformCleanUpThreadState(thr);
-#endif
- thr->~ThreadState();
- thr = 0;
-}
+void ThreadContext::OnReset() { CHECK(!sync); }
#if !SANITIZER_GO
struct ThreadLeak {
@@ -155,9 +36,9 @@ struct ThreadLeak {
int count;
};
-static void MaybeReportThreadLeak(ThreadContextBase *tctx_base, void *arg) {
- Vector<ThreadLeak> &leaks = *(Vector<ThreadLeak>*)arg;
- ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
+static void CollectThreadLeaks(ThreadContextBase *tctx_base, void *arg) {
+ auto &leaks = *static_cast<Vector<ThreadLeak> *>(arg);
+ auto *tctx = static_cast<ThreadContext *>(tctx_base);
if (tctx->detached || tctx->status != ThreadStatusFinished)
return;
for (uptr i = 0; i < leaks.Size(); i++) {
@@ -166,12 +47,13 @@ static void MaybeReportThreadLeak(ThreadContextBase *tctx_base, void *arg) {
return;
}
}
- ThreadLeak leak = {tctx, 1};
- leaks.PushBack(leak);
+ leaks.PushBack({tctx, 1});
}
#endif
-#if !SANITIZER_GO
+// Disabled on Mac because lldb test TestTsanBasic fails:
+// https://reviews.llvm.org/D112603#3163158
+#if !SANITIZER_GO && !SANITIZER_APPLE
static void ReportIgnoresEnabled(ThreadContext *tctx, IgnoreSet *set) {
if (tctx->tid == kMainTid) {
Printf("ThreadSanitizer: main thread finished with ignores enabled\n");
@@ -206,10 +88,10 @@ void ThreadFinalize(ThreadState *thr) {
#if !SANITIZER_GO
if (!ShouldReport(thr, ReportTypeThreadLeak))
return;
- ThreadRegistryLock l(ctx->thread_registry);
+ ThreadRegistryLock l(&ctx->thread_registry);
Vector<ThreadLeak> leaks;
- ctx->thread_registry->RunCallbackForEachThreadLocked(
- MaybeReportThreadLeak, &leaks);
+ ctx->thread_registry.RunCallbackForEachThreadLocked(CollectThreadLeaks,
+ &leaks);
for (uptr i = 0; i < leaks.Size(); i++) {
ScopedReport rep(ReportTypeThreadLeak);
rep.AddThread(leaks[i].tctx, true);
@@ -221,21 +103,63 @@ void ThreadFinalize(ThreadState *thr) {
int ThreadCount(ThreadState *thr) {
uptr result;
- ctx->thread_registry->GetNumberOfThreads(0, 0, &result);
+ ctx->thread_registry.GetNumberOfThreads(0, 0, &result);
return (int)result;
}
-int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
- OnCreatedArgs args = { thr, pc };
- u32 parent_tid = thr ? thr->tid : kInvalidTid; // No parent for GCD workers.
- int tid =
- ctx->thread_registry->CreateThread(uid, detached, parent_tid, &args);
- DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", parent_tid, tid, uid);
+struct OnCreatedArgs {
+ VectorClock *sync;
+ uptr sync_epoch;
+ StackID stack;
+};
+
+Tid ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
+ // The main thread and GCD workers don't have a parent thread.
+ Tid parent = kInvalidTid;
+ OnCreatedArgs arg = {nullptr, 0, kInvalidStackID};
+ if (thr) {
+ parent = thr->tid;
+ arg.stack = CurrentStackId(thr, pc);
+ if (!thr->ignore_sync) {
+ SlotLocker locker(thr);
+ thr->clock.ReleaseStore(&arg.sync);
+ arg.sync_epoch = ctx->global_epoch;
+ IncrementEpoch(thr);
+ }
+ }
+ Tid tid = ctx->thread_registry.CreateThread(uid, detached, parent, &arg);
+ DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", parent, tid, uid);
return tid;
}
-void ThreadStart(ThreadState *thr, int tid, tid_t os_id,
+void ThreadContext::OnCreated(void *arg) {
+ OnCreatedArgs *args = static_cast<OnCreatedArgs *>(arg);
+ sync = args->sync;
+ sync_epoch = args->sync_epoch;
+ creation_stack_id = args->stack;
+}
+
+extern "C" void __tsan_stack_initialization() {}
+
+struct OnStartedArgs {
+ ThreadState *thr;
+ uptr stk_addr;
+ uptr stk_size;
+ uptr tls_addr;
+ uptr tls_size;
+};
+
+void ThreadStart(ThreadState *thr, Tid tid, tid_t os_id,
ThreadType thread_type) {
+ ctx->thread_registry.StartThread(tid, os_id, thread_type, thr);
+ if (!thr->ignore_sync) {
+ SlotAttachAndLock(thr);
+ if (thr->tctx->sync_epoch == ctx->global_epoch)
+ thr->clock.Acquire(thr->tctx->sync);
+ SlotUnlock(thr);
+ }
+ Free(thr->tctx->sync);
+
uptr stk_addr = 0;
uptr stk_size = 0;
uptr tls_addr = 0;
@@ -244,22 +168,11 @@ void ThreadStart(ThreadState *thr, int tid, tid_t os_id,
if (thread_type != ThreadType::Fiber)
GetThreadStackAndTls(tid == kMainTid, &stk_addr, &stk_size, &tls_addr,
&tls_size);
-
- if (tid != kMainTid) {
- if (stk_addr && stk_size)
- MemoryRangeImitateWrite(thr, /*pc=*/ 1, stk_addr, stk_size);
-
- if (tls_addr && tls_size) ImitateTlsWrite(thr, tls_addr, tls_size);
- }
#endif
-
- ThreadRegistry *tr = ctx->thread_registry;
- OnStartedArgs args = { thr, stk_addr, stk_size, tls_addr, tls_size };
- tr->StartThread(tid, os_id, thread_type, &args);
-
- tr->Lock();
- thr->tctx = (ThreadContext*)tr->GetThreadLocked(tid);
- tr->Unlock();
+ thr->stk_addr = stk_addr;
+ thr->stk_size = stk_size;
+ thr->tls_addr = tls_addr;
+ thr->tls_size = tls_size;
#if !SANITIZER_GO
if (ctx->after_multithreaded_fork) {
@@ -268,16 +181,99 @@ void ThreadStart(ThreadState *thr, int tid, tid_t os_id,
ThreadIgnoreSyncBegin(thr, 0);
}
#endif
+
+#if !SANITIZER_GO
+ // Don't imitate stack/TLS writes for the main thread,
+ // because its initialization is synchronized with all
+ // subsequent threads anyway.
+ if (tid != kMainTid) {
+ if (stk_addr && stk_size) {
+ const uptr pc = StackTrace::GetNextInstructionPc(
+ reinterpret_cast<uptr>(__tsan_stack_initialization));
+ MemoryRangeImitateWrite(thr, pc, stk_addr, stk_size);
+ }
+
+ if (tls_addr && tls_size)
+ ImitateTlsWrite(thr, tls_addr, tls_size);
+ }
+#endif
+}
+
+void ThreadContext::OnStarted(void *arg) {
+ thr = static_cast<ThreadState *>(arg);
+ DPrintf("#%d: ThreadStart\n", tid);
+ new (thr) ThreadState(tid);
+ if (common_flags()->detect_deadlocks)
+ thr->dd_lt = ctx->dd->CreateLogicalThread(tid);
+ thr->tctx = this;
+#if !SANITIZER_GO
+ thr->is_inited = true;
+#endif
}
void ThreadFinish(ThreadState *thr) {
+ DPrintf("#%d: ThreadFinish\n", thr->tid);
ThreadCheckIgnore(thr);
if (thr->stk_addr && thr->stk_size)
DontNeedShadowFor(thr->stk_addr, thr->stk_size);
if (thr->tls_addr && thr->tls_size)
DontNeedShadowFor(thr->tls_addr, thr->tls_size);
thr->is_dead = true;
- ctx->thread_registry->FinishThread(thr->tid);
+#if !SANITIZER_GO
+ thr->is_inited = false;
+ thr->ignore_interceptors++;
+ PlatformCleanUpThreadState(thr);
+#endif
+ if (!thr->ignore_sync) {
+ SlotLocker locker(thr);
+ ThreadRegistryLock lock(&ctx->thread_registry);
+ // Note: detached is protected by the thread registry mutex;
+ // the thread may be detached concurrently by another thread.
+ if (!thr->tctx->detached) {
+ thr->clock.ReleaseStore(&thr->tctx->sync);
+ thr->tctx->sync_epoch = ctx->global_epoch;
+ IncrementEpoch(thr);
+ }
+ }
+#if !SANITIZER_GO
+ UnmapOrDie(thr->shadow_stack, kShadowStackSize * sizeof(uptr));
+#else
+ Free(thr->shadow_stack);
+#endif
+ thr->shadow_stack = nullptr;
+ thr->shadow_stack_pos = nullptr;
+ thr->shadow_stack_end = nullptr;
+ if (common_flags()->detect_deadlocks)
+ ctx->dd->DestroyLogicalThread(thr->dd_lt);
+ SlotDetach(thr);
+ ctx->thread_registry.FinishThread(thr->tid);
+ thr->~ThreadState();
+}
+
+void ThreadContext::OnFinished() {
+ Lock lock(&ctx->slot_mtx);
+ Lock lock1(&trace.mtx);
+ // Queue all trace parts into the global recycle queue.
+ auto parts = &trace.parts;
+ while (trace.local_head) {
+ CHECK(parts->Queued(trace.local_head));
+ ctx->trace_part_recycle.PushBack(trace.local_head);
+ trace.local_head = parts->Next(trace.local_head);
+ }
+ ctx->trace_part_recycle_finished += parts->Size();
+ if (ctx->trace_part_recycle_finished > Trace::kFinishedThreadHi) {
+ ctx->trace_part_finished_excess += parts->Size();
+ trace.parts_allocated = 0;
+ } else if (ctx->trace_part_recycle_finished > Trace::kFinishedThreadLo &&
+ parts->Size() > 1) {
+ ctx->trace_part_finished_excess += parts->Size() - 1;
+ trace.parts_allocated = 1;
+ }
+ // From now on replay will use trace->final_pos.
+ trace.final_pos = (Event *)atomic_load_relaxed(&thr->trace_pos);
+ atomic_store_relaxed(&thr->trace_pos, 0);
+ thr->tctx = nullptr;
+ thr = nullptr;
}
struct ConsumeThreadContext {
@@ -285,131 +281,52 @@ struct ConsumeThreadContext {
ThreadContextBase *tctx;
};
-static bool ConsumeThreadByUid(ThreadContextBase *tctx, void *arg) {
- ConsumeThreadContext *findCtx = (ConsumeThreadContext *)arg;
- if (tctx->user_id == findCtx->uid && tctx->status != ThreadStatusInvalid) {
- if (findCtx->tctx) {
- // Ensure that user_id is unique. If it's not the case we are screwed.
- // Something went wrong before, but now there is no way to recover.
- // Returning a wrong thread is not an option, it may lead to very hard
- // to debug false positives (e.g. if we join a wrong thread).
- Report("ThreadSanitizer: dup thread with used id 0x%zx\n", findCtx->uid);
- Die();
- }
- findCtx->tctx = tctx;
- tctx->user_id = 0;
- }
- return false;
+Tid ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid) {
+ return ctx->thread_registry.ConsumeThreadUserId(uid);
}
-int ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid) {
- ConsumeThreadContext findCtx = {uid, nullptr};
- ctx->thread_registry->FindThread(ConsumeThreadByUid, &findCtx);
- int tid = findCtx.tctx ? findCtx.tctx->tid : kInvalidTid;
- DPrintf("#%d: ThreadTid uid=%zu tid=%d\n", thr->tid, uid, tid);
- return tid;
-}
+struct JoinArg {
+ VectorClock *sync;
+ uptr sync_epoch;
+};
-void ThreadJoin(ThreadState *thr, uptr pc, int tid) {
+void ThreadJoin(ThreadState *thr, uptr pc, Tid tid) {
CHECK_GT(tid, 0);
- CHECK_LT(tid, kMaxTid);
DPrintf("#%d: ThreadJoin tid=%d\n", thr->tid, tid);
- ctx->thread_registry->JoinThread(tid, thr);
+ JoinArg arg = {};
+ ctx->thread_registry.JoinThread(tid, &arg);
+ if (!thr->ignore_sync) {
+ SlotLocker locker(thr);
+ if (arg.sync_epoch == ctx->global_epoch)
+ thr->clock.Acquire(arg.sync);
+ }
+ Free(arg.sync);
}
-void ThreadDetach(ThreadState *thr, uptr pc, int tid) {
- CHECK_GT(tid, 0);
- CHECK_LT(tid, kMaxTid);
- ctx->thread_registry->DetachThread(tid, thr);
+void ThreadContext::OnJoined(void *ptr) {
+ auto arg = static_cast<JoinArg *>(ptr);
+ arg->sync = sync;
+ arg->sync_epoch = sync_epoch;
+ sync = nullptr;
+ sync_epoch = 0;
}
-void ThreadNotJoined(ThreadState *thr, uptr pc, int tid, uptr uid) {
- CHECK_GT(tid, 0);
- CHECK_LT(tid, kMaxTid);
- ctx->thread_registry->SetThreadUserId(tid, uid);
-}
+void ThreadContext::OnDead() { CHECK_EQ(sync, nullptr); }
-void ThreadSetName(ThreadState *thr, const char *name) {
- ctx->thread_registry->SetThreadName(thr->tid, name);
+void ThreadDetach(ThreadState *thr, uptr pc, Tid tid) {
+ CHECK_GT(tid, 0);
+ ctx->thread_registry.DetachThread(tid, thr);
}
-void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
- uptr size, bool is_write) {
- if (size == 0)
- return;
-
- u64 *shadow_mem = (u64*)MemToShadow(addr);
- DPrintf2("#%d: MemoryAccessRange: @%p %p size=%d is_write=%d\n",
- thr->tid, (void*)pc, (void*)addr,
- (int)size, is_write);
+void ThreadContext::OnDetached(void *arg) { Free(sync); }
-#if SANITIZER_DEBUG
- if (!IsAppMem(addr)) {
- Printf("Access to non app mem %zx\n", addr);
- DCHECK(IsAppMem(addr));
- }
- if (!IsAppMem(addr + size - 1)) {
- Printf("Access to non app mem %zx\n", addr + size - 1);
- DCHECK(IsAppMem(addr + size - 1));
- }
- if (!IsShadowMem((uptr)shadow_mem)) {
- Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
- DCHECK(IsShadowMem((uptr)shadow_mem));
- }
- if (!IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1))) {
- Printf("Bad shadow addr %p (%zx)\n",
- shadow_mem + size * kShadowCnt / 8 - 1, addr + size - 1);
- DCHECK(IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1)));
- }
-#endif
-
- if (*shadow_mem == kShadowRodata) {
- DCHECK(!is_write);
- // Access to .rodata section, no races here.
- // Measurements show that it can be 10-20% of all memory accesses.
- return;
- }
-
- FastState fast_state = thr->fast_state;
- if (fast_state.GetIgnoreBit())
- return;
-
- fast_state.IncrementEpoch();
- thr->fast_state = fast_state;
- TraceAddEvent(thr, fast_state, EventTypeMop, pc);
-
- bool unaligned = (addr % kShadowCell) != 0;
+void ThreadNotJoined(ThreadState *thr, uptr pc, Tid tid, uptr uid) {
+ CHECK_GT(tid, 0);
+ ctx->thread_registry.SetThreadUserId(tid, uid);
+}
- // Handle unaligned beginning, if any.
- for (; addr % kShadowCell && size; addr++, size--) {
- int const kAccessSizeLog = 0;
- Shadow cur(fast_state);
- cur.SetWrite(is_write);
- cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
- MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
- shadow_mem, cur);
- }
- if (unaligned)
- shadow_mem += kShadowCnt;
- // Handle middle part, if any.
- for (; size >= kShadowCell; addr += kShadowCell, size -= kShadowCell) {
- int const kAccessSizeLog = 3;
- Shadow cur(fast_state);
- cur.SetWrite(is_write);
- cur.SetAddr0AndSizeLog(0, kAccessSizeLog);
- MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
- shadow_mem, cur);
- shadow_mem += kShadowCnt;
- }
- // Handle ending, if any.
- for (; size; addr++, size--) {
- int const kAccessSizeLog = 0;
- Shadow cur(fast_state);
- cur.SetWrite(is_write);
- cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
- MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
- shadow_mem, cur);
- }
+void ThreadSetName(ThreadState *thr, const char *name) {
+ ctx->thread_registry.SetThreadName(thr->tid, name);
}
#if !SANITIZER_GO
@@ -421,10 +338,10 @@ void FiberSwitchImpl(ThreadState *from, ThreadState *to) {
}
ThreadState *FiberCreate(ThreadState *thr, uptr pc, unsigned flags) {
- void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadState));
+ void *mem = Alloc(sizeof(ThreadState));
ThreadState *fiber = static_cast<ThreadState *>(mem);
internal_memset(fiber, 0, sizeof(*fiber));
- int tid = ThreadCreate(thr, pc, 0, true);
+ Tid tid = ThreadCreate(thr, pc, 0, true);
FiberSwitchImpl(thr, fiber);
ThreadStart(fiber, tid, 0, ThreadType::Fiber);
FiberSwitchImpl(fiber, thr);
@@ -435,7 +352,7 @@ void FiberDestroy(ThreadState *thr, uptr pc, ThreadState *fiber) {
FiberSwitchImpl(thr, fiber);
ThreadFinish(fiber);
FiberSwitchImpl(fiber, thr);
- internal_free(fiber);
+ Free(fiber);
}
void FiberSwitch(ThreadState *thr, uptr pc,
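The rewritten thread lifecycle above replaces reference-counted clocks with plain vector clocks: ThreadCreate releases the creator's clock into OnCreatedArgs::sync, and ThreadStart/ThreadJoin acquire it back before freeing it. A minimal standalone sketch of that release/acquire pattern, using a toy fixed-size clock rather than the runtime's VectorClock (all names here are illustrative):

#include <algorithm>
#include <array>
#include <cstdio>

constexpr int kSlots = 4;
using Clock = std::array<unsigned, kSlots>;

// Release: the creator publishes a snapshot of its clock (cf.
// thr->clock.ReleaseStore(&arg.sync) in ThreadCreate).
Clock ReleaseStore(const Clock &thr) { return thr; }

// Acquire: the started/joining thread merges the snapshot pointwise
// (cf. thr->clock.Acquire(...) in ThreadStart and ThreadJoin).
void Acquire(Clock &thr, const Clock &sync) {
  for (int i = 0; i < kSlots; i++) thr[i] = std::max(thr[i], sync[i]);
}

int main() {
  Clock parent = {3, 0, 0, 0};        // parent performed 3 epochs in slot 0
  Clock sync = ReleaseStore(parent);  // published at thread creation
  Clock child = {0, 1, 0, 0};         // child runs in slot 1
  Acquire(child, sync);               // merged when the child starts
  std::printf("child sees parent epoch %u\n", child[0]);  // prints 3
}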
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_shadow.h b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_shadow.h
new file mode 100644
index 000000000000..6b8114ef5132
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_shadow.h
@@ -0,0 +1,193 @@
+//===-- tsan_shadow.h -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef TSAN_SHADOW_H
+#define TSAN_SHADOW_H
+
+#include "tsan_defs.h"
+
+namespace __tsan {
+
+class FastState {
+ public:
+ FastState() { Reset(); }
+
+ void Reset() {
+ part_.unused0_ = 0;
+ part_.sid_ = static_cast<u8>(kFreeSid);
+ part_.epoch_ = static_cast<u16>(kEpochLast);
+ part_.unused1_ = 0;
+ part_.ignore_accesses_ = false;
+ }
+
+ void SetSid(Sid sid) { part_.sid_ = static_cast<u8>(sid); }
+
+ Sid sid() const { return static_cast<Sid>(part_.sid_); }
+
+ Epoch epoch() const { return static_cast<Epoch>(part_.epoch_); }
+
+ void SetEpoch(Epoch epoch) { part_.epoch_ = static_cast<u16>(epoch); }
+
+ void SetIgnoreBit() { part_.ignore_accesses_ = 1; }
+ void ClearIgnoreBit() { part_.ignore_accesses_ = 0; }
+ bool GetIgnoreBit() const { return part_.ignore_accesses_; }
+
+ private:
+ friend class Shadow;
+ struct Parts {
+ u32 unused0_ : 8;
+ u32 sid_ : 8;
+ u32 epoch_ : kEpochBits;
+ u32 unused1_ : 1;
+ u32 ignore_accesses_ : 1;
+ };
+ union {
+ Parts part_;
+ u32 raw_;
+ };
+};
+
+static_assert(sizeof(FastState) == kShadowSize, "bad FastState size");
+
+class Shadow {
+ public:
+ static constexpr RawShadow kEmpty = static_cast<RawShadow>(0);
+
+ Shadow(FastState state, u32 addr, u32 size, AccessType typ) {
+ raw_ = state.raw_;
+ DCHECK_GT(size, 0);
+ DCHECK_LE(size, 8);
+ UNUSED Sid sid0 = part_.sid_;
+ UNUSED u16 epoch0 = part_.epoch_;
+ raw_ |= (!!(typ & kAccessAtomic) << kIsAtomicShift) |
+ (!!(typ & kAccessRead) << kIsReadShift) |
+ (((((1u << size) - 1) << (addr & 0x7)) & 0xff) << kAccessShift);
+ // Note: we don't check kAccessAtomic because it overlaps with
+ // FastState::ignore_accesses_ and it may be set spuriously.
+ DCHECK_EQ(part_.is_read_, !!(typ & kAccessRead));
+ DCHECK_EQ(sid(), sid0);
+ DCHECK_EQ(epoch(), epoch0);
+ }
+
+ explicit Shadow(RawShadow x = Shadow::kEmpty) { raw_ = static_cast<u32>(x); }
+
+ RawShadow raw() const { return static_cast<RawShadow>(raw_); }
+ Sid sid() const { return part_.sid_; }
+ Epoch epoch() const { return static_cast<Epoch>(part_.epoch_); }
+ u8 access() const { return part_.access_; }
+
+ void GetAccess(uptr *addr, uptr *size, AccessType *typ) const {
+ DCHECK(part_.access_ != 0 || raw_ == static_cast<u32>(Shadow::kRodata));
+ if (addr)
+ *addr = part_.access_ ? __builtin_ffs(part_.access_) - 1 : 0;
+ if (size)
+ *size = part_.access_ == kFreeAccess ? kShadowCell
+ : __builtin_popcount(part_.access_);
+ if (typ) {
+ *typ = part_.is_read_ ? kAccessRead : kAccessWrite;
+ if (part_.is_atomic_)
+ *typ |= kAccessAtomic;
+ if (part_.access_ == kFreeAccess)
+ *typ |= kAccessFree;
+ }
+ }
+
+ ALWAYS_INLINE
+ bool IsBothReadsOrAtomic(AccessType typ) const {
+ u32 is_read = !!(typ & kAccessRead);
+ u32 is_atomic = !!(typ & kAccessAtomic);
+ bool res =
+ raw_ & ((is_atomic << kIsAtomicShift) | (is_read << kIsReadShift));
+ DCHECK_EQ(res,
+ (part_.is_read_ && is_read) || (part_.is_atomic_ && is_atomic));
+ return res;
+ }
+
+ ALWAYS_INLINE
+ bool IsRWWeakerOrEqual(AccessType typ) const {
+ u32 is_read = !!(typ & kAccessRead);
+ u32 is_atomic = !!(typ & kAccessAtomic);
+ UNUSED u32 res0 =
+ (part_.is_atomic_ > is_atomic) ||
+ (part_.is_atomic_ == is_atomic && part_.is_read_ >= is_read);
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ const u32 kAtomicReadMask = (1 << kIsAtomicShift) | (1 << kIsReadShift);
+ bool res = (raw_ & kAtomicReadMask) >=
+ ((is_atomic << kIsAtomicShift) | (is_read << kIsReadShift));
+
+ DCHECK_EQ(res, res0);
+ return res;
+#else
+ return res0;
+#endif
+ }
+
+ // The FreedMarker must not pass "the same access check" so that we don't
+ // return from the race detection algorithm early.
+ static RawShadow FreedMarker() {
+ FastState fs;
+ fs.SetSid(kFreeSid);
+ fs.SetEpoch(kEpochLast);
+ Shadow s(fs, 0, 8, kAccessWrite);
+ return s.raw();
+ }
+
+ static RawShadow FreedInfo(Sid sid, Epoch epoch) {
+ Shadow s;
+ s.part_.sid_ = sid;
+ s.part_.epoch_ = static_cast<u16>(epoch);
+ s.part_.access_ = kFreeAccess;
+ return s.raw();
+ }
+
+ private:
+ struct Parts {
+ u8 access_;
+ Sid sid_;
+ u16 epoch_ : kEpochBits;
+ u16 is_read_ : 1;
+ u16 is_atomic_ : 1;
+ };
+ union {
+ Parts part_;
+ u32 raw_;
+ };
+
+ static constexpr u8 kFreeAccess = 0x81;
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ static constexpr uptr kAccessShift = 0;
+ static constexpr uptr kIsReadShift = 30;
+ static constexpr uptr kIsAtomicShift = 31;
+#else
+ static constexpr uptr kAccessShift = 24;
+ static constexpr uptr kIsReadShift = 1;
+ static constexpr uptr kIsAtomicShift = 0;
+#endif
+
+ public:
+ // .rodata shadow marker, see MapRodata and ContainsSameAccessFast.
+ static constexpr RawShadow kRodata =
+ static_cast<RawShadow>(1 << kIsReadShift);
+};
+
+static_assert(sizeof(Shadow) == kShadowSize, "bad Shadow size");
+
+ALWAYS_INLINE RawShadow LoadShadow(RawShadow *p) {
+ return static_cast<RawShadow>(
+ atomic_load((atomic_uint32_t *)p, memory_order_relaxed));
+}
+
+ALWAYS_INLINE void StoreShadow(RawShadow *sp, RawShadow s) {
+ atomic_store((atomic_uint32_t *)sp, static_cast<u32>(s),
+ memory_order_relaxed);
+}
+
+} // namespace __tsan
+
+#endif
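The access_ byte above records which of the 8 bytes of a shadow cell were touched: the Shadow constructor builds the mask with ((1u << size) - 1) << (addr & 0x7), and GetAccess recovers offset and size via __builtin_ffs and __builtin_popcount. A self-contained sketch of the same round trip (EncodeAccess is a hypothetical helper, not part of the runtime):

#include <cassert>
#include <cstdio>

// Bit i of the mask is set iff byte i of the 8-byte cell was accessed,
// mirroring the expression in Shadow's constructor.
unsigned EncodeAccess(unsigned addr, unsigned size) {
  return (((1u << size) - 1) << (addr & 0x7)) & 0xff;
}

int main() {
  unsigned mask = EncodeAccess(/*addr=*/0x1006, /*size=*/2);  // bytes 6..7
  assert(mask == 0xc0);
  // Decode as Shadow::GetAccess does: ffs-1 yields the start offset,
  // popcount yields the access size in bytes.
  unsigned off = __builtin_ffs(mask) - 1;
  unsigned size = __builtin_popcount(mask);
  std::printf("offset=%u size=%u\n", off, size);  // offset=6 size=2
}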
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_spinlock_defs_mac.h b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_spinlock_defs_mac.h
new file mode 100644
index 000000000000..1a99a81c0302
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_spinlock_defs_mac.h
@@ -0,0 +1,45 @@
+//===-- tsan_spinlock_defs_mac.h -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Mac-specific forward-declared function definitions that may be
+// deprecated in later versions of the OS.
+// These are needed for interceptors.
+//
+//===----------------------------------------------------------------------===//
+
+#if SANITIZER_APPLE
+
+#ifndef TSAN_SPINLOCK_DEFS_MAC_H
+#define TSAN_SPINLOCK_DEFS_MAC_H
+
+#include <stdint.h>
+
+extern "C" {
+
+/*
+Provides forward declarations related to OSSpinLocks on Darwin. These
+functions are deprecated on macOS 10.12 and later, and are no longer
+included in the system headers.
+
+However, the symbols are still available on the system, so we provide these forward
+declarations to prevent compilation errors in tsan_interceptors_mac.cpp, which
+references these functions when defining TSAN interceptor functions.
+*/
+
+typedef int32_t OSSpinLock;
+
+void OSSpinLockLock(volatile OSSpinLock *__lock);
+void OSSpinLockUnlock(volatile OSSpinLock *__lock);
+bool OSSpinLockTry(volatile OSSpinLock *__lock);
+
+}
+
+#endif //TSAN_SPINLOCK_DEFS_MAC_H
+#endif // SANITIZER_APPLE
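These declarations exist only so the interceptors can reference the deprecated symbols. A caller of the legacy API that TSan would intercept looks roughly like the sketch below; it is illustrative and only links where the OSSpinLock symbols exist (Darwin):

#include <stdint.h>

typedef int32_t OSSpinLock;
extern "C" void OSSpinLockLock(volatile OSSpinLock *);
extern "C" void OSSpinLockUnlock(volatile OSSpinLock *);

static OSSpinLock g_lock;  // zero-initialized == unlocked
static int g_counter;

void Increment() {
  OSSpinLockLock(&g_lock);    // intercepted: TSan models an acquire here
  ++g_counter;
  OSSpinLockUnlock(&g_lock);  // intercepted: TSan models a release here
}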
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_stack_trace.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_stack_trace.cpp
index 6c703d7f2b10..9bbaafb3a85f 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_stack_trace.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_stack_trace.cpp
@@ -23,14 +23,10 @@ VarSizeStackTrace::~VarSizeStackTrace() {
}
void VarSizeStackTrace::ResizeBuffer(uptr new_size) {
- if (trace_buffer) {
- internal_free(trace_buffer);
- }
- trace_buffer =
- (new_size > 0)
- ? (uptr *)internal_alloc(MBlockStackTrace,
- new_size * sizeof(trace_buffer[0]))
- : nullptr;
+ Free(trace_buffer);
+ trace_buffer = (new_size > 0)
+ ? (uptr *)Alloc(new_size * sizeof(trace_buffer[0]))
+ : nullptr;
trace = trace_buffer;
size = new_size;
}
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_suppressions.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_suppressions.cpp
index a1c1bf81bf67..70642124990d 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_suppressions.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_suppressions.cpp
@@ -10,15 +10,16 @@
//
//===----------------------------------------------------------------------===//
+#include "tsan_suppressions.h"
+
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_suppressions.h"
-#include "tsan_suppressions.h"
-#include "tsan_rtl.h"
#include "tsan_flags.h"
#include "tsan_mman.h"
#include "tsan_platform.h"
+#include "tsan_rtl.h"
#if !SANITIZER_GO
// Suppressions for true/false positives in standard libraries.
@@ -80,6 +81,7 @@ static const char *conv(ReportType typ) {
case ReportTypeMutexBadUnlock:
case ReportTypeMutexBadReadLock:
case ReportTypeMutexBadReadUnlock:
+ case ReportTypeMutexHeldWrongContext:
return kSuppressionMutex;
case ReportTypeSignalUnsafe:
case ReportTypeErrnoInSignal:
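The new ReportTypeMutexHeldWrongContext case maps onto the existing mutex suppression category, so such reports are silenced with the same "mutex:" syntax. An illustrative suppressions file; the symbol and file names below are made up:

# tsan.supp -- example suppressions; names are illustrative, not real code.
mutex:LegacyLockHolder
race:third_party/old_queue.cc

# Used via:
#   TSAN_OPTIONS=suppressions=tsan.supp ./app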
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_symbolize.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_symbolize.cpp
index 6478f3a754ac..2e2744d2eae7 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_symbolize.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_symbolize.cpp
@@ -110,7 +110,8 @@ ReportLocation *SymbolizeData(uptr addr) {
DataInfo info;
if (!Symbolizer::GetOrInit()->SymbolizeData(addr, &info))
return 0;
- ReportLocation *ent = ReportLocation::New(ReportLocationGlobal);
+ auto *ent = New<ReportLocation>();
+ ent->type = ReportLocationGlobal;
internal_memcpy(&ent->global, &info, sizeof(info));
return ent;
}
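The New<ReportLocation>() call above pairs the internal allocator with placement construction. A simplified sketch of that pattern, with Alloc/Free as assumed stand-ins for the runtime's allocator (not the actual implementation):

#include <cstddef>
#include <cstdlib>
#include <new>

// Stand-ins for the runtime's Alloc/Free (assumed behavior, simplified).
void *Alloc(std::size_t size) { return std::malloc(size); }
void Free(void *p) { std::free(p); }

// New<T>: allocate raw memory, then placement-construct a
// value-initialized T in it.
template <typename T>
T *New() {
  return new (Alloc(sizeof(T))) T();
}

struct DemoLocation { int type = 0; };  // hypothetical, for illustration

int main() {
  DemoLocation *ent = New<DemoLocation>();
  ent->type = 1;  // cf. ent->type = ReportLocationGlobal above
  Free(ent);
}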
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_sync.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_sync.cpp
index 5e226b2d12b1..09d41780d188 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_sync.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_sync.cpp
@@ -18,42 +18,31 @@ namespace __tsan {
void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s);
-SyncVar::SyncVar() : mtx(MutexTypeSyncVar) { Reset(0); }
+SyncVar::SyncVar() : mtx(MutexTypeSyncVar) { Reset(); }
-void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) {
+void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, bool save_stack) {
+ Reset();
this->addr = addr;
- this->uid = uid;
- this->next = 0;
-
- creation_stack_id = 0;
- if (!SANITIZER_GO) // Go does not use them
+ next = 0;
+ if (save_stack && !SANITIZER_GO) // Go does not use them
creation_stack_id = CurrentStackId(thr, pc);
if (common_flags()->detect_deadlocks)
DDMutexInit(thr, pc, this);
}
-void SyncVar::Reset(Processor *proc) {
- uid = 0;
- creation_stack_id = 0;
+void SyncVar::Reset() {
+ CHECK(!ctx->resetting);
+ creation_stack_id = kInvalidStackID;
owner_tid = kInvalidTid;
- last_lock = 0;
+ last_lock.Reset();
recursion = 0;
atomic_store_relaxed(&flags, 0);
-
- if (proc == 0) {
- CHECK_EQ(clock.size(), 0);
- CHECK_EQ(read_clock.size(), 0);
- } else {
- clock.Reset(&proc->clock_cache);
- read_clock.Reset(&proc->clock_cache);
- }
+ Free(clock);
+ Free(read_clock);
}
MetaMap::MetaMap()
- : block_alloc_(LINKER_INITIALIZED, "heap block allocator"),
- sync_alloc_(LINKER_INITIALIZED, "sync allocator") {
- atomic_store(&uid_gen_, 0, memory_order_relaxed);
-}
+ : block_alloc_("heap block allocator"), sync_alloc_("sync allocator") {}
void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
u32 idx = block_alloc_.Alloc(&thr->proc()->block_cache);
@@ -67,16 +56,16 @@ void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
*meta = idx | kFlagBlock;
}
-uptr MetaMap::FreeBlock(Processor *proc, uptr p) {
+uptr MetaMap::FreeBlock(Processor *proc, uptr p, bool reset) {
MBlock* b = GetBlock(p);
if (b == 0)
return 0;
uptr sz = RoundUpTo(b->siz, kMetaShadowCell);
- FreeRange(proc, p, sz);
+ FreeRange(proc, p, sz, reset);
return sz;
}
-bool MetaMap::FreeRange(Processor *proc, uptr p, uptr sz) {
+bool MetaMap::FreeRange(Processor *proc, uptr p, uptr sz, bool reset) {
bool has_something = false;
u32 *meta = MemToMeta(p);
u32 *end = MemToMeta(p + sz);
@@ -98,7 +87,8 @@ bool MetaMap::FreeRange(Processor *proc, uptr p, uptr sz) {
DCHECK(idx & kFlagSync);
SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
u32 next = s->next;
- s->Reset(proc);
+ if (reset)
+ s->Reset();
sync_alloc_.Free(&proc->sync_cache, idx & ~kFlagMask);
idx = next;
} else {
@@ -115,30 +105,30 @@ bool MetaMap::FreeRange(Processor *proc, uptr p, uptr sz) {
// which can be huge. The function probes pages one-by-one until it finds a page
// without meta objects, at this point it stops freeing meta objects. Because
// thread stacks grow top-down, we do the same starting from end as well.
-void MetaMap::ResetRange(Processor *proc, uptr p, uptr sz) {
+void MetaMap::ResetRange(Processor *proc, uptr p, uptr sz, bool reset) {
if (SANITIZER_GO) {
// UnmapOrDie/MmapFixedNoReserve does not work on Windows,
// so we do the optimization only for C/C++.
- FreeRange(proc, p, sz);
+ FreeRange(proc, p, sz, reset);
return;
}
const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
if (sz <= 4 * kPageSize) {
// If the range is small, just do the normal free procedure.
- FreeRange(proc, p, sz);
+ FreeRange(proc, p, sz, reset);
return;
}
// First, round both ends of the range to page size.
uptr diff = RoundUp(p, kPageSize) - p;
if (diff != 0) {
- FreeRange(proc, p, diff);
+ FreeRange(proc, p, diff, reset);
p += diff;
sz -= diff;
}
diff = p + sz - RoundDown(p + sz, kPageSize);
if (diff != 0) {
- FreeRange(proc, p + sz - diff, diff);
+ FreeRange(proc, p + sz - diff, diff, reset);
sz -= diff;
}
// Now we must have a non-empty page-aligned range.
@@ -149,7 +139,7 @@ void MetaMap::ResetRange(Processor *proc, uptr p, uptr sz) {
const uptr sz0 = sz;
// Probe start of the range.
for (uptr checked = 0; sz > 0; checked += kPageSize) {
- bool has_something = FreeRange(proc, p, kPageSize);
+ bool has_something = FreeRange(proc, p, kPageSize, reset);
p += kPageSize;
sz -= kPageSize;
if (!has_something && checked > (128 << 10))
@@ -157,7 +147,7 @@ void MetaMap::ResetRange(Processor *proc, uptr p, uptr sz) {
}
// Probe end of the range.
for (uptr checked = 0; sz > 0; checked += kPageSize) {
- bool has_something = FreeRange(proc, p + sz - kPageSize, kPageSize);
+ bool has_something = FreeRange(proc, p + sz - kPageSize, kPageSize, reset);
sz -= kPageSize;
// Stacks grow down, so sync objects are most likely at the end of the region
// (if it is a stack). The very end of the stack is TLS and tsan increases
@@ -176,6 +166,27 @@ void MetaMap::ResetRange(Processor *proc, uptr p, uptr sz) {
Die();
}
+void MetaMap::ResetClocks() {
+ // This can be called from the background thread,
+ // which does not have a proc/cache.
+ // The cache is too large for the stack.
+ static InternalAllocatorCache cache;
+ internal_memset(&cache, 0, sizeof(cache));
+ internal_allocator()->InitCache(&cache);
+ sync_alloc_.ForEach([&](SyncVar *s) {
+ if (s->clock) {
+ InternalFree(s->clock, &cache);
+ s->clock = nullptr;
+ }
+ if (s->read_clock) {
+ InternalFree(s->read_clock, &cache);
+ s->read_clock = nullptr;
+ }
+ s->last_lock.Reset();
+ });
+ internal_allocator()->DestroyCache(&cache);
+}
+
MBlock* MetaMap::GetBlock(uptr p) {
u32 *meta = MemToMeta(p);
u32 idx = *meta;
@@ -190,63 +201,41 @@ MBlock* MetaMap::GetBlock(uptr p) {
}
}
-SyncVar* MetaMap::GetOrCreateAndLock(ThreadState *thr, uptr pc,
- uptr addr, bool write_lock) {
- return GetAndLock(thr, pc, addr, write_lock, true);
-}
-
-SyncVar* MetaMap::GetIfExistsAndLock(uptr addr, bool write_lock) {
- return GetAndLock(0, 0, addr, write_lock, false);
-}
-
-SyncVar *MetaMap::GetAndLock(ThreadState *thr, uptr pc, uptr addr, bool write_lock,
- bool create) NO_THREAD_SAFETY_ANALYSIS {
+SyncVar *MetaMap::GetSync(ThreadState *thr, uptr pc, uptr addr, bool create,
+ bool save_stack) {
+ DCHECK(!create || thr->slot_locked);
u32 *meta = MemToMeta(addr);
u32 idx0 = *meta;
u32 myidx = 0;
- SyncVar *mys = 0;
+ SyncVar *mys = nullptr;
for (;;) {
- u32 idx = idx0;
- for (;;) {
- if (idx == 0)
- break;
- if (idx & kFlagBlock)
- break;
+ for (u32 idx = idx0; idx && !(idx & kFlagBlock);) {
DCHECK(idx & kFlagSync);
SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask);
- if (s->addr == addr) {
- if (myidx != 0) {
- mys->Reset(thr->proc());
+ if (LIKELY(s->addr == addr)) {
+ if (UNLIKELY(myidx != 0)) {
+ mys->Reset();
sync_alloc_.Free(&thr->proc()->sync_cache, myidx);
}
- if (write_lock)
- s->mtx.Lock();
- else
- s->mtx.ReadLock();
return s;
}
idx = s->next;
}
if (!create)
- return 0;
- if (*meta != idx0) {
+ return nullptr;
+ if (UNLIKELY(*meta != idx0)) {
idx0 = *meta;
continue;
}
- if (myidx == 0) {
- const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
+ if (LIKELY(myidx == 0)) {
myidx = sync_alloc_.Alloc(&thr->proc()->sync_cache);
mys = sync_alloc_.Map(myidx);
- mys->Init(thr, pc, addr, uid);
+ mys->Init(thr, pc, addr, save_stack);
}
mys->next = idx0;
if (atomic_compare_exchange_strong((atomic_uint32_t*)meta, &idx0,
myidx | kFlagSync, memory_order_release)) {
- if (write_lock)
- mys->mtx.Lock();
- else
- mys->mtx.ReadLock();
return mys;
}
}
@@ -290,4 +279,11 @@ void MetaMap::OnProcIdle(Processor *proc) {
sync_alloc_.FlushCache(&proc->sync_cache);
}
+MetaMap::MemoryStats MetaMap::GetMemoryStats() const {
+ MemoryStats stats;
+ stats.mem_block = block_alloc_.AllocatedMemory();
+ stats.sync_obj = sync_alloc_.AllocatedMemory();
+ return stats;
+}
+
} // namespace __tsan
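GetSync above pushes a freshly initialized SyncVar onto the head of the per-cell list with a compare-exchange, retrying when another thread updates *meta first. The same lock-free push in standalone form with std::atomic (a sketch; Node/Push are illustrative names):

#include <atomic>
#include <cstdio>

struct Node {
  int value;
  Node *next;
};

std::atomic<Node *> head{nullptr};

// Lock-free push: link the new node to the current head and retry the
// compare-exchange if another thread updated the head first, mirroring
// the atomic_compare_exchange_strong loop in MetaMap::GetSync.
void Push(Node *n) {
  Node *old = head.load(std::memory_order_relaxed);
  do {
    n->next = old;  // cf. mys->next = idx0
  } while (!head.compare_exchange_weak(old, n, std::memory_order_release,
                                       std::memory_order_relaxed));
}

int main() {
  static Node a{1, nullptr}, b{2, nullptr};
  Push(&a);
  Push(&b);
  for (Node *p = head.load(); p; p = p->next) std::printf("%d\n", p->value);
}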
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_sync.h b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_sync.h
index 324aa1b0cea1..67d3c0b5e7dd 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_sync.h
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_sync.h
@@ -16,8 +16,9 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
#include "tsan_defs.h"
-#include "tsan_clock.h"
#include "tsan_dense_alloc.h"
+#include "tsan_shadow.h"
+#include "tsan_vector_clock.h"
namespace __tsan {
@@ -46,39 +47,25 @@ enum MutexFlags {
MutexFlagNotStatic,
};
+// SyncVar is a descriptor of a user synchronization object
+// (mutex or an atomic variable).
struct SyncVar {
SyncVar();
uptr addr; // overwritten by DenseSlabAlloc freelist
Mutex mtx;
- u64 uid; // Globally unique id.
- u32 creation_stack_id;
- u32 owner_tid; // Set only by exclusive owners.
- u64 last_lock;
+ StackID creation_stack_id;
+ Tid owner_tid; // Set only by exclusive owners.
+ FastState last_lock;
int recursion;
atomic_uint32_t flags;
u32 next; // in MetaMap
DDMutex dd;
- SyncClock read_clock; // Used for rw mutexes only.
- // The clock is placed last, so that it is situated on a different cache line
- // with the mtx. This reduces contention for hot sync objects.
- SyncClock clock;
+ VectorClock *read_clock; // Used for rw mutexes only.
+ VectorClock *clock;
- void Init(ThreadState *thr, uptr pc, uptr addr, u64 uid);
- void Reset(Processor *proc);
-
- u64 GetId() const {
- // 48 lsb is addr, then 14 bits is low part of uid, then 2 zero bits.
- return GetLsb((u64)addr | (uid << 48), 60);
- }
- bool CheckId(u64 uid) const {
- CHECK_EQ(uid, GetLsb(uid, 14));
- return GetLsb(this->uid, 14) == uid;
- }
- static uptr SplitId(u64 id, u64 *uid) {
- *uid = id >> 48;
- return (uptr)GetLsb(id, 48);
- }
+ void Init(ThreadState *thr, uptr pc, uptr addr, bool save_stack);
+ void Reset();
bool IsFlagSet(u32 f) const {
return atomic_load_relaxed(&flags) & f;
@@ -101,28 +88,48 @@ struct SyncVar {
}
};
-/* MetaMap allows to map arbitrary user pointers onto various descriptors.
- Currently it maps pointers to heap block descriptors and sync var descs.
- It uses 1/2 direct shadow, see tsan_platform.h.
-*/
+// MetaMap maps app addresses to heap block (MBlock) and sync var (SyncVar)
+// descriptors. It uses 1/2 direct shadow, see tsan_platform.h for the mapping.
class MetaMap {
public:
MetaMap();
void AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz);
- uptr FreeBlock(Processor *proc, uptr p);
- bool FreeRange(Processor *proc, uptr p, uptr sz);
- void ResetRange(Processor *proc, uptr p, uptr sz);
+
+ // FreeBlock resets all sync objects in the range if reset=true and must not
+ // run concurrently with ResetClocks which resets all sync objects
+ // w/o any synchronization (as part of DoReset).
+ // If we don't have a thread slot (very early/late in thread lifetime or
+ // Go/Java callbacks) or the slot is not locked, then reset must be set to
+ // false. In that case the sync objects' clocks will be reset later (when
+ // the object is reused or during the next ResetClocks).
+ uptr FreeBlock(Processor *proc, uptr p, bool reset);
+ bool FreeRange(Processor *proc, uptr p, uptr sz, bool reset);
+ void ResetRange(Processor *proc, uptr p, uptr sz, bool reset);
+ // Reset vector clocks of all sync objects.
+ // Must be called when no other threads access sync objects.
+ void ResetClocks();
MBlock* GetBlock(uptr p);
- SyncVar* GetOrCreateAndLock(ThreadState *thr, uptr pc,
- uptr addr, bool write_lock);
- SyncVar* GetIfExistsAndLock(uptr addr, bool write_lock);
+ SyncVar *GetSyncOrCreate(ThreadState *thr, uptr pc, uptr addr,
+ bool save_stack) {
+ return GetSync(thr, pc, addr, true, save_stack);
+ }
+ SyncVar *GetSyncIfExists(uptr addr) {
+ return GetSync(nullptr, 0, addr, false, false);
+ }
void MoveMemory(uptr src, uptr dst, uptr sz);
void OnProcIdle(Processor *proc);
+ struct MemoryStats {
+ uptr mem_block;
+ uptr sync_obj;
+ };
+
+ MemoryStats GetMemoryStats() const;
+
private:
static const u32 kFlagMask = 3u << 30;
static const u32 kFlagBlock = 1u << 30;
@@ -131,10 +138,9 @@ class MetaMap {
typedef DenseSlabAlloc<SyncVar, 1 << 20, 1 << 10, kFlagMask> SyncAlloc;
BlockAlloc block_alloc_;
SyncAlloc sync_alloc_;
- atomic_uint64_t uid_gen_;
- SyncVar* GetAndLock(ThreadState *thr, uptr pc, uptr addr, bool write_lock,
- bool create);
+ SyncVar *GetSync(ThreadState *thr, uptr pc, uptr addr, bool create,
+ bool save_stack);
};
} // namespace __tsan
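The meta shadow stores 30-bit dense-allocator indices tagged with two high bits (kFlagBlock vs. kFlagSync, masked by kFlagMask). A small sketch of that tagging scheme; kFlagMask and kFlagBlock match the header, while kFlagSync's exact value is assumed here:

#include <cassert>
#include <cstdint>

constexpr uint32_t kFlagMask = 3u << 30;
constexpr uint32_t kFlagBlock = 1u << 30;
constexpr uint32_t kFlagSync = 2u << 30;  // assumed value

int main() {
  uint32_t meta = 12345u | kFlagSync;       // store a tagged sync index
  assert(meta & kFlagSync);                 // dispatch on the tag...
  assert(!(meta & kFlagBlock));
  assert((meta & ~kFlagMask) == 12345u);    // ...then recover the raw index
}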
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_trace.h b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_trace.h
index f5e0c407cda8..01bb7b34f43a 100644
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_trace.h
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_trace.h
@@ -13,58 +13,201 @@
#define TSAN_TRACE_H
#include "tsan_defs.h"
-#include "tsan_stack_trace.h"
+#include "tsan_ilist.h"
#include "tsan_mutexset.h"
+#include "tsan_stack_trace.h"
namespace __tsan {
-const int kTracePartSizeBits = 13;
-const int kTracePartSize = 1 << kTracePartSizeBits;
-const int kTraceParts = 2 * 1024 * 1024 / kTracePartSize;
-const int kTraceSize = kTracePartSize * kTraceParts;
-
-// Must fit into 3 bits.
-enum EventType {
- EventTypeMop,
- EventTypeFuncEnter,
- EventTypeFuncExit,
- EventTypeLock,
- EventTypeUnlock,
- EventTypeRLock,
- EventTypeRUnlock
+enum class EventType : u64 {
+ kAccessExt,
+ kAccessRange,
+ kLock,
+ kRLock,
+ kUnlock,
+ kTime,
+};
+
+// "Base" type for all events for type dispatch.
+struct Event {
+ // We use variable-length type encoding to give more bits to some event
+ // types that need them. If is_access is set, this is EventAccess.
+ // Otherwise, if is_func is set, this is EventFunc.
+ // Otherwise type denotes the type.
+ u64 is_access : 1;
+ u64 is_func : 1;
+ EventType type : 3;
+ u64 _ : 59;
+};
+static_assert(sizeof(Event) == 8, "bad Event size");
+
+// Nop event used as padding and does not affect state during replay.
+static constexpr Event NopEvent = {1, 0, EventType::kAccessExt, 0};
+
+// Compressed memory access can represent only some events with PCs
+// close enough to each other. Otherwise we fall back to EventAccessExt.
+struct EventAccess {
+ static constexpr uptr kPCBits = 15;
+ static_assert(kPCBits + kCompressedAddrBits + 5 == 64,
+ "unused bits in EventAccess");
+
+ u64 is_access : 1; // = 1
+ u64 is_read : 1;
+ u64 is_atomic : 1;
+ u64 size_log : 2;
+ u64 pc_delta : kPCBits; // signed delta from the previous memory access PC
+ u64 addr : kCompressedAddrBits;
};
+static_assert(sizeof(EventAccess) == 8, "bad EventAccess size");
-// Represents a thread event (from most significant bit):
-// u64 typ : 3; // EventType.
-// u64 addr : 61; // Associated pc.
-typedef u64 Event;
+// Function entry (pc != 0) or exit (pc == 0).
+struct EventFunc {
+ u64 is_access : 1; // = 0
+ u64 is_func : 1; // = 1
+ u64 pc : 62;
+};
+static_assert(sizeof(EventFunc) == 8, "bad EventFunc size");
+
+// Extended memory access with full PC.
+struct EventAccessExt {
+ // Note: precisely specifying the unused parts of the bitfield is critical for
+ // performance. If we don't specify them, the compiler will generate code
+ // to load the old value, shuffle it to extract the unused bits and apply
+ // them to the new value. If we specify the unused part and store 0 in
+ // there, all that unnecessary code goes away (the store of the 0 constant
+ // is combined with the other constant parts).
+ static constexpr uptr kUnusedBits = 11;
+ static_assert(kCompressedAddrBits + kUnusedBits + 9 == 64,
+ "unused bits in EventAccessExt");
+
+ u64 is_access : 1; // = 0
+ u64 is_func : 1; // = 0
+ EventType type : 3; // = EventType::kAccessExt
+ u64 is_read : 1;
+ u64 is_atomic : 1;
+ u64 size_log : 2;
+ u64 _ : kUnusedBits;
+ u64 addr : kCompressedAddrBits;
+ u64 pc;
+};
+static_assert(sizeof(EventAccessExt) == 16, "bad EventAccessExt size");
+
+// Access to a memory range.
+struct EventAccessRange {
+ static constexpr uptr kSizeLoBits = 13;
+ static_assert(kCompressedAddrBits + kSizeLoBits + 7 == 64,
+ "unused bits in EventAccessRange");
+
+ u64 is_access : 1; // = 0
+ u64 is_func : 1; // = 0
+ EventType type : 3; // = EventType::kAccessRange
+ u64 is_read : 1;
+ u64 is_free : 1;
+ u64 size_lo : kSizeLoBits;
+ u64 pc : kCompressedAddrBits;
+ u64 addr : kCompressedAddrBits;
+ u64 size_hi : 64 - kCompressedAddrBits;
+};
+static_assert(sizeof(EventAccessRange) == 16, "bad EventAccessRange size");
-const uptr kEventPCBits = 61;
+// Mutex lock.
+struct EventLock {
+ static constexpr uptr kStackIDLoBits = 15;
+ static constexpr uptr kStackIDHiBits =
+ sizeof(StackID) * kByteBits - kStackIDLoBits;
+ static constexpr uptr kUnusedBits = 3;
+ static_assert(kCompressedAddrBits + kStackIDLoBits + 5 == 64,
+ "unused bits in EventLock");
+ static_assert(kCompressedAddrBits + kStackIDHiBits + kUnusedBits == 64,
+ "unused bits in EventLock");
+
+ u64 is_access : 1; // = 0
+ u64 is_func : 1; // = 0
+ EventType type : 3; // = EventType::kLock or EventType::kRLock
+ u64 pc : kCompressedAddrBits;
+ u64 stack_lo : kStackIDLoBits;
+ u64 stack_hi : sizeof(StackID) * kByteBits - kStackIDLoBits;
+ u64 _ : kUnusedBits;
+ u64 addr : kCompressedAddrBits;
+};
+static_assert(sizeof(EventLock) == 16, "bad EventLock size");
+
+// Mutex unlock.
+struct EventUnlock {
+ static constexpr uptr kUnusedBits = 15;
+ static_assert(kCompressedAddrBits + kUnusedBits + 5 == 64,
+ "unused bits in EventUnlock");
+
+ u64 is_access : 1; // = 0
+ u64 is_func : 1; // = 0
+ EventType type : 3; // = EventType::kUnlock
+ u64 _ : kUnusedBits;
+ u64 addr : kCompressedAddrBits;
+};
+static_assert(sizeof(EventUnlock) == 8, "bad EventUnlock size");
+
+// Time change event.
+struct EventTime {
+ static constexpr uptr kUnusedBits = 37;
+ static_assert(kUnusedBits + sizeof(Sid) * kByteBits + kEpochBits + 5 == 64,
+ "unused bits in EventTime");
+
+ u64 is_access : 1; // = 0
+ u64 is_func : 1; // = 0
+ EventType type : 3; // = EventType::kTime
+ u64 sid : sizeof(Sid) * kByteBits;
+ u64 epoch : kEpochBits;
+ u64 _ : kUnusedBits;
+};
+static_assert(sizeof(EventTime) == 8, "bad EventTime size");
+
+struct Trace;
struct TraceHeader {
-#if !SANITIZER_GO
- BufferedStackTrace stack0; // Start stack for the trace.
-#else
- VarSizeStackTrace stack0;
-#endif
- u64 epoch0; // Start epoch for the trace.
- MutexSet mset0;
-
- TraceHeader() : stack0(), epoch0() {}
+ Trace* trace = nullptr; // back-pointer to Trace containing this part
+ INode trace_parts; // in Trace::parts
+ INode global; // in Context::trace_part_recycle
};
+struct TracePart : TraceHeader {
+ // There are a lot of goroutines in Go, so we use smaller parts.
+ static constexpr uptr kByteSize = (SANITIZER_GO ? 128 : 256) << 10;
+ static constexpr uptr kSize =
+ (kByteSize - sizeof(TraceHeader)) / sizeof(Event);
+ // TraceAcquire does a fast event pointer overflow check by comparing the
+ // pointer into TracePart::events with the kAlignment mask. Since TraceParts
+ // are allocated page-aligned, this check detects the end of the array
+ // (it also has false positives in the middle that are filtered separately).
+ // This also requires events to be the last field.
+ static constexpr uptr kAlignment = 0xff0;
+ Event events[kSize];
+
+ TracePart() {}
+};
+static_assert(sizeof(TracePart) == TracePart::kByteSize, "bad TracePart size");
+
struct Trace {
Mutex mtx;
-#if !SANITIZER_GO
- // Must be last to catch overflow as paging fault.
- // Go shadow stack is dynamically allocated.
- uptr shadow_stack[kShadowStackSize];
-#endif
- // Must be the last field, because we unmap the unused part in
- // CreateThreadContext.
- TraceHeader headers[kTraceParts];
+ IList<TraceHeader, &TraceHeader::trace_parts, TracePart> parts;
+ // First node not queued into ctx->trace_part_recycle.
+ TracePart* local_head;
+ // Final position in the last part for finished threads.
+ Event* final_pos = nullptr;
+ // Number of trace parts allocated on behalf of this trace specifically.
+ // Total number of parts in this trace can be larger if we retake some
+ // parts from other traces.
+ uptr parts_allocated = 0;
Trace() : mtx(MutexTypeTrace) {}
+
+ // We need at least 3 parts per thread, because we want to keep at least
+ // 2 parts per thread that are not queued into ctx->trace_part_recycle
+ // (the current one being filled and one full part that ensures that
+ // we always have at least one part worth of previous memory accesses).
+ static constexpr uptr kMinParts = 3;
+
+ static constexpr uptr kFinishedThreadLo = 16;
+ static constexpr uptr kFinishedThreadHi = 64;
};
} // namespace __tsan
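Replay decodes the variable-length event encoding above by testing is_access first, then is_func, and only then trusting the 3-bit type field, because compressed accesses and function events reuse those bits for payload. A compact sketch of that dispatch (Classify is an illustrative name, not a runtime function):

#include <cstdint>
#include <cstdio>

enum class EventType : uint64_t {
  kAccessExt, kAccessRange, kLock, kRLock, kUnlock, kTime,
};

struct Event {
  uint64_t is_access : 1;
  uint64_t is_func : 1;
  EventType type : 3;
  uint64_t _ : 59;
};

// The two leading bits are checked before the type field is consulted.
const char *Classify(Event ev) {
  if (ev.is_access) return "compressed memory access";
  if (ev.is_func) return "function entry/exit";
  switch (ev.type) {
    case EventType::kAccessExt: return "extended access";
    case EventType::kAccessRange: return "range access";
    case EventType::kLock:
    case EventType::kRLock: return "mutex lock";
    case EventType::kUnlock: return "mutex unlock";
    case EventType::kTime: return "time change";
  }
  return "unreachable";
}

int main() {
  Event e = {0, 1, EventType::kAccessExt, 0};
  std::printf("%s\n", Classify(e));  // function entry/exit
}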
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_update_shadow_word_inl.h b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_update_shadow_word_inl.h
deleted file mode 100644
index d23dfb0ba061..000000000000
--- a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_update_shadow_word_inl.h
+++ /dev/null
@@ -1,59 +0,0 @@
-//===-- tsan_update_shadow_word_inl.h ---------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of ThreadSanitizer (TSan), a race detector.
-//
-// Body of the hottest inner loop.
-// If we wrap this body into a function, compilers (both gcc and clang)
-// produce slightly less efficient code.
-//===----------------------------------------------------------------------===//
-do {
- const unsigned kAccessSize = 1 << kAccessSizeLog;
- u64 *sp = &shadow_mem[idx];
- old = LoadShadow(sp);
- if (LIKELY(old.IsZero())) {
- if (!stored) {
- StoreIfNotYetStored(sp, &store_word);
- stored = true;
- }
- break;
- }
- // is the memory access equal to the previous?
- if (LIKELY(Shadow::Addr0AndSizeAreEqual(cur, old))) {
- // same thread?
- if (LIKELY(Shadow::TidsAreEqual(old, cur))) {
- if (LIKELY(old.IsRWWeakerOrEqual(kAccessIsWrite, kIsAtomic))) {
- StoreIfNotYetStored(sp, &store_word);
- stored = true;
- }
- break;
- }
- if (HappensBefore(old, thr)) {
- if (old.IsRWWeakerOrEqual(kAccessIsWrite, kIsAtomic)) {
- StoreIfNotYetStored(sp, &store_word);
- stored = true;
- }
- break;
- }
- if (LIKELY(old.IsBothReadsOrAtomic(kAccessIsWrite, kIsAtomic)))
- break;
- goto RACE;
- }
- // Do the memory access intersect?
- if (Shadow::TwoRangesIntersect(old, cur, kAccessSize)) {
- if (Shadow::TidsAreEqual(old, cur))
- break;
- if (old.IsBothReadsOrAtomic(kAccessIsWrite, kIsAtomic))
- break;
- if (LIKELY(HappensBefore(old, thr)))
- break;
- goto RACE;
- }
- // The accesses do not intersect.
- break;
-} while (0);
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_vector_clock.cpp b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_vector_clock.cpp
new file mode 100644
index 000000000000..278298565d3f
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_vector_clock.cpp
@@ -0,0 +1,126 @@
+//===-- tsan_vector_clock.cpp ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_vector_clock.h"
+
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "tsan_mman.h"
+
+namespace __tsan {
+
+#if TSAN_VECTORIZE
+const uptr kVectorClockSize = kThreadSlotCount * sizeof(Epoch) / sizeof(m128);
+#endif
+
+VectorClock::VectorClock() { Reset(); }
+
+void VectorClock::Reset() {
+#if !TSAN_VECTORIZE
+ for (uptr i = 0; i < kThreadSlotCount; i++)
+ clk_[i] = kEpochZero;
+#else
+ m128 z = _mm_setzero_si128();
+ m128* vclk = reinterpret_cast<m128*>(clk_);
+ for (uptr i = 0; i < kVectorClockSize; i++) _mm_store_si128(&vclk[i], z);
+#endif
+}
+
+void VectorClock::Acquire(const VectorClock* src) {
+ if (!src)
+ return;
+#if !TSAN_VECTORIZE
+ for (uptr i = 0; i < kThreadSlotCount; i++)
+ clk_[i] = max(clk_[i], src->clk_[i]);
+#else
+ m128* __restrict vdst = reinterpret_cast<m128*>(clk_);
+ m128 const* __restrict vsrc = reinterpret_cast<m128 const*>(src->clk_);
+ for (uptr i = 0; i < kVectorClockSize; i++) {
+ m128 s = _mm_load_si128(&vsrc[i]);
+ m128 d = _mm_load_si128(&vdst[i]);
+ m128 m = _mm_max_epu16(s, d);
+ _mm_store_si128(&vdst[i], m);
+ }
+#endif
+}
+
+static VectorClock* AllocClock(VectorClock** dstp) {
+ if (UNLIKELY(!*dstp))
+ *dstp = New<VectorClock>();
+ return *dstp;
+}
+
+void VectorClock::Release(VectorClock** dstp) const {
+ VectorClock* dst = AllocClock(dstp);
+ dst->Acquire(this);
+}
+
+void VectorClock::ReleaseStore(VectorClock** dstp) const {
+ VectorClock* dst = AllocClock(dstp);
+ *dst = *this;
+}
+
+VectorClock& VectorClock::operator=(const VectorClock& other) {
+#if !TSAN_VECTORIZE
+ for (uptr i = 0; i < kThreadSlotCount; i++)
+ clk_[i] = other.clk_[i];
+#else
+ m128* __restrict vdst = reinterpret_cast<m128*>(clk_);
+ m128 const* __restrict vsrc = reinterpret_cast<m128 const*>(other.clk_);
+ for (uptr i = 0; i < kVectorClockSize; i++) {
+ m128 s = _mm_load_si128(&vsrc[i]);
+ _mm_store_si128(&vdst[i], s);
+ }
+#endif
+ return *this;
+}
+
+void VectorClock::ReleaseStoreAcquire(VectorClock** dstp) {
+ VectorClock* dst = AllocClock(dstp);
+#if !TSAN_VECTORIZE
+ for (uptr i = 0; i < kThreadSlotCount; i++) {
+ Epoch tmp = dst->clk_[i];
+ dst->clk_[i] = clk_[i];
+ clk_[i] = max(clk_[i], tmp);
+ }
+#else
+ m128* __restrict vdst = reinterpret_cast<m128*>(dst->clk_);
+ m128* __restrict vclk = reinterpret_cast<m128*>(clk_);
+ for (uptr i = 0; i < kVectorClockSize; i++) {
+ m128 t = _mm_load_si128(&vdst[i]);
+ m128 c = _mm_load_si128(&vclk[i]);
+ m128 m = _mm_max_epu16(c, t);
+ _mm_store_si128(&vdst[i], c);
+ _mm_store_si128(&vclk[i], m);
+ }
+#endif
+}
+
+void VectorClock::ReleaseAcquire(VectorClock** dstp) {
+ VectorClock* dst = AllocClock(dstp);
+#if !TSAN_VECTORIZE
+ for (uptr i = 0; i < kThreadSlotCount; i++) {
+ dst->clk_[i] = max(dst->clk_[i], clk_[i]);
+ clk_[i] = dst->clk_[i];
+ }
+#else
+ m128* __restrict vdst = reinterpret_cast<m128*>(dst->clk_);
+ m128* __restrict vclk = reinterpret_cast<m128*>(clk_);
+ for (uptr i = 0; i < kVectorClockSize; i++) {
+ m128 c = _mm_load_si128(&vclk[i]);
+ m128 d = _mm_load_si128(&vdst[i]);
+ m128 m = _mm_max_epu16(c, d);
+ _mm_store_si128(&vdst[i], m);
+ _mm_store_si128(&vclk[i], m);
+ }
+#endif
+}
+
+} // namespace __tsan
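The SSE paths above vectorize a pointwise max over 16-bit epochs (_mm_max_epu16 processes 8 slots per 128-bit lane). The same semantics in scalar form, with a toy slot count standing in for kThreadSlotCount:

#include <algorithm>
#include <cstdio>

constexpr int kSlots = 8;  // stand-in for kThreadSlotCount

struct Clk {
  unsigned short e[kSlots] = {};  // one 16-bit epoch per slot
};

// Acquire: pointwise max, what _mm_max_epu16 computes 8 slots at a time.
void Acquire(Clk &dst, const Clk &src) {
  for (int i = 0; i < kSlots; i++) dst.e[i] = std::max(dst.e[i], src.e[i]);
}

// ReleaseAcquire: merge into dst, then copy the merged clock back.
void ReleaseAcquire(Clk &clk, Clk &dst) {
  for (int i = 0; i < kSlots; i++) {
    dst.e[i] = std::max(dst.e[i], clk.e[i]);
    clk.e[i] = dst.e[i];
  }
}

int main() {
  Clk thread, sync;
  thread.e[0] = 5;
  sync.e[1] = 7;
  ReleaseAcquire(thread, sync);
  std::printf("%u %u\n", (unsigned)thread.e[1], (unsigned)sync.e[0]);  // 7 5
}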
diff --git a/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_vector_clock.h b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_vector_clock.h
new file mode 100644
index 000000000000..63b206302190
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_vector_clock.h
@@ -0,0 +1,51 @@
+//===-- tsan_vector_clock.h -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_VECTOR_CLOCK_H
+#define TSAN_VECTOR_CLOCK_H
+
+#include "tsan_defs.h"
+
+namespace __tsan {
+
+// Fixed-size vector clock, used both for threads and sync objects.
+class VectorClock {
+ public:
+ VectorClock();
+
+ Epoch Get(Sid sid) const;
+ void Set(Sid sid, Epoch v);
+
+ void Reset();
+ void Acquire(const VectorClock* src);
+ void Release(VectorClock** dstp) const;
+ void ReleaseStore(VectorClock** dstp) const;
+ void ReleaseStoreAcquire(VectorClock** dstp);
+ void ReleaseAcquire(VectorClock** dstp);
+
+ VectorClock& operator=(const VectorClock& other);
+
+ private:
+ Epoch clk_[kThreadSlotCount] VECTOR_ALIGNED;
+};
+
+ALWAYS_INLINE Epoch VectorClock::Get(Sid sid) const {
+ return clk_[static_cast<u8>(sid)];
+}
+
+ALWAYS_INLINE void VectorClock::Set(Sid sid, Epoch v) {
+ DCHECK_GE(v, clk_[static_cast<u8>(sid)]);
+ clk_[static_cast<u8>(sid)] = v;
+}
+
+} // namespace __tsan
+
+#endif // TSAN_VECTOR_CLOCK_H
diff --git a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_diag.cpp b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_diag.cpp
index ef2e495cac8e..67e884e4916c 100644
--- a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_diag.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_diag.cpp
@@ -32,15 +32,13 @@ using namespace __ubsan;
// Windows.
// TODO(yln): This is a temporary workaround. GetStackTrace functions will be
// removed in the future.
-void ubsan_GetStackTrace(BufferedStackTrace *stack, uptr max_depth,
- uptr pc, uptr bp, void *context, bool fast) {
+void ubsan_GetStackTrace(BufferedStackTrace *stack, uptr max_depth, uptr pc,
+ uptr bp, void *context, bool request_fast) {
uptr top = 0;
uptr bottom = 0;
- if (StackTrace::WillUseFastUnwind(fast)) {
- GetThreadStackTopAndBottom(false, &top, &bottom);
- stack->Unwind(max_depth, pc, bp, nullptr, top, bottom, true);
- } else
- stack->Unwind(max_depth, pc, bp, context, 0, 0, false);
+ GetThreadStackTopAndBottom(false, &top, &bottom);
+ bool fast = StackTrace::WillUseFastUnwind(request_fast);
+ stack->Unwind(max_depth, pc, bp, context, top, bottom, fast);
}
static void MaybePrintStackTrace(uptr pc, uptr bp) {
@@ -90,7 +88,7 @@ static void MaybeReportErrorSummary(Location Loc, ErrorType Type) {
AI.file = internal_strdup(SLoc.getFilename());
AI.line = SLoc.getLine();
AI.column = SLoc.getColumn();
- AI.function = internal_strdup(""); // Avoid printing ?? as function name.
+ AI.function = nullptr;
ReportErrorSummary(ErrorKind, AI, GetSanititizerToolName());
AI.Clear();
return;
@@ -136,9 +134,9 @@ Diag &Diag::operator<<(const Value &V) {
/// Hexadecimal printing for numbers too large for Printf to handle directly.
static void RenderHex(InternalScopedString *Buffer, UIntMax Val) {
#if HAVE_INT128_T
- Buffer->append("0x%08x%08x%08x%08x", (unsigned int)(Val >> 96),
- (unsigned int)(Val >> 64), (unsigned int)(Val >> 32),
- (unsigned int)(Val));
+ Buffer->AppendF("0x%08x%08x%08x%08x", (unsigned int)(Val >> 96),
+ (unsigned int)(Val >> 64), (unsigned int)(Val >> 32),
+ (unsigned int)(Val));
#else
UNREACHABLE("long long smaller than 64 bits?");
#endif
@@ -149,31 +147,34 @@ static void RenderLocation(InternalScopedString *Buffer, Location Loc) {
case Location::LK_Source: {
SourceLocation SLoc = Loc.getSourceLocation();
if (SLoc.isInvalid())
- Buffer->append("<unknown>");
+ Buffer->AppendF("<unknown>");
else
- RenderSourceLocation(Buffer, SLoc.getFilename(), SLoc.getLine(),
- SLoc.getColumn(), common_flags()->symbolize_vs_style,
- common_flags()->strip_path_prefix);
+ StackTracePrinter::GetOrInit()->RenderSourceLocation(
+ Buffer, SLoc.getFilename(), SLoc.getLine(), SLoc.getColumn(),
+ common_flags()->symbolize_vs_style,
+ common_flags()->strip_path_prefix);
return;
}
case Location::LK_Memory:
- Buffer->append("%p", Loc.getMemoryLocation());
+ Buffer->AppendF("%p", reinterpret_cast<void *>(Loc.getMemoryLocation()));
return;
case Location::LK_Symbolized: {
const AddressInfo &Info = Loc.getSymbolizedStack()->info;
if (Info.file)
- RenderSourceLocation(Buffer, Info.file, Info.line, Info.column,
- common_flags()->symbolize_vs_style,
- common_flags()->strip_path_prefix);
+ StackTracePrinter::GetOrInit()->RenderSourceLocation(
+ Buffer, Info.file, Info.line, Info.column,
+ common_flags()->symbolize_vs_style,
+ common_flags()->strip_path_prefix);
else if (Info.module)
- RenderModuleLocation(Buffer, Info.module, Info.module_offset,
- Info.module_arch, common_flags()->strip_path_prefix);
+ StackTracePrinter::GetOrInit()->RenderModuleLocation(
+ Buffer, Info.module, Info.module_offset, Info.module_arch,
+ common_flags()->strip_path_prefix);
else
- Buffer->append("%p", Info.address);
+ Buffer->AppendF("%p", reinterpret_cast<void *>(Info.address));
return;
}
case Location::LK_Null:
- Buffer->append("<unknown>");
+ Buffer->AppendF("<unknown>");
return;
}
}
@@ -182,32 +183,32 @@ static void RenderText(InternalScopedString *Buffer, const char *Message,
const Diag::Arg *Args) {
for (const char *Msg = Message; *Msg; ++Msg) {
if (*Msg != '%') {
- Buffer->append("%c", *Msg);
+ Buffer->AppendF("%c", *Msg);
continue;
}
const Diag::Arg &A = Args[*++Msg - '0'];
switch (A.Kind) {
case Diag::AK_String:
- Buffer->append("%s", A.String);
+ Buffer->AppendF("%s", A.String);
break;
case Diag::AK_TypeName: {
if (SANITIZER_WINDOWS)
// The Windows implementation demangles names early.
- Buffer->append("'%s'", A.String);
+ Buffer->AppendF("'%s'", A.String);
else
- Buffer->append("'%s'", Symbolizer::GetOrInit()->Demangle(A.String));
+ Buffer->AppendF("'%s'", Symbolizer::GetOrInit()->Demangle(A.String));
break;
}
case Diag::AK_SInt:
// 'long long' is guaranteed to be at least 64 bits wide.
if (A.SInt >= INT64_MIN && A.SInt <= INT64_MAX)
- Buffer->append("%lld", (long long)A.SInt);
+ Buffer->AppendF("%lld", (long long)A.SInt);
else
RenderHex(Buffer, A.SInt);
break;
case Diag::AK_UInt:
if (A.UInt <= UINT64_MAX)
- Buffer->append("%llu", (unsigned long long)A.UInt);
+ Buffer->AppendF("%llu", (unsigned long long)A.UInt);
else
RenderHex(Buffer, A.UInt);
break;
@@ -216,15 +217,20 @@ static void RenderText(InternalScopedString *Buffer, const char *Message,
// printf, and stop using snprintf here.
char FloatBuffer[32];
#if SANITIZER_WINDOWS
- sprintf_s(FloatBuffer, sizeof(FloatBuffer), "%Lg", (long double)A.Float);
+ // On MSVC platforms, long doubles are equal to regular doubles.
+ // In MinGW environments on x86, long doubles are 80 bit, but here,
+ // we're calling an MS CRT provided printf function which considers
+ // long doubles to be 64 bit. Just cast the float value to a regular
+ // double to avoid the potential ambiguity in MinGW mode.
+ sprintf_s(FloatBuffer, sizeof(FloatBuffer), "%g", (double)A.Float);
#else
snprintf(FloatBuffer, sizeof(FloatBuffer), "%Lg", (long double)A.Float);
#endif
- Buffer->append("%s", FloatBuffer);
+ Buffer->Append(FloatBuffer);
break;
}
case Diag::AK_Pointer:
- Buffer->append("%p", A.Pointer);
+ Buffer->AppendF("%p", A.Pointer);
break;
}
}
@@ -281,12 +287,12 @@ static void PrintMemorySnippet(const Decorator &Decor, MemoryLocation Loc,
InternalScopedString Buffer;
for (uptr P = Min; P != Max; ++P) {
unsigned char C = *reinterpret_cast<const unsigned char*>(P);
- Buffer.append("%s%02x", (P % 8 == 0) ? " " : " ", C);
+ Buffer.AppendF("%s%02x", (P % 8 == 0) ? " " : " ", C);
}
- Buffer.append("\n");
+ Buffer.AppendF("\n");
// Emit highlights.
- Buffer.append(Decor.Highlight());
+ Buffer.Append(Decor.Highlight());
Range *InRange = upperBound(Min, Ranges, NumRanges);
for (uptr P = Min; P != Max; ++P) {
char Pad = ' ', Byte = ' ';
@@ -299,12 +305,12 @@ static void PrintMemorySnippet(const Decorator &Decor, MemoryLocation Loc,
if (InRange && InRange->getStart().getMemoryLocation() <= P)
Byte = '~';
if (P % 8 == 0)
- Buffer.append("%c", Pad);
- Buffer.append("%c", Pad);
- Buffer.append("%c", P == Loc ? '^' : Byte);
- Buffer.append("%c", Byte);
+ Buffer.AppendF("%c", Pad);
+ Buffer.AppendF("%c", Pad);
+ Buffer.AppendF("%c", P == Loc ? '^' : Byte);
+ Buffer.AppendF("%c", Byte);
}
- Buffer.append("%s\n", Decor.Default());
+ Buffer.AppendF("%s\n", Decor.Default());
// Go over the line again, and print names for the ranges.
InRange = 0;
@@ -319,9 +325,9 @@ static void PrintMemorySnippet(const Decorator &Decor, MemoryLocation Loc,
if (InRange && InRange->getStart().getMemoryLocation() == P) {
while (Spaces--)
- Buffer.append(" ");
+ Buffer.AppendF(" ");
RenderText(&Buffer, InRange->getText(), Args);
- Buffer.append("\n");
+ Buffer.AppendF("\n");
// FIXME: We only support naming one range for now!
break;
}
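// A standalone sketch of the layout rendered above: one line of hex bytes
// (with an extra space every 8 bytes) and a highlight line placing '^' under
// the first hex digit of the byte at the faulting address. Indexing here uses
// the array offset where the real code uses the address modulo 8.
#include <cstddef>
#include <cstdio>

static void printSnippet(const unsigned char *Bytes, size_t N, size_t Loc) {
  for (size_t P = 0; P != N; ++P)
    std::printf("%s%02x", (P % 8 == 0) ? "  " : " ", Bytes[P]);
  std::printf("\n");
  for (size_t P = 0; P != N; ++P) {
    if (P % 8 == 0)
      std::printf(" ");
    std::printf(" %c ", P == Loc ? '^' : ' ');
  }
  std::printf("\n");
}

int main() {
  unsigned char Mem[16] = {0xde, 0xad, 0xbe, 0xef};
  printSnippet(Mem, sizeof(Mem), 2); // caret under the 0xbe byte
}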
@@ -355,24 +361,24 @@ Diag::~Diag() {
Buffer.clear();
}
- Buffer.append(Decor.Bold());
+ Buffer.Append(Decor.Bold());
RenderLocation(&Buffer, Loc);
- Buffer.append(":");
+ Buffer.AppendF(":");
switch (Level) {
case DL_Error:
- Buffer.append("%s runtime error: %s%s", Decor.Warning(), Decor.Default(),
- Decor.Bold());
+ Buffer.AppendF("%s runtime error: %s%s", Decor.Warning(), Decor.Default(),
+ Decor.Bold());
break;
case DL_Note:
- Buffer.append("%s note: %s", Decor.Note(), Decor.Default());
+ Buffer.AppendF("%s note: %s", Decor.Note(), Decor.Default());
break;
}
RenderText(&Buffer, Message, Args);
- Buffer.append("%s\n", Decor.Default());
+ Buffer.AppendF("%s\n", Decor.Default());
Printf("%s", Buffer.data());
if (Loc.isMemoryLocation())
diff --git a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_diag.h b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_diag.h
index b444e971b228..c836647c98f3 100644
--- a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_diag.h
+++ b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_diag.h
@@ -18,26 +18,6 @@
namespace __ubsan {
-class SymbolizedStackHolder {
- SymbolizedStack *Stack;
-
- void clear() {
- if (Stack)
- Stack->ClearAll();
- }
-
-public:
- explicit SymbolizedStackHolder(SymbolizedStack *Stack = nullptr)
- : Stack(Stack) {}
- ~SymbolizedStackHolder() { clear(); }
- void reset(SymbolizedStack *S) {
- if (Stack != S)
- clear();
- Stack = S;
- }
- const SymbolizedStack *get() const { return Stack; }
-};
-
SymbolizedStack *getSymbolizedLocation(uptr PC);
inline SymbolizedStack *getCallerLocation(uptr CallerPC) {
diff --git a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers.cpp b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers.cpp
index e201e6bba220..0f16507d5d88 100644
--- a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers.cpp
@@ -76,7 +76,7 @@ enum TypeCheckKind {
TCK_DynamicOperation
};
-const char *TypeCheckKinds[] = {
+extern const char *const TypeCheckKinds[] = {
"load of", "store to", "reference binding to", "member access within",
"member call on", "constructor call on", "downcast of", "downcast of",
"upcast of", "cast to virtual base of", "_Nonnull binding to",
@@ -915,4 +915,39 @@ void __ubsan::__ubsan_handle_cfi_check_fail_abort(CFICheckFailData *Data,
Die();
}
+static bool handleFunctionTypeMismatch(FunctionTypeMismatchData *Data,
+ ValueHandle Function,
+ ReportOptions Opts) {
+ SourceLocation CallLoc = Data->Loc.acquire();
+ ErrorType ET = ErrorType::FunctionTypeMismatch;
+ if (ignoreReport(CallLoc, Opts, ET))
+ return true;
+
+ ScopedReport R(Opts, CallLoc, ET);
+
+ SymbolizedStackHolder FLoc(getSymbolizedLocation(Function));
+ const char *FName = FLoc.get()->info.function;
+ if (!FName)
+ FName = "(unknown)";
+
+ Diag(CallLoc, DL_Error, ET,
+ "call to function %0 through pointer to incorrect function type %1")
+ << FName << Data->Type;
+ Diag(FLoc, DL_Note, ET, "%0 defined here") << FName;
+ return true;
+}
+
+void __ubsan::__ubsan_handle_function_type_mismatch(
+ FunctionTypeMismatchData *Data, ValueHandle Function) {
+ GET_REPORT_OPTIONS(false);
+ handleFunctionTypeMismatch(Data, Function, Opts);
+}
+
+void __ubsan::__ubsan_handle_function_type_mismatch_abort(
+ FunctionTypeMismatchData *Data, ValueHandle Function) {
+ GET_REPORT_OPTIONS(true);
+ if (handleFunctionTypeMismatch(Data, Function, Opts))
+ Die();
+}
+
#endif // CAN_SANITIZE_UB
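// A hedged usage sketch: with -fsanitize=function, the signature check now
// happens in instrumented code before the call, so the handler added above is
// reached only on an actual mismatch, e.g.:
//
//   clang++ -fsanitize=function example.cpp
void callee(int) {}

int main() {
  using WrongFn = void (*)(double);
  WrongFn f = reinterpret_cast<WrongFn>(&callee);
  f(1.0); // runtime error: call through pointer to incorrect function type
}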
diff --git a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers.h b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers.h
index 219fb15de55f..3bd5046de3d7 100644
--- a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers.h
+++ b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers.h
@@ -231,6 +231,17 @@ extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __ubsan_handle_cfi_bad_type(
CFICheckFailData *Data, ValueHandle Vtable, bool ValidVtable,
ReportOptions Opts);
+struct FunctionTypeMismatchData {
+ SourceLocation Loc;
+ const TypeDescriptor &Type;
+};
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
+__ubsan_handle_function_type_mismatch(FunctionTypeMismatchData *Data,
+ ValueHandle Val);
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
+__ubsan_handle_function_type_mismatch_abort(FunctionTypeMismatchData *Data,
+ ValueHandle Val);
}
#endif // UBSAN_HANDLERS_H
diff --git a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers_cxx.cpp b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers_cxx.cpp
index 2a6d558de034..206a0bb485a9 100644
--- a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers_cxx.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers_cxx.cpp
@@ -26,7 +26,7 @@ using namespace __sanitizer;
using namespace __ubsan;
namespace __ubsan {
- extern const char *TypeCheckKinds[];
+ extern const char *const TypeCheckKinds[];
}
// Returns true if UBSan has printed an error report.
@@ -156,50 +156,6 @@ void __ubsan_handle_cfi_bad_type(CFICheckFailData *Data, ValueHandle Vtable,
Diag(Loc, DL_Note, ET, "check failed in %0, vtable located in %1")
<< SrcModule << DstModule;
}
-
-static bool handleFunctionTypeMismatch(FunctionTypeMismatchData *Data,
- ValueHandle Function,
- ValueHandle calleeRTTI,
- ValueHandle fnRTTI, ReportOptions Opts) {
- if (checkTypeInfoEquality(reinterpret_cast<void *>(calleeRTTI),
- reinterpret_cast<void *>(fnRTTI)))
- return false;
-
- SourceLocation CallLoc = Data->Loc.acquire();
- ErrorType ET = ErrorType::FunctionTypeMismatch;
-
- if (ignoreReport(CallLoc, Opts, ET))
- return true;
-
- ScopedReport R(Opts, CallLoc, ET);
-
- SymbolizedStackHolder FLoc(getSymbolizedLocation(Function));
- const char *FName = FLoc.get()->info.function;
- if (!FName)
- FName = "(unknown)";
-
- Diag(CallLoc, DL_Error, ET,
- "call to function %0 through pointer to incorrect function type %1")
- << FName << Data->Type;
- Diag(FLoc, DL_Note, ET, "%0 defined here") << FName;
- return true;
-}
-
-void __ubsan_handle_function_type_mismatch_v1(FunctionTypeMismatchData *Data,
- ValueHandle Function,
- ValueHandle calleeRTTI,
- ValueHandle fnRTTI) {
- GET_REPORT_OPTIONS(false);
- handleFunctionTypeMismatch(Data, Function, calleeRTTI, fnRTTI, Opts);
-}
-
-void __ubsan_handle_function_type_mismatch_v1_abort(
- FunctionTypeMismatchData *Data, ValueHandle Function,
- ValueHandle calleeRTTI, ValueHandle fnRTTI) {
- GET_REPORT_OPTIONS(true);
- if (handleFunctionTypeMismatch(Data, Function, calleeRTTI, fnRTTI, Opts))
- Die();
-}
} // namespace __ubsan
#endif // CAN_SANITIZE_UB
diff --git a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers_cxx.h b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers_cxx.h
index f7b9fc54f472..71695cbdc090 100644
--- a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers_cxx.h
+++ b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_handlers_cxx.h
@@ -33,22 +33,6 @@ void __ubsan_handle_dynamic_type_cache_miss(
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __ubsan_handle_dynamic_type_cache_miss_abort(
DynamicTypeCacheMissData *Data, ValueHandle Pointer, ValueHandle Hash);
-
-struct FunctionTypeMismatchData {
- SourceLocation Loc;
- const TypeDescriptor &Type;
-};
-
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
-__ubsan_handle_function_type_mismatch_v1(FunctionTypeMismatchData *Data,
- ValueHandle Val,
- ValueHandle calleeRTTI,
- ValueHandle fnRTTI);
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
-__ubsan_handle_function_type_mismatch_v1_abort(FunctionTypeMismatchData *Data,
- ValueHandle Val,
- ValueHandle calleeRTTI,
- ValueHandle fnRTTI);
}
-#endif // UBSAN_HANDLERS_H
+#endif // UBSAN_HANDLERS_CXX_H
diff --git a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_init.cpp b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_init.cpp
index 9931d85bf40c..5802d58896f0 100644
--- a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_init.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_init.cpp
@@ -12,13 +12,14 @@
#include "ubsan_platform.h"
#if CAN_SANITIZE_UB
-#include "ubsan_diag.h"
-#include "ubsan_init.h"
-#include "ubsan_flags.h"
#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_interface_internal.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
+#include "ubsan_diag.h"
+#include "ubsan_flags.h"
+#include "ubsan_init.h"
using namespace __ubsan;
diff --git a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_interface.inc b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_interface.inc
index 94337d85017b..cb27feb5d7e9 100644
--- a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_interface.inc
+++ b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_interface.inc
@@ -21,8 +21,8 @@ INTERFACE_FUNCTION(__ubsan_handle_dynamic_type_cache_miss)
INTERFACE_FUNCTION(__ubsan_handle_dynamic_type_cache_miss_abort)
INTERFACE_FUNCTION(__ubsan_handle_float_cast_overflow)
INTERFACE_FUNCTION(__ubsan_handle_float_cast_overflow_abort)
-INTERFACE_FUNCTION(__ubsan_handle_function_type_mismatch_v1)
-INTERFACE_FUNCTION(__ubsan_handle_function_type_mismatch_v1_abort)
+INTERFACE_FUNCTION(__ubsan_handle_function_type_mismatch)
+INTERFACE_FUNCTION(__ubsan_handle_function_type_mismatch_abort)
INTERFACE_FUNCTION(__ubsan_handle_implicit_conversion)
INTERFACE_FUNCTION(__ubsan_handle_implicit_conversion_abort)
INTERFACE_FUNCTION(__ubsan_handle_invalid_builtin)
diff --git a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_monitor.cpp b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_monitor.cpp
index 69dd986f9bdf..caed9726d48b 100644
--- a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_monitor.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_monitor.cpp
@@ -23,7 +23,8 @@ UndefinedBehaviorReport::UndefinedBehaviorReport(const char *IssueKind,
RegisterUndefinedBehaviorReport(this);
// Make a copy of the diagnostic.
- Buffer.append("%s", Msg.data());
+ if (Msg.length())
+ Buffer.Append(Msg.data());
// Let the monitor know that a report is available.
__ubsan_on_report();
diff --git a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_signals_standalone.cpp b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_signals_standalone.cpp
index 2c91db8ca397..354f847fab71 100644
--- a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_signals_standalone.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_signals_standalone.cpp
@@ -34,7 +34,12 @@ void InitializeDeadlySignals() {}
#else
+namespace __ubsan {
+void InitializeDeadlySignals();
+} // namespace __ubsan
+
#define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)
+#define SIGNAL_INTERCEPTOR_ENTER() __ubsan::InitializeDeadlySignals()
#include "sanitizer_common/sanitizer_signal_interceptors.inc"
// TODO(yln): Temporary workaround. Will be removed.
diff --git a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_type_hash_itanium.cpp b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_type_hash_itanium.cpp
index d82b542a020e..468a8fcd603f 100644
--- a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_type_hash_itanium.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_type_hash_itanium.cpp
@@ -17,6 +17,7 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_ptrauth.h"
+#include <stdint.h>
// The following are intended to be binary compatible with the definitions
// given in the Itanium ABI. We make no attempt to be ODR-compatible with
@@ -25,9 +26,20 @@
namespace std {
class type_info {
public:
+ typedef const char *__type_name_t;
virtual ~type_info();
const char *__type_name;
+
+ __type_name_t name() const {
+#if defined(__APPLE__) && defined(__LP64__) && !defined(__x86_64__)
+ uintptr_t __non_unique_rtti_bit =
+ (1ULL << ((__CHAR_BIT__ * sizeof(__type_name_t)) - 1));
+ return (__type_name_t)((uintptr_t)__type_name & ~__non_unique_rtti_bit);
+#else
+ return __type_name;
+#endif
+ }
};
}
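// A standalone sketch of the masking done by name() above: on LP64 Apple
// targets other than x86_64, the top pointer bit of __type_name may tag RTTI
// that is not guaranteed unique, so it must be stripped before comparing
// names. stripNonUniqueRTTIBit is an illustrative helper, not part of this
// file.
#include <cstdint>
#include <cstdio>

static const char *stripNonUniqueRTTIBit(const char *TypeName) {
  const uintptr_t NonUniqueBit = uintptr_t(1) << (sizeof(uintptr_t) * 8 - 1);
  return reinterpret_cast<const char *>(
      reinterpret_cast<uintptr_t>(TypeName) & ~NonUniqueBit);
}

int main() {
  // A tagged pointer, for illustration only; do not dereference it.
  const char *Tagged =
      reinterpret_cast<const char *>((uintptr_t(1) << 63) | 0x1000);
  std::printf("%p\n", static_cast<const void *>(stripNonUniqueRTTIBit(Tagged)));
}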
@@ -117,7 +129,7 @@ static __ubsan::HashValue *getTypeCacheHashTableBucket(__ubsan::HashValue V) {
static bool isDerivedFromAtOffset(const abi::__class_type_info *Derived,
const abi::__class_type_info *Base,
sptr Offset) {
- if (Derived->__type_name == Base->__type_name ||
+ if (Derived->name() == Base->name() ||
__ubsan::checkTypeInfoEquality(Derived, Base))
return Offset == 0;
@@ -254,17 +266,16 @@ __ubsan::getDynamicTypeInfoFromVtable(void *VtablePtr) {
const abi::__class_type_info *ObjectType = findBaseAtOffset(
static_cast<const abi::__class_type_info*>(Vtable->TypeInfo),
-Vtable->Offset);
- return DynamicTypeInfo(Vtable->TypeInfo->__type_name, -Vtable->Offset,
- ObjectType ? ObjectType->__type_name : "<unknown>");
+ return DynamicTypeInfo(Vtable->TypeInfo->name(), -Vtable->Offset,
+ ObjectType ? ObjectType->name() : "<unknown>");
}
bool __ubsan::checkTypeInfoEquality(const void *TypeInfo1,
const void *TypeInfo2) {
auto TI1 = static_cast<const std::type_info *>(TypeInfo1);
auto TI2 = static_cast<const std::type_info *>(TypeInfo2);
- return SANITIZER_NON_UNIQUE_TYPEINFO && TI1->__type_name[0] != '*' &&
- TI2->__type_name[0] != '*' &&
- !internal_strcmp(TI1->__type_name, TI2->__type_name);
+ return SANITIZER_NON_UNIQUE_TYPEINFO && TI1->name()[0] != '*' &&
+ TI2->name()[0] != '*' && !internal_strcmp(TI1->name(), TI2->name());
}
#endif // CAN_SANITIZE_UB && !SANITIZER_WINDOWS
diff --git a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_value.cpp b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_value.cpp
index 40042bf3a903..dc61e5b939d9 100644
--- a/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_value.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/ubsan/ubsan_value.cpp
@@ -18,9 +18,7 @@
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_mutex.h"
-// TODO(dliew): Prefer '__APPLE__' here over 'SANITIZER_MAC', as the latter is
-// unclear. rdar://58124919 tracks using a more obviously portable guard.
-#if defined(__APPLE__)
+#if SANITIZER_APPLE
#include <dlfcn.h>
#endif
@@ -29,7 +27,7 @@ using namespace __ubsan;
typedef const char *(*ObjCGetClassNameTy)(void *);
const char *__ubsan::getObjCClassName(ValueHandle Pointer) {
-#if defined(__APPLE__)
+#if SANITIZER_APPLE
// We need to query the ObjC runtime for some information, but do not want
// to introduce a static dependency from the ubsan runtime onto ObjC. Try to
// grab a handle to the ObjC runtime used by the process.
diff --git a/contrib/llvm-project/compiler-rt/lib/ubsan_minimal/ubsan_minimal_handlers.cpp b/contrib/llvm-project/compiler-rt/lib/ubsan_minimal/ubsan_minimal_handlers.cpp
index 6a1903da62ce..53a3273e4e69 100644
--- a/contrib/llvm-project/compiler-rt/lib/ubsan_minimal/ubsan_minimal_handlers.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/ubsan_minimal/ubsan_minimal_handlers.cpp
@@ -20,9 +20,9 @@ static __sanitizer::atomic_uintptr_t caller_pcs[kMaxCallerPcs];
// that "too many errors" has already been reported.
static __sanitizer::atomic_uint32_t caller_pcs_sz;
-__attribute__((noinline)) static bool report_this_error(void *caller_p) {
- uintptr_t caller = reinterpret_cast<uintptr_t>(caller_p);
- if (caller == 0) return false;
+__attribute__((noinline)) static bool report_this_error(uintptr_t caller) {
+ if (caller == 0)
+ return false;
while (true) {
unsigned sz = __sanitizer::atomic_load_relaxed(&caller_pcs_sz);
if (sz > kMaxCallerPcs) return false; // early exit
@@ -51,6 +51,19 @@ __attribute__((noinline)) static bool report_this_error(void *caller_p) {
}
}
+__attribute__((noinline)) static void decorate_msg(char *buf,
+ uintptr_t caller) {
+ // print the address by nibbles
+ for (unsigned shift = sizeof(uintptr_t) * 8; shift;) {
+ shift -= 4;
+ unsigned nibble = (caller >> shift) & 0xf;
+ *(buf++) = nibble < 10 ? nibble + '0' : nibble - 10 + 'a';
+ }
+ // finish the message
+ buf[0] = '\n';
+ buf[1] = '\0';
+}
+
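// A standalone sketch of the buffer layout introduced here: the template
// reserves "ubsan: <msg> by 0x", decorate_msg then appends
// sizeof(uintptr_t) * 2 hex digits plus "\n\0", which is exactly what the
// MSG_BUF_LEN macro below budgets for. decorateMsg/MsgBuf are local names for
// the sketch only.
#include <cstdint>
#include <cstdio>

static void decorateMsg(char *Buf, uintptr_t Caller) {
  for (unsigned Shift = sizeof(uintptr_t) * 8; Shift;) {
    Shift -= 4;
    unsigned Nibble = (Caller >> Shift) & 0xf;
    *Buf++ = Nibble < 10 ? Nibble + '0' : Nibble - 10 + 'a';
  }
  Buf[0] = '\n';
  Buf[1] = '\0';
}

int main() {
  char MsgBuf[sizeof("ubsan: test by 0x") + sizeof(uintptr_t) * 2 + 1] =
      "ubsan: test by 0x";
  decorateMsg(MsgBuf + sizeof("ubsan: test by 0x") - 1, 0x1234abcd);
  std::fputs(MsgBuf, stdout); // e.g. "ubsan: test by 0x000000001234abcd"
}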
#if defined(__ANDROID__)
extern "C" __attribute__((weak)) void android_set_abort_message(const char *);
static void abort_with_message(const char *msg) {
@@ -76,18 +89,28 @@ void NORETURN CheckFailed(const char *file, int, const char *cond, u64, u64) {
#define INTERFACE extern "C" __attribute__((visibility("default")))
-// FIXME: add caller pc to the error message (possibly as "ubsan: error-type
-// @1234ABCD").
+// How many chars we need to reserve to print an address.
+constexpr unsigned kAddrBuf = SANITIZER_WORDSIZE / 4;
+#define MSG_TMPL(msg) "ubsan: " msg " by 0x"
+#define MSG_TMPL_END(buf, msg) (buf + sizeof(MSG_TMPL(msg)) - 1)
+// Reserve an additional byte for '\n'.
+#define MSG_BUF_LEN(msg) (sizeof(MSG_TMPL(msg)) + kAddrBuf + 1)
+
#define HANDLER_RECOVER(name, msg) \
INTERFACE void __ubsan_handle_##name##_minimal() { \
- if (!report_this_error(__builtin_return_address(0))) return; \
- message("ubsan: " msg "\n"); \
+ uintptr_t caller = GET_CALLER_PC(); \
+ if (!report_this_error(caller)) return; \
+ char msg_buf[MSG_BUF_LEN(msg)] = MSG_TMPL(msg); \
+ decorate_msg(MSG_TMPL_END(msg_buf, msg), caller); \
+ message(msg_buf); \
}
#define HANDLER_NORECOVER(name, msg) \
INTERFACE void __ubsan_handle_##name##_minimal_abort() { \
- message("ubsan: " msg "\n"); \
- abort_with_message("ubsan: " msg); \
+ char msg_buf[MSG_BUF_LEN(msg)] = MSG_TMPL(msg); \
+ decorate_msg(MSG_TMPL_END(msg_buf, msg), GET_CALLER_PC()); \
+ message(msg_buf); \
+ abort_with_message(msg_buf); \
}
#define HANDLER(name, msg) \
diff --git a/contrib/llvm-project/compiler-rt/lib/xray/xray_AArch64.cpp b/contrib/llvm-project/compiler-rt/lib/xray/xray_AArch64.cpp
index 00105d30b4db..c1d77758946e 100644
--- a/contrib/llvm-project/compiler-rt/lib/xray/xray_AArch64.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/xray/xray_AArch64.cpp
@@ -24,7 +24,6 @@ namespace __xray {
// The machine codes for some instructions used in runtime patching.
enum class PatchOpcodes : uint32_t {
PO_StpX0X30SP_m16e = 0xA9BF7BE0, // STP X0, X30, [SP, #-16]!
- PO_LdrW0_12 = 0x18000060, // LDR W0, #12
PO_LdrX16_12 = 0x58000070, // LDR X16, #12
PO_BlrX16 = 0xD63F0200, // BLR X16
PO_LdpX0X30SP_16 = 0xA8C17BE0, // LDP X0, X30, [SP], #16
@@ -45,7 +44,7 @@ inline static bool patchSled(const bool Enable, const uint32_t FuncId,
//
// xray_sled_n:
// STP X0, X30, [SP, #-16]! ; PUSH {r0, lr}
- // LDR W0, #12 ; W0 := function ID
+ // LDR W17, #12 ; W17 := function ID
// LDR X16,#12 ; X16 := address of the trampoline
// BLR X16
// ;DATA: 32 bits of function ID
@@ -64,8 +63,7 @@ inline static bool patchSled(const bool Enable, const uint32_t FuncId,
uint32_t *FirstAddress = reinterpret_cast<uint32_t *>(Sled.address());
uint32_t *CurAddress = FirstAddress + 1;
if (Enable) {
- *CurAddress = uint32_t(PatchOpcodes::PO_LdrW0_12);
- CurAddress++;
+ *CurAddress++ = 0x18000071; // ldr w17, #12
*CurAddress = uint32_t(PatchOpcodes::PO_LdrX16_12);
CurAddress++;
*CurAddress = uint32_t(PatchOpcodes::PO_BlrX16);
@@ -105,15 +103,37 @@ bool patchFunctionTailExit(const bool Enable, const uint32_t FuncId,
return patchSled(Enable, FuncId, Sled, __xray_FunctionTailExit);
}
+// AArch64AsmPrinter::LowerPATCHABLE_EVENT_CALL generates this code sequence:
+//
+// .Lxray_event_sled_N:
+// b 1f
+// save x0 and x1 (and also x2 for TYPED_EVENT_CALL)
+// set up x0 and x1 (and also x2 for TYPED_EVENT_CALL)
+// bl __xray_CustomEvent or __xray_TypedEvent
+// restore x0 and x1 (and also x2 for TYPED_EVENT_CALL)
+// 1:
+//
+// There are 6 instructions for EVENT_CALL and 9 for TYPED_EVENT_CALL.
+//
+// Enable: b .+24 => nop
+// Disable: nop => b .+24
bool patchCustomEvent(const bool Enable, const uint32_t FuncId,
- const XRaySledEntry &Sled)
- XRAY_NEVER_INSTRUMENT { // FIXME: Implement in aarch64?
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ uint32_t Inst = Enable ? 0xd503201f : 0x14000006;
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint32_t> *>(Sled.address()), Inst,
+ std::memory_order_release);
return false;
}
+// Enable: b +36 => nop
+// Disable: nop => b +36
bool patchTypedEvent(const bool Enable, const uint32_t FuncId,
const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
- // FIXME: Implement in aarch64?
+ uint32_t Inst = Enable ? 0xd503201f : 0x14000009;
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint32_t> *>(Sled.address()), Inst,
+ std::memory_order_release);
return false;
}
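// A sketch of the branch encodings chosen above: AArch64 "b #imm" places
// imm/4 in the low 26 bits of opcode 0x14000000, so 0x14000006 skips the
// remainder of the 6-instruction custom-event sled (24 bytes) and 0x14000009
// skips the 9-instruction typed-event sled (36 bytes); 0xd503201f is "nop".
#include <cstdint>
#include <cstdio>

constexpr uint32_t encodeB(int32_t ByteOffset) {
  return 0x14000000u | (static_cast<uint32_t>(ByteOffset / 4) & 0x03ffffffu);
}

int main() {
  std::printf("%#x %#x\n", encodeB(24), encodeB(36)); // 0x14000006 0x14000009
}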
@@ -121,7 +141,3 @@ bool patchTypedEvent(const bool Enable, const uint32_t FuncId,
bool probeRequiredCPUFeatures() XRAY_NEVER_INSTRUMENT { return true; }
} // namespace __xray
-
-extern "C" void __xray_ArgLoggerEntry() XRAY_NEVER_INSTRUMENT {
- // FIXME: this will have to be implemented in the trampoline assembly file
-}
diff --git a/contrib/llvm-project/compiler-rt/lib/xray/xray_allocator.h b/contrib/llvm-project/compiler-rt/lib/xray/xray_allocator.h
index 4b42c473261d..0284f4299fb1 100644
--- a/contrib/llvm-project/compiler-rt/lib/xray/xray_allocator.h
+++ b/contrib/llvm-project/compiler-rt/lib/xray/xray_allocator.h
@@ -65,9 +65,9 @@ template <class T> T *allocate() XRAY_NEVER_INSTRUMENT {
int ErrNo = 0;
if (UNLIKELY(internal_iserror(B, &ErrNo))) {
if (Verbosity())
- Report(
- "XRay Profiling: Failed to allocate memory of size %d; Error = %d.\n",
- RoundedSize, B);
+ Report("XRay Profiling: Failed to allocate memory of size %zu; Error = "
+ "%zu\n",
+ RoundedSize, B);
return nullptr;
}
#endif
@@ -114,9 +114,9 @@ T *allocateBuffer(size_t S) XRAY_NEVER_INSTRUMENT {
int ErrNo = 0;
if (UNLIKELY(internal_iserror(B, &ErrNo))) {
if (Verbosity())
- Report(
- "XRay Profiling: Failed to allocate memory of size %d; Error = %d.\n",
- RoundedSize, B);
+ Report("XRay Profiling: Failed to allocate memory of size %zu; Error = "
+ "%zu\n",
+ RoundedSize, B);
return nullptr;
}
#endif
@@ -183,7 +183,7 @@ private:
BackingStore = allocateBuffer(MaxMemory);
if (BackingStore == nullptr) {
if (Verbosity())
- Report("XRay Profiling: Failed to allocate memory for allocator.\n");
+ Report("XRay Profiling: Failed to allocate memory for allocator\n");
return nullptr;
}
@@ -198,7 +198,7 @@ private:
AlignedNextBlock = BackingStore = nullptr;
if (Verbosity())
Report("XRay Profiling: Cannot obtain enough memory from "
- "preallocated region.\n");
+ "preallocated region\n");
return nullptr;
}
diff --git a/contrib/llvm-project/compiler-rt/lib/xray/xray_basic_flags.h b/contrib/llvm-project/compiler-rt/lib/xray/xray_basic_flags.h
index 2459effa8bae..b846c1233e8a 100644
--- a/contrib/llvm-project/compiler-rt/lib/xray/xray_basic_flags.h
+++ b/contrib/llvm-project/compiler-rt/lib/xray/xray_basic_flags.h
@@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file is a part of XRay, a dynamic runtime instruementation system.
+// This file is a part of XRay, a dynamic runtime instrumentation system.
//
// XRay Basic Mode runtime flags.
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm-project/compiler-rt/lib/xray/xray_basic_logging.cpp b/contrib/llvm-project/compiler-rt/lib/xray/xray_basic_logging.cpp
index a58ae9b5e267..6ac5417bef75 100644
--- a/contrib/llvm-project/compiler-rt/lib/xray/xray_basic_logging.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/xray/xray_basic_logging.cpp
@@ -18,7 +18,7 @@
#include <fcntl.h>
#include <pthread.h>
#include <sys/stat.h>
-#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_MAC
+#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_APPLE
#include <sys/syscall.h>
#endif
#include <sys/types.h>
@@ -345,12 +345,12 @@ static void TLDDestructor(void *P) XRAY_NEVER_INSTRUMENT {
if (TLD.ShadowStack)
InternalFree(TLD.ShadowStack);
if (Verbosity())
- Report("Cleaned up log for TID: %d\n", GetTid());
+ Report("Cleaned up log for TID: %llu\n", GetTid());
});
if (TLD.LogWriter == nullptr || TLD.BufferOffset == 0) {
if (Verbosity())
- Report("Skipping buffer for TID: %d; Offset = %llu\n", GetTid(),
+ Report("Skipping buffer for TID: %llu; Offset = %zu\n", GetTid(),
TLD.BufferOffset);
return;
}
diff --git a/contrib/llvm-project/compiler-rt/lib/xray/xray_buffer_queue.cpp b/contrib/llvm-project/compiler-rt/lib/xray/xray_buffer_queue.cpp
index bad91e036cef..748708ccd0f4 100644
--- a/contrib/llvm-project/compiler-rt/lib/xray/xray_buffer_queue.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/xray/xray_buffer_queue.cpp
@@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file is a part of XRay, a dynamic runtime instruementation system.
+// This file is a part of XRay, a dynamic runtime instrumentation system.
//
// Defines the interface for a buffer queue implementation.
//
diff --git a/contrib/llvm-project/compiler-rt/lib/xray/xray_fdr_logging.cpp b/contrib/llvm-project/compiler-rt/lib/xray/xray_fdr_logging.cpp
index 799814f437f9..378a8c0f4a70 100644
--- a/contrib/llvm-project/compiler-rt/lib/xray/xray_fdr_logging.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/xray/xray_fdr_logging.cpp
@@ -15,6 +15,7 @@
//===----------------------------------------------------------------------===//
#include "xray_fdr_logging.h"
#include <cassert>
+#include <cstddef>
#include <errno.h>
#include <limits>
#include <memory>
@@ -140,7 +141,7 @@ static ThreadLocalData &getThreadLocalData() {
}
static XRayFileHeader &fdrCommonHeaderInfo() {
- static std::aligned_storage<sizeof(XRayFileHeader)>::type HStorage;
+ alignas(XRayFileHeader) static std::byte HStorage[sizeof(XRayFileHeader)];
static pthread_once_t OnceInit = PTHREAD_ONCE_INIT;
static bool TSCSupported = true;
static uint64_t CycleFrequency = NanosecondsPerSecond;
@@ -204,7 +205,8 @@ XRayBuffer fdrIterator(const XRayBuffer B) {
// initialized the first time this function is called. We'll update one part
// of this information with some relevant data (in particular the number of
// buffers to expect).
- static std::aligned_storage<sizeof(XRayFileHeader)>::type HeaderStorage;
+ alignas(
+ XRayFileHeader) static std::byte HeaderStorage[sizeof(XRayFileHeader)];
static pthread_once_t HeaderOnce = PTHREAD_ONCE_INIT;
pthread_once(
&HeaderOnce, +[] {
@@ -580,9 +582,9 @@ void fdrLoggingHandleCustomEvent(void *Event,
TLD.Controller->customEvent(TSC, CPU, Event, ReducedEventSize);
}
-void fdrLoggingHandleTypedEvent(
- uint16_t EventType, const void *Event,
- std::size_t EventSize) noexcept XRAY_NEVER_INSTRUMENT {
+void fdrLoggingHandleTypedEvent(size_t EventType, const void *Event,
+ size_t EventSize) noexcept
+ XRAY_NEVER_INSTRUMENT {
auto TC = getTimestamp();
auto &TSC = TC.TSC;
auto &CPU = TC.CPU;
@@ -607,7 +609,8 @@ void fdrLoggingHandleTypedEvent(
return;
int32_t ReducedEventSize = static_cast<int32_t>(EventSize);
- TLD.Controller->typedEvent(TSC, CPU, EventType, Event, ReducedEventSize);
+ TLD.Controller->typedEvent(TSC, CPU, static_cast<uint16_t>(EventType), Event,
+ ReducedEventSize);
}
XRayLogInitStatus fdrLoggingInit(size_t, size_t, void *Options,
diff --git a/contrib/llvm-project/compiler-rt/lib/xray/xray_flags.h b/contrib/llvm-project/compiler-rt/lib/xray/xray_flags.h
index edb5a5119f86..cce6fe9d62f9 100644
--- a/contrib/llvm-project/compiler-rt/lib/xray/xray_flags.h
+++ b/contrib/llvm-project/compiler-rt/lib/xray/xray_flags.h
@@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This file is a part of XRay, a dynamic runtime instruementation system.
+// This file is a part of XRay, a dynamic runtime instrumentation system.
//
// XRay runtime flags.
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm-project/compiler-rt/lib/xray/xray_hexagon.cpp b/contrib/llvm-project/compiler-rt/lib/xray/xray_hexagon.cpp
new file mode 100644
index 000000000000..7f127b2b499c
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/xray/xray_hexagon.cpp
@@ -0,0 +1,168 @@
+//===-- xray_hexagon.cpp --------------------------------------*- C++ ---*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// Implementation of hexagon-specific routines (32-bit).
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_common.h"
+#include "xray_defs.h"
+#include "xray_interface_internal.h"
+#include <assert.h>
+#include <atomic>
+
+namespace __xray {
+
+// The machine codes for some instructions used in runtime patching.
+enum PatchOpcodes : uint32_t {
+ PO_JUMPI_14 = 0x5800c00a, // jump #0x014 (PC + 0x014)
+ PO_CALLR_R6 = 0x50a6c000, // indirect call: callr r6
+ PO_TFR_IMM = 0x78000000, // transfer immediate (ICLASS 0x7 - S2-type A-type)
+ PO_IMMEXT = 0x00000000, // constant extender
+};
+
+enum PacketWordParseBits : uint32_t {
+ PP_DUPLEX = 0x00 << 14,
+ PP_NOT_END = 0x01 << 14,
+ PP_PACKET_END = 0x03 << 14,
+};
+
+enum RegNum : uint32_t {
+ RN_R6 = 0x6,
+ RN_R7 = 0x7,
+};
+
+inline static uint32_t
+encodeExtendedTransferImmediate(uint32_t Imm, RegNum DestReg,
+ bool PacketEnd = false) XRAY_NEVER_INSTRUMENT {
+ static const uint32_t REG_MASK = 0x1f;
+ assert((DestReg & (~REG_MASK)) == 0);
+ // The constant-extended register transfer encodes the 6 least
+ // significant bits of the effective constant:
+ Imm = Imm & 0x03f;
+ const PacketWordParseBits ParseBits = PacketEnd ? PP_PACKET_END : PP_NOT_END;
+
+ return PO_TFR_IMM | ParseBits | (Imm << 5) | (DestReg & REG_MASK);
+}
+
+inline static uint32_t
+encodeConstantExtender(uint32_t Imm) XRAY_NEVER_INSTRUMENT {
+ // Bits Name Description
+ // ----- ------- ------------------------------------------
+ // 31:28 ICLASS Instruction class = 0000
+ // 27:16 high High 12 bits of 26-bit constant extension
+ // 15:14 Parse Parse bits
+ // 13:0 low Low 14 bits of 26-bit constant extension
+ static const uint32_t IMM_MASK_LOW = 0x03fff;
+ static const uint32_t IMM_MASK_HIGH = 0x00fff << 14;
+
+ // The extender encodes the 26 most significant bits of the effective
+ // constant:
+ Imm = Imm >> 6;
+
+ const uint32_t high = (Imm & IMM_MASK_HIGH) << 2; // place the field at bits 27:16
+ const uint32_t low = Imm & IMM_MASK_LOW;
+
+ return PO_IMMEXT | high | PP_NOT_END | low;
+}
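// A worked example of the 26/6 split performed by the two encoders above: the
// constant extender carries Imm >> 6 and the extended transfer keeps only the
// low 6 bits, so together they reconstruct the full 32-bit value.
#include <cstdint>
#include <cstdio>

int main() {
  uint32_t Imm = 0x12345678;
  uint32_t ExtenderBits = Imm >> 6;   // 26 bits carried by immext(#...)
  uint32_t TransferBits = Imm & 0x3f; // 6 bits kept in "rX = ##..."
  // Recombining: (ExtenderBits << 6) | TransferBits == Imm
  std::printf("ext=%#x tfr=%#x\n", ExtenderBits, TransferBits);
}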
+
+static void WriteInstFlushCache(void *Addr, uint32_t NewInstruction) {
+ asm volatile("icinva(%[inst_addr])\n\t"
+ "isync\n\t"
+ "memw(%[inst_addr]) = %[new_inst]\n\t"
+ "dccleaninva(%[inst_addr])\n\t"
+ "syncht\n\t"
+ :
+ : [ inst_addr ] "r"(Addr), [ new_inst ] "r"(NewInstruction)
+ : "memory");
+}
+
+inline static bool patchSled(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled,
+ void (*TracingHook)()) XRAY_NEVER_INSTRUMENT {
+ // When |Enable| == true,
+ // We replace the following compile-time stub (sled):
+ //
+ // .L_xray_sled_N:
+ // <xray_sled_base>:
+ // { jump .Ltmp0 }
+ // { nop
+ // nop
+ // nop
+ // nop }
+ // .Ltmp0:
+
+ // With the following runtime patch:
+ //
+ // xray_sled_n (32-bit):
+ //
+ // <xray_sled_n>:
+ // { immext(#...) // upper 26-bits of func id
+ // r7 = ##... // lower 6-bits of func id
+ // immext(#...) // upper 26-bits of trampoline
+ // r6 = ##... } // lower 6 bits of trampoline
+ // { callr r6 }
+ //
+ // When |Enable| == false, we set the first instruction in the sled back to
+ // { jump .Ltmp0 }
+
+ uint32_t *FirstAddress = reinterpret_cast<uint32_t *>(Sled.address());
+ if (Enable) {
+ uint32_t *CurAddress = FirstAddress + 1;
+ *CurAddress = encodeExtendedTransferImmediate(FuncId, RN_R7);
+ CurAddress++;
+ *CurAddress = encodeConstantExtender(reinterpret_cast<uint32_t>(TracingHook));
+ CurAddress++;
+ *CurAddress =
+ encodeExtendedTransferImmediate(reinterpret_cast<uint32_t>(TracingHook), RN_R6, true);
+ CurAddress++;
+
+ *CurAddress = uint32_t(PO_CALLR_R6);
+
+ WriteInstFlushCache(FirstAddress, uint32_t(encodeConstantExtender(FuncId)));
+ } else {
+ WriteInstFlushCache(FirstAddress, uint32_t(PatchOpcodes::PO_JUMPI_14));
+ }
+ return true;
+}
+
+bool patchFunctionEntry(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled,
+ void (*Trampoline)()) XRAY_NEVER_INSTRUMENT {
+ return patchSled(Enable, FuncId, Sled, Trampoline);
+}
+
+bool patchFunctionExit(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ return patchSled(Enable, FuncId, Sled, __xray_FunctionExit);
+}
+
+bool patchFunctionTailExit(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ return patchSled(Enable, FuncId, Sled, __xray_FunctionExit);
+}
+
+bool patchCustomEvent(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ // FIXME: Implement in hexagon?
+ return false;
+}
+
+bool patchTypedEvent(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ // FIXME: Implement in hexagon?
+ return false;
+}
+
+} // namespace __xray
+
+extern "C" void __xray_ArgLoggerEntry() XRAY_NEVER_INSTRUMENT {
+ // FIXME: this will have to be implemented in the trampoline assembly file
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/xray/xray_init.cpp b/contrib/llvm-project/compiler-rt/lib/xray/xray_init.cpp
index 00ba5fe4a52b..f22a31b95686 100644
--- a/contrib/llvm-project/compiler-rt/lib/xray/xray_init.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/xray/xray_init.cpp
@@ -27,7 +27,7 @@ extern const XRaySledEntry __stop_xray_instr_map[] __attribute__((weak));
extern const XRayFunctionSledIndex __start_xray_fn_idx[] __attribute__((weak));
extern const XRayFunctionSledIndex __stop_xray_fn_idx[] __attribute__((weak));
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
// HACK: This is a temporary workaround to make XRay build on
// Darwin, but it will probably not work at runtime.
const XRaySledEntry __start_xray_instr_map[] = {};
diff --git a/contrib/llvm-project/compiler-rt/lib/xray/xray_interface.cpp b/contrib/llvm-project/compiler-rt/lib/xray/xray_interface.cpp
index 7669b9ab82be..5839043fcb93 100644
--- a/contrib/llvm-project/compiler-rt/lib/xray/xray_interface.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/xray/xray_interface.cpp
@@ -14,7 +14,7 @@
#include "xray_interface_internal.h"
-#include <cstdint>
+#include <cinttypes>
#include <cstdio>
#include <errno.h>
#include <limits>
@@ -46,12 +46,16 @@ static const int16_t cSledLength = 12;
static const int16_t cSledLength = 32;
#elif defined(__arm__)
static const int16_t cSledLength = 28;
+#elif SANITIZER_LOONGARCH64
+static const int16_t cSledLength = 48;
#elif SANITIZER_MIPS32
static const int16_t cSledLength = 48;
#elif SANITIZER_MIPS64
static const int16_t cSledLength = 64;
#elif defined(__powerpc64__)
static const int16_t cSledLength = 8;
+#elif defined(__hexagon__)
+static const int16_t cSledLength = 20;
#else
#error "Unsupported CPU Architecture"
#endif /* CPU architecture */
@@ -169,7 +173,8 @@ bool patchSled(const XRaySledEntry &Sled, bool Enable,
Success = patchTypedEvent(Enable, FuncId, Sled);
break;
default:
- Report("Unsupported sled kind '%d' @%04x\n", Sled.Address, int(Sled.Kind));
+ Report("Unsupported sled kind '%" PRIu64 "' @%04x\n", Sled.Address,
+ int(Sled.Kind));
return false;
}
return Success;
@@ -180,7 +185,7 @@ findFunctionSleds(int32_t FuncId,
const XRaySledMap &InstrMap) XRAY_NEVER_INSTRUMENT {
int32_t CurFn = 0;
uint64_t LastFnAddr = 0;
- XRayFunctionSledIndex Index = {nullptr, nullptr};
+ XRayFunctionSledIndex Index = {nullptr, 0};
for (std::size_t I = 0; I < InstrMap.Entries && CurFn <= FuncId; I++) {
const auto &Sled = InstrMap.Sleds[I];
@@ -193,12 +198,10 @@ findFunctionSleds(int32_t FuncId,
if (CurFn == FuncId) {
if (Index.Begin == nullptr)
Index.Begin = &Sled;
- Index.End = &Sled;
+ Index.Size = &Sled - Index.Begin + 1;
}
}
- Index.End += 1;
-
return Index;
}
@@ -232,13 +235,17 @@ XRayPatchingStatus patchFunction(int32_t FuncId,
}
// Now we patch the sleds for this specific function.
- auto SledRange = InstrMap.SledsIndex ? InstrMap.SledsIndex[FuncId - 1]
- : findFunctionSleds(FuncId, InstrMap);
+ XRayFunctionSledIndex SledRange;
+ if (InstrMap.SledsIndex) {
+ SledRange = {InstrMap.SledsIndex[FuncId - 1].fromPCRelative(),
+ InstrMap.SledsIndex[FuncId - 1].Size};
+ } else {
+ SledRange = findFunctionSleds(FuncId, InstrMap);
+ }
auto *f = SledRange.Begin;
- auto *e = SledRange.End;
bool SucceedOnce = false;
- while (f != e)
- SucceedOnce |= patchSled(*f++, Enable, FuncId);
+ for (size_t i = 0; i != SledRange.Size; ++i)
+ SucceedOnce |= patchSled(f[i], Enable, FuncId);
atomic_store(&XRayPatching, false,
memory_order_release);
@@ -305,7 +312,7 @@ XRayPatchingStatus controlPatching(bool Enable) XRAY_NEVER_INSTRUMENT {
? flags()->xray_page_size_override
: GetPageSizeCached();
if ((PageSize == 0) || ((PageSize & (PageSize - 1)) != 0)) {
- Report("System page size is not a power of two: %lld\n", PageSize);
+ Report("System page size is not a power of two: %zu\n", PageSize);
return XRayPatchingStatus::FAILED;
}
@@ -356,18 +363,23 @@ XRayPatchingStatus mprotectAndPatchFunction(int32_t FuncId,
? flags()->xray_page_size_override
: GetPageSizeCached();
if ((PageSize == 0) || ((PageSize & (PageSize - 1)) != 0)) {
- Report("Provided page size is not a power of two: %lld\n", PageSize);
+ Report("Provided page size is not a power of two: %zu\n", PageSize);
return XRayPatchingStatus::FAILED;
}
- // Here we compute the minumum sled and maximum sled associated with a
+ // Here we compute the minimum sled and maximum sled associated with a
// particular function ID.
- auto SledRange = InstrMap.SledsIndex ? InstrMap.SledsIndex[FuncId - 1]
- : findFunctionSleds(FuncId, InstrMap);
+ XRayFunctionSledIndex SledRange;
+ if (InstrMap.SledsIndex) {
+ SledRange = {InstrMap.SledsIndex[FuncId - 1].fromPCRelative(),
+ InstrMap.SledsIndex[FuncId - 1].Size};
+ } else {
+ SledRange = findFunctionSleds(FuncId, InstrMap);
+ }
auto *f = SledRange.Begin;
- auto *e = SledRange.End;
+ auto *e = SledRange.Begin + SledRange.Size;
auto *MinSled = f;
- auto *MaxSled = (SledRange.End - 1);
+ auto *MaxSled = e - 1;
while (f != e) {
if (f->address() < MinSled->address())
MinSled = f;
@@ -423,8 +435,8 @@ int __xray_set_customevent_handler(void (*entry)(void *, size_t))
return 0;
}
-int __xray_set_typedevent_handler(void (*entry)(
- uint16_t, const void *, size_t)) XRAY_NEVER_INSTRUMENT {
+int __xray_set_typedevent_handler(void (*entry)(size_t, const void *,
+ size_t)) XRAY_NEVER_INSTRUMENT {
if (atomic_load(&XRayInitialized,
memory_order_acquire)) {
atomic_store(&__xray::XRayPatchedTypedEvent,
@@ -499,9 +511,9 @@ uintptr_t __xray_function_address(int32_t FuncId) XRAY_NEVER_INSTRUMENT {
if (FuncId <= 0 || static_cast<size_t>(FuncId) > InstrMap.Functions)
return 0;
- const XRaySledEntry *Sled = InstrMap.SledsIndex
- ? InstrMap.SledsIndex[FuncId - 1].Begin
- : findFunctionSleds(FuncId, InstrMap).Begin;
+ const XRaySledEntry *Sled =
+ InstrMap.SledsIndex ? InstrMap.SledsIndex[FuncId - 1].fromPCRelative()
+ : findFunctionSleds(FuncId, InstrMap).Begin;
return Sled->function()
// On PPC, function entries are always aligned to 16 bytes. The beginning of a
// sled might be a local entry, which is always +8 based on the global entry.
diff --git a/contrib/llvm-project/compiler-rt/lib/xray/xray_interface_internal.h b/contrib/llvm-project/compiler-rt/lib/xray/xray_interface_internal.h
index 390f389b1dca..80c07c167f64 100644
--- a/contrib/llvm-project/compiler-rt/lib/xray/xray_interface_internal.h
+++ b/contrib/llvm-project/compiler-rt/lib/xray/xray_interface_internal.h
@@ -30,14 +30,10 @@ struct XRaySledEntry {
unsigned char Version;
unsigned char Padding[13]; // Need 32 bytes
uint64_t function() const {
- if (Version < 2)
- return Function;
// The target address is relative to the location of the Function variable.
return reinterpret_cast<uint64_t>(&Function) + Function;
}
uint64_t address() const {
- if (Version < 2)
- return Address;
// The target address is relative to the location of the Address variable.
return reinterpret_cast<uint64_t>(&Address) + Address;
}
@@ -49,14 +45,10 @@ struct XRaySledEntry {
unsigned char Version;
unsigned char Padding[5]; // Need 16 bytes
uint32_t function() const {
- if (Version < 2)
- return Function;
// The target address is relative to the location of the Function variable.
return reinterpret_cast<uint32_t>(&Function) + Function;
}
uint32_t address() const {
- if (Version < 2)
- return Address;
// The target address is relative to the location of the Address variable.
return reinterpret_cast<uint32_t>(&Address) + Address;
}
@@ -67,7 +59,13 @@ struct XRaySledEntry {
struct XRayFunctionSledIndex {
const XRaySledEntry *Begin;
- const XRaySledEntry *End;
+ size_t Size;
+ // For an entry in the xray_fn_idx section, the address is relative to the
+ // location of the Begin variable.
+ const XRaySledEntry *fromPCRelative() const {
+ return reinterpret_cast<const XRaySledEntry *>(uintptr_t(&Begin) +
+ uintptr_t(Begin));
+ }
};
}
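// A standalone sketch of the self-relative encoding decoded by
// fromPCRelative() above: an xray_fn_idx entry now stores "target minus field
// address" plus a sled count instead of two absolute pointers, which keeps
// the section free of load-time relocations. PCRelIndex is a stand-in type
// for the sketch.
#include <cstddef>
#include <cstdint>
#include <cstdio>

struct PCRelIndex {
  intptr_t BeginOffset; // target address minus the address of this field
  size_t Size;
  const void *begin() const {
    return reinterpret_cast<const void *>(
        reinterpret_cast<intptr_t>(&BeginOffset) + BeginOffset);
  }
};

int main() {
  static int Sleds[4];
  PCRelIndex Idx{0, 4};
  Idx.BeginOffset = reinterpret_cast<intptr_t>(Sleds) -
                    reinterpret_cast<intptr_t>(&Idx.BeginOffset);
  std::printf("%d\n", Idx.begin() == Sleds); // 1
}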
diff --git a/contrib/llvm-project/compiler-rt/lib/xray/xray_loongarch64.cpp b/contrib/llvm-project/compiler-rt/lib/xray/xray_loongarch64.cpp
new file mode 100644
index 000000000000..b839adba00d2
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/xray/xray_loongarch64.cpp
@@ -0,0 +1,160 @@
+//===-------- xray_loongarch64.cpp ------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// Implementation of loongarch-specific routines.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_common.h"
+#include "xray_defs.h"
+#include "xray_interface_internal.h"
+#include <atomic>
+
+namespace __xray {
+
+enum RegNum : uint32_t {
+ RN_RA = 1,
+ RN_SP = 3,
+ RN_T0 = 12,
+ RN_T1 = 13,
+};
+
+// Encode instructions in the 2RIx format, where the primary formats here
+// are 2RI12-type and 2RI16-type.
+static inline uint32_t
+encodeInstruction2RIx(uint32_t Opcode, uint32_t Rd, uint32_t Rj,
+ uint32_t Imm) XRAY_NEVER_INSTRUMENT {
+ return Opcode | (Imm << 10) | (Rj << 5) | Rd;
+}
+
+// Encode instructions in 1RI20 format, e.g. lu12i.w/lu32i.d.
+static inline uint32_t
+encodeInstruction1RI20(uint32_t Opcode, uint32_t Rd,
+ uint32_t Imm) XRAY_NEVER_INSTRUMENT {
+ return Opcode | (Imm << 5) | Rd;
+}
+
+static inline bool patchSled(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled,
+ void (*TracingHook)()) XRAY_NEVER_INSTRUMENT {
+ // When |Enable| == true,
+ // We replace the following compile-time stub (sled):
+ //
+ // .Lxray_sled_beginN:
+ // B .Lxray_sled_endN
+ // 11 NOPs (44 bytes)
+ // .Lxray_sled_endN:
+ //
+ // With the following runtime patch:
+ //
+ // xray_sled_n:
+ // addi.d sp, sp, -16 ; create the stack frame
+ // st.d ra, sp, 8 ; save the return address
+ // lu12i.w t0, %abs_hi20(__xray_FunctionEntry/Exit)
+ // ori t0, t0, %abs_lo12(__xray_FunctionEntry/Exit)
+ // lu32i.d t0, %abs64_lo20(__xray_FunctionEntry/Exit)
+ // lu52i.d t0, t0, %abs64_hi12(__xray_FunctionEntry/Exit)
+ // lu12i.w t1, %abs_hi20(function_id)
+ // ori t1, t1, %abs_lo12(function_id) ; pass the function id
+ // jirl ra, t0, 0 ; call the tracing hook
+ // ld.d ra, sp, 8 ; restore the return address
+ // addi.d sp, sp, 16 ; de-allocate the stack frame
+ //
+ // Replacing the first 4-byte instruction must be the last step, and it must
+ // be atomic, so that user code reaching the sled concurrently either jumps
+ // over the whole sled or executes the whole sled once it is ready.
+ //
+ // When |Enable|==false, we set the first instruction in the sled back to
+ // B #48
+
+ uint32_t *Address = reinterpret_cast<uint32_t *>(Sled.address());
+ if (Enable) {
+ uint32_t LoTracingHookAddr = reinterpret_cast<int64_t>(TracingHook) & 0xfff;
+ uint32_t HiTracingHookAddr =
+ (reinterpret_cast<int64_t>(TracingHook) >> 12) & 0xfffff;
+ uint32_t HigherTracingHookAddr =
+ (reinterpret_cast<int64_t>(TracingHook) >> 32) & 0xfffff;
+ uint32_t HighestTracingHookAddr =
+ (reinterpret_cast<int64_t>(TracingHook) >> 52) & 0xfff;
+ uint32_t LoFunctionID = FuncId & 0xfff;
+ uint32_t HiFunctionID = (FuncId >> 12) & 0xfffff;
+ Address[1] = encodeInstruction2RIx(0x29c00000, RegNum::RN_RA, RegNum::RN_SP,
+ 0x8); // st.d ra, sp, 8
+ Address[2] = encodeInstruction1RI20(
+ 0x14000000, RegNum::RN_T0,
+ HiTracingHookAddr); // lu12i.w t0, HiTracingHookAddr
+ Address[3] = encodeInstruction2RIx(
+ 0x03800000, RegNum::RN_T0, RegNum::RN_T0,
+ LoTracingHookAddr); // ori t0, t0, LoTracingHookAddr
+ Address[4] = encodeInstruction1RI20(
+ 0x16000000, RegNum::RN_T0,
+ HigherTracingHookAddr); // lu32i.d t0, HigherTracingHookAddr
+ Address[5] = encodeInstruction2RIx(
+ 0x03000000, RegNum::RN_T0, RegNum::RN_T0,
+ HighestTracingHookAddr); // lu52i.d t0, t0, HighestTracingHookAddr
+ Address[6] =
+ encodeInstruction1RI20(0x14000000, RegNum::RN_T1,
+ HiFunctionID); // lu12i.w t1, HiFunctionID
+ Address[7] =
+ encodeInstruction2RIx(0x03800000, RegNum::RN_T1, RegNum::RN_T1,
+ LoFunctionID); // ori t1, t1, LoFunctionID
+ Address[8] = encodeInstruction2RIx(0x4c000000, RegNum::RN_RA, RegNum::RN_T0,
+ 0); // jirl ra, t0, 0
+ Address[9] = encodeInstruction2RIx(0x28c00000, RegNum::RN_RA, RegNum::RN_SP,
+ 0x8); // ld.d ra, sp, 8
+ Address[10] = encodeInstruction2RIx(
+ 0x02c00000, RegNum::RN_SP, RegNum::RN_SP, 0x10); // addi.d sp, sp, 16
+ uint32_t CreateStackSpace = encodeInstruction2RIx(
+ 0x02c00000, RegNum::RN_SP, RegNum::RN_SP, 0xff0); // addi.d sp, sp, -16
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint32_t> *>(Address), CreateStackSpace,
+ std::memory_order_release);
+ } else {
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint32_t> *>(Address),
+ uint32_t(0x50003000), std::memory_order_release); // b #48
+ }
+ return true;
+}
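// A sketch of the publication order described in the comment above: the sled
// body is written while the first word still branches over it, then the first
// word is swapped with release ordering, so racing threads see either the old
// branch or a complete sled. The atomic cast mirrors the one used above; real
// patching may also need explicit i-cache maintenance, as the Hexagon port
// does with WriteInstFlushCache.
#include <atomic>
#include <cstdint>

void publishSled(uint32_t *Sled, const uint32_t *Body, unsigned BodyWords,
                 uint32_t FirstWord) {
  for (unsigned I = 1; I <= BodyWords; ++I)
    Sled[I] = Body[I - 1]; // fill words 1..N while word 0 still skips them
  std::atomic_store_explicit(
      reinterpret_cast<std::atomic<uint32_t> *>(Sled), FirstWord,
      std::memory_order_release); // flip the entry instruction last
}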
+
+bool patchFunctionEntry(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled,
+ void (*Trampoline)()) XRAY_NEVER_INSTRUMENT {
+ return patchSled(Enable, FuncId, Sled, Trampoline);
+}
+
+bool patchFunctionExit(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ return patchSled(Enable, FuncId, Sled, __xray_FunctionExit);
+}
+
+bool patchFunctionTailExit(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ // TODO: In the future we'd need to distinguish between non-tail exits and
+ // tail exits for better information preservation.
+ return patchSled(Enable, FuncId, Sled, __xray_FunctionExit);
+}
+
+bool patchCustomEvent(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ // FIXME: Implement in loongarch?
+ return false;
+}
+
+bool patchTypedEvent(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ // FIXME: Implement in loongarch?
+ return false;
+}
+} // namespace __xray
+
+extern "C" void __xray_ArgLoggerEntry() XRAY_NEVER_INSTRUMENT {
+ // TODO: This will have to be implemented in the trampoline assembly file.
+}
diff --git a/contrib/llvm-project/compiler-rt/lib/xray/xray_powerpc64.inc b/contrib/llvm-project/compiler-rt/lib/xray/xray_powerpc64.inc
index e4e16d5b28e0..7e872b5b42e6 100644
--- a/contrib/llvm-project/compiler-rt/lib/xray/xray_powerpc64.inc
+++ b/contrib/llvm-project/compiler-rt/lib/xray/xray_powerpc64.inc
@@ -12,7 +12,22 @@
#include <cstdint>
#include <mutex>
+#ifdef __linux__
#include <sys/platform/ppc.h>
+#elif defined(__FreeBSD__)
+#include <sys/types.h>
+#include <sys/sysctl.h>
+
+#define __ppc_get_timebase __builtin_ppc_get_timebase
+
+uint64_t __ppc_get_timebase_freq(void) {
+  uint64_t tb_freq = 0;
+  size_t length = sizeof(tb_freq);
+  sysctlbyname("kern.timecounter.tc.timebase.frequency", &tb_freq, &length,
+               nullptr, 0);
+  return tb_freq;
+}
+#endif
#include "xray_defs.h"
diff --git a/contrib/llvm-project/compiler-rt/lib/xray/xray_profiling.cpp b/contrib/llvm-project/compiler-rt/lib/xray/xray_profiling.cpp
index ef16691562cc..259ec65a76a1 100644
--- a/contrib/llvm-project/compiler-rt/lib/xray/xray_profiling.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/xray/xray_profiling.cpp
@@ -253,8 +253,8 @@ XRayLogFlushStatus profilingFlush() XRAY_NEVER_INSTRUMENT {
reinterpret_cast<const char *>(B.Data) + B.Size);
B = profileCollectorService::nextBuffer(B);
}
+ LogWriter::Close(LW);
}
- LogWriter::Close(LW);
}
}
@@ -402,7 +402,7 @@ profilingLoggingInit(size_t, size_t, void *Options,
return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED;
}
- // If we've succeded, set the global pointer to the initialised storage.
+ // If we've succeeded, set the global pointer to the initialised storage.
BQ = reinterpret_cast<BufferQueue *>(&BufferQueueStorage);
} else {
BQ->finalize();
diff --git a/contrib/llvm-project/compiler-rt/lib/xray/xray_trampoline_AArch64.S b/contrib/llvm-project/compiler-rt/lib/xray/xray_trampoline_AArch64.S
index 3bf52cef60fe..6f10dda3602b 100644
--- a/contrib/llvm-project/compiler-rt/lib/xray/xray_trampoline_AArch64.S
+++ b/contrib/llvm-project/compiler-rt/lib/xray/xray_trampoline_AArch64.S
@@ -1,163 +1,169 @@
#include "../builtins/assembly.h"
+#include "../sanitizer_common/sanitizer_asm.h"
- .text
- /* The variable containing the handler function pointer */
- .global _ZN6__xray19XRayPatchedFunctionE
- /* Word-aligned function entry point */
- .p2align 2
- /* Let C/C++ see the symbol */
- .global __xray_FunctionEntry
- .hidden __xray_FunctionEntry
- .type __xray_FunctionEntry, %function
- /* In C++ it is void extern "C" __xray_FunctionEntry(uint32_t FuncId) with
- FuncId passed in W0 register. */
-__xray_FunctionEntry:
+.macro SAVE_REGISTERS
+ stp x1, x2, [sp, #-16]!
+ stp x3, x4, [sp, #-16]!
+ stp x5, x6, [sp, #-16]!
+ stp x7, x30, [sp, #-16]!
+ stp q0, q1, [sp, #-32]!
+ stp q2, q3, [sp, #-32]!
+ stp q4, q5, [sp, #-32]!
+ stp q6, q7, [sp, #-32]!
+ // x8 is the indirect result register and needs to be preserved for the body of the function to use.
+ stp x8, x0, [sp, #-16]!
+.endm
+
+.macro RESTORE_REGISTERS
+ ldp x8, x0, [sp], #16
+ ldp q6, q7, [sp], #32
+ ldp q4, q5, [sp], #32
+ ldp q2, q3, [sp], #32
+ ldp q0, q1, [sp], #32
+ ldp x7, x30, [sp], #16
+ ldp x5, x6, [sp], #16
+ ldp x3, x4, [sp], #16
+ ldp x1, x2, [sp], #16
+.endm
+
+.text
+.p2align 2
+.global ASM_SYMBOL(__xray_FunctionEntry)
+ASM_HIDDEN(__xray_FunctionEntry)
+ASM_TYPE_FUNCTION(__xray_FunctionEntry)
+ASM_SYMBOL(__xray_FunctionEntry):
/* Move the return address beyond the end of sled data. The 12 bytes of
data are inserted in the code of the runtime patch, between the call
instruction and the instruction returned into. The data contains 32
bits of instrumented function ID and 64 bits of the address of
the current trampoline. */
- ADD X30, X30, #12
- /* Push the registers which may be modified by the handler function */
- STP X1, X2, [SP, #-16]!
- STP X3, X4, [SP, #-16]!
- STP X5, X6, [SP, #-16]!
- STP X7, X30, [SP, #-16]!
- STP Q0, Q1, [SP, #-32]!
- STP Q2, Q3, [SP, #-32]!
- STP Q4, Q5, [SP, #-32]!
- STP Q6, Q7, [SP, #-32]!
- /* X8 is the indirect result register and needs to be preserved for the body
- of the function to use */
- STP X8, X0, [SP, #-16]!
+ add x30, x30, #12
+ // Push the registers which may be modified by the handler function.
+ SAVE_REGISTERS
- /* Load the page address of _ZN6__xray19XRayPatchedFunctionE into X1 */
- ADRP X1, _ZN6__xray19XRayPatchedFunctionE
- /* Load the handler function pointer into X2 */
- LDR X2, [X1, #:lo12:_ZN6__xray19XRayPatchedFunctionE]
- /* Handler address is nullptr if handler is not set */
- CMP X2, #0
- BEQ FunctionEntry_restore
- /* Function ID is already in W0 (the first parameter).
- X1=0 means that we are tracing an entry event */
- MOV X1, #0
- /* Call the handler with 2 parameters in W0 and X1 */
- BLR X2
-FunctionEntry_restore:
- /* Pop the saved registers */
- LDP X8, X0, [SP], #16
- LDP Q6, Q7, [SP], #32
- LDP Q4, Q5, [SP], #32
- LDP Q2, Q3, [SP], #32
- LDP Q0, Q1, [SP], #32
- LDP X7, X30, [SP], #16
- LDP X5, X6, [SP], #16
- LDP X3, X4, [SP], #16
- LDP X1, X2, [SP], #16
- RET
+ // Load the handler function pointer.
+ adrp x2, ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)
+ ldr x2, [x2, #:lo12:ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)]
+ cbz x2, 1f
+ // Set w0 to the function ID (w17). Set x1 to XRayEntryType::ENTRY = 0.
+ mov w0, w17
+ mov x1, #0
+ // Call the handler with 2 parameters.
+ blr x2
+1:
+ RESTORE_REGISTERS
+ ret
+ASM_SIZE(__xray_FunctionEntry)
- /* Word-aligned function entry point */
- .p2align 2
- /* Let C/C++ see the symbol */
- .global __xray_FunctionExit
- .hidden __xray_FunctionExit
- .type __xray_FunctionExit, %function
- /* In C++ it is void extern "C" __xray_FunctionExit(uint32_t FuncId) with
- FuncId passed in W0 register. */
-__xray_FunctionExit:
+.p2align 2
+.global ASM_SYMBOL(__xray_FunctionExit)
+ASM_HIDDEN(__xray_FunctionExit)
+ASM_TYPE_FUNCTION(__xray_FunctionExit)
+ASM_SYMBOL(__xray_FunctionExit):
/* Move the return address beyond the end of sled data. The 12 bytes of
data are inserted in the code of the runtime patch, between the call
instruction and the instruction returned into. The data contains 32
bits of instrumented function ID and 64 bits of the address of
the current trampoline. */
- ADD X30, X30, #12
- /* Push the registers which may be modified by the handler function */
- STP X1, X2, [SP, #-16]!
- STP X3, X4, [SP, #-16]!
- STP X5, X6, [SP, #-16]!
- STP X7, X30, [SP, #-16]!
- STP Q0, Q1, [SP, #-32]!
- STP Q2, Q3, [SP, #-32]!
- STP Q4, Q5, [SP, #-32]!
- STP Q6, Q7, [SP, #-32]!
- /* X8 is the indirect result register and needs to be preserved for the body
- of the function to use */
- STP X8, X0, [SP, #-16]!
+ add x30, x30, #12
+ SAVE_REGISTERS
- /* Load the page address of _ZN6__xray19XRayPatchedFunctionE into X1 */
- ADRP X1, _ZN6__xray19XRayPatchedFunctionE
- /* Load the handler function pointer into X2 */
- LDR X2, [X1, #:lo12:_ZN6__xray19XRayPatchedFunctionE]
- /* Handler address is nullptr if handler is not set */
- CMP X2, #0
- BEQ FunctionExit_restore
- /* Function ID is already in W0 (the first parameter).
- X1=1 means that we are tracing an exit event */
- MOV X1, #1
- /* Call the handler with 2 parameters in W0 and X1 */
- BLR X2
-FunctionExit_restore:
- LDP X8, X0, [SP], #16
- LDP Q6, Q7, [SP], #32
- LDP Q4, Q5, [SP], #32
- LDP Q2, Q3, [SP], #32
- LDP Q0, Q1, [SP], #32
- LDP X7, X30, [SP], #16
- LDP X5, X6, [SP], #16
- LDP X3, X4, [SP], #16
- LDP X1, X2, [SP], #16
- RET
+ // Load the handler function pointer into x2.
+ adrp x2, ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)
+ ldr x2, [x2, #:lo12:ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)]
+ cbz x2, 1f
+ // Set w0 to the function ID (w17). Set x1 to XRayEntryType::EXIT = 1.
+ mov w0, w17
+ mov x1, #1
+ // Call the handler with 2 parameters.
+ blr x2
+1:
+ RESTORE_REGISTERS
+ ret
+ASM_SIZE(__xray_FunctionExit)
- /* Word-aligned function entry point */
- .p2align 2
- /* Let C/C++ see the symbol */
- .global __xray_FunctionTailExit
- .hidden __xray_FunctionTailExit
- .type __xray_FunctionTailExit, %function
- /* In C++ it is void extern "C" __xray_FunctionTailExit(uint32_t FuncId)
- with FuncId passed in W0 register. */
-__xray_FunctionTailExit:
+.p2align 2
+.global ASM_SYMBOL(__xray_FunctionTailExit)
+ASM_HIDDEN(__xray_FunctionTailExit)
+ASM_TYPE_FUNCTION(__xray_FunctionTailExit)
+ASM_SYMBOL(__xray_FunctionTailExit):
/* Move the return address beyond the end of sled data. The 12 bytes of
data are inserted in the code of the runtime patch, between the call
instruction and the instruction returned into. The data contains 32
bits of instrumented function ID and 64 bits of the address of
the current trampoline. */
- ADD X30, X30, #12
- /* Push the registers which may be modified by the handler function */
- STP X1, X2, [SP, #-16]!
- STP X3, X4, [SP, #-16]!
- STP X5, X6, [SP, #-16]!
- STP X7, X30, [SP, #-16]!
- /* Push the parameters of the tail called function */
- STP Q0, Q1, [SP, #-32]!
- STP Q2, Q3, [SP, #-32]!
- STP Q4, Q5, [SP, #-32]!
- STP Q6, Q7, [SP, #-32]!
- /* Load the page address of _ZN6__xray19XRayPatchedFunctionE into X1 */
- ADRP X1, _ZN6__xray19XRayPatchedFunctionE
- /* Load the handler function pointer into X2 */
- LDR X2, [X1, #:lo12:_ZN6__xray19XRayPatchedFunctionE]
- /* Handler address is nullptr if handler is not set */
- CMP X2, #0
- BEQ FunctionTailExit_restore
- /* Function ID is already in W0 (the first parameter).
- X1=2 means that we are tracing a tail exit event, but before the
- logging part of XRay is ready, we pretend that here a normal function
- exit happens, so we give the handler code 1 */
- MOV X1, #1
- /* Call the handler with 2 parameters in W0 and X1 */
- BLR X2
-FunctionTailExit_restore:
- /* Pop the parameters of the tail called function */
- LDP Q6, Q7, [SP], #32
- LDP Q4, Q5, [SP], #32
- LDP Q2, Q3, [SP], #32
- LDP Q0, Q1, [SP], #32
- /* Pop the registers which may be modified by the handler function */
- LDP X7, X30, [SP], #16
- LDP X5, X6, [SP], #16
- LDP X3, X4, [SP], #16
- LDP X1, X2, [SP], #16
- RET
+ add x30, x30, #12
+ // Save the registers which may be modified by the handler function.
+ SAVE_REGISTERS
+ // Load the handler function pointer into x2.
+ adrp x2, ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)
+ ldr x2, [x2, #:lo12:ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)]
+ cbz x2, 1f
+ // Set w0 to the function ID (w17). Set x1 to XRayEntryType::TAIL = 2.
+ mov w0, w17
+ mov x1, #2
+ // Call the handler with 2 parameters.
+ blr x2
+1:
+ RESTORE_REGISTERS
+ ret
+ASM_SIZE(__xray_FunctionTailExit)
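The contract these trampolines service is easiest to see from the consumer side. A minimal sketch, assuming the public xray_interface.h API (__xray_set_handler, __xray_patch and XRayEntryType are the real interface names; the handler body and include path are illustrative):

    // Sketch: installing the handler that patched sleds reach through
    // __xray_FunctionEntry / __xray_FunctionExit / __xray_FunctionTailExit.
    // _ZN6__xray19XRayPatchedFunctionE demangles to __xray::XRayPatchedFunction,
    // the global these trampolines test before calling.
    #include <cstdint>
    #include <cstdio>
    #include "xray/xray_interface.h"

    // Two arguments, exactly as marshalled above: w0 = function ID,
    // x1 = XRayEntryType (ENTRY = 0, EXIT = 1, TAIL = 2).
    void MyHandler(int32_t FuncId, XRayEntryType Type) {
      std::printf("func %d event %d\n", FuncId, static_cast<int>(Type));
    }

    int main() {
      __xray_set_handler(MyHandler);  // stores into XRayPatchedFunction
      __xray_patch();                 // rewrite sleds to call the trampolines
      // ... instrumented code runs here ...
      __xray_unpatch();
      return 0;
    }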
+
+.p2align 2
+.global ASM_SYMBOL(__xray_ArgLoggerEntry)
+ASM_HIDDEN(__xray_ArgLoggerEntry)
+ASM_TYPE_FUNCTION(__xray_ArgLoggerEntry)
+ASM_SYMBOL(__xray_ArgLoggerEntry):
+ add x30, x30, #12
+ // Push the registers which may be modified by the handler function.
+ SAVE_REGISTERS
+
+ adrp x8, ASM_SYMBOL(_ZN6__xray13XRayArgLoggerE)
+ ldr x8, [x8, #:lo12:ASM_SYMBOL(_ZN6__xray13XRayArgLoggerE)]
+ cbnz x8, 2f
+
+ // Load the handler function pointer.
+ adrp x8, ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)
+ ldr x8, [x8, #:lo12:ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)]
+ cbz x8, 1f
+
+2:
+ mov x2, x0
+ mov x1, #3 // XRayEntryType::LOG_ARGS_ENTRY
+ mov w0, w17
+ blr x8
+
+1:
+ RESTORE_REGISTERS
+ ret
+ASM_SIZE(__xray_ArgLoggerEntry)
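The extra register shuffle above (x0 into x2 before w0 and x1 are set) exists because the argument-logging handler takes a third parameter. A hedged sketch of the matching installation, assuming the __xray_set_handler_arg1 entry point from xray_interface.h:

    // Sketch: a one-argument logging handler. The trampoline passes
    // w0 = function ID, x1 = XRayEntryType::LOG_ARGS_ENTRY (3),
    // x2 = the instrumented function's own first argument.
    #include <cstdint>
    #include <cstdio>
    #include "xray/xray_interface.h"

    void MyArgHandler(int32_t FuncId, XRayEntryType Type, uint64_t Arg1) {
      std::printf("func %d type %d arg1 %llu\n", FuncId, static_cast<int>(Type),
                  static_cast<unsigned long long>(Arg1));
    }

    void installArgLogger() {
      // When unset, the cbnz/cbz chain above falls back to the plain
      // XRayPatchedFunction handler instead.
      __xray_set_handler_arg1(MyArgHandler);
    }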
+
+.global ASM_SYMBOL(__xray_CustomEvent)
+ASM_HIDDEN(__xray_CustomEvent)
+ASM_TYPE_FUNCTION(__xray_CustomEvent)
+ASM_SYMBOL(__xray_CustomEvent):
+ SAVE_REGISTERS
+ adrp x8, ASM_SYMBOL(_ZN6__xray22XRayPatchedCustomEventE)
+ ldr x8, [x8, #:lo12:ASM_SYMBOL(_ZN6__xray22XRayPatchedCustomEventE)]
+ cbz x8, 1f
+ blr x8
+1:
+ RESTORE_REGISTERS
+ ret
+ASM_SIZE(__xray_CustomEvent)
+
+.global ASM_SYMBOL(__xray_TypedEvent)
+ASM_HIDDEN(__xray_TypedEvent)
+ASM_TYPE_FUNCTION(__xray_TypedEvent)
+ASM_SYMBOL(__xray_TypedEvent):
+ SAVE_REGISTERS
+ adrp x8, ASM_SYMBOL(_ZN6__xray21XRayPatchedTypedEventE)
+ ldr x8, [x8, #:lo12:ASM_SYMBOL(_ZN6__xray21XRayPatchedTypedEventE)]
+ cbz x8, 1f
+ blr x8
+1:
+ RESTORE_REGISTERS
+ ret
+ASM_SIZE(__xray_TypedEvent)
NO_EXEC_STACK_DIRECTIVE
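The custom- and typed-event trampolines forward the caller's argument registers untouched, so the handler shapes mirror the __xray_customevent and __xray_typedevent builtins. A sketch under that assumption (signatures follow the xray_interface.h of this import era; the typed-event type parameter has varied in width across LLVM releases):

    // Sketch: handlers reached via __xray_CustomEvent / __xray_TypedEvent.
    #include <cstddef>
    #include "xray/xray_interface.h"

    // x0 = event buffer, x1 = size, from __xray_customevent(buf, size).
    void OnCustom(void *Event, std::size_t Size) { /* record Size bytes */ }

    // x0 = event type, x1 = buffer, x2 = size, from __xray_typedevent(...).
    void OnTyped(std::size_t Type, const void *Event, std::size_t Size) {}

    void installEventHandlers() {
      __xray_set_customevent_handler(OnCustom);
      __xray_set_typedevent_handler(OnTyped);
    }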
diff --git a/contrib/llvm-project/compiler-rt/lib/xray/xray_trampoline_hexagon.S b/contrib/llvm-project/compiler-rt/lib/xray/xray_trampoline_hexagon.S
new file mode 100644
index 000000000000..c87ec4bed1f9
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/xray/xray_trampoline_hexagon.S
@@ -0,0 +1,99 @@
+//===-- xray_trampoline_hexagon.S -------------------------------*- ASM -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// This implements the hexagon-specific assembly for the trampolines.
+//
+//===----------------------------------------------------------------------===//
+
+#include "../builtins/assembly.h"
+#include "../sanitizer_common/sanitizer_asm.h"
+
+.macro SAVE_REGISTERS
+memw(sp+#0)=r0
+memw(sp+#4)=r1
+memw(sp+#8)=r2
+memw(sp+#12)=r3
+memw(sp+#16)=r4
+.endm
+.macro RESTORE_REGISTERS
+r0=memw(sp+#0)
+r1=memw(sp+#4)
+r2=memw(sp+#8)
+r3=memw(sp+#12)
+r4=memw(sp+#16)
+.endm
+
+.macro CALL_PATCHED_FUNC entry_type
+ // if (__xray::XRayPatchedFunction != nullptr)
+ //   __xray::XRayPatchedFunction(FuncId, \entry_type);
+
+ r8 = #ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)
+
+ // The patched sled puts the function ID
+ // into r6. Move it into r0 to pass it to
+ // the patched function.
+ { r0 = r6
+ r1 = \entry_type
+ p0 = !cmp.eq(r8, #0)
+ if (p0) callr r8 }
+.endm
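The braces group all four operations into a single Hexagon instruction packet, so the compare that produces p0 and the call that consumes it issue together. In C-like terms the macro body reduces to the sketch below (names are illustrative, not from the source; it assumes r8 already holds a callable handler pointer):

    // Sketch of one CALL_PATCHED_FUNC expansion.
    #include <cstdint>

    using Handler = void (*)(int32_t FuncId, int32_t EntryType);

    void callPatchedFunc(Handler H, int32_t FuncIdInR6, int32_t EntryType) {
      // { r0 = r6; r1 = #entry_type; p0 = !cmp.eq(r8, #0); if (p0) callr r8 }
      if (H != nullptr)
        H(FuncIdInR6, EntryType);
    }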
+
+ .text
+ .globl ASM_SYMBOL(__xray_FunctionEntry)
+ ASM_HIDDEN(__xray_FunctionEntry)
+ ASM_TYPE_FUNCTION(__xray_FunctionEntry)
+# LLVM-MCA-BEGIN __xray_FunctionEntry
+ASM_SYMBOL(__xray_FunctionEntry):
+ CFI_STARTPROC
+ SAVE_REGISTERS
+
+ CALL_PATCHED_FUNC #0 // XRayEntryType::ENTRY
+.Ltmp0:
+ RESTORE_REGISTERS
+ // return
+ jumpr r31
+# LLVM-MCA-END
+ ASM_SIZE(__xray_FunctionEntry)
+ CFI_ENDPROC
+
+
+ .globl ASM_SYMBOL(__xray_FunctionExit)
+ ASM_HIDDEN(__xray_FunctionExit)
+ ASM_TYPE_FUNCTION(__xray_FunctionExit)
+# LLVM-MCA-BEGIN __xray_FunctionExit
+ASM_SYMBOL(__xray_FunctionExit):
+ CFI_STARTPROC
+ SAVE_REGISTERS
+
+ CALL_PATCHED_FUNC #1 // XRayEntryType::EXIT
+.Ltmp1:
+ RESTORE_REGISTERS
+ // return
+ jumpr r31
+# LLVM-MCA-END
+ ASM_SIZE(__xray_FunctionExit)
+ CFI_ENDPROC
+
+
+ .globl ASM_SYMBOL(__xray_FunctionTailExit)
+ ASM_HIDDEN(__xray_FunctionTailExit)
+ ASM_TYPE_FUNCTION(__xray_FunctionTailExit)
+# LLVM-MCA-BEGIN __xray_FunctionTailExit
+ASM_SYMBOL(__xray_FunctionTailExit):
+ CFI_STARTPROC
+ SAVE_REGISTERS
+
+ CALL_PATCHED_FUNC #2 // XRayEntryType::TAIL
+.Ltmp2:
+ RESTORE_REGISTERS
+ // return
+ jumpr r31
+# LLVM-MCA-END
+ ASM_SIZE(__xray_FunctionTailExit)
+ CFI_ENDPROC
diff --git a/contrib/llvm-project/compiler-rt/lib/xray/xray_trampoline_loongarch64.S b/contrib/llvm-project/compiler-rt/lib/xray/xray_trampoline_loongarch64.S
new file mode 100644
index 000000000000..fcbefcc5f7a2
--- /dev/null
+++ b/contrib/llvm-project/compiler-rt/lib/xray/xray_trampoline_loongarch64.S
@@ -0,0 +1,124 @@
+//===-- xray_trampoline_loongarch64.S ---------------------------*- ASM -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// This implements the loongarch-specific assembly for the trampolines.
+//
+//===----------------------------------------------------------------------===//
+
+#include "../sanitizer_common/sanitizer_asm.h"
+
+#define FROM_0_TO_7 0,1,2,3,4,5,6,7
+#define FROM_7_TO_0 7,6,5,4,3,2,1,0
+
+.macro SAVE_ARG_REGISTERS
+ .irp i,FROM_7_TO_0
+ st.d $a\i, $sp, (8 * 8 + 8 * \i)
+ .endr
+ .irp i,FROM_7_TO_0
+ fst.d $f\i, $sp, (8 * \i)
+ .endr
+.endm
+
+.macro RESTORE_ARG_REGISTERS
+ .irp i,FROM_0_TO_7
+ fld.d $f\i, $sp, (8 * \i)
+ .endr
+ .irp i,FROM_0_TO_7
+ ld.d $a\i, $sp, (8 * 8 + 8 * \i)
+ .endr
+.endm
+
+.macro SAVE_RET_REGISTERS
+ st.d $a1, $sp, 24
+ st.d $a0, $sp, 16
+ fst.d $f1, $sp, 8
+ fst.d $f0, $sp, 0
+.endm
+
+.macro RESTORE_RET_REGISTERS
+ fld.d $f0, $sp, 0
+ fld.d $f1, $sp, 8
+ ld.d $a0, $sp, 16
+ ld.d $a1, $sp, 24
+.endm
+
+ .text
+ .file "xray_trampoline_loongarch64.S"
+ .globl ASM_SYMBOL(__xray_FunctionEntry)
+ ASM_HIDDEN(__xray_FunctionEntry)
+ .p2align 2
+ ASM_TYPE_FUNCTION(__xray_FunctionEntry)
+ASM_SYMBOL(__xray_FunctionEntry):
+ .cfi_startproc
+ // Save argument registers before doing any actual work.
+ .cfi_def_cfa_offset 136
+ addi.d $sp, $sp, -136
+ st.d $ra, $sp, 128
+ .cfi_offset 1, -8
+ SAVE_ARG_REGISTERS
+
+ la.got $t2, ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)
+ ld.d $t2, $t2, 0
+
+ beqz $t2, FunctionEntry_restore
+
+ // a1=0 means that we are tracing an entry event.
+ move $a1, $zero
+ // The function ID is in $t1; move it into $a0, the first argument.
+ move $a0, $t1
+ jirl $ra, $t2, 0
+
+FunctionEntry_restore:
+ // Restore argument registers.
+ RESTORE_ARG_REGISTERS
+ ld.d $ra, $sp, 128
+ addi.d $sp, $sp, 136
+ ret
+FunctionEntry_end:
+ ASM_SIZE(__xray_FunctionEntry)
+ .cfi_endproc
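The 136-byte frame reserved above decomposes into the offsets SAVE_ARG_REGISTERS uses plus the saved return address. A purely illustrative struct makes the arithmetic checkable:

    // Hypothetical mirror of __xray_FunctionEntry's stack frame layout.
    #include <cstdint>

    struct EntryFrame {
      double   FpArgs[8];  // $f0..$f7 at sp+0  .. sp+56
      uint64_t GpArgs[8];  // $a0..$a7 at sp+64 .. sp+120
      uint64_t Ra;         // $ra      at sp+128
    };
    static_assert(sizeof(EntryFrame) == 136,
                  "matches 'addi.d $sp, $sp, -136' above");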
+
+ .text
+ .globl ASM_SYMBOL(__xray_FunctionExit)
+ ASM_HIDDEN(__xray_FunctionExit)
+ .p2align 2
+ ASM_TYPE_FUNCTION(__xray_FunctionExit)
+ASM_SYMBOL(__xray_FunctionExit):
+ .cfi_startproc
+ // Save return registers before doing any actual work.
+ .cfi_def_cfa_offset 48
+ addi.d $sp, $sp, -48
+ st.d $ra, $sp, 40
+ .cfi_offset 1, -8
+ st.d $fp, $sp, 32
+ SAVE_RET_REGISTERS
+
+ la.got $t2, ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)
+ ld.d $t2, $t2, 0
+
+ beqz $t2, FunctionExit_restore
+
+ // a1=1 means that we are tracing an exit event.
+ li.w $a1, 1
+ // The function ID is in $t1; move it into $a0, the first argument.
+ move $a0, $t1
+ jirl $ra, $t2, 0
+
+FunctionExit_restore:
+ // Restore return registers.
+ RESTORE_RET_REGISTERS
+ ld.d $fp, $sp, 32
+ ld.d $ra, $sp, 40
+ addi.d $sp, $sp, 48
+ ret
+
+FunctionExit_end:
+ ASM_SIZE(__xray_FunctionExit)
+ .cfi_endproc
diff --git a/contrib/llvm-project/compiler-rt/lib/xray/xray_trampoline_x86_64.S b/contrib/llvm-project/compiler-rt/lib/xray/xray_trampoline_x86_64.S
index 02cf69f766c4..0f00bcc41508 100644
--- a/contrib/llvm-project/compiler-rt/lib/xray/xray_trampoline_x86_64.S
+++ b/contrib/llvm-project/compiler-rt/lib/xray/xray_trampoline_x86_64.S
@@ -124,14 +124,14 @@ ASM_SYMBOL(__xray_FunctionEntry):
// On x86/amd64, a simple (type-aligned) MOV instruction is enough.
movq ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
testq %rax, %rax
- je .Ltmp0
+ je LOCAL_LABEL(tmp0)
// The patched function prologue puts its xray_instr_map index into %r10d.
movl %r10d, %edi
xor %esi,%esi
callq *%rax
-.Ltmp0:
+LOCAL_LABEL(tmp0):
RESTORE_REGISTERS
RESTORE_STACK_ALIGNMENT
retq
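The .Ltmp* to LOCAL_LABEL(...) churn throughout this file exists because assembler-local label syntax differs by object format: ELF assemblers treat a .L prefix as local, while Mach-O expects a bare L. The macro lives in sanitizer_common/sanitizer_asm.h; its definition is, to the best of my knowledge, roughly the following (an assumption, not a quotation):

    // Approximate shape of LOCAL_LABEL; consult sanitizer_asm.h for the
    // authoritative definition.
    #if defined(__APPLE__)
    # define LOCAL_LABEL(label) L##label    // Mach-O local label: Ltmp0
    #else
    # define LOCAL_LABEL(label) .L##label   // ELF local label:   .Ltmp0
    #endif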
@@ -162,13 +162,13 @@ ASM_SYMBOL(__xray_FunctionExit):
movq %rdx, 0(%rsp)
movq ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
testq %rax,%rax
- je .Ltmp2
+ je LOCAL_LABEL(tmp2)
movl %r10d, %edi
movl $1, %esi
callq *%rax
-.Ltmp2:
+LOCAL_LABEL(tmp2):
// Restore the important registers.
movq 48(%rsp), %rbp
movupd 32(%rsp), %xmm0
@@ -198,13 +198,13 @@ ASM_SYMBOL(__xray_FunctionTailExit):
movq ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
testq %rax,%rax
- je .Ltmp4
+ je LOCAL_LABEL(tmp4)
movl %r10d, %edi
movl $2, %esi
callq *%rax
-.Ltmp4:
+LOCAL_LABEL(tmp4):
RESTORE_REGISTERS
RESTORE_STACK_ALIGNMENT
retq
@@ -227,14 +227,14 @@ ASM_SYMBOL(__xray_ArgLoggerEntry):
// Again, these function pointer loads must be atomic; MOV is fine.
movq ASM_SYMBOL(_ZN6__xray13XRayArgLoggerE)(%rip), %rax
testq %rax, %rax
- jne .Larg1entryLog
+ jne LOCAL_LABEL(arg1entryLog)
// If [arg1 logging handler] not set, defer to no-arg logging.
movq ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
testq %rax, %rax
- je .Larg1entryFail
+ je LOCAL_LABEL(arg1entryFail)
-.Larg1entryLog:
+LOCAL_LABEL(arg1entryLog):
// First argument will become the third
movq %rdi, %rdx
@@ -247,7 +247,7 @@ ASM_SYMBOL(__xray_ArgLoggerEntry):
callq *%rax
-.Larg1entryFail:
+LOCAL_LABEL(arg1entryFail):
RESTORE_REGISTERS
RESTORE_STACK_ALIGNMENT
retq
@@ -270,11 +270,11 @@ ASM_SYMBOL(__xray_CustomEvent):
// already.
movq ASM_SYMBOL(_ZN6__xray22XRayPatchedCustomEventE)(%rip), %rax
testq %rax,%rax
- je .LcustomEventCleanup
+ je LOCAL_LABEL(customEventCleanup)
callq *%rax
-.LcustomEventCleanup:
+LOCAL_LABEL(customEventCleanup):
RESTORE_REGISTERS
retq
# LLVM-MCA-END
@@ -296,11 +296,11 @@ ASM_SYMBOL(__xray_TypedEvent):
// and rdx without our intervention.
movq ASM_SYMBOL(_ZN6__xray21XRayPatchedTypedEventE)(%rip), %rax
testq %rax,%rax
- je .LtypedEventCleanup
+ je LOCAL_LABEL(typedEventCleanup)
callq *%rax
-.LtypedEventCleanup:
+LOCAL_LABEL(typedEventCleanup):
RESTORE_REGISTERS
retq
# LLVM-MCA-END
diff --git a/contrib/llvm-project/compiler-rt/lib/xray/xray_tsc.h b/contrib/llvm-project/compiler-rt/lib/xray/xray_tsc.h
index bd7e1911abb3..e1cafe1bf11d 100644
--- a/contrib/llvm-project/compiler-rt/lib/xray/xray_tsc.h
+++ b/contrib/llvm-project/compiler-rt/lib/xray/xray_tsc.h
@@ -42,7 +42,8 @@ inline uint64_t getTSCFrequency() XRAY_NEVER_INSTRUMENT {
#include "xray_x86_64.inc"
#elif defined(__powerpc64__)
#include "xray_powerpc64.inc"
-#elif defined(__arm__) || defined(__aarch64__) || defined(__mips__)
+#elif defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+ defined(__hexagon__) || defined(__loongarch_lp64)
// Emulated TSC.
// There is no instruction like RDTSCP in user mode on ARM. ARM's CP15 does
// not have a constant frequency like TSC on x86(_64), it may go faster
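On the targets added here the emulated TSC is simply a monotonic nanosecond clock reported at a fixed 1 GHz. A self-contained sketch of that idea (names are illustrative; the real code sits behind readTSC/getTSCFrequency in this header):

    // Sketch: an emulated TSC built on clock_gettime; ticks are nanoseconds.
    #include <cstdint>
    #include <ctime>

    constexpr uint64_t NanosecondsPerSecond = 1000000000ULL;

    inline uint64_t emulatedReadTSC(uint8_t &CPU) {
      timespec TS;
      clock_gettime(CLOCK_MONOTONIC, &TS);
      CPU = 0;  // no meaningful CPU id on this path
      return static_cast<uint64_t>(TS.tv_sec) * NanosecondsPerSecond + TS.tv_nsec;
    }

    inline uint64_t emulatedGetTSCFrequency() { return NanosecondsPerSecond; }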
diff --git a/contrib/llvm-project/compiler-rt/lib/xray/xray_utils.cpp b/contrib/llvm-project/compiler-rt/lib/xray/xray_utils.cpp
index befbabfe4532..5d51df9937c2 100644
--- a/contrib/llvm-project/compiler-rt/lib/xray/xray_utils.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/xray/xray_utils.cpp
@@ -28,7 +28,7 @@
#include <utility>
#if SANITIZER_FUCHSIA
-#include "sanitizer_common/sanitizer_symbolizer_fuchsia.h"
+#include "sanitizer_common/sanitizer_symbolizer_markup_constants.h"
#include <inttypes.h>
#include <zircon/process.h>
diff --git a/contrib/llvm-project/compiler-rt/lib/xray/xray_x86_64.cpp b/contrib/llvm-project/compiler-rt/lib/xray/xray_x86_64.cpp
index c58584b3a14b..b9666a40861d 100644
--- a/contrib/llvm-project/compiler-rt/lib/xray/xray_x86_64.cpp
+++ b/contrib/llvm-project/compiler-rt/lib/xray/xray_x86_64.cpp
@@ -6,7 +6,7 @@
#include "xray_defs.h"
#include "xray_interface_internal.h"
-#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_MAC
+#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_APPLE
#include <sys/types.h>
#include <sys/sysctl.h>
#elif SANITIZER_FUCHSIA
@@ -82,11 +82,11 @@ uint64_t getTSCFrequency() XRAY_NEVER_INSTRUMENT {
}
return TSCFrequency == -1 ? 0 : static_cast<uint64_t>(TSCFrequency);
}
-#elif SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_MAC
+#elif SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_APPLE
uint64_t getTSCFrequency() XRAY_NEVER_INSTRUMENT {
long long TSCFrequency = -1;
size_t tscfreqsz = sizeof(TSCFrequency);
-#if SANITIZER_MAC
+#if SANITIZER_APPLE
if (internal_sysctlbyname("machdep.tsc.frequency", &TSCFrequency,
&tscfreqsz, NULL, 0) != -1) {
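The same probe is reproducible outside the runtime with plain sysctlbyname; only the OID spelling differs per OS (macOS uses machdep.tsc.frequency, FreeBSD machdep.tsc_freq, as the surrounding branches query). A hedged user-space sketch:

    // Sketch: querying TSC frequency the way getTSCFrequency does here.
    #include <cstdio>
    #include <sys/types.h>
    #include <sys/sysctl.h>

    int main() {
      long long TSCFrequency = -1;
      size_t Len = sizeof(TSCFrequency);
    #if defined(__APPLE__)
      const char *Oid = "machdep.tsc.frequency";
    #else
      const char *Oid = "machdep.tsc_freq";  // FreeBSD spelling
    #endif
      if (sysctlbyname(Oid, &TSCFrequency, &Len, nullptr, 0) == 0)
        std::printf("TSC frequency: %lld Hz\n", TSCFrequency);
      else
        std::puts("TSC frequency unavailable");
      return 0;
    }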
@@ -148,7 +148,8 @@ bool patchFunctionEntry(const bool Enable, const uint32_t FuncId,
int64_t TrampolineOffset = reinterpret_cast<int64_t>(Trampoline) -
(static_cast<int64_t>(Address) + 11);
if (TrampolineOffset < MinOffset || TrampolineOffset > MaxOffset) {
- Report("XRay Entry trampoline (%p) too far from sled (%p)\n", Trampoline,
+ Report("XRay Entry trampoline (%p) too far from sled (%p)\n",
+ reinterpret_cast<void *>(Trampoline),
reinterpret_cast<void *>(Address));
return false;
}
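The bound being checked is the reach of the 5-byte call rel32 that patching writes into the sled: the displacement is measured from the end of the 11-byte patched sequence (the 6-byte mov of the function ID into r10d plus the call itself), so the trampoline must lie within ±2 GiB of Address + 11. A small sketch of the same test:

    // Sketch: rel32 reachability test used before patching a sled.
    #include <cstdint>
    #include <limits>

    bool trampolineReachable(uint64_t SledAddress, uint64_t Trampoline) {
      constexpr int64_t MinOffset = std::numeric_limits<int32_t>::min();
      constexpr int64_t MaxOffset = std::numeric_limits<int32_t>::max();
      const int64_t Offset = static_cast<int64_t>(Trampoline) -
                             (static_cast<int64_t>(SledAddress) + 11);
      return Offset >= MinOffset && Offset <= MaxOffset;
    }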
@@ -195,7 +196,8 @@ bool patchFunctionExit(const bool Enable, const uint32_t FuncId,
(static_cast<int64_t>(Address) + 11);
if (TrampolineOffset < MinOffset || TrampolineOffset > MaxOffset) {
Report("XRay Exit trampoline (%p) too far from sled (%p)\n",
- __xray_FunctionExit, reinterpret_cast<void *>(Address));
+ reinterpret_cast<void *>(__xray_FunctionExit),
+ reinterpret_cast<void *>(Address));
return false;
}
if (Enable) {
@@ -224,7 +226,8 @@ bool patchFunctionTailExit(const bool Enable, const uint32_t FuncId,
(static_cast<int64_t>(Address) + 11);
if (TrampolineOffset < MinOffset || TrampolineOffset > MaxOffset) {
Report("XRay Tail Exit trampoline (%p) too far from sled (%p)\n",
- __xray_FunctionTailExit, reinterpret_cast<void *>(Address));
+ reinterpret_cast<void *>(__xray_FunctionTailExit),
+ reinterpret_cast<void *>(Address));
return false;
}
if (Enable) {
@@ -247,10 +250,8 @@ bool patchCustomEvent(const bool Enable, const uint32_t FuncId,
const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
// Here we do the dance of replacing the following sled:
//
- // In Version 0:
- //
// xray_sled_n:
- // jmp +20 // 2 bytes
+ // jmp +15 // 2 bytes
// ...
//
// With the following:
@@ -259,36 +260,17 @@ bool patchCustomEvent(const bool Enable, const uint32_t FuncId,
// ...
//
//
- // The "unpatch" should just turn the 'nopw' back to a 'jmp +20'.
- //
- // ---
- //
- // In Version 1 or 2:
- //
- // The jump offset is now 15 bytes (0x0f), so when restoring the nopw back
- // to a jmp, use 15 bytes instead.
- //
+ // The "unpatch" should just turn the 'nopw' back to a 'jmp +15'.
const uint64_t Address = Sled.address();
if (Enable) {
std::atomic_store_explicit(
reinterpret_cast<std::atomic<uint16_t> *>(Address), NopwSeq,
std::memory_order_release);
} else {
- switch (Sled.Version) {
- case 1:
- case 2:
- std::atomic_store_explicit(
- reinterpret_cast<std::atomic<uint16_t> *>(Address), Jmp15Seq,
- std::memory_order_release);
- break;
- case 0:
- default:
- std::atomic_store_explicit(
- reinterpret_cast<std::atomic<uint16_t> *>(Address), Jmp20Seq,
- std::memory_order_release);
- break;
- }
- }
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint16_t> *>(Address), Jmp15Seq,
+ std::memory_order_release);
+ }
return false;
}
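With the version switch gone, enable/disable reduces to a single release-ordered 16-bit store over the sled's first two bytes. A condensed sketch of both directions, assuming the byte sequences defined earlier in this file (0x9066 is the two-byte nop, 0x0feb encodes jmp +15):

    // Sketch: flipping a custom-event sled between 'nopw' and 'jmp +15'.
    #include <atomic>
    #include <cstdint>

    void patchCustomEventSled(uint64_t Address, bool Enable) {
      constexpr uint16_t NopwSeq  = 0x9066;  // 66 90: fall through to the call
      constexpr uint16_t Jmp15Seq = 0x0feb;  // eb 0f: jump over the call
      std::atomic_store_explicit(
          reinterpret_cast<std::atomic<uint16_t> *>(Address),
          Enable ? NopwSeq : Jmp15Seq, std::memory_order_release);
    }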