Diffstat (limited to 'lib/lsan')
-rw-r--r--  lib/lsan/CMakeLists.txt         19
-rw-r--r--  lib/lsan/lsan_common.cc        110
-rw-r--r--  lib/lsan/lsan_common.h           3
-rw-r--r--  lib/lsan/lsan_common_linux.cc   31
-rw-r--r--  lib/lsan/lsan_interceptors.cc    2
-rw-r--r--  lib/lsan/lsan_thread.h           4
6 files changed, 109 insertions, 60 deletions
diff --git a/lib/lsan/CMakeLists.txt b/lib/lsan/CMakeLists.txt
index 2ea765de1bc7..37f794e2e11b 100644
--- a/lib/lsan/CMakeLists.txt
+++ b/lib/lsan/CMakeLists.txt
@@ -18,20 +18,13 @@ set(LSAN_SRC_DIR ${CMAKE_CURRENT_SOURCE_DIR})
add_custom_target(lsan)
-if(APPLE)
- foreach(os ${SANITIZER_COMMON_SUPPORTED_DARWIN_OS})
- add_compiler_rt_darwin_object_library(RTLSanCommon ${os}
- ARCH ${LSAN_COMMON_SUPPORTED_ARCH}
- SOURCES ${LSAN_COMMON_SOURCES}
- CFLAGS ${LSAN_CFLAGS})
- endforeach()
-else()
- foreach(arch ${LSAN_COMMON_SUPPORTED_ARCH})
- add_compiler_rt_object_library(RTLSanCommon ${arch}
- SOURCES ${LSAN_COMMON_SOURCES}
- CFLAGS ${LSAN_CFLAGS})
- endforeach()
+add_compiler_rt_object_libraries(RTLSanCommon
+ OS ${SANITIZER_COMMON_SUPPORTED_OS}
+ ARCHS ${LSAN_COMMON_SUPPORTED_ARCH}
+ SOURCES ${LSAN_COMMON_SOURCES}
+ CFLAGS ${LSAN_CFLAGS})
+if(COMPILER_RT_HAS_LSAN)
foreach(arch ${LSAN_SUPPORTED_ARCH})
add_compiler_rt_runtime(clang_rt.lsan-${arch} ${arch} STATIC
SOURCES ${LSAN_SOURCES}
diff --git a/lib/lsan/lsan_common.cc b/lib/lsan/lsan_common.cc
index b9e2a1104326..0ffba505cc70 100644
--- a/lib/lsan/lsan_common.cc
+++ b/lib/lsan/lsan_common.cc
@@ -21,7 +21,6 @@
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
-#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
@@ -127,13 +126,14 @@ static inline bool CanBeAHeapPointer(uptr p) {
// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
-// There are two usage modes for this function: finding reachable or ignored
-// chunks (|tag| = kReachable or kIgnored) and finding indirectly leaked chunks
+// There are two usage modes for this function: finding reachable chunks
+// (|tag| = kReachable) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end,
Frontier *frontier,
const char *region_type, ChunkTag tag) {
+ CHECK(tag == kReachable || tag == kIndirectlyLeaked);
const uptr alignment = flags()->pointer_alignment();
LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, begin, end);
uptr pp = begin;
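
For context, ScanRangeForPointers is a conservative, word-aligned sweep: every suitably aligned word in [begin, end) is treated as a potential pointer and looked up in the allocator's metadata. Below is a minimal standalone sketch of that idea; g_chunks, PointsIntoHeap, and ScanRange are illustrative stand-ins, not the compiler-rt implementation.

#include <cstdint>
#include <cstdio>
#include <set>
#include <vector>

// Toy stand-in for LSan's chunk lookup: an exact-match set of chunk
// start addresses instead of real allocator metadata.
static std::set<uintptr_t> g_chunks;

static uintptr_t PointsIntoHeap(uintptr_t p) {
  return g_chunks.count(p) ? p : 0;
}

// Conservative scan: any aligned word whose value names a live chunk is
// treated as a reference to that chunk.
static void ScanRange(uintptr_t begin, uintptr_t end,
                      std::vector<uintptr_t> *frontier) {
  const uintptr_t alignment = sizeof(void *);
  uintptr_t pp = begin;
  if (pp % alignment) pp += alignment - pp % alignment;  // round up
  for (; pp + sizeof(void *) <= end; pp += alignment) {
    uintptr_t p = *reinterpret_cast<uintptr_t *>(pp);
    uintptr_t chunk = PointsIntoHeap(p);
    if (!chunk || chunk == begin) continue;  // pointers to self don't count
    if (frontier) frontier->push_back(chunk);  // flood-fill mode
  }
}

int main() {
  static uintptr_t fake_chunk;  // pretend this is a heap chunk
  g_chunks.insert(reinterpret_cast<uintptr_t>(&fake_chunk));
  uintptr_t root = reinterpret_cast<uintptr_t>(&fake_chunk);
  std::vector<uintptr_t> frontier;
  ScanRange(reinterpret_cast<uintptr_t>(&root),
            reinterpret_cast<uintptr_t>(&root) + sizeof(root), &frontier);
  std::printf("found %zu reference(s)\n", frontier.size());
}

The new CHECK makes the contract explicit: after this patch the function only ever tags kReachable or kIndirectlyLeaked, because ignored chunks are now handled by seeding the reachability flood fill (see ClassifyAllChunks below).
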
@@ -147,9 +147,7 @@ void ScanRangeForPointers(uptr begin, uptr end,
// Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
if (chunk == begin) continue;
LsanMetadata m(chunk);
- // Reachable beats ignored beats leaked.
- if (m.tag() == kReachable) continue;
- if (m.tag() == kIgnored && tag != kReachable) continue;
+ if (m.tag() == kReachable || m.tag() == kIgnored) continue;
// Do this check relatively late so we can log only the interesting cases.
if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
@@ -288,7 +286,7 @@ static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
LsanMetadata m(chunk);
if (m.allocated() && m.tag() != kReachable) {
ScanRangeForPointers(chunk, chunk + m.requested_size(),
- /* frontier */ 0, "HEAP", kIndirectlyLeaked);
+ /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
}
}
@@ -298,8 +296,11 @@ static void CollectIgnoredCb(uptr chunk, void *arg) {
CHECK(arg);
chunk = GetUserBegin(chunk);
LsanMetadata m(chunk);
- if (m.allocated() && m.tag() == kIgnored)
+ if (m.allocated() && m.tag() == kIgnored) {
+ LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n",
+ chunk, chunk + m.requested_size(), m.requested_size());
reinterpret_cast<Frontier *>(arg)->push_back(chunk);
+ }
}
// Sets the appropriate tag on each chunk.
@@ -307,26 +308,33 @@ static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
// Holds the flood fill frontier.
Frontier frontier(1);
+ ForEachChunk(CollectIgnoredCb, &frontier);
ProcessGlobalRegions(&frontier);
ProcessThreads(suspended_threads, &frontier);
ProcessRootRegions(&frontier);
FloodFillTag(&frontier, kReachable);
+
// The check here is relatively expensive, so we do this in a separate flood
// fill. That way we can skip the check for chunks that are reachable
// otherwise.
LOG_POINTERS("Processing platform-specific allocations.\n");
+ CHECK_EQ(0, frontier.size());
ProcessPlatformSpecificAllocations(&frontier);
FloodFillTag(&frontier, kReachable);
- LOG_POINTERS("Scanning ignored chunks.\n");
- CHECK_EQ(0, frontier.size());
- ForEachChunk(CollectIgnoredCb, &frontier);
- FloodFillTag(&frontier, kIgnored);
-
// Iterate over leaked chunks and mark those that are reachable from other
// leaked chunks.
LOG_POINTERS("Scanning leaked chunks.\n");
- ForEachChunk(MarkIndirectlyLeakedCb, 0 /* arg */);
+ ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
+}
+
+// ForEachChunk callback. Resets the tags to pre-leak-check state.
+static void ResetTagsCb(uptr chunk, void *arg) {
+ (void)arg;
+ chunk = GetUserBegin(chunk);
+ LsanMetadata m(chunk);
+ if (m.allocated() && m.tag() != kIgnored)
+ m.set_tag(kDirectlyLeaked);
}
static void PrintStackTraceById(u32 stack_trace_id) {
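
The reordering above changes the classification pipeline: ignored chunks now seed the reachability flood fill, so memory referenced directly or transitively only by an ignored chunk is classified kReachable rather than leaked, and the separate kIgnored fill is gone. A standalone sketch of the worklist flood fill, assuming toy Chunk/ChunkTag types in place of LSan's metadata:

#include <vector>

enum ChunkTag { kDirectlyLeaked, kIndirectlyLeaked, kReachable, kIgnored };

struct Chunk {
  ChunkTag tag = kDirectlyLeaked;
  std::vector<Chunk *> points_to;  // stand-in for the conservative scan
};

// Pop a chunk, tag everything it references, repeat until the worklist is
// empty. Mirrors FloodFillTag + ScanRangeForPointers.
static void FloodFill(std::vector<Chunk *> *frontier, ChunkTag tag) {
  while (!frontier->empty()) {
    Chunk *c = frontier->back();
    frontier->pop_back();
    for (Chunk *target : c->points_to) {
      // Already-reachable and ignored chunks keep their tags.
      if (target->tag == kReachable || target->tag == kIgnored) continue;
      target->tag = tag;
      frontier->push_back(target);
    }
  }
}

int main() {
  Chunk ignored, child;
  ignored.tag = kIgnored;
  ignored.points_to.push_back(&child);
  std::vector<Chunk *> frontier = {&ignored};  // seed with ignored chunks
  FloodFill(&frontier, kReachable);
  return child.tag == kReachable ? 0 : 1;  // child is now reachable
}

Because the scan skips chunks already tagged kReachable or kIgnored, the seeds themselves keep their kIgnored tag while everything they reference becomes reachable, which is why ResetTagsCb can later assume kIgnored tags were never overwritten.
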
@@ -372,35 +380,33 @@ static void PrintMatchedSuppressions() {
Printf("%s\n\n", line);
}
-struct DoLeakCheckParam {
+struct CheckForLeaksParam {
bool success;
LeakReport leak_report;
};
-static void DoLeakCheckCallback(const SuspendedThreadsList &suspended_threads,
- void *arg) {
- DoLeakCheckParam *param = reinterpret_cast<DoLeakCheckParam *>(arg);
+static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
+ void *arg) {
+ CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
CHECK(param);
CHECK(!param->success);
ClassifyAllChunks(suspended_threads);
ForEachChunk(CollectLeaksCb, &param->leak_report);
+ // Clean up for subsequent leak checks. This assumes we did not overwrite any
+ // kIgnored tags.
+ ForEachChunk(ResetTagsCb, nullptr);
param->success = true;
}
-void DoLeakCheck() {
- EnsureMainThreadIDIsCorrect();
- BlockingMutexLock l(&global_mutex);
- static bool already_done;
- if (already_done) return;
- already_done = true;
+static bool CheckForLeaks() {
if (&__lsan_is_turned_off && __lsan_is_turned_off())
- return;
-
- DoLeakCheckParam param;
+ return false;
+ EnsureMainThreadIDIsCorrect();
+ CheckForLeaksParam param;
param.success = false;
LockThreadRegistry();
LockAllocator();
- StopTheWorld(DoLeakCheckCallback, &param);
+ DoStopTheWorld(CheckForLeaksCallback, &param);
UnlockAllocator();
UnlockThreadRegistry();
@@ -424,25 +430,42 @@ void DoLeakCheck() {
PrintMatchedSuppressions();
if (unsuppressed_count > 0) {
param.leak_report.PrintSummary();
- if (flags()->exitcode) {
- if (common_flags()->coverage)
- __sanitizer_cov_dump();
- internal__exit(flags()->exitcode);
- }
+ return true;
}
+ return false;
+}
+
+void DoLeakCheck() {
+ BlockingMutexLock l(&global_mutex);
+ static bool already_done;
+ if (already_done) return;
+ already_done = true;
+ bool have_leaks = CheckForLeaks();
+ if (!have_leaks) {
+ return;
+ }
+ if (flags()->exitcode) {
+ if (common_flags()->coverage)
+ __sanitizer_cov_dump();
+ internal__exit(flags()->exitcode);
+ }
+}
+
+static int DoRecoverableLeakCheck() {
+ BlockingMutexLock l(&global_mutex);
+ bool have_leaks = CheckForLeaks();
+ return have_leaks ? 1 : 0;
}
static Suppression *GetSuppressionForAddr(uptr addr) {
Suppression *s = nullptr;
// Suppress by module name.
- const char *module_name;
- uptr module_offset;
SuppressionContext *suppressions = GetSuppressionContext();
- if (Symbolizer::GetOrInit()->GetModuleNameAndOffsetForPC(addr, &module_name,
- &module_offset) &&
- suppressions->Match(module_name, kSuppressionLeak, &s))
- return s;
+ if (const char *module_name =
+ Symbolizer::GetOrInit()->GetModuleNameForPc(addr))
+ if (suppressions->Match(module_name, kSuppressionLeak, &s))
+ return s;
// Suppress by file or function name.
SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
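
This hunk splits the one-shot, potentially fatal DoLeakCheck() from a re-runnable CheckForLeaks(), with ResetTagsCb restoring tags between runs. A rough sketch of the resulting control flow, using std::mutex and a stub in place of LSan's global_mutex and real scan (all names here are illustrative):

#include <cstdlib>
#include <mutex>

static std::mutex g_mutex;

// Stand-in for CheckForLeaks(): stop the world, classify chunks, report,
// then reset tags so a later call starts from a clean slate.
static bool CheckForLeaksStub() { return false; }

// At-exit variant: runs at most once and exits the process on leaks
// (LSan's exitcode flag, 23 by default).
void DoLeakCheckSketch() {
  std::lock_guard<std::mutex> l(g_mutex);
  static bool already_done = false;
  if (already_done) return;
  already_done = true;
  if (CheckForLeaksStub()) std::exit(23);
}

// On-demand variant: callable any number of times; reports and returns.
int DoRecoverableLeakCheckSketch() {
  std::lock_guard<std::mutex> l(g_mutex);
  return CheckForLeaksStub() ? 1 : 0;
}
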
@@ -679,6 +702,15 @@ void __lsan_do_leak_check() {
#endif // CAN_SANITIZE_LEAKS
}
+SANITIZER_INTERFACE_ATTRIBUTE
+int __lsan_do_recoverable_leak_check() {
+#if CAN_SANITIZE_LEAKS
+ if (common_flags()->detect_leaks)
+ return __lsan::DoRecoverableLeakCheck();
+#endif // CAN_SANITIZE_LEAKS
+ return 0;
+}
+
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off() {
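
From user code, the new entry point is usable roughly like this, assuming a clang/compiler-rt build with leak detection enabled (e.g. -fsanitize=leak) and the declaration from <sanitizer/lsan_interface.h>:

#include <sanitizer/lsan_interface.h>
#include <cstdio>
#include <cstdlib>

int main() {
  void *p = std::malloc(64);
  p = nullptr;  // the allocation is now unreachable, i.e. leaked
  // Unlike __lsan_do_leak_check(), the recoverable variant prints the
  // report and returns nonzero instead of terminating the process.
  if (__lsan_do_recoverable_leak_check())
    std::printf("leaks detected; continuing anyway\n");
  return 0;
}
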
diff --git a/lib/lsan/lsan_common.h b/lib/lsan/lsan_common.h
index 2c3a12ab6bd8..4f9d24fb3ab9 100644
--- a/lib/lsan/lsan_common.h
+++ b/lib/lsan/lsan_common.h
@@ -19,6 +19,7 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_platform.h"
+#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips64)) \
@@ -99,6 +100,8 @@ typedef InternalMmapVector<uptr> Frontier;
void InitializePlatformSpecificModules();
void ProcessGlobalRegions(Frontier *frontier);
void ProcessPlatformSpecificAllocations(Frontier *frontier);
+// Run stoptheworld while holding any platform-specific locks.
+void DoStopTheWorld(StopTheWorldCallback callback, void* argument);
void ScanRangeForPointers(uptr begin, uptr end,
Frontier *frontier,
diff --git a/lib/lsan/lsan_common_linux.cc b/lib/lsan/lsan_common_linux.cc
index ba51868c76e8..2955343e1f0b 100644
--- a/lib/lsan/lsan_common_linux.cc
+++ b/lib/lsan/lsan_common_linux.cc
@@ -85,10 +85,6 @@ static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
// Scans global variables for heap pointers.
void ProcessGlobalRegions(Frontier *frontier) {
if (!flags()->use_globals) return;
- // FIXME: dl_iterate_phdr acquires a linker lock, so we run a risk of
- // deadlocking by running this under StopTheWorld. However, the lock is
- // reentrant, so we should be able to fix this by acquiring the lock before
- // suspending threads.
dl_iterate_phdr(ProcessGlobalRegionsCallback, frontier);
}
@@ -114,7 +110,7 @@ static void ProcessPlatformSpecificAllocationsCb(uptr chunk, void *arg) {
reinterpret_cast<ProcessPlatformAllocParam *>(arg);
chunk = GetUserBegin(chunk);
LsanMetadata m(chunk);
- if (m.allocated() && m.tag() != kReachable) {
+ if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) {
u32 stack_id = m.stack_trace_id();
uptr caller_pc = 0;
if (stack_id > 0)
@@ -153,5 +149,30 @@ void ProcessPlatformSpecificAllocations(Frontier *frontier) {
ForEachChunk(ProcessPlatformSpecificAllocationsCb, &arg);
}
+struct DoStopTheWorldParam {
+ StopTheWorldCallback callback;
+ void *argument;
+};
+
+static int DoStopTheWorldCallback(struct dl_phdr_info *info, size_t size,
+ void *data) {
+ DoStopTheWorldParam *param = reinterpret_cast<DoStopTheWorldParam *>(data);
+ StopTheWorld(param->callback, param->argument);
+ return 1;
+}
+
+// LSan calls dl_iterate_phdr() from the tracer task. This may deadlock: if one
+// of the threads is frozen while holding the libdl lock, the tracer will hang
+// in dl_iterate_phdr() forever.
+// Luckily, (a) the lock is reentrant and (b) libc can't distinguish between the
+// tracer task and the thread that spawned it. Thus, if we run the tracer task
+// while holding the libdl lock in the parent thread, we can safely reenter it
+// in the tracer. The solution is to run stoptheworld from a dl_iterate_phdr()
+// callback in the parent thread.
+void DoStopTheWorld(StopTheWorldCallback callback, void *argument) {
+ DoStopTheWorldParam param = {callback, argument};
+ dl_iterate_phdr(DoStopTheWorldCallback, &param);
+}
+
} // namespace __lsan
#endif // CAN_SANITIZE_LEAKS && SANITIZER_LINUX
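
The trick, in isolation: take the libdl lock in the parent thread by entering a dl_iterate_phdr() callback, and do the thread-suspension work from there. A minimal Linux/glibc sketch follows; RunUnderLibdlLock and Work are illustrative names, and an ordinary function call stands in for StopTheWorld.

#include <link.h>
#include <cstdio>

struct Param {
  void (*callback)(void *);
  void *argument;
};

// dl_iterate_phdr() runs its callback while libdl's (reentrant) lock is
// held by the calling thread. Running the stop-the-world machinery from
// inside the callback means the tracer, which libc cannot tell apart
// from this thread, can reenter dl_iterate_phdr() without deadlocking.
static int RunUnderLibdlLock(struct dl_phdr_info *info, size_t size,
                             void *data) {
  Param *param = static_cast<Param *>(data);
  param->callback(param->argument);
  return 1;  // nonzero stops the iteration after the first object
}

static void Work(void *arg) {
  // Safe to call dl_iterate_phdr() again here: the lock is already held
  // by this thread and is reentrant.
  std::printf("running with the libdl lock held (%s)\n",
              static_cast<const char *>(arg));
}

int main() {
  static char msg[] = "sketch";
  Param param = {Work, msg};
  dl_iterate_phdr(RunUnderLibdlLock, &param);
}
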
diff --git a/lib/lsan/lsan_interceptors.cc b/lib/lsan/lsan_interceptors.cc
index ba2519dcb25d..61a92154d95e 100644
--- a/lib/lsan/lsan_interceptors.cc
+++ b/lib/lsan/lsan_interceptors.cc
@@ -208,7 +208,7 @@ extern "C" void *__lsan_thread_start_func(void *arg) {
// Wait until the last iteration to maximize the chance that we are the last
// destructor to run.
if (pthread_setspecific(g_thread_finalize_key,
- (void*)kPthreadDestructorIterations)) {
+ (void*)GetPthreadDestructorIterations())) {
Report("LeakSanitizer: failed to set thread key.\n");
Die();
}
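
The interceptor relies on a standard pthreads countdown idiom: TSD destructors are rerun up to PTHREAD_DESTRUCTOR_ITERATIONS times as long as some destructor re-sets its key, so storing the iteration budget as the key's value and counting down defers cleanup to the final round. The patch only changes where that budget comes from: a runtime GetPthreadDestructorIterations() query instead of the compile-time kPthreadDestructorIterations constant. A standalone sketch of the idiom (compile with -pthread):

#include <pthread.h>
#include <climits>
#include <cstdint>
#include <cstdio>

static pthread_key_t g_key;

// Runs once per destructor round; re-registers itself until the budget
// stored in the key's value reaches the last iteration.
static void Finalizer(void *arg) {
  uintptr_t iterations = reinterpret_cast<uintptr_t>(arg);
  if (iterations > 1) {
    // Not the last round yet: re-set the key so we run again later.
    pthread_setspecific(g_key, reinterpret_cast<void *>(iterations - 1));
    return;
  }
  std::printf("final-iteration cleanup\n");
}

static void *ThreadMain(void *) {
  // Seed with the platform's iteration budget.
  pthread_setspecific(
      g_key, reinterpret_cast<void *>(
                 static_cast<uintptr_t>(PTHREAD_DESTRUCTOR_ITERATIONS)));
  return nullptr;
}

int main() {
  pthread_key_create(&g_key, Finalizer);
  pthread_t t;
  pthread_create(&t, nullptr, ThreadMain, nullptr);
  pthread_join(t, nullptr);
}
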
diff --git a/lib/lsan/lsan_thread.h b/lib/lsan/lsan_thread.h
index 4641b32ed6e6..99e2c1d5e64b 100644
--- a/lib/lsan/lsan_thread.h
+++ b/lib/lsan/lsan_thread.h
@@ -22,8 +22,8 @@ namespace __lsan {
class ThreadContext : public ThreadContextBase {
public:
explicit ThreadContext(int tid);
- void OnStarted(void *arg);
- void OnFinished();
+ void OnStarted(void *arg) override;
+ void OnFinished() override;
uptr stack_begin() { return stack_begin_; }
uptr stack_end() { return stack_end_; }
uptr tls_begin() { return tls_begin_; }
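
The lsan_thread.h change is purely a compile-time safety improvement: marking the two virtuals override makes the compiler verify they actually match a ThreadContextBase virtual. A contrived illustration of what the keyword catches:

struct Base {
  virtual void OnStarted(void *arg) {}
  virtual ~Base() = default;
};

struct Derived : Base {
  void OnStarted(void *arg) override {}  // OK: matches the base signature

  // With 'override', signature drift is a hard error instead of a
  // silently unrelated new virtual:
  // void OnStarted(int arg) override {}  // error: does not override
};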