Diffstat (limited to 'lib/msan/msan_allocator.cc')
-rw-r--r--  lib/msan/msan_allocator.cc  154
1 files changed, 93 insertions, 61 deletions
diff --git a/lib/msan/msan_allocator.cc b/lib/msan/msan_allocator.cc
index 2badf712188b..f21d71409ce2 100644
--- a/lib/msan/msan_allocator.cc
+++ b/lib/msan/msan_allocator.cc
@@ -13,8 +13,11 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator.h"
-#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "msan.h"
+#include "msan_allocator.h"
+#include "msan_origin.h"
+#include "msan_thread.h"
namespace __msan {
@@ -22,20 +25,47 @@ struct Metadata {
uptr requested_size;
};
-static const uptr kAllocatorSpace = 0x600000000000ULL;
-static const uptr kAllocatorSize = 0x80000000000; // 8T.
-static const uptr kMetadataSize = sizeof(Metadata);
-static const uptr kMaxAllowedMallocSize = 8UL << 30;
+struct MsanMapUnmapCallback {
+ void OnMap(uptr p, uptr size) const {}
+ void OnUnmap(uptr p, uptr size) const {
+ __msan_unpoison((void *)p, size);
-typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, kMetadataSize,
- DefaultSizeClassMap> PrimaryAllocator;
+ // We are about to unmap a chunk of user memory.
+ // Mark the corresponding shadow memory as not needed.
+ FlushUnneededShadowMemory(MEM_TO_SHADOW(p), size);
+ if (__msan_get_track_origins())
+ FlushUnneededShadowMemory(MEM_TO_ORIGIN(p), size);
+ }
+};
+
+#if defined(__mips64)
+ static const uptr kMaxAllowedMallocSize = 2UL << 30;
+ static const uptr kRegionSizeLog = 20;
+ static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
+ typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
+ typedef CompactSizeClassMap SizeClassMap;
+
+ typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE, sizeof(Metadata),
+ SizeClassMap, kRegionSizeLog, ByteMap,
+ MsanMapUnmapCallback> PrimaryAllocator;
+#elif defined(__x86_64__)
+ static const uptr kAllocatorSpace = 0x600000000000ULL;
+ static const uptr kAllocatorSize = 0x80000000000; // 8T.
+ static const uptr kMetadataSize = sizeof(Metadata);
+ static const uptr kMaxAllowedMallocSize = 8UL << 30;
+
+ typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, kMetadataSize,
+ DefaultSizeClassMap,
+ MsanMapUnmapCallback> PrimaryAllocator;
+#endif
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
-typedef LargeMmapAllocator<> SecondaryAllocator;
+typedef LargeMmapAllocator<MsanMapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
SecondaryAllocator> Allocator;
-static THREADLOCAL AllocatorCache cache;
static Allocator allocator;
+static AllocatorCache fallback_allocator_cache;
+static SpinMutex fallback_mutex;
static int inited = 0;
@@ -46,42 +76,55 @@ static inline void Init() {
allocator.Init();
}
-void MsanAllocatorThreadFinish() {
- allocator.SwallowCache(&cache);
+AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
+ CHECK(ms);
+ CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator_cache));
+ return reinterpret_cast<AllocatorCache *>(ms->allocator_cache);
}
-static void *MsanAllocate(StackTrace *stack, uptr size,
- uptr alignment, bool zeroise) {
+void MsanThreadLocalMallocStorage::CommitBack() {
+ allocator.SwallowCache(GetAllocatorCache(this));
+}
+
+static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
+ bool zeroise) {
Init();
if (size > kMaxAllowedMallocSize) {
Report("WARNING: MemorySanitizer failed to allocate %p bytes\n",
(void *)size);
return AllocatorReturnNull();
}
- void *res = allocator.Allocate(&cache, size, alignment, false);
- Metadata *meta = reinterpret_cast<Metadata*>(allocator.GetMetaData(res));
+ MsanThread *t = GetCurrentThread();
+ void *allocated;
+ if (t) {
+ AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
+ allocated = allocator.Allocate(cache, size, alignment, false);
+ } else {
+ SpinMutexLock l(&fallback_mutex);
+ AllocatorCache *cache = &fallback_allocator_cache;
+ allocated = allocator.Allocate(cache, size, alignment, false);
+ }
+ Metadata *meta =
+ reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
meta->requested_size = size;
if (zeroise) {
- __msan_clear_and_unpoison(res, size);
+ __msan_clear_and_unpoison(allocated, size);
} else if (flags()->poison_in_malloc) {
- __msan_poison(res, size);
+ __msan_poison(allocated, size);
if (__msan_get_track_origins()) {
- u32 stack_id = StackDepotPut(stack->trace, stack->size);
- CHECK(stack_id);
- CHECK_EQ((stack_id >> 31),
- 0); // Higher bit is occupied by stack origins.
- __msan_set_origin(res, size, stack_id);
+ Origin o = Origin::CreateHeapOrigin(stack);
+ __msan_set_origin(allocated, size, o.raw_id());
}
}
- MSAN_MALLOC_HOOK(res, size);
- return res;
+ MSAN_MALLOC_HOOK(allocated, size);
+ return allocated;
}
void MsanDeallocate(StackTrace *stack, void *p) {
CHECK(p);
Init();
MSAN_FREE_HOOK(p);
- Metadata *meta = reinterpret_cast<Metadata*>(allocator.GetMetaData(p));
+ Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(p));
uptr size = meta->requested_size;
meta->requested_size = 0;
// This memory will not be reused by anyone else, so we are free to keep it
@@ -89,14 +132,19 @@ void MsanDeallocate(StackTrace *stack, void *p) {
if (flags()->poison_in_free) {
__msan_poison(p, size);
if (__msan_get_track_origins()) {
- u32 stack_id = StackDepotPut(stack->trace, stack->size);
- CHECK(stack_id);
- CHECK_EQ((stack_id >> 31),
- 0); // Higher bit is occupied by stack origins.
- __msan_set_origin(p, size, stack_id);
+ Origin o = Origin::CreateHeapOrigin(stack);
+ __msan_set_origin(p, size, o.raw_id());
}
}
- allocator.Deallocate(&cache, p);
+ MsanThread *t = GetCurrentThread();
+ if (t) {
+ AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
+ allocator.Deallocate(cache, p);
+ } else {
+ SpinMutexLock l(&fallback_mutex);
+ AllocatorCache *cache = &fallback_allocator_cache;
+ allocator.Deallocate(cache, p);
+ }
}
void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
@@ -128,12 +176,10 @@ void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
}
static uptr AllocationSize(const void *p) {
- if (p == 0)
- return 0;
+ if (p == 0) return 0;
const void *beg = allocator.GetBlockBegin(p);
- if (beg != p)
- return 0;
- Metadata *b = (Metadata*)allocator.GetMetaData(p);
+ if (beg != p) return 0;
+ Metadata *b = (Metadata *)allocator.GetMetaData(p);
return b->requested_size;
}
@@ -141,38 +187,24 @@ static uptr AllocationSize(const void *p) {
using namespace __msan;
-uptr __msan_get_current_allocated_bytes() {
- u64 stats[AllocatorStatCount];
+uptr __sanitizer_get_current_allocated_bytes() {
+ uptr stats[AllocatorStatCount];
allocator.GetStats(stats);
- u64 m = stats[AllocatorStatMalloced];
- u64 f = stats[AllocatorStatFreed];
- return m >= f ? m - f : 1;
+ return stats[AllocatorStatAllocated];
}
-uptr __msan_get_heap_size() {
- u64 stats[AllocatorStatCount];
+uptr __sanitizer_get_heap_size() {
+ uptr stats[AllocatorStatCount];
allocator.GetStats(stats);
- u64 m = stats[AllocatorStatMmapped];
- u64 f = stats[AllocatorStatUnmapped];
- return m >= f ? m - f : 1;
+ return stats[AllocatorStatMapped];
}
-uptr __msan_get_free_bytes() {
- return 1;
-}
+uptr __sanitizer_get_free_bytes() { return 1; }
-uptr __msan_get_unmapped_bytes() {
- return 1;
-}
+uptr __sanitizer_get_unmapped_bytes() { return 1; }
-uptr __msan_get_estimated_allocated_size(uptr size) {
- return size;
-}
+uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
-int __msan_get_ownership(const void *p) {
- return AllocationSize(p) != 0;
-}
+int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }
-uptr __msan_get_allocated_size(const void *p) {
- return AllocationSize(p);
-}
+uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }
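The core change in this patch replaces the single THREADLOCAL allocator cache with a per-thread cache (looked up via GetCurrentThread() and malloc_storage()) plus a mutex-protected fallback cache for allocations that happen before a thread object exists. Below is a minimal, standalone C++ sketch of that fast-path/slow-path pattern; the names (Cache, thread_cache, fallback_cache, AllocateStub) are invented for illustration, and std::mutex stands in for the sanitizer's SpinMutex.

// Sketch only: per-thread cache with a locked global fallback, assuming
// hypothetical names. The real code routes both paths through the combined
// sanitizer allocator rather than plain malloc.
#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <mutex>

struct Cache {
  int hits = 0;  // stand-in for per-thread allocator state
};

// Fast path: each thread owns its cache, so no synchronization is needed.
static thread_local Cache *thread_cache = nullptr;

// Slow path: shared cache for contexts with no registered thread
// (e.g. early startup), serialized by a mutex.
static Cache fallback_cache;
static std::mutex fallback_mutex;

void *AllocateStub(std::size_t size) {
  if (thread_cache) {
    ++thread_cache->hits;
    return std::malloc(size);
  }
  std::lock_guard<std::mutex> lock(fallback_mutex);
  ++fallback_cache.hits;
  return std::malloc(size);
}

int main() {
  void *p = AllocateStub(64);  // no thread cache registered: takes the fallback path
  std::printf("fallback hits: %d\n", fallback_cache.hits);
  std::free(p);
}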