aboutsummaryrefslogtreecommitdiff
path: root/lib/msan/msan_allocator.cc
diff options
context:
space:
mode:
Diffstat (limited to 'lib/msan/msan_allocator.cc')
-rw-r--r--  lib/msan/msan_allocator.cc  99
1 file changed, 85 insertions(+), 14 deletions(-)
diff --git a/lib/msan/msan_allocator.cc b/lib/msan/msan_allocator.cc
index 7435843ce61e..2badf712188b 100644
--- a/lib/msan/msan_allocator.cc
+++ b/lib/msan/msan_allocator.cc
@@ -25,6 +25,7 @@ struct Metadata {
static const uptr kAllocatorSpace = 0x600000000000ULL;
static const uptr kAllocatorSize = 0x80000000000; // 8T.
static const uptr kMetadataSize = sizeof(Metadata);
+static const uptr kMaxAllowedMallocSize = 8UL << 30;
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, kMetadataSize,
DefaultSizeClassMap> PrimaryAllocator;
@@ -45,35 +46,56 @@ static inline void Init() {
allocator.Init();
}
+void MsanAllocatorThreadFinish() {
+ allocator.SwallowCache(&cache);
+}
+
static void *MsanAllocate(StackTrace *stack, uptr size,
uptr alignment, bool zeroise) {
Init();
+ if (size > kMaxAllowedMallocSize) {
+ Report("WARNING: MemorySanitizer failed to allocate %p bytes\n",
+ (void *)size);
+ return AllocatorReturnNull();
+ }
void *res = allocator.Allocate(&cache, size, alignment, false);
Metadata *meta = reinterpret_cast<Metadata*>(allocator.GetMetaData(res));
meta->requested_size = size;
- if (zeroise)
+ if (zeroise) {
__msan_clear_and_unpoison(res, size);
- else if (flags()->poison_in_malloc)
+ } else if (flags()->poison_in_malloc) {
__msan_poison(res, size);
- if (__msan_get_track_origins()) {
- u32 stack_id = StackDepotPut(stack->trace, stack->size);
- CHECK(stack_id);
- CHECK_EQ((stack_id >> 31), 0); // Higher bit is occupied by stack origins.
- __msan_set_origin(res, size, stack_id);
+ if (__msan_get_track_origins()) {
+ u32 stack_id = StackDepotPut(stack->trace, stack->size);
+ CHECK(stack_id);
+ CHECK_EQ((stack_id >> 31),
+ 0); // Higher bit is occupied by stack origins.
+ __msan_set_origin(res, size, stack_id);
+ }
}
+ MSAN_MALLOC_HOOK(res, size);
return res;
}
-void MsanDeallocate(void *p) {
+void MsanDeallocate(StackTrace *stack, void *p) {
CHECK(p);
Init();
+ MSAN_FREE_HOOK(p);
Metadata *meta = reinterpret_cast<Metadata*>(allocator.GetMetaData(p));
uptr size = meta->requested_size;
+ meta->requested_size = 0;
// This memory will not be reused by anyone else, so we are free to keep it
// poisoned.
- __msan_poison(p, size);
- if (__msan_get_track_origins())
- __msan_set_origin(p, size, -1);
+ if (flags()->poison_in_free) {
+ __msan_poison(p, size);
+ if (__msan_get_track_origins()) {
+ u32 stack_id = StackDepotPut(stack->trace, stack->size);
+ CHECK(stack_id);
+ CHECK_EQ((stack_id >> 31),
+ 0); // Higher bit is occupied by stack origins.
+ __msan_set_origin(p, size, stack_id);
+ }
+ }
allocator.Deallocate(&cache, p);
}
@@ -82,7 +104,7 @@ void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
if (!old_p)
return MsanAllocate(stack, new_size, alignment, zeroise);
if (!new_size) {
- MsanDeallocate(old_p);
+ MsanDeallocate(stack, old_p);
return 0;
}
Metadata *meta = reinterpret_cast<Metadata*>(allocator.GetMetaData(old_p));
@@ -98,10 +120,59 @@ void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
uptr memcpy_size = Min(new_size, old_size);
void *new_p = MsanAllocate(stack, new_size, alignment, zeroise);
// Printf("realloc: old_size %zd new_size %zd\n", old_size, new_size);
- if (new_p)
+ if (new_p) {
__msan_memcpy(new_p, old_p, memcpy_size);
- MsanDeallocate(old_p);
+ MsanDeallocate(stack, old_p);
+ }
return new_p;
}
+static uptr AllocationSize(const void *p) {
+ if (p == 0)
+ return 0;
+ const void *beg = allocator.GetBlockBegin(p);
+ if (beg != p)
+ return 0;
+ Metadata *b = (Metadata*)allocator.GetMetaData(p);
+ return b->requested_size;
+}
+
} // namespace __msan
+
+using namespace __msan;
+
+uptr __msan_get_current_allocated_bytes() {
+ u64 stats[AllocatorStatCount];
+ allocator.GetStats(stats);
+ u64 m = stats[AllocatorStatMalloced];
+ u64 f = stats[AllocatorStatFreed];
+ return m >= f ? m - f : 1;
+}
+
+uptr __msan_get_heap_size() {
+ u64 stats[AllocatorStatCount];
+ allocator.GetStats(stats);
+ u64 m = stats[AllocatorStatMmapped];
+ u64 f = stats[AllocatorStatUnmapped];
+ return m >= f ? m - f : 1;
+}
+
+uptr __msan_get_free_bytes() {
+ return 1;
+}
+
+uptr __msan_get_unmapped_bytes() {
+ return 1;
+}
+
+uptr __msan_get_estimated_allocated_size(uptr size) {
+ return size;
+}
+
+int __msan_get_ownership(const void *p) {
+ return AllocationSize(p) != 0;
+}
+
+uptr __msan_get_allocated_size(const void *p) {
+ return AllocationSize(p);
+}