path: root/lib/sanitizer_common/sanitizer_allocator.h
author    Dimitry Andric <dim@FreeBSD.org>  2014-11-06 22:49:13 +0000
committer Dimitry Andric <dim@FreeBSD.org>  2014-11-06 22:49:13 +0000
commit    8ef50bf3d1c287b5013c3168de77a462dfce3495 (patch)
tree      3467f3372c1195b1546172d89af2205a50b1866d /lib/sanitizer_common/sanitizer_allocator.h
parent    11023dc647fd8f41418da90d59db138400d0f334 (diff)
Import compiler-rt release_34 branch r197381. (tag: vendor/compiler-rt/compiler-rt-r197381)
Notes:
    svn path=/vendor/compiler-rt/dist/; revision=274201
    svn path=/vendor/compiler-rt/compiler-rt-r197381/; revision=274202; tag=vendor/compiler-rt/compiler-rt-r197381
Diffstat (limited to 'lib/sanitizer_common/sanitizer_allocator.h')
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator.h | 164
1 file changed, 111 insertions, 53 deletions
diff --git a/lib/sanitizer_common/sanitizer_allocator.h b/lib/sanitizer_common/sanitizer_allocator.h
index 0542addb7f37..6075cfe92aac 100644
--- a/lib/sanitizer_common/sanitizer_allocator.h
+++ b/lib/sanitizer_common/sanitizer_allocator.h
@@ -23,6 +23,9 @@
namespace __sanitizer {
+// Depending on allocator_may_return_null either return 0 or crash.
+void *AllocatorReturnNull();
+
// SizeClassMap maps allocation sizes into size classes and back.
// Class 0 corresponds to size 0.
// Classes 1 - 16 correspond to sizes 16 to 256 (size = class_id * 16).
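
[Note] For reference, a minimal sketch of the size/class rule stated in the comment above, valid only for the small classes 1..16 (size = class_id * 16). ToySize and ToyClassID are hypothetical stand-ins for SizeClassMap::Size and SizeClassMap::ClassID; the real map also defines coarser classes above 256 bytes that this toy version ignores.

// Illustrative only: models "size = class_id * 16" for classes 1..16.
#include <cassert>

typedef unsigned long uptr;               // stand-in for the sanitizer's uptr

static uptr ToySize(uptr class_id) {      // hypothetical mirror of Size()
  return class_id * 16;                   // classes 1..16 -> sizes 16..256
}

static uptr ToyClassID(uptr size) {       // hypothetical mirror of ClassID()
  return (size + 15) / 16;                // round the request up to a 16-byte class
}

int main() {
  assert(ToySize(1) == 16 && ToySize(16) == 256);
  assert(ToyClassID(0) == 0);             // class 0 corresponds to size 0
  assert(ToyClassID(1) == 1);             // a 1-byte request lands in the 16-byte class
  assert(ToyClassID(17) == 2);            // 17 bytes rounds up to the 32-byte class
  return 0;
}
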
@@ -279,6 +282,9 @@ struct NoOpMapUnmapCallback {
void OnUnmap(uptr p, uptr size) const { }
};
+// Callback type for iterating over chunks.
+typedef void (*ForEachChunkCallback)(uptr chunk, void *arg);
+
// SizeClassAllocator64 -- allocator for 64-bit address space.
//
// Space: a portion of address space of kSpaceSize bytes starting at
@@ -344,15 +350,15 @@ class SizeClassAllocator64 {
region->n_freed += b->count;
}
- static bool PointerIsMine(void *p) {
+ static bool PointerIsMine(const void *p) {
return reinterpret_cast<uptr>(p) / kSpaceSize == kSpaceBeg / kSpaceSize;
}
- static uptr GetSizeClass(void *p) {
+ static uptr GetSizeClass(const void *p) {
return (reinterpret_cast<uptr>(p) / kRegionSize) % kNumClassesRounded;
}
- void *GetBlockBegin(void *p) {
+ void *GetBlockBegin(const void *p) {
uptr class_id = GetSizeClass(p);
uptr size = SizeClassMap::Size(class_id);
if (!size) return 0;
@@ -374,7 +380,7 @@ class SizeClassAllocator64 {
uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
- void *GetMetaData(void *p) {
+ void *GetMetaData(const void *p) {
uptr class_id = GetSizeClass(p);
uptr size = SizeClassMap::Size(class_id);
uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
@@ -433,20 +439,18 @@ class SizeClassAllocator64 {
}
}
- // Iterate over existing chunks. May include chunks that are not currently
- // allocated to the user (e.g. freed).
- // The caller is expected to call ForceLock() before calling this function.
- template<typename Callable>
- void ForEachChunk(const Callable &callback) {
+ // Iterate over all existing chunks.
+ // The allocator must be locked when calling this function.
+ void ForEachChunk(ForEachChunkCallback callback, void *arg) {
for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
RegionInfo *region = GetRegionInfo(class_id);
uptr chunk_size = SizeClassMap::Size(class_id);
uptr region_beg = kSpaceBeg + class_id * kRegionSize;
- for (uptr p = region_beg;
- p < region_beg + region->allocated_user;
- p += chunk_size) {
- // Too slow: CHECK_EQ((void *)p, GetBlockBegin((void *)p));
- callback((void *)p);
+ for (uptr chunk = region_beg;
+ chunk < region_beg + region->allocated_user;
+ chunk += chunk_size) {
+ // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
+ callback(chunk, arg);
}
}
}
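
[Note] The template Callable parameter described by the removed comment is replaced throughout this diff by the plain ForEachChunkCallback typedef added above, and the iterated chunk is now passed as a uptr plus a user argument. A minimal usage sketch; CountChunk, CountChunks and the Allocator template parameter are illustrative helpers, not part of the header, and per the new comment the allocator must be locked around the call.

// Sketch of driving the new callback-based ForEachChunk().
typedef unsigned long uptr;                         // stand-in for uptr

static void CountChunk(uptr chunk, void *arg) {
  (void)chunk;                                      // chunk is the block address
  ++*reinterpret_cast<uptr *>(arg);                 // arg carries the caller's state
}

template <class Allocator>
uptr CountChunks(Allocator *a) {
  uptr n = 0;
  a->ForceLock();                  // the allocator must be locked during iteration
  a->ForEachChunk(CountChunk, &n);
  a->ForceUnlock();
  return n;
}
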
@@ -639,7 +643,7 @@ class SizeClassAllocator32 {
alignment <= SizeClassMap::kMaxSize;
}
- void *GetMetaData(void *p) {
+ void *GetMetaData(const void *p) {
CHECK(PointerIsMine(p));
uptr mem = reinterpret_cast<uptr>(p);
uptr beg = ComputeRegionBeg(mem);
@@ -671,15 +675,15 @@ class SizeClassAllocator32 {
sci->free_list.push_front(b);
}
- bool PointerIsMine(void *p) {
+ bool PointerIsMine(const void *p) {
return GetSizeClass(p) != 0;
}
- uptr GetSizeClass(void *p) {
+ uptr GetSizeClass(const void *p) {
return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
}
- void *GetBlockBegin(void *p) {
+ void *GetBlockBegin(const void *p) {
CHECK(PointerIsMine(p));
uptr mem = reinterpret_cast<uptr>(p);
uptr beg = ComputeRegionBeg(mem);
@@ -726,21 +730,19 @@ class SizeClassAllocator32 {
}
}
- // Iterate over existing chunks. May include chunks that are not currently
- // allocated to the user (e.g. freed).
- // The caller is expected to call ForceLock() before calling this function.
- template<typename Callable>
- void ForEachChunk(const Callable &callback) {
+ // Iterate over all existing chunks.
+ // The allocator must be locked when calling this function.
+ void ForEachChunk(ForEachChunkCallback callback, void *arg) {
for (uptr region = 0; region < kNumPossibleRegions; region++)
if (possible_regions[region]) {
uptr chunk_size = SizeClassMap::Size(possible_regions[region]);
uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
uptr region_beg = region * kRegionSize;
- for (uptr p = region_beg;
- p < region_beg + max_chunks_in_region * chunk_size;
- p += chunk_size) {
- // Too slow: CHECK_EQ((void *)p, GetBlockBegin((void *)p));
- callback((void *)p);
+ for (uptr chunk = region_beg;
+ chunk < region_beg + max_chunks_in_region * chunk_size;
+ chunk += chunk_size) {
+ // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
+ callback(chunk, arg);
}
}
}
@@ -779,7 +781,7 @@ class SizeClassAllocator32 {
MapUnmapCallback().OnMap(res, kRegionSize);
stat->Add(AllocatorStatMmapped, kRegionSize);
CHECK_EQ(0U, (res & (kRegionSize - 1)));
- possible_regions.set(ComputeRegionId(res), class_id);
+ possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
return res;
}
@@ -942,7 +944,7 @@ class LargeMmapAllocator {
uptr map_size = RoundUpMapSize(size);
if (alignment > page_size_)
map_size += alignment;
- if (map_size < size) return 0; // Overflow.
+ if (map_size < size) return AllocatorReturnNull(); // Overflow.
uptr map_beg = reinterpret_cast<uptr>(
MmapOrDie(map_size, "LargeMmapAllocator"));
MapUnmapCallback().OnMap(map_beg, map_size);
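
[Note] The overflow path above (and the matching one in CombinedAllocator::Allocate further down) now goes through AllocatorReturnNull() instead of returning 0 unconditionally. Its definition is not part of this header; the following is only a sketch of the behavior described by the comment at the top of the diff ("depending on allocator_may_return_null either return 0 or crash"), with the flag storage and the crash path invented for illustration.

// Hedged sketch only -- the real AllocatorReturnNull() lives in a .cc file that
// is not part of this diff. The flag name comes from the header comment; the
// abort() call stands in for whatever fatal-error path the runtime really uses.
#include <cstdlib>

static bool allocator_may_return_null = false;   // hypothetical flag storage

void *AllocatorReturnNullSketch() {
  if (allocator_may_return_null)
    return 0;                                    // caller propagates the null result
  abort();                                       // otherwise treat it as a fatal error
}
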
@@ -961,6 +963,7 @@ class LargeMmapAllocator {
{
SpinMutexLock l(&mutex_);
uptr idx = n_chunks_++;
+ chunks_sorted_ = false;
CHECK_LT(idx, kMaxNumChunks);
h->chunk_idx = idx;
chunks_[idx] = h;
@@ -984,6 +987,7 @@ class LargeMmapAllocator {
chunks_[idx] = chunks_[n_chunks_ - 1];
chunks_[idx]->chunk_idx = idx;
n_chunks_--;
+ chunks_sorted_ = false;
stats.n_frees++;
stats.currently_allocated -= h->map_size;
stat->Add(AllocatorStatFreed, h->map_size);
@@ -1004,7 +1008,7 @@ class LargeMmapAllocator {
return res;
}
- bool PointerIsMine(void *p) {
+ bool PointerIsMine(const void *p) {
return GetBlockBegin(p) != 0;
}
@@ -1013,13 +1017,16 @@ class LargeMmapAllocator {
}
// At least page_size_/2 metadata bytes is available.
- void *GetMetaData(void *p) {
+ void *GetMetaData(const void *p) {
// Too slow: CHECK_EQ(p, GetBlockBegin(p));
- CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));
+ if (!IsAligned(reinterpret_cast<uptr>(p), page_size_)) {
+ Printf("%s: bad pointer %p\n", SanitizerToolName, p);
+ CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));
+ }
return GetHeader(p) + 1;
}
- void *GetBlockBegin(void *ptr) {
+ void *GetBlockBegin(const void *ptr) {
uptr p = reinterpret_cast<uptr>(ptr);
SpinMutexLock l(&mutex_);
uptr nearest_chunk = 0;
@@ -1041,6 +1048,49 @@ class LargeMmapAllocator {
return GetUser(h);
}
+ // This function does the same as GetBlockBegin, but is much faster.
+ // Must be called with the allocator locked.
+ void *GetBlockBeginFastLocked(void *ptr) {
+ mutex_.CheckLocked();
+ uptr p = reinterpret_cast<uptr>(ptr);
+ uptr n = n_chunks_;
+ if (!n) return 0;
+ if (!chunks_sorted_) {
+ // Do one-time sort. chunks_sorted_ is reset in Allocate/Deallocate.
+ SortArray(reinterpret_cast<uptr*>(chunks_), n);
+ for (uptr i = 0; i < n; i++)
+ chunks_[i]->chunk_idx = i;
+ chunks_sorted_ = true;
+ min_mmap_ = reinterpret_cast<uptr>(chunks_[0]);
+ max_mmap_ = reinterpret_cast<uptr>(chunks_[n - 1]) +
+ chunks_[n - 1]->map_size;
+ }
+ if (p < min_mmap_ || p >= max_mmap_)
+ return 0;
+ uptr beg = 0, end = n - 1;
+ // This loop is a log(n) lower_bound. It does not check for the exact match
+ // to avoid expensive cache-thrashing loads.
+ while (end - beg >= 2) {
+ uptr mid = (beg + end) / 2; // Invariant: mid >= beg + 1
+ if (p < reinterpret_cast<uptr>(chunks_[mid]))
+ end = mid - 1; // We are not interested in chunks_[mid].
+ else
+ beg = mid; // chunks_[mid] may still be what we want.
+ }
+
+ if (beg < end) {
+ CHECK_EQ(beg + 1, end);
+ // There are 2 chunks left, choose one.
+ if (p >= reinterpret_cast<uptr>(chunks_[end]))
+ beg = end;
+ }
+
+ Header *h = chunks_[beg];
+ if (h->map_beg + h->map_size <= p || p < h->map_beg)
+ return 0;
+ return GetUser(h);
+ }
+
void PrintStats() {
Printf("Stats: LargeMmapAllocator: allocated %zd times, "
"remains %zd (%zd K) max %zd M; by size logs: ",
@@ -1064,13 +1114,11 @@ class LargeMmapAllocator {
mutex_.Unlock();
}
- // Iterate over existing chunks. May include chunks that are not currently
- // allocated to the user (e.g. freed).
- // The caller is expected to call ForceLock() before calling this function.
- template<typename Callable>
- void ForEachChunk(const Callable &callback) {
+ // Iterate over all existing chunks.
+ // The allocator must be locked when calling this function.
+ void ForEachChunk(ForEachChunkCallback callback, void *arg) {
for (uptr i = 0; i < n_chunks_; i++)
- callback(GetUser(chunks_[i]));
+ callback(reinterpret_cast<uptr>(GetUser(chunks_[i])), arg);
}
private:
@@ -1083,13 +1131,15 @@ class LargeMmapAllocator {
};
Header *GetHeader(uptr p) {
- CHECK_EQ(p % page_size_, 0);
+ CHECK(IsAligned(p, page_size_));
return reinterpret_cast<Header*>(p - page_size_);
}
- Header *GetHeader(void *p) { return GetHeader(reinterpret_cast<uptr>(p)); }
+ Header *GetHeader(const void *p) {
+ return GetHeader(reinterpret_cast<uptr>(p));
+ }
void *GetUser(Header *h) {
- CHECK_EQ((uptr)h % page_size_, 0);
+ CHECK(IsAligned((uptr)h, page_size_));
return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
}
@@ -1100,6 +1150,8 @@ class LargeMmapAllocator {
uptr page_size_;
Header *chunks_[kMaxNumChunks];
uptr n_chunks_;
+ uptr min_mmap_, max_mmap_;
+ bool chunks_sorted_;
struct Stats {
uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
} stats;
@@ -1128,7 +1180,7 @@ class CombinedAllocator {
if (size == 0)
size = 1;
if (size + alignment < size)
- return 0;
+ return AllocatorReturnNull();
if (alignment > 8)
size = RoundUpTo(size, alignment);
void *res;
@@ -1179,18 +1231,26 @@ class CombinedAllocator {
return primary_.PointerIsMine(p);
}
- void *GetMetaData(void *p) {
+ void *GetMetaData(const void *p) {
if (primary_.PointerIsMine(p))
return primary_.GetMetaData(p);
return secondary_.GetMetaData(p);
}
- void *GetBlockBegin(void *p) {
+ void *GetBlockBegin(const void *p) {
if (primary_.PointerIsMine(p))
return primary_.GetBlockBegin(p);
return secondary_.GetBlockBegin(p);
}
+ // This function does the same as GetBlockBegin, but is much faster.
+ // Must be called with the allocator locked.
+ void *GetBlockBeginFastLocked(void *p) {
+ if (primary_.PointerIsMine(p))
+ return primary_.GetBlockBegin(p);
+ return secondary_.GetBlockBeginFastLocked(p);
+ }
+
uptr GetActuallyAllocatedSize(void *p) {
if (primary_.PointerIsMine(p))
return primary_.GetActuallyAllocatedSize(p);
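
[Note] A short usage sketch for the fast path added in this hunk: the documented contract is that the allocator is locked, so a caller resolving many pointers would take the lock once around the whole batch via the ForceLock/ForceUnlock pair shown in the next hunk. ResolveBlockBegins and its parameters are illustrative, not part of the header.

// Sketch: resolving a batch of candidate pointers under one lock.
// "Allocator" is any CombinedAllocator instantiation; the batch plumbing
// is invented for this example.
typedef unsigned long uptr;            // stand-in for uptr

template <class Allocator>
void ResolveBlockBegins(Allocator *a, void *const *candidates, uptr n,
                        void **out_begins) {
  a->ForceLock();                      // satisfy the locking contract once
  for (uptr i = 0; i < n; i++)
    out_begins[i] = a->GetBlockBeginFastLocked(candidates[i]);
  a->ForceUnlock();
}
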
@@ -1236,13 +1296,11 @@ class CombinedAllocator {
primary_.ForceUnlock();
}
- // Iterate over existing chunks. May include chunks that are not currently
- // allocated to the user (e.g. freed).
- // The caller is expected to call ForceLock() before calling this function.
- template<typename Callable>
- void ForEachChunk(const Callable &callback) {
- primary_.ForEachChunk(callback);
- secondary_.ForEachChunk(callback);
+ // Iterate over all existing chunks.
+ // The allocator must be locked when calling this function.
+ void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+ primary_.ForEachChunk(callback, arg);
+ secondary_.ForEachChunk(callback, arg);
}
private: