Diffstat (limited to 'contrib/llvm-project/llvm/include/llvm/ProfileData/MemProfData.inc')
-rw-r--r--  contrib/llvm-project/llvm/include/llvm/ProfileData/MemProfData.inc | 99
1 file changed, 95 insertions(+), 4 deletions(-)
diff --git a/contrib/llvm-project/llvm/include/llvm/ProfileData/MemProfData.inc b/contrib/llvm-project/llvm/include/llvm/ProfileData/MemProfData.inc
index f2cb3738f053..ff22a697965c 100644
--- a/contrib/llvm-project/llvm/include/llvm/ProfileData/MemProfData.inc
+++ b/contrib/llvm-project/llvm/include/llvm/ProfileData/MemProfData.inc
@@ -20,11 +20,10 @@
*
\*===----------------------------------------------------------------------===*/
-
#ifdef _MSC_VER
-#define PACKED(__decl__) __pragma(pack(push,1)) __decl__ __pragma(pack(pop))
+#define PACKED(...) __pragma(pack(push,1)) __VA_ARGS__ __pragma(pack(pop))
#else
-#define PACKED(__decl__) __decl__ __attribute__((__packed__))
+#define PACKED(...) __VA_ARGS__ __attribute__((__packed__))
#endif
// A 64-bit magic number to uniquely identify the raw binary memprof profile file.
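A note on the PACKED change in the hunk above: the single __decl__ parameter had
to become variadic because the preprocessor splits macro arguments at top-level
commas, and braces do not protect commas the way parentheses do. A struct body
with comma-separated declarators (as MemInfoBlock below uses) would therefore be
split across several arguments. A minimal compile-only sketch of the difference,
using GCC/Clang attribute syntax and illustrative names only:

    #define PACKED_OLD(__decl__) __decl__ __attribute__((__packed__))
    #define PACKED_NEW(...) __VA_ARGS__ __attribute__((__packed__))

    // Does not compile: the comma between `a` and `b` splits the struct body
    // into two macro arguments, but PACKED_OLD takes exactly one.
    // PACKED_OLD(struct Bad { int a, b; });

    // Compiles: __VA_ARGS__ re-expands the full comma-separated body.
    PACKED_NEW(struct Good { int a, b; });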
@@ -47,14 +46,106 @@ PACKED(struct Header {
uint64_t StackOffset;
});
+
// A struct describing the information necessary to describe a /proc/maps
// segment entry for a particular binary/library identified by its build id.
PACKED(struct SegmentEntry {
uint64_t Start;
uint64_t End;
uint64_t Offset;
- uint8_t BuildId[32];
+ // This field is unused until sanitizer procmaps support for build ids on
+ // Linux ELF is implemented.
+ uint8_t BuildId[32] = {0};
+
+ SegmentEntry(uint64_t S, uint64_t E, uint64_t O) :
+ Start(S), End(E), Offset(O) {}
+
+ SegmentEntry(const SegmentEntry& S) {
+ Start = S.Start;
+ End = S.End;
+ Offset = S.Offset;
+ }
+
+ SegmentEntry& operator=(const SegmentEntry& S) {
+ Start = S.Start;
+ End = S.End;
+ Offset = S.Offset;
+ return *this;
+ }
+
+ bool operator==(const SegmentEntry& S) const {
+ return Start == S.Start &&
+ End == S.End &&
+ Offset == S.Offset;
+ }
});
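The copy constructor and assignment operator above skip BuildId, consistent with
the comment marking it unused for now. For the remaining fields, the packed
layout works out to 3 * 8 + 32 = 56 bytes with no padding, which is what lets
the runtime writer and the raw profile reader agree on the record size. A
standalone sketch mirroring the fields above (illustration only, not part of
the change):

    #include <stdint.h>

    #pragma pack(push, 1)
    struct SegmentEntryLayout { // mirrors SegmentEntry above for illustration
      uint64_t Start;
      uint64_t End;
      uint64_t Offset;
      uint8_t BuildId[32];
    };
    #pragma pack(pop)

    // 3 * sizeof(uint64_t) + 32 = 56 bytes; packing guarantees no padding.
    static_assert(sizeof(SegmentEntryLayout) == 56, "unexpected padding");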
+
+// A struct representing the heap allocation characteristics of a particular
+// runtime context. This struct is shared between the compiler-rt runtime and
+// the raw profile reader. The indexed profile uses a separate, self-describing,
+// backwards-compatible format.
+PACKED(struct MemInfoBlock {
+ uint32_t alloc_count;
+ uint64_t total_access_count, min_access_count, max_access_count;
+ uint64_t total_size;
+ uint32_t min_size, max_size;
+ uint32_t alloc_timestamp, dealloc_timestamp;
+ uint64_t total_lifetime;
+ uint32_t min_lifetime, max_lifetime;
+ uint32_t alloc_cpu_id, dealloc_cpu_id;
+ uint32_t num_migrated_cpu;
+
+ // Only compared to prior deallocated object currently.
+ uint32_t num_lifetime_overlaps;
+ uint32_t num_same_alloc_cpu;
+ uint32_t num_same_dealloc_cpu;
+
+ uint64_t data_type_id; // TODO: hash of type name
+
+ MemInfoBlock() : alloc_count(0) {}
+
+ MemInfoBlock(uint32_t size, uint64_t access_count, uint32_t alloc_timestamp,
+ uint32_t dealloc_timestamp, uint32_t alloc_cpu, uint32_t dealloc_cpu)
+ : alloc_count(1), total_access_count(access_count),
+ min_access_count(access_count), max_access_count(access_count),
+ total_size(size), min_size(size), max_size(size),
+ alloc_timestamp(alloc_timestamp), dealloc_timestamp(dealloc_timestamp),
+ total_lifetime(dealloc_timestamp - alloc_timestamp),
+ min_lifetime(total_lifetime), max_lifetime(total_lifetime),
+ alloc_cpu_id(alloc_cpu), dealloc_cpu_id(dealloc_cpu),
+ num_lifetime_overlaps(0), num_same_alloc_cpu(0),
+ num_same_dealloc_cpu(0) {
+ num_migrated_cpu = alloc_cpu_id != dealloc_cpu_id;
+ }
+
+ void Merge(const MemInfoBlock &newMIB) {
+ alloc_count += newMIB.alloc_count;
+
+ total_access_count += newMIB.total_access_count;
+ min_access_count = newMIB.min_access_count < min_access_count ? newMIB.min_access_count : min_access_count;
+ max_access_count = newMIB.max_access_count > max_access_count ? newMIB.max_access_count : max_access_count;
+
+ total_size += newMIB.total_size;
+ min_size = newMIB.min_size < min_size ? newMIB.min_size : min_size;
+ max_size = newMIB.max_size > max_size ? newMIB.max_size : max_size;
+
+ total_lifetime += newMIB.total_lifetime;
+ min_lifetime = newMIB.min_lifetime < min_lifetime ? newMIB.min_lifetime : min_lifetime;
+ max_lifetime = newMIB.max_lifetime > max_lifetime ? newMIB.max_lifetime : max_lifetime;
+
+ // We know newMIB was deallocated later, so we just need to check whether it
+ // was allocated before the last one was deallocated.
+ num_lifetime_overlaps += newMIB.alloc_timestamp < dealloc_timestamp;
+ alloc_timestamp = newMIB.alloc_timestamp;
+ dealloc_timestamp = newMIB.dealloc_timestamp;
+
+ num_same_alloc_cpu += alloc_cpu_id == newMIB.alloc_cpu_id;
+ num_same_dealloc_cpu += dealloc_cpu_id == newMIB.dealloc_cpu_id;
+ alloc_cpu_id = newMIB.alloc_cpu_id;
+ dealloc_cpu_id = newMIB.dealloc_cpu_id;
+ }
+});
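A small usage sketch of the merge logic above. This is a sketch under
assumptions: the .inc is included as llvm/ProfileData/MemProfData.inc per the
tree location in this diff, <stdint.h> supplies the fixed-width types it
expects, and the numbers are illustrative:

    #include <assert.h>
    #include <stdint.h>

    #include "llvm/ProfileData/MemProfData.inc"

    int main() {
      using llvm::memprof::MemInfoBlock;
      // Two allocations attributed to the same context: 32 bytes, then 64 bytes.
      MemInfoBlock A(/*size=*/32, /*access_count=*/4, /*alloc_timestamp=*/100,
                     /*dealloc_timestamp=*/150, /*alloc_cpu=*/0, /*dealloc_cpu=*/0);
      MemInfoBlock B(/*size=*/64, /*access_count=*/10, /*alloc_timestamp=*/140,
                     /*dealloc_timestamp=*/200, /*alloc_cpu=*/0, /*dealloc_cpu=*/1);

      A.Merge(B);
      assert(A.alloc_count == 2);
      assert(A.total_size == 96 && A.min_size == 32 && A.max_size == 64);
      assert(A.total_access_count == 14 && A.min_access_count == 4 &&
             A.max_access_count == 10);
      // B was allocated (t=140) before A was deallocated (t=150), so the merge
      // records one lifetime overlap.
      assert(A.num_lifetime_overlaps == 1);
      return 0;
    }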
+
} // namespace memprof
} // namespace llvm