//===-- tsan_dense_alloc.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// A DenseSlabAlloc is a freelist-based allocator of fixed-size objects.
// DenseSlabAllocCache is a thread-local cache for DenseSlabAlloc.
// The only difference from traditional slab allocators is that DenseSlabAlloc
// allocates/frees indices of objects and provides functionality to map
// an index onto the real pointer. The index is u32, that is, half the size
// of uptr (hence the Dense prefix).
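//
// A minimal usage sketch (illustrative only; Node and the size parameters
// below are hypothetical, not defined by this header):
//
//   struct Node { u32 next; u64 payload; };  // sizeof(T) must exceed sizeof(u32)
//   typedef DenseSlabAlloc<Node, /*kL1Size=*/128, /*kL2Size=*/128> NodeAlloc;
//   static NodeAlloc node_alloc(LINKER_INITIALIZED, "node allocator");
//
//   DenseSlabAllocCache cache;
//   node_alloc.InitCache(&cache);
//   NodeAlloc::IndexT idx = node_alloc.Alloc(&cache);  // compact u32 handle
//   Node *n = node_alloc.Map(idx);   // map the handle to the real pointer
//   node_alloc.Free(&cache, idx);
//   node_alloc.FlushCache(&cache);   // return cached indices to the allocator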
//===----------------------------------------------------------------------===//
#ifndef TSAN_DENSE_ALLOC_H
#define TSAN_DENSE_ALLOC_H

#include "sanitizer_common/sanitizer_common.h"
#include "tsan_defs.h"

namespace __tsan {

class DenseSlabAllocCache {
  static const uptr kSize = 128;
  typedef u32 IndexT;
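  // cache[] holds up to kSize free object indices; pos is the number of
  // valid entries.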
  uptr pos;
  IndexT cache[kSize];
  template <typename, uptr, uptr, u64>
  friend class DenseSlabAlloc;
};

template <typename T, uptr kL1Size, uptr kL2Size, u64 kReserved = 0>
class DenseSlabAlloc {
 public:
  typedef DenseSlabAllocCache Cache;
  typedef typename Cache::IndexT IndexT;

  static_assert((kL1Size & (kL1Size - 1)) == 0,
                "kL1Size must be a power-of-two");
  static_assert((kL2Size & (kL2Size - 1)) == 0,
                "kL2Size must be a power-of-two");
  static_assert((kL1Size * kL2Size) <= (1ull << (sizeof(IndexT) * 8)),
                "kL1Size/kL2Size are too large");
  static_assert(((kL1Size * kL2Size - 1) & kReserved) == 0,
                "reserved bits don't fit");
  static_assert(sizeof(T) > sizeof(IndexT),
                "it doesn't make sense to use dense alloc");

  DenseSlabAlloc(LinkerInitialized, const char *name) : name_(name) {}

  explicit DenseSlabAlloc(const char *name)
      : DenseSlabAlloc(LINKER_INITIALIZED, name) {
    // map_ can be very large. Zero it only here, for dynamically constructed
    // objects; linker-initialized objects rely on static zero-initialization
    // so that the array is not paged in.
    internal_memset(map_, 0, sizeof(map_));
  }

  ~DenseSlabAlloc() {
    for (uptr i = 0; i < kL1Size; i++) {
      if (map_[i] != 0)
        UnmapOrDie(map_[i], kL2Size * sizeof(T));
    }
  }

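  // Pops an index from the thread-local cache, refilling it from the central
  // freelist first if the cache is empty. Never returns 0.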
  IndexT Alloc(Cache *c) {
    if (c->pos == 0)
      Refill(c);
    return c->cache[--c->pos];
  }

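  // Pushes an index onto the thread-local cache, draining half of the cache
  // to the central freelist first if the cache is full.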
  void Free(Cache *c, IndexT idx) {
    DCHECK_NE(idx, 0);
    if (c->pos == Cache::kSize)
      Drain(c);
    c->cache[c->pos++] = idx;
  }

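  // Translates an index into the object pointer via the two-level map.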
  T *Map(IndexT idx) {
    DCHECK_NE(idx, 0);
    DCHECK_LE(idx, kL1Size * kL2Size);
    return &map_[idx / kL2Size][idx % kL2Size];
  }

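  // Returns all cached indices to the central freelist.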
  void FlushCache(Cache *c) {
    if (!c->pos)
      return;
    SpinMutexLock lock(&mtx_);
    while (c->pos) {
      IndexT idx = c->cache[--c->pos];
      *(IndexT*)Map(idx) = freelist_;
      freelist_ = idx;
    }
  }

  void InitCache(Cache *c) {
    c->pos = 0;
    internal_memset(c->cache, 0, sizeof(c->cache));
  }

  uptr AllocatedMemory() const {
    return atomic_load_relaxed(&fillpos_) * kL2Size * sizeof(T);
  }

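  // Calls func on every slot of every mapped slab (whether currently
  // allocated or free), skipping the reserved index 0.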
  template <typename Func>
  void ForEach(Func func) {
    SpinMutexLock lock(&mtx_);
    uptr fillpos = atomic_load_relaxed(&fillpos_);
    for (uptr l1 = 0; l1 < fillpos; l1++) {
      for (IndexT l2 = l1 == 0 ? 1 : 0; l2 < kL2Size; l2++) func(&map_[l1][l2]);
    }
  }

 private:
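  // Two-level map: an L1 array of pointers to lazily-mmapped L2 slabs of
  // kL2Size objects each. Object index i lives at map_[i / kL2Size][i % kL2Size].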
  T *map_[kL1Size];
  SpinMutex mtx_;
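  // Head of the central freelist (0 when empty); each free slot stores the
  // index of the next free slot in its first sizeof(IndexT) bytes.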
  IndexT freelist_ = {0};
  atomic_uintptr_t fillpos_ = {0};
  const char *const name_;

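  // Moves up to half of a cache worth of indices from the central freelist
  // into the cache, mapping and initializing a new L2 slab if the freelist
  // is empty.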
  void Refill(Cache *c) {
    SpinMutexLock lock(&mtx_);
    if (freelist_ == 0) {
      uptr fillpos = atomic_load_relaxed(&fillpos_);
      if (fillpos == kL1Size) {
        Printf("ThreadSanitizer: %s overflow (%zu*%zu). Dying.\n",
            name_, kL1Size, kL2Size);
        Die();
      }
      VPrintf(2, "ThreadSanitizer: growing %s: %zu out of %zu*%zu\n", name_,
              fillpos, kL1Size, kL2Size);
      T *batch = (T*)MmapOrDie(kL2Size * sizeof(T), name_);
      // Reserve 0 as invalid index.
      IndexT start = fillpos == 0 ? 1 : 0;
      for (IndexT i = start; i < kL2Size; i++) {
        new(batch + i) T;
        *(IndexT *)(batch + i) = i + 1 + fillpos * kL2Size;
      }
      *(IndexT*)(batch + kL2Size - 1) = 0;
      freelist_ = fillpos * kL2Size + start;
      map_[fillpos] = batch;
      atomic_store_relaxed(&fillpos_, fillpos + 1);
    }
    for (uptr i = 0; i < Cache::kSize / 2 && freelist_ != 0; i++) {
      IndexT idx = freelist_;
      c->cache[c->pos++] = idx;
      freelist_ = *(IndexT*)Map(idx);
    }
  }

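  // Returns half of the cache to the central freelist to make room.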
  void Drain(Cache *c) {
    SpinMutexLock lock(&mtx_);
    for (uptr i = 0; i < Cache::kSize / 2; i++) {
      IndexT idx = c->cache[--c->pos];
      *(IndexT*)Map(idx) = freelist_;
      freelist_ = idx;
    }
  }
};

}  // namespace __tsan

#endif  // TSAN_DENSE_ALLOC_H