Diffstat (limited to 'include/llvm/ADT')
-rw-r--r--  include/llvm/ADT/APFloat.h              1
-rw-r--r--  include/llvm/ADT/APInt.h               50
-rw-r--r--  include/llvm/ADT/APSInt.h              48
-rw-r--r--  include/llvm/ADT/ArrayRef.h             2
-rw-r--r--  include/llvm/ADT/BitVector.h           56
-rw-r--r--  include/llvm/ADT/DenseMap.h           882
-rw-r--r--  include/llvm/ADT/DepthFirstIterator.h   2
-rw-r--r--  include/llvm/ADT/FoldingSet.h         105
-rw-r--r--  include/llvm/ADT/Hashing.h              4
-rw-r--r--  include/llvm/ADT/ImmutableSet.h         4
-rw-r--r--  include/llvm/ADT/IndexedMap.h           9
-rw-r--r--  include/llvm/ADT/IntrusiveRefCntPtr.h  34
-rw-r--r--  include/llvm/ADT/PointerIntPair.h      13
-rw-r--r--  include/llvm/ADT/PointerUnion.h         4
-rw-r--r--  include/llvm/ADT/PostOrderIterator.h   75
-rw-r--r--  include/llvm/ADT/STLExtras.h           22
-rw-r--r--  include/llvm/ADT/SmallBitVector.h      83
-rw-r--r--  include/llvm/ADT/SmallString.h          7
-rw-r--r--  include/llvm/ADT/SmallVector.h        298
-rw-r--r--  include/llvm/ADT/SparseSet.h          112
-rw-r--r--  include/llvm/ADT/StringRef.h           29
-rw-r--r--  include/llvm/ADT/StringSwitch.h         4
-rw-r--r--  include/llvm/ADT/TinyPtrVector.h      184
-rw-r--r--  include/llvm/ADT/Triple.h              14
-rw-r--r--  include/llvm/ADT/ValueMap.h            22
25 files changed, 1650 insertions(+), 414 deletions(-)
diff --git a/include/llvm/ADT/APFloat.h b/include/llvm/ADT/APFloat.h
index 2b466f900c81..5a625a4c832f 100644
--- a/include/llvm/ADT/APFloat.h
+++ b/include/llvm/ADT/APFloat.h
@@ -274,6 +274,7 @@ namespace llvm {
/* C fmod, or llvm frem. */
opStatus mod(const APFloat &, roundingMode);
opStatus fusedMultiplyAdd(const APFloat &, const APFloat &, roundingMode);
+ opStatus roundToIntegral(roundingMode);
/* Sign operations. */
void changeSign();
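As context for the new entry point, a minimal usage sketch (hedged: it assumes the IEEEdouble semantics object and rounding-mode enumerators that APFloat already exposes; the values are illustrative):

#include "llvm/ADT/APFloat.h"
using llvm::APFloat;

void roundToIntegralExample() {
  APFloat F(APFloat::IEEEdouble, "2.5");
  // Rounds F in place to an integral value; the returned status reports
  // whether the result was inexact.
  APFloat::opStatus S = F.roundToIntegral(APFloat::rmNearestTiesToEven);
  (void)S; // 2.5 rounds to 2.0 under round-to-nearest, ties-to-even
}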
diff --git a/include/llvm/ADT/APInt.h b/include/llvm/ADT/APInt.h
index 41019899766b..f30a6e3f081c 100644
--- a/include/llvm/ADT/APInt.h
+++ b/include/llvm/ADT/APInt.h
@@ -16,6 +16,7 @@
#define LLVM_APINT_H
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <climits>
@@ -273,6 +274,13 @@ public:
initSlowCase(that);
}
+#if LLVM_USE_RVALUE_REFERENCES
+ /// @brief Move Constructor.
+ APInt(APInt&& that) : BitWidth(that.BitWidth), VAL(that.VAL) {
+ that.BitWidth = 0;
+ }
+#endif
+
/// @brief Destructor.
~APInt() {
if (!isSingleWord())
@@ -349,13 +357,7 @@ public:
/// @brief Check if this APInt has an N-bits unsigned integer value.
bool isIntN(unsigned N) const {
assert(N && "N == 0 ???");
- if (N >= getBitWidth())
- return true;
-
- if (isSingleWord())
- return isUIntN(N, VAL);
- return APInt(N, makeArrayRef(pVal, getNumWords())).zext(getBitWidth())
- == (*this);
+ return getActiveBits() <= N;
}
/// @brief Check if this APInt has an N-bits signed integer value.
@@ -503,6 +505,18 @@ public:
return getAllOnesValue(numBits).lshr(numBits - loBitsSet);
}
+ /// \brief Determine if two APInts have the same value, after zero-extending
+ /// one of them (if needed!) to ensure that the bit-widths match.
+ static bool isSameValue(const APInt &I1, const APInt &I2) {
+ if (I1.getBitWidth() == I2.getBitWidth())
+ return I1 == I2;
+
+ if (I1.getBitWidth() > I2.getBitWidth())
+ return I1 == I2.zext(I1.getBitWidth());
+
+ return I1.zext(I2.getBitWidth()) == I2;
+ }
+
/// \brief Overload to compute a hash_code for an APInt value.
friend hash_code hash_value(const APInt &Arg);
@@ -587,6 +601,21 @@ public:
return AssignSlowCase(RHS);
}
+#if LLVM_USE_RVALUE_REFERENCES
+ /// @brief Move assignment operator.
+ APInt& operator=(APInt&& that) {
+ if (!isSingleWord())
+ delete [] pVal;
+
+ BitWidth = that.BitWidth;
+ VAL = that.VAL;
+
+ that.BitWidth = 0;
+
+ return *this;
+ }
+#endif
+
/// The RHS value is assigned to *this. If the significant bits in RHS exceed
/// the bit width, the excess bits are truncated. If the bit width is larger
/// than 64, the value is zero filled in the unspecified high order bits.
@@ -817,9 +846,10 @@ public:
if (LHS.isNegative()) {
if (RHS.isNegative())
APInt::udivrem(-LHS, -RHS, Quotient, Remainder);
- else
+ else {
APInt::udivrem(-LHS, RHS, Quotient, Remainder);
- Quotient = -Quotient;
+ Quotient = -Quotient;
+ }
Remainder = -Remainder;
} else if (RHS.isNegative()) {
APInt::udivrem(LHS, -RHS, Quotient, Remainder);
@@ -1087,7 +1117,7 @@ public:
else {
// Set all the bits in all the words.
for (unsigned i = 0; i < getNumWords(); ++i)
- pVal[i] = -1ULL;
+ pVal[i] = -1ULL;
}
// Clear the unused ones
clearUnusedBits();
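For illustration, a minimal sketch of the new isSameValue helper; it simply restates the zero-extension rule in the code above, with illustrative values:

#include "llvm/ADT/APInt.h"
#include <cassert>
using llvm::APInt;

void isSameValueExamples() {
  // Widths differ, so the narrower value is zero-extended before comparing.
  assert(APInt::isSameValue(APInt(8, 42), APInt(32, 42)));

  // Zero-extension, not sign-extension: 0xFF at 8 bits means 255, not -1.
  assert(APInt::isSameValue(APInt(8, 0xFF), APInt(32, 255)));
  assert(!APInt::isSameValue(APInt(8, 0xFF), APInt(32, -1, /*isSigned=*/true)));
}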
diff --git a/include/llvm/ADT/APSInt.h b/include/llvm/ADT/APSInt.h
index 54a7b601d1f1..048c65ce2c77 100644
--- a/include/llvm/ADT/APSInt.h
+++ b/include/llvm/ADT/APSInt.h
@@ -135,6 +135,19 @@ public:
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
return IsUnsigned ? uge(RHS) : sge(RHS);
}
+ inline bool operator==(const APSInt& RHS) const {
+ assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
+ return eq(RHS);
+ }
+ inline bool operator==(int64_t RHS) const {
+ return isSameValue(*this, APSInt(APInt(64, RHS), true));
+ }
+ inline bool operator!=(const APSInt& RHS) const {
+ return !((*this) == RHS);
+ }
+ inline bool operator!=(int64_t RHS) const {
+ return !((*this) == RHS);
+ }
// The remaining operators just wrap the logic of APInt, but retain the
// signedness information.
@@ -250,17 +263,50 @@ public:
: APInt::getSignedMinValue(numBits), Unsigned);
}
+ /// \brief Determine if two APSInts have the same value, zero- or
+ /// sign-extending as needed.
+ static bool isSameValue(const APSInt &I1, const APSInt &I2) {
+ if (I1.getBitWidth() == I2.getBitWidth() && I1.isSigned() == I2.isSigned())
+ return I1 == I2;
+
+ // Check for a bit-width mismatch.
+ if (I1.getBitWidth() > I2.getBitWidth())
+ return isSameValue(I1, I2.extend(I1.getBitWidth()));
+ else if (I2.getBitWidth() > I1.getBitWidth())
+ return isSameValue(I1.extend(I2.getBitWidth()), I2);
+
+ // We have a signedness mismatch. Turn the signed value into an unsigned
+ // value.
+ if (I1.isSigned()) {
+ if (I1.isNegative())
+ return false;
+
+ return APSInt(I1, true) == I2;
+ }
+
+ if (I2.isNegative())
+ return false;
+
+ return I1 == APSInt(I2, true);
+ }
+
/// Profile - Used to insert APSInt objects, or objects that contain APSInt
/// objects, into FoldingSets.
void Profile(FoldingSetNodeID& ID) const;
};
+inline bool operator==(int64_t V1, const APSInt& V2) {
+ return V2 == V1;
+}
+inline bool operator!=(int64_t V1, const APSInt& V2) {
+ return V2 != V1;
+}
+
inline raw_ostream &operator<<(raw_ostream &OS, const APSInt &I) {
I.print(OS, I.isSigned());
return OS;
}
-
} // end namespace llvm
#endif
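A sketch of how the signedness rules above play out (illustrative values only):

#include "llvm/ADT/APSInt.h"
#include <cassert>
using llvm::APInt;
using llvm::APSInt;

void apsIntComparisonExamples() {
  APSInt S(APInt(8, -1, /*isSigned=*/true), /*isUnsigned=*/false); // signed -1
  APSInt U(APInt(8, 255), /*isUnsigned=*/true);                    // unsigned 255
  // Identical bit patterns, but a negative signed value never equals
  // an unsigned one.
  assert(!APSInt::isSameValue(S, U));
  // The new mixed-mode operators compare against plain integers.
  assert(U == 255 && S != 255);
}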
diff --git a/include/llvm/ADT/ArrayRef.h b/include/llvm/ADT/ArrayRef.h
index f4c8e5586213..cf55aadef31a 100644
--- a/include/llvm/ADT/ArrayRef.h
+++ b/include/llvm/ADT/ArrayRef.h
@@ -60,7 +60,7 @@ namespace llvm {
: Data(begin), Length(end - begin) {}
/// Construct an ArrayRef from a SmallVector.
- /*implicit*/ ArrayRef(const SmallVectorImpl<T> &Vec)
+ /*implicit*/ ArrayRef(const SmallVectorTemplateCommon<T> &Vec)
: Data(Vec.data()), Length(Vec.size()) {}
/// Construct an ArrayRef from a std::vector.
diff --git a/include/llvm/ADT/BitVector.h b/include/llvm/ADT/BitVector.h
index 7e0b5ba37196..3e2e5f230a3a 100644
--- a/include/llvm/ADT/BitVector.h
+++ b/include/llvm/ADT/BitVector.h
@@ -14,6 +14,7 @@
#ifndef LLVM_ADT_BITVECTOR_H
#define LLVM_ADT_BITVECTOR_H
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
@@ -97,6 +98,13 @@ public:
std::memcpy(Bits, RHS.Bits, Capacity * sizeof(BitWord));
}
+#if LLVM_USE_RVALUE_REFERENCES
+ BitVector(BitVector &&RHS)
+ : Bits(RHS.Bits), Size(RHS.Size), Capacity(RHS.Capacity) {
+ RHS.Bits = 0;
+ }
+#endif
+
~BitVector() {
std::free(Bits);
}
@@ -251,11 +259,6 @@ public:
return *this;
}
- // No argument flip.
- BitVector operator~() const {
- return BitVector(*this).flip();
- }
-
// Indexing.
reference operator[](unsigned Idx) {
assert (Idx < Size && "Out-of-bounds Bit access.");
@@ -272,6 +275,16 @@ public:
return (*this)[Idx];
}
+ /// Test if any common bits are set.
+ bool anyCommon(const BitVector &RHS) const {
+ unsigned ThisWords = NumBitWords(size());
+ unsigned RHSWords = NumBitWords(RHS.size());
+ for (unsigned i = 0, e = std::min(ThisWords, RHSWords); i != e; ++i)
+ if (Bits[i] & RHS.Bits[i])
+ return true;
+ return false;
+ }
+
// Comparison operators.
bool operator==(const BitVector &RHS) const {
unsigned ThisWords = NumBitWords(size());
@@ -366,6 +379,21 @@ public:
return *this;
}
+#if LLVM_USE_RVALUE_REFERENCES
+ const BitVector &operator=(BitVector &&RHS) {
+ if (this == &RHS) return *this;
+
+ std::free(Bits);
+ Bits = RHS.Bits;
+ Size = RHS.Size;
+ Capacity = RHS.Capacity;
+
+ RHS.Bits = 0;
+
+ return *this;
+ }
+#endif
+
void swap(BitVector &RHS) {
std::swap(Bits, RHS.Bits);
std::swap(Size, RHS.Size);
@@ -472,24 +500,6 @@ private:
}
};
-inline BitVector operator&(const BitVector &LHS, const BitVector &RHS) {
- BitVector Result(LHS);
- Result &= RHS;
- return Result;
-}
-
-inline BitVector operator|(const BitVector &LHS, const BitVector &RHS) {
- BitVector Result(LHS);
- Result |= RHS;
- return Result;
-}
-
-inline BitVector operator^(const BitVector &LHS, const BitVector &RHS) {
- BitVector Result(LHS);
- Result ^= RHS;
- return Result;
-}
-
} // End llvm namespace
namespace std {
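The deleted operators above each copied an entire BitVector into a temporary; the common intersection test is now served allocation-free by the new anyCommon. A quick sketch of the new query:

#include "llvm/ADT/BitVector.h"
#include <cassert>
using llvm::BitVector;

void anyCommonExample() {
  BitVector A(128), B(96);  // sizes may differ; trailing words are ignored
  A.set(70);
  B.set(70);
  assert(A.anyCommon(B));   // word-wise AND finds the shared bit
  B.reset(70);
  assert(!A.anyCommon(B));
}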
diff --git a/include/llvm/ADT/DenseMap.h b/include/llvm/ADT/DenseMap.h
index 8d4a19d0919c..65a70fbfa6d0 100644
--- a/include/llvm/ADT/DenseMap.h
+++ b/include/llvm/ADT/DenseMap.h
@@ -14,6 +14,8 @@
#ifndef LLVM_ADT_DENSEMAP_H
#define LLVM_ADT_DENSEMAP_H
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/AlignOf.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
#include "llvm/Support/type_traits.h"
@@ -23,6 +25,7 @@
#include <new>
#include <utility>
#include <cassert>
+#include <climits>
#include <cstddef>
#include <cstring>
@@ -33,116 +36,83 @@ template<typename KeyT, typename ValueT,
bool IsConst = false>
class DenseMapIterator;
-template<typename KeyT, typename ValueT,
- typename KeyInfoT = DenseMapInfo<KeyT> >
-class DenseMap {
+template<typename DerivedT,
+ typename KeyT, typename ValueT, typename KeyInfoT>
+class DenseMapBase {
+protected:
typedef std::pair<KeyT, ValueT> BucketT;
- unsigned NumBuckets;
- BucketT *Buckets;
- unsigned NumEntries;
- unsigned NumTombstones;
public:
typedef KeyT key_type;
typedef ValueT mapped_type;
typedef BucketT value_type;
- DenseMap(const DenseMap &other) {
- NumBuckets = 0;
- CopyFrom(other);
- }
-
- explicit DenseMap(unsigned NumInitBuckets = 0) {
- init(NumInitBuckets);
- }
-
- template<typename InputIt>
- DenseMap(const InputIt &I, const InputIt &E) {
- init(NextPowerOf2(std::distance(I, E)));
- insert(I, E);
- }
-
- ~DenseMap() {
- const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
- for (BucketT *P = Buckets, *E = Buckets+NumBuckets; P != E; ++P) {
- if (!KeyInfoT::isEqual(P->first, EmptyKey) &&
- !KeyInfoT::isEqual(P->first, TombstoneKey))
- P->second.~ValueT();
- P->first.~KeyT();
- }
-#ifndef NDEBUG
- if (NumBuckets)
- memset((void*)Buckets, 0x5a, sizeof(BucketT)*NumBuckets);
-#endif
- operator delete(Buckets);
- }
-
typedef DenseMapIterator<KeyT, ValueT, KeyInfoT> iterator;
typedef DenseMapIterator<KeyT, ValueT,
KeyInfoT, true> const_iterator;
inline iterator begin() {
// When the map is empty, avoid the overhead of AdvancePastEmptyBuckets().
- return empty() ? end() : iterator(Buckets, Buckets+NumBuckets);
+ return empty() ? end() : iterator(getBuckets(), getBucketsEnd());
}
inline iterator end() {
- return iterator(Buckets+NumBuckets, Buckets+NumBuckets, true);
+ return iterator(getBucketsEnd(), getBucketsEnd(), true);
}
inline const_iterator begin() const {
- return empty() ? end() : const_iterator(Buckets, Buckets+NumBuckets);
+ return empty() ? end() : const_iterator(getBuckets(), getBucketsEnd());
}
inline const_iterator end() const {
- return const_iterator(Buckets+NumBuckets, Buckets+NumBuckets, true);
+ return const_iterator(getBucketsEnd(), getBucketsEnd(), true);
}
- bool empty() const { return NumEntries == 0; }
- unsigned size() const { return NumEntries; }
+ bool empty() const { return getNumEntries() == 0; }
+ unsigned size() const { return getNumEntries(); }
/// Grow the densemap so that it has at least Size buckets. Does not shrink
void resize(size_t Size) {
- if (Size > NumBuckets)
+ if (Size > getNumBuckets())
grow(Size);
}
void clear() {
- if (NumEntries == 0 && NumTombstones == 0) return;
+ if (getNumEntries() == 0 && getNumTombstones() == 0) return;
// If the capacity of the array is huge, and the # elements used is small,
// shrink the array.
- if (NumEntries * 4 < NumBuckets && NumBuckets > 64) {
+ if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
shrink_and_clear();
return;
}
const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
- for (BucketT *P = Buckets, *E = Buckets+NumBuckets; P != E; ++P) {
+ for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
if (!KeyInfoT::isEqual(P->first, EmptyKey)) {
if (!KeyInfoT::isEqual(P->first, TombstoneKey)) {
P->second.~ValueT();
- --NumEntries;
+ decrementNumEntries();
}
P->first = EmptyKey;
}
}
- assert(NumEntries == 0 && "Node count imbalance!");
- NumTombstones = 0;
+ assert(getNumEntries() == 0 && "Node count imbalance!");
+ setNumTombstones(0);
}
/// count - Return true if the specified key is in the map.
bool count(const KeyT &Val) const {
- BucketT *TheBucket;
+ const BucketT *TheBucket;
return LookupBucketFor(Val, TheBucket);
}
iterator find(const KeyT &Val) {
BucketT *TheBucket;
if (LookupBucketFor(Val, TheBucket))
- return iterator(TheBucket, Buckets+NumBuckets, true);
+ return iterator(TheBucket, getBucketsEnd(), true);
return end();
}
const_iterator find(const KeyT &Val) const {
- BucketT *TheBucket;
+ const BucketT *TheBucket;
if (LookupBucketFor(Val, TheBucket))
- return const_iterator(TheBucket, Buckets+NumBuckets, true);
+ return const_iterator(TheBucket, getBucketsEnd(), true);
return end();
}
@@ -155,21 +125,21 @@ public:
iterator find_as(const LookupKeyT &Val) {
BucketT *TheBucket;
if (LookupBucketFor(Val, TheBucket))
- return iterator(TheBucket, Buckets+NumBuckets, true);
+ return iterator(TheBucket, getBucketsEnd(), true);
return end();
}
template<class LookupKeyT>
const_iterator find_as(const LookupKeyT &Val) const {
- BucketT *TheBucket;
+ const BucketT *TheBucket;
if (LookupBucketFor(Val, TheBucket))
- return const_iterator(TheBucket, Buckets+NumBuckets, true);
+ return const_iterator(TheBucket, getBucketsEnd(), true);
return end();
}
/// lookup - Return the entry for the specified key, or a default
/// constructed value if no such entry exists.
ValueT lookup(const KeyT &Val) const {
- BucketT *TheBucket;
+ const BucketT *TheBucket;
if (LookupBucketFor(Val, TheBucket))
return TheBucket->second;
return ValueT();
@@ -181,12 +151,12 @@ public:
std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
BucketT *TheBucket;
if (LookupBucketFor(KV.first, TheBucket))
- return std::make_pair(iterator(TheBucket, Buckets+NumBuckets, true),
+ return std::make_pair(iterator(TheBucket, getBucketsEnd(), true),
false); // Already in map.
// Otherwise, insert the new element.
TheBucket = InsertIntoBucket(KV.first, KV.second, TheBucket);
- return std::make_pair(iterator(TheBucket, Buckets+NumBuckets, true), true);
+ return std::make_pair(iterator(TheBucket, getBucketsEnd(), true), true);
}
/// insert - Range insertion of pairs.
@@ -204,23 +174,16 @@ public:
TheBucket->second.~ValueT();
TheBucket->first = getTombstoneKey();
- --NumEntries;
- ++NumTombstones;
+ decrementNumEntries();
+ incrementNumTombstones();
return true;
}
void erase(iterator I) {
BucketT *TheBucket = &*I;
TheBucket->second.~ValueT();
TheBucket->first = getTombstoneKey();
- --NumEntries;
- ++NumTombstones;
- }
-
- void swap(DenseMap& RHS) {
- std::swap(NumBuckets, RHS.NumBuckets);
- std::swap(Buckets, RHS.Buckets);
- std::swap(NumEntries, RHS.NumEntries);
- std::swap(NumTombstones, RHS.NumTombstones);
+ decrementNumEntries();
+ incrementNumTombstones();
}
value_type& FindAndConstruct(const KeyT &Key) {
@@ -235,68 +198,211 @@ public:
return FindAndConstruct(Key).second;
}
- DenseMap& operator=(const DenseMap& other) {
- CopyFrom(other);
- return *this;
+#if LLVM_USE_RVALUE_REFERENCES
+ value_type& FindAndConstruct(KeyT &&Key) {
+ BucketT *TheBucket;
+ if (LookupBucketFor(Key, TheBucket))
+ return *TheBucket;
+
+ return *InsertIntoBucket(Key, ValueT(), TheBucket);
}
+ ValueT &operator[](KeyT &&Key) {
+ return FindAndConstruct(Key).second;
+ }
+#endif
+
/// isPointerIntoBucketsArray - Return true if the specified pointer points
/// somewhere into the DenseMap's array of buckets (i.e. either to a key or
/// value in the DenseMap).
bool isPointerIntoBucketsArray(const void *Ptr) const {
- return Ptr >= Buckets && Ptr < Buckets+NumBuckets;
+ return Ptr >= getBuckets() && Ptr < getBucketsEnd();
}
/// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
/// array. In conjunction with the previous method, this can be used to
/// determine whether an insertion caused the DenseMap to reallocate.
- const void *getPointerIntoBucketsArray() const { return Buckets; }
+ const void *getPointerIntoBucketsArray() const { return getBuckets(); }
-private:
- void CopyFrom(const DenseMap& other) {
- if (NumBuckets != 0 &&
- (!isPodLike<KeyT>::value || !isPodLike<ValueT>::value)) {
- const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
- for (BucketT *P = Buckets, *E = Buckets+NumBuckets; P != E; ++P) {
- if (!KeyInfoT::isEqual(P->first, EmptyKey) &&
- !KeyInfoT::isEqual(P->first, TombstoneKey))
- P->second.~ValueT();
- P->first.~KeyT();
- }
- }
+protected:
+ DenseMapBase() {}
- NumEntries = other.NumEntries;
- NumTombstones = other.NumTombstones;
+ void destroyAll() {
+ if (getNumBuckets() == 0) // Nothing to do.
+ return;
+
+ const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
+ for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
+ if (!KeyInfoT::isEqual(P->first, EmptyKey) &&
+ !KeyInfoT::isEqual(P->first, TombstoneKey))
+ P->second.~ValueT();
+ P->first.~KeyT();
+ }
- if (NumBuckets) {
#ifndef NDEBUG
- memset((void*)Buckets, 0x5a, sizeof(BucketT)*NumBuckets);
+ memset((void*)getBuckets(), 0x5a, sizeof(BucketT)*getNumBuckets());
#endif
- operator delete(Buckets);
- }
+ }
- NumBuckets = other.NumBuckets;
+ void initEmpty() {
+ setNumEntries(0);
+ setNumTombstones(0);
- if (NumBuckets == 0) {
- Buckets = 0;
- return;
+ assert((getNumBuckets() & (getNumBuckets()-1)) == 0 &&
+ "# initial buckets must be a power of two!");
+ const KeyT EmptyKey = getEmptyKey();
+ for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
+ new (&B->first) KeyT(EmptyKey);
+ }
+
+ void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
+ initEmpty();
+
+ // Insert all the old elements.
+ const KeyT EmptyKey = getEmptyKey();
+ const KeyT TombstoneKey = getTombstoneKey();
+ for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
+ if (!KeyInfoT::isEqual(B->first, EmptyKey) &&
+ !KeyInfoT::isEqual(B->first, TombstoneKey)) {
+ // Insert the key/value into the new table.
+ BucketT *DestBucket;
+ bool FoundVal = LookupBucketFor(B->first, DestBucket);
+ (void)FoundVal; // silence warning.
+ assert(!FoundVal && "Key already in new map?");
+ DestBucket->first = llvm_move(B->first);
+ new (&DestBucket->second) ValueT(llvm_move(B->second));
+ incrementNumEntries();
+
+ // Free the value.
+ B->second.~ValueT();
+ }
+ B->first.~KeyT();
}
- Buckets = static_cast<BucketT*>(operator new(sizeof(BucketT) * NumBuckets));
+#ifndef NDEBUG
+ if (OldBucketsBegin != OldBucketsEnd)
+ memset((void*)OldBucketsBegin, 0x5a,
+ sizeof(BucketT) * (OldBucketsEnd - OldBucketsBegin));
+#endif
+ }
+
+ template <typename OtherBaseT>
+ void copyFrom(const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT>& other) {
+ assert(getNumBuckets() == other.getNumBuckets());
+
+ setNumEntries(other.getNumEntries());
+ setNumTombstones(other.getNumTombstones());
if (isPodLike<KeyT>::value && isPodLike<ValueT>::value)
- memcpy(Buckets, other.Buckets, NumBuckets * sizeof(BucketT));
+ memcpy(getBuckets(), other.getBuckets(),
+ getNumBuckets() * sizeof(BucketT));
else
- for (size_t i = 0; i < NumBuckets; ++i) {
- new (&Buckets[i].first) KeyT(other.Buckets[i].first);
- if (!KeyInfoT::isEqual(Buckets[i].first, getEmptyKey()) &&
- !KeyInfoT::isEqual(Buckets[i].first, getTombstoneKey()))
- new (&Buckets[i].second) ValueT(other.Buckets[i].second);
+ for (size_t i = 0; i < getNumBuckets(); ++i) {
+ new (&getBuckets()[i].first) KeyT(other.getBuckets()[i].first);
+ if (!KeyInfoT::isEqual(getBuckets()[i].first, getEmptyKey()) &&
+ !KeyInfoT::isEqual(getBuckets()[i].first, getTombstoneKey()))
+ new (&getBuckets()[i].second) ValueT(other.getBuckets()[i].second);
}
}
+ void swap(DenseMapBase& RHS) {
+ std::swap(getNumEntries(), RHS.getNumEntries());
+ std::swap(getNumTombstones(), RHS.getNumTombstones());
+ }
+
+ static unsigned getHashValue(const KeyT &Val) {
+ return KeyInfoT::getHashValue(Val);
+ }
+ template<typename LookupKeyT>
+ static unsigned getHashValue(const LookupKeyT &Val) {
+ return KeyInfoT::getHashValue(Val);
+ }
+ static const KeyT getEmptyKey() {
+ return KeyInfoT::getEmptyKey();
+ }
+ static const KeyT getTombstoneKey() {
+ return KeyInfoT::getTombstoneKey();
+ }
+
+private:
+ unsigned getNumEntries() const {
+ return static_cast<const DerivedT *>(this)->getNumEntries();
+ }
+ void setNumEntries(unsigned Num) {
+ static_cast<DerivedT *>(this)->setNumEntries(Num);
+ }
+ void incrementNumEntries() {
+ setNumEntries(getNumEntries() + 1);
+ }
+ void decrementNumEntries() {
+ setNumEntries(getNumEntries() - 1);
+ }
+ unsigned getNumTombstones() const {
+ return static_cast<const DerivedT *>(this)->getNumTombstones();
+ }
+ void setNumTombstones(unsigned Num) {
+ static_cast<DerivedT *>(this)->setNumTombstones(Num);
+ }
+ void incrementNumTombstones() {
+ setNumTombstones(getNumTombstones() + 1);
+ }
+ void decrementNumTombstones() {
+ setNumTombstones(getNumTombstones() - 1);
+ }
+ const BucketT *getBuckets() const {
+ return static_cast<const DerivedT *>(this)->getBuckets();
+ }
+ BucketT *getBuckets() {
+ return static_cast<DerivedT *>(this)->getBuckets();
+ }
+ unsigned getNumBuckets() const {
+ return static_cast<const DerivedT *>(this)->getNumBuckets();
+ }
+ BucketT *getBucketsEnd() {
+ return getBuckets() + getNumBuckets();
+ }
+ const BucketT *getBucketsEnd() const {
+ return getBuckets() + getNumBuckets();
+ }
+
+ void grow(unsigned AtLeast) {
+ static_cast<DerivedT *>(this)->grow(AtLeast);
+ }
+
+ void shrink_and_clear() {
+ static_cast<DerivedT *>(this)->shrink_and_clear();
+ }
+
+
BucketT *InsertIntoBucket(const KeyT &Key, const ValueT &Value,
BucketT *TheBucket) {
+ TheBucket = InsertIntoBucketImpl(Key, TheBucket);
+
+ TheBucket->first = Key;
+ new (&TheBucket->second) ValueT(Value);
+ return TheBucket;
+ }
+
+#if LLVM_USE_RVALUE_REFERENCES
+ BucketT *InsertIntoBucket(const KeyT &Key, ValueT &&Value,
+ BucketT *TheBucket) {
+ TheBucket = InsertIntoBucketImpl(Key, TheBucket);
+
+ TheBucket->first = Key;
+ new (&TheBucket->second) ValueT(std::move(Value));
+ return TheBucket;
+ }
+
+ BucketT *InsertIntoBucket(KeyT &&Key, ValueT &&Value, BucketT *TheBucket) {
+ TheBucket = InsertIntoBucketImpl(Key, TheBucket);
+
+ TheBucket->first = std::move(Key);
+ new (&TheBucket->second) ValueT(std::move(Value));
+ return TheBucket;
+ }
+#endif
+
+ BucketT *InsertIntoBucketImpl(const KeyT &Key, BucketT *TheBucket) {
// If the load of the hash table is more than 3/4, or if fewer than 1/8 of
// the buckets are empty (meaning that many are filled with tombstones),
// grow the table.
@@ -306,48 +412,38 @@ private:
// probe almost the entire table until it found the empty bucket. If the
// table completely filled with tombstones, no lookup would ever succeed,
// causing infinite loops in lookup.
- ++NumEntries;
- if (NumEntries*4 >= NumBuckets*3) {
+ unsigned NewNumEntries = getNumEntries() + 1;
+ unsigned NumBuckets = getNumBuckets();
+ if (NewNumEntries*4 >= NumBuckets*3) {
this->grow(NumBuckets * 2);
LookupBucketFor(Key, TheBucket);
+ NumBuckets = getNumBuckets();
}
- if (NumBuckets-(NumEntries+NumTombstones) < NumBuckets/8) {
+ if (NumBuckets-(NewNumEntries+getNumTombstones()) <= NumBuckets/8) {
this->grow(NumBuckets);
LookupBucketFor(Key, TheBucket);
}
+ // Only update the state after we've grown our bucket space appropriately
+ // so that when growing buckets we have self-consistent entry count.
+ incrementNumEntries();
+
// If we are writing over a tombstone, remember this.
if (!KeyInfoT::isEqual(TheBucket->first, getEmptyKey()))
- --NumTombstones;
+ decrementNumTombstones();
- TheBucket->first = Key;
- new (&TheBucket->second) ValueT(Value);
return TheBucket;
}
- static unsigned getHashValue(const KeyT &Val) {
- return KeyInfoT::getHashValue(Val);
- }
- template<typename LookupKeyT>
- static unsigned getHashValue(const LookupKeyT &Val) {
- return KeyInfoT::getHashValue(Val);
- }
- static const KeyT getEmptyKey() {
- return KeyInfoT::getEmptyKey();
- }
- static const KeyT getTombstoneKey() {
- return KeyInfoT::getTombstoneKey();
- }
-
/// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
/// FoundBucket. If the bucket contains the key and a value, this returns
/// true, otherwise it returns a bucket with an empty marker or tombstone and
/// returns false.
template<typename LookupKeyT>
- bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) const {
- unsigned BucketNo = getHashValue(Val);
- unsigned ProbeAmt = 1;
- BucketT *BucketsPtr = Buckets;
+ bool LookupBucketFor(const LookupKeyT &Val,
+ const BucketT *&FoundBucket) const {
+ const BucketT *BucketsPtr = getBuckets();
+ const unsigned NumBuckets = getNumBuckets();
if (NumBuckets == 0) {
FoundBucket = 0;
@@ -355,15 +451,17 @@ private:
}
// FoundTombstone - Keep track of whether we find a tombstone while probing.
- BucketT *FoundTombstone = 0;
+ const BucketT *FoundTombstone = 0;
const KeyT EmptyKey = getEmptyKey();
const KeyT TombstoneKey = getTombstoneKey();
assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
!KeyInfoT::isEqual(Val, TombstoneKey) &&
"Empty/Tombstone value shouldn't be inserted into map!");
+ unsigned BucketNo = getHashValue(Val) & (NumBuckets-1);
+ unsigned ProbeAmt = 1;
while (1) {
- BucketT *ThisBucket = BucketsPtr + (BucketNo & (NumBuckets-1));
+ const BucketT *ThisBucket = BucketsPtr + BucketNo;
// Found Val's bucket? If so, return it.
if (KeyInfoT::isEqual(Val, ThisBucket->first)) {
FoundBucket = ThisBucket;
@@ -388,115 +486,481 @@ private:
// Otherwise, it's a hash collision or a tombstone, continue quadratic
// probing.
BucketNo += ProbeAmt++;
+ BucketNo &= (NumBuckets-1);
}
}
- void init(unsigned InitBuckets) {
- NumEntries = 0;
- NumTombstones = 0;
- NumBuckets = InitBuckets;
+ template <typename LookupKeyT>
+ bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
+ const BucketT *ConstFoundBucket;
+ bool Result = const_cast<const DenseMapBase *>(this)
+ ->LookupBucketFor(Val, ConstFoundBucket);
+ FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
+ return Result;
+ }
- if (InitBuckets == 0) {
- Buckets = 0;
- return;
+public:
+ /// Return the approximate size (in bytes) of the actual map.
+ /// This is just the raw memory used by DenseMap.
+ /// If entries are pointers to objects, the size of the referenced objects
+ /// are not included.
+ size_t getMemorySize() const {
+ return getNumBuckets() * sizeof(BucketT);
+ }
+};
+
+template<typename KeyT, typename ValueT,
+ typename KeyInfoT = DenseMapInfo<KeyT> >
+class DenseMap
+ : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT>,
+ KeyT, ValueT, KeyInfoT> {
+ // Lift some types from the dependent base class into this class for
+ // simplicity of referring to them.
+ typedef DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT> BaseT;
+ typedef typename BaseT::BucketT BucketT;
+ friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT>;
+
+ BucketT *Buckets;
+ unsigned NumEntries;
+ unsigned NumTombstones;
+ unsigned NumBuckets;
+
+public:
+ explicit DenseMap(unsigned NumInitBuckets = 0) {
+ init(NumInitBuckets);
+ }
+
+ DenseMap(const DenseMap &other) {
+ init(0);
+ copyFrom(other);
+ }
+
+#if LLVM_USE_RVALUE_REFERENCES
+ DenseMap(DenseMap &&other) {
+ init(0);
+ swap(other);
+ }
+#endif
+
+ template<typename InputIt>
+ DenseMap(const InputIt &I, const InputIt &E) {
+ init(NextPowerOf2(std::distance(I, E)));
+ this->insert(I, E);
+ }
+
+ ~DenseMap() {
+ this->destroyAll();
+ operator delete(Buckets);
+ }
+
+ void swap(DenseMap& RHS) {
+ std::swap(Buckets, RHS.Buckets);
+ std::swap(NumEntries, RHS.NumEntries);
+ std::swap(NumTombstones, RHS.NumTombstones);
+ std::swap(NumBuckets, RHS.NumBuckets);
+ }
+
+ DenseMap& operator=(const DenseMap& other) {
+ copyFrom(other);
+ return *this;
+ }
+
+#if LLVM_USE_RVALUE_REFERENCES
+ DenseMap& operator=(DenseMap &&other) {
+ this->destroyAll();
+ operator delete(Buckets);
+ init(0);
+ swap(other);
+ return *this;
+ }
+#endif
+
+ void copyFrom(const DenseMap& other) {
+ this->destroyAll();
+ operator delete(Buckets);
+ if (allocateBuckets(other.NumBuckets)) {
+ this->BaseT::copyFrom(other);
+ } else {
+ NumEntries = 0;
+ NumTombstones = 0;
}
+ }
- assert(InitBuckets && (InitBuckets & (InitBuckets-1)) == 0 &&
- "# initial buckets must be a power of two!");
- Buckets = static_cast<BucketT*>(operator new(sizeof(BucketT)*InitBuckets));
- // Initialize all the keys to EmptyKey.
- const KeyT EmptyKey = getEmptyKey();
- for (unsigned i = 0; i != InitBuckets; ++i)
- new (&Buckets[i].first) KeyT(EmptyKey);
+ void init(unsigned InitBuckets) {
+ if (allocateBuckets(InitBuckets)) {
+ this->BaseT::initEmpty();
+ } else {
+ NumEntries = 0;
+ NumTombstones = 0;
+ }
}
void grow(unsigned AtLeast) {
unsigned OldNumBuckets = NumBuckets;
BucketT *OldBuckets = Buckets;
- if (NumBuckets < 64)
- NumBuckets = 64;
+ allocateBuckets(std::max<unsigned>(64, NextPowerOf2(AtLeast)));
+ assert(Buckets);
+ if (!OldBuckets) {
+ this->BaseT::initEmpty();
+ return;
+ }
- // Double the number of buckets.
- while (NumBuckets < AtLeast)
- NumBuckets <<= 1;
- NumTombstones = 0;
- Buckets = static_cast<BucketT*>(operator new(sizeof(BucketT)*NumBuckets));
+ this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets);
- // Initialize all the keys to EmptyKey.
- const KeyT EmptyKey = getEmptyKey();
- for (unsigned i = 0, e = NumBuckets; i != e; ++i)
- new (&Buckets[i].first) KeyT(EmptyKey);
+ // Free the old table.
+ operator delete(OldBuckets);
+ }
- // Insert all the old elements.
- const KeyT TombstoneKey = getTombstoneKey();
- for (BucketT *B = OldBuckets, *E = OldBuckets+OldNumBuckets; B != E; ++B) {
- if (!KeyInfoT::isEqual(B->first, EmptyKey) &&
- !KeyInfoT::isEqual(B->first, TombstoneKey)) {
- // Insert the key/value into the new table.
- BucketT *DestBucket;
- bool FoundVal = LookupBucketFor(B->first, DestBucket);
- (void)FoundVal; // silence warning.
- assert(!FoundVal && "Key already in new map?");
- DestBucket->first = B->first;
- new (&DestBucket->second) ValueT(B->second);
+ void shrink_and_clear() {
+ unsigned OldNumEntries = NumEntries;
+ this->destroyAll();
- // Free the value.
- B->second.~ValueT();
- }
- B->first.~KeyT();
+ // Reduce the number of buckets.
+ unsigned NewNumBuckets = 0;
+ if (OldNumEntries)
+ NewNumBuckets = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1));
+ if (NewNumBuckets == NumBuckets) {
+ this->BaseT::initEmpty();
+ return;
}
-#ifndef NDEBUG
- if (OldNumBuckets)
- memset((void*)OldBuckets, 0x5a, sizeof(BucketT)*OldNumBuckets);
+ operator delete(Buckets);
+ init(NewNumBuckets);
+ }
+
+private:
+ unsigned getNumEntries() const {
+ return NumEntries;
+ }
+ void setNumEntries(unsigned Num) {
+ NumEntries = Num;
+ }
+
+ unsigned getNumTombstones() const {
+ return NumTombstones;
+ }
+ void setNumTombstones(unsigned Num) {
+ NumTombstones = Num;
+ }
+
+ BucketT *getBuckets() const {
+ return Buckets;
+ }
+
+ unsigned getNumBuckets() const {
+ return NumBuckets;
+ }
+
+ bool allocateBuckets(unsigned Num) {
+ NumBuckets = Num;
+ if (NumBuckets == 0) {
+ Buckets = 0;
+ return false;
+ }
+
+ Buckets = static_cast<BucketT*>(operator new(sizeof(BucketT) * NumBuckets));
+ return true;
+ }
+};
+
+template<typename KeyT, typename ValueT,
+ unsigned InlineBuckets = 4,
+ typename KeyInfoT = DenseMapInfo<KeyT> >
+class SmallDenseMap
+ : public DenseMapBase<SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT>,
+ KeyT, ValueT, KeyInfoT> {
+ // Lift some types from the dependent base class into this class for
+ // simplicity of referring to them.
+ typedef DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT> BaseT;
+ typedef typename BaseT::BucketT BucketT;
+ friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT>;
+
+ unsigned Small : 1;
+ unsigned NumEntries : 31;
+ unsigned NumTombstones;
+
+ struct LargeRep {
+ BucketT *Buckets;
+ unsigned NumBuckets;
+ };
+
+ /// A "union" of an inline bucket array and the struct representing
+ /// a large bucket. This union will be discriminated by the 'Small' bit.
+ typename AlignedCharArray<BucketT[InlineBuckets], LargeRep>::union_type
+ storage;
+
+public:
+ explicit SmallDenseMap(unsigned NumInitBuckets = 0) {
+ init(NumInitBuckets);
+ }
+
+ SmallDenseMap(const SmallDenseMap &other) {
+ init(0);
+ copyFrom(other);
+ }
+
+#if LLVM_USE_RVALUE_REFERENCES
+ SmallDenseMap(SmallDenseMap &&other) {
+ init(0);
+ swap(other);
+ }
#endif
- // Free the old table.
- operator delete(OldBuckets);
+
+ template<typename InputIt>
+ SmallDenseMap(const InputIt &I, const InputIt &E) {
+ init(NextPowerOf2(std::distance(I, E)));
+ this->insert(I, E);
}
- void shrink_and_clear() {
- unsigned OldNumBuckets = NumBuckets;
- BucketT *OldBuckets = Buckets;
+ ~SmallDenseMap() {
+ this->destroyAll();
+ deallocateBuckets();
+ }
- // Reduce the number of buckets.
- NumBuckets = NumEntries > 32 ? 1 << (Log2_32_Ceil(NumEntries) + 1)
- : 64;
- NumTombstones = 0;
- Buckets = static_cast<BucketT*>(operator new(sizeof(BucketT)*NumBuckets));
+ void swap(SmallDenseMap& RHS) {
+ unsigned TmpNumEntries = RHS.NumEntries;
+ RHS.NumEntries = NumEntries;
+ NumEntries = TmpNumEntries;
+ std::swap(NumTombstones, RHS.NumTombstones);
- // Initialize all the keys to EmptyKey.
- const KeyT EmptyKey = getEmptyKey();
- for (unsigned i = 0, e = NumBuckets; i != e; ++i)
- new (&Buckets[i].first) KeyT(EmptyKey);
+ const KeyT EmptyKey = this->getEmptyKey();
+ const KeyT TombstoneKey = this->getTombstoneKey();
+ if (Small && RHS.Small) {
+ // If we're swapping inline bucket arrays, we have to cope with some of
+ // the tricky bits of DenseMap's storage system: the buckets are not
+ // fully initialized. Thus we swap every key, but we may have
+ // a one-directional move of the value.
+ for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
+ BucketT *LHSB = &getInlineBuckets()[i],
+ *RHSB = &RHS.getInlineBuckets()[i];
+ bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->first, EmptyKey) &&
+ !KeyInfoT::isEqual(LHSB->first, TombstoneKey));
+ bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->first, EmptyKey) &&
+ !KeyInfoT::isEqual(RHSB->first, TombstoneKey));
+ if (hasLHSValue && hasRHSValue) {
+ // Swap together if we can...
+ std::swap(*LHSB, *RHSB);
+ continue;
+ }
+ // Swap separately and handle any asymmetry.
+ std::swap(LHSB->first, RHSB->first);
+ if (hasLHSValue) {
+ new (&RHSB->second) ValueT(llvm_move(LHSB->second));
+ LHSB->second.~ValueT();
+ } else if (hasRHSValue) {
+ new (&LHSB->second) ValueT(llvm_move(RHSB->second));
+ RHSB->second.~ValueT();
+ }
+ }
+ return;
+ }
+ if (!Small && !RHS.Small) {
+ std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets);
+ std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets);
+ return;
+ }
- // Free the old buckets.
- const KeyT TombstoneKey = getTombstoneKey();
- for (BucketT *B = OldBuckets, *E = OldBuckets+OldNumBuckets; B != E; ++B) {
- if (!KeyInfoT::isEqual(B->first, EmptyKey) &&
- !KeyInfoT::isEqual(B->first, TombstoneKey)) {
- // Free the value.
- B->second.~ValueT();
+ SmallDenseMap &SmallSide = Small ? *this : RHS;
+ SmallDenseMap &LargeSide = Small ? RHS : *this;
+
+ // First stash the large side's rep and move the small side across.
+ LargeRep TmpRep = llvm_move(*LargeSide.getLargeRep());
+ LargeSide.getLargeRep()->~LargeRep();
+ LargeSide.Small = true;
+ // This is similar to the standard move-from-old-buckets, but the bucket
+ // count hasn't actually rotated in this case. So we have to carefully
+ // move construct the keys and values into their new locations, but there
+ // is no need to re-hash things.
+ for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
+ BucketT *NewB = &LargeSide.getInlineBuckets()[i],
+ *OldB = &SmallSide.getInlineBuckets()[i];
+ new (&NewB->first) KeyT(llvm_move(OldB->first));
+ OldB->first.~KeyT();
+ if (!KeyInfoT::isEqual(NewB->first, EmptyKey) &&
+ !KeyInfoT::isEqual(NewB->first, TombstoneKey)) {
+ new (&NewB->second) ValueT(llvm_move(OldB->second));
+ OldB->second.~ValueT();
}
- B->first.~KeyT();
}
-#ifndef NDEBUG
- memset((void*)OldBuckets, 0x5a, sizeof(BucketT)*OldNumBuckets);
+ // The hard part of moving the small buckets across is done, just move
+ // the TmpRep into its new home.
+ SmallSide.Small = false;
+ new (SmallSide.getLargeRep()) LargeRep(llvm_move(TmpRep));
+ }
+
+ SmallDenseMap& operator=(const SmallDenseMap& other) {
+ copyFrom(other);
+ return *this;
+ }
+
+#if LLVM_USE_RVALUE_REFERENCES
+ SmallDenseMap& operator=(SmallDenseMap &&other) {
+ this->destroyAll();
+ deallocateBuckets();
+ init(0);
+ swap(other);
+ return *this;
+ }
#endif
+
+ void copyFrom(const SmallDenseMap& other) {
+ this->destroyAll();
+ deallocateBuckets();
+ Small = true;
+ if (other.getNumBuckets() > InlineBuckets) {
+ Small = false;
+ allocateBuckets(other.getNumBuckets());
+ }
+ this->BaseT::copyFrom(other);
+ }
+
+ void init(unsigned InitBuckets) {
+ Small = true;
+ if (InitBuckets > InlineBuckets) {
+ Small = false;
+ new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets));
+ }
+ this->BaseT::initEmpty();
+ }
+
+ void grow(unsigned AtLeast) {
+ if (AtLeast > InlineBuckets)
+ AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast));
+
+ if (Small) {
+ if (AtLeast <= InlineBuckets)
+ return; // Nothing to do.
+
+ // First move the inline buckets into a temporary storage.
+ typename AlignedCharArray<BucketT[InlineBuckets]>::union_type
+ TmpStorage;
+ BucketT *TmpBegin = reinterpret_cast<BucketT *>(TmpStorage.buffer);
+ BucketT *TmpEnd = TmpBegin;
+
+ // Loop over the buckets, moving non-empty, non-tombstones into the
+ // temporary storage. Have the loop move the TmpEnd forward as it goes.
+ const KeyT EmptyKey = this->getEmptyKey();
+ const KeyT TombstoneKey = this->getTombstoneKey();
+ for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) {
+ if (!KeyInfoT::isEqual(P->first, EmptyKey) &&
+ !KeyInfoT::isEqual(P->first, TombstoneKey)) {
+ assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
+ "Too many inline buckets!");
+ new (&TmpEnd->first) KeyT(llvm_move(P->first));
+ new (&TmpEnd->second) ValueT(llvm_move(P->second));
+ ++TmpEnd;
+ P->second.~ValueT();
+ }
+ P->first.~KeyT();
+ }
+
+ // Now make this map use the large rep, and move all the entries back
+ // into it.
+ Small = false;
+ new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
+ this->moveFromOldBuckets(TmpBegin, TmpEnd);
+ return;
+ }
+
+ LargeRep OldRep = llvm_move(*getLargeRep());
+ getLargeRep()->~LargeRep();
+ if (AtLeast <= InlineBuckets) {
+ Small = true;
+ } else {
+ new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
+ }
+
+ this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets);
+
// Free the old table.
- operator delete(OldBuckets);
+ operator delete(OldRep.Buckets);
+ }
+
+ void shrink_and_clear() {
+ unsigned OldSize = this->size();
+ this->destroyAll();
+
+ // Reduce the number of buckets.
+ unsigned NewNumBuckets = 0;
+ if (OldSize) {
+ NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1);
+ if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u)
+ NewNumBuckets = 64;
+ }
+ if ((Small && NewNumBuckets <= InlineBuckets) ||
+ (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
+ this->BaseT::initEmpty();
+ return;
+ }
- NumEntries = 0;
+ deallocateBuckets();
+ init(NewNumBuckets);
}
-
-public:
- /// Return the approximate size (in bytes) of the actual map.
- /// This is just the raw memory used by DenseMap.
- /// If entries are pointers to objects, the size of the referenced objects
- /// are not included.
- size_t getMemorySize() const {
- return NumBuckets * sizeof(BucketT);
+
+private:
+ unsigned getNumEntries() const {
+ return NumEntries;
+ }
+ void setNumEntries(unsigned Num) {
+ assert(Num < INT_MAX && "Cannot support more than INT_MAX entries");
+ NumEntries = Num;
+ }
+
+ unsigned getNumTombstones() const {
+ return NumTombstones;
+ }
+ void setNumTombstones(unsigned Num) {
+ NumTombstones = Num;
+ }
+
+ const BucketT *getInlineBuckets() const {
+ assert(Small);
+ // Note that this cast does not violate aliasing rules as we assert that
+ // the memory's dynamic type is the small, inline bucket buffer, and the
+ // 'storage.buffer' static type is 'char *'.
+ return reinterpret_cast<const BucketT *>(storage.buffer);
+ }
+ BucketT *getInlineBuckets() {
+ return const_cast<BucketT *>(
+ const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
+ }
+ const LargeRep *getLargeRep() const {
+ assert(!Small);
+ // Note, same rule about aliasing as with getInlineBuckets.
+ return reinterpret_cast<const LargeRep *>(storage.buffer);
+ }
+ LargeRep *getLargeRep() {
+ return const_cast<LargeRep *>(
+ const_cast<const SmallDenseMap *>(this)->getLargeRep());
+ }
+
+ const BucketT *getBuckets() const {
+ return Small ? getInlineBuckets() : getLargeRep()->Buckets;
+ }
+ BucketT *getBuckets() {
+ return const_cast<BucketT *>(
+ const_cast<const SmallDenseMap *>(this)->getBuckets());
+ }
+ unsigned getNumBuckets() const {
+ return Small ? InlineBuckets : getLargeRep()->NumBuckets;
+ }
+
+ void deallocateBuckets() {
+ if (Small)
+ return;
+
+ operator delete(getLargeRep()->Buckets);
+ getLargeRep()->~LargeRep();
+ }
+
+ LargeRep allocateBuckets(unsigned Num) {
+ assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
+ LargeRep Rep = {
+ static_cast<BucketT*>(operator new(sizeof(BucketT) * Num)), Num
+ };
+ return Rep;
}
};
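For orientation, a usage sketch of the new SmallDenseMap (hedged: the exact growth point follows from the 3/4 load-factor logic above; keys and values are illustrative):

#include "llvm/ADT/DenseMap.h"
using llvm::SmallDenseMap;

void smallDenseMapExample() {
  SmallDenseMap<int, const char *, 4> M; // up to 4 buckets held inline
  M[1] = "one";                          // stored in the inline bucket array
  for (int i = 2; i <= 16; ++i)
    M[i] = "more";                       // once the load factor is exceeded,
                                         // grow() moves the table into the
                                         // heap-allocated LargeRep
}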
diff --git a/include/llvm/ADT/DepthFirstIterator.h b/include/llvm/ADT/DepthFirstIterator.h
index dd13a2c02053..519b18052b6d 100644
--- a/include/llvm/ADT/DepthFirstIterator.h
+++ b/include/llvm/ADT/DepthFirstIterator.h
@@ -187,7 +187,7 @@ public:
/// current node, counting both nodes.
unsigned getPathLength() const { return VisitStack.size(); }
- /// getPath - Return the n'th node in the path from the the entry node to the
+ /// getPath - Return the n'th node in the path from the entry node to the
/// current node.
NodeType *getPath(unsigned n) const {
return VisitStack[n].first.getPointer();
diff --git a/include/llvm/ADT/FoldingSet.h b/include/llvm/ADT/FoldingSet.h
index 7d7c77770020..ba415ac2d61f 100644
--- a/include/llvm/ADT/FoldingSet.h
+++ b/include/llvm/ADT/FoldingSet.h
@@ -518,6 +518,111 @@ public:
};
//===----------------------------------------------------------------------===//
+/// FoldingSetVectorIterator - This implements an iterator for
+/// FoldingSetVector. It is only necessary because FoldingSetIterator provides
+/// a value_type of T, while the vector in FoldingSetVector exposes
+/// a value_type of T*. Fortunately, FoldingSetIterator doesn't expose very
+/// much besides operator* and operator->, so we just wrap the inner vector
+/// iterator and perform the extra dereference.
+template <class T, class VectorIteratorT>
+class FoldingSetVectorIterator {
+ // Provide a typedef to work around the lack of correct injected class name
+ // support in older GCCs.
+ typedef FoldingSetVectorIterator<T, VectorIteratorT> SelfT;
+
+ VectorIteratorT Iterator;
+
+public:
+ FoldingSetVectorIterator(VectorIteratorT I) : Iterator(I) {}
+
+ bool operator==(const SelfT &RHS) const {
+ return Iterator == RHS.Iterator;
+ }
+ bool operator!=(const SelfT &RHS) const {
+ return Iterator != RHS.Iterator;
+ }
+
+ T &operator*() const { return **Iterator; }
+
+ T *operator->() const { return *Iterator; }
+
+ inline SelfT &operator++() {
+ ++Iterator;
+ return *this;
+ }
+ SelfT operator++(int) {
+ SelfT tmp = *this;
+ ++*this;
+ return tmp;
+ }
+};
+
+//===----------------------------------------------------------------------===//
+/// FoldingSetVector - This template class combines a FoldingSet and a vector
+/// to provide the interface of FoldingSet but with deterministic iteration
+/// order based on the insertion order. T must be a subclass of FoldingSetNode
+/// and implement a Profile function.
+template <class T, class VectorT = SmallVector<T*, 8> >
+class FoldingSetVector {
+ FoldingSet<T> Set;
+ VectorT Vector;
+
+public:
+ explicit FoldingSetVector(unsigned Log2InitSize = 6)
+ : Set(Log2InitSize) {
+ }
+
+ typedef FoldingSetVectorIterator<T, typename VectorT::iterator> iterator;
+ iterator begin() { return Vector.begin(); }
+ iterator end() { return Vector.end(); }
+
+ typedef FoldingSetVectorIterator<const T, typename VectorT::const_iterator>
+ const_iterator;
+ const_iterator begin() const { return Vector.begin(); }
+ const_iterator end() const { return Vector.end(); }
+
+ /// clear - Remove all nodes from the folding set.
+ void clear() { Set.clear(); Vector.clear(); }
+
+ /// FindNodeOrInsertPos - Look up the node specified by ID. If it exists,
+ /// return it. If not, return the insertion token that will make insertion
+ /// faster.
+ T *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos) {
+ return Set.FindNodeOrInsertPos(ID, InsertPos);
+ }
+
+ /// GetOrInsertNode - If there is an existing simple Node exactly
+ /// equal to the specified node, return it. Otherwise, insert 'N' and
+ /// return it instead.
+ T *GetOrInsertNode(T *N) {
+ T *Result = Set.GetOrInsertNode(N);
+ if (Result == N) Vector.push_back(N);
+ return Result;
+ }
+
+ /// InsertNode - Insert the specified node into the folding set, knowing that
+ /// it is not already in the folding set. InsertPos must be obtained from
+ /// FindNodeOrInsertPos.
+ void InsertNode(T *N, void *InsertPos) {
+ Set.InsertNode(N, InsertPos);
+ Vector.push_back(N);
+ }
+
+ /// InsertNode - Insert the specified node into the folding set, knowing that
+ /// it is not already in the folding set.
+ void InsertNode(T *N) {
+ Set.InsertNode(N);
+ Vector.push_back(N);
+ }
+
+ /// size - Returns the number of nodes in the folding set.
+ unsigned size() const { return Set.size(); }
+
+ /// empty - Returns true if there are no nodes in the folding set.
+ bool empty() const { return Set.empty(); }
+};
+
+//===----------------------------------------------------------------------===//
/// FoldingSetIteratorImpl - This is the common iterator support shared by all
/// folding sets, which knows how to walk the folding set hash table.
class FoldingSetIteratorImpl {
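A sketch of FoldingSetVector with a hypothetical node type (MyNode and its Profile are illustrative, not part of the patch):

#include "llvm/ADT/FoldingSet.h"
#include <cassert>
using namespace llvm;

struct MyNode : FoldingSetNode {
  int Value;
  explicit MyNode(int V) : Value(V) {}
  void Profile(FoldingSetNodeID &ID) const { ID.AddInteger(Value); }
};

void foldingSetVectorExample() {
  FoldingSetVector<MyNode> FSV;
  MyNode A(1), B(2), Dup(1);
  FSV.GetOrInsertNode(&A);
  FSV.GetOrInsertNode(&B);
  assert(FSV.GetOrInsertNode(&Dup) == &A); // equal profiles fold together
  // Iteration visits A then B, in insertion order, independent of the
  // hash table's internal layout.
}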
diff --git a/include/llvm/ADT/Hashing.h b/include/llvm/ADT/Hashing.h
index 53032ee538d2..6ab07254a21d 100644
--- a/include/llvm/ADT/Hashing.h
+++ b/include/llvm/ADT/Hashing.h
@@ -76,10 +76,6 @@ namespace llvm {
/// using llvm::hash_value;
/// llvm::hash_code code = hash_value(x);
/// \endcode
-///
-/// Also note that there are two numerical values which are reserved, and the
-/// implementation ensures will never be produced for real hash_codes. These
-/// can be used as sentinels within hashing data structures.
class hash_code {
size_t value;
diff --git a/include/llvm/ADT/ImmutableSet.h b/include/llvm/ADT/ImmutableSet.h
index 89b164819d37..949dc44daba6 100644
--- a/include/llvm/ADT/ImmutableSet.h
+++ b/include/llvm/ADT/ImmutableSet.h
@@ -431,7 +431,7 @@ protected:
// Make sure the index is not the Tombstone or Entry key of the DenseMap.
static inline unsigned maskCacheIndex(unsigned I) {
- return (I & ~0x02);
+ return (I & ~0x02);
}
unsigned incrementHeight(TreeTy* L, TreeTy* R) const {
@@ -667,7 +667,7 @@ public:
return reinterpret_cast<TreeTy*>(stack.back() & ~Flags);
}
- uintptr_t getVisitState() {
+ uintptr_t getVisitState() const {
assert(!stack.empty());
return stack.back() & Flags;
}
diff --git a/include/llvm/ADT/IndexedMap.h b/include/llvm/ADT/IndexedMap.h
index 87126ea49187..2ffb5058e5bb 100644
--- a/include/llvm/ADT/IndexedMap.h
+++ b/include/llvm/ADT/IndexedMap.h
@@ -20,19 +20,14 @@
#ifndef LLVM_ADT_INDEXEDMAP_H
#define LLVM_ADT_INDEXEDMAP_H
+#include "llvm/ADT/STLExtras.h"
#include <cassert>
#include <functional>
#include <vector>
namespace llvm {
- struct IdentityFunctor : public std::unary_function<unsigned, unsigned> {
- unsigned operator()(unsigned Index) const {
- return Index;
- }
- };
-
- template <typename T, typename ToIndexT = IdentityFunctor>
+template <typename T, typename ToIndexT = llvm::identity<unsigned> >
class IndexedMap {
typedef typename ToIndexT::argument_type IndexT;
typedef std::vector<T> StorageT;
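IndexedMap's default index functor is now the shared llvm::identity template added to STLExtras later in this patch, replacing the local IdentityFunctor. Client code is unaffected; a small sketch (assuming the usual grow/operator[] interface):

#include "llvm/ADT/IndexedMap.h"
using llvm::IndexedMap;

void indexedMapExample() {
  IndexedMap<const char *> Names; // unsigned keys map straight onto a vector
  Names.grow(8);                  // ensure storage for indices up to 8
  Names[3] = "three";
}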
diff --git a/include/llvm/ADT/IntrusiveRefCntPtr.h b/include/llvm/ADT/IntrusiveRefCntPtr.h
index 3a1a3f4634cf..a9724ee15447 100644
--- a/include/llvm/ADT/IntrusiveRefCntPtr.h
+++ b/include/llvm/ADT/IntrusiveRefCntPtr.h
@@ -21,9 +21,9 @@
#ifndef LLVM_ADT_INTRUSIVE_REF_CNT_PTR
#define LLVM_ADT_INTRUSIVE_REF_CNT_PTR
-#include <cassert>
-
#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
+#include <memory>
namespace llvm {
@@ -34,7 +34,7 @@ namespace llvm {
/// RefCountedBase - A generic base class for objects that wish to
/// have their lifetimes managed using reference counts. Classes
/// subclass RefCountedBase to obtain such functionality, and are
-/// typically handled with IntrusivePtr "smart pointers" (see below)
+/// typically handled with IntrusiveRefCntPtr "smart pointers" (see below)
/// which automatically handle the management of reference counts.
/// Objects that subclass RefCountedBase should not be allocated on
/// the stack, as invoking "delete" (which is called when the
@@ -123,25 +123,25 @@ namespace llvm {
retain();
}
- template <class X>
- IntrusiveRefCntPtr(const IntrusiveRefCntPtr<X>& S)
- : Obj(S.getPtr()) {
- retain();
+#if LLVM_USE_RVALUE_REFERENCES
+ IntrusiveRefCntPtr(IntrusiveRefCntPtr&& S) : Obj(S.Obj) {
+ S.Obj = 0;
}
- IntrusiveRefCntPtr& operator=(const IntrusiveRefCntPtr& S) {
- replace(S.getPtr());
- return *this;
+ template <class X>
+ IntrusiveRefCntPtr(IntrusiveRefCntPtr<X>&& S) : Obj(S.getPtr()) {
+ S.Obj = 0;
}
+#endif
template <class X>
- IntrusiveRefCntPtr& operator=(const IntrusiveRefCntPtr<X>& S) {
- replace(S.getPtr());
- return *this;
+ IntrusiveRefCntPtr(const IntrusiveRefCntPtr<X>& S)
+ : Obj(S.getPtr()) {
+ retain();
}
- IntrusiveRefCntPtr& operator=(T * S) {
- replace(S);
+ IntrusiveRefCntPtr& operator=(IntrusiveRefCntPtr S) {
+ swap(S);
return *this;
}
@@ -176,10 +176,6 @@ namespace llvm {
private:
void retain() { if (Obj) IntrusiveRefCntPtrInfo<T>::retain(Obj); }
void release() { if (Obj) IntrusiveRefCntPtrInfo<T>::release(Obj); }
-
- void replace(T* S) {
- this_type(S).swap(*this);
- }
};
template<class T, class U>
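The separate copy, converting, and raw-pointer assignment operators above collapse into one operator= taking its parameter by value, giving copy-and-swap semantics (and, when rvalue references are available, moves for free). A sketch with a hypothetical Widget type:

#include "llvm/ADT/IntrusiveRefCntPtr.h"
using namespace llvm;

struct Widget : RefCountedBase<Widget> {};

void refCntPtrExample() {
  IntrusiveRefCntPtr<Widget> P(new Widget());
  IntrusiveRefCntPtr<Widget> Q;
  Q = P; // the by-value parameter retains, swap() installs it, and the
         // temporary's destructor releases whatever Q held before
}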
diff --git a/include/llvm/ADT/PointerIntPair.h b/include/llvm/ADT/PointerIntPair.h
index ccdcd1a8d1b9..fcc758b43a27 100644
--- a/include/llvm/ADT/PointerIntPair.h
+++ b/include/llvm/ADT/PointerIntPair.h
@@ -108,7 +108,14 @@ public:
static PointerIntPair getFromOpaqueValue(void *V) {
PointerIntPair P; P.setFromOpaqueValue(V); return P;
}
-
+
+ // Allow PointerIntPairs to be created from const void * if and only if the
+ // pointer type could be created from a const void *.
+ static PointerIntPair getFromOpaqueValue(const void *V) {
+ (void)PtrTraits::getFromVoidPointer(V);
+ return getFromOpaqueValue(const_cast<void *>(V));
+ }
+
bool operator==(const PointerIntPair &RHS) const {return Value == RHS.Value;}
bool operator!=(const PointerIntPair &RHS) const {return Value != RHS.Value;}
bool operator<(const PointerIntPair &RHS) const {return Value < RHS.Value;}
@@ -158,6 +165,10 @@ public:
getFromVoidPointer(void *P) {
return PointerIntPair<PointerTy, IntBits, IntType>::getFromOpaqueValue(P);
}
+ static inline PointerIntPair<PointerTy, IntBits, IntType>
+ getFromVoidPointer(const void *P) {
+ return PointerIntPair<PointerTy, IntBits, IntType>::getFromOpaqueValue(P);
+ }
enum {
NumLowBitsAvailable = PtrTraits::NumLowBitsAvailable - IntBits
};
diff --git a/include/llvm/ADT/PointerUnion.h b/include/llvm/ADT/PointerUnion.h
index 614b59c844e3..a9e86d22002d 100644
--- a/include/llvm/ADT/PointerUnion.h
+++ b/include/llvm/ADT/PointerUnion.h
@@ -54,8 +54,8 @@ namespace llvm {
static inline void *getAsVoidPointer(void *P) { return P; }
static inline void *getFromVoidPointer(void *P) { return P; }
enum {
- PT1BitsAv = PointerLikeTypeTraits<PT1>::NumLowBitsAvailable,
- PT2BitsAv = PointerLikeTypeTraits<PT2>::NumLowBitsAvailable,
+ PT1BitsAv = (int)(PointerLikeTypeTraits<PT1>::NumLowBitsAvailable),
+ PT2BitsAv = (int)(PointerLikeTypeTraits<PT2>::NumLowBitsAvailable),
NumLowBitsAvailable = PT1BitsAv < PT2BitsAv ? PT1BitsAv : PT2BitsAv
};
};
diff --git a/include/llvm/ADT/PostOrderIterator.h b/include/llvm/ADT/PostOrderIterator.h
index 63a2b5219f73..7f6350e4443e 100644
--- a/include/llvm/ADT/PostOrderIterator.h
+++ b/include/llvm/ADT/PostOrderIterator.h
@@ -23,26 +23,65 @@
namespace llvm {
-template<class SetType, bool External> // Non-external set
+// The po_iterator_storage template provides access to the set of already
+// visited nodes during the po_iterator's depth-first traversal.
+//
+// The default implementation simply contains a set of visited nodes, while
+// the External=true version uses a reference to an external set.
+//
+// It is possible to prune the depth-first traversal in several ways:
+//
+// - When providing an external set that already contains some graph nodes,
+// those nodes won't be visited again. This is useful for restarting a
+// post-order traversal on a graph with nodes that aren't dominated by a
+// single node.
+//
+// - By providing a custom SetType class, unwanted graph nodes can be excluded
+// by having the insert() function return false. This could for example
+// confine a CFG traversal to blocks in a specific loop.
+//
+// - Finally, by specializing the po_iterator_storage template itself, graph
+// edges can be pruned by returning false in the insertEdge() function. This
+// could be used to remove loop back-edges from the CFG seen by po_iterator.
+//
+// A specialized po_iterator_storage class can observe both the pre-order and
+// the post-order. The insertEdge() function is called in a pre-order, while
+// the finishPostorder() function is called just before the po_iterator moves
+// on to the next node.
+
+/// Default po_iterator_storage implementation with an internal set object.
+template<class SetType, bool External>
class po_iterator_storage {
-public:
SetType Visited;
-};
+public:
+ // Return true if edge destination should be visited.
+ template<typename NodeType>
+ bool insertEdge(NodeType *From, NodeType *To) {
+ return Visited.insert(To);
+ }
-/// DFSetTraits - Allow the SetType used to record depth-first search results to
-/// optionally record node postorder.
-template<class SetType>
-struct DFSetTraits {
- static void finishPostorder(
- typename SetType::iterator::value_type, SetType &) {}
+ // Called after all children of BB have been visited.
+ template<typename NodeType>
+ void finishPostorder(NodeType *BB) {}
};
+/// Specialization of po_iterator_storage that references an external set.
template<class SetType>
class po_iterator_storage<SetType, true> {
+ SetType &Visited;
public:
po_iterator_storage(SetType &VSet) : Visited(VSet) {}
po_iterator_storage(const po_iterator_storage &S) : Visited(S.Visited) {}
- SetType &Visited;
+
+ // Return true if edge destination should be visited, called with From = 0 for
+ // the root node.
+ // Graph edges can be pruned by specializing this function.
+ template<class NodeType>
+ bool insertEdge(NodeType *From, NodeType *To) { return Visited.insert(To); }
+
+ // Called after all children of BB have been visited.
+ template<class NodeType>
+ void finishPostorder(NodeType *BB) {}
};
template<class GraphT,
@@ -64,14 +103,15 @@ class po_iterator : public std::iterator<std::forward_iterator_tag,
void traverseChild() {
while (VisitStack.back().second != GT::child_end(VisitStack.back().first)) {
NodeType *BB = *VisitStack.back().second++;
- if (this->Visited.insert(BB)) { // If the block is not visited...
+ if (this->insertEdge(VisitStack.back().first, BB)) {
+ // If the block is not visited...
VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
}
}
}
inline po_iterator(NodeType *BB) {
- this->Visited.insert(BB);
+ this->insertEdge((NodeType*)0, BB);
VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
traverseChild();
}
@@ -79,7 +119,7 @@ class po_iterator : public std::iterator<std::forward_iterator_tag,
inline po_iterator(NodeType *BB, SetType &S) :
po_iterator_storage<SetType, ExtStorage>(S) {
- if (this->Visited.insert(BB)) {
+ if (this->insertEdge((NodeType*)0, BB)) {
VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
traverseChild();
}
@@ -117,8 +157,7 @@ public:
inline NodeType *operator->() const { return operator*(); }
inline _Self& operator++() { // Preincrement
- DFSetTraits<SetType>::finishPostorder(VisitStack.back().first,
- this->Visited);
+ this->finishPostorder(VisitStack.back().first);
VisitStack.pop_back();
if (!VisitStack.empty())
traverseChild();
@@ -173,14 +212,14 @@ ipo_iterator<T> ipo_end(T G){
return ipo_iterator<T>::end(G);
}
-//Provide global definitions of external inverse postorder iterators...
+// Provide global definitions of external inverse postorder iterators...
template <class T,
class SetType = std::set<typename GraphTraits<T>::NodeType*> >
struct ipo_ext_iterator : public ipo_iterator<T, SetType, true> {
ipo_ext_iterator(const ipo_iterator<T, SetType, true> &V) :
- ipo_iterator<T, SetType, true>(&V) {}
+ ipo_iterator<T, SetType, true>(V) {}
ipo_ext_iterator(const po_iterator<Inverse<T>, SetType, true> &V) :
- ipo_iterator<T, SetType, true>(&V) {}
+ ipo_iterator<T, SetType, true>(V) {}
};
template <class T, class SetType>
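
// Illustrative sketch (not part of the patch): restarting a post-order walk
// with an external visited set, per the comment block above. Assumes the CFG
// GraphTraits from "llvm/Support/CFG.h" (the header's 2012-era location);
// walkFromBothRoots is a hypothetical name. SmallPtrSet is used because its
// insert() returns bool, which insertEdge() requires.
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/BasicBlock.h"
#include "llvm/Support/CFG.h"

void walkFromBothRoots(llvm::BasicBlock *RootA, llvm::BasicBlock *RootB) {
  typedef llvm::SmallPtrSet<llvm::BasicBlock*, 16> SetTy;
  typedef llvm::po_ext_iterator<llvm::BasicBlock*, SetTy> POIt;
  SetTy Visited;
  for (POIt I = llvm::po_ext_begin(RootA, Visited),
            E = llvm::po_ext_end(RootA, Visited); I != E; ++I)
    ; // ...visit *I...
  // Blocks already in Visited are pruned, so the second walk only covers
  // blocks unreachable from RootA.
  for (POIt I = llvm::po_ext_begin(RootB, Visited),
            E = llvm::po_ext_end(RootB, Visited); I != E; ++I)
    ; // ...visit *I...
}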
diff --git a/include/llvm/ADT/STLExtras.h b/include/llvm/ADT/STLExtras.h
index 5da906dc8cf0..aee500d4fb6c 100644
--- a/include/llvm/ADT/STLExtras.h
+++ b/include/llvm/ADT/STLExtras.h
@@ -30,6 +30,16 @@ namespace llvm {
//===----------------------------------------------------------------------===//
template<class Ty>
+struct identity : public std::unary_function<Ty, Ty> {
+ Ty &operator()(Ty &self) const {
+ return self;
+ }
+ const Ty &operator()(const Ty &self) const {
+ return self;
+ }
+};
+
+template<class Ty>
struct less_ptr : public std::binary_function<Ty, Ty, bool> {
bool operator()(const Ty* left, const Ty* right) const {
return *left < *right;
@@ -49,7 +59,7 @@ struct greater_ptr : public std::binary_function<Ty, Ty, bool> {
 // for_each(V.begin(), V.end(), deleter<Interval>);
//
template <class T>
-static inline void deleter(T *Ptr) {
+inline void deleter(T *Ptr) {
delete Ptr;
}
@@ -228,7 +238,7 @@ inline size_t array_lengthof(T (&)[N]) {
 /// array_pod_sort_comparator - This is a helper function for array_pod_sort,
/// which just uses operator< on T.
template<typename T>
-static inline int array_pod_sort_comparator(const void *P1, const void *P2) {
+inline int array_pod_sort_comparator(const void *P1, const void *P2) {
if (*reinterpret_cast<const T*>(P1) < *reinterpret_cast<const T*>(P2))
return -1;
if (*reinterpret_cast<const T*>(P2) < *reinterpret_cast<const T*>(P1))
@@ -239,7 +249,7 @@ static inline int array_pod_sort_comparator(const void *P1, const void *P2) {
/// get_array_pad_sort_comparator - This is an internal helper function used to
/// get type deduction of T right.
template<typename T>
-static int (*get_array_pad_sort_comparator(const T &))
+inline int (*get_array_pad_sort_comparator(const T &))
(const void*, const void*) {
return array_pod_sort_comparator<T>;
}
@@ -260,7 +270,7 @@ static int (*get_array_pad_sort_comparator(const T &))
/// NOTE: If qsort_r were portable, we could allow a custom comparator and
/// default to std::less.
template<class IteratorTy>
-static inline void array_pod_sort(IteratorTy Start, IteratorTy End) {
+inline void array_pod_sort(IteratorTy Start, IteratorTy End) {
// Don't dereference start iterator of empty sequence.
if (Start == End) return;
qsort(&*Start, End-Start, sizeof(*Start),
@@ -268,13 +278,13 @@ static inline void array_pod_sort(IteratorTy Start, IteratorTy End) {
}
template<class IteratorTy>
-static inline void array_pod_sort(IteratorTy Start, IteratorTy End,
+inline void array_pod_sort(IteratorTy Start, IteratorTy End,
int (*Compare)(const void*, const void*)) {
// Don't dereference start iterator of empty sequence.
if (Start == End) return;
qsort(&*Start, End-Start, sizeof(*Start), Compare);
}
-
+
//===----------------------------------------------------------------------===//
// Extra additions to <algorithm>
//===----------------------------------------------------------------------===//
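
// Illustrative sketch (not part of the patch): identity<> is an ordinary
// unary_function-style functor -- SparseSet below relies on its argument_type
// typedef -- and array_pod_sort is unchanged in behavior; only its linkage
// switched from static to inline. stlExtrasExamples is a hypothetical name.
#include "llvm/ADT/STLExtras.h"
#include <cassert>

void stlExtrasExamples() {
  llvm::identity<unsigned> Id;
  unsigned Key = 7;
  assert(Id(Key) == 7 && "identity returns its argument unchanged");

  int Data[] = { 3, 1, 2 };
  llvm::array_pod_sort(Data, Data + 3); // qsort-based sort for POD ranges
  assert(Data[0] == 1 && Data[2] == 3);
}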
diff --git a/include/llvm/ADT/SmallBitVector.h b/include/llvm/ADT/SmallBitVector.h
index a3469a1c6226..7a645e0c7241 100644
--- a/include/llvm/ADT/SmallBitVector.h
+++ b/include/llvm/ADT/SmallBitVector.h
@@ -15,6 +15,7 @@
#define LLVM_ADT_SMALLBITVECTOR_H
#include "llvm/ADT/BitVector.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
@@ -152,6 +153,12 @@ public:
switchToLarge(new BitVector(*RHS.getPointer()));
}
+#if LLVM_USE_RVALUE_REFERENCES
+ SmallBitVector(SmallBitVector &&RHS) : X(RHS.X) {
+ RHS.X = 1;
+ }
+#endif
+
~SmallBitVector() {
if (!isSmall())
delete getPointer();
@@ -347,6 +354,19 @@ public:
return (*this)[Idx];
}
+ /// Test if any common bits are set.
+ bool anyCommon(const SmallBitVector &RHS) const {
+ if (isSmall() && RHS.isSmall())
+ return (getSmallBits() & RHS.getSmallBits()) != 0;
+ if (!isSmall() && !RHS.isSmall())
+ return getPointer()->anyCommon(*RHS.getPointer());
+
+ for (unsigned i = 0, e = std::min(size(), RHS.size()); i != e; ++i)
+ if (test(i) && RHS.test(i))
+ return true;
+ return false;
+ }
+
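
// Illustrative sketch (not part of the patch): anyCommon() tests two vectors
// for overlapping set bits without materializing the intersection; the three
// branches above cover small/small, large/large, and mixed operands.
// bitsOverlap is a hypothetical name.
#include "llvm/ADT/SmallBitVector.h"

bool bitsOverlap() {
  llvm::SmallBitVector A(16), B(100); // A fits inline, B is heap-backed
  A.set(3);
  B.set(3);
  return A.anyCommon(B);              // true: bit 3 is set in both
}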
// Comparison operators.
bool operator==(const SmallBitVector &RHS) const {
if (size() != RHS.size())
@@ -422,9 +442,72 @@ public:
return *this;
}
+#if LLVM_USE_RVALUE_REFERENCES
+ const SmallBitVector &operator=(SmallBitVector &&RHS) {
+ if (this != &RHS) {
+ clear();
+ swap(RHS);
+ }
+ return *this;
+ }
+#endif
+
void swap(SmallBitVector &RHS) {
std::swap(X, RHS.X);
}
+
+ /// setBitsInMask - Add '1' bits from Mask to this vector. Don't resize.
+ /// This computes "*this |= Mask".
+ void setBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+ if (isSmall())
+ applyMask<true, false>(Mask, MaskWords);
+ else
+ getPointer()->setBitsInMask(Mask, MaskWords);
+ }
+
+ /// clearBitsInMask - Clear any bits in this vector that are set in Mask.
+ /// Don't resize. This computes "*this &= ~Mask".
+ void clearBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+ if (isSmall())
+ applyMask<false, false>(Mask, MaskWords);
+ else
+ getPointer()->clearBitsInMask(Mask, MaskWords);
+ }
+
+ /// setBitsNotInMask - Add a bit to this vector for every '0' bit in Mask.
+ /// Don't resize. This computes "*this |= ~Mask".
+ void setBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+ if (isSmall())
+ applyMask<true, true>(Mask, MaskWords);
+ else
+ getPointer()->setBitsNotInMask(Mask, MaskWords);
+ }
+
+ /// clearBitsNotInMask - Clear a bit in this vector for every '0' bit in Mask.
+ /// Don't resize. This computes "*this &= Mask".
+ void clearBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
+ if (isSmall())
+ applyMask<false, true>(Mask, MaskWords);
+ else
+ getPointer()->clearBitsNotInMask(Mask, MaskWords);
+ }
+
+private:
+ template<bool AddBits, bool InvertMask>
+ void applyMask(const uint32_t *Mask, unsigned MaskWords) {
+ assert((NumBaseBits == 64 || NumBaseBits == 32) && "Unsupported word size");
+ if (NumBaseBits == 64 && MaskWords >= 2) {
+ uint64_t M = Mask[0] | (uint64_t(Mask[1]) << 32);
+ if (InvertMask) M = ~M;
+ if (AddBits) setSmallBits(getSmallBits() | M);
+ else setSmallBits(getSmallBits() & ~M);
+ } else {
+ uint32_t M = Mask[0];
+ if (InvertMask) M = ~M;
+ if (AddBits) setSmallBits(getSmallBits() | M);
+ else setSmallBits(getSmallBits() & ~M);
+ }
+ }
};
inline SmallBitVector
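
// Illustrative sketch (not part of the patch): the mask operations mirror
// BitVector's, taking an array of uint32_t words (a register clobber mask,
// say) and applying it in place without resizing. Assumes PreservedMask has
// at least as many words as the vector needs (two on 64-bit hosts);
// applyCallClobbers is a hypothetical name.
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/Support/DataTypes.h"

void applyCallClobbers(llvm::SmallBitVector &LiveRegs,
                       const uint32_t *PreservedMask) {
  // Registers whose bit is 0 in PreservedMask are clobbered by the call:
  // LiveRegs &= PreservedMask.
  LiveRegs.clearBitsNotInMask(PreservedMask);
}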
diff --git a/include/llvm/ADT/SmallString.h b/include/llvm/ADT/SmallString.h
index 199783ba3899..c6f0a5bf1542 100644
--- a/include/llvm/ADT/SmallString.h
+++ b/include/llvm/ADT/SmallString.h
@@ -45,7 +45,7 @@ public:
/// @{
/// Assign from a repeated element
- void assign(unsigned NumElts, char Elt) {
+ void assign(size_t NumElts, char Elt) {
this->SmallVectorImpl<char>::assign(NumElts, Elt);
}
@@ -77,6 +77,11 @@ public:
void append(in_iter S, in_iter E) {
SmallVectorImpl<char>::append(S, E);
}
+
+ void append(size_t NumInputs, char Elt) {
+ SmallVectorImpl<char>::append(NumInputs, Elt);
+ }
+
/// Append from a StringRef
void append(StringRef RHS) {
diff --git a/include/llvm/ADT/SmallVector.h b/include/llvm/ADT/SmallVector.h
index 0d9d0d12e868..9fbbbe4f3b5d 100644
--- a/include/llvm/ADT/SmallVector.h
+++ b/include/llvm/ADT/SmallVector.h
@@ -14,6 +14,7 @@
#ifndef LLVM_ADT_SMALLVECTOR_H
#define LLVM_ADT_SMALLVECTOR_H
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/type_traits.h"
#include <algorithm>
#include <cassert>
@@ -54,6 +55,11 @@ protected:
return BeginX == static_cast<const void*>(&FirstEl);
}
+ /// resetToSmall - Put this vector in a state of being small.
+ void resetToSmall() {
+ BeginX = EndX = CapacityX = &FirstEl;
+ }
+
/// grow_pod - This is an implementation of the grow() method which only works
/// on POD-like data types and is out of line to reduce code duplication.
void grow_pod(size_t MinSizeInBytes, size_t TSize);
@@ -160,28 +166,84 @@ protected:
}
}
- /// uninitialized_copy - Copy the range [I, E) onto the uninitialized memory
- /// starting with "Dest", constructing elements into it as needed.
+ /// move - Use move-assignment to move the range [I, E) onto the
+  /// objects starting with "Dest". This is just <algorithm>'s
+ /// std::move, but not all stdlibs actually provide that.
+ template<typename It1, typename It2>
+ static It2 move(It1 I, It1 E, It2 Dest) {
+#if LLVM_USE_RVALUE_REFERENCES
+ for (; I != E; ++I, ++Dest)
+ *Dest = ::std::move(*I);
+ return Dest;
+#else
+ return ::std::copy(I, E, Dest);
+#endif
+ }
+
+ /// move_backward - Use move-assignment to move the range
+ /// [I, E) onto the objects ending at "Dest", moving objects
+ /// in reverse order. This is just <algorithm>'s
+ /// std::move_backward, but not all stdlibs actually provide that.
+ template<typename It1, typename It2>
+ static It2 move_backward(It1 I, It1 E, It2 Dest) {
+#if LLVM_USE_RVALUE_REFERENCES
+ while (I != E)
+ *--Dest = ::std::move(*--E);
+ return Dest;
+#else
+ return ::std::copy_backward(I, E, Dest);
+#endif
+ }
+
+ /// uninitialized_move - Move the range [I, E) into the uninitialized
+ /// memory starting with "Dest", constructing elements as needed.
+ template<typename It1, typename It2>
+ static void uninitialized_move(It1 I, It1 E, It2 Dest) {
+#if LLVM_USE_RVALUE_REFERENCES
+ for (; I != E; ++I, ++Dest)
+ ::new ((void*) &*Dest) T(::std::move(*I));
+#else
+ ::std::uninitialized_copy(I, E, Dest);
+#endif
+ }
+
+ /// uninitialized_copy - Copy the range [I, E) onto the uninitialized
+ /// memory starting with "Dest", constructing elements as needed.
template<typename It1, typename It2>
static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
std::uninitialized_copy(I, E, Dest);
}
- /// grow - double the size of the allocated memory, guaranteeing space for at
- /// least one more element or MinSize if specified.
+ /// grow - Grow the allocated memory (without initializing new
+ /// elements), doubling the size of the allocated memory.
+ /// Guarantees space for at least one more element, or MinSize more
+ /// elements if specified.
void grow(size_t MinSize = 0);
public:
void push_back(const T &Elt) {
if (this->EndX < this->CapacityX) {
Retry:
- new (this->end()) T(Elt);
+ ::new ((void*) this->end()) T(Elt);
this->setEnd(this->end()+1);
return;
}
this->grow();
goto Retry;
}
+
+#if LLVM_USE_RVALUE_REFERENCES
+ void push_back(T &&Elt) {
+ if (this->EndX < this->CapacityX) {
+ Retry:
+ ::new ((void*) this->end()) T(::std::move(Elt));
+ this->setEnd(this->end()+1);
+ return;
+ }
+ this->grow();
+ goto Retry;
+ }
+#endif
void pop_back() {
this->setEnd(this->end()-1);
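
// Illustrative sketch (not part of the patch): when rvalue references are
// available, the new push_back(T&&) overload moves elements into place; on
// pre-C++11 compilers the same call sites compile and fall back to copying.
// takeLast is a hypothetical name.
#include "llvm/ADT/SmallVector.h"
#include <string>
#include <utility>

std::string takeLast(llvm::SmallVector<std::string, 4> &V) {
  std::string S(64, 'x');
#if LLVM_USE_RVALUE_REFERENCES
  V.push_back(std::move(S));  // moves into the vector
#else
  V.push_back(S);             // fallback: copies
#endif
  return V.pop_back_val();    // also moves when rvalue references are on
}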
@@ -199,8 +261,8 @@ void SmallVectorTemplateBase<T, isPodLike>::grow(size_t MinSize) {
NewCapacity = MinSize;
T *NewElts = static_cast<T*>(malloc(NewCapacity*sizeof(T)));
- // Copy the elements over.
- this->uninitialized_copy(this->begin(), this->end(), NewElts);
+ // Move the elements over.
+ this->uninitialized_move(this->begin(), this->end(), NewElts);
// Destroy the original elements.
destroy_range(this->begin(), this->end());
@@ -225,6 +287,29 @@ protected:
// No need to do a destroy loop for POD's.
static void destroy_range(T *, T *) {}
+ /// move - Use move-assignment to move the range [I, E) onto the
+ /// objects starting with "Dest". For PODs, this is just memcpy.
+ template<typename It1, typename It2>
+ static It2 move(It1 I, It1 E, It2 Dest) {
+ return ::std::copy(I, E, Dest);
+ }
+
+ /// move_backward - Use move-assignment to move the range
+ /// [I, E) onto the objects ending at "Dest", moving objects
+ /// in reverse order.
+ template<typename It1, typename It2>
+ static It2 move_backward(It1 I, It1 E, It2 Dest) {
+ return ::std::copy_backward(I, E, Dest);
+ }
+
+ /// uninitialized_move - Move the range [I, E) onto the uninitialized memory
+ /// starting with "Dest", constructing elements into it as needed.
+ template<typename It1, typename It2>
+ static void uninitialized_move(It1 I, It1 E, It2 Dest) {
+ // Just do a copy.
+ uninitialized_copy(I, E, Dest);
+ }
+
/// uninitialized_copy - Copy the range [I, E) onto the uninitialized memory
/// starting with "Dest", constructing elements into it as needed.
template<typename It1, typename It2>
@@ -252,7 +337,7 @@ public:
void push_back(const T &Elt) {
if (this->EndX < this->CapacityX) {
Retry:
- *this->end() = Elt;
+ memcpy(this->end(), &Elt, sizeof(T));
this->setEnd(this->end()+1);
return;
}
@@ -330,7 +415,11 @@ public:
}
T pop_back_val() {
+#if LLVM_USE_RVALUE_REFERENCES
+ T Result = ::std::move(this->back());
+#else
T Result = this->back();
+#endif
this->pop_back();
return Result;
}
@@ -374,36 +463,79 @@ public:
}
iterator erase(iterator I) {
+ assert(I >= this->begin() && "Iterator to erase is out of bounds.");
+ assert(I < this->end() && "Erasing at past-the-end iterator.");
+
iterator N = I;
// Shift all elts down one.
- std::copy(I+1, this->end(), I);
+ this->move(I+1, this->end(), I);
// Drop the last elt.
this->pop_back();
return(N);
}
iterator erase(iterator S, iterator E) {
+ assert(S >= this->begin() && "Range to erase is out of bounds.");
+ assert(S <= E && "Trying to erase invalid range.");
+ assert(E <= this->end() && "Trying to erase past the end.");
+
iterator N = S;
// Shift all elts down.
- iterator I = std::copy(E, this->end(), S);
+ iterator I = this->move(E, this->end(), S);
// Drop the last elts.
this->destroy_range(I, this->end());
this->setEnd(I);
return(N);
}
+#if LLVM_USE_RVALUE_REFERENCES
+ iterator insert(iterator I, T &&Elt) {
+ if (I == this->end()) { // Important special case for empty vector.
+ this->push_back(::std::move(Elt));
+ return this->end()-1;
+ }
+
+ assert(I >= this->begin() && "Insertion iterator is out of bounds.");
+ assert(I <= this->end() && "Inserting past the end of the vector.");
+
+ if (this->EndX < this->CapacityX) {
+ Retry:
+ ::new ((void*) this->end()) T(::std::move(this->back()));
+ this->setEnd(this->end()+1);
+ // Push everything else over.
+ this->move_backward(I, this->end()-1, this->end());
+
+ // If we just moved the element we're inserting, be sure to update
+ // the reference.
+ T *EltPtr = &Elt;
+ if (I <= EltPtr && EltPtr < this->EndX)
+ ++EltPtr;
+
+ *I = ::std::move(*EltPtr);
+ return I;
+ }
+ size_t EltNo = I-this->begin();
+ this->grow();
+ I = this->begin()+EltNo;
+ goto Retry;
+ }
+#endif
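
// Illustrative sketch (not part of the patch): the EltPtr adjustment above is
// what keeps self-insertion correct -- inserting an element that lives inside
// the vector stays valid even though making room shifts it. selfInsert is a
// hypothetical name.
#include "llvm/ADT/SmallVector.h"

void selfInsert() {
  llvm::SmallVector<int, 8> V;
  for (int i = 0; i != 4; ++i)
    V.push_back(i);           // V = {0, 1, 2, 3}
  // Making room shifts V[3] one slot right, so the insert chases the element
  // to its new position; the result is {3, 0, 1, 2, 3}, not {2, 0, 1, 2, 3}.
  V.insert(V.begin(), V[3]);
}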
+
iterator insert(iterator I, const T &Elt) {
if (I == this->end()) { // Important special case for empty vector.
this->push_back(Elt);
return this->end()-1;
}
+ assert(I >= this->begin() && "Insertion iterator is out of bounds.");
+ assert(I <= this->end() && "Inserting past the end of the vector.");
+
if (this->EndX < this->CapacityX) {
Retry:
- new (this->end()) T(this->back());
+ ::new ((void*) this->end()) T(this->back());
this->setEnd(this->end()+1);
// Push everything else over.
- std::copy_backward(I, this->end()-1, this->end());
+ this->move_backward(I, this->end()-1, this->end());
// If we just moved the element we're inserting, be sure to update
// the reference.
@@ -421,13 +553,16 @@ public:
}
iterator insert(iterator I, size_type NumToInsert, const T &Elt) {
+ // Convert iterator to elt# to avoid invalidating iterator when we reserve()
+ size_t InsertElt = I - this->begin();
+
if (I == this->end()) { // Important special case for empty vector.
append(NumToInsert, Elt);
- return this->end()-1;
+ return this->begin()+InsertElt;
}
- // Convert iterator to elt# to avoid invalidating iterator when we reserve()
- size_t InsertElt = I - this->begin();
+ assert(I >= this->begin() && "Insertion iterator is out of bounds.");
+ assert(I <= this->end() && "Inserting past the end of the vector.");
// Ensure there is enough space.
reserve(static_cast<unsigned>(this->size() + NumToInsert));
@@ -444,7 +579,7 @@ public:
append(this->end()-NumToInsert, this->end());
// Copy the existing elements that get replaced.
- std::copy_backward(I, OldEnd-NumToInsert, OldEnd);
+ this->move_backward(I, OldEnd-NumToInsert, OldEnd);
std::fill_n(I, NumToInsert, Elt);
return I;
@@ -453,11 +588,11 @@ public:
// Otherwise, we're inserting more elements than exist already, and we're
// not inserting at the end.
- // Copy over the elements that we're about to overwrite.
+ // Move over the elements that we're about to overwrite.
T *OldEnd = this->end();
this->setEnd(this->end() + NumToInsert);
size_t NumOverwritten = OldEnd-I;
- this->uninitialized_copy(I, OldEnd, this->end()-NumOverwritten);
+ this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
// Replace the overwritten part.
std::fill_n(I, NumOverwritten, Elt);
@@ -469,14 +604,18 @@ public:
template<typename ItTy>
iterator insert(iterator I, ItTy From, ItTy To) {
+ // Convert iterator to elt# to avoid invalidating iterator when we reserve()
+ size_t InsertElt = I - this->begin();
+
if (I == this->end()) { // Important special case for empty vector.
append(From, To);
- return this->end()-1;
+ return this->begin()+InsertElt;
}
+ assert(I >= this->begin() && "Insertion iterator is out of bounds.");
+ assert(I <= this->end() && "Inserting past the end of the vector.");
+
size_t NumToInsert = std::distance(From, To);
- // Convert iterator to elt# to avoid invalidating iterator when we reserve()
- size_t InsertElt = I - this->begin();
// Ensure there is enough space.
reserve(static_cast<unsigned>(this->size() + NumToInsert));
@@ -493,7 +632,7 @@ public:
append(this->end()-NumToInsert, this->end());
// Copy the existing elements that get replaced.
- std::copy_backward(I, OldEnd-NumToInsert, OldEnd);
+ this->move_backward(I, OldEnd-NumToInsert, OldEnd);
std::copy(From, To, I);
return I;
@@ -502,16 +641,16 @@ public:
// Otherwise, we're inserting more elements than exist already, and we're
// not inserting at the end.
- // Copy over the elements that we're about to overwrite.
+ // Move over the elements that we're about to overwrite.
T *OldEnd = this->end();
this->setEnd(this->end() + NumToInsert);
size_t NumOverwritten = OldEnd-I;
- this->uninitialized_copy(I, OldEnd, this->end()-NumOverwritten);
+ this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
// Replace the overwritten part.
- for (; NumOverwritten > 0; --NumOverwritten) {
- *I = *From;
- ++I; ++From;
+ for (T *J = I; NumOverwritten > 0; --NumOverwritten) {
+ *J = *From;
+ ++J; ++From;
}
// Insert the non-overwritten middle part.
@@ -519,8 +658,11 @@ public:
return I;
}
- const SmallVectorImpl
- &operator=(const SmallVectorImpl &RHS);
+ SmallVectorImpl &operator=(const SmallVectorImpl &RHS);
+
+#if LLVM_USE_RVALUE_REFERENCES
+ SmallVectorImpl &operator=(SmallVectorImpl &&RHS);
+#endif
bool operator==(const SmallVectorImpl &RHS) const {
if (this->size() != RHS.size()) return false;
@@ -590,7 +732,7 @@ void SmallVectorImpl<T>::swap(SmallVectorImpl<T> &RHS) {
}
template <typename T>
-const SmallVectorImpl<T> &SmallVectorImpl<T>::
+SmallVectorImpl<T> &SmallVectorImpl<T>::
operator=(const SmallVectorImpl<T> &RHS) {
// Avoid self-assignment.
if (this == &RHS) return *this;
@@ -617,6 +759,7 @@ const SmallVectorImpl<T> &SmallVectorImpl<T>::
// If we have to grow to have enough elements, destroy the current elements.
// This allows us to avoid copying them during the grow.
+ // FIXME: don't do this if they're efficiently moveable.
if (this->capacity() < RHSSize) {
// Destroy current elements.
this->destroy_range(this->begin(), this->end());
@@ -637,6 +780,69 @@ const SmallVectorImpl<T> &SmallVectorImpl<T>::
return *this;
}
+#if LLVM_USE_RVALUE_REFERENCES
+template <typename T>
+SmallVectorImpl<T> &SmallVectorImpl<T>::operator=(SmallVectorImpl<T> &&RHS) {
+ // Avoid self-assignment.
+ if (this == &RHS) return *this;
+
+ // If the RHS isn't small, clear this vector and then steal its buffer.
+ if (!RHS.isSmall()) {
+ this->destroy_range(this->begin(), this->end());
+ if (!this->isSmall()) free(this->begin());
+ this->BeginX = RHS.BeginX;
+ this->EndX = RHS.EndX;
+ this->CapacityX = RHS.CapacityX;
+ RHS.resetToSmall();
+ return *this;
+ }
+
+ // If we already have sufficient space, assign the common elements, then
+ // destroy any excess.
+ size_t RHSSize = RHS.size();
+ size_t CurSize = this->size();
+ if (CurSize >= RHSSize) {
+ // Assign common elements.
+ iterator NewEnd = this->begin();
+ if (RHSSize)
+ NewEnd = this->move(RHS.begin(), RHS.end(), NewEnd);
+
+ // Destroy excess elements and trim the bounds.
+ this->destroy_range(NewEnd, this->end());
+ this->setEnd(NewEnd);
+
+ // Clear the RHS.
+ RHS.clear();
+
+ return *this;
+ }
+
+ // If we have to grow to have enough elements, destroy the current elements.
+ // This allows us to avoid copying them during the grow.
+ // FIXME: this may not actually make any sense if we can efficiently move
+ // elements.
+ if (this->capacity() < RHSSize) {
+ // Destroy current elements.
+ this->destroy_range(this->begin(), this->end());
+ this->setEnd(this->begin());
+ CurSize = 0;
+ this->grow(RHSSize);
+ } else if (CurSize) {
+ // Otherwise, use assignment for the already-constructed elements.
+ this->move(RHS.begin(), RHS.end(), this->begin());
+ }
+
+ // Move-construct the new elements in place.
+ this->uninitialized_move(RHS.begin()+CurSize, RHS.end(),
+ this->begin()+CurSize);
+
+ // Set end.
+ this->setEnd(this->begin()+RHSSize);
+
+ RHS.clear();
+ return *this;
+}
+#endif
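
// Illustrative sketch (not part of the patch): when the source vector has
// spilled to the heap, move-assignment is O(1) -- it steals the buffer and
// resets the source to its small, empty state. stealBuffer is a hypothetical
// name.
#include "llvm/ADT/SmallVector.h"
#include <utility>

#if LLVM_USE_RVALUE_REFERENCES
void stealBuffer() {
  llvm::SmallVector<int, 2> Src;
  for (int i = 0; i != 100; ++i)
    Src.push_back(i);       // Src is heap-backed by now
  llvm::SmallVector<int, 2> Dst;
  Dst = std::move(Src);     // takes Src's buffer; no per-element work
  // Src is left empty and may be reused; its old contents are gone.
}
#endif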
/// SmallVector - This is a 'vector' (really, a variable-sized array), optimized
/// for the case when the array is small. It contains some number of elements
@@ -692,6 +898,18 @@ public:
return *this;
}
+#if LLVM_USE_RVALUE_REFERENCES
+ SmallVector(SmallVector &&RHS) : SmallVectorImpl<T>(NumTsAvailable) {
+ if (!RHS.empty())
+ SmallVectorImpl<T>::operator=(::std::move(RHS));
+ }
+
+ const SmallVector &operator=(SmallVector &&RHS) {
+ SmallVectorImpl<T>::operator=(::std::move(RHS));
+ return *this;
+ }
+#endif
+
};
/// Specialize SmallVector at N=0. This specialization guarantees
@@ -700,7 +918,8 @@ public:
template <typename T>
class SmallVector<T,0> : public SmallVectorImpl<T> {
public:
- SmallVector() : SmallVectorImpl<T>(0) {}
+ SmallVector() : SmallVectorImpl<T>(0) {
+ }
explicit SmallVector(unsigned Size, const T &Value = T())
: SmallVectorImpl<T>(0) {
@@ -713,13 +932,26 @@ public:
}
SmallVector(const SmallVector &RHS) : SmallVectorImpl<T>(0) {
+ if (!RHS.empty())
+ SmallVectorImpl<T>::operator=(RHS);
+ }
+
+ const SmallVector &operator=(const SmallVector &RHS) {
SmallVectorImpl<T>::operator=(RHS);
+ return *this;
}
- SmallVector &operator=(const SmallVectorImpl<T> &RHS) {
- return SmallVectorImpl<T>::operator=(RHS);
+#if LLVM_USE_RVALUE_REFERENCES
+ SmallVector(SmallVector &&RHS) : SmallVectorImpl<T>(0) {
+ if (!RHS.empty())
+ SmallVectorImpl<T>::operator=(::std::move(RHS));
}
+ const SmallVector &operator=(SmallVector &&RHS) {
+ SmallVectorImpl<T>::operator=(::std::move(RHS));
+ return *this;
+ }
+#endif
};
template<typename T, unsigned N>
diff --git a/include/llvm/ADT/SparseSet.h b/include/llvm/ADT/SparseSet.h
index 923c6a5954d0..556963334894 100644
--- a/include/llvm/ADT/SparseSet.h
+++ b/include/llvm/ADT/SparseSet.h
@@ -21,33 +21,62 @@
#define LLVM_ADT_SPARSESET_H
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/DataTypes.h"
#include <limits>
namespace llvm {
-/// SparseSetFunctor - Objects in a SparseSet are identified by small integer
-/// keys. A functor object is used to compute the key of an object. The
-/// functor's operator() must return an unsigned smaller than the universe.
+/// SparseSetValTraits - Objects in a SparseSet are identified by keys that can
+/// be uniquely converted to a small integer less than the set's universe. This
+/// class allows the set to hold values that differ from the set's key type as
+/// long as an index can still be derived from the value. SparseSet never
+/// directly compares ValueT, only their indices, so it can map keys to
+/// arbitrary values. SparseSetValTraits computes the index from the value
+/// object. To compute the index from a key, SparseSet uses a separate
+/// KeyFunctorT template argument.
///
-/// The default functor implementation forwards to a getSparseSetKey() method
-/// on the object. It is intended for sparse sets holding ad-hoc structs.
+/// A simple type declaration, SparseSet<Type>, handles these cases:
+/// - unsigned key, identity index, identity value
+/// - unsigned key, identity index, fat value providing getSparseSetIndex()
+///
+/// The type declaration SparseSet<Type, UnaryFunction> handles:
+/// - unsigned key, remapped index, identity value (virtual registers)
+/// - pointer key, pointer-derived index, identity value (node+ID)
+/// - pointer key, pointer-derived index, fat value with getSparseSetIndex()
+///
+/// Only other, unexpected cases require specializing SparseSetValTraits.
+///
+/// For best results, ValueT should not require a destructor.
///
template<typename ValueT>
-struct SparseSetFunctor {
- unsigned operator()(const ValueT &Val) {
- return Val.getSparseSetKey();
+struct SparseSetValTraits {
+ static unsigned getValIndex(const ValueT &Val) {
+ return Val.getSparseSetIndex();
}
};
-/// SparseSetFunctor<unsigned> - Provide a trivial identity functor for
-/// SparseSet<unsigned>.
+/// SparseSetValFunctor - Helper class for selecting SparseSetValTraits. The
+/// generic implementation handles ValueT classes which either provide
+/// getSparseSetIndex() or specialize SparseSetValTraits<>.
///
-template<> struct SparseSetFunctor<unsigned> {
- unsigned operator()(unsigned Val) { return Val; }
+template<typename KeyT, typename ValueT, typename KeyFunctorT>
+struct SparseSetValFunctor {
+ unsigned operator()(const ValueT &Val) const {
+ return SparseSetValTraits<ValueT>::getValIndex(Val);
+ }
};
-/// SparseSet - Fast set implementation for objects that can be identified by
+/// SparseSetValFunctor<KeyT, KeyT> - Helper class for the common case of
+/// identity key/value sets.
+template<typename KeyT, typename KeyFunctorT>
+struct SparseSetValFunctor<KeyT, KeyT, KeyFunctorT> {
+ unsigned operator()(const KeyT &Key) const {
+ return KeyFunctorT()(Key);
+ }
+};
+
+/// SparseSet - Fast set implementation for objects that can be identified by
/// small unsigned keys.
///
/// SparseSet allocates memory proportional to the size of the key universe, so
@@ -82,18 +111,20 @@ template<> struct SparseSetFunctor<unsigned> {
/// uint16_t or uint32_t.
///
/// @param ValueT The type of objects in the set.
+/// @param KeyFunctorT A functor that computes an unsigned index from KeyT.
/// @param SparseT An unsigned integer type. See above.
-/// @param KeyFunctorT A functor that computes the unsigned key of a ValueT.
///
template<typename ValueT,
- typename SparseT = uint8_t,
- typename KeyFunctorT = SparseSetFunctor<ValueT> >
+ typename KeyFunctorT = llvm::identity<unsigned>,
+ typename SparseT = uint8_t>
class SparseSet {
+ typedef typename KeyFunctorT::argument_type KeyT;
typedef SmallVector<ValueT, 8> DenseT;
DenseT Dense;
SparseT *Sparse;
unsigned Universe;
- KeyFunctorT KeyOf;
+ KeyFunctorT KeyIndexOf;
+ SparseSetValFunctor<KeyT, ValueT, KeyFunctorT> ValIndexOf;
// Disable copy construction and assignment.
// This data structure is not meant to be used that way.
@@ -160,21 +191,21 @@ public:
Dense.clear();
}
- /// find - Find an element by its key.
+ /// findIndex - Find an element by its index.
///
- /// @param Key A valid key to find.
+ /// @param Idx A valid index to find.
/// @returns An iterator to the element identified by key, or end().
///
- iterator find(unsigned Key) {
- assert(Key < Universe && "Key out of range");
+ iterator findIndex(unsigned Idx) {
+ assert(Idx < Universe && "Key out of range");
assert(std::numeric_limits<SparseT>::is_integer &&
!std::numeric_limits<SparseT>::is_signed &&
"SparseT must be an unsigned integer type");
const unsigned Stride = std::numeric_limits<SparseT>::max() + 1u;
- for (unsigned i = Sparse[Key], e = size(); i < e; i += Stride) {
- const unsigned FoundKey = KeyOf(Dense[i]);
- assert(FoundKey < Universe && "Invalid key in set. Did object mutate?");
- if (Key == FoundKey)
+ for (unsigned i = Sparse[Idx], e = size(); i < e; i += Stride) {
+ const unsigned FoundIdx = ValIndexOf(Dense[i]);
+ assert(FoundIdx < Universe && "Invalid key in set. Did object mutate?");
+ if (Idx == FoundIdx)
return begin() + i;
// Stride is 0 when SparseT >= unsigned. We don't need to loop.
if (!Stride)
@@ -183,13 +214,22 @@ public:
return end();
}
- const_iterator find(unsigned Key) const {
- return const_cast<SparseSet*>(this)->find(Key);
+ /// find - Find an element by its key.
+ ///
+ /// @param Key A valid key to find.
+ /// @returns An iterator to the element identified by key, or end().
+ ///
+ iterator find(const KeyT &Key) {
+ return findIndex(KeyIndexOf(Key));
+ }
+
+ const_iterator find(const KeyT &Key) const {
+ return const_cast<SparseSet*>(this)->findIndex(KeyIndexOf(Key));
}
/// count - Returns true if this set contains an element identified by Key.
///
- bool count(unsigned Key) const {
+ bool count(const KeyT &Key) const {
return find(Key) != end();
}
@@ -204,11 +244,11 @@ public:
/// Insertion invalidates all iterators.
///
std::pair<iterator, bool> insert(const ValueT &Val) {
- unsigned Key = KeyOf(Val);
- iterator I = find(Key);
+ unsigned Idx = ValIndexOf(Val);
+ iterator I = findIndex(Idx);
if (I != end())
return std::make_pair(I, false);
- Sparse[Key] = size();
+ Sparse[Idx] = size();
Dense.push_back(Val);
return std::make_pair(end() - 1, true);
}
@@ -216,7 +256,7 @@ public:
/// array subscript - If an element already exists with this key, return it.
/// Otherwise, automatically construct a new value from Key, insert it,
/// and return the newly inserted element.
- ValueT &operator[](unsigned Key) {
+ ValueT &operator[](const KeyT &Key) {
return *insert(ValueT(Key)).first;
}
@@ -238,9 +278,9 @@ public:
assert(unsigned(I - begin()) < size() && "Invalid iterator");
if (I != end() - 1) {
*I = Dense.back();
- unsigned BackKey = KeyOf(Dense.back());
- assert(BackKey < Universe && "Invalid key in set. Did object mutate?");
- Sparse[BackKey] = I - begin();
+ unsigned BackIdx = ValIndexOf(Dense.back());
+ assert(BackIdx < Universe && "Invalid key in set. Did object mutate?");
+ Sparse[BackIdx] = I - begin();
}
// This depends on SmallVector::pop_back() not invalidating iterators.
// std::vector::pop_back() doesn't give that guarantee.
@@ -253,7 +293,7 @@ public:
/// @param Key The key identifying the element to erase.
/// @returns True when an element was erased, false if no element was found.
///
- bool erase(unsigned Key) {
+ bool erase(const KeyT &Key) {
iterator I = find(Key);
if (I == end())
return false;
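
// Illustrative sketch (not part of the patch): the two common declarations
// described in the comment block above. LiveReg is a hypothetical "fat"
// value type that derives its index from the virtual register it tracks.
#include "llvm/ADT/SparseSet.h"

struct LiveReg {
  unsigned VirtReg;
  unsigned PhysReg;
  explicit LiveReg(unsigned VReg) : VirtReg(VReg), PhysReg(0) {}
  unsigned getSparseSetIndex() const { return VirtReg; }
};

void sparseSetExamples() {
  llvm::SparseSet<unsigned> Keys;    // key == index == value
  Keys.setUniverse(256);
  Keys.insert(42);

  llvm::SparseSet<LiveReg> LiveRegs; // value carries its own index
  LiveRegs.setUniverse(256);
  LiveRegs.insert(LiveReg(42));
  if (LiveRegs.count(42))            // lookups take the key, not the value
    LiveRegs.erase(42);
}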
diff --git a/include/llvm/ADT/StringRef.h b/include/llvm/ADT/StringRef.h
index 76ba66e746ce..cd846031c5a0 100644
--- a/include/llvm/ADT/StringRef.h
+++ b/include/llvm/ADT/StringRef.h
@@ -12,6 +12,7 @@
#include "llvm/Support/type_traits.h"
+#include <algorithm>
#include <cassert>
#include <cstring>
#include <limits>
@@ -292,6 +293,16 @@ namespace llvm {
/// Note: O(size() + Chars.size())
size_type find_last_of(StringRef Chars, size_t From = npos) const;
+ /// find_last_not_of - Find the last character in the string that is not
+ /// \arg C, or npos if not found.
+ size_type find_last_not_of(char C, size_t From = npos) const;
+
+ /// find_last_not_of - Find the last character in the string that is not in
+ /// \arg Chars, or npos if not found.
+ ///
+ /// Note: O(size() + Chars.size())
+ size_type find_last_not_of(StringRef Chars, size_t From = npos) const;
+
/// @}
/// @name Helpful Algorithms
/// @{
@@ -480,6 +491,24 @@ namespace llvm {
return std::make_pair(slice(0, Idx), slice(Idx+1, npos));
}
+ /// ltrim - Return string with consecutive characters in \arg Chars starting
+ /// from the left removed.
+ StringRef ltrim(StringRef Chars = " \t\n\v\f\r") const {
+ return drop_front(std::min(Length, find_first_not_of(Chars)));
+ }
+
+ /// rtrim - Return string with consecutive characters in \arg Chars starting
+ /// from the right removed.
+ StringRef rtrim(StringRef Chars = " \t\n\v\f\r") const {
+ return drop_back(Length - std::min(Length, find_last_not_of(Chars) + 1));
+ }
+
+ /// trim - Return string with consecutive characters in \arg Chars starting
+ /// from the left and right removed.
+ StringRef trim(StringRef Chars = " \t\n\v\f\r") const {
+ return ltrim(Chars).rtrim(Chars);
+ }
+
/// @}
};
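
// Illustrative sketch (not part of the patch): the new trim helpers.
// trimExamples is a hypothetical name.
#include "llvm/ADT/StringRef.h"

void trimExamples() {
  llvm::StringRef S("  \thello world\n");
  llvm::StringRef T = S.trim();       // "hello world"
  llvm::StringRef L = S.ltrim();      // "hello world\n"
  llvm::StringRef R = S.rtrim(" \n"); // "  \thello world"
  (void)T; (void)L; (void)R;
}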
diff --git a/include/llvm/ADT/StringSwitch.h b/include/llvm/ADT/StringSwitch.h
index 74805830d854..7fd6e2796032 100644
--- a/include/llvm/ADT/StringSwitch.h
+++ b/include/llvm/ADT/StringSwitch.h
@@ -48,8 +48,8 @@ class StringSwitch {
const T *Result;
public:
- explicit StringSwitch(StringRef Str)
- : Str(Str), Result(0) { }
+ explicit StringSwitch(StringRef S)
+ : Str(S), Result(0) { }
template<unsigned N>
StringSwitch& Case(const char (&S)[N], const T& Value) {
diff --git a/include/llvm/ADT/TinyPtrVector.h b/include/llvm/ADT/TinyPtrVector.h
index 5014517c9e05..d3d33b8adde1 100644
--- a/include/llvm/ADT/TinyPtrVector.h
+++ b/include/llvm/ADT/TinyPtrVector.h
@@ -10,8 +10,11 @@
#ifndef LLVM_ADT_TINYPTRVECTOR_H
#define LLVM_ADT_TINYPTRVECTOR_H
-#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Compiler.h"
namespace llvm {
@@ -25,18 +28,78 @@ template <typename EltTy>
class TinyPtrVector {
public:
typedef llvm::SmallVector<EltTy, 4> VecTy;
+ typedef typename VecTy::value_type value_type;
+
llvm::PointerUnion<EltTy, VecTy*> Val;
-
+
TinyPtrVector() {}
+ ~TinyPtrVector() {
+ if (VecTy *V = Val.template dyn_cast<VecTy*>())
+ delete V;
+ }
+
TinyPtrVector(const TinyPtrVector &RHS) : Val(RHS.Val) {
if (VecTy *V = Val.template dyn_cast<VecTy*>())
Val = new VecTy(*V);
}
- ~TinyPtrVector() {
- if (VecTy *V = Val.template dyn_cast<VecTy*>())
+ TinyPtrVector &operator=(const TinyPtrVector &RHS) {
+ if (this == &RHS)
+ return *this;
+ if (RHS.empty()) {
+ this->clear();
+ return *this;
+ }
+
+ // Try to squeeze into the single slot. If it won't fit, allocate a copied
+ // vector.
+ if (Val.template is<EltTy>()) {
+ if (RHS.size() == 1)
+ Val = RHS.front();
+ else
+ Val = new VecTy(*RHS.Val.template get<VecTy*>());
+ return *this;
+ }
+
+ // If we have a full vector allocated, try to re-use it.
+ if (RHS.Val.template is<EltTy>()) {
+ Val.template get<VecTy*>()->clear();
+ Val.template get<VecTy*>()->push_back(RHS.front());
+ } else {
+ *Val.template get<VecTy*>() = *RHS.Val.template get<VecTy*>();
+ }
+ return *this;
+ }
+
+#if LLVM_USE_RVALUE_REFERENCES
+ TinyPtrVector(TinyPtrVector &&RHS) : Val(RHS.Val) {
+ RHS.Val = (EltTy)0;
+ }
+ TinyPtrVector &operator=(TinyPtrVector &&RHS) {
+ if (this == &RHS)
+ return *this;
+ if (RHS.empty()) {
+ this->clear();
+ return *this;
+ }
+
+ // If this vector has been allocated on the heap, re-use it if cheap. If it
+ // would require more copying, just delete it and we'll steal the other
+ // side.
+ if (VecTy *V = Val.template dyn_cast<VecTy*>()) {
+ if (RHS.Val.template is<EltTy>()) {
+ V->clear();
+ V->push_back(RHS.front());
+ return *this;
+ }
delete V;
+ }
+
+ Val = RHS.Val;
+ RHS.Val = (EltTy)0;
+ return *this;
}
-
+#endif
+
// implicit conversion operator to ArrayRef.
operator ArrayRef<EltTy>() const {
if (Val.isNull())
@@ -45,7 +108,7 @@ public:
return *Val.getAddrOfPtr1();
return *Val.template get<VecTy*>();
}
-
+
bool empty() const {
// This vector can be empty if it contains no element, or if it
// contains a pointer to an empty vector.
@@ -54,7 +117,7 @@ public:
return Vec->empty();
return false;
}
-
+
unsigned size() const {
if (empty())
return 0;
@@ -62,27 +125,21 @@ public:
return 1;
return Val.template get<VecTy*>()->size();
}
-
+
typedef const EltTy *const_iterator;
typedef EltTy *iterator;
iterator begin() {
- if (empty())
- return 0;
-
if (Val.template is<EltTy>())
return Val.getAddrOfPtr1();
-
+
return Val.template get<VecTy *>()->begin();
}
iterator end() {
- if (empty())
- return 0;
-
if (Val.template is<EltTy>())
- return begin() + 1;
-
+ return begin() + (Val.isNull() ? 0 : 1);
+
return Val.template get<VecTy *>()->end();
}
@@ -100,38 +157,53 @@ public:
assert(i == 0 && "tinyvector index out of range");
return V;
}
-
- assert(i < Val.template get<VecTy*>()->size() &&
+
+ assert(i < Val.template get<VecTy*>()->size() &&
"tinyvector index out of range");
return (*Val.template get<VecTy*>())[i];
}
-
+
EltTy front() const {
assert(!empty() && "vector empty");
if (EltTy V = Val.template dyn_cast<EltTy>())
return V;
return Val.template get<VecTy*>()->front();
}
-
+
+ EltTy back() const {
+ assert(!empty() && "vector empty");
+ if (EltTy V = Val.template dyn_cast<EltTy>())
+ return V;
+ return Val.template get<VecTy*>()->back();
+ }
+
void push_back(EltTy NewVal) {
assert(NewVal != 0 && "Can't add a null value");
-
+
// If we have nothing, add something.
if (Val.isNull()) {
Val = NewVal;
return;
}
-
+
// If we have a single value, convert to a vector.
if (EltTy V = Val.template dyn_cast<EltTy>()) {
Val = new VecTy();
Val.template get<VecTy*>()->push_back(V);
}
-
+
// Add the new value, we know we have a vector.
Val.template get<VecTy*>()->push_back(NewVal);
}
-
+
+ void pop_back() {
+ // If we have a single value, convert to empty.
+ if (Val.template is<EltTy>())
+ Val = (EltTy)0;
+ else if (VecTy *Vec = Val.template get<VecTy*>())
+ Vec->pop_back();
+ }
+
void clear() {
// If we have a single value, convert to empty.
if (Val.template is<EltTy>()) {
@@ -144,6 +216,9 @@ public:
}
iterator erase(iterator I) {
+ assert(I >= begin() && "Iterator to erase is out of bounds.");
+ assert(I < end() && "Erasing at past-the-end iterator.");
+
// If we have a single value, convert to empty.
if (Val.template is<EltTy>()) {
if (I == begin())
@@ -153,12 +228,63 @@ public:
// benefit to collapsing back to a pointer
return Vec->erase(I);
}
+ return end();
+ }
+
+ iterator erase(iterator S, iterator E) {
+ assert(S >= begin() && "Range to erase is out of bounds.");
+ assert(S <= E && "Trying to erase invalid range.");
+ assert(E <= end() && "Trying to erase past the end.");
+
+ if (Val.template is<EltTy>()) {
+ if (S == begin() && S != E)
+ Val = (EltTy)0;
+ } else if (VecTy *Vec = Val.template dyn_cast<VecTy*>()) {
+ return Vec->erase(S, E);
+ }
+ return end();
+ }
- return 0;
+ iterator insert(iterator I, const EltTy &Elt) {
+ assert(I >= this->begin() && "Insertion iterator is out of bounds.");
+ assert(I <= this->end() && "Inserting past the end of the vector.");
+ if (I == end()) {
+ push_back(Elt);
+ return llvm::prior(end());
+ }
+ assert(!Val.isNull() && "Null value with non-end insert iterator.");
+ if (EltTy V = Val.template dyn_cast<EltTy>()) {
+ assert(I == begin());
+ Val = Elt;
+ push_back(V);
+ return begin();
+ }
+
+ return Val.template get<VecTy*>()->insert(I, Elt);
+ }
+
+ template<typename ItTy>
+ iterator insert(iterator I, ItTy From, ItTy To) {
+ assert(I >= this->begin() && "Insertion iterator is out of bounds.");
+ assert(I <= this->end() && "Inserting past the end of the vector.");
+ if (From == To)
+ return I;
+
+ // If we have a single value, convert to a vector.
+ ptrdiff_t Offset = I - begin();
+ if (Val.isNull()) {
+ if (llvm::next(From) == To) {
+ Val = *From;
+ return begin();
+ }
+
+ Val = new VecTy();
+ } else if (EltTy V = Val.template dyn_cast<EltTy>()) {
+ Val = new VecTy();
+ Val.template get<VecTy*>()->push_back(V);
+ }
+ return Val.template get<VecTy*>()->insert(begin() + Offset, From, To);
}
-
-private:
- void operator=(const TinyPtrVector&); // NOT IMPLEMENTED YET.
};
} // end namespace llvm
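
// Illustrative sketch (not part of the patch): TinyPtrVector keeps a single
// element inline and only allocates a real SmallVector on the second
// push_back. Assumes A and B are non-null (null elements are rejected by
// assert); tinyExample is a hypothetical name.
#include "llvm/ADT/TinyPtrVector.h"

void tinyExample(int *A, int *B) {
  llvm::TinyPtrVector<int*> V;
  V.push_back(A);                   // stored inline, no heap traffic
  V.push_back(B);                   // converts to a heap-allocated vector
  V.pop_back();                     // size 1 again (still vector-backed)
  llvm::TinyPtrVector<int*> W = V;  // copy ctor deep-copies the vector
  W = V;                            // new operator= reuses W's allocation
}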
diff --git a/include/llvm/ADT/Triple.h b/include/llvm/ADT/Triple.h
index f5f99d0f1b82..7f7061ab01b9 100644
--- a/include/llvm/ADT/Triple.h
+++ b/include/llvm/ADT/Triple.h
@@ -62,8 +62,8 @@ public:
x86_64, // X86-64: amd64, x86_64
xcore, // XCore: xcore
mblaze, // MBlaze: mblaze
- ptx32, // PTX: ptx (32-bit)
- ptx64, // PTX: ptx (64-bit)
+ nvptx, // NVPTX: 32-bit
+ nvptx64, // NVPTX: 64-bit
le32, // le32: generic little-endian 32-bit CPU (PNaCl / Emscripten)
amdil // amdil: amd IL
};
@@ -98,7 +98,8 @@ public:
Minix,
RTEMS,
NativeClient,
- CNK // BG/P Compute-Node Kernel
+ CNK, // BG/P Compute-Node Kernel
+ Bitrig
};
enum EnvironmentType {
UnknownEnvironment,
@@ -194,6 +195,11 @@ public:
bool getMacOSXVersion(unsigned &Major, unsigned &Minor,
unsigned &Micro) const;
+ /// getiOSVersion - Parse the version number as with getOSVersion. This should
+ /// only be called with IOS triples.
+ void getiOSVersion(unsigned &Major, unsigned &Minor,
+ unsigned &Micro) const;
+
/// @}
/// @name Direct Component Access
/// @{
@@ -266,7 +272,7 @@ public:
/// compatibility, which handles supporting skewed version numbering schemes
/// used by the "darwin" triples.
unsigned isMacOSXVersionLT(unsigned Major, unsigned Minor = 0,
- unsigned Micro = 0) const {
+ unsigned Micro = 0) const {
assert(isMacOSX() && "Not an OS X triple!");
// If this is OS X, expect a sane version number.
diff --git a/include/llvm/ADT/ValueMap.h b/include/llvm/ADT/ValueMap.h
index 707d07d32cbc..f7e255181e23 100644
--- a/include/llvm/ADT/ValueMap.h
+++ b/include/llvm/ADT/ValueMap.h
@@ -111,20 +111,21 @@ public:
/// count - Return true if the specified key is in the map.
bool count(const KeyT &Val) const {
- return Map.count(Wrap(Val));
+ return Map.find_as(Val) != Map.end();
}
iterator find(const KeyT &Val) {
- return iterator(Map.find(Wrap(Val)));
+ return iterator(Map.find_as(Val));
}
const_iterator find(const KeyT &Val) const {
- return const_iterator(Map.find(Wrap(Val)));
+ return const_iterator(Map.find_as(Val));
}
/// lookup - Return the entry for the specified key, or a default
/// constructed value if no such entry exists.
ValueT lookup(const KeyT &Val) const {
- return Map.lookup(Wrap(Val));
+ typename MapT::const_iterator I = Map.find_as(Val);
+ return I != Map.end() ? I->second : ValueT();
}
// Inserts key,value pair into the map if the key isn't already in the map.
@@ -145,7 +146,12 @@ public:
bool erase(const KeyT &Val) {
- return Map.erase(Wrap(Val));
+ typename MapT::iterator I = Map.find_as(Val);
+ if (I == Map.end())
+ return false;
+
+ Map.erase(I);
+ return true;
}
void erase(iterator I) {
return Map.erase(I.base());
@@ -256,9 +262,15 @@ struct DenseMapInfo<ValueMapCallbackVH<KeyT, ValueT, Config> > {
static unsigned getHashValue(const VH &Val) {
return PointerInfo::getHashValue(Val.Unwrap());
}
+ static unsigned getHashValue(const KeyT &Val) {
+ return PointerInfo::getHashValue(Val);
+ }
static bool isEqual(const VH &LHS, const VH &RHS) {
return LHS == RHS;
}
+ static bool isEqual(const KeyT &LHS, const VH &RHS) {
+ return LHS == RHS.getValPtr();
+ }
};
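
// Illustrative sketch (not part of the patch): with the getHashValue(KeyT)
// and isEqual(KeyT, VH) overloads above, DenseMap::find_as can probe the map
// with a raw key, so no temporary ValueMapCallbackVH (and no value-handle
// registration) is created just to do a lookup. Assumes the 2012-era
// "llvm/Value.h" header location; lookupOrZero is a hypothetical name.
#include "llvm/ADT/ValueMap.h"
#include "llvm/Value.h"

unsigned lookupOrZero(const llvm::ValueMap<llvm::Value*, unsigned> &VM,
                      llvm::Value *V) {
  return VM.lookup(V); // Map.find_as(V) internally; 0 when V has no entry
}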