Diffstat (limited to 'include/llvm/ADT')
-rw-r--r--  include/llvm/ADT/APFloat.h              |    7
-rw-r--r--  include/llvm/ADT/APInt.h                |  142
-rw-r--r--  include/llvm/ADT/APSInt.h               |   18
-rw-r--r--  include/llvm/ADT/ArrayRef.h             |  121
-rw-r--r--  include/llvm/ADT/BitVector.h            |   36
-rw-r--r--  include/llvm/ADT/DenseMap.h             |    3
-rw-r--r--  include/llvm/ADT/DenseMapInfo.h         |   14
-rw-r--r--  include/llvm/ADT/DenseSet.h             |    3
-rw-r--r--  include/llvm/ADT/EquivalenceClasses.h   |    2
-rw-r--r--  include/llvm/ADT/FoldingSet.h           |    2
-rw-r--r--  include/llvm/ADT/ImmutableIntervalMap.h |   54
-rw-r--r--  include/llvm/ADT/ImmutableList.h        |   14
-rw-r--r--  include/llvm/ADT/ImmutableMap.h         |   53
-rw-r--r--  include/llvm/ADT/ImmutableSet.h         |  553
-rw-r--r--  include/llvm/ADT/InMemoryStruct.h       |   77
-rw-r--r--  include/llvm/ADT/IndexedMap.h           |   14
-rw-r--r--  include/llvm/ADT/IntEqClasses.h         |   88
-rw-r--r--  include/llvm/ADT/IntervalMap.h          | 2139
-rw-r--r--  include/llvm/ADT/Optional.h             |   54
-rw-r--r--  include/llvm/ADT/PointerIntPair.h       |    7
-rw-r--r--  include/llvm/ADT/PointerUnion.h         |   12
-rw-r--r--  include/llvm/ADT/PostOrderIterator.h    |    6
-rw-r--r--  include/llvm/ADT/SCCIterator.h          |    2
-rw-r--r--  include/llvm/ADT/ScopedHashTable.h      |  103
-rw-r--r--  include/llvm/ADT/SetVector.h            |    4
-rw-r--r--  include/llvm/ADT/SmallBitVector.h       |    7
-rw-r--r--  include/llvm/ADT/SmallPtrSet.h          |    5
-rw-r--r--  include/llvm/ADT/SmallString.h          |   12
-rw-r--r--  include/llvm/ADT/SmallVector.h          |   26
-rw-r--r--  include/llvm/ADT/SparseBitVector.h      |    2
-rw-r--r--  include/llvm/ADT/Statistic.h            |    2
-rw-r--r--  include/llvm/ADT/StringExtras.h         |    9
-rw-r--r--  include/llvm/ADT/StringMap.h            |   21
-rw-r--r--  include/llvm/ADT/StringRef.h            |   25
-rw-r--r--  include/llvm/ADT/Triple.h               |   77
-rw-r--r--  include/llvm/ADT/Twine.h                |   30
-rw-r--r--  include/llvm/ADT/ValueMap.h             |    2
-rw-r--r--  include/llvm/ADT/ilist.h                |    1
38 files changed, 3227 insertions, 520 deletions
diff --git a/include/llvm/ADT/APFloat.h b/include/llvm/ADT/APFloat.h
index dfe4e0f..ca4138b 100644
--- a/include/llvm/ADT/APFloat.h
+++ b/include/llvm/ADT/APFloat.h
@@ -246,6 +246,13 @@ namespace llvm {
static APFloat getSmallestNormalized(const fltSemantics &Sem,
bool Negative = false);
+ /// getAllOnesValue - Returns a float which is bitcasted from
+ /// an all one value int.
+ ///
+ /// \param BitWidth - Select float type
+ /// \param isIEEE - If 128 bit number, select between PPC and IEEE
+ static APFloat getAllOnesValue(unsigned BitWidth, bool isIEEE = false);
+
/// Profile - Used to insert APFloat objects, or objects that contain
/// APFloat objects, into FoldingSets.
void Profile(FoldingSetNodeID& NID) const;
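
Usage sketch (not from the patch) for the new APFloat::getAllOnesValue()
declared above. For BitWidth == 32 the result is an IEEE single whose bit
pattern is all ones, i.e. a NaN with every mantissa bit set; for 128-bit
widths the isIEEE flag selects between IEEE quad and PPC double-double.

    #include "llvm/ADT/APFloat.h"
    using namespace llvm;

    void demoAllOnesFloat() {
      // Bit pattern 0xFFFFFFFF reinterpreted as a 32-bit float.
      APFloat F = APFloat::getAllOnesValue(32);
      APInt Bits = F.bitcastToAPInt();   // == APInt(32, 0xFFFFFFFFULL)
      (void)Bits;
    }
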
diff --git a/include/llvm/ADT/APInt.h b/include/llvm/ADT/APInt.h
index 8004cb4..b91d5dc 100644
--- a/include/llvm/ADT/APInt.h
+++ b/include/llvm/ADT/APInt.h
@@ -275,12 +275,6 @@ public:
/// objects, into FoldingSets.
void Profile(FoldingSetNodeID& id) const;
- /// @brief Used by the Bitcode serializer to emit APInts to Bitcode.
- void Emit(Serializer& S) const;
-
- /// @brief Used by the Bitcode deserializer to deserialize APInts.
- void Read(Deserializer& D);
-
/// @}
/// @name Value Tests
/// @{
@@ -302,7 +296,7 @@ public:
/// @returns true if this APInt is positive.
/// @brief Determine if this APInt Value is positive.
bool isStrictlyPositive() const {
- return isNonNegative() && (*this) != 0;
+ return isNonNegative() && !!*this;
}
/// This checks to see if the value has all bits of the APInt are set or not.
@@ -330,15 +324,14 @@ public:
/// value for the APInt's bit width.
/// @brief Determine if this is the smallest unsigned value.
bool isMinValue() const {
- return countPopulation() == 0;
+ return !*this;
}
/// This checks to see if the value of this APInt is the minimum signed
/// value for the APInt's bit width.
/// @brief Determine if this is the smallest signed value.
bool isMinSignedValue() const {
- return BitWidth == 1 ? VAL == 1 :
- isNegative() && countPopulation() == 1;
+ return BitWidth == 1 ? VAL == 1 : isNegative() && isPowerOf2();
}
/// @brief Check if this APInt has an N-bits unsigned integer value.
@@ -348,10 +341,8 @@ public:
return true;
if (isSingleWord())
- return VAL == (VAL & (~0ULL >> (64 - N)));
- APInt Tmp(N, getNumWords(), pVal);
- Tmp.zext(getBitWidth());
- return Tmp == (*this);
+ return isUIntN(N, VAL);
+ return APInt(N, getNumWords(), pVal).zext(getBitWidth()) == (*this);
}
/// @brief Check if this APInt has an N-bits signed integer value.
@@ -361,7 +352,11 @@ public:
}
/// @returns true if the argument APInt value is a power of two > 0.
- bool isPowerOf2() const;
+ bool isPowerOf2() const {
+ if (isSingleWord())
+ return isPowerOf2_64(VAL);
+ return countPopulationSlowCase() == 1;
+ }
/// isSignBit - Return true if this is the value returned by getSignBit.
bool isSignBit() const { return isMinSignedValue(); }
@@ -369,7 +364,7 @@ public:
/// This converts the APInt to a boolean value as a test against zero.
/// @brief Boolean conversion function.
bool getBoolValue() const {
- return *this != 0;
+ return !!*this;
}
/// getLimitedValue - If this value is smaller than the specified limit,
@@ -385,12 +380,14 @@ public:
/// @{
/// @brief Gets maximum unsigned value of APInt for specific bit width.
static APInt getMaxValue(unsigned numBits) {
- return APInt(numBits, 0).set();
+ return getAllOnesValue(numBits);
}
/// @brief Gets maximum signed value of APInt for a specific bit width.
static APInt getSignedMaxValue(unsigned numBits) {
- return APInt(numBits, 0).set().clear(numBits - 1);
+ APInt API = getAllOnesValue(numBits);
+ API.clearBit(numBits - 1);
+ return API;
}
/// @brief Gets minimum unsigned value of APInt for a specific bit width.
@@ -400,7 +397,9 @@ public:
/// @brief Gets minimum signed value of APInt for a specific bit width.
static APInt getSignedMinValue(unsigned numBits) {
- return APInt(numBits, 0).set(numBits - 1);
+ APInt API(numBits, 0);
+ API.setBit(numBits - 1);
+ return API;
}
/// getSignBit - This is just a wrapper function of getSignedMinValue(), and
@@ -413,7 +412,7 @@ public:
/// @returns the all-ones value for an APInt of the specified bit-width.
/// @brief Get the all-ones value.
static APInt getAllOnesValue(unsigned numBits) {
- return APInt(numBits, 0).set();
+ return APInt(numBits, -1ULL, true);
}
/// @returns the '0' value for an APInt of the specified bit-width.
@@ -432,6 +431,13 @@ public:
/// @returns the low "numBits" bits of this APInt.
APInt getLoBits(unsigned numBits) const;
+ /// getOneBitSet - Return an APInt with exactly one bit set in the result.
+ static APInt getOneBitSet(unsigned numBits, unsigned BitNo) {
+ APInt Res(numBits, 0);
+ Res.setBit(BitNo);
+ return Res;
+ }
+
/// Constructs an APInt value that has a contiguous range of bits set. The
/// bits from loBit (inclusive) to hiBit (exclusive) will be set. All other
/// bits will be zero. For example, with parameters(32, 0, 16) you would get
@@ -530,7 +536,7 @@ public:
/// @brief Unary bitwise complement operator.
APInt operator~() const {
APInt Result(*this);
- Result.flip();
+ Result.flipAllBits();
return Result;
}
@@ -741,11 +747,11 @@ public:
/// RHS are treated as unsigned quantities for purposes of this division.
/// @returns a new APInt value containing the division result
/// @brief Unsigned division operation.
- APInt udiv(const APInt& RHS) const;
+ APInt udiv(const APInt &RHS) const;
/// Signed divide this APInt by APInt RHS.
/// @brief Signed division function for APInt.
- APInt sdiv(const APInt& RHS) const {
+ APInt sdiv(const APInt &RHS) const {
if (isNegative())
if (RHS.isNegative())
return (-(*this)).udiv(-RHS);
@@ -763,11 +769,11 @@ public:
/// which is *this.
/// @returns a new APInt value containing the remainder result
/// @brief Unsigned remainder operation.
- APInt urem(const APInt& RHS) const;
+ APInt urem(const APInt &RHS) const;
/// Signed remainder operation on APInt.
/// @brief Function for signed remainder operation.
- APInt srem(const APInt& RHS) const {
+ APInt srem(const APInt &RHS) const {
if (isNegative())
if (RHS.isNegative())
return -((-(*this)).urem(-RHS));
@@ -788,8 +794,7 @@ public:
APInt &Quotient, APInt &Remainder);
static void sdivrem(const APInt &LHS, const APInt &RHS,
- APInt &Quotient, APInt &Remainder)
- {
+ APInt &Quotient, APInt &Remainder) {
if (LHS.isNegative()) {
if (RHS.isNegative())
APInt::udivrem(-LHS, -RHS, Quotient, Remainder);
@@ -804,6 +809,16 @@ public:
APInt::udivrem(LHS, RHS, Quotient, Remainder);
}
}
+
+
+ // Operations that return overflow indicators.
+ APInt sadd_ov(const APInt &RHS, bool &Overflow) const;
+ APInt uadd_ov(const APInt &RHS, bool &Overflow) const;
+ APInt ssub_ov(const APInt &RHS, bool &Overflow) const;
+ APInt usub_ov(const APInt &RHS, bool &Overflow) const;
+ APInt sdiv_ov(const APInt &RHS, bool &Overflow) const;
+ APInt smul_ov(const APInt &RHS, bool &Overflow) const;
+ APInt sshl_ov(unsigned Amt, bool &Overflow) const;
/// @returns the bit value at bitPosition
/// @brief Array-indexing support.
@@ -868,7 +883,7 @@ public:
/// the validity of the less-than relationship.
/// @returns true if *this < RHS when both are considered unsigned.
/// @brief Unsigned less than comparison
- bool ult(const APInt& RHS) const;
+ bool ult(const APInt &RHS) const;
/// Regards both *this as an unsigned quantity and compares it with RHS for
/// the validity of the less-than relationship.
@@ -988,6 +1003,9 @@ public:
return sge(APInt(getBitWidth(), RHS));
}
+
+
+
/// This operation tests if there are any pairs of corresponding bits
/// between this APInt and RHS that are both set.
bool intersects(const APInt &RHS) const {
@@ -1000,80 +1018,78 @@ public:
/// Truncate the APInt to a specified width. It is an error to specify a width
/// that is greater than or equal to the current width.
/// @brief Truncate to new width.
- APInt &trunc(unsigned width);
+ APInt trunc(unsigned width) const;
/// This operation sign extends the APInt to a new width. If the high order
/// bit is set, the fill on the left will be done with 1 bits, otherwise zero.
/// It is an error to specify a width that is less than or equal to the
/// current width.
/// @brief Sign extend to a new width.
- APInt &sext(unsigned width);
+ APInt sext(unsigned width) const;
/// This operation zero extends the APInt to a new width. The high order bits
/// are filled with 0 bits. It is an error to specify a width that is less
/// than or equal to the current width.
/// @brief Zero extend to a new width.
- APInt &zext(unsigned width);
+ APInt zext(unsigned width) const;
/// Make this APInt have the bit width given by \p width. The value is sign
/// extended, truncated, or left alone to make it that width.
/// @brief Sign extend or truncate to width
- APInt &sextOrTrunc(unsigned width);
+ APInt sextOrTrunc(unsigned width) const;
/// Make this APInt have the bit width given by \p width. The value is zero
/// extended, truncated, or left alone to make it that width.
/// @brief Zero extend or truncate to width
- APInt &zextOrTrunc(unsigned width);
+ APInt zextOrTrunc(unsigned width) const;
/// @}
/// @name Bit Manipulation Operators
/// @{
/// @brief Set every bit to 1.
- APInt& set() {
- if (isSingleWord()) {
+ void setAllBits() {
+ if (isSingleWord())
VAL = -1ULL;
- return clearUnusedBits();
+ else {
+ // Set all the bits in all the words.
+ for (unsigned i = 0; i < getNumWords(); ++i)
+ pVal[i] = -1ULL;
}
-
- // Set all the bits in all the words.
- for (unsigned i = 0; i < getNumWords(); ++i)
- pVal[i] = -1ULL;
// Clear the unused ones
- return clearUnusedBits();
+ clearUnusedBits();
}
/// Set the given bit to 1 whose position is given as "bitPosition".
/// @brief Set a given bit to 1.
- APInt& set(unsigned bitPosition);
+ void setBit(unsigned bitPosition);
/// @brief Set every bit to 0.
- APInt& clear() {
+ void clearAllBits() {
if (isSingleWord())
VAL = 0;
else
memset(pVal, 0, getNumWords() * APINT_WORD_SIZE);
- return *this;
}
/// Set the given bit to 0 whose position is given as "bitPosition".
/// @brief Set a given bit to 0.
- APInt& clear(unsigned bitPosition);
+ void clearBit(unsigned bitPosition);
/// @brief Toggle every bit to its opposite value.
- APInt& flip() {
- if (isSingleWord()) {
+ void flipAllBits() {
+ if (isSingleWord())
VAL ^= -1ULL;
- return clearUnusedBits();
+ else {
+ for (unsigned i = 0; i < getNumWords(); ++i)
+ pVal[i] ^= -1ULL;
}
- for (unsigned i = 0; i < getNumWords(); ++i)
- pVal[i] ^= -1ULL;
- return clearUnusedBits();
+ clearUnusedBits();
}
/// Toggle a given bit to its opposite value whose position is given
/// as "bitPosition".
/// @brief Toggles a given bit to its opposite value.
- APInt& flip(unsigned bitPosition);
+ void flipBit(unsigned bitPosition);
/// @}
/// @name Value Characterization Functions
@@ -1281,37 +1297,27 @@ public:
}
/// The conversion does not do a translation from double to integer, it just
- /// re-interprets the bits of the double. Note that it is valid to do this on
- /// any bit width but bits from V may get truncated.
+ /// re-interprets the bits of the double.
/// @brief Converts a double to APInt bits.
- APInt& doubleToBits(double V) {
+ static APInt doubleToBits(double V) {
union {
uint64_t I;
double D;
} T;
T.D = V;
- if (isSingleWord())
- VAL = T.I;
- else
- pVal[0] = T.I;
- return clearUnusedBits();
+ return APInt(sizeof T * CHAR_BIT, T.I);
}
/// The conversion does not do a translation from float to integer, it just
- /// re-interprets the bits of the float. Note that it is valid to do this on
- /// any bit width but bits from V may get truncated.
+ /// re-interprets the bits of the float.
/// @brief Converts a float to APInt bits.
- APInt& floatToBits(float V) {
+ static APInt floatToBits(float V) {
union {
unsigned I;
float F;
} T;
T.F = V;
- if (isSingleWord())
- VAL = T.I;
- else
- pVal[0] = T.I;
- return clearUnusedBits();
+ return APInt(sizeof T * CHAR_BIT, T.I);
}
/// @}
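
A sketch (not from the patch) combining the new overflow-reporting
arithmetic, the value-returning resize operations (trunc/zext/sext no
longer mutate in place), and the renamed bit manipulators shown above.

    #include "llvm/ADT/APInt.h"
    using namespace llvm;

    void demoAPInt() {
      APInt A(8, 200), B(8, 100);
      bool Overflow = false;
      APInt Sum = A.uadd_ov(B, Overflow);       // wraps to 44; Overflow == true
      APInt Wide = A.zext(16);                  // returns a new value; A is unchanged
      APInt Narrow = Wide.trunc(8);             // back to 8 bits
      APInt Mask = APInt::getOneBitSet(16, 3);  // 0x0008
      Mask.setBit(0);                           // was set(unsigned)
      Mask.flipAllBits();                       // was flip()
      (void)Sum; (void)Narrow;
    }
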
diff --git a/include/llvm/ADT/APSInt.h b/include/llvm/ADT/APSInt.h
index 1c9931c..54a7b60 100644
--- a/include/llvm/ADT/APSInt.h
+++ b/include/llvm/ADT/APSInt.h
@@ -68,20 +68,22 @@ public:
}
using APInt::toString;
- APSInt& extend(uint32_t width) {
+ APSInt trunc(uint32_t width) const {
+ return APSInt(APInt::trunc(width), IsUnsigned);
+ }
+
+ APSInt extend(uint32_t width) const {
if (IsUnsigned)
- zext(width);
+ return APSInt(zext(width), IsUnsigned);
else
- sext(width);
- return *this;
+ return APSInt(sext(width), IsUnsigned);
}
- APSInt& extOrTrunc(uint32_t width) {
+ APSInt extOrTrunc(uint32_t width) const {
if (IsUnsigned)
- zextOrTrunc(width);
+ return APSInt(zextOrTrunc(width), IsUnsigned);
else
- sextOrTrunc(width);
- return *this;
+ return APSInt(sextOrTrunc(width), IsUnsigned);
}
const APSInt &operator%=(const APSInt &RHS) {
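
Usage sketch (not from the patch): extend() and extOrTrunc() are now const
and return a new value, widening according to the stored signedness, and
trunc() is new. The APSInt(APInt, bool) constructor used here is assumed
from the existing header, not shown in the hunk.

    #include "llvm/ADT/APSInt.h"
    using namespace llvm;

    void demoAPSInt() {
      APSInt S(APInt(8, 0xFF), /*isUnsigned=*/false);  // signed 8-bit -1
      APSInt W = S.extend(16);       // sign-extends to 16-bit -1 (0xFFFF)
      APSInt N = W.extOrTrunc(8);    // truncates back to 8 bits
      APSInt T = W.trunc(8);         // explicit truncation
      (void)N; (void)T;
    }
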
diff --git a/include/llvm/ADT/ArrayRef.h b/include/llvm/ADT/ArrayRef.h
new file mode 100644
index 0000000..1c5470d
--- /dev/null
+++ b/include/llvm/ADT/ArrayRef.h
@@ -0,0 +1,121 @@
+//===--- ArrayRef.h - Array Reference Wrapper -------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_ARRAYREF_H
+#define LLVM_ADT_ARRAYREF_H
+
+#include "llvm/ADT/SmallVector.h"
+#include <vector>
+
+namespace llvm {
+ class APInt;
+
+ /// ArrayRef - Represent a constant reference to an array (0 or more elements
+ /// consecutively in memory), i.e. a start pointer and a length. It allows
+ /// various APIs to take consecutive elements easily and conveniently.
+ ///
+ /// This class does not own the underlying data, it is expected to be used in
+ /// situations where the data resides in some other buffer, whose lifetime
+ /// extends past that of the StringRef. For this reason, it is not in general
+ /// safe to store a ArrayRef.
+ ///
+ /// This is intended to be trivially copyable, so it should be passed by
+ /// value.
+ template<typename T>
+ class ArrayRef {
+ public:
+ typedef const T *iterator;
+ typedef const T *const_iterator;
+ typedef size_t size_type;
+
+ private:
+ /// The start of the array, in an external buffer.
+ const T *Data;
+
+ /// The number of elements.
+ size_t Length;
+
+ public:
+ /// @name Constructors
+ /// @{
+
+ /// Construct an empty ArrayRef.
+ /*implicit*/ ArrayRef() : Data(0), Length(0) {}
+
+ /// Construct an ArrayRef from a single element.
+ /*implicit*/ ArrayRef(const T &OneElt)
+ : Data(&OneElt), Length(1) {}
+
+ /// Construct an ArrayRef from a pointer and length.
+ /*implicit*/ ArrayRef(const T *data, size_t length)
+ : Data(data), Length(length) {}
+
+ /// Construct an ArrayRef from a SmallVector.
+ /*implicit*/ ArrayRef(const SmallVectorImpl<T> &Vec)
+ : Data(Vec.data()), Length(Vec.size()) {}
+
+ /// Construct an ArrayRef from a std::vector.
+ /*implicit*/ ArrayRef(const std::vector<T> &Vec)
+ : Data(Vec.empty() ? (T*)0 : &Vec[0]), Length(Vec.size()) {}
+
+ // TODO: C arrays.
+
+ /// @}
+ /// @name Simple Operations
+ /// @{
+
+ iterator begin() const { return Data; }
+ iterator end() const { return Data + Length; }
+
+ /// empty - Check if the array is empty.
+ bool empty() const { return Length == 0; }
+
+ /// size - Get the array size.
+ size_t size() const { return Length; }
+
+ /// front - Get the first element.
+ const T &front() const {
+ assert(!empty());
+ return Data[0];
+ }
+
+ /// back - Get the last element.
+ const T &back() const {
+ assert(!empty());
+ return Data[Length-1];
+ }
+
+ /// @}
+ /// @name Operator Overloads
+ /// @{
+
+ const T &operator[](size_t Index) const {
+ assert(Index < Length && "Invalid index!");
+ return Data[Index];
+ }
+
+ /// @}
+ /// @name Expensive Operations
+ /// @{
+
+ std::vector<T> vec() const {
+ return std::vector<T>(Data, Data+Length);
+ }
+
+ /// @}
+ };
+
+ // ArrayRefs can be treated like a POD type.
+ template <typename T> struct isPodLike;
+ template <typename T> struct isPodLike<ArrayRef<T> > {
+ static const bool value = true;
+ };
+}
+
+#endif
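
Usage sketch (not from the patch) for the new ArrayRef: a non-owning
(pointer, length) view, so a single signature can accept one element, a
SmallVector, a std::vector, or an explicit pointer/length pair, provided
the underlying storage outlives the call.

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/ADT/SmallVector.h"
    #include <vector>
    using namespace llvm;

    static int sum(ArrayRef<int> Vals) {
      int S = 0;
      for (ArrayRef<int>::iterator I = Vals.begin(), E = Vals.end(); I != E; ++I)
        S += *I;
      return S;
    }

    void demoArrayRef() {
      SmallVector<int, 4> SV;
      SV.push_back(1);
      SV.push_back(2);
      std::vector<int> V(3, 7);
      int One = 42;
      sum(SV);                      // from SmallVectorImpl<int>
      sum(V);                       // from std::vector<int>
      sum(One);                     // from a single element
      sum(ArrayRef<int>(&One, 1));  // from pointer + length
    }
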
diff --git a/include/llvm/ADT/BitVector.h b/include/llvm/ADT/BitVector.h
index 9dcb9e1..ac1cf0c 100644
--- a/include/llvm/ADT/BitVector.h
+++ b/include/llvm/ADT/BitVector.h
@@ -18,6 +18,7 @@
#include <algorithm>
#include <cassert>
#include <climits>
+#include <cstdlib>
#include <cstring>
namespace llvm {
@@ -77,7 +78,7 @@ public:
/// bits are initialized to the specified value.
explicit BitVector(unsigned s, bool t = false) : Size(s) {
Capacity = NumBitWords(s);
- Bits = new BitWord[Capacity];
+ Bits = (BitWord *)std::malloc(Capacity * sizeof(BitWord));
init_words(Bits, Capacity, t);
if (t)
clear_unused_bits();
@@ -92,12 +93,12 @@ public:
}
Capacity = NumBitWords(RHS.size());
- Bits = new BitWord[Capacity];
- std::copy(RHS.Bits, &RHS.Bits[Capacity], Bits);
+ Bits = (BitWord *)std::malloc(Capacity * sizeof(BitWord));
+ std::memcpy(Bits, RHS.Bits, Capacity * sizeof(BitWord));
}
~BitVector() {
- delete[] Bits;
+ std::free(Bits);
}
/// empty - Tests whether there are no bits in this bitvector.
@@ -127,6 +128,12 @@ public:
return false;
}
+ /// all - Returns true if all bits are set.
+ bool all() const {
+ // TODO: Optimize this.
+ return count() == size();
+ }
+
/// none - Returns true if none of the bits are set.
bool none() const {
return !any();
@@ -335,18 +342,18 @@ public:
unsigned RHSWords = NumBitWords(Size);
if (Size <= Capacity * BITWORD_SIZE) {
if (Size)
- std::copy(RHS.Bits, &RHS.Bits[RHSWords], Bits);
+ std::memcpy(Bits, RHS.Bits, RHSWords * sizeof(BitWord));
clear_unused_bits();
return *this;
}
// Grow the bitvector to have enough elements.
Capacity = RHSWords;
- BitWord *NewBits = new BitWord[Capacity];
- std::copy(RHS.Bits, &RHS.Bits[RHSWords], NewBits);
+ BitWord *NewBits = (BitWord *)std::malloc(Capacity * sizeof(BitWord));
+ std::memcpy(NewBits, RHS.Bits, Capacity * sizeof(BitWord));
// Destroy the old bits.
- delete[] Bits;
+ std::free(Bits);
Bits = NewBits;
return *this;
@@ -384,17 +391,8 @@ private:
}
void grow(unsigned NewSize) {
- unsigned OldCapacity = Capacity;
- Capacity = NumBitWords(NewSize);
- BitWord *NewBits = new BitWord[Capacity];
-
- // Copy the old bits over.
- if (OldCapacity != 0)
- std::copy(Bits, &Bits[OldCapacity], NewBits);
-
- // Destroy the old bits.
- delete[] Bits;
- Bits = NewBits;
+ Capacity = std::max(NumBitWords(NewSize), Capacity * 2);
+ Bits = (BitWord *)std::realloc(Bits, Capacity * sizeof(BitWord));
clear_unused_bits();
}
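
Sketch (not from the patch): BitVector storage now goes through
malloc/realloc/free (BitWord is a plain integer type), letting grow()
extend the buffer in place, and all() is a new query.

    #include "llvm/ADT/BitVector.h"
    using namespace llvm;

    void demoBitVector() {
      BitVector BV(8, true);    // eight bits, all set
      bool AllSet = BV.all();   // true
      BV.resize(16);            // growth goes through realloc now
      AllSet = BV.all();        // false: the new bits default to 0
      (void)AllSet;
    }
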
diff --git a/include/llvm/ADT/DenseMap.h b/include/llvm/ADT/DenseMap.h
index 06a1575..61d6ae7 100644
--- a/include/llvm/ADT/DenseMap.h
+++ b/include/llvm/ADT/DenseMap.h
@@ -18,6 +18,7 @@
#include "llvm/Support/PointerLikeTypeTraits.h"
#include "llvm/Support/type_traits.h"
#include "llvm/ADT/DenseMapInfo.h"
+#include <algorithm>
#include <iterator>
#include <new>
#include <utility>
@@ -385,7 +386,7 @@ private:
// Insert the key/value into the new table.
BucketT *DestBucket;
bool FoundVal = LookupBucketFor(B->first, DestBucket);
- FoundVal = FoundVal; // silence warning.
+ (void)FoundVal; // silence warning.
assert(!FoundVal && "Key already in new map?");
DestBucket->first = B->first;
new (&DestBucket->second) ValueT(B->second);
diff --git a/include/llvm/ADT/DenseMapInfo.h b/include/llvm/ADT/DenseMapInfo.h
index 5299386..25e341b 100644
--- a/include/llvm/ADT/DenseMapInfo.h
+++ b/include/llvm/ADT/DenseMapInfo.h
@@ -102,6 +102,20 @@ template<> struct DenseMapInfo<int> {
}
};
+// Provide DenseMapInfo for longs.
+template<> struct DenseMapInfo<long> {
+ static inline long getEmptyKey() {
+ return (1UL << (sizeof(long) * 8 - 1)) - 1L;
+ }
+ static inline long getTombstoneKey() { return getEmptyKey() - 1L; }
+ static unsigned getHashValue(const long& Val) {
+ return (unsigned)(Val * 37L);
+ }
+ static bool isEqual(const long& LHS, const long& RHS) {
+ return LHS == RHS;
+ }
+};
+
// Provide DenseMapInfo for long longs.
template<> struct DenseMapInfo<long long> {
static inline long long getEmptyKey() { return 0x7fffffffffffffffLL; }
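
Sketch (not from the patch): with the new DenseMapInfo<long> specialization,
long keys work out of the box. The two largest positive long values are
reserved as the empty and tombstone keys and cannot be stored.

    #include "llvm/ADT/DenseMap.h"
    using namespace llvm;

    void demoLongKeys() {
      DenseMap<long, unsigned> Counts;
      Counts[42L] += 1;                       // no custom DenseMapInfo needed
      bool Present = Counts.count(42L) != 0;  // true
      (void)Present;
    }
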
diff --git a/include/llvm/ADT/DenseSet.h b/include/llvm/ADT/DenseSet.h
index 00bcf64..67321f5 100644
--- a/include/llvm/ADT/DenseSet.h
+++ b/include/llvm/ADT/DenseSet.h
@@ -33,6 +33,9 @@ public:
bool empty() const { return TheMap.empty(); }
unsigned size() const { return TheMap.size(); }
+ /// Grow the denseset so that it has at least Size buckets. Does not shrink
+ void resize(size_t Size) { TheMap.resize(Size); }
+
void clear() {
TheMap.clear();
}
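
Sketch (not from the patch): the new DenseSet::resize() forwards to the
underlying DenseMap, pre-allocating buckets so a known number of
insertions avoids rehashing; it never shrinks the set.

    #include "llvm/ADT/DenseSet.h"
    using namespace llvm;

    void demoDenseSet() {
      DenseSet<unsigned> Seen;
      Seen.resize(64);                   // reserve buckets up front
      for (unsigned i = 0; i != 32; ++i)
        Seen.insert(i);
    }
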
diff --git a/include/llvm/ADT/EquivalenceClasses.h b/include/llvm/ADT/EquivalenceClasses.h
index 07a5edf..771476c 100644
--- a/include/llvm/ADT/EquivalenceClasses.h
+++ b/include/llvm/ADT/EquivalenceClasses.h
@@ -15,7 +15,7 @@
#ifndef LLVM_ADT_EQUIVALENCECLASSES_H
#define LLVM_ADT_EQUIVALENCECLASSES_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include <cassert>
#include <set>
diff --git a/include/llvm/ADT/FoldingSet.h b/include/llvm/ADT/FoldingSet.h
index 662b5e2..879dbd0 100644
--- a/include/llvm/ADT/FoldingSet.h
+++ b/include/llvm/ADT/FoldingSet.h
@@ -16,7 +16,7 @@
#ifndef LLVM_ADT_FOLDINGSET_H
#define LLVM_ADT_FOLDINGSET_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
diff --git a/include/llvm/ADT/ImmutableIntervalMap.h b/include/llvm/ADT/ImmutableIntervalMap.h
index 968ce15..d3196ca 100644
--- a/include/llvm/ADT/ImmutableIntervalMap.h
+++ b/include/llvm/ADT/ImmutableIntervalMap.h
@@ -94,7 +94,7 @@ public:
: ImutAVLFactory<ImutInfo>(Alloc) {}
TreeTy *Add(TreeTy *T, value_type_ref V) {
- T = Add_internal(V,T);
+ T = add_internal(V,T);
this->MarkImmutable(T);
return T;
}
@@ -103,20 +103,20 @@ public:
if (!T)
return NULL;
- key_type_ref CurrentKey = ImutInfo::KeyOfValue(this->Value(T));
+ key_type_ref CurrentKey = ImutInfo::KeyOfValue(this->getValue(T));
if (ImutInfo::isContainedIn(K, CurrentKey))
return T;
else if (ImutInfo::isLess(K, CurrentKey))
- return Find(this->Left(T), K);
+ return Find(this->getLeft(T), K);
else
- return Find(this->Right(T), K);
+ return Find(this->getRight(T), K);
}
private:
- TreeTy *Add_internal(value_type_ref V, TreeTy *T) {
+ TreeTy *add_internal(value_type_ref V, TreeTy *T) {
key_type_ref K = ImutInfo::KeyOfValue(V);
- T = RemoveAllOverlaps(T, K);
+ T = removeAllOverlaps(T, K);
if (this->isEmpty(T))
return this->CreateNode(NULL, V, NULL);
@@ -125,38 +125,38 @@ private:
key_type_ref KCurrent = ImutInfo::KeyOfValue(this->Value(T));
if (ImutInfo::isLess(K, KCurrent))
- return this->Balance(Add_internal(V, this->Left(T)), this->Value(T),
+ return this->Balance(add_internal(V, this->Left(T)), this->Value(T),
this->Right(T));
else
return this->Balance(this->Left(T), this->Value(T),
- Add_internal(V, this->Right(T)));
+ add_internal(V, this->Right(T)));
}
// Remove all overlaps from T.
- TreeTy *RemoveAllOverlaps(TreeTy *T, key_type_ref K) {
+ TreeTy *removeAllOverlaps(TreeTy *T, key_type_ref K) {
bool Changed;
do {
Changed = false;
- T = RemoveOverlap(T, K, Changed);
- this->MarkImmutable(T);
+ T = removeOverlap(T, K, Changed);
+ this->markImmutable(T);
} while (Changed);
return T;
}
// Remove one overlap from T.
- TreeTy *RemoveOverlap(TreeTy *T, key_type_ref K, bool &Changed) {
+ TreeTy *removeOverlap(TreeTy *T, key_type_ref K, bool &Changed) {
if (!T)
return NULL;
Interval CurrentK = ImutInfo::KeyOfValue(this->Value(T));
// If current key does not overlap the inserted key.
if (CurrentK.getStart() > K.getEnd())
- return this->Balance(RemoveOverlap(this->Left(T), K, Changed),
+ return this->Balance(removeOverlap(this->Left(T), K, Changed),
this->Value(T), this->Right(T));
else if (CurrentK.getEnd() < K.getStart())
return this->Balance(this->Left(T), this->Value(T),
- RemoveOverlap(this->Right(T), K, Changed));
+ removeOverlap(this->Right(T), K, Changed));
// Current key overlaps with the inserted key.
// Remove the current key.
@@ -167,18 +167,18 @@ private:
if (CurrentK.getStart() < K.getStart()) {
if (CurrentK.getEnd() <= K.getEnd()) {
Interval NewK(CurrentK.getStart(), K.getStart()-1);
- return Add_internal(std::make_pair(NewK, OldData), T);
+ return add_internal(std::make_pair(NewK, OldData), T);
} else {
Interval NewK1(CurrentK.getStart(), K.getStart()-1);
- T = Add_internal(std::make_pair(NewK1, OldData), T);
+ T = add_internal(std::make_pair(NewK1, OldData), T);
Interval NewK2(K.getEnd()+1, CurrentK.getEnd());
- return Add_internal(std::make_pair(NewK2, OldData), T);
+ return add_internal(std::make_pair(NewK2, OldData), T);
}
} else {
if (CurrentK.getEnd() > K.getEnd()) {
Interval NewK(K.getEnd()+1, CurrentK.getEnd());
- return Add_internal(std::make_pair(NewK, OldData), T);
+ return add_internal(std::make_pair(NewK, OldData), T);
} else
return T;
}
@@ -209,22 +209,22 @@ public:
public:
Factory(BumpPtrAllocator& Alloc) : F(Alloc) {}
- ImmutableIntervalMap GetEmptyMap() {
- return ImmutableIntervalMap(F.GetEmptyTree());
+ ImmutableIntervalMap getEmptyMap() {
+ return ImmutableIntervalMap(F.getEmptyTree());
}
- ImmutableIntervalMap Add(ImmutableIntervalMap Old,
+ ImmutableIntervalMap add(ImmutableIntervalMap Old,
key_type_ref K, data_type_ref D) {
- TreeTy *T = F.Add(Old.Root, std::make_pair<key_type, data_type>(K, D));
- return ImmutableIntervalMap(F.GetCanonicalTree(T));
+ TreeTy *T = F.add(Old.Root, std::make_pair<key_type, data_type>(K, D));
+ return ImmutableIntervalMap(F.getCanonicalTree(T));
}
- ImmutableIntervalMap Remove(ImmutableIntervalMap Old, key_type_ref K) {
- TreeTy *T = F.Remove(Old.Root, K);
- return ImmutableIntervalMap(F.GetCanonicalTree(T));
+ ImmutableIntervalMap remove(ImmutableIntervalMap Old, key_type_ref K) {
+ TreeTy *T = F.remove(Old.Root, K);
+ return ImmutableIntervalMap(F.getCanonicalTree(T));
}
- data_type *Lookup(ImmutableIntervalMap M, key_type_ref K) {
+ data_type *lookup(ImmutableIntervalMap M, key_type_ref K) {
TreeTy *T = F.Find(M.getRoot(), K);
if (T)
return &T->getValue().second;
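
Sketch (not from the patch) of the renamed lowercase interval-map factory.
Per removeAllOverlaps() above, inserting an interval first strips any
overlapping entries and re-adds their non-overlapping remainders. The
Interval(start, end) constructor and the Factory(BumpPtrAllocator&)
signature are taken from the hunks above; the single-ValT template
parameter is assumed from the existing header.

    #include "llvm/ADT/ImmutableIntervalMap.h"
    #include "llvm/Support/Allocator.h"
    using namespace llvm;

    void demoIntervalMap() {
      BumpPtrAllocator Alloc;
      ImmutableIntervalMap<unsigned>::Factory F(Alloc);
      ImmutableIntervalMap<unsigned> M = F.getEmptyMap();
      M = F.add(M, Interval(0, 9), 1);                   // [0,9] -> 1
      M = F.add(M, Interval(5, 15), 2);                  // trims [0,9] to [0,4]
      const unsigned *D = F.lookup(M, Interval(7, 7));   // points at the value 2
      (void)D;
    }
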
diff --git a/include/llvm/ADT/ImmutableList.h b/include/llvm/ADT/ImmutableList.h
index 7757c08..714355b 100644
--- a/include/llvm/ADT/ImmutableList.h
+++ b/include/llvm/ADT/ImmutableList.h
@@ -16,7 +16,7 @@
#include "llvm/Support/Allocator.h"
#include "llvm/ADT/FoldingSet.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include <cassert>
namespace llvm {
@@ -156,7 +156,7 @@ public:
if (ownsAllocator()) delete &getAllocator();
}
- ImmutableList<T> Concat(const T& Head, ImmutableList<T> Tail) {
+ ImmutableList<T> concat(const T& Head, ImmutableList<T> Tail) {
// Profile the new list to see if it already exists in our cache.
FoldingSetNodeID ID;
void* InsertPos;
@@ -178,16 +178,16 @@ public:
return L;
}
- ImmutableList<T> Add(const T& D, ImmutableList<T> L) {
- return Concat(D, L);
+ ImmutableList<T> add(const T& D, ImmutableList<T> L) {
+ return concat(D, L);
}
- ImmutableList<T> GetEmptyList() const {
+ ImmutableList<T> getEmptyList() const {
return ImmutableList<T>(0);
}
- ImmutableList<T> Create(const T& X) {
- return Concat(X, GetEmptyList());
+ ImmutableList<T> create(const T& X) {
+ return Concat(X, getEmptyList());
}
};
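
Sketch (not from the patch) of the renamed lowercase list factory. Lists
are persistent, so add() returns a new list sharing the old tail, and the
factory uniques nodes through its FoldingSet cache; the Factory typedef
and operator== are assumed from the existing header.

    #include "llvm/ADT/ImmutableList.h"
    using namespace llvm;

    void demoImmutableList() {
      ImmutableList<int>::Factory F;
      ImmutableList<int> Empty = F.getEmptyList();
      ImmutableList<int> L1 = F.add(3, Empty);     // [3]
      ImmutableList<int> L2 = F.add(2, L1);        // [2, 3]; shares L1's node
      bool Same = (L1 == F.add(3, Empty));         // true: nodes are uniqued
      (void)Same; (void)L2;
    }
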
diff --git a/include/llvm/ADT/ImmutableMap.h b/include/llvm/ADT/ImmutableMap.h
index 8af128e..e439a09 100644
--- a/include/llvm/ADT/ImmutableMap.h
+++ b/include/llvm/ADT/ImmutableMap.h
@@ -76,7 +76,23 @@ public:
/// should use a Factory object to create maps instead of directly
/// invoking the constructor, but there are cases where make this
/// constructor public is useful.
- explicit ImmutableMap(const TreeTy* R) : Root(const_cast<TreeTy*>(R)) {}
+ explicit ImmutableMap(const TreeTy* R) : Root(const_cast<TreeTy*>(R)) {
+ if (Root) { Root->retain(); }
+ }
+ ImmutableMap(const ImmutableMap &X) : Root(X.Root) {
+ if (Root) { Root->retain(); }
+ }
+ ImmutableMap &operator=(const ImmutableMap &X) {
+ if (Root != X.Root) {
+ if (X.Root) { X.Root->retain(); }
+ if (Root) { Root->release(); }
+ Root = X.Root;
+ }
+ return *this;
+ }
+ ~ImmutableMap() {
+ if (Root) { Root->release(); }
+ }
class Factory {
typename TreeTy::Factory F;
@@ -89,16 +105,16 @@ public:
Factory(BumpPtrAllocator& Alloc, bool canonicalize = true)
: F(Alloc), Canonicalize(canonicalize) {}
- ImmutableMap GetEmptyMap() { return ImmutableMap(F.GetEmptyTree()); }
+ ImmutableMap getEmptyMap() { return ImmutableMap(F.getEmptyTree()); }
- ImmutableMap Add(ImmutableMap Old, key_type_ref K, data_type_ref D) {
- TreeTy *T = F.Add(Old.Root, std::make_pair<key_type,data_type>(K,D));
- return ImmutableMap(Canonicalize ? F.GetCanonicalTree(T): T);
+ ImmutableMap add(ImmutableMap Old, key_type_ref K, data_type_ref D) {
+ TreeTy *T = F.add(Old.Root, std::make_pair<key_type,data_type>(K,D));
+ return ImmutableMap(Canonicalize ? F.getCanonicalTree(T): T);
}
- ImmutableMap Remove(ImmutableMap Old, key_type_ref K) {
- TreeTy *T = F.Remove(Old.Root,K);
- return ImmutableMap(Canonicalize ? F.GetCanonicalTree(T): T);
+ ImmutableMap remove(ImmutableMap Old, key_type_ref K) {
+ TreeTy *T = F.remove(Old.Root,K);
+ return ImmutableMap(Canonicalize ? F.getCanonicalTree(T): T);
}
private:
@@ -110,15 +126,30 @@ public:
return Root ? Root->contains(K) : false;
}
- bool operator==(ImmutableMap RHS) const {
+ bool operator==(const ImmutableMap &RHS) const {
return Root && RHS.Root ? Root->isEqual(*RHS.Root) : Root == RHS.Root;
}
- bool operator!=(ImmutableMap RHS) const {
+ bool operator!=(const ImmutableMap &RHS) const {
return Root && RHS.Root ? Root->isNotEqual(*RHS.Root) : Root != RHS.Root;
}
- TreeTy* getRoot() const { return Root; }
+ TreeTy *getRoot() const {
+ if (Root) { Root->retain(); }
+ return Root;
+ }
+
+ TreeTy *getRootWithoutRetain() const {
+ return Root;
+ }
+
+ void manualRetain() {
+ if (Root) Root->retain();
+ }
+
+ void manualRelease() {
+ if (Root) Root->release();
+ }
bool isEmpty() const { return !Root; }
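
Sketch (not from the patch) of the renamed lowercase map factory. Maps now
retain/release their root node, so a copy stays valid even after the
factory reclaims intermediate trees that ended up unreferenced.

    #include "llvm/ADT/ImmutableMap.h"
    #include "llvm/Support/Allocator.h"
    using namespace llvm;

    void demoImmutableMap() {
      BumpPtrAllocator Alloc;
      ImmutableMap<int, int>::Factory F(Alloc);
      ImmutableMap<int, int> M0 = F.getEmptyMap();
      ImmutableMap<int, int> M1 = F.add(M0, 1, 10);
      ImmutableMap<int, int> M2 = F.remove(M1, 1);
      bool Has = M1.contains(1);     // true; M0 and M2 are unaffected
      bool Same = (M2 == M0);        // true: both are empty
      (void)Has; (void)Same;
    }
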
diff --git a/include/llvm/ADT/ImmutableSet.h b/include/llvm/ADT/ImmutableSet.h
index 70c3caf..3ca910c 100644
--- a/include/llvm/ADT/ImmutableSet.h
+++ b/include/llvm/ADT/ImmutableSet.h
@@ -15,10 +15,13 @@
#define LLVM_ADT_IMSET_H
#include "llvm/Support/Allocator.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include <cassert>
#include <functional>
+#include <vector>
+#include <stdio.h>
namespace llvm {
@@ -32,7 +35,7 @@ template <typename ImutInfo> class ImutAVLTreeInOrderIterator;
template <typename ImutInfo> class ImutAVLTreeGenericIterator;
template <typename ImutInfo >
-class ImutAVLTree : public FoldingSetNode {
+class ImutAVLTree {
public:
typedef typename ImutInfo::key_type_ref key_type_ref;
typedef typename ImutInfo::value_type value_type;
@@ -43,7 +46,6 @@ public:
friend class ImutIntervalAVLFactory<ImutInfo>;
friend class ImutAVLTreeGenericIterator<ImutInfo>;
- friend class FoldingSet<ImutAVLTree>;
typedef ImutAVLTreeInOrderIterator<ImutInfo> iterator;
@@ -51,29 +53,27 @@ public:
// Public Interface.
//===----------------------------------------------------===//
- /// getLeft - Returns a pointer to the left subtree. This value
+ /// Return a pointer to the left subtree. This value
/// is NULL if there is no left subtree.
- ImutAVLTree *getLeft() const { return Left; }
+ ImutAVLTree *getLeft() const { return left; }
- /// getRight - Returns a pointer to the right subtree. This value is
+ /// Return a pointer to the right subtree. This value is
/// NULL if there is no right subtree.
- ImutAVLTree *getRight() const { return Right; }
+ ImutAVLTree *getRight() const { return right; }
/// getHeight - Returns the height of the tree. A tree with no subtrees
/// has a height of 1.
- unsigned getHeight() const { return Height; }
+ unsigned getHeight() const { return height; }
/// getValue - Returns the data value associated with the tree node.
- const value_type& getValue() const { return Value; }
+ const value_type& getValue() const { return value; }
/// find - Finds the subtree associated with the specified key value.
/// This method returns NULL if no matching subtree is found.
ImutAVLTree* find(key_type_ref K) {
ImutAVLTree *T = this;
-
while (T) {
key_type_ref CurrentKey = ImutInfo::KeyOfValue(T->getValue());
-
if (ImutInfo::isEqual(K,CurrentKey))
return T;
else if (ImutInfo::isLess(K,CurrentKey))
@@ -81,7 +81,6 @@ public:
else
T = T->getRight();
}
-
return NULL;
}
@@ -90,7 +89,7 @@ public:
ImutAVLTree* getMaxElement() {
ImutAVLTree *T = this;
ImutAVLTree *Right = T->getRight();
- while (Right) { T = Right; Right = T->getRight(); }
+ while (Right) { T = right; right = T->getRight(); }
return T;
}
@@ -98,10 +97,10 @@ public:
/// both leaves and non-leaf nodes.
unsigned size() const {
unsigned n = 1;
-
- if (const ImutAVLTree* L = getLeft()) n += L->size();
- if (const ImutAVLTree* R = getRight()) n += R->size();
-
+ if (const ImutAVLTree* L = getLeft())
+ n += L->size();
+ if (const ImutAVLTree* R = getRight())
+ n += R->size();
return n;
}
@@ -114,7 +113,7 @@ public:
/// inorder traversal.
iterator end() const { return iterator(); }
- bool ElementEqual(value_type_ref V) const {
+ bool isElementEqual(value_type_ref V) const {
// Compare the keys.
if (!ImutInfo::isEqual(ImutInfo::KeyOfValue(getValue()),
ImutInfo::KeyOfValue(V)))
@@ -128,8 +127,8 @@ public:
return true;
}
- bool ElementEqual(const ImutAVLTree* RHS) const {
- return ElementEqual(RHS->getValue());
+ bool isElementEqual(const ImutAVLTree* RHS) const {
+ return isElementEqual(RHS->getValue());
}
/// isEqual - Compares two trees for structural equality and returns true
@@ -144,12 +143,12 @@ public:
while (LItr != LEnd && RItr != REnd) {
if (*LItr == *RItr) {
- LItr.SkipSubTree();
- RItr.SkipSubTree();
+ LItr.skipSubTree();
+ RItr.skipSubTree();
continue;
}
- if (!LItr->ElementEqual(*RItr))
+ if (!LItr->isElementEqual(*RItr))
return false;
++LItr;
@@ -173,22 +172,24 @@ public:
/// Nodes are visited using an inorder traversal.
template <typename Callback>
void foreach(Callback& C) {
- if (ImutAVLTree* L = getLeft()) L->foreach(C);
+ if (ImutAVLTree* L = getLeft())
+ L->foreach(C);
- C(Value);
+ C(value);
- if (ImutAVLTree* R = getRight()) R->foreach(C);
+ if (ImutAVLTree* R = getRight())
+ R->foreach(C);
}
- /// verify - A utility method that checks that the balancing and
+ /// validateTree - A utility method that checks that the balancing and
/// ordering invariants of the tree are satisifed. It is a recursive
/// method that returns the height of the tree, which is then consumed
- /// by the enclosing verify call. External callers should ignore the
+ /// by the enclosing validateTree call. External callers should ignore the
/// return value. An invalid tree will cause an assertion to fire in
/// a debug build.
- unsigned verify() const {
- unsigned HL = getLeft() ? getLeft()->verify() : 0;
- unsigned HR = getRight() ? getRight()->verify() : 0;
+ unsigned validateTree() const {
+ unsigned HL = getLeft() ? getLeft()->validateTree() : 0;
+ unsigned HR = getRight() ? getRight()->validateTree() : 0;
(void) HL;
(void) HR;
@@ -198,37 +199,39 @@ public:
assert((HL > HR ? HL-HR : HR-HL) <= 2
&& "Balancing invariant violated");
- assert(!getLeft()
- || ImutInfo::isLess(ImutInfo::KeyOfValue(getLeft()->getValue()),
- ImutInfo::KeyOfValue(getValue()))
- && "Value in left child is not less that current value");
+ assert((!getLeft() ||
+ ImutInfo::isLess(ImutInfo::KeyOfValue(getLeft()->getValue()),
+ ImutInfo::KeyOfValue(getValue()))) &&
+ "Value in left child is not less that current value");
- assert(!getRight()
- || ImutInfo::isLess(ImutInfo::KeyOfValue(getValue()),
- ImutInfo::KeyOfValue(getRight()->getValue()))
- && "Current value is not less that value of right child");
+ assert(!(getRight() ||
+ ImutInfo::isLess(ImutInfo::KeyOfValue(getValue()),
+ ImutInfo::KeyOfValue(getRight()->getValue()))) &&
+ "Current value is not less that value of right child");
return getHeight();
}
- /// Profile - Profiling for ImutAVLTree.
- void Profile(llvm::FoldingSetNodeID& ID) {
- ID.AddInteger(ComputeDigest());
- }
-
//===----------------------------------------------------===//
- // Internal Values.
+ // Internal values.
//===----------------------------------------------------===//
private:
- ImutAVLTree* Left;
- ImutAVLTree* Right;
- unsigned Height : 28;
- unsigned Mutable : 1;
- unsigned CachedDigest : 1;
- value_type Value;
- uint32_t Digest;
+ Factory *factory;
+ ImutAVLTree *left;
+ ImutAVLTree *right;
+ ImutAVLTree *prev;
+ ImutAVLTree *next;
+
+ unsigned height : 28;
+ unsigned IsMutable : 1;
+ unsigned IsDigestCached : 1;
+ unsigned IsCanonicalized : 1;
+
+ value_type value;
+ uint32_t digest;
+ uint32_t refCount;
//===----------------------------------------------------===//
// Internal methods (node manipulation; used by Factory).
@@ -237,10 +240,15 @@ private:
private:
/// ImutAVLTree - Internal constructor that is only called by
/// ImutAVLFactory.
- ImutAVLTree(ImutAVLTree* l, ImutAVLTree* r, value_type_ref v,
+ ImutAVLTree(Factory *f, ImutAVLTree* l, ImutAVLTree* r, value_type_ref v,
unsigned height)
- : Left(l), Right(r), Height(height), Mutable(true), CachedDigest(false),
- Value(v), Digest(0) {}
+ : factory(f), left(l), right(r), prev(0), next(0), height(height),
+ IsMutable(true), IsDigestCached(false), IsCanonicalized(0),
+ value(v), digest(0), refCount(0)
+ {
+ if (left) left->retain();
+ if (right) right->retain();
+ }
/// isMutable - Returns true if the left and right subtree references
/// (as well as height) can be changed. If this method returns false,
@@ -248,11 +256,11 @@ private:
/// object should always have this method return true. Further, if this
/// method returns false for an instance of ImutAVLTree, all subtrees
/// will also have this method return false. The converse is not true.
- bool isMutable() const { return Mutable; }
+ bool isMutable() const { return IsMutable; }
/// hasCachedDigest - Returns true if the digest for this tree is cached.
/// This can only be true if the tree is immutable.
- bool hasCachedDigest() const { return CachedDigest; }
+ bool hasCachedDigest() const { return IsDigestCached; }
//===----------------------------------------------------===//
// Mutating operations. A tree root can be manipulated as
@@ -265,51 +273,32 @@ private:
// immutable.
//===----------------------------------------------------===//
- /// MarkImmutable - Clears the mutable flag for a tree. After this happens,
+ /// markImmutable - Clears the mutable flag for a tree. After this happens,
/// it is an error to call setLeft(), setRight(), and setHeight().
- void MarkImmutable() {
+ void markImmutable() {
assert(isMutable() && "Mutable flag already removed.");
- Mutable = false;
+ IsMutable = false;
}
- /// MarkedCachedDigest - Clears the NoCachedDigest flag for a tree.
- void MarkedCachedDigest() {
+ /// markedCachedDigest - Clears the NoCachedDigest flag for a tree.
+ void markedCachedDigest() {
assert(!hasCachedDigest() && "NoCachedDigest flag already removed.");
- CachedDigest = true;
- }
-
- /// setLeft - Changes the reference of the left subtree. Used internally
- /// by ImutAVLFactory.
- void setLeft(ImutAVLTree* NewLeft) {
- assert(isMutable() &&
- "Only a mutable tree can have its left subtree changed.");
- Left = NewLeft;
- CachedDigest = false;
- }
-
- /// setRight - Changes the reference of the right subtree. Used internally
- /// by ImutAVLFactory.
- void setRight(ImutAVLTree* NewRight) {
- assert(isMutable() &&
- "Only a mutable tree can have its right subtree changed.");
-
- Right = NewRight;
- CachedDigest = false;
+ IsDigestCached = true;
}
/// setHeight - Changes the height of the tree. Used internally by
/// ImutAVLFactory.
void setHeight(unsigned h) {
assert(isMutable() && "Only a mutable tree can have its height changed.");
- Height = h;
+ height = h;
}
static inline
- uint32_t ComputeDigest(ImutAVLTree* L, ImutAVLTree* R, value_type_ref V) {
+ uint32_t computeDigest(ImutAVLTree* L, ImutAVLTree* R, value_type_ref V) {
uint32_t digest = 0;
if (L)
- digest += L->ComputeDigest();
+ digest += L->computeDigest();
// Compute digest of stored data.
FoldingSetNodeID ID;
@@ -317,22 +306,54 @@ private:
digest += ID.ComputeHash();
if (R)
- digest += R->ComputeDigest();
+ digest += R->computeDigest();
return digest;
}
- inline uint32_t ComputeDigest() {
+ inline uint32_t computeDigest() {
// Check the lowest bit to determine if digest has actually been
// pre-computed.
if (hasCachedDigest())
- return Digest;
+ return digest;
- uint32_t X = ComputeDigest(getLeft(), getRight(), getValue());
- Digest = X;
- MarkedCachedDigest();
+ uint32_t X = computeDigest(getLeft(), getRight(), getValue());
+ digest = X;
+ markedCachedDigest();
return X;
}
+
+ //===----------------------------------------------------===//
+ // Reference count operations.
+ //===----------------------------------------------------===//
+
+public:
+ void retain() { ++refCount; }
+ void release() {
+ assert(refCount > 0);
+ if (--refCount == 0)
+ destroy();
+ }
+ void destroy() {
+ if (left)
+ left->release();
+ if (right)
+ right->release();
+ if (IsCanonicalized) {
+ if (next)
+ next->prev = prev;
+
+ if (prev)
+ prev->next = next;
+ else
+ factory->Cache[computeDigest()] = next;
+ }
+
+ // We need to clear the mutability bit in case we are
+ // destroying the node as part of a sweep in ImutAVLFactory::recoverNodes().
+ IsMutable = false;
+ factory->freeNodes.push_back(this);
+ }
};
//===----------------------------------------------------------------------===//
@@ -341,14 +362,17 @@ private:
template <typename ImutInfo >
class ImutAVLFactory {
+ friend class ImutAVLTree<ImutInfo>;
typedef ImutAVLTree<ImutInfo> TreeTy;
typedef typename TreeTy::value_type_ref value_type_ref;
typedef typename TreeTy::key_type_ref key_type_ref;
- typedef FoldingSet<TreeTy> CacheTy;
+ typedef DenseMap<unsigned, TreeTy*> CacheTy;
CacheTy Cache;
uintptr_t Allocator;
+ std::vector<TreeTy*> createdNodes;
+ std::vector<TreeTy*> freeNodes;
bool ownsAllocator() const {
return Allocator & 0x1 ? false : true;
@@ -373,55 +397,56 @@ public:
if (ownsAllocator()) delete &getAllocator();
}
- TreeTy* Add(TreeTy* T, value_type_ref V) {
- T = Add_internal(V,T);
- MarkImmutable(T);
+ TreeTy* add(TreeTy* T, value_type_ref V) {
+ T = add_internal(V,T);
+ markImmutable(T);
+ recoverNodes();
return T;
}
- TreeTy* Remove(TreeTy* T, key_type_ref V) {
- T = Remove_internal(V,T);
- MarkImmutable(T);
+ TreeTy* remove(TreeTy* T, key_type_ref V) {
+ T = remove_internal(V,T);
+ markImmutable(T);
+ recoverNodes();
return T;
}
- TreeTy* GetEmptyTree() const { return NULL; }
+ TreeTy* getEmptyTree() const { return NULL; }
+protected:
+
//===--------------------------------------------------===//
// A bunch of quick helper functions used for reasoning
// about the properties of trees and their children.
// These have succinct names so that the balancing code
// is as terse (and readable) as possible.
//===--------------------------------------------------===//
-protected:
- bool isEmpty(TreeTy* T) const { return !T; }
- unsigned Height(TreeTy* T) const { return T ? T->getHeight() : 0; }
- TreeTy* Left(TreeTy* T) const { return T->getLeft(); }
- TreeTy* Right(TreeTy* T) const { return T->getRight(); }
- value_type_ref Value(TreeTy* T) const { return T->Value; }
+ bool isEmpty(TreeTy* T) const { return !T; }
+ unsigned getHeight(TreeTy* T) const { return T ? T->getHeight() : 0; }
+ TreeTy* getLeft(TreeTy* T) const { return T->getLeft(); }
+ TreeTy* getRight(TreeTy* T) const { return T->getRight(); }
+ value_type_ref getValue(TreeTy* T) const { return T->value; }
- unsigned IncrementHeight(TreeTy* L, TreeTy* R) const {
- unsigned hl = Height(L);
- unsigned hr = Height(R);
+ unsigned incrementHeight(TreeTy* L, TreeTy* R) const {
+ unsigned hl = getHeight(L);
+ unsigned hr = getHeight(R);
return (hl > hr ? hl : hr) + 1;
}
- static bool CompareTreeWithSection(TreeTy* T,
+ static bool compareTreeWithSection(TreeTy* T,
typename TreeTy::iterator& TI,
typename TreeTy::iterator& TE) {
-
typename TreeTy::iterator I = T->begin(), E = T->end();
-
- for ( ; I!=E ; ++I, ++TI)
- if (TI == TE || !I->ElementEqual(*TI))
+ for ( ; I!=E ; ++I, ++TI) {
+ if (TI == TE || !I->isElementEqual(*TI))
return false;
-
+ }
return true;
}
//===--------------------------------------------------===//
- // "CreateNode" is used to generate new tree roots that link
+ // "createNode" is used to generate new tree roots that link
// to other trees. The functon may also simply move links
// in an existing root if that root is still marked mutable.
// This is necessary because otherwise our balancing code
@@ -430,181 +455,188 @@ protected:
// returned to the caller.
//===--------------------------------------------------===//
- TreeTy* CreateNode(TreeTy* L, value_type_ref V, TreeTy* R) {
+ TreeTy* createNode(TreeTy* L, value_type_ref V, TreeTy* R) {
BumpPtrAllocator& A = getAllocator();
- TreeTy* T = (TreeTy*) A.Allocate<TreeTy>();
- new (T) TreeTy(L, R, V, IncrementHeight(L,R));
+ TreeTy* T;
+ if (!freeNodes.empty()) {
+ T = freeNodes.back();
+ freeNodes.pop_back();
+ assert(T != L);
+ assert(T != R);
+ }
+ else {
+ T = (TreeTy*) A.Allocate<TreeTy>();
+ }
+ new (T) TreeTy(this, L, R, V, incrementHeight(L,R));
+ createdNodes.push_back(T);
return T;
}
- TreeTy* CreateNode(TreeTy* L, TreeTy* OldTree, TreeTy* R) {
- assert(!isEmpty(OldTree));
+ TreeTy* createNode(TreeTy* newLeft, TreeTy* oldTree, TreeTy* newRight) {
+ return createNode(newLeft, getValue(oldTree), newRight);
+ }
- if (OldTree->isMutable()) {
- OldTree->setLeft(L);
- OldTree->setRight(R);
- OldTree->setHeight(IncrementHeight(L, R));
- return OldTree;
+ void recoverNodes() {
+ for (unsigned i = 0, n = createdNodes.size(); i < n; ++i) {
+ TreeTy *N = createdNodes[i];
+ if (N->isMutable() && N->refCount == 0)
+ N->destroy();
}
- else
- return CreateNode(L, Value(OldTree), R);
+ createdNodes.clear();
}
- /// Balance - Used by Add_internal and Remove_internal to
+ /// balanceTree - Used by add_internal and remove_internal to
/// balance a newly created tree.
- TreeTy* Balance(TreeTy* L, value_type_ref V, TreeTy* R) {
-
- unsigned hl = Height(L);
- unsigned hr = Height(R);
+ TreeTy* balanceTree(TreeTy* L, value_type_ref V, TreeTy* R) {
+ unsigned hl = getHeight(L);
+ unsigned hr = getHeight(R);
if (hl > hr + 2) {
assert(!isEmpty(L) && "Left tree cannot be empty to have a height >= 2");
- TreeTy* LL = Left(L);
- TreeTy* LR = Right(L);
+ TreeTy *LL = getLeft(L);
+ TreeTy *LR = getRight(L);
- if (Height(LL) >= Height(LR))
- return CreateNode(LL, L, CreateNode(LR,V,R));
+ if (getHeight(LL) >= getHeight(LR))
+ return createNode(LL, L, createNode(LR,V,R));
assert(!isEmpty(LR) && "LR cannot be empty because it has a height >= 1");
- TreeTy* LRL = Left(LR);
- TreeTy* LRR = Right(LR);
+ TreeTy *LRL = getLeft(LR);
+ TreeTy *LRR = getRight(LR);
- return CreateNode(CreateNode(LL,L,LRL), LR, CreateNode(LRR,V,R));
+ return createNode(createNode(LL,L,LRL), LR, createNode(LRR,V,R));
}
else if (hr > hl + 2) {
assert(!isEmpty(R) && "Right tree cannot be empty to have a height >= 2");
- TreeTy* RL = Left(R);
- TreeTy* RR = Right(R);
+ TreeTy *RL = getLeft(R);
+ TreeTy *RR = getRight(R);
- if (Height(RR) >= Height(RL))
- return CreateNode(CreateNode(L,V,RL), R, RR);
+ if (getHeight(RR) >= getHeight(RL))
+ return createNode(createNode(L,V,RL), R, RR);
assert(!isEmpty(RL) && "RL cannot be empty because it has a height >= 1");
- TreeTy* RLL = Left(RL);
- TreeTy* RLR = Right(RL);
+ TreeTy *RLL = getLeft(RL);
+ TreeTy *RLR = getRight(RL);
- return CreateNode(CreateNode(L,V,RLL), RL, CreateNode(RLR,R,RR));
+ return createNode(createNode(L,V,RLL), RL, createNode(RLR,R,RR));
}
else
- return CreateNode(L,V,R);
+ return createNode(L,V,R);
}
- /// Add_internal - Creates a new tree that includes the specified
+ /// add_internal - Creates a new tree that includes the specified
/// data and the data from the original tree. If the original tree
/// already contained the data item, the original tree is returned.
- TreeTy* Add_internal(value_type_ref V, TreeTy* T) {
+ TreeTy* add_internal(value_type_ref V, TreeTy* T) {
if (isEmpty(T))
- return CreateNode(T, V, T);
-
+ return createNode(T, V, T);
assert(!T->isMutable());
key_type_ref K = ImutInfo::KeyOfValue(V);
- key_type_ref KCurrent = ImutInfo::KeyOfValue(Value(T));
+ key_type_ref KCurrent = ImutInfo::KeyOfValue(getValue(T));
if (ImutInfo::isEqual(K,KCurrent))
- return CreateNode(Left(T), V, Right(T));
+ return createNode(getLeft(T), V, getRight(T));
else if (ImutInfo::isLess(K,KCurrent))
- return Balance(Add_internal(V,Left(T)), Value(T), Right(T));
+ return balanceTree(add_internal(V, getLeft(T)), getValue(T), getRight(T));
else
- return Balance(Left(T), Value(T), Add_internal(V,Right(T)));
+ return balanceTree(getLeft(T), getValue(T), add_internal(V, getRight(T)));
}
- /// Remove_internal - Creates a new tree that includes all the data
+ /// remove_internal - Creates a new tree that includes all the data
/// from the original tree except the specified data. If the
/// specified data did not exist in the original tree, the original
/// tree is returned.
- TreeTy* Remove_internal(key_type_ref K, TreeTy* T) {
+ TreeTy* remove_internal(key_type_ref K, TreeTy* T) {
if (isEmpty(T))
return T;
assert(!T->isMutable());
- key_type_ref KCurrent = ImutInfo::KeyOfValue(Value(T));
+ key_type_ref KCurrent = ImutInfo::KeyOfValue(getValue(T));
- if (ImutInfo::isEqual(K,KCurrent))
- return CombineLeftRightTrees(Left(T),Right(T));
- else if (ImutInfo::isLess(K,KCurrent))
- return Balance(Remove_internal(K,Left(T)), Value(T), Right(T));
- else
- return Balance(Left(T), Value(T), Remove_internal(K,Right(T)));
+ if (ImutInfo::isEqual(K,KCurrent)) {
+ return combineTrees(getLeft(T), getRight(T));
+ } else if (ImutInfo::isLess(K,KCurrent)) {
+ return balanceTree(remove_internal(K, getLeft(T)),
+ getValue(T), getRight(T));
+ } else {
+ return balanceTree(getLeft(T), getValue(T),
+ remove_internal(K, getRight(T)));
+ }
}
- TreeTy* CombineLeftRightTrees(TreeTy* L, TreeTy* R) {
- if (isEmpty(L)) return R;
- if (isEmpty(R)) return L;
-
+ TreeTy* combineTrees(TreeTy* L, TreeTy* R) {
+ if (isEmpty(L))
+ return R;
+ if (isEmpty(R))
+ return L;
TreeTy* OldNode;
- TreeTy* NewRight = RemoveMinBinding(R,OldNode);
- return Balance(L,Value(OldNode),NewRight);
+ TreeTy* newRight = removeMinBinding(R,OldNode);
+ return balanceTree(L, getValue(OldNode), newRight);
}
- TreeTy* RemoveMinBinding(TreeTy* T, TreeTy*& NodeRemoved) {
+ TreeTy* removeMinBinding(TreeTy* T, TreeTy*& Noderemoved) {
assert(!isEmpty(T));
-
- if (isEmpty(Left(T))) {
- NodeRemoved = T;
- return Right(T);
+ if (isEmpty(getLeft(T))) {
+ Noderemoved = T;
+ return getRight(T);
}
-
- return Balance(RemoveMinBinding(Left(T),NodeRemoved),Value(T),Right(T));
+ return balanceTree(removeMinBinding(getLeft(T), Noderemoved),
+ getValue(T), getRight(T));
}
- /// MarkImmutable - Clears the mutable bits of a root and all of its
+ /// markImmutable - Clears the mutable bits of a root and all of its
/// descendants.
- void MarkImmutable(TreeTy* T) {
+ void markImmutable(TreeTy* T) {
if (!T || !T->isMutable())
return;
-
- T->MarkImmutable();
- MarkImmutable(Left(T));
- MarkImmutable(Right(T));
+ T->markImmutable();
+ markImmutable(getLeft(T));
+ markImmutable(getRight(T));
}
public:
- TreeTy *GetCanonicalTree(TreeTy *TNew) {
+ TreeTy *getCanonicalTree(TreeTy *TNew) {
if (!TNew)
- return NULL;
-
- // Search the FoldingSet bucket for a Tree with the same digest.
- FoldingSetNodeID ID;
- unsigned digest = TNew->ComputeDigest();
- ID.AddInteger(digest);
- unsigned hash = ID.ComputeHash();
-
- typename CacheTy::bucket_iterator I = Cache.bucket_begin(hash);
- typename CacheTy::bucket_iterator E = Cache.bucket_end(hash);
-
- for (; I != E; ++I) {
- TreeTy *T = &*I;
-
- if (T->ComputeDigest() != digest)
- continue;
-
- // We found a collision. Perform a comparison of Contents('T')
- // with Contents('TNew')
- typename TreeTy::iterator TI = T->begin(), TE = T->end();
-
- if (!CompareTreeWithSection(TNew, TI, TE))
- continue;
-
- if (TI != TE)
- continue; // T has more contents than TNew.
-
- // Trees did match! Return 'T'.
- return T;
+ return 0;
+
+ if (TNew->IsCanonicalized)
+ return TNew;
+
+ // Search the hashtable for another tree with the same digest, and
+ // if find a collision compare those trees by their contents.
+ unsigned digest = TNew->computeDigest();
+ TreeTy *&entry = Cache[digest];
+ do {
+ if (!entry)
+ break;
+ for (TreeTy *T = entry ; T != 0; T = T->next) {
+ // Compare the Contents('T') with Contents('TNew')
+ typename TreeTy::iterator TI = T->begin(), TE = T->end();
+ if (!compareTreeWithSection(TNew, TI, TE))
+ continue;
+ if (TI != TE)
+ continue; // T has more contents than TNew.
+ // Trees did match! Return 'T'.
+ if (TNew->refCount == 0)
+ TNew->destroy();
+ return T;
+ }
+ entry->prev = TNew;
+ TNew->next = entry;
}
+ while (false);
- // 'TNew' is the only tree of its kind. Return it.
- Cache.InsertNode(TNew, (void*) &*Cache.bucket_end(hash));
+ entry = TNew;
+ TNew->IsCanonicalized = true;
return TNew;
}
};
-
//===----------------------------------------------------------------------===//
// Immutable AVL-Tree Iterators.
//===----------------------------------------------------------------------===//
@@ -635,19 +667,17 @@ public:
}
- bool AtEnd() const { return stack.empty(); }
+ bool atEnd() const { return stack.empty(); }
- bool AtBeginning() const {
+ bool atBeginning() const {
return stack.size() == 1 && getVisitState() == VisitedNone;
}
- void SkipToParent() {
+ void skipToParent() {
assert(!stack.empty());
stack.pop_back();
-
if (stack.empty())
return;
-
switch (getVisitState()) {
case VisitedNone:
stack.back() |= VisitedLeft;
@@ -663,11 +693,9 @@ public:
inline bool operator==(const _Self& x) const {
if (stack.size() != x.stack.size())
return false;
-
for (unsigned i = 0 ; i < stack.size(); i++)
if (stack[i] != x.stack[i])
return false;
-
return true;
}
@@ -675,70 +703,52 @@ public:
_Self& operator++() {
assert(!stack.empty());
-
TreeTy* Current = reinterpret_cast<TreeTy*>(stack.back() & ~Flags);
assert(Current);
-
switch (getVisitState()) {
case VisitedNone:
if (TreeTy* L = Current->getLeft())
stack.push_back(reinterpret_cast<uintptr_t>(L));
else
stack.back() |= VisitedLeft;
-
break;
-
case VisitedLeft:
if (TreeTy* R = Current->getRight())
stack.push_back(reinterpret_cast<uintptr_t>(R));
else
stack.back() |= VisitedRight;
-
break;
-
case VisitedRight:
- SkipToParent();
+ skipToParent();
break;
-
default:
assert(false && "Unreachable.");
}
-
return *this;
}
_Self& operator--() {
assert(!stack.empty());
-
TreeTy* Current = reinterpret_cast<TreeTy*>(stack.back() & ~Flags);
assert(Current);
-
switch (getVisitState()) {
case VisitedNone:
stack.pop_back();
break;
-
case VisitedLeft:
stack.back() &= ~Flags; // Set state to "VisitedNone."
-
if (TreeTy* L = Current->getLeft())
stack.push_back(reinterpret_cast<uintptr_t>(L) | VisitedRight);
-
break;
-
case VisitedRight:
stack.back() &= ~Flags;
stack.back() |= VisitedLeft;
-
if (TreeTy* R = Current->getRight())
stack.push_back(reinterpret_cast<uintptr_t>(R) | VisitedRight);
-
break;
-
default:
assert(false && "Unreachable.");
}
-
return *this;
}
};
@@ -769,7 +779,7 @@ public:
inline _Self& operator++() {
do ++InternalItr;
- while (!InternalItr.AtEnd() &&
+ while (!InternalItr.atEnd() &&
InternalItr.getVisitState() != InternalIteratorTy::VisitedLeft);
return *this;
@@ -777,16 +787,16 @@ public:
inline _Self& operator--() {
do --InternalItr;
- while (!InternalItr.AtBeginning() &&
+ while (!InternalItr.atBeginning() &&
InternalItr.getVisitState() != InternalIteratorTy::VisitedLeft);
return *this;
}
- inline void SkipSubTree() {
- InternalItr.SkipToParent();
+ inline void skipSubTree() {
+ InternalItr.skipToParent();
- while (!InternalItr.AtEnd() &&
+ while (!InternalItr.atEnd() &&
InternalItr.getVisitState() != InternalIteratorTy::VisitedLeft)
++InternalItr;
}
@@ -927,7 +937,23 @@ public:
/// should use a Factory object to create sets instead of directly
 /// invoking the constructor, but there are cases where making this
/// constructor public is useful.
- explicit ImmutableSet(TreeTy* R) : Root(R) {}
+ explicit ImmutableSet(TreeTy* R) : Root(R) {
+ if (Root) { Root->retain(); }
+ }
+ ImmutableSet(const ImmutableSet &X) : Root(X.Root) {
+ if (Root) { Root->retain(); }
+ }
+ ImmutableSet &operator=(const ImmutableSet &X) {
+ if (Root != X.Root) {
+ if (X.Root) { X.Root->retain(); }
+ if (Root) { Root->release(); }
+ Root = X.Root;
+ }
+ return *this;
+ }
+ ~ImmutableSet() {
+ if (Root) { Root->release(); }
+ }
class Factory {
typename TreeTy::Factory F;
@@ -940,33 +966,33 @@ public:
Factory(BumpPtrAllocator& Alloc, bool canonicalize = true)
: F(Alloc), Canonicalize(canonicalize) {}
- /// GetEmptySet - Returns an immutable set that contains no elements.
- ImmutableSet GetEmptySet() {
- return ImmutableSet(F.GetEmptyTree());
+ /// getEmptySet - Returns an immutable set that contains no elements.
+ ImmutableSet getEmptySet() {
+ return ImmutableSet(F.getEmptyTree());
}
- /// Add - Creates a new immutable set that contains all of the values
+ /// add - Creates a new immutable set that contains all of the values
/// of the original set with the addition of the specified value. If
/// the original set already included the value, then the original set is
/// returned and no memory is allocated. The time and space complexity
/// of this operation is logarithmic in the size of the original set.
/// The memory allocated to represent the set is released when the
/// factory object that created the set is destroyed.
- ImmutableSet Add(ImmutableSet Old, value_type_ref V) {
- TreeTy *NewT = F.Add(Old.Root, V);
- return ImmutableSet(Canonicalize ? F.GetCanonicalTree(NewT) : NewT);
+ ImmutableSet add(ImmutableSet Old, value_type_ref V) {
+ TreeTy *NewT = F.add(Old.Root, V);
+ return ImmutableSet(Canonicalize ? F.getCanonicalTree(NewT) : NewT);
}
- /// Remove - Creates a new immutable set that contains all of the values
+ /// remove - Creates a new immutable set that contains all of the values
/// of the original set with the exception of the specified value. If
/// the original set did not contain the value, the original set is
/// returned and no memory is allocated. The time and space complexity
/// of this operation is logarithmic in the size of the original set.
/// The memory allocated to represent the set is released when the
/// factory object that created the set is destroyed.
- ImmutableSet Remove(ImmutableSet Old, value_type_ref V) {
- TreeTy *NewT = F.Remove(Old.Root, V);
- return ImmutableSet(Canonicalize ? F.GetCanonicalTree(NewT) : NewT);
+ ImmutableSet remove(ImmutableSet Old, value_type_ref V) {
+ TreeTy *NewT = F.remove(Old.Root, V);
+ return ImmutableSet(Canonicalize ? F.getCanonicalTree(NewT) : NewT);
}
BumpPtrAllocator& getAllocator() { return F.getAllocator(); }
@@ -978,20 +1004,21 @@ public:
friend class Factory;
- /// contains - Returns true if the set contains the specified value.
+ /// Returns true if the set contains the specified value.
bool contains(value_type_ref V) const {
return Root ? Root->contains(V) : false;
}
- bool operator==(ImmutableSet RHS) const {
+ bool operator==(const ImmutableSet &RHS) const {
return Root && RHS.Root ? Root->isEqual(*RHS.Root) : Root == RHS.Root;
}
- bool operator!=(ImmutableSet RHS) const {
+ bool operator!=(const ImmutableSet &RHS) const {
return Root && RHS.Root ? Root->isNotEqual(*RHS.Root) : Root != RHS.Root;
}
TreeTy *getRoot() {
+ if (Root) { Root->retain(); }
return Root;
}
@@ -1049,7 +1076,7 @@ public:
// For testing.
//===--------------------------------------------------===//
- void verify() const { if (Root) Root->verify(); }
+ void validateTree() const { if (Root) Root->validateTree(); }
};
} // end namespace llvm
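For illustration only (not part of this patch): a minimal usage sketch of the renamed camelCase factory interface shown above, assuming an int element type and the BumpPtrAllocator constructor from this hunk.

    #include "llvm/ADT/ImmutableSet.h"
    #include "llvm/Support/Allocator.h"
    using namespace llvm;

    void immutableSetExample() {
      BumpPtrAllocator Alloc;
      ImmutableSet<int>::Factory F(Alloc);
      ImmutableSet<int> Empty = F.getEmptySet();
      ImmutableSet<int> S1 = F.add(Empty, 1);   // {1}
      ImmutableSet<int> S2 = F.add(S1, 2);      // {1, 2}; S1 is unchanged
      bool HasOne = S2.contains(1);             // true
      ImmutableSet<int> S3 = F.remove(S2, 1);   // {2}
      (void)HasOne; (void)S3;
    }

The add and remove calls never mutate their operands; each returns a new set that shares structure with the old one.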
diff --git a/include/llvm/ADT/InMemoryStruct.h b/include/llvm/ADT/InMemoryStruct.h
new file mode 100644
index 0000000..a560845
--- /dev/null
+++ b/include/llvm/ADT/InMemoryStruct.h
@@ -0,0 +1,77 @@
+//===- InMemoryStruct.h - Indirect Struct Access Smart Pointer --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_INMEMORYSTRUCT_H
+#define LLVM_ADT_INMEMORYSTRUCT_H
+
+#include <cassert>
+
+namespace llvm {
+
+/// \brief Helper object for abstracting access to an in-memory structure which
+/// may require some kind of temporary storage.
+///
+/// This class is designed to be used for accessing file data structures which
+/// in the common case can be accessed from a direct pointer to a memory mapped
+/// object, but which in some cases may require indirect access to a temporary
+/// structure (which, for example, may have undergone endianness translation).
+template<typename T>
+class InMemoryStruct {
+ typedef T value_type;
+ typedef value_type &reference;
+ typedef value_type *pointer;
+ typedef const value_type &const_reference;
+ typedef const value_type *const_pointer;
+
+ /// \brief The smart pointer target.
+ value_type *Target;
+
+ /// \brief A temporary object which can be used as a target of the smart
+ /// pointer.
+ value_type Contents;
+
+private:
+
+public:
+ InMemoryStruct() : Target(0) {}
+ InMemoryStruct(reference Value) : Target(&Contents), Contents(Value) {}
+ InMemoryStruct(pointer Value) : Target(Value) {}
+ InMemoryStruct(const InMemoryStruct<T> &Value) { *this = Value; }
+
+ void operator=(const InMemoryStruct<T> &Value) {
+ if (Value.Target != &Value.Contents) {
+ Target = Value.Target;
+ } else {
+ Target = &Contents;
+ Contents = Value.Contents;
+ }
+ }
+
+ const_reference operator*() const {
+ assert(Target && "Cannot dereference null pointer");
+ return *Target;
+ }
+ reference operator*() {
+ assert(Target && "Cannot dereference null pointer");
+ return *Target;
+ }
+
+ const_pointer operator->() const {
+ return Target;
+ }
+ pointer operator->() {
+ return Target;
+ }
+
+ operator bool() const { return Target != 0; }
+};
+
+}
+
+#endif
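For illustration only (not part of this patch): a hypothetical sketch of how InMemoryStruct can hand out either a direct pointer into a mapped buffer or a by-value temporary; FileHeader and readHeader are made-up names.

    #include "llvm/ADT/InMemoryStruct.h"

    struct FileHeader { unsigned Magic; unsigned NumSections; };

    llvm::InMemoryStruct<FileHeader> readHeader(FileHeader *Mapped,
                                                bool NeedsTranslation) {
      if (!NeedsTranslation)
        return llvm::InMemoryStruct<FileHeader>(Mapped); // points into the buffer
      FileHeader Tmp = *Mapped;
      // ... e.g. byte-swap the fields of Tmp here ...
      return llvm::InMemoryStruct<FileHeader>(Tmp);      // carries its own copy
    }

    void useHeader(FileHeader *Mapped) {
      llvm::InMemoryStruct<FileHeader> H = readHeader(Mapped, true);
      if (H) {                         // operator bool: target is non-null
        unsigned N = H->NumSections;   // operator-> forwards to the target
        (void)N;
      }
    }

Either way the caller dereferences the smart pointer the same way; only the storage behind it differs.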
diff --git a/include/llvm/ADT/IndexedMap.h b/include/llvm/ADT/IndexedMap.h
index 89f0dfa..87126ea 100644
--- a/include/llvm/ADT/IndexedMap.h
+++ b/include/llvm/ADT/IndexedMap.h
@@ -55,6 +55,14 @@ namespace llvm {
return storage_[toIndex_(n)];
}
+ void reserve(typename StorageT::size_type s) {
+ storage_.reserve(s);
+ }
+
+ void resize(typename StorageT::size_type s) {
+ storage_.resize(s, nullVal_);
+ }
+
void clear() {
storage_.clear();
}
@@ -62,7 +70,11 @@ namespace llvm {
void grow(IndexT n) {
unsigned NewSize = toIndex_(n) + 1;
if (NewSize > storage_.size())
- storage_.resize(NewSize, nullVal_);
+ resize(NewSize);
+ }
+
+ bool inBounds(IndexT n) const {
+ return toIndex_(n) < storage_.size();
}
typename StorageT::size_type size() const {
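For illustration only (not part of this patch): a small sketch of the new reserve/resize/inBounds helpers, assuming the default identity key functor and the null-value constructor IndexedMap already provides.

    #include "llvm/ADT/IndexedMap.h"

    void indexedMapExample() {
      llvm::IndexedMap<int> Map(/*nullVal=*/-1);
      Map.reserve(64);      // reserves storage only; no entries are created
      Map.resize(16);       // indices 0..15 now exist, filled with -1
      Map.grow(20);         // ensures index 20 is valid
      bool InRange    = Map.inBounds(20);   // true
      bool OutOfRange = Map.inBounds(64);   // false: reserve() added no entries
      Map[20] = 7;
      (void)InRange; (void)OutOfRange;
    }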
diff --git a/include/llvm/ADT/IntEqClasses.h b/include/llvm/ADT/IntEqClasses.h
new file mode 100644
index 0000000..8e75c48
--- /dev/null
+++ b/include/llvm/ADT/IntEqClasses.h
@@ -0,0 +1,88 @@
+//===-- llvm/ADT/IntEqClasses.h - Equiv. Classes of Integers ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Equivalence classes for small integers. This is a mapping of the integers
+// 0 .. N-1 into M equivalence classes numbered 0 .. M-1.
+//
+// Initially each integer has its own equivalence class. Classes are joined by
+// passing a representative member of each class to join().
+//
+// Once the classes are built, compress() will number them 0 .. M-1 and prevent
+// further changes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_INTEQCLASSES_H
+#define LLVM_ADT_INTEQCLASSES_H
+
+#include "llvm/ADT/SmallVector.h"
+
+namespace llvm {
+
+class IntEqClasses {
+ /// EC - When uncompressed, map each integer to a smaller member of its
+ /// equivalence class. The class leader is the smallest member and maps to
+ /// itself.
+ ///
+ /// When compressed, EC[i] is the equivalence class of i.
+ SmallVector<unsigned, 8> EC;
+
+ /// NumClasses - The number of equivalence classes when compressed, or 0 when
+ /// uncompressed.
+ unsigned NumClasses;
+
+public:
+ /// IntEqClasses - Create an equivalence class mapping for 0 .. N-1.
+ IntEqClasses(unsigned N = 0) : NumClasses(0) { grow(N); }
+
+ /// grow - Increase capacity to hold 0 .. N-1, putting new integers in unique
+ /// equivalence classes.
+ /// This requires an uncompressed map.
+ void grow(unsigned N);
+
+ /// clear - Clear all classes so that grow() will assign a unique class to
+ /// every integer.
+ void clear() {
+ EC.clear();
+ NumClasses = 0;
+ }
+
+ /// join - Join the equivalence classes of a and b. After joining classes,
+ /// findLeader(a) == findLeader(b).
+ /// This requires an uncompressed map.
+ void join(unsigned a, unsigned b);
+
+ /// findLeader - Compute the leader of a's equivalence class. This is the
+ /// smallest member of the class.
+ /// This requires an uncompressed map.
+ unsigned findLeader(unsigned a) const;
+
+ /// compress - Compress equivalence classes by numbering them 0 .. M.
+ /// This makes the equivalence class map immutable.
+ void compress();
+
+ /// getNumClasses - Return the number of equivalence classes after compress()
+ /// was called.
+ unsigned getNumClasses() const { return NumClasses; }
+
+ /// operator[] - Return a's equivalence class number, 0 .. getNumClasses()-1.
+ /// This requires a compressed map.
+ unsigned operator[](unsigned a) const {
+ assert(NumClasses && "operator[] called before compress()");
+ return EC[a];
+ }
+
+ /// uncompress - Change back to the uncompressed representation that allows
+ /// editing.
+ void uncompress();
+};
+
+} // End llvm namespace
+
+#endif
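For illustration only (not part of this patch): a sketch of the join/compress workflow described in the comments above.

    #include "llvm/ADT/IntEqClasses.h"

    void intEqClassesExample() {
      llvm::IntEqClasses EC(5);           // one class per integer 0..4
      EC.join(0, 3);                      // {0,3} {1} {2} {4}
      EC.join(1, 2);                      // {0,3} {1,2} {4}
      unsigned Leader = EC.findLeader(3); // 0, the smallest member of its class
      EC.compress();                      // classes renumbered 0..M-1, map frozen
      unsigned M = EC.getNumClasses();    // 3
      unsigned ClassOf4 = EC[4];          // some id in [0, M); the exact value
                                          // is up to the implementation
      (void)Leader; (void)M; (void)ClassOf4;
    }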
diff --git a/include/llvm/ADT/IntervalMap.h b/include/llvm/ADT/IntervalMap.h
new file mode 100644
index 0000000..79f24d3
--- /dev/null
+++ b/include/llvm/ADT/IntervalMap.h
@@ -0,0 +1,2139 @@
+//===- llvm/ADT/IntervalMap.h - A sorted interval map -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a coalescing interval map for small objects.
+//
+// KeyT objects are mapped to ValT objects. Intervals of keys that map to the
+// same value are represented in a compressed form.
+//
+// Iterators provide ordered access to the compressed intervals rather than the
+// individual keys, and insert and erase operations use key intervals as well.
+//
+// Like SmallVector, IntervalMap will store the first N intervals in the map
+// object itself without any allocations. When space is exhausted it switches to
+// a B+-tree representation with very small overhead for small key and value
+// objects.
+//
+// A Traits class specifies how keys are compared. It also allows IntervalMap to
+// work with both closed and half-open intervals.
+//
+// Keys and values are not stored next to each other in a std::pair, so we don't
+// provide such a value_type. Dereferencing iterators only returns the mapped
+// value. The interval bounds are accessible through the start() and stop()
+// iterator methods.
+//
+// IntervalMap is optimized for small key and value objects; 4 or 8 bytes each
+// is the optimal size. For large objects use std::map instead.
+//
+//===----------------------------------------------------------------------===//
+//
+// Synopsis:
+//
+// template <typename KeyT, typename ValT, unsigned N, typename Traits>
+// class IntervalMap {
+// public:
+// typedef KeyT key_type;
+// typedef ValT mapped_type;
+// typedef RecyclingAllocator<...> Allocator;
+// class iterator;
+// class const_iterator;
+//
+// explicit IntervalMap(Allocator&);
+//   ~IntervalMap();
+//
+// bool empty() const;
+// KeyT start() const;
+// KeyT stop() const;
+//   ValT lookup(KeyT x, ValT NotFound = ValT()) const;
+//
+// const_iterator begin() const;
+// const_iterator end() const;
+// iterator begin();
+// iterator end();
+// const_iterator find(KeyT x) const;
+// iterator find(KeyT x);
+//
+// void insert(KeyT a, KeyT b, ValT y);
+// void clear();
+// };
+//
+// template <typename KeyT, typename ValT, unsigned N, typename Traits>
+// class IntervalMap::const_iterator :
+// public std::iterator<std::bidirectional_iterator_tag, ValT> {
+// public:
+// bool operator==(const const_iterator &) const;
+// bool operator!=(const const_iterator &) const;
+// bool valid() const;
+//
+// const KeyT &start() const;
+// const KeyT &stop() const;
+// const ValT &value() const;
+// const ValT &operator*() const;
+// const ValT *operator->() const;
+//
+// const_iterator &operator++();
+//   const_iterator operator++(int);
+//   const_iterator &operator--();
+//   const_iterator operator--(int);
+// void goToBegin();
+// void goToEnd();
+// void find(KeyT x);
+// void advanceTo(KeyT x);
+// };
+//
+// template <typename KeyT, typename ValT, unsigned N, typename Traits>
+// class IntervalMap::iterator : public const_iterator {
+// public:
+//   void insert(KeyT a, KeyT b, ValT y);
+// void erase();
+// };
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_INTERVALMAP_H
+#define LLVM_ADT_INTERVALMAP_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/RecyclingAllocator.h"
+#include <iterator>
+
+namespace llvm {
+
+
+//===----------------------------------------------------------------------===//
+//--- Key traits ---//
+//===----------------------------------------------------------------------===//
+//
+// The IntervalMap works with closed or half-open intervals.
+// Adjacent intervals that map to the same value are coalesced.
+//
+// The IntervalMapInfo traits class is used to determine if a key is contained
+// in an interval, and if two intervals are adjacent so they can be coalesced.
+// The provided implementation works for closed integer intervals, other keys
+// probably need a specialized version.
+//
+// The point x is contained in [a;b] when !startLess(x, a) && !stopLess(b, x).
+//
+// It is assumed that (a;b] half-open intervals are not used, only [a;b) is
+// allowed. This is so that stopLess(a, b) can be used to determine if two
+// intervals overlap.
+//
+//===----------------------------------------------------------------------===//
+
+template <typename T>
+struct IntervalMapInfo {
+
+ /// startLess - Return true if x is not in [a;b].
+ /// This is x < a both for closed intervals and for [a;b) half-open intervals.
+ static inline bool startLess(const T &x, const T &a) {
+ return x < a;
+ }
+
+ /// stopLess - Return true if x is not in [a;b].
+ /// This is b < x for a closed interval, b <= x for [a;b) half-open intervals.
+ static inline bool stopLess(const T &b, const T &x) {
+ return b < x;
+ }
+
+ /// adjacent - Return true when the intervals [x;a] and [b;y] can coalesce.
+ /// This is a+1 == b for closed intervals, a == b for half-open intervals.
+ static inline bool adjacent(const T &a, const T &b) {
+ return a+1 == b;
+ }
+
+};
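As an illustration of the rules above (not part of this header), a traits class for half-open [a;b) integer intervals could look like this hypothetical sketch:

    template <typename T>
    struct HalfOpenIntervalMapInfo {
      // x is outside [a;b) when x < a, the same test as the closed case.
      static inline bool startLess(const T &x, const T &a) { return x < a; }
      // For [a;b) intervals, x == b is already past the end.
      static inline bool stopLess(const T &b, const T &x) { return b <= x; }
      // [x;a) and [b;y) touch exactly when a == b.
      static inline bool adjacent(const T &a, const T &b) { return a == b; }
    };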
+
+/// IntervalMapImpl - Namespace used for IntervalMap implementation details.
+/// It should be considered private to the implementation.
+namespace IntervalMapImpl {
+
+// Forward declarations.
+template <typename, typename, unsigned, typename> class LeafNode;
+template <typename, typename, unsigned, typename> class BranchNode;
+
+typedef std::pair<unsigned,unsigned> IdxPair;
+
+
+//===----------------------------------------------------------------------===//
+//--- IntervalMapImpl::NodeBase ---//
+//===----------------------------------------------------------------------===//
+//
+// Both leaf and branch nodes store vectors of pairs.
+// Leaves store ((KeyT, KeyT), ValT) pairs, branches use (NodeRef, KeyT).
+//
+// Keys and values are stored in separate arrays to avoid padding caused by
+// different object alignments. This also helps improve locality of reference
+// when searching the keys.
+//
+// The nodes don't know how many elements they contain - that information is
+// stored elsewhere. Omitting the size field prevents padding and allows a node
+// to fill the allocated cache lines completely.
+//
+// These are typical key and value sizes, the node branching factor (N), and
+// wasted space when nodes are sized to fit in three cache lines (192 bytes):
+//
+// T1 T2 N Waste Used by
+// 4 4 24 0 Branch<4> (32-bit pointers)
+// 8 4 16 0 Leaf<4,4>, Branch<4>
+// 8 8 12 0 Leaf<4,8>, Branch<8>
+// 16 4 9 12 Leaf<8,4>
+// 16 8 8 0 Leaf<8,8>
+//
+//===----------------------------------------------------------------------===//
+
+template <typename T1, typename T2, unsigned N>
+class NodeBase {
+public:
+ enum { Capacity = N };
+
+ T1 first[N];
+ T2 second[N];
+
+ /// copy - Copy elements from another node.
+ /// @param Other Node elements are copied from.
+ /// @param i Beginning of the source range in other.
+ /// @param j Beginning of the destination range in this.
+ /// @param Count Number of elements to copy.
+ template <unsigned M>
+ void copy(const NodeBase<T1, T2, M> &Other, unsigned i,
+ unsigned j, unsigned Count) {
+ assert(i + Count <= M && "Invalid source range");
+ assert(j + Count <= N && "Invalid dest range");
+ for (unsigned e = i + Count; i != e; ++i, ++j) {
+ first[j] = Other.first[i];
+ second[j] = Other.second[i];
+ }
+ }
+
+ /// moveLeft - Move elements to the left.
+ /// @param i Beginning of the source range.
+ /// @param j Beginning of the destination range.
+ /// @param Count Number of elements to copy.
+ void moveLeft(unsigned i, unsigned j, unsigned Count) {
+ assert(j <= i && "Use moveRight shift elements right");
+ copy(*this, i, j, Count);
+ }
+
+ /// moveRight - Move elements to the right.
+ /// @param i Beginning of the source range.
+ /// @param j Beginning of the destination range.
+ /// @param Count Number of elements to copy.
+ void moveRight(unsigned i, unsigned j, unsigned Count) {
+ assert(i <= j && "Use moveLeft shift elements left");
+ assert(j + Count <= N && "Invalid range");
+ while (Count--) {
+ first[j + Count] = first[i + Count];
+ second[j + Count] = second[i + Count];
+ }
+ }
+
+ /// erase - Erase elements [i;j).
+ /// @param i Beginning of the range to erase.
+ /// @param j End of the range. (Exclusive).
+ /// @param Size Number of elements in node.
+ void erase(unsigned i, unsigned j, unsigned Size) {
+ moveLeft(j, i, Size - j);
+ }
+
+ /// erase - Erase element at i.
+ /// @param i Index of element to erase.
+ /// @param Size Number of elements in node.
+ void erase(unsigned i, unsigned Size) {
+ erase(i, i+1, Size);
+ }
+
+ /// shift - Shift elements [i;size) 1 position to the right.
+ /// @param i Beginning of the range to move.
+ /// @param Size Number of elements in node.
+ void shift(unsigned i, unsigned Size) {
+ moveRight(i, i + 1, Size - i);
+ }
+
+ /// transferToLeftSib - Transfer elements to a left sibling node.
+ /// @param Size Number of elements in this.
+ /// @param Sib Left sibling node.
+ /// @param SSize Number of elements in sib.
+ /// @param Count Number of elements to transfer.
+ void transferToLeftSib(unsigned Size, NodeBase &Sib, unsigned SSize,
+ unsigned Count) {
+ Sib.copy(*this, 0, SSize, Count);
+ erase(0, Count, Size);
+ }
+
+ /// transferToRightSib - Transfer elements to a right sibling node.
+ /// @param Size Number of elements in this.
+ /// @param Sib Right sibling node.
+ /// @param SSize Number of elements in sib.
+ /// @param Count Number of elements to transfer.
+ void transferToRightSib(unsigned Size, NodeBase &Sib, unsigned SSize,
+ unsigned Count) {
+ Sib.moveRight(0, Count, SSize);
+ Sib.copy(*this, Size-Count, 0, Count);
+ }
+
+  /// adjustFromLeftSib - Adjust the number of elements in this node by moving
+ /// elements to or from a left sibling node.
+ /// @param Size Number of elements in this.
+  /// @param Sib Left sibling node.
+ /// @param SSize Number of elements in sib.
+ /// @param Add The number of elements to add to this node, possibly < 0.
+ /// @return Number of elements added to this node, possibly negative.
+ int adjustFromLeftSib(unsigned Size, NodeBase &Sib, unsigned SSize, int Add) {
+ if (Add > 0) {
+ // We want to grow, copy from sib.
+ unsigned Count = std::min(std::min(unsigned(Add), SSize), N - Size);
+ Sib.transferToRightSib(SSize, *this, Size, Count);
+ return Count;
+ } else {
+ // We want to shrink, copy to sib.
+ unsigned Count = std::min(std::min(unsigned(-Add), Size), N - SSize);
+ transferToLeftSib(Size, Sib, SSize, Count);
+ return -Count;
+ }
+ }
+};
+
+/// IntervalMapImpl::adjustSiblingSizes - Move elements between sibling nodes.
+/// @param Node Array of pointers to sibling nodes.
+/// @param Nodes Number of nodes.
+/// @param CurSize Array of current node sizes, will be overwritten.
+/// @param NewSize Array of desired node sizes.
+template <typename NodeT>
+void adjustSiblingSizes(NodeT *Node[], unsigned Nodes,
+ unsigned CurSize[], const unsigned NewSize[]) {
+ // Move elements right.
+ for (int n = Nodes - 1; n; --n) {
+ if (CurSize[n] == NewSize[n])
+ continue;
+ for (int m = n - 1; m != -1; --m) {
+ int d = Node[n]->adjustFromLeftSib(CurSize[n], *Node[m], CurSize[m],
+ NewSize[n] - CurSize[n]);
+ CurSize[m] -= d;
+ CurSize[n] += d;
+ // Keep going if the current node was exhausted.
+ if (CurSize[n] >= NewSize[n])
+ break;
+ }
+ }
+
+ if (Nodes == 0)
+ return;
+
+ // Move elements left.
+ for (unsigned n = 0; n != Nodes - 1; ++n) {
+ if (CurSize[n] == NewSize[n])
+ continue;
+ for (unsigned m = n + 1; m != Nodes; ++m) {
+ int d = Node[m]->adjustFromLeftSib(CurSize[m], *Node[n], CurSize[n],
+ CurSize[n] - NewSize[n]);
+ CurSize[m] += d;
+ CurSize[n] -= d;
+ // Keep going if the current node was exhausted.
+ if (CurSize[n] >= NewSize[n])
+ break;
+ }
+ }
+
+#ifndef NDEBUG
+ for (unsigned n = 0; n != Nodes; n++)
+ assert(CurSize[n] == NewSize[n] && "Insufficient element shuffle");
+#endif
+}
+
+/// IntervalMapImpl::distribute - Compute a new distribution of node elements
+/// after an overflow or underflow. Reserve space for a new element at Position,
+/// and compute the node that will hold Position after redistributing node
+/// elements.
+///
+/// It is required that
+///
+/// Elements == sum(CurSize), and
+/// Elements + Grow <= Nodes * Capacity.
+///
+/// NewSize[] will be filled in such that:
+///
+/// sum(NewSize) == Elements, and
+/// NewSize[i] <= Capacity.
+///
+/// The returned index is the node where Position will go, so:
+///
+/// sum(NewSize[0..idx-1]) <= Position
+/// sum(NewSize[0..idx]) >= Position
+///
+/// The last equality, sum(NewSize[0..idx]) == Position, can only happen when
+/// Grow is set and NewSize[idx] == Capacity-1. The index points to the node
+/// before the one holding the Position'th element where there is room for an
+/// insertion.
+///
+/// @param Nodes The number of nodes.
+/// @param Elements Total elements in all nodes.
+/// @param Capacity The capacity of each node.
+/// @param CurSize Array[Nodes] of current node sizes, or NULL.
+/// @param NewSize Array[Nodes] to receive the new node sizes.
+/// @param Position Insert position.
+/// @param Grow Reserve space for a new element at Position.
+/// @return (node, offset) for Position.
+IdxPair distribute(unsigned Nodes, unsigned Elements, unsigned Capacity,
+ const unsigned *CurSize, unsigned NewSize[],
+ unsigned Position, bool Grow);
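For illustration only (not part of this header, and the implementation lives elsewhere): a hypothetical call exercising the documented contract; only the stated post-conditions are checked, and the concrete sizes are made up.

    #include <cassert>

    void distributeContractExample() {
      using namespace llvm::IntervalMapImpl;
      unsigned CurSize[2] = { 8, 2 };             // 10 elements over 2 nodes
      unsigned NewSize[2];
      IdxPair Pos = distribute(/*Nodes=*/2, /*Elements=*/10, /*Capacity=*/8,
                               CurSize, NewSize, /*Position=*/3, /*Grow=*/true);
      assert(NewSize[0] + NewSize[1] == 10);      // sum(NewSize) == Elements
      assert(NewSize[0] <= 8 && NewSize[1] <= 8); // NewSize[i] <= Capacity
      (void)Pos; // Pos.first is the node holding Position, Pos.second the offset
    }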
+
+
+//===----------------------------------------------------------------------===//
+//--- IntervalMapImpl::NodeSizer ---//
+//===----------------------------------------------------------------------===//
+//
+// Compute node sizes from key and value types.
+//
+// The branching factors are chosen to make nodes fit in three cache lines.
+// This may not be possible if keys or values are very large. Such large objects
+// are handled correctly, but a std::map would probably give better performance.
+//
+//===----------------------------------------------------------------------===//
+
+enum {
+ // Cache line size. Most architectures have 32 or 64 byte cache lines.
+ // We use 64 bytes here because it provides good branching factors.
+ Log2CacheLine = 6,
+ CacheLineBytes = 1 << Log2CacheLine,
+ DesiredNodeBytes = 3 * CacheLineBytes
+};
+
+template <typename KeyT, typename ValT>
+struct NodeSizer {
+ enum {
+ // Compute the leaf node branching factor that makes a node fit in three
+ // cache lines. The branching factor must be at least 3, or some B+-tree
+ // balancing algorithms won't work.
+ // LeafSize can't be larger than CacheLineBytes. This is required by the
+ // PointerIntPair used by NodeRef.
+ DesiredLeafSize = DesiredNodeBytes /
+ static_cast<unsigned>(2*sizeof(KeyT)+sizeof(ValT)),
+ MinLeafSize = 3,
+ LeafSize = DesiredLeafSize > MinLeafSize ? DesiredLeafSize : MinLeafSize
+ };
+
+ typedef NodeBase<std::pair<KeyT, KeyT>, ValT, LeafSize> LeafBase;
+
+ enum {
+ // Now that we have the leaf branching factor, compute the actual allocation
+ // unit size by rounding up to a whole number of cache lines.
+ AllocBytes = (sizeof(LeafBase) + CacheLineBytes-1) & ~(CacheLineBytes-1),
+
+ // Determine the branching factor for branch nodes.
+ BranchSize = AllocBytes /
+ static_cast<unsigned>(sizeof(KeyT) + sizeof(void*))
+ };
+
+ /// Allocator - The recycling allocator used for both branch and leaf nodes.
+ /// This typedef is very likely to be identical for all IntervalMaps with
+ /// reasonably sized entries, so the same allocator can be shared among
+ /// different kinds of maps.
+ typedef RecyclingAllocator<BumpPtrAllocator, char,
+ AllocBytes, CacheLineBytes> Allocator;
+
+};
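As a worked example (not part of this header), assume 64-byte cache lines, 64-bit pointers, and KeyT = ValT = unsigned (4 bytes each):

    DesiredLeafSize  = 192 / (2*4 + 4) = 16   =>  LeafSize   = 16
    sizeof(LeafBase) = 16*8 + 16*4     = 192  =>  AllocBytes = 192
    BranchSize       = 192 / (4 + 8)   = 16

which matches the "8 4 16 0" row of the size table in the NodeBase comment above.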
+
+
+//===----------------------------------------------------------------------===//
+//--- IntervalMapImpl::NodeRef ---//
+//===----------------------------------------------------------------------===//
+//
+// B+-tree nodes can be leaves or branches, so we need a polymorphic node
+// pointer that can point to both kinds.
+//
+// All nodes are cache line aligned and the low 6 bits of a node pointer are
+// always 0. These bits are used to store the number of elements in the
+// referenced node. Besides saving space, placing node sizes in the parents
+// allow tree balancing algorithms to run without faulting cache lines for nodes
+// that may not need to be modified.
+//
+// A NodeRef doesn't know whether it references a leaf node or a branch node.
+// It is the responsibility of the caller to use the correct types.
+//
+// Nodes are never supposed to be empty, and it is invalid to store a node size
+// of 0 in a NodeRef. The valid range of sizes is 1-64.
+//
+//===----------------------------------------------------------------------===//
+
+class NodeRef {
+ struct CacheAlignedPointerTraits {
+ static inline void *getAsVoidPointer(void *P) { return P; }
+ static inline void *getFromVoidPointer(void *P) { return P; }
+ enum { NumLowBitsAvailable = Log2CacheLine };
+ };
+ PointerIntPair<void*, Log2CacheLine, unsigned, CacheAlignedPointerTraits> pip;
+
+public:
+ /// NodeRef - Create a null ref.
+ NodeRef() {}
+
+ /// operator bool - Detect a null ref.
+ operator bool() const { return pip.getOpaqueValue(); }
+
+ /// NodeRef - Create a reference to the node p with n elements.
+ template <typename NodeT>
+ NodeRef(NodeT *p, unsigned n) : pip(p, n - 1) {
+ assert(n <= NodeT::Capacity && "Size too big for node");
+ }
+
+ /// size - Return the number of elements in the referenced node.
+ unsigned size() const { return pip.getInt() + 1; }
+
+ /// setSize - Update the node size.
+ void setSize(unsigned n) { pip.setInt(n - 1); }
+
+ /// subtree - Access the i'th subtree reference in a branch node.
+ /// This depends on branch nodes storing the NodeRef array as their first
+ /// member.
+ NodeRef &subtree(unsigned i) const {
+ return reinterpret_cast<NodeRef*>(pip.getPointer())[i];
+ }
+
+ /// get - Dereference as a NodeT reference.
+ template <typename NodeT>
+ NodeT &get() const {
+ return *reinterpret_cast<NodeT*>(pip.getPointer());
+ }
+
+ bool operator==(const NodeRef &RHS) const {
+ if (pip == RHS.pip)
+ return true;
+ assert(pip.getPointer() != RHS.pip.getPointer() && "Inconsistent NodeRefs");
+ return false;
+ }
+
+ bool operator!=(const NodeRef &RHS) const {
+ return !operator==(RHS);
+ }
+};
+
+//===----------------------------------------------------------------------===//
+//--- IntervalMapImpl::LeafNode ---//
+//===----------------------------------------------------------------------===//
+//
+// Leaf nodes store up to N disjoint intervals with corresponding values.
+//
+// The intervals are kept sorted and fully coalesced so there are no adjacent
+// intervals mapping to the same value.
+//
+// These constraints are always satisfied:
+//
+// - Traits::stopLess(start(i), stop(i)) - Non-empty, sane intervals.
+//
+//  - Traits::stopLess(stop(i), start(i + 1)) - Sorted.
+//
+// - value(i) != value(i + 1) || !Traits::adjacent(stop(i), start(i + 1))
+// - Fully coalesced.
+//
+//===----------------------------------------------------------------------===//
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+class LeafNode : public NodeBase<std::pair<KeyT, KeyT>, ValT, N> {
+public:
+ const KeyT &start(unsigned i) const { return this->first[i].first; }
+ const KeyT &stop(unsigned i) const { return this->first[i].second; }
+ const ValT &value(unsigned i) const { return this->second[i]; }
+
+ KeyT &start(unsigned i) { return this->first[i].first; }
+ KeyT &stop(unsigned i) { return this->first[i].second; }
+ ValT &value(unsigned i) { return this->second[i]; }
+
+ /// findFrom - Find the first interval after i that may contain x.
+ /// @param i Starting index for the search.
+ /// @param Size Number of elements in node.
+ /// @param x Key to search for.
+ /// @return First index with !stopLess(key[i].stop, x), or size.
+ /// This is the first interval that can possibly contain x.
+ unsigned findFrom(unsigned i, unsigned Size, KeyT x) const {
+ assert(i <= Size && Size <= N && "Bad indices");
+ assert((i == 0 || Traits::stopLess(stop(i - 1), x)) &&
+ "Index is past the needed point");
+ while (i != Size && Traits::stopLess(stop(i), x)) ++i;
+ return i;
+ }
+
+ /// safeFind - Find an interval that is known to exist. This is the same as
+  /// findFrom except it is assumed that x is at least within range of the last
+ /// interval.
+ /// @param i Starting index for the search.
+ /// @param x Key to search for.
+ /// @return First index with !stopLess(key[i].stop, x), never size.
+ /// This is the first interval that can possibly contain x.
+ unsigned safeFind(unsigned i, KeyT x) const {
+ assert(i < N && "Bad index");
+ assert((i == 0 || Traits::stopLess(stop(i - 1), x)) &&
+ "Index is past the needed point");
+ while (Traits::stopLess(stop(i), x)) ++i;
+ assert(i < N && "Unsafe intervals");
+ return i;
+ }
+
+ /// safeLookup - Lookup mapped value for a safe key.
+ /// It is assumed that x is within range of the last entry.
+ /// @param x Key to search for.
+ /// @param NotFound Value to return if x is not in any interval.
+ /// @return The mapped value at x or NotFound.
+ ValT safeLookup(KeyT x, ValT NotFound) const {
+ unsigned i = safeFind(0, x);
+ return Traits::startLess(x, start(i)) ? NotFound : value(i);
+ }
+
+ unsigned insertFrom(unsigned &Pos, unsigned Size, KeyT a, KeyT b, ValT y);
+};
+
+/// insertFrom - Add mapping of [a;b] to y if possible, coalescing as much as
+/// possible. This may cause the node to grow by 1, or it may cause the node
+/// to shrink because of coalescing.
+/// @param Pos Starting index = findFrom(0, Size, a)
+/// @param Size Number of elements in node.
+/// @param a Interval start.
+/// @param b Interval stop.
+/// @param y Value to be mapped.
+/// @return (insert position, new size), or (i, Capacity+1) on overflow.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+unsigned LeafNode<KeyT, ValT, N, Traits>::
+insertFrom(unsigned &Pos, unsigned Size, KeyT a, KeyT b, ValT y) {
+ unsigned i = Pos;
+ assert(i <= Size && Size <= N && "Invalid index");
+ assert(!Traits::stopLess(b, a) && "Invalid interval");
+
+ // Verify the findFrom invariant.
+ assert((i == 0 || Traits::stopLess(stop(i - 1), a)));
+ assert((i == Size || !Traits::stopLess(stop(i), a)));
+ assert((i == Size || Traits::stopLess(b, start(i))) && "Overlapping insert");
+
+ // Coalesce with previous interval.
+ if (i && value(i - 1) == y && Traits::adjacent(stop(i - 1), a)) {
+ Pos = i - 1;
+ // Also coalesce with next interval?
+ if (i != Size && value(i) == y && Traits::adjacent(b, start(i))) {
+ stop(i - 1) = stop(i);
+ this->erase(i, Size);
+ return Size - 1;
+ }
+ stop(i - 1) = b;
+ return Size;
+ }
+
+ // Detect overflow.
+ if (i == N)
+ return N + 1;
+
+ // Add new interval at end.
+ if (i == Size) {
+ start(i) = a;
+ stop(i) = b;
+ value(i) = y;
+ return Size + 1;
+ }
+
+ // Try to coalesce with following interval.
+ if (value(i) == y && Traits::adjacent(b, start(i))) {
+ start(i) = a;
+ return Size;
+ }
+
+ // We must insert before i. Detect overflow.
+ if (Size == N)
+ return N + 1;
+
+ // Insert before i.
+ this->shift(i, Size);
+ start(i) = a;
+ stop(i) = b;
+ value(i) = y;
+ return Size + 1;
+}
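For illustration (not part of this header), this is how the cases above play out with the default closed-interval integer traits. Starting from a leaf holding [10;19] -> 1 and [30;39] -> 2:

    insertFrom of [20;24] -> 1 coalesces left:  [10;24] -> 1, [30;39] -> 2  (size unchanged)
    insertFrom of [25;29] -> 2 coalesces right: [10;24] -> 1, [25;39] -> 2  (size unchanged)

and in a leaf holding [10;19] -> 1 and [21;29] -> 1, inserting [20;20] -> 1 bridges both entries into a single [10;29] -> 1, shrinking the node by one element.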
+
+
+//===----------------------------------------------------------------------===//
+//--- IntervalMapImpl::BranchNode ---//
+//===----------------------------------------------------------------------===//
+//
+// A branch node stores references to 1--N subtrees all of the same height.
+//
+// The key array in a branch node holds the rightmost stop key of each subtree.
+// It is redundant to store the last stop key since it can be found in the
+// parent node, but doing so makes tree balancing a lot simpler.
+//
+// It is unusual for a branch node to only have one subtree, but it can happen
+// in the root node if it is smaller than the normal nodes.
+//
+// When all of the leaf nodes from all the subtrees are concatenated, they must
+// satisfy the same constraints as a single leaf node. They must be sorted,
+// sane, and fully coalesced.
+//
+//===----------------------------------------------------------------------===//
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+class BranchNode : public NodeBase<NodeRef, KeyT, N> {
+public:
+ const KeyT &stop(unsigned i) const { return this->second[i]; }
+ const NodeRef &subtree(unsigned i) const { return this->first[i]; }
+
+ KeyT &stop(unsigned i) { return this->second[i]; }
+ NodeRef &subtree(unsigned i) { return this->first[i]; }
+
+ /// findFrom - Find the first subtree after i that may contain x.
+ /// @param i Starting index for the search.
+ /// @param Size Number of elements in node.
+ /// @param x Key to search for.
+ /// @return First index with !stopLess(key[i], x), or size.
+ /// This is the first subtree that can possibly contain x.
+ unsigned findFrom(unsigned i, unsigned Size, KeyT x) const {
+ assert(i <= Size && Size <= N && "Bad indices");
+ assert((i == 0 || Traits::stopLess(stop(i - 1), x)) &&
+ "Index to findFrom is past the needed point");
+ while (i != Size && Traits::stopLess(stop(i), x)) ++i;
+ return i;
+ }
+
+ /// safeFind - Find a subtree that is known to exist. This is the same as
+  /// findFrom except it is assumed that x is in range.
+ /// @param i Starting index for the search.
+ /// @param x Key to search for.
+ /// @return First index with !stopLess(key[i], x), never size.
+ /// This is the first subtree that can possibly contain x.
+ unsigned safeFind(unsigned i, KeyT x) const {
+ assert(i < N && "Bad index");
+ assert((i == 0 || Traits::stopLess(stop(i - 1), x)) &&
+ "Index is past the needed point");
+ while (Traits::stopLess(stop(i), x)) ++i;
+ assert(i < N && "Unsafe intervals");
+ return i;
+ }
+
+  /// safeLookup - Get the subtree containing x, assuming that x is in range.
+ /// @param x Key to search for.
+ /// @return Subtree containing x
+ NodeRef safeLookup(KeyT x) const {
+ return subtree(safeFind(0, x));
+ }
+
+ /// insert - Insert a new (subtree, stop) pair.
+ /// @param i Insert position, following entries will be shifted.
+ /// @param Size Number of elements in node.
+ /// @param Node Subtree to insert.
+ /// @param Stop Last key in subtree.
+ void insert(unsigned i, unsigned Size, NodeRef Node, KeyT Stop) {
+ assert(Size < N && "branch node overflow");
+ assert(i <= Size && "Bad insert position");
+ this->shift(i, Size);
+ subtree(i) = Node;
+ stop(i) = Stop;
+ }
+};
+
+//===----------------------------------------------------------------------===//
+//--- IntervalMapImpl::Path ---//
+//===----------------------------------------------------------------------===//
+//
+// A Path is used by iterators to represent a position in a B+-tree, and the
+// path to get there from the root.
+//
+// The Path class also contains the tree navigation code that doesn't have to
+// be templatized.
+//
+//===----------------------------------------------------------------------===//
+
+class Path {
+ /// Entry - Each step in the path is a node pointer and an offset into that
+ /// node.
+ struct Entry {
+ void *node;
+ unsigned size;
+ unsigned offset;
+
+ Entry(void *Node, unsigned Size, unsigned Offset)
+ : node(Node), size(Size), offset(Offset) {}
+
+ Entry(NodeRef Node, unsigned Offset)
+ : node(&Node.subtree(0)), size(Node.size()), offset(Offset) {}
+
+ NodeRef &subtree(unsigned i) const {
+ return reinterpret_cast<NodeRef*>(node)[i];
+ }
+ };
+
+ /// path - The path entries, path[0] is the root node, path.back() is a leaf.
+ SmallVector<Entry, 4> path;
+
+public:
+ // Node accessors.
+ template <typename NodeT> NodeT &node(unsigned Level) const {
+ return *reinterpret_cast<NodeT*>(path[Level].node);
+ }
+ unsigned size(unsigned Level) const { return path[Level].size; }
+ unsigned offset(unsigned Level) const { return path[Level].offset; }
+ unsigned &offset(unsigned Level) { return path[Level].offset; }
+
+ // Leaf accessors.
+ template <typename NodeT> NodeT &leaf() const {
+ return *reinterpret_cast<NodeT*>(path.back().node);
+ }
+ unsigned leafSize() const { return path.back().size; }
+ unsigned leafOffset() const { return path.back().offset; }
+ unsigned &leafOffset() { return path.back().offset; }
+
+ /// valid - Return true if path is at a valid node, not at end().
+ bool valid() const {
+ return !path.empty() && path.front().offset < path.front().size;
+ }
+
+ /// height - Return the height of the tree corresponding to this path.
+ /// This matches map->height in a full path.
+ unsigned height() const { return path.size() - 1; }
+
+ /// subtree - Get the subtree referenced from Level. When the path is
+ /// consistent, node(Level + 1) == subtree(Level).
+ /// @param Level 0..height-1. The leaves have no subtrees.
+ NodeRef &subtree(unsigned Level) const {
+ return path[Level].subtree(path[Level].offset);
+ }
+
+  /// reset - Reset cached information about node(Level) from subtree(Level - 1).
+  /// @param Level 1..height. The node to update after the parent node changed.
+ void reset(unsigned Level) {
+ path[Level] = Entry(subtree(Level - 1), offset(Level));
+ }
+
+ /// push - Add entry to path.
+ /// @param Node Node to add, should be subtree(path.size()-1).
+ /// @param Offset Offset into Node.
+ void push(NodeRef Node, unsigned Offset) {
+ path.push_back(Entry(Node, Offset));
+ }
+
+ /// pop - Remove the last path entry.
+ void pop() {
+ path.pop_back();
+ }
+
+ /// setSize - Set the size of a node both in the path and in the tree.
+ /// @param Level 0..height. Note that setting the root size won't change
+ /// map->rootSize.
+ /// @param Size New node size.
+ void setSize(unsigned Level, unsigned Size) {
+ path[Level].size = Size;
+ if (Level)
+ subtree(Level - 1).setSize(Size);
+ }
+
+ /// setRoot - Clear the path and set a new root node.
+ /// @param Node New root node.
+ /// @param Size New root size.
+ /// @param Offset Offset into root node.
+ void setRoot(void *Node, unsigned Size, unsigned Offset) {
+ path.clear();
+ path.push_back(Entry(Node, Size, Offset));
+ }
+
+ /// replaceRoot - Replace the current root node with two new entries after the
+ /// tree height has increased.
+ /// @param Root The new root node.
+ /// @param Size Number of entries in the new root.
+ /// @param Offsets Offsets into the root and first branch nodes.
+ void replaceRoot(void *Root, unsigned Size, IdxPair Offsets);
+
+ /// getLeftSibling - Get the left sibling node at Level, or a null NodeRef.
+ /// @param Level Get the sibling to node(Level).
+ /// @return Left sibling, or NodeRef().
+ NodeRef getLeftSibling(unsigned Level) const;
+
+ /// moveLeft - Move path to the left sibling at Level. Leave nodes below Level
+ /// unaltered.
+ /// @param Level Move node(Level).
+ void moveLeft(unsigned Level);
+
+ /// fillLeft - Grow path to Height by taking leftmost branches.
+ /// @param Height The target height.
+ void fillLeft(unsigned Height) {
+ while (height() < Height)
+ push(subtree(height()), 0);
+ }
+
+  /// getRightSibling - Get the right sibling node at Level, or a null NodeRef.
+  /// @param Level Get the sibling to node(Level).
+  /// @return Right sibling, or NodeRef().
+ NodeRef getRightSibling(unsigned Level) const;
+
+  /// moveRight - Move path to the right sibling at Level. Leave nodes below
+ /// Level unaltered.
+ /// @param Level Move node(Level).
+ void moveRight(unsigned Level);
+
+ /// atBegin - Return true if path is at begin().
+ bool atBegin() const {
+ for (unsigned i = 0, e = path.size(); i != e; ++i)
+ if (path[i].offset != 0)
+ return false;
+ return true;
+ }
+
+ /// atLastEntry - Return true if the path is at the last entry of the node at
+ /// Level.
+ /// @param Level Node to examine.
+ bool atLastEntry(unsigned Level) const {
+ return path[Level].offset == path[Level].size - 1;
+ }
+
+ /// legalizeForInsert - Prepare the path for an insertion at Level. When the
+ /// path is at end(), node(Level) may not be a legal node. legalizeForInsert
+ /// ensures that node(Level) is real by moving back to the last node at Level,
+ /// and setting offset(Level) to size(Level) if required.
+ /// @param Level The level where an insertion is about to take place.
+ void legalizeForInsert(unsigned Level) {
+ if (valid())
+ return;
+ moveLeft(Level);
+ ++path[Level].offset;
+ }
+};
+
+} // namespace IntervalMapImpl
+
+
+//===----------------------------------------------------------------------===//
+//--- IntervalMap ----//
+//===----------------------------------------------------------------------===//
+
+template <typename KeyT, typename ValT,
+ unsigned N = IntervalMapImpl::NodeSizer<KeyT, ValT>::LeafSize,
+ typename Traits = IntervalMapInfo<KeyT> >
+class IntervalMap {
+ typedef IntervalMapImpl::NodeSizer<KeyT, ValT> Sizer;
+ typedef IntervalMapImpl::LeafNode<KeyT, ValT, Sizer::LeafSize, Traits> Leaf;
+ typedef IntervalMapImpl::BranchNode<KeyT, ValT, Sizer::BranchSize, Traits>
+ Branch;
+ typedef IntervalMapImpl::LeafNode<KeyT, ValT, N, Traits> RootLeaf;
+ typedef IntervalMapImpl::IdxPair IdxPair;
+
+ // The RootLeaf capacity is given as a template parameter. We must compute the
+ // corresponding RootBranch capacity.
+ enum {
+ DesiredRootBranchCap = (sizeof(RootLeaf) - sizeof(KeyT)) /
+ (sizeof(KeyT) + sizeof(IntervalMapImpl::NodeRef)),
+ RootBranchCap = DesiredRootBranchCap ? DesiredRootBranchCap : 1
+ };
+
+ typedef IntervalMapImpl::BranchNode<KeyT, ValT, RootBranchCap, Traits>
+ RootBranch;
+
+ // When branched, we store a global start key as well as the branch node.
+ struct RootBranchData {
+ KeyT start;
+ RootBranch node;
+ };
+
+ enum {
+ RootDataSize = sizeof(RootBranchData) > sizeof(RootLeaf) ?
+ sizeof(RootBranchData) : sizeof(RootLeaf)
+ };
+
+public:
+ typedef typename Sizer::Allocator Allocator;
+ typedef KeyT KeyType;
+ typedef ValT ValueType;
+ typedef Traits KeyTraits;
+
+private:
+ // The root data is either a RootLeaf or a RootBranchData instance.
+ // We can't put them in a union since C++03 doesn't allow non-trivial
+ // constructors in unions.
+ // Instead, we use a char array with pointer alignment. The alignment is
+ // ensured by the allocator member in the class, but still verified in the
+ // constructor. We don't support keys or values that are more aligned than a
+ // pointer.
+ char data[RootDataSize];
+
+ // Tree height.
+ // 0: Leaves in root.
+ // 1: Root points to leaf.
+ // 2: root->branch->leaf ...
+ unsigned height;
+
+ // Number of entries in the root node.
+ unsigned rootSize;
+
+ // Allocator used for creating external nodes.
+ Allocator &allocator;
+
+ /// dataAs - Represent data as a node type without breaking aliasing rules.
+ template <typename T>
+ T &dataAs() const {
+ union {
+ const char *d;
+ T *t;
+ } u;
+ u.d = data;
+ return *u.t;
+ }
+
+ const RootLeaf &rootLeaf() const {
+ assert(!branched() && "Cannot acces leaf data in branched root");
+ return dataAs<RootLeaf>();
+ }
+ RootLeaf &rootLeaf() {
+ assert(!branched() && "Cannot acces leaf data in branched root");
+ return dataAs<RootLeaf>();
+ }
+ RootBranchData &rootBranchData() const {
+ assert(branched() && "Cannot access branch data in non-branched root");
+ return dataAs<RootBranchData>();
+ }
+ RootBranchData &rootBranchData() {
+ assert(branched() && "Cannot access branch data in non-branched root");
+ return dataAs<RootBranchData>();
+ }
+ const RootBranch &rootBranch() const { return rootBranchData().node; }
+ RootBranch &rootBranch() { return rootBranchData().node; }
+ KeyT rootBranchStart() const { return rootBranchData().start; }
+ KeyT &rootBranchStart() { return rootBranchData().start; }
+
+ template <typename NodeT> NodeT *newNode() {
+ return new(allocator.template Allocate<NodeT>()) NodeT();
+ }
+
+ template <typename NodeT> void deleteNode(NodeT *P) {
+ P->~NodeT();
+ allocator.Deallocate(P);
+ }
+
+ IdxPair branchRoot(unsigned Position);
+ IdxPair splitRoot(unsigned Position);
+
+ void switchRootToBranch() {
+ rootLeaf().~RootLeaf();
+ height = 1;
+ new (&rootBranchData()) RootBranchData();
+ }
+
+ void switchRootToLeaf() {
+ rootBranchData().~RootBranchData();
+ height = 0;
+ new(&rootLeaf()) RootLeaf();
+ }
+
+ bool branched() const { return height > 0; }
+
+ ValT treeSafeLookup(KeyT x, ValT NotFound) const;
+ void visitNodes(void (IntervalMap::*f)(IntervalMapImpl::NodeRef,
+ unsigned Level));
+ void deleteNode(IntervalMapImpl::NodeRef Node, unsigned Level);
+
+public:
+ explicit IntervalMap(Allocator &a) : height(0), rootSize(0), allocator(a) {
+ assert((uintptr_t(data) & (alignOf<RootLeaf>() - 1)) == 0 &&
+ "Insufficient alignment");
+ new(&rootLeaf()) RootLeaf();
+ }
+
+ ~IntervalMap() {
+ clear();
+ rootLeaf().~RootLeaf();
+ }
+
+ /// empty - Return true when no intervals are mapped.
+ bool empty() const {
+ return rootSize == 0;
+ }
+
+ /// start - Return the smallest mapped key in a non-empty map.
+ KeyT start() const {
+ assert(!empty() && "Empty IntervalMap has no start");
+ return !branched() ? rootLeaf().start(0) : rootBranchStart();
+ }
+
+ /// stop - Return the largest mapped key in a non-empty map.
+ KeyT stop() const {
+ assert(!empty() && "Empty IntervalMap has no stop");
+ return !branched() ? rootLeaf().stop(rootSize - 1) :
+ rootBranch().stop(rootSize - 1);
+ }
+
+ /// lookup - Return the mapped value at x or NotFound.
+ ValT lookup(KeyT x, ValT NotFound = ValT()) const {
+ if (empty() || Traits::startLess(x, start()) || Traits::stopLess(stop(), x))
+ return NotFound;
+ return branched() ? treeSafeLookup(x, NotFound) :
+ rootLeaf().safeLookup(x, NotFound);
+ }
+
+ /// insert - Add a mapping of [a;b] to y, coalesce with adjacent intervals.
+ /// It is assumed that no key in the interval is mapped to another value, but
+ /// overlapping intervals already mapped to y will be coalesced.
+ void insert(KeyT a, KeyT b, ValT y) {
+ if (branched() || rootSize == RootLeaf::Capacity)
+ return find(a).insert(a, b, y);
+
+ // Easy insert into root leaf.
+ unsigned p = rootLeaf().findFrom(0, rootSize, a);
+ rootSize = rootLeaf().insertFrom(p, rootSize, a, b, y);
+ }
+
+ /// clear - Remove all entries.
+ void clear();
+
+ class const_iterator;
+ class iterator;
+ friend class const_iterator;
+ friend class iterator;
+
+ const_iterator begin() const {
+ const_iterator I(*this);
+ I.goToBegin();
+ return I;
+ }
+
+ iterator begin() {
+ iterator I(*this);
+ I.goToBegin();
+ return I;
+ }
+
+ const_iterator end() const {
+ const_iterator I(*this);
+ I.goToEnd();
+ return I;
+ }
+
+ iterator end() {
+ iterator I(*this);
+ I.goToEnd();
+ return I;
+ }
+
+ /// find - Return an iterator pointing to the first interval ending at or
+ /// after x, or end().
+ const_iterator find(KeyT x) const {
+ const_iterator I(*this);
+ I.find(x);
+ return I;
+ }
+
+ iterator find(KeyT x) {
+ iterator I(*this);
+ I.find(x);
+ return I;
+ }
+};
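For illustration only (not part of this header): a minimal usage sketch of the public interface declared above, assuming unsigned keys and values with the default closed-interval traits.

    #include "llvm/ADT/IntervalMap.h"

    void intervalMapExample() {
      typedef llvm::IntervalMap<unsigned, unsigned> UUMap;
      UUMap::Allocator Alloc;
      UUMap Map(Alloc);

      Map.insert(10, 19, 1);             // [10;19] -> 1
      Map.insert(20, 29, 1);             // adjacent, same value: coalesces to [10;29] -> 1
      Map.insert(40, 49, 2);             // separate interval

      unsigned V = Map.lookup(25);       // 1
      unsigned D = Map.lookup(35, ~0u);  // unmapped key: returns the NotFound value

      for (UUMap::const_iterator I = Map.begin(), E = Map.end(); I != E; ++I)
        (void)*I;                        // I.start(), I.stop(), *I describe each interval
      (void)V; (void)D;
    }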
+
+/// treeSafeLookup - Return the mapped value at x or NotFound, assuming a
+/// branched root.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+ValT IntervalMap<KeyT, ValT, N, Traits>::
+treeSafeLookup(KeyT x, ValT NotFound) const {
+ assert(branched() && "treeLookup assumes a branched root");
+
+ IntervalMapImpl::NodeRef NR = rootBranch().safeLookup(x);
+ for (unsigned h = height-1; h; --h)
+ NR = NR.get<Branch>().safeLookup(x);
+ return NR.get<Leaf>().safeLookup(x, NotFound);
+}
+
+
+// branchRoot - Switch from a leaf root to a branched root.
+// Return the new (root offset, node offset) corresponding to Position.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+IntervalMapImpl::IdxPair IntervalMap<KeyT, ValT, N, Traits>::
+branchRoot(unsigned Position) {
+ using namespace IntervalMapImpl;
+ // How many external leaf nodes to hold RootLeaf+1?
+ const unsigned Nodes = RootLeaf::Capacity / Leaf::Capacity + 1;
+
+ // Compute element distribution among new nodes.
+ unsigned size[Nodes];
+ IdxPair NewOffset(0, Position);
+
+  // It is very common for the root node to be smaller than external nodes.
+ if (Nodes == 1)
+ size[0] = rootSize;
+ else
+ NewOffset = distribute(Nodes, rootSize, Leaf::Capacity, NULL, size,
+ Position, true);
+
+ // Allocate new nodes.
+ unsigned pos = 0;
+ NodeRef node[Nodes];
+ for (unsigned n = 0; n != Nodes; ++n) {
+ Leaf *L = newNode<Leaf>();
+ L->copy(rootLeaf(), pos, 0, size[n]);
+ node[n] = NodeRef(L, size[n]);
+ pos += size[n];
+ }
+
+ // Destroy the old leaf node, construct branch node instead.
+ switchRootToBranch();
+ for (unsigned n = 0; n != Nodes; ++n) {
+ rootBranch().stop(n) = node[n].template get<Leaf>().stop(size[n]-1);
+ rootBranch().subtree(n) = node[n];
+ }
+ rootBranchStart() = node[0].template get<Leaf>().start(0);
+ rootSize = Nodes;
+ return NewOffset;
+}
+
+// splitRoot - Split the current BranchRoot into multiple Branch nodes.
+// Return the new (root offset, node offset) corresponding to Position.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+IntervalMapImpl::IdxPair IntervalMap<KeyT, ValT, N, Traits>::
+splitRoot(unsigned Position) {
+ using namespace IntervalMapImpl;
+  // How many external branch nodes to hold RootBranch+1?
+ const unsigned Nodes = RootBranch::Capacity / Branch::Capacity + 1;
+
+ // Compute element distribution among new nodes.
+ unsigned Size[Nodes];
+ IdxPair NewOffset(0, Position);
+
+  // It is very common for the root node to be smaller than external nodes.
+ if (Nodes == 1)
+ Size[0] = rootSize;
+ else
+ NewOffset = distribute(Nodes, rootSize, Leaf::Capacity, NULL, Size,
+ Position, true);
+
+ // Allocate new nodes.
+ unsigned Pos = 0;
+ NodeRef Node[Nodes];
+ for (unsigned n = 0; n != Nodes; ++n) {
+ Branch *B = newNode<Branch>();
+ B->copy(rootBranch(), Pos, 0, Size[n]);
+ Node[n] = NodeRef(B, Size[n]);
+ Pos += Size[n];
+ }
+
+ for (unsigned n = 0; n != Nodes; ++n) {
+ rootBranch().stop(n) = Node[n].template get<Branch>().stop(Size[n]-1);
+ rootBranch().subtree(n) = Node[n];
+ }
+ rootSize = Nodes;
+ ++height;
+ return NewOffset;
+}
+
+/// visitNodes - Visit each external node.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+visitNodes(void (IntervalMap::*f)(IntervalMapImpl::NodeRef, unsigned Height)) {
+ if (!branched())
+ return;
+ SmallVector<IntervalMapImpl::NodeRef, 4> Refs, NextRefs;
+
+ // Collect level 0 nodes from the root.
+ for (unsigned i = 0; i != rootSize; ++i)
+ Refs.push_back(rootBranch().subtree(i));
+
+ // Visit all branch nodes.
+ for (unsigned h = height - 1; h; --h) {
+ for (unsigned i = 0, e = Refs.size(); i != e; ++i) {
+ for (unsigned j = 0, s = Refs[i].size(); j != s; ++j)
+ NextRefs.push_back(Refs[i].subtree(j));
+ (this->*f)(Refs[i], h);
+ }
+ Refs.clear();
+ Refs.swap(NextRefs);
+ }
+
+ // Visit all leaf nodes.
+ for (unsigned i = 0, e = Refs.size(); i != e; ++i)
+ (this->*f)(Refs[i], 0);
+}
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+deleteNode(IntervalMapImpl::NodeRef Node, unsigned Level) {
+ if (Level)
+ deleteNode(&Node.get<Branch>());
+ else
+ deleteNode(&Node.get<Leaf>());
+}
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+clear() {
+ if (branched()) {
+ visitNodes(&IntervalMap::deleteNode);
+ switchRootToLeaf();
+ }
+ rootSize = 0;
+}
+
+//===----------------------------------------------------------------------===//
+//--- IntervalMap::const_iterator ----//
+//===----------------------------------------------------------------------===//
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+class IntervalMap<KeyT, ValT, N, Traits>::const_iterator :
+ public std::iterator<std::bidirectional_iterator_tag, ValT> {
+protected:
+ friend class IntervalMap;
+
+ // The map referred to.
+ IntervalMap *map;
+
+ // We store a full path from the root to the current position.
+ // The path may be partially filled, but never between iterator calls.
+ IntervalMapImpl::Path path;
+
+ explicit const_iterator(const IntervalMap &map) :
+ map(const_cast<IntervalMap*>(&map)) {}
+
+ bool branched() const {
+ assert(map && "Invalid iterator");
+ return map->branched();
+ }
+
+ void setRoot(unsigned Offset) {
+ if (branched())
+ path.setRoot(&map->rootBranch(), map->rootSize, Offset);
+ else
+ path.setRoot(&map->rootLeaf(), map->rootSize, Offset);
+ }
+
+ void pathFillFind(KeyT x);
+ void treeFind(KeyT x);
+ void treeAdvanceTo(KeyT x);
+
+ /// unsafeStart - Writable access to start() for iterator.
+ KeyT &unsafeStart() const {
+ assert(valid() && "Cannot access invalid iterator");
+ return branched() ? path.leaf<Leaf>().start(path.leafOffset()) :
+ path.leaf<RootLeaf>().start(path.leafOffset());
+ }
+
+ /// unsafeStop - Writable access to stop() for iterator.
+ KeyT &unsafeStop() const {
+ assert(valid() && "Cannot access invalid iterator");
+ return branched() ? path.leaf<Leaf>().stop(path.leafOffset()) :
+ path.leaf<RootLeaf>().stop(path.leafOffset());
+ }
+
+ /// unsafeValue - Writable access to value() for iterator.
+ ValT &unsafeValue() const {
+ assert(valid() && "Cannot access invalid iterator");
+ return branched() ? path.leaf<Leaf>().value(path.leafOffset()) :
+ path.leaf<RootLeaf>().value(path.leafOffset());
+ }
+
+public:
+ /// const_iterator - Create an iterator that isn't pointing anywhere.
+ const_iterator() : map(0) {}
+
+ /// valid - Return true if the current position is valid, false for end().
+ bool valid() const { return path.valid(); }
+
+ /// start - Return the beginning of the current interval.
+ const KeyT &start() const { return unsafeStart(); }
+
+ /// stop - Return the end of the current interval.
+ const KeyT &stop() const { return unsafeStop(); }
+
+ /// value - Return the mapped value at the current interval.
+ const ValT &value() const { return unsafeValue(); }
+
+ const ValT &operator*() const { return value(); }
+
+ bool operator==(const const_iterator &RHS) const {
+ assert(map == RHS.map && "Cannot compare iterators from different maps");
+ if (!valid())
+ return !RHS.valid();
+ if (path.leafOffset() != RHS.path.leafOffset())
+ return false;
+ return &path.template leaf<Leaf>() == &RHS.path.template leaf<Leaf>();
+ }
+
+ bool operator!=(const const_iterator &RHS) const {
+ return !operator==(RHS);
+ }
+
+ /// goToBegin - Move to the first interval in map.
+ void goToBegin() {
+ setRoot(0);
+ if (branched())
+ path.fillLeft(map->height);
+ }
+
+ /// goToEnd - Move beyond the last interval in map.
+ void goToEnd() {
+ setRoot(map->rootSize);
+ }
+
+ /// preincrement - move to the next interval.
+ const_iterator &operator++() {
+ assert(valid() && "Cannot increment end()");
+ if (++path.leafOffset() == path.leafSize() && branched())
+ path.moveRight(map->height);
+ return *this;
+ }
+
+  /// postincrement - Don't do that! Preincrement is cheaper.
+ const_iterator operator++(int) {
+ const_iterator tmp = *this;
+ operator++();
+ return tmp;
+ }
+
+ /// predecrement - move to the previous interval.
+ const_iterator &operator--() {
+ if (path.leafOffset() && (valid() || !branched()))
+ --path.leafOffset();
+ else
+ path.moveLeft(map->height);
+ return *this;
+ }
+
+  /// postdecrement - Don't do that! Predecrement is cheaper.
+ const_iterator operator--(int) {
+ const_iterator tmp = *this;
+ operator--();
+ return tmp;
+ }
+
+ /// find - Move to the first interval with stop >= x, or end().
+ /// This is a full search from the root, the current position is ignored.
+ void find(KeyT x) {
+ if (branched())
+ treeFind(x);
+ else
+ setRoot(map->rootLeaf().findFrom(0, map->rootSize, x));
+ }
+
+ /// advanceTo - Move to the first interval with stop >= x, or end().
+  /// The search starts from the current position; intervals before it cannot
+  /// be found. This is much faster than find() for small forward moves.
+ void advanceTo(KeyT x) {
+ if (!valid())
+ return;
+ if (branched())
+ treeAdvanceTo(x);
+ else
+ path.leafOffset() =
+ map->rootLeaf().findFrom(path.leafOffset(), map->rootSize, x);
+ }
+
+};
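A minimal read-only usage sketch for the const_iterator interface above (an editorial illustration, not part of the patch). The Allocator typedef, the map-level begin()/find()/insert() members, and the default closed-interval traits are assumed from the earlier, unshown portion of IntervalMap.h:

#include "llvm/ADT/IntervalMap.h"
#include <cassert>

static unsigned totalCoverage() {
  typedef llvm::IntervalMap<unsigned, char> Map;
  Map::Allocator Alloc;            // assumed typedef from the unshown header part
  Map M(Alloc);
  M.insert(10, 20, 'a');           // map the closed interval [10;20] to 'a'
  M.insert(30, 40, 'b');           // map [30;40] to 'b'

  unsigned Covered = 0;
  for (Map::const_iterator I = M.begin(); I.valid(); ++I)
    Covered += I.stop() - I.start() + 1;   // closed intervals include both ends

  Map::const_iterator I = M.find(35);      // first interval with stop >= 35
  assert(I.valid() && I.start() == 30 && *I == 'b');
  I.advanceTo(100);                        // nothing stops at or after 100...
  assert(!I.valid());                      // ...so the iterator is now at end()
  return Covered;
}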
+
+/// pathFillFind - Complete path by searching for x.
+/// @param x Key to search for.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+const_iterator::pathFillFind(KeyT x) {
+ IntervalMapImpl::NodeRef NR = path.subtree(path.height());
+ for (unsigned i = map->height - path.height() - 1; i; --i) {
+ unsigned p = NR.get<Branch>().safeFind(0, x);
+ path.push(NR, p);
+ NR = NR.subtree(p);
+ }
+ path.push(NR, NR.get<Leaf>().safeFind(0, x));
+}
+
+/// treeFind - Find in a branched tree.
+/// @param x Key to search for.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+const_iterator::treeFind(KeyT x) {
+ setRoot(map->rootBranch().findFrom(0, map->rootSize, x));
+ if (valid())
+ pathFillFind(x);
+}
+
+/// treeAdvanceTo - Find position after the current one.
+/// @param x Key to search for.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+const_iterator::treeAdvanceTo(KeyT x) {
+ // Can we stay on the same leaf node?
+ if (!Traits::stopLess(path.leaf<Leaf>().stop(path.leafSize() - 1), x)) {
+ path.leafOffset() = path.leaf<Leaf>().safeFind(path.leafOffset(), x);
+ return;
+ }
+
+ // Drop the current leaf.
+ path.pop();
+
+ // Search towards the root for a usable subtree.
+ if (path.height()) {
+ for (unsigned l = path.height() - 1; l; --l) {
+ if (!Traits::stopLess(path.node<Branch>(l).stop(path.offset(l)), x)) {
+ // The branch node at l+1 is usable
+ path.offset(l + 1) =
+ path.node<Branch>(l + 1).safeFind(path.offset(l + 1), x);
+ return pathFillFind(x);
+ }
+ path.pop();
+ }
+ // Is the level-1 Branch usable?
+ if (!Traits::stopLess(map->rootBranch().stop(path.offset(0)), x)) {
+ path.offset(1) = path.node<Branch>(1).safeFind(path.offset(1), x);
+ return pathFillFind(x);
+ }
+ }
+
+ // We reached the root.
+ setRoot(map->rootBranch().findFrom(path.offset(0), map->rootSize, x));
+ if (valid())
+ pathFillFind(x);
+}
+
+//===----------------------------------------------------------------------===//
+//--- IntervalMap::iterator ----//
+//===----------------------------------------------------------------------===//
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+class IntervalMap<KeyT, ValT, N, Traits>::iterator : public const_iterator {
+ friend class IntervalMap;
+ typedef IntervalMapImpl::IdxPair IdxPair;
+
+ explicit iterator(IntervalMap &map) : const_iterator(map) {}
+
+ void setNodeStop(unsigned Level, KeyT Stop);
+ bool insertNode(unsigned Level, IntervalMapImpl::NodeRef Node, KeyT Stop);
+ template <typename NodeT> bool overflow(unsigned Level);
+ void treeInsert(KeyT a, KeyT b, ValT y);
+ void eraseNode(unsigned Level);
+ void treeErase(bool UpdateRoot = true);
+ bool canCoalesceLeft(KeyT Start, ValT x);
+ bool canCoalesceRight(KeyT Stop, ValT x);
+
+public:
+ /// iterator - Create null iterator.
+ iterator() {}
+
+ /// setStart - Move the start of the current interval.
+ /// This may cause coalescing with the previous interval.
+ /// @param a New start key, must not overlap the previous interval.
+ void setStart(KeyT a);
+
+ /// setStop - Move the end of the current interval.
+ /// This may cause coalescing with the following interval.
+ /// @param b New stop key, must not overlap the following interval.
+ void setStop(KeyT b);
+
+ /// setValue - Change the mapped value of the current interval.
+ /// This may cause coalescing with the previous and following intervals.
+ /// @param x New value.
+ void setValue(ValT x);
+
+ /// setStartUnchecked - Move the start of the current interval without
+ /// checking for coalescing or overlaps.
+ /// This should only be used when it is known that coalescing is not required.
+ /// @param a New start key.
+ void setStartUnchecked(KeyT a) { this->unsafeStart() = a; }
+
+ /// setStopUnchecked - Move the end of the current interval without checking
+ /// for coalescing or overlaps.
+ /// This should only be used when it is known that coalescing is not required.
+ /// @param b New stop key.
+ void setStopUnchecked(KeyT b) {
+ this->unsafeStop() = b;
+ // Update keys in branch nodes as well.
+ if (this->path.atLastEntry(this->path.height()))
+ setNodeStop(this->path.height(), b);
+ }
+
+ /// setValueUnchecked - Change the mapped value of the current interval
+ /// without checking for coalescing.
+ /// @param x New value.
+ void setValueUnchecked(ValT x) { this->unsafeValue() = x; }
+
+ /// insert - Insert mapping [a;b] -> y before the current position.
+ void insert(KeyT a, KeyT b, ValT y);
+
+ /// erase - Erase the current interval.
+ void erase();
+
+ iterator &operator++() {
+ const_iterator::operator++();
+ return *this;
+ }
+
+ iterator operator++(int) {
+ iterator tmp = *this;
+ operator++();
+ return tmp;
+ }
+
+ iterator &operator--() {
+ const_iterator::operator--();
+ return *this;
+ }
+
+ iterator operator--(int) {
+ iterator tmp = *this;
+ operator--();
+ return tmp;
+ }
+
+};
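A small editing sketch for the iterator interface above (editorial, not part of the patch). It relies on setStart() coalescing with an adjacent interval of equal value, and again assumes the map-level insert(), find(), and empty() members from the unshown part of the header:

#include "llvm/ADT/IntervalMap.h"
#include <cassert>

static void editIntervals() {
  typedef llvm::IntervalMap<unsigned, char> Map;
  Map::Allocator Alloc;
  Map M(Alloc);
  M.insert(10, 20, 'a');
  M.insert(30, 40, 'a');

  Map::iterator I = M.find(30);    // points at [30;40] -> 'a'
  I.setStart(21);                  // now adjacent to [10;20] with the same value,
  assert(I.start() == 10);         // ...so the two intervals coalesce into [10;40]
  assert(I.stop() == 40);

  I.erase();                       // remove [10;40] and move past it
  assert(M.empty());
}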
+
+/// canCoalesceLeft - Can the current interval coalesce to the left after
+/// changing start or value?
+/// @param Start New start of current interval.
+/// @param Value New value for current interval.
+/// @return True when updating the current interval would enable coalescing.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+bool IntervalMap<KeyT, ValT, N, Traits>::
+iterator::canCoalesceLeft(KeyT Start, ValT Value) {
+ using namespace IntervalMapImpl;
+ Path &P = this->path;
+ if (!this->branched()) {
+ unsigned i = P.leafOffset();
+ RootLeaf &Node = P.leaf<RootLeaf>();
+ return i && Node.value(i-1) == Value &&
+ Traits::adjacent(Node.stop(i-1), Start);
+ }
+ // Branched.
+ if (unsigned i = P.leafOffset()) {
+ Leaf &Node = P.leaf<Leaf>();
+ return Node.value(i-1) == Value && Traits::adjacent(Node.stop(i-1), Start);
+ } else if (NodeRef NR = P.getLeftSibling(P.height())) {
+ unsigned i = NR.size() - 1;
+ Leaf &Node = NR.get<Leaf>();
+ return Node.value(i) == Value && Traits::adjacent(Node.stop(i), Start);
+ }
+ return false;
+}
+
+/// canCoalesceRight - Can the current interval coalesce to the right after
+/// changing stop or value?
+/// @param Stop New stop of current interval.
+/// @param Value New value for current interval.
+/// @return True when updating the current interval would enable coalescing.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+bool IntervalMap<KeyT, ValT, N, Traits>::
+iterator::canCoalesceRight(KeyT Stop, ValT Value) {
+ using namespace IntervalMapImpl;
+ Path &P = this->path;
+ unsigned i = P.leafOffset() + 1;
+ if (!this->branched()) {
+ if (i >= P.leafSize())
+ return false;
+ RootLeaf &Node = P.leaf<RootLeaf>();
+ return Node.value(i) == Value && Traits::adjacent(Stop, Node.start(i));
+ }
+ // Branched.
+ if (i < P.leafSize()) {
+ Leaf &Node = P.leaf<Leaf>();
+ return Node.value(i) == Value && Traits::adjacent(Stop, Node.start(i));
+ } else if (NodeRef NR = P.getRightSibling(P.height())) {
+ Leaf &Node = NR.get<Leaf>();
+ return Node.value(0) == Value && Traits::adjacent(Stop, Node.start(0));
+ }
+ return false;
+}
+
+/// setNodeStop - Update the stop key of the current node at level and above.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::setNodeStop(unsigned Level, KeyT Stop) {
+ // There are no references to the root node, so nothing to update.
+ if (!Level)
+ return;
+ IntervalMapImpl::Path &P = this->path;
+ // Update nodes pointing to the current node.
+ while (--Level) {
+ P.node<Branch>(Level).stop(P.offset(Level)) = Stop;
+ if (!P.atLastEntry(Level))
+ return;
+ }
+ // Update root separately since it has a different layout.
+ P.node<RootBranch>(Level).stop(P.offset(Level)) = Stop;
+}
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::setStart(KeyT a) {
+ assert(Traits::stopLess(a, this->stop()) && "Cannot move start beyond stop");
+ KeyT &CurStart = this->unsafeStart();
+ if (!Traits::startLess(a, CurStart) || !canCoalesceLeft(a, this->value())) {
+ CurStart = a;
+ return;
+ }
+ // Coalesce with the interval to the left.
+ --*this;
+ a = this->start();
+ erase();
+ setStartUnchecked(a);
+}
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::setStop(KeyT b) {
+ assert(Traits::stopLess(this->start(), b) && "Cannot move stop beyond start");
+ if (Traits::startLess(b, this->stop()) ||
+ !canCoalesceRight(b, this->value())) {
+ setStopUnchecked(b);
+ return;
+ }
+ // Coalesce with interval to the right.
+ KeyT a = this->start();
+ erase();
+ setStartUnchecked(a);
+}
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::setValue(ValT x) {
+ setValueUnchecked(x);
+ if (canCoalesceRight(this->stop(), x)) {
+ KeyT a = this->start();
+ erase();
+ setStartUnchecked(a);
+ }
+ if (canCoalesceLeft(this->start(), x)) {
+ --*this;
+ KeyT a = this->start();
+ erase();
+ setStartUnchecked(a);
+ }
+}
+
+/// insertNode - insert a node before the current path at level.
+/// Leave the current path pointing at the new node.
+/// @param Level path index of the node to be inserted.
+/// @param Node The node to be inserted.
+/// @param Stop The last index in the new node.
+/// @return True if the tree height was increased.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+bool IntervalMap<KeyT, ValT, N, Traits>::
+iterator::insertNode(unsigned Level, IntervalMapImpl::NodeRef Node, KeyT Stop) {
+ assert(Level && "Cannot insert next to the root");
+ bool SplitRoot = false;
+ IntervalMap &IM = *this->map;
+ IntervalMapImpl::Path &P = this->path;
+
+ if (Level == 1) {
+ // Insert into the root branch node.
+ if (IM.rootSize < RootBranch::Capacity) {
+ IM.rootBranch().insert(P.offset(0), IM.rootSize, Node, Stop);
+ P.setSize(0, ++IM.rootSize);
+ P.reset(Level);
+ return SplitRoot;
+ }
+
+ // We need to split the root while keeping our position.
+ SplitRoot = true;
+ IdxPair Offset = IM.splitRoot(P.offset(0));
+ P.replaceRoot(&IM.rootBranch(), IM.rootSize, Offset);
+
+ // Fall through to insert at the new higher level.
+ ++Level;
+ }
+
+ // When inserting before end(), make sure we have a valid path.
+ P.legalizeForInsert(--Level);
+
+ // Insert into the branch node at Level-1.
+ if (P.size(Level) == Branch::Capacity) {
+    // Branch node is full; handle the overflow.
+ assert(!SplitRoot && "Cannot overflow after splitting the root");
+ SplitRoot = overflow<Branch>(Level);
+ Level += SplitRoot;
+ }
+ P.node<Branch>(Level).insert(P.offset(Level), P.size(Level), Node, Stop);
+ P.setSize(Level, P.size(Level) + 1);
+ if (P.atLastEntry(Level))
+ setNodeStop(Level, Stop);
+ P.reset(Level + 1);
+ return SplitRoot;
+}
+
+// insert
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::insert(KeyT a, KeyT b, ValT y) {
+ if (this->branched())
+ return treeInsert(a, b, y);
+ IntervalMap &IM = *this->map;
+ IntervalMapImpl::Path &P = this->path;
+
+ // Try simple root leaf insert.
+ unsigned Size = IM.rootLeaf().insertFrom(P.leafOffset(), IM.rootSize, a, b, y);
+
+ // Was the root node insert successful?
+ if (Size <= RootLeaf::Capacity) {
+ P.setSize(0, IM.rootSize = Size);
+ return;
+ }
+
+ // Root leaf node is full, we must branch.
+ IdxPair Offset = IM.branchRoot(P.leafOffset());
+ P.replaceRoot(&IM.rootBranch(), IM.rootSize, Offset);
+
+ // Now it fits in the new leaf.
+ treeInsert(a, b, y);
+}
+
+
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::treeInsert(KeyT a, KeyT b, ValT y) {
+ using namespace IntervalMapImpl;
+ Path &P = this->path;
+
+ if (!P.valid())
+ P.legalizeForInsert(this->map->height);
+
+ // Check if this insertion will extend the node to the left.
+ if (P.leafOffset() == 0 && Traits::startLess(a, P.leaf<Leaf>().start(0))) {
+ // Node is growing to the left, will it affect a left sibling node?
+ if (NodeRef Sib = P.getLeftSibling(P.height())) {
+ Leaf &SibLeaf = Sib.get<Leaf>();
+ unsigned SibOfs = Sib.size() - 1;
+ if (SibLeaf.value(SibOfs) == y &&
+ Traits::adjacent(SibLeaf.stop(SibOfs), a)) {
+ // This insertion will coalesce with the last entry in SibLeaf. We can
+ // handle it in two ways:
+ // 1. Extend SibLeaf.stop to b and be done, or
+ // 2. Extend a to SibLeaf, erase the SibLeaf entry and continue.
+        // We prefer 1, but need 2 when coalescing to the right as well.
+ Leaf &CurLeaf = P.leaf<Leaf>();
+ P.moveLeft(P.height());
+ if (Traits::stopLess(b, CurLeaf.start(0)) &&
+ (y != CurLeaf.value(0) || !Traits::adjacent(b, CurLeaf.start(0)))) {
+ // Easy, just extend SibLeaf and we're done.
+ setNodeStop(P.height(), SibLeaf.stop(SibOfs) = b);
+ return;
+ } else {
+ // We have both left and right coalescing. Erase the old SibLeaf entry
+ // and continue inserting the larger interval.
+ a = SibLeaf.start(SibOfs);
+ treeErase(/* UpdateRoot= */false);
+ }
+ }
+ } else {
+ // No left sibling means we are at begin(). Update cached bound.
+ this->map->rootBranchStart() = a;
+ }
+ }
+
+ // When we are inserting at the end of a leaf node, we must update stops.
+ unsigned Size = P.leafSize();
+ bool Grow = P.leafOffset() == Size;
+ Size = P.leaf<Leaf>().insertFrom(P.leafOffset(), Size, a, b, y);
+
+ // Leaf insertion unsuccessful? Overflow and try again.
+ if (Size > Leaf::Capacity) {
+ overflow<Leaf>(P.height());
+ Grow = P.leafOffset() == P.leafSize();
+ Size = P.leaf<Leaf>().insertFrom(P.leafOffset(), P.leafSize(), a, b, y);
+ assert(Size <= Leaf::Capacity && "overflow() didn't make room");
+ }
+
+ // Inserted, update offset and leaf size.
+ P.setSize(P.height(), Size);
+
+ // Insert was the last node entry, update stops.
+ if (Grow)
+ setNodeStop(P.height(), b);
+}
+
+/// erase - erase the current interval and move to the next position.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::erase() {
+ IntervalMap &IM = *this->map;
+ IntervalMapImpl::Path &P = this->path;
+ assert(P.valid() && "Cannot erase end()");
+ if (this->branched())
+ return treeErase();
+ IM.rootLeaf().erase(P.leafOffset(), IM.rootSize);
+ P.setSize(0, --IM.rootSize);
+}
+
+/// treeErase - erase() for a branched tree.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::treeErase(bool UpdateRoot) {
+ IntervalMap &IM = *this->map;
+ IntervalMapImpl::Path &P = this->path;
+ Leaf &Node = P.leaf<Leaf>();
+
+ // Nodes are not allowed to become empty.
+ if (P.leafSize() == 1) {
+ IM.deleteNode(&Node);
+ eraseNode(IM.height);
+ // Update rootBranchStart if we erased begin().
+ if (UpdateRoot && IM.branched() && P.valid() && P.atBegin())
+ IM.rootBranchStart() = P.leaf<Leaf>().start(0);
+ return;
+ }
+
+ // Erase current entry.
+ Node.erase(P.leafOffset(), P.leafSize());
+ unsigned NewSize = P.leafSize() - 1;
+ P.setSize(IM.height, NewSize);
+ // When we erase the last entry, update stop and move to a legal position.
+ if (P.leafOffset() == NewSize) {
+ setNodeStop(IM.height, Node.stop(NewSize - 1));
+ P.moveRight(IM.height);
+ } else if (UpdateRoot && P.atBegin())
+ IM.rootBranchStart() = P.leaf<Leaf>().start(0);
+}
+
+/// eraseNode - Erase the current node at Level from its parent and move path to
+/// the first entry of the next sibling node.
+/// The node must be deallocated by the caller.
+/// @param Level 1..height, the root node cannot be erased.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+void IntervalMap<KeyT, ValT, N, Traits>::
+iterator::eraseNode(unsigned Level) {
+ assert(Level && "Cannot erase root node");
+ IntervalMap &IM = *this->map;
+ IntervalMapImpl::Path &P = this->path;
+
+ if (--Level == 0) {
+ IM.rootBranch().erase(P.offset(0), IM.rootSize);
+ P.setSize(0, --IM.rootSize);
+ // If this cleared the root, switch to height=0.
+ if (IM.empty()) {
+ IM.switchRootToLeaf();
+ this->setRoot(0);
+ return;
+ }
+ } else {
+ // Remove node ref from branch node at Level.
+ Branch &Parent = P.node<Branch>(Level);
+ if (P.size(Level) == 1) {
+ // Branch node became empty, remove it recursively.
+ IM.deleteNode(&Parent);
+ eraseNode(Level);
+ } else {
+ // Branch node won't become empty.
+ Parent.erase(P.offset(Level), P.size(Level));
+ unsigned NewSize = P.size(Level) - 1;
+ P.setSize(Level, NewSize);
+ // If we removed the last branch, update stop and move to a legal pos.
+ if (P.offset(Level) == NewSize) {
+ setNodeStop(Level, Parent.stop(NewSize - 1));
+ P.moveRight(Level);
+ }
+ }
+ }
+ // Update path cache for the new right sibling position.
+ if (P.valid()) {
+ P.reset(Level + 1);
+ P.offset(Level + 1) = 0;
+ }
+}
+
+/// overflow - Distribute entries of the current node evenly among
+/// its siblings and ensure that the current node is not full.
+/// This may require allocating a new node.
+/// @param NodeT The type of node at Level (Leaf or Branch).
+/// @param Level path index of the overflowing node.
+/// @return True when the tree height was changed.
+template <typename KeyT, typename ValT, unsigned N, typename Traits>
+template <typename NodeT>
+bool IntervalMap<KeyT, ValT, N, Traits>::
+iterator::overflow(unsigned Level) {
+ using namespace IntervalMapImpl;
+ Path &P = this->path;
+ unsigned CurSize[4];
+ NodeT *Node[4];
+ unsigned Nodes = 0;
+ unsigned Elements = 0;
+ unsigned Offset = P.offset(Level);
+
+ // Do we have a left sibling?
+ NodeRef LeftSib = P.getLeftSibling(Level);
+ if (LeftSib) {
+ Offset += Elements = CurSize[Nodes] = LeftSib.size();
+ Node[Nodes++] = &LeftSib.get<NodeT>();
+ }
+
+ // Current node.
+ Elements += CurSize[Nodes] = P.size(Level);
+ Node[Nodes++] = &P.node<NodeT>(Level);
+
+ // Do we have a right sibling?
+ NodeRef RightSib = P.getRightSibling(Level);
+ if (RightSib) {
+ Elements += CurSize[Nodes] = RightSib.size();
+ Node[Nodes++] = &RightSib.get<NodeT>();
+ }
+
+ // Do we need to allocate a new node?
+ unsigned NewNode = 0;
+ if (Elements + 1 > Nodes * NodeT::Capacity) {
+ // Insert NewNode at the penultimate position, or after a single node.
+ NewNode = Nodes == 1 ? 1 : Nodes - 1;
+ CurSize[Nodes] = CurSize[NewNode];
+ Node[Nodes] = Node[NewNode];
+ CurSize[NewNode] = 0;
+ Node[NewNode] = this->map->newNode<NodeT>();
+ ++Nodes;
+ }
+
+ // Compute the new element distribution.
+ unsigned NewSize[4];
+ IdxPair NewOffset = distribute(Nodes, Elements, NodeT::Capacity,
+ CurSize, NewSize, Offset, true);
+ adjustSiblingSizes(Node, Nodes, CurSize, NewSize);
+
+ // Move current location to the leftmost node.
+ if (LeftSib)
+ P.moveLeft(Level);
+
+ // Elements have been rearranged, now update node sizes and stops.
+ bool SplitRoot = false;
+ unsigned Pos = 0;
+ for (;;) {
+ KeyT Stop = Node[Pos]->stop(NewSize[Pos]-1);
+ if (NewNode && Pos == NewNode) {
+ SplitRoot = insertNode(Level, NodeRef(Node[Pos], NewSize[Pos]), Stop);
+ Level += SplitRoot;
+ } else {
+ P.setSize(Level, NewSize[Pos]);
+ setNodeStop(Level, Stop);
+ }
+ if (Pos + 1 == Nodes)
+ break;
+ P.moveRight(Level);
+ ++Pos;
+ }
+
+ // Where was I? Find NewOffset.
+  while (Pos != NewOffset.first) {
+ P.moveLeft(Level);
+ --Pos;
+ }
+ P.offset(Level) = NewOffset.second;
+ return SplitRoot;
+}
+
+//===----------------------------------------------------------------------===//
+//--- IntervalMapOverlaps ----//
+//===----------------------------------------------------------------------===//
+
+/// IntervalMapOverlaps - Iterate over the overlaps of mapped intervals in two
+/// IntervalMaps. The maps may be different, but the KeyT and Traits types
+/// should be the same.
+///
+/// Typical uses:
+///
+/// 1. Test for overlap:
+/// bool overlap = IntervalMapOverlaps(a, b).valid();
+///
+/// 2. Enumerate overlaps:
+///    for (IntervalMapOverlaps I(a, b); I.valid(); ++I) { ... }
+///
+template <typename MapA, typename MapB>
+class IntervalMapOverlaps {
+ typedef typename MapA::KeyType KeyType;
+ typedef typename MapA::KeyTraits Traits;
+ typename MapA::const_iterator posA;
+ typename MapB::const_iterator posB;
+
+ /// advance - Move posA and posB forward until reaching an overlap, or until
+ /// either meets end.
+ /// Don't move the iterators if they are already overlapping.
+ void advance() {
+ if (!valid())
+ return;
+
+ if (Traits::stopLess(posA.stop(), posB.start())) {
+ // A ends before B begins. Catch up.
+ posA.advanceTo(posB.start());
+ if (!posA.valid() || !Traits::stopLess(posB.stop(), posA.start()))
+ return;
+ } else if (Traits::stopLess(posB.stop(), posA.start())) {
+ // B ends before A begins. Catch up.
+ posB.advanceTo(posA.start());
+ if (!posB.valid() || !Traits::stopLess(posA.stop(), posB.start()))
+ return;
+ } else
+ // Already overlapping.
+ return;
+
+ for (;;) {
+ // Make a.end > b.start.
+ posA.advanceTo(posB.start());
+ if (!posA.valid() || !Traits::stopLess(posB.stop(), posA.start()))
+ return;
+ // Make b.end > a.start.
+ posB.advanceTo(posA.start());
+ if (!posB.valid() || !Traits::stopLess(posA.stop(), posB.start()))
+ return;
+ }
+ }
+
+public:
+ /// IntervalMapOverlaps - Create an iterator for the overlaps of a and b.
+ IntervalMapOverlaps(const MapA &a, const MapB &b)
+ : posA(b.empty() ? a.end() : a.find(b.start())),
+ posB(posA.valid() ? b.find(posA.start()) : b.end()) { advance(); }
+
+ /// valid - Return true if iterator is at an overlap.
+ bool valid() const {
+ return posA.valid() && posB.valid();
+ }
+
+ /// a - access the left hand side in the overlap.
+ const typename MapA::const_iterator &a() const { return posA; }
+
+ /// b - access the right hand side in the overlap.
+ const typename MapB::const_iterator &b() const { return posB; }
+
+ /// start - Beginning of the overlapping interval.
+ KeyType start() const {
+ KeyType ak = a().start();
+ KeyType bk = b().start();
+ return Traits::startLess(ak, bk) ? bk : ak;
+ }
+
+ /// stop - End of the overlapping interval.
+ KeyType stop() const {
+ KeyType ak = a().stop();
+ KeyType bk = b().stop();
+ return Traits::startLess(ak, bk) ? ak : bk;
+ }
+
+ /// skipA - Move to the next overlap that doesn't involve a().
+ void skipA() {
+ ++posA;
+ advance();
+ }
+
+ /// skipB - Move to the next overlap that doesn't involve b().
+ void skipB() {
+ ++posB;
+ advance();
+ }
+
+ /// Preincrement - Move to the next overlap.
+ IntervalMapOverlaps &operator++() {
+ // Bump the iterator that ends first. The other one may have more overlaps.
+ if (Traits::startLess(posB.stop(), posA.stop()))
+ skipB();
+ else
+ skipA();
+ return *this;
+ }
+
+ /// advanceTo - Move to the first overlapping interval with
+ /// stopLess(x, stop()).
+ void advanceTo(KeyType x) {
+ if (!valid())
+ return;
+ // Make sure advanceTo sees monotonic keys.
+ if (Traits::stopLess(posA.stop(), x))
+ posA.advanceTo(x);
+ if (Traits::stopLess(posB.stop(), x))
+ posB.advanceTo(x);
+ advance();
+ }
+};
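Building on the "Typical uses" comment above, a sketch (editorial, not part of the patch) that sums the sizes of all overlaps between two maps, assuming the default closed-interval traits:

#include "llvm/ADT/IntervalMap.h"

typedef llvm::IntervalMap<unsigned, char> Map;

static unsigned overlappedKeys(const Map &A, const Map &B) {
  unsigned Total = 0;
  for (llvm::IntervalMapOverlaps<Map, Map> I(A, B); I.valid(); ++I)
    Total += I.stop() - I.start() + 1;   // the overlap is the closed range [start;stop]
  return Total;
}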
+
+} // namespace llvm
+
+#endif
diff --git a/include/llvm/ADT/Optional.h b/include/llvm/ADT/Optional.h
index 34e54a0..ee8b69f 100644
--- a/include/llvm/ADT/Optional.h
+++ b/include/llvm/ADT/Optional.h
@@ -61,6 +61,60 @@ template <typename T>
struct simplify_type<Optional<T> >
: public simplify_type<const Optional<T> > {};
+/// \brief Poison comparison between two \c Optional objects. Clients need to
+/// explicitly compare the underlying values and account for empty \c Optional
+/// objects.
+///
+/// This routine will never be defined. It returns \c void to help diagnose
+/// errors at compile time.
+template<typename T, typename U>
+void operator==(const Optional<T> &X, const Optional<U> &Y);
+
+/// \brief Poison comparison between two \c Optional objects. Clients need to
+/// explicitly compare the underlying values and account for empty \c Optional
+/// objects.
+///
+/// This routine will never be defined. It returns \c void to help diagnose
+/// errors at compile time.
+template<typename T, typename U>
+void operator!=(const Optional<T> &X, const Optional<U> &Y);
+
+/// \brief Poison comparison between two \c Optional objects. Clients need to
+/// explicitly compare the underlying values and account for empty \c Optional
+/// objects.
+///
+/// This routine will never be defined. It returns \c void to help diagnose
+/// errors at compile time.
+template<typename T, typename U>
+void operator<(const Optional<T> &X, const Optional<U> &Y);
+
+/// \brief Poison comparison between two \c Optional objects. Clients need to
+/// explicitly compare the underlying values and account for empty \c Optional
+/// objects.
+///
+/// This routine will never be defined. It returns \c void to help diagnose
+/// errors at compile time.
+template<typename T, typename U>
+void operator<=(const Optional<T> &X, const Optional<U> &Y);
+
+/// \brief Poison comparison between two \c Optional objects. Clients need to
+/// explicitly compare the underlying values and account for empty \c Optional
+/// objects.
+///
+/// This routine will never be defined. It returns \c void to help diagnose
+/// errors at compile time.
+template<typename T, typename U>
+void operator>=(const Optional<T> &X, const Optional<U> &Y);
+
+/// \brief Poison comparison between two \c Optional objects. Clients need to
+/// explicitly compare the underlying values and account for empty \c Optional
+/// objects.
+///
+/// This routine will never be defined. It returns \c void to help diagnose
+/// errors at compile time.
+template<typename T, typename U>
+void operator>(const Optional<T> &X, const Optional<U> &Y);
+
} // end llvm namespace
#endif
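With the comparison operators poisoned, callers have to compare Optional values explicitly. A sketch of the intended pattern (editorial, not part of the patch); hasValue() and getValue() are assumed accessors from the earlier, unshown part of Optional.h:

#include "llvm/ADT/Optional.h"

template <typename T>
static bool sameValue(const llvm::Optional<T> &X, const llvm::Optional<T> &Y) {
  if (X.hasValue() != Y.hasValue())
    return false;                        // one is set, the other is empty
  return !X.hasValue() || X.getValue() == Y.getValue();
}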
diff --git a/include/llvm/ADT/PointerIntPair.h b/include/llvm/ADT/PointerIntPair.h
index 64f4a7c..85dbba2 100644
--- a/include/llvm/ADT/PointerIntPair.h
+++ b/include/llvm/ADT/PointerIntPair.h
@@ -91,6 +91,13 @@ public:
Value |= IntVal << IntShift; // Set new integer.
}
+ PointerTy const *getAddrOfPointer() const {
+ assert(Value == reinterpret_cast<intptr_t>(getPointer()) &&
+ "Can only return the address if IntBits is cleared and "
+ "PtrTraits doesn't change the pointer");
+ return reinterpret_cast<PointerTy const *>(&Value);
+ }
+
void *getOpaqueValue() const { return reinterpret_cast<void*>(Value); }
void setFromOpaqueValue(void *Val) { Value = reinterpret_cast<intptr_t>(Val);}
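A short sketch (editorial, not part of the patch) of the new getAddrOfPointer() accessor; it is only legal while the integer bits are zero and the pointer traits leave the value unmodified, as the assert above enforces:

#include "llvm/ADT/PointerIntPair.h"

static void pointerIntPairExample(int *P) {
  llvm::PointerIntPair<int*, 1, bool> PIP(P, false);  // int bits must stay clear
  int *const *Slot = PIP.getAddrOfPointer();          // new accessor from this patch
  (void)Slot;                                         // *Slot == P
}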
diff --git a/include/llvm/ADT/PointerUnion.h b/include/llvm/ADT/PointerUnion.h
index 3a514b5..61de042 100644
--- a/include/llvm/ADT/PointerUnion.h
+++ b/include/llvm/ADT/PointerUnion.h
@@ -107,6 +107,18 @@ namespace llvm {
if (is<T>()) return get<T>();
return T();
}
+
+  /// \brief If the union is set to the first pointer type, return an
+  /// address pointing to it.
+ template <typename T>
+ PT1 const *getAddrOf() const {
+ assert(is<PT1>() && "Val is not the first pointer");
+ assert(get<PT1>() == Val.getPointer() &&
+ "Can't get the address because PointerLikeTypeTraits changes the ptr");
+ T const *can_only_get_address_of_first_pointer_type
+ = reinterpret_cast<PT1 const *>(Val.getAddrOfPointer());
+ return can_only_get_address_of_first_pointer_type;
+ }
/// Assignment operators - Allow assigning into this union from either
/// pointer type, setting the discriminator to remember what it came from.
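Similarly, a sketch (editorial, not part of the patch) of getAddrOf(), which is only valid while the union holds its first pointer type:

#include "llvm/ADT/PointerUnion.h"

static void pointerUnionExample(int *IP) {
  llvm::PointerUnion<int*, float*> PU(IP);      // currently holds the first type
  int *const *Slot = PU.getAddrOf<int*>();      // new accessor from this patch
  (void)Slot;                                   // *Slot == IP
}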
diff --git a/include/llvm/ADT/PostOrderIterator.h b/include/llvm/ADT/PostOrderIterator.h
index 47e5b2b..e3b4994 100644
--- a/include/llvm/ADT/PostOrderIterator.h
+++ b/include/llvm/ADT/PostOrderIterator.h
@@ -56,8 +56,7 @@ class po_iterator : public std::iterator<std::forward_iterator_tag,
void traverseChild() {
while (VisitStack.back().second != GT::child_end(VisitStack.back().first)) {
NodeType *BB = *VisitStack.back().second++;
- if (!this->Visited.count(BB)) { // If the block is not visited...
- this->Visited.insert(BB);
+ if (this->Visited.insert(BB)) { // If the block is not visited...
VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
}
}
@@ -72,8 +71,7 @@ class po_iterator : public std::iterator<std::forward_iterator_tag,
inline po_iterator(NodeType *BB, SetType &S) :
po_iterator_storage<SetType, ExtStorage>(S) {
- if(!S.count(BB)) {
- this->Visited.insert(BB);
+ if (this->Visited.insert(BB)) {
VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
traverseChild();
}
diff --git a/include/llvm/ADT/SCCIterator.h b/include/llvm/ADT/SCCIterator.h
index c49d599..3e93cfe 100644
--- a/include/llvm/ADT/SCCIterator.h
+++ b/include/llvm/ADT/SCCIterator.h
@@ -60,7 +60,7 @@ class scc_iterator
// First element is basic block pointer, second is the 'next child' to visit
std::vector<std::pair<NodeType *, ChildItTy> > VisitStack;
- // MinVistNumStack - Stack holding the "min" values for each node in the DFS.
+ // MinVisitNumStack - Stack holding the "min" values for each node in the DFS.
// This is used to track the minimum uplink values for all children of
// the corresponding node on the VisitStack.
std::vector<unsigned> MinVisitNumStack;
diff --git a/include/llvm/ADT/ScopedHashTable.h b/include/llvm/ADT/ScopedHashTable.h
index c96ad19..af3c482 100644
--- a/include/llvm/ADT/ScopedHashTable.h
+++ b/include/llvm/ADT/ScopedHashTable.h
@@ -31,25 +31,23 @@
#ifndef LLVM_ADT_SCOPEDHASHTABLE_H
#define LLVM_ADT_SCOPEDHASHTABLE_H
-#include <cassert>
#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/Allocator.h"
namespace llvm {
-template <typename K, typename V, typename KInfo = DenseMapInfo<K> >
+template <typename K, typename V, typename KInfo = DenseMapInfo<K>,
+ typename AllocatorTy = MallocAllocator>
class ScopedHashTable;
-template <typename K, typename V, typename KInfo = DenseMapInfo<K> >
+template <typename K, typename V>
class ScopedHashTableVal {
ScopedHashTableVal *NextInScope;
ScopedHashTableVal *NextForKey;
K Key;
V Val;
+ ScopedHashTableVal(const K &key, const V &val) : Key(key), Val(val) {}
public:
- ScopedHashTableVal(ScopedHashTableVal *nextInScope,
- ScopedHashTableVal *nextForKey, const K &key, const V &val)
- : NextInScope(nextInScope), NextForKey(nextForKey), Key(key), Val(val) {
- }
const K &getKey() const { return Key; }
const V &getValue() const { return Val; }
@@ -57,33 +55,53 @@ public:
ScopedHashTableVal *getNextForKey() { return NextForKey; }
const ScopedHashTableVal *getNextForKey() const { return NextForKey; }
-public:
ScopedHashTableVal *getNextInScope() { return NextInScope; }
+
+ template <typename AllocatorTy>
+ static ScopedHashTableVal *Create(ScopedHashTableVal *nextInScope,
+ ScopedHashTableVal *nextForKey,
+ const K &key, const V &val,
+ AllocatorTy &Allocator) {
+ ScopedHashTableVal *New = Allocator.template Allocate<ScopedHashTableVal>();
+ // Set up the value.
+ new (New) ScopedHashTableVal(key, val);
+ New->NextInScope = nextInScope;
+ New->NextForKey = nextForKey;
+ return New;
+ }
+
+ template <typename AllocatorTy>
+ void Destroy(AllocatorTy &Allocator) {
+ // Free memory referenced by the item.
+ this->~ScopedHashTableVal();
+ Allocator.Deallocate(this);
+ }
};
-template <typename K, typename V, typename KInfo = DenseMapInfo<K> >
+template <typename K, typename V, typename KInfo = DenseMapInfo<K>,
+ typename AllocatorTy = MallocAllocator>
class ScopedHashTableScope {
/// HT - The hashtable that we are active for.
- ScopedHashTable<K, V, KInfo> &HT;
+ ScopedHashTable<K, V, KInfo, AllocatorTy> &HT;
/// PrevScope - This is the scope that we are shadowing in HT.
ScopedHashTableScope *PrevScope;
/// LastValInScope - This is the last value that was inserted for this scope
/// or null if none have been inserted yet.
- ScopedHashTableVal<K, V, KInfo> *LastValInScope;
+ ScopedHashTableVal<K, V> *LastValInScope;
void operator=(ScopedHashTableScope&); // DO NOT IMPLEMENT
ScopedHashTableScope(ScopedHashTableScope&); // DO NOT IMPLEMENT
public:
- ScopedHashTableScope(ScopedHashTable<K, V, KInfo> &HT);
+ ScopedHashTableScope(ScopedHashTable<K, V, KInfo, AllocatorTy> &HT);
~ScopedHashTableScope();
private:
- friend class ScopedHashTable<K, V, KInfo>;
- ScopedHashTableVal<K, V, KInfo> *getLastValInScope() {
+ friend class ScopedHashTable<K, V, KInfo, AllocatorTy>;
+ ScopedHashTableVal<K, V> *getLastValInScope() {
return LastValInScope;
}
- void setLastValInScope(ScopedHashTableVal<K, V, KInfo> *Val) {
+ void setLastValInScope(ScopedHashTableVal<K, V> *Val) {
LastValInScope = Val;
}
};
@@ -91,9 +109,9 @@ private:
template <typename K, typename V, typename KInfo = DenseMapInfo<K> >
class ScopedHashTableIterator {
- ScopedHashTableVal<K, V, KInfo> *Node;
+ ScopedHashTableVal<K, V> *Node;
public:
- ScopedHashTableIterator(ScopedHashTableVal<K, V, KInfo> *node) : Node(node) {}
+ ScopedHashTableIterator(ScopedHashTableVal<K, V> *node) : Node(node) {}
V &operator*() const {
assert(Node && "Dereference end()");
@@ -121,26 +139,40 @@ public:
};
-template <typename K, typename V, typename KInfo>
+template <typename K, typename V, typename KInfo, typename AllocatorTy>
class ScopedHashTable {
- DenseMap<K, ScopedHashTableVal<K, V, KInfo>*, KInfo> TopLevelMap;
- ScopedHashTableScope<K, V, KInfo> *CurScope;
+ typedef ScopedHashTableVal<K, V> ValTy;
+ DenseMap<K, ValTy*, KInfo> TopLevelMap;
+ ScopedHashTableScope<K, V, KInfo, AllocatorTy> *CurScope;
+
+ AllocatorTy Allocator;
+
ScopedHashTable(const ScopedHashTable&); // NOT YET IMPLEMENTED
void operator=(const ScopedHashTable&); // NOT YET IMPLEMENTED
- friend class ScopedHashTableScope<K, V, KInfo>;
+ friend class ScopedHashTableScope<K, V, KInfo, AllocatorTy>;
public:
ScopedHashTable() : CurScope(0) {}
+ ScopedHashTable(AllocatorTy A) : CurScope(0), Allocator(A) {}
~ScopedHashTable() {
assert(CurScope == 0 && TopLevelMap.empty() && "Scope imbalance!");
}
+
+ /// ScopeTy - This is a helpful typedef that allows clients to get easy access
+ /// to the name of the scope for this hash table.
+ typedef ScopedHashTableScope<K, V, KInfo, AllocatorTy> ScopeTy;
+
+ /// Access to the allocator.
+ typedef typename ReferenceAdder<AllocatorTy>::result AllocatorRefTy;
+ typedef typename ReferenceAdder<const AllocatorTy>::result AllocatorCRefTy;
+ AllocatorRefTy getAllocator() { return Allocator; }
+ AllocatorCRefTy getAllocator() const { return Allocator; }
bool count(const K &Key) const {
return TopLevelMap.count(Key);
}
V lookup(const K &Key) {
- typename DenseMap<K, ScopedHashTableVal<K, V, KInfo>*, KInfo>::iterator
- I = TopLevelMap.find(Key);
+ typename DenseMap<K, ValTy*, KInfo>::iterator I = TopLevelMap.find(Key);
if (I != TopLevelMap.end())
return I->second->getValue();
@@ -150,10 +182,10 @@ public:
void insert(const K &Key, const V &Val) {
assert(CurScope && "No scope active!");
- ScopedHashTableVal<K, V, KInfo> *&KeyEntry = TopLevelMap[Key];
+ ScopedHashTableVal<K, V> *&KeyEntry = TopLevelMap[Key];
- KeyEntry= new ScopedHashTableVal<K, V, KInfo>(CurScope->getLastValInScope(),
- KeyEntry, Key, Val);
+ KeyEntry = ValTy::Create(CurScope->getLastValInScope(), KeyEntry, Key, Val,
+ Allocator);
CurScope->setLastValInScope(KeyEntry);
}
@@ -162,7 +194,7 @@ public:
iterator end() { return iterator(0); }
iterator begin(const K &Key) {
- typename DenseMap<K, ScopedHashTableVal<K, V, KInfo>*, KInfo>::iterator I =
+ typename DenseMap<K, ValTy*, KInfo>::iterator I =
TopLevelMap.find(Key);
if (I == TopLevelMap.end()) return end();
return iterator(I->second);
@@ -171,29 +203,28 @@ public:
/// ScopedHashTableScope ctor - Install this as the current scope for the hash
/// table.
-template <typename K, typename V, typename KInfo>
-ScopedHashTableScope<K, V, KInfo>::
- ScopedHashTableScope(ScopedHashTable<K, V, KInfo> &ht) : HT(ht) {
+template <typename K, typename V, typename KInfo, typename Allocator>
+ScopedHashTableScope<K, V, KInfo, Allocator>::
+ ScopedHashTableScope(ScopedHashTable<K, V, KInfo, Allocator> &ht) : HT(ht) {
PrevScope = HT.CurScope;
HT.CurScope = this;
LastValInScope = 0;
}
-template <typename K, typename V, typename KInfo>
-ScopedHashTableScope<K, V, KInfo>::~ScopedHashTableScope() {
+template <typename K, typename V, typename KInfo, typename Allocator>
+ScopedHashTableScope<K, V, KInfo, Allocator>::~ScopedHashTableScope() {
assert(HT.CurScope == this && "Scope imbalance!");
HT.CurScope = PrevScope;
// Pop and delete all values corresponding to this scope.
- while (ScopedHashTableVal<K, V, KInfo> *ThisEntry = LastValInScope) {
+ while (ScopedHashTableVal<K, V> *ThisEntry = LastValInScope) {
// Pop this value out of the TopLevelMap.
if (ThisEntry->getNextForKey() == 0) {
assert(HT.TopLevelMap[ThisEntry->getKey()] == ThisEntry &&
"Scope imbalance!");
HT.TopLevelMap.erase(ThisEntry->getKey());
} else {
- ScopedHashTableVal<K, V, KInfo> *&KeyEntry =
- HT.TopLevelMap[ThisEntry->getKey()];
+ ScopedHashTableVal<K, V> *&KeyEntry = HT.TopLevelMap[ThisEntry->getKey()];
assert(KeyEntry == ThisEntry && "Scope imbalance!");
KeyEntry = ThisEntry->getNextForKey();
}
@@ -202,7 +233,7 @@ ScopedHashTableScope<K, V, KInfo>::~ScopedHashTableScope() {
LastValInScope = ThisEntry->getNextInScope();
// Delete this entry.
- delete ThisEntry;
+ ThisEntry->Destroy(HT.getAllocator());
}
}
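A usage sketch for the reworked, allocator-aware ScopedHashTable (editorial, not part of the patch), using the default MallocAllocator and the new ScopeTy typedef:

#include "llvm/ADT/ScopedHashTable.h"
#include <cassert>

static void scopedTableExample() {
  typedef llvm::ScopedHashTable<unsigned, unsigned> TableTy;
  TableTy Table;
  TableTy::ScopeTy Outer(Table);
  Table.insert(1, 10);
  {
    TableTy::ScopeTy Inner(Table);
    Table.insert(1, 20);               // shadows the outer binding
    assert(Table.lookup(1) == 20);
  }                                    // Inner pops; its entries are destroyed
  assert(Table.lookup(1) == 10);       // the outer binding is visible again
}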
diff --git a/include/llvm/ADT/SetVector.h b/include/llvm/ADT/SetVector.h
index bf8286c..abe2067 100644
--- a/include/llvm/ADT/SetVector.h
+++ b/include/llvm/ADT/SetVector.h
@@ -114,13 +114,15 @@ public:
}
/// @brief Remove an item from the set vector.
- void remove(const value_type& X) {
+ bool remove(const value_type& X) {
if (set_.erase(X)) {
typename vector_type::iterator I =
std::find(vector_.begin(), vector_.end(), X);
assert(I != vector_.end() && "Corrupted SetVector instances!");
vector_.erase(I);
+ return true;
}
+ return false;
}
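A minimal sketch (editorial, not part of the patch) of the new bool return value of remove():

#include "llvm/ADT/SetVector.h"

static void setVectorExample() {
  llvm::SetVector<int> SV;
  SV.insert(3);
  bool Erased = SV.remove(3);   // true: 3 was present and has been removed
  bool Again  = SV.remove(3);   // false: nothing left to remove
  (void)Erased; (void)Again;
}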
diff --git a/include/llvm/ADT/SmallBitVector.h b/include/llvm/ADT/SmallBitVector.h
index 3441d0a..b15b3ee 100644
--- a/include/llvm/ADT/SmallBitVector.h
+++ b/include/llvm/ADT/SmallBitVector.h
@@ -187,6 +187,13 @@ public:
return getPointer()->any();
}
+ /// all - Returns true if all bits are set.
+ bool all() const {
+ if (isSmall())
+ return getSmallBits() == (uintptr_t(1) << getSmallSize()) - 1;
+ return getPointer()->all();
+ }
+
/// none - Returns true if none of the bits are set.
bool none() const {
if (isSmall())
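A minimal sketch (editorial, not part of the patch) of the new all() query:

#include "llvm/ADT/SmallBitVector.h"

static bool allExample() {
  llvm::SmallBitVector BV(4, true);   // four bits, all set
  bool WasAll = BV.all();             // true
  BV.reset(2);
  return WasAll && !BV.all();         // all() is false after clearing bit 2
}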
diff --git a/include/llvm/ADT/SmallPtrSet.h b/include/llvm/ADT/SmallPtrSet.h
index 424bdba..ff32ba8 100644
--- a/include/llvm/ADT/SmallPtrSet.h
+++ b/include/llvm/ADT/SmallPtrSet.h
@@ -16,9 +16,10 @@
#define LLVM_ADT_SMALLPTRSET_H
#include <cassert>
+#include <cstddef>
#include <cstring>
#include <iterator>
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
namespace llvm {
@@ -56,7 +57,7 @@ protected:
/// it, so that the end iterator actually points to valid memory.
unsigned CurArraySize;
- // If small, this is # elts allocated consequtively
+ // If small, this is # elts allocated consecutively
unsigned NumElements;
unsigned NumTombstones;
diff --git a/include/llvm/ADT/SmallString.h b/include/llvm/ADT/SmallString.h
index 05bd8a4..da26416 100644
--- a/include/llvm/ADT/SmallString.h
+++ b/include/llvm/ADT/SmallString.h
@@ -27,6 +27,9 @@ public:
// Default ctor - Initialize to empty.
SmallString() {}
+ // Initialize from a StringRef.
+ SmallString(StringRef S) : SmallVector<char, InternalLen>(S.begin(), S.end()) {}
+
// Initialize with a range.
template<typename ItTy>
SmallString(ItTy S, ItTy E) : SmallVector<char, InternalLen>(S, E) {}
@@ -38,15 +41,16 @@ public:
// Extra methods.
StringRef str() const { return StringRef(this->begin(), this->size()); }
- // Implicit conversion to StringRef.
- operator StringRef() const { return str(); }
-
- const char *c_str() {
+ // TODO: Make this const, if it's safe...
+ const char* c_str() {
this->push_back(0);
this->pop_back();
return this->data();
}
+ // Implicit conversion to StringRef.
+ operator StringRef() const { return str(); }
+
// Extra operators.
const SmallString &operator=(StringRef RHS) {
this->clear();
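A minimal sketch (editorial, not part of the patch) of the new StringRef constructor together with c_str():

#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include <cassert>
#include <cstring>

static void smallStringExample() {
  llvm::StringRef Name("hello");
  llvm::SmallString<16> Buf(Name);    // construct straight from a StringRef
  const char *C = Buf.c_str();        // NUL-terminated view of the same buffer
  assert(std::strcmp(C, "hello") == 0);
}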
diff --git a/include/llvm/ADT/SmallVector.h b/include/llvm/ADT/SmallVector.h
index fec6bcd..8b0a13d 100644
--- a/include/llvm/ADT/SmallVector.h
+++ b/include/llvm/ADT/SmallVector.h
@@ -20,6 +20,7 @@
#include <cstddef>
#include <cstdlib>
#include <cstring>
+#include <iterator>
#include <memory>
#ifdef _MSC_VER
@@ -57,19 +58,13 @@ protected:
// Allocate raw space for N elements of type T. If T has a ctor or dtor, we
// don't want it to be automatically run, so we need to represent the space as
// something else. An array of char would work great, but might not be
- // aligned sufficiently. Instead, we either use GCC extensions, or some
- // number of union instances for the space, which guarantee maximal alignment.
- struct U {
-#ifdef __GNUC__
- char X __attribute__((aligned));
-#else
- union {
- double D;
- long double LD;
- long long L;
- void *P;
- } X;
-#endif
+ // aligned sufficiently. Instead we use some number of union instances for
+ // the space, which guarantee maximal alignment.
+ union U {
+ double D;
+ long double LD;
+ long long L;
+ void *P;
} FirstEl;
// Space after 'FirstEl' is clobbered, do not add any instance vars after it.
@@ -94,7 +89,7 @@ protected:
}
/// grow_pod - This is an implementation of the grow() method which only works
- /// on POD-like datatypes and is out of line to reduce code duplication.
+ /// on POD-like data types and is out of line to reduce code duplication.
void grow_pod(size_t MinSizeInBytes, size_t TSize);
public:
@@ -269,7 +264,7 @@ public:
template <typename T>
class SmallVectorImpl : public SmallVectorTemplateBase<T, isPodLike<T>::value> {
typedef SmallVectorTemplateBase<T, isPodLike<T>::value > SuperClass;
-
+
SmallVectorImpl(const SmallVectorImpl&); // DISABLED.
public:
typedef typename SuperClass::iterator iterator;
@@ -346,7 +341,6 @@ public:
return Result;
}
-
void swap(SmallVectorImpl &RHS);
/// append - Add the specified range to the end of the SmallVector.
diff --git a/include/llvm/ADT/SparseBitVector.h b/include/llvm/ADT/SparseBitVector.h
index 0862981..d977136 100644
--- a/include/llvm/ADT/SparseBitVector.h
+++ b/include/llvm/ADT/SparseBitVector.h
@@ -17,7 +17,7 @@
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/ilist_node.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
diff --git a/include/llvm/ADT/Statistic.h b/include/llvm/ADT/Statistic.h
index 3a1319f..f137ea2 100644
--- a/include/llvm/ADT/Statistic.h
+++ b/include/llvm/ADT/Statistic.h
@@ -26,7 +26,7 @@
#ifndef LLVM_ADT_STATISTIC_H
#define LLVM_ADT_STATISTIC_H
-#include "llvm/System/Atomic.h"
+#include "llvm/Support/Atomic.h"
namespace llvm {
class raw_ostream;
diff --git a/include/llvm/ADT/StringExtras.h b/include/llvm/ADT/StringExtras.h
index 3c53ade..acbed66 100644
--- a/include/llvm/ADT/StringExtras.h
+++ b/include/llvm/ADT/StringExtras.h
@@ -14,7 +14,7 @@
#ifndef LLVM_ADT_STRINGEXTRAS_H
#define LLVM_ADT_STRINGEXTRAS_H
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/StringRef.h"
#include <cctype>
@@ -25,10 +25,11 @@
namespace llvm {
template<typename T> class SmallVectorImpl;
-/// hexdigit - Return the (uppercase) hexadecimal character for the
+/// hexdigit - Return the hexadecimal character for the
/// given number \arg X (which should be less than 16).
-static inline char hexdigit(unsigned X) {
- return X < 10 ? '0' + X : 'A' + X - 10;
+static inline char hexdigit(unsigned X, bool LowerCase = false) {
+ const char HexChar = LowerCase ? 'a' : 'A';
+ return X < 10 ? '0' + X : HexChar + X - 10;
}
/// utohex_buffer - Emit the specified number into the buffer specified by
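A minimal sketch (editorial, not part of the patch) of the new LowerCase flag:

#include "llvm/ADT/StringExtras.h"
#include <cassert>

static void hexdigitExample() {
  assert(llvm::hexdigit(12) == 'C');        // default remains uppercase
  assert(llvm::hexdigit(12, true) == 'c');  // lowercase on request
}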
diff --git a/include/llvm/ADT/StringMap.h b/include/llvm/ADT/StringMap.h
index 59ff6aa..bad0e6f 100644
--- a/include/llvm/ADT/StringMap.h
+++ b/include/llvm/ADT/StringMap.h
@@ -137,8 +137,8 @@ public:
StringMapEntry(unsigned strLen, const ValueTy &V)
: StringMapEntryBase(strLen), second(V) {}
- StringRef getKey() const {
- return StringRef(getKeyData(), getKeyLength());
+ StringRef getKey() const {
+ return StringRef(getKeyData(), getKeyLength());
}
const ValueTy &getValue() const { return second; }
@@ -167,7 +167,7 @@ public:
unsigned AllocSize = static_cast<unsigned>(sizeof(StringMapEntry))+
KeyLength+1;
- unsigned Alignment = alignof<StringMapEntry>();
+ unsigned Alignment = alignOf<StringMapEntry>();
StringMapEntry *NewItem =
static_cast<StringMapEntry*>(Allocator.Allocate(AllocSize,Alignment));
@@ -216,14 +216,14 @@ public:
static const StringMapEntry &GetStringMapEntryFromValue(const ValueTy &V) {
return GetStringMapEntryFromValue(const_cast<ValueTy&>(V));
}
-
+
/// GetStringMapEntryFromKeyData - Given key data that is known to be embedded
/// into a StringMapEntry, return the StringMapEntry itself.
static StringMapEntry &GetStringMapEntryFromKeyData(const char *KeyData) {
char *Ptr = const_cast<char*>(KeyData) - sizeof(StringMapEntry<ValueTy>);
return *reinterpret_cast<StringMapEntry*>(Ptr);
}
-
+
/// Destroy - Destroy this StringMapEntry, releasing memory back to the
/// specified allocator.
@@ -254,7 +254,7 @@ public:
StringMap() : StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))) {}
explicit StringMap(unsigned InitialSize)
: StringMapImpl(InitialSize, static_cast<unsigned>(sizeof(MapEntryTy))) {}
-
+
explicit StringMap(AllocatorTy A)
: StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))), Allocator(A) {}
@@ -262,16 +262,19 @@ public:
: StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))) {
assert(RHS.empty() &&
"Copy ctor from non-empty stringmap not implemented yet!");
+ (void)RHS;
}
void operator=(const StringMap &RHS) {
assert(RHS.empty() &&
"assignment from non-empty stringmap not implemented yet!");
+ (void)RHS;
clear();
}
-
- AllocatorTy &getAllocator() { return Allocator; }
- const AllocatorTy &getAllocator() const { return Allocator; }
+ typedef typename ReferenceAdder<AllocatorTy>::result AllocatorRefTy;
+ typedef typename ReferenceAdder<const AllocatorTy>::result AllocatorCRefTy;
+ AllocatorRefTy getAllocator() { return Allocator; }
+ AllocatorCRefTy getAllocator() const { return Allocator; }
typedef const char* key_type;
typedef ValueTy mapped_type;
diff --git a/include/llvm/ADT/StringRef.h b/include/llvm/ADT/StringRef.h
index 8386d3e..1766d2b 100644
--- a/include/llvm/ADT/StringRef.h
+++ b/include/llvm/ADT/StringRef.h
@@ -132,7 +132,7 @@ namespace llvm {
/// numbers.
int compare_numeric(StringRef RHS) const;
- /// \brief Determine the edit distance between this string and another
+ /// \brief Determine the edit distance between this string and another
/// string.
///
/// \param Other the string to compare this string against.
@@ -142,11 +142,16 @@ namespace llvm {
/// operation, rather than as two operations (an insertion and a
/// removal).
///
+ /// \param MaxEditDistance If non-zero, the maximum edit distance that
+ /// this routine is allowed to compute. If the edit distance will exceed
+ /// that maximum, returns \c MaxEditDistance+1.
+ ///
/// \returns the minimum number of character insertions, removals,
/// or (if \p AllowReplacements is \c true) replacements needed to
/// transform one of the given strings into the other. If zero,
/// the strings are identical.
- unsigned edit_distance(StringRef Other, bool AllowReplacements = true);
+ unsigned edit_distance(StringRef Other, bool AllowReplacements = true,
+ unsigned MaxEditDistance = 0);
/// str - Get the contents as an std::string.
std::string str() const {
@@ -251,6 +256,18 @@ namespace llvm {
/// Note: O(size() + Chars.size())
size_type find_first_not_of(StringRef Chars, size_t From = 0) const;
+ /// find_last_of - Find the last character in the string that is \arg C, or
+ /// npos if not found.
+ size_type find_last_of(char C, size_t From = npos) const {
+ return rfind(C, From);
+ }
+
+ /// find_last_of - Find the last character in the string that is in \arg C,
+ /// or npos if not found.
+ ///
+ /// Note: O(size() + Chars.size())
+ size_type find_last_of(StringRef Chars, size_t From = npos) const;
+
/// @}
/// @name Helpful Algorithms
/// @{
@@ -432,6 +449,10 @@ namespace llvm {
/// @}
+ // StringRefs can be treated like a POD type.
+ template <typename T> struct isPodLike;
+ template <> struct isPodLike<StringRef> { static const bool value = true; };
+
}
#endif
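A sketch (editorial, not part of the patch) of the bounded edit_distance() and the new find_last_of():

#include "llvm/ADT/StringRef.h"
#include <cassert>

static void stringRefExample() {
  llvm::StringRef A("color");
  unsigned D = A.edit_distance("colour", /*AllowReplacements=*/true,
                               /*MaxEditDistance=*/2);
  assert(D == 1);                       // one insertion; within the given bound

  llvm::StringRef Path("a.b.c");
  assert(Path.find_last_of('.') == 3);  // index of the final '.'
}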
diff --git a/include/llvm/ADT/Triple.h b/include/llvm/ADT/Triple.h
index 8dca3c1..e6dcc23 100644
--- a/include/llvm/ADT/Triple.h
+++ b/include/llvm/ADT/Triple.h
@@ -45,7 +45,7 @@ class Triple {
public:
enum ArchType {
UnknownArch,
-
+
alpha, // Alpha: alpha
arm, // ARM; arm, armv.*, xscale
bfin, // Blackfin: bfin
@@ -53,7 +53,6 @@ public:
mips, // MIPS: mips, mipsallegrex
mipsel, // MIPSEL: mipsel, mipsallegrexel, psp
msp430, // MSP430: msp430
- pic16, // PIC16: pic16
ppc, // PPC: powerpc
ppc64, // PPC64: powerpc64, ppu
sparc, // Sparc: sparc
@@ -65,13 +64,14 @@ public:
x86_64, // X86-64: amd64, x86_64
xcore, // XCore: xcore
mblaze, // MBlaze: mblaze
+ ptx, // PTX: ptx
InvalidArch
};
enum VendorType {
UnknownVendor,
- Apple,
+ Apple,
PC
};
enum OSType {
@@ -84,8 +84,7 @@ public:
FreeBSD,
Linux,
Lv2, // PS3
- MinGW32,
- MinGW64,
+ MinGW32, // i*86-pc-mingw32, *-w64-mingw32
NetBSD,
OpenBSD,
Psp,
@@ -94,7 +93,15 @@ public:
Haiku,
Minix
};
-
+ enum EnvironmentType {
+ UnknownEnvironment,
+
+ GNU,
+ GNUEABI,
+ EABI,
+ MachO
+ };
+
private:
std::string Data;
@@ -107,16 +114,20 @@ private:
/// The parsed OS type.
mutable OSType OS;
+ /// The parsed Environment type.
+ mutable EnvironmentType Environment;
+
bool isInitialized() const { return Arch != InvalidArch; }
static ArchType ParseArch(StringRef ArchName);
static VendorType ParseVendor(StringRef VendorName);
static OSType ParseOS(StringRef OSName);
+ static EnvironmentType ParseEnvironment(StringRef EnvironmentName);
void Parse() const;
public:
/// @name Constructors
/// @{
-
+
Triple() : Data(), Arch(InvalidArch) {}
explicit Triple(StringRef Str) : Data(Str), Arch(InvalidArch) {}
explicit Triple(StringRef ArchStr, StringRef VendorStr, StringRef OSStr)
@@ -127,6 +138,17 @@ public:
Data += OSStr;
}
+ explicit Triple(StringRef ArchStr, StringRef VendorStr, StringRef OSStr,
+ StringRef EnvironmentStr)
+ : Data(ArchStr), Arch(InvalidArch) {
+ Data += '-';
+ Data += VendorStr;
+ Data += '-';
+ Data += OSStr;
+ Data += '-';
+ Data += EnvironmentStr;
+ }
+
/// @}
/// @name Normalization
/// @{
@@ -140,22 +162,22 @@ public:
/// @}
/// @name Typed Component Access
/// @{
-
+
/// getArch - Get the parsed architecture type of this triple.
- ArchType getArch() const {
- if (!isInitialized()) Parse();
+ ArchType getArch() const {
+ if (!isInitialized()) Parse();
return Arch;
}
-
+
/// getVendor - Get the parsed vendor type of this triple.
- VendorType getVendor() const {
- if (!isInitialized()) Parse();
+ VendorType getVendor() const {
+ if (!isInitialized()) Parse();
return Vendor;
}
-
+
/// getOS - Get the parsed operating system type of this triple.
- OSType getOS() const {
- if (!isInitialized()) Parse();
+ OSType getOS() const {
+ if (!isInitialized()) Parse();
return OS;
}
@@ -165,6 +187,12 @@ public:
return getEnvironmentName() != "";
}
+ /// getEnvironment - Get the parsed environment type of this triple.
+ EnvironmentType getEnvironment() const {
+ if (!isInitialized()) Parse();
+ return Environment;
+ }
+
/// @}
/// @name Direct Component Access
/// @{
@@ -193,13 +221,13 @@ public:
/// if the environment component is present).
StringRef getOSAndEnvironmentName() const;
-
+
/// getDarwinNumber - Parse the 'darwin number' out of the specific target
/// triple. For example, if we have darwin8.5 return 8,5,0. If any entry is
/// not defined, return 0's. This requires that the triple have an OSType of
/// darwin before it is called.
void getDarwinNumber(unsigned &Maj, unsigned &Min, unsigned &Revision) const;
-
+
/// getDarwinMajorNumber - Return just the major version number, this is
/// specialized because it is a common query.
unsigned getDarwinMajorNumber() const {
@@ -207,7 +235,7 @@ public:
getDarwinNumber(Maj, Min, Rev);
return Maj;
}
-
+
/// @}
/// @name Mutators
/// @{
@@ -224,6 +252,10 @@ public:
/// to a known type.
void setOS(OSType Kind);
+ /// setEnvironment - Set the environment (fourth) component of the triple
+ /// to a known type.
+ void setEnvironment(EnvironmentType Kind);
+
/// setTriple - Set all components to the new triple \arg Str.
void setTriple(const Twine &Str);
@@ -271,9 +303,14 @@ public:
/// vendor.
static const char *getVendorTypeName(VendorType Kind);
- /// getOSTypeName - Get the canonical name for the \arg Kind vendor.
+ /// getOSTypeName - Get the canonical name for the \arg Kind operating
+ /// system.
static const char *getOSTypeName(OSType Kind);
+ /// getEnvironmentTypeName - Get the canonical name for the \arg Kind
+ /// environment.
+ static const char *getEnvironmentTypeName(EnvironmentType Kind);
+
/// @}
/// @name Static helpers for converting alternate architecture names.
/// @{
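A sketch (editorial, not part of the patch) of the four-component constructor and the environment accessor. Whether the string "gnueabi" parses to GNUEABI depends on the accompanying Triple.cpp changes and is assumed here:

#include "llvm/ADT/Triple.h"

static bool targetsGNUEABI() {
  llvm::Triple T("arm", "unknown", "linux", "gnueabi");
  return T.getEnvironment() == llvm::Triple::GNUEABI;
}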
diff --git a/include/llvm/ADT/Twine.h b/include/llvm/ADT/Twine.h
index b519a3e..ab8d365 100644
--- a/include/llvm/ADT/Twine.h
+++ b/include/llvm/ADT/Twine.h
@@ -11,7 +11,7 @@
#define LLVM_ADT_TWINE_H
#include "llvm/ADT/StringRef.h"
-#include "llvm/System/DataTypes.h"
+#include "llvm/Support/DataTypes.h"
#include <cassert>
#include <string>
@@ -42,7 +42,7 @@ namespace llvm {
/// Twines support a special 'null' value, which always concatenates to form
/// itself, and renders as an empty string. This can be returned from APIs to
/// effectively nullify any concatenations performed on the result.
- ///
+ ///
/// \b Implementation \n
///
/// Given the nature of a Twine, it is not possible for the Twine's
@@ -99,7 +99,7 @@ namespace llvm {
/// A pointer to a StringRef instance.
StringRefKind,
- /// An unsigned int value reinterpreted as a pointer, to render as an
+ /// An unsigned int value reinterpreted as a pointer, to render as an
/// unsigned decimal integer.
DecUIKind,
@@ -260,32 +260,32 @@ namespace llvm {
}
/// Construct a twine to print \arg Val as an unsigned decimal integer.
- explicit Twine(unsigned Val)
+ explicit Twine(unsigned Val)
: LHS((void*)(intptr_t)Val), LHSKind(DecUIKind), RHSKind(EmptyKind) {
}
/// Construct a twine to print \arg Val as a signed decimal integer.
- explicit Twine(int Val)
+ explicit Twine(int Val)
: LHS((void*)(intptr_t)Val), LHSKind(DecIKind), RHSKind(EmptyKind) {
}
/// Construct a twine to print \arg Val as an unsigned decimal integer.
- explicit Twine(const unsigned long &Val)
+ explicit Twine(const unsigned long &Val)
: LHS(&Val), LHSKind(DecULKind), RHSKind(EmptyKind) {
}
/// Construct a twine to print \arg Val as a signed decimal integer.
- explicit Twine(const long &Val)
+ explicit Twine(const long &Val)
: LHS(&Val), LHSKind(DecLKind), RHSKind(EmptyKind) {
}
/// Construct a twine to print \arg Val as an unsigned decimal integer.
- explicit Twine(const unsigned long long &Val)
+ explicit Twine(const unsigned long long &Val)
: LHS(&Val), LHSKind(DecULLKind), RHSKind(EmptyKind) {
}
/// Construct a twine to print \arg Val as a signed decimal integer.
- explicit Twine(const long long &Val)
+ explicit Twine(const long long &Val)
: LHS(&Val), LHSKind(DecLLKind), RHSKind(EmptyKind) {
}
@@ -330,12 +330,12 @@ namespace llvm {
bool isTriviallyEmpty() const {
return isNullary();
}
-
+
/// isSingleStringRef - Return true if this twine can be dynamically
/// accessed as a single StringRef value with getSingleStringRef().
bool isSingleStringRef() const {
if (getRHSKind() != EmptyKind) return false;
-
+
switch (getLHSKind()) {
case EmptyKind:
case CStringKind:
@@ -382,6 +382,14 @@ namespace llvm {
/// SmallVector and a StringRef to the SmallVector's data is returned.
StringRef toStringRef(SmallVectorImpl<char> &Out) const;
+ /// toNullTerminatedStringRef - This returns the twine as a single null
+ /// terminated StringRef if it can be represented as such. Otherwise the
+ /// twine is written into the given SmallVector and a StringRef to the
+ /// SmallVector's data is returned.
+ ///
+ /// The returned StringRef's size does not include the null terminator.
+ StringRef toNullTerminatedStringRef(SmallVectorImpl<char> &Out) const;
+
/// print - Write the concatenated string represented by this twine to the
/// stream \arg OS.
void print(raw_ostream &OS) const;
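A sketch (editorial, not part of the patch) of toNullTerminatedStringRef(), handy when passing a concatenated name to a C API:

#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"

static void twineExample(llvm::StringRef Dir, llvm::StringRef File) {
  llvm::SmallString<128> Storage;
  llvm::StringRef S =
      (llvm::Twine(Dir) + "/" + File).toNullTerminatedStringRef(Storage);
  // S.data() is NUL-terminated; S.size() does not count the terminator.
  (void)S;
}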
diff --git a/include/llvm/ADT/ValueMap.h b/include/llvm/ADT/ValueMap.h
index ded17fc..d1f4e5a 100644
--- a/include/llvm/ADT/ValueMap.h
+++ b/include/llvm/ADT/ValueMap.h
@@ -29,7 +29,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/type_traits.h"
-#include "llvm/System/Mutex.h"
+#include "llvm/Support/Mutex.h"
#include <iterator>
diff --git a/include/llvm/ADT/ilist.h b/include/llvm/ADT/ilist.h
index 4e3afe1..865fcb3 100644
--- a/include/llvm/ADT/ilist.h
+++ b/include/llvm/ADT/ilist.h
@@ -38,6 +38,7 @@
#ifndef LLVM_ADT_ILIST_H
#define LLVM_ADT_ILIST_H
+#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>