Diffstat (limited to 'lib/Support')
29 files changed, 627 insertions, 386 deletions
diff --git a/lib/Support/APFloat.cpp b/lib/Support/APFloat.cpp index ed261a4..7e8b4a3 100644 --- a/lib/Support/APFloat.cpp +++ b/lib/Support/APFloat.cpp @@ -46,22 +46,27 @@ namespace llvm { /* Number of bits in the significand. This includes the integer bit. */ unsigned int precision; - - /* True if arithmetic is supported. */ - unsigned int arithmeticOK; }; - const fltSemantics APFloat::IEEEhalf = { 15, -14, 11, true }; - const fltSemantics APFloat::IEEEsingle = { 127, -126, 24, true }; - const fltSemantics APFloat::IEEEdouble = { 1023, -1022, 53, true }; - const fltSemantics APFloat::IEEEquad = { 16383, -16382, 113, true }; - const fltSemantics APFloat::x87DoubleExtended = { 16383, -16382, 64, true }; - const fltSemantics APFloat::Bogus = { 0, 0, 0, true }; - - // The PowerPC format consists of two doubles. It does not map cleanly - // onto the usual format above. For now only storage of constants of - // this type is supported, no arithmetic. - const fltSemantics APFloat::PPCDoubleDouble = { 1023, -1022, 106, false }; + const fltSemantics APFloat::IEEEhalf = { 15, -14, 11 }; + const fltSemantics APFloat::IEEEsingle = { 127, -126, 24 }; + const fltSemantics APFloat::IEEEdouble = { 1023, -1022, 53 }; + const fltSemantics APFloat::IEEEquad = { 16383, -16382, 113 }; + const fltSemantics APFloat::x87DoubleExtended = { 16383, -16382, 64 }; + const fltSemantics APFloat::Bogus = { 0, 0, 0 }; + + /* The PowerPC format consists of two doubles. It does not map cleanly + onto the usual format above. It is approximated using twice the + mantissa bits. Note that for exponents near the double minimum, + we no longer can represent the full 106 mantissa bits, so those + will be treated as denormal numbers. + + FIXME: While this approximation is equivalent to what GCC uses for + compile-time arithmetic on PPC double-double numbers, it is not able + to represent all possible values held by a PPC double-double number, + for example: (long double) 1.0 + (long double) 0x1p-106 + Should this be replaced by a full emulation of PPC double-double? */ + const fltSemantics APFloat::PPCDoubleDouble = { 1023, -1022 + 53, 53 + 53 }; /* A tight upper bound on number of parts required to hold the value pow(5, power) is @@ -116,12 +121,6 @@ hexDigitValue(unsigned int c) return -1U; } -static inline void -assertArithmeticOK(const llvm::fltSemantics &semantics) { - assert(semantics.arithmeticOK && - "Compile-time arithmetic does not support these semantics"); -} - /* Return the value of a decimal exponent of the form [+-]ddddddd. 
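(Editorial aside, not part of the patch.) The three fields left in fltSemantics are the largest and smallest normalized exponent and the significand width including the integer bit, so the table entry IEEEdouble = { 1023, -1022, 53 } already pins down quantities such as machine epsilon and the smallest normal. A minimal standalone sketch using only those two numbers; the variable names are mine:

  #include <cmath>
  #include <cstdio>

  int main() {
    // IEEEdouble per the table above: minExponent = -1022, precision = 53.
    const int minExponent = -1022, precision = 53;
    double eps = std::ldexp(1.0, 1 - precision);           // 2^-52, gap from 1.0 to the next double
    double smallestNormal = std::ldexp(1.0, minExponent);  // 2^-1022
    std::printf("eps=%a  smallest normal=%a\n", eps, smallestNormal);
  }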
@@ -196,8 +195,10 @@ totalExponent(StringRef::iterator p, StringRef::iterator end, assert(value < 10U && "Invalid character in exponent"); unsignedExponent = unsignedExponent * 10 + value; - if (unsignedExponent > 32767) + if (unsignedExponent > 32767) { overflow = true; + break; + } } if (exponentAdjustment > 32767 || exponentAdjustment < -32768) @@ -610,8 +611,6 @@ APFloat::assign(const APFloat &rhs) sign = rhs.sign; category = rhs.category; exponent = rhs.exponent; - sign2 = rhs.sign2; - exponent2 = rhs.exponent2; if (category == fcNormal || category == fcNaN) copySignificand(rhs); } @@ -705,16 +704,10 @@ APFloat::bitwiseIsEqual(const APFloat &rhs) const { category != rhs.category || sign != rhs.sign) return false; - if (semantics==(const llvm::fltSemantics*)&PPCDoubleDouble && - sign2 != rhs.sign2) - return false; if (category==fcZero || category==fcInfinity) return true; else if (category==fcNormal && exponent!=rhs.exponent) return false; - else if (semantics==(const llvm::fltSemantics*)&PPCDoubleDouble && - exponent2!=rhs.exponent2) - return false; else { int i= partCount(); const integerPart* p=significandParts(); @@ -727,9 +720,7 @@ APFloat::bitwiseIsEqual(const APFloat &rhs) const { } } -APFloat::APFloat(const fltSemantics &ourSemantics, integerPart value) - : exponent2(0), sign2(0) { - assertArithmeticOK(ourSemantics); +APFloat::APFloat(const fltSemantics &ourSemantics, integerPart value) { initialize(&ourSemantics); sign = 0; zeroSignificand(); @@ -738,24 +729,19 @@ APFloat::APFloat(const fltSemantics &ourSemantics, integerPart value) normalize(rmNearestTiesToEven, lfExactlyZero); } -APFloat::APFloat(const fltSemantics &ourSemantics) : exponent2(0), sign2(0) { - assertArithmeticOK(ourSemantics); +APFloat::APFloat(const fltSemantics &ourSemantics) { initialize(&ourSemantics); category = fcZero; sign = false; } -APFloat::APFloat(const fltSemantics &ourSemantics, uninitializedTag tag) - : exponent2(0), sign2(0) { - assertArithmeticOK(ourSemantics); +APFloat::APFloat(const fltSemantics &ourSemantics, uninitializedTag tag) { // Allocates storage if necessary but does not initialize it. initialize(&ourSemantics); } APFloat::APFloat(const fltSemantics &ourSemantics, - fltCategory ourCategory, bool negative) - : exponent2(0), sign2(0) { - assertArithmeticOK(ourSemantics); + fltCategory ourCategory, bool negative) { initialize(&ourSemantics); category = ourCategory; sign = negative; @@ -765,14 +751,12 @@ APFloat::APFloat(const fltSemantics &ourSemantics, makeNaN(); } -APFloat::APFloat(const fltSemantics &ourSemantics, StringRef text) - : exponent2(0), sign2(0) { - assertArithmeticOK(ourSemantics); +APFloat::APFloat(const fltSemantics &ourSemantics, StringRef text) { initialize(&ourSemantics); convertFromString(text, rmNearestTiesToEven); } -APFloat::APFloat(const APFloat &rhs) : exponent2(0), sign2(0) { +APFloat::APFloat(const APFloat &rhs) { initialize(rhs.semantics); assign(rhs); } @@ -1559,8 +1543,6 @@ APFloat::addOrSubtract(const APFloat &rhs, roundingMode rounding_mode, { opStatus fs; - assertArithmeticOK(*semantics); - fs = addOrSubtractSpecials(rhs, subtract); /* This return code means it was not a simple case. 
*/ @@ -1605,7 +1587,6 @@ APFloat::multiply(const APFloat &rhs, roundingMode rounding_mode) { opStatus fs; - assertArithmeticOK(*semantics); sign ^= rhs.sign; fs = multiplySpecials(rhs); @@ -1625,7 +1606,6 @@ APFloat::divide(const APFloat &rhs, roundingMode rounding_mode) { opStatus fs; - assertArithmeticOK(*semantics); sign ^= rhs.sign; fs = divideSpecials(rhs); @@ -1647,7 +1627,6 @@ APFloat::remainder(const APFloat &rhs) APFloat V = *this; unsigned int origSign = sign; - assertArithmeticOK(*semantics); fs = V.divide(rhs, rmNearestTiesToEven); if (fs == opDivByZero) return fs; @@ -1682,7 +1661,6 @@ APFloat::opStatus APFloat::mod(const APFloat &rhs, roundingMode rounding_mode) { opStatus fs; - assertArithmeticOK(*semantics); fs = modSpecials(rhs); if (category == fcNormal && rhs.category == fcNormal) { @@ -1726,8 +1704,6 @@ APFloat::fusedMultiplyAdd(const APFloat &multiplicand, { opStatus fs; - assertArithmeticOK(*semantics); - /* Post-multiplication sign, before addition. */ sign ^= multiplicand.sign; @@ -1768,12 +1744,11 @@ APFloat::fusedMultiplyAdd(const APFloat &multiplicand, /* Rounding-mode corrrect round to integral value. */ APFloat::opStatus APFloat::roundToIntegral(roundingMode rounding_mode) { opStatus fs; - assertArithmeticOK(*semantics); // If the exponent is large enough, we know that this value is already // integral, and the arithmetic below would potentially cause it to saturate // to +/-Inf. Bail out early instead. - if (exponent+1 >= (int)semanticsPrecision(*semantics)) + if (category == fcNormal && exponent+1 >= (int)semanticsPrecision(*semantics)) return opOK; // The algorithm here is quite simple: we add 2^(p-1), where p is the @@ -1815,7 +1790,6 @@ APFloat::compare(const APFloat &rhs) const { cmpResult result; - assertArithmeticOK(*semantics); assert(semantics == rhs.semantics); switch (convolve(category, rhs.category)) { @@ -1900,8 +1874,6 @@ APFloat::convert(const fltSemantics &toSemantics, int shift; const fltSemantics &fromSemantics = *semantics; - assertArithmeticOK(fromSemantics); - assertArithmeticOK(toSemantics); lostFraction = lfExactlyZero; newPartCount = partCountForBits(toSemantics.precision + 1); oldPartCount = partCount(); @@ -1986,8 +1958,6 @@ APFloat::convertToSignExtendedInteger(integerPart *parts, unsigned int width, const integerPart *src; unsigned int dstPartsCount, truncatedBits; - assertArithmeticOK(*semantics); - *isExact = false; /* Handle the three special cases first. 
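(Editorial aside, not part of the patch.) The roundToIntegral comment above describes adding 2^(p-1), where p is the precision, so that the fraction bits fall off the end of the significand and the rounding hardware does the work. A self-contained sketch of the same trick for plain doubles, valid only for non-negative values below 2^52 and shown under round-to-nearest; the function name is mine:

  #include <cstdio>

  // Adding 2^52 forces the fractional bits out of the 53-bit significand;
  // subtracting it back leaves the value rounded to an integer.
  double roundToIntegralSketch(double x) {
    volatile double magic = 4503599627370496.0;  // 2^52; volatile blocks constant folding
    volatile double t = x + magic;
    return t - magic;
  }

  int main() {
    std::printf("%f %f\n", roundToIntegralSketch(2.5), roundToIntegralSketch(3.7));  // 2.0 4.0
  }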
*/ @@ -2149,7 +2119,6 @@ APFloat::convertFromUnsignedParts(const integerPart *src, integerPart *dst; lostFraction lost_fraction; - assertArithmeticOK(*semantics); category = fcNormal; omsb = APInt::tcMSB(src, srcCount) + 1; dst = significandParts(); @@ -2200,7 +2169,6 @@ APFloat::convertFromSignExtendedInteger(const integerPart *src, { opStatus status; - assertArithmeticOK(*semantics); if (isSigned && APInt::tcExtractBit(src, srcCount * integerPartWidth - 1)) { integerPart *copy; @@ -2334,7 +2302,7 @@ APFloat::roundSignificandWithExponent(const integerPart *decSigParts, roundingMode rounding_mode) { unsigned int parts, pow5PartCount; - fltSemantics calcSemantics = { 32767, -32767, 0, true }; + fltSemantics calcSemantics = { 32767, -32767, 0 }; integerPart pow5Parts[maxPowerOfFiveParts]; bool isNearest; @@ -2526,7 +2494,6 @@ APFloat::convertFromDecimalString(StringRef str, roundingMode rounding_mode) APFloat::opStatus APFloat::convertFromString(StringRef str, roundingMode rounding_mode) { - assertArithmeticOK(*semantics); assert(!str.empty() && "Invalid string length"); /* Handle a leading minus sign. */ @@ -2578,8 +2545,6 @@ APFloat::convertToHexString(char *dst, unsigned int hexDigits, { char *p; - assertArithmeticOK(*semantics); - p = dst; if (sign) *dst++ = '-'; @@ -2788,42 +2753,46 @@ APFloat::convertPPCDoubleDoubleAPFloatToAPInt() const assert(semantics == (const llvm::fltSemantics*)&PPCDoubleDouble); assert(partCount()==2); - uint64_t myexponent, mysignificand, myexponent2, mysignificand2; - - if (category==fcNormal) { - myexponent = exponent + 1023; //bias - myexponent2 = exponent2 + 1023; - mysignificand = significandParts()[0]; - mysignificand2 = significandParts()[1]; - if (myexponent==1 && !(mysignificand & 0x10000000000000LL)) - myexponent = 0; // denormal - if (myexponent2==1 && !(mysignificand2 & 0x10000000000000LL)) - myexponent2 = 0; // denormal - } else if (category==fcZero) { - myexponent = 0; - mysignificand = 0; - myexponent2 = 0; - mysignificand2 = 0; - } else if (category==fcInfinity) { - myexponent = 0x7ff; - myexponent2 = 0; - mysignificand = 0; - mysignificand2 = 0; + uint64_t words[2]; + opStatus fs; + bool losesInfo; + + // Convert number to double. To avoid spurious underflows, we re- + // normalize against the "double" minExponent first, and only *then* + // truncate the mantissa. The result of that second conversion + // may be inexact, but should never underflow. + APFloat extended(*this); + fltSemantics extendedSemantics = *semantics; + extendedSemantics.minExponent = IEEEdouble.minExponent; + fs = extended.convert(extendedSemantics, rmNearestTiesToEven, &losesInfo); + assert(fs == opOK && !losesInfo); + (void)fs; + + APFloat u(extended); + fs = u.convert(IEEEdouble, rmNearestTiesToEven, &losesInfo); + assert(fs == opOK || fs == opInexact); + (void)fs; + words[0] = *u.convertDoubleAPFloatToAPInt().getRawData(); + + // If conversion was exact or resulted in a special case, we're done; + // just set the second double to zero. Otherwise, re-convert back to + // the extended format and compute the difference. This now should + // convert exactly to double. 
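(Editorial aside, not part of the patch.) The conversion above relies on the fact that the second double of a PPC double-double is exactly the rounding error left over after rounding the full value to one double, which is why re-converting and subtracting is exact. The same identity, sketched standalone with Knuth's two-sum; twoSum is my own helper name:

  #include <cmath>
  #include <cstdio>

  // Error-free addition: after the call, hi + lo == a + b exactly, with hi
  // the nearest double to a + b and lo the leftover rounding error.
  static void twoSum(double a, double b, double &hi, double &lo) {
    hi = a + b;
    double bv = hi - a;
    lo = (a - (hi - bv)) + (b - bv);
  }

  int main() {
    // The FIXME example above: 1.0 + 2^-106 is an exact double-double
    // (hi = 1.0, lo = 2^-106) even though no single 106-bit significand
    // with one exponent can represent it.
    double hi, lo;
    twoSum(1.0, std::ldexp(1.0, -106), hi, lo);
    std::printf("hi=%a  lo=%a\n", hi, lo);
  }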
+ if (u.category == fcNormal && losesInfo) { + fs = u.convert(extendedSemantics, rmNearestTiesToEven, &losesInfo); + assert(fs == opOK && !losesInfo); + (void)fs; + + APFloat v(extended); + v.subtract(u, rmNearestTiesToEven); + fs = v.convert(IEEEdouble, rmNearestTiesToEven, &losesInfo); + assert(fs == opOK && !losesInfo); + (void)fs; + words[1] = *v.convertDoubleAPFloatToAPInt().getRawData(); } else { - assert(category == fcNaN && "Unknown category"); - myexponent = 0x7ff; - mysignificand = significandParts()[0]; - myexponent2 = exponent2; - mysignificand2 = significandParts()[1]; + words[1] = 0; } - uint64_t words[2]; - words[0] = ((uint64_t)(sign & 1) << 63) | - ((myexponent & 0x7ff) << 52) | - (mysignificand & 0xfffffffffffffLL); - words[1] = ((uint64_t)(sign2 & 1) << 63) | - ((myexponent2 & 0x7ff) << 52) | - (mysignificand2 & 0xfffffffffffffLL); return APInt(128, words); } @@ -3043,47 +3012,23 @@ APFloat::initFromPPCDoubleDoubleAPInt(const APInt &api) assert(api.getBitWidth()==128); uint64_t i1 = api.getRawData()[0]; uint64_t i2 = api.getRawData()[1]; - uint64_t myexponent = (i1 >> 52) & 0x7ff; - uint64_t mysignificand = i1 & 0xfffffffffffffLL; - uint64_t myexponent2 = (i2 >> 52) & 0x7ff; - uint64_t mysignificand2 = i2 & 0xfffffffffffffLL; + opStatus fs; + bool losesInfo; - initialize(&APFloat::PPCDoubleDouble); - assert(partCount()==2); + // Get the first double and convert to our format. + initFromDoubleAPInt(APInt(64, i1)); + fs = convert(PPCDoubleDouble, rmNearestTiesToEven, &losesInfo); + assert(fs == opOK && !losesInfo); + (void)fs; - sign = static_cast<unsigned int>(i1>>63); - sign2 = static_cast<unsigned int>(i2>>63); - if (myexponent==0 && mysignificand==0) { - // exponent, significand meaningless - // exponent2 and significand2 are required to be 0; we don't check - category = fcZero; - } else if (myexponent==0x7ff && mysignificand==0) { - // exponent, significand meaningless - // exponent2 and significand2 are required to be 0; we don't check - category = fcInfinity; - } else if (myexponent==0x7ff && mysignificand!=0) { - // exponent meaningless. So is the whole second word, but keep it - // for determinism. - category = fcNaN; - exponent2 = myexponent2; - significandParts()[0] = mysignificand; - significandParts()[1] = mysignificand2; - } else { - category = fcNormal; - // Note there is no category2; the second word is treated as if it is - // fcNormal, although it might be something else considered by itself. - exponent = myexponent - 1023; - exponent2 = myexponent2 - 1023; - significandParts()[0] = mysignificand; - significandParts()[1] = mysignificand2; - if (myexponent==0) // denormal - exponent = -1022; - else - significandParts()[0] |= 0x10000000000000LL; // integer bit - if (myexponent2==0) - exponent2 = -1022; - else - significandParts()[1] |= 0x10000000000000LL; // integer bit + // Unless we have a special case, add in second double. 
+ if (category == fcNormal) { + APFloat v(APInt(64, i2)); + fs = v.convert(PPCDoubleDouble, rmNearestTiesToEven, &losesInfo); + assert(fs == opOK && !losesInfo); + (void)fs; + + add(v, rmNearestTiesToEven); } } @@ -3309,15 +3254,15 @@ APFloat APFloat::getSmallestNormalized(const fltSemantics &Sem, bool Negative) { return Val; } -APFloat::APFloat(const APInt& api, bool isIEEE) : exponent2(0), sign2(0) { +APFloat::APFloat(const APInt& api, bool isIEEE) { initFromAPInt(api, isIEEE); } -APFloat::APFloat(float f) : exponent2(0), sign2(0) { +APFloat::APFloat(float f) { initFromAPInt(APInt::floatToBits(f)); } -APFloat::APFloat(double d) : exponent2(0), sign2(0) { +APFloat::APFloat(double d) { initFromAPInt(APInt::doubleToBits(d)); } @@ -3608,11 +3553,6 @@ void APFloat::toString(SmallVectorImpl<char> &Str, } bool APFloat::getExactInverse(APFloat *inv) const { - // We can only guarantee the existence of an exact inverse for IEEE floats. - if (semantics != &IEEEhalf && semantics != &IEEEsingle && - semantics != &IEEEdouble && semantics != &IEEEquad) - return false; - // Special floats and denormals have no exact inverse. if (category != fcNormal) return false; diff --git a/lib/Support/Atomic.cpp b/lib/Support/Atomic.cpp index 3001f6c..9559ad7 100644 --- a/lib/Support/Atomic.cpp +++ b/lib/Support/Atomic.cpp @@ -21,11 +21,15 @@ using namespace llvm; #undef MemoryFence #endif +#if defined(__GNUC__) || (defined(__IBMCPP__) && __IBMCPP__ >= 1210) +#define GNU_ATOMICS +#endif + void sys::MemoryFence() { #if LLVM_HAS_ATOMICS == 0 return; #else -# if defined(__GNUC__) +# if defined(GNU_ATOMICS) __sync_synchronize(); # elif defined(_MSC_VER) MemoryBarrier(); @@ -43,7 +47,7 @@ sys::cas_flag sys::CompareAndSwap(volatile sys::cas_flag* ptr, if (result == old_value) *ptr = new_value; return result; -#elif defined(__GNUC__) +#elif defined(GNU_ATOMICS) return __sync_val_compare_and_swap(ptr, old_value, new_value); #elif defined(_MSC_VER) return InterlockedCompareExchange(ptr, new_value, old_value); @@ -56,7 +60,7 @@ sys::cas_flag sys::AtomicIncrement(volatile sys::cas_flag* ptr) { #if LLVM_HAS_ATOMICS == 0 ++(*ptr); return *ptr; -#elif defined(__GNUC__) +#elif defined(GNU_ATOMICS) return __sync_add_and_fetch(ptr, 1); #elif defined(_MSC_VER) return InterlockedIncrement(ptr); @@ -69,7 +73,7 @@ sys::cas_flag sys::AtomicDecrement(volatile sys::cas_flag* ptr) { #if LLVM_HAS_ATOMICS == 0 --(*ptr); return *ptr; -#elif defined(__GNUC__) +#elif defined(GNU_ATOMICS) return __sync_sub_and_fetch(ptr, 1); #elif defined(_MSC_VER) return InterlockedDecrement(ptr); @@ -82,7 +86,7 @@ sys::cas_flag sys::AtomicAdd(volatile sys::cas_flag* ptr, sys::cas_flag val) { #if LLVM_HAS_ATOMICS == 0 *ptr += val; return *ptr; -#elif defined(__GNUC__) +#elif defined(GNU_ATOMICS) return __sync_add_and_fetch(ptr, val); #elif defined(_MSC_VER) return InterlockedExchangeAdd(ptr, val) + val; diff --git a/lib/Support/CMakeLists.txt b/lib/Support/CMakeLists.txt index 83baf60..6af0f4a 100644 --- a/lib/Support/CMakeLists.txt +++ b/lib/Support/CMakeLists.txt @@ -1,9 +1,3 @@ -## FIXME: This only requires RTTI because tblgen uses it. Fix that. 
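(Editorial aside, not part of the patch.) The GNU_ATOMICS guard above routes MemoryFence, CompareAndSwap and the other helpers to the __sync builtins on GCC-compatible compilers, and now also on IBM XL C++ 12.1 and later. A minimal sketch of those builtins in isolation, assuming a compiler that provides them:

  #include <cstdio>

  int main() {
    volatile long counter = 0;
    long v = __sync_add_and_fetch(&counter, 1);               // atomic increment, returns new value
    long old = __sync_val_compare_and_swap(&counter, 1L, 5L); // if counter == 1, store 5; returns old value
    __sync_synchronize();                                     // full barrier, as in sys::MemoryFence
    std::printf("v=%ld old=%ld counter=%ld\n", v, old, (long)counter);  // v=1 old=1 counter=5
  }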
-set(LLVM_REQUIRES_RTTI 1) -if( MINGW ) - set(LLVM_REQUIRES_EH 1) -endif() - add_llvm_library(LLVMSupport APFloat.cpp APInt.cpp diff --git a/lib/Support/CommandLine.cpp b/lib/Support/CommandLine.cpp index 593315d1..fc4f189 100644 --- a/lib/Support/CommandLine.cpp +++ b/lib/Support/CommandLine.cpp @@ -464,7 +464,7 @@ static void ParseCStringVector(std::vector<char *> &OutputVector, /// an environment variable (whose name is given in ENVVAR). /// void cl::ParseEnvironmentOptions(const char *progName, const char *envVar, - const char *Overview, bool ReadResponseFiles) { + const char *Overview) { // Check args. assert(progName && "Program name not specified"); assert(envVar && "Environment variable name missing"); @@ -483,7 +483,7 @@ void cl::ParseEnvironmentOptions(const char *progName, const char *envVar, // and hand it off to ParseCommandLineOptions(). ParseCStringVector(newArgv, envValue); int newArgc = static_cast<int>(newArgv.size()); - ParseCommandLineOptions(newArgc, &newArgv[0], Overview, ReadResponseFiles); + ParseCommandLineOptions(newArgc, &newArgv[0], Overview); // Free all the strdup()ed strings. for (std::vector<char*>::iterator i = newArgv.begin(), e = newArgv.end(); @@ -529,7 +529,7 @@ static void ExpandResponseFiles(unsigned argc, const char*const* argv, } void cl::ParseCommandLineOptions(int argc, const char * const *argv, - const char *Overview, bool ReadResponseFiles) { + const char *Overview) { // Process all registered options. SmallVector<Option*, 4> PositionalOpts; SmallVector<Option*, 4> SinkOpts; @@ -541,12 +541,10 @@ void cl::ParseCommandLineOptions(int argc, const char * const *argv, // Expand response files. std::vector<char*> newArgv; - if (ReadResponseFiles) { - newArgv.push_back(strdup(argv[0])); - ExpandResponseFiles(argc, argv, newArgv); - argv = &newArgv[0]; - argc = static_cast<int>(newArgv.size()); - } + newArgv.push_back(strdup(argv[0])); + ExpandResponseFiles(argc, argv, newArgv); + argv = &newArgv[0]; + argc = static_cast<int>(newArgv.size()); // Copy the program name into ProgName, making sure not to overflow it. std::string ProgName = sys::path::filename(argv[0]); @@ -839,12 +837,10 @@ void cl::ParseCommandLineOptions(int argc, const char * const *argv, MoreHelp->clear(); // Free the memory allocated by ExpandResponseFiles. - if (ReadResponseFiles) { - // Free all the strdup()ed strings. - for (std::vector<char*>::iterator i = newArgv.begin(), e = newArgv.end(); - i != e; ++i) - free(*i); - } + // Free all the strdup()ed strings. + for (std::vector<char*>::iterator i = newArgv.begin(), e = newArgv.end(); + i != e; ++i) + free(*i); // If we had an error processing our arguments, don't let the program execute if (ErrorParsing) exit(1); diff --git a/lib/Support/DAGDeltaAlgorithm.cpp b/lib/Support/DAGDeltaAlgorithm.cpp index 1e89c6a..34e82cf 100644 --- a/lib/Support/DAGDeltaAlgorithm.cpp +++ b/lib/Support/DAGDeltaAlgorithm.cpp @@ -122,7 +122,7 @@ private: DDA.UpdatedSearchState(Changes, Sets, Required); } - /// ExecuteOneTest - Execute a single test predicate on the change set \arg S. + /// ExecuteOneTest - Execute a single test predicate on the change set \p S. bool ExecuteOneTest(const changeset_ty &S) { // Check dependencies invariant. DEBUG({ @@ -143,8 +143,8 @@ public: changeset_ty Run(); - /// GetTestResult - Get the test result for the active set \arg Changes with - /// \arg Required changes from the cache, executing the test if necessary. 
+ /// GetTestResult - Get the test result for the active set \p Changes with + /// \p Required changes from the cache, executing the test if necessary. /// /// \param Changes - The set of active changes being minimized, which should /// have their pred closure included in the test. @@ -163,11 +163,11 @@ class DeltaActiveSetHelper : public DeltaAlgorithm { protected: /// UpdatedSearchState - Callback used when the search state changes. virtual void UpdatedSearchState(const changeset_ty &Changes, - const changesetlist_ty &Sets) { + const changesetlist_ty &Sets) LLVM_OVERRIDE { DDAI.UpdatedSearchState(Changes, Sets, Required); } - virtual bool ExecuteOneTest(const changeset_ty &S) { + virtual bool ExecuteOneTest(const changeset_ty &S) LLVM_OVERRIDE { return DDAI.GetTestResult(S, Required); } diff --git a/lib/Support/DataExtractor.cpp b/lib/Support/DataExtractor.cpp index dc21155..3d5cce0 100644 --- a/lib/Support/DataExtractor.cpp +++ b/lib/Support/DataExtractor.cpp @@ -139,7 +139,7 @@ uint64_t DataExtractor::getULEB128(uint32_t *offset_ptr) const { while (isValidOffset(offset)) { byte = Data[offset++]; - result |= (byte & 0x7f) << shift; + result |= uint64_t(byte & 0x7f) << shift; shift += 7; if ((byte & 0x80) == 0) break; @@ -160,7 +160,7 @@ int64_t DataExtractor::getSLEB128(uint32_t *offset_ptr) const { while (isValidOffset(offset)) { byte = Data[offset++]; - result |= (byte & 0x7f) << shift; + result |= uint64_t(byte & 0x7f) << shift; shift += 7; if ((byte & 0x80) == 0) break; @@ -168,7 +168,7 @@ int64_t DataExtractor::getSLEB128(uint32_t *offset_ptr) const { // Sign bit of byte is 2nd high order bit (0x40) if (shift < 64 && (byte & 0x40)) - result |= -(1 << shift); + result |= -(1ULL << shift); *offset_ptr = offset; return result; diff --git a/lib/Support/DataStream.cpp b/lib/Support/DataStream.cpp index 94d14a5..3a38e2a 100644 --- a/lib/Support/DataStream.cpp +++ b/lib/Support/DataStream.cpp @@ -58,7 +58,7 @@ public: virtual ~DataFileStreamer() { close(Fd); } - virtual size_t GetBytes(unsigned char *buf, size_t len) { + virtual size_t GetBytes(unsigned char *buf, size_t len) LLVM_OVERRIDE { NumStreamFetches++; return read(Fd, buf, len); } diff --git a/lib/Support/DynamicLibrary.cpp b/lib/Support/DynamicLibrary.cpp index fb02c07..45fec36 100644 --- a/lib/Support/DynamicLibrary.cpp +++ b/lib/Support/DynamicLibrary.cpp @@ -160,7 +160,7 @@ void* DynamicLibrary::SearchForAddressOfSymbol(const char *symbolName) { // On linux we have a weird situation. The stderr/out/in symbols are both // macros and global variables because of standards requirements. So, we // boldly use the EXPLICIT_SYMBOL macro without checking for a #define first. -#if defined(__linux__) +#if defined(__linux__) and !defined(__ANDROID__) { EXPLICIT_SYMBOL(stderr); EXPLICIT_SYMBOL(stdout); diff --git a/lib/Support/Errno.cpp b/lib/Support/Errno.cpp index dd218f6..730220f 100644 --- a/lib/Support/Errno.cpp +++ b/lib/Support/Errno.cpp @@ -13,6 +13,7 @@ #include "llvm/Support/Errno.h" #include "llvm/Config/config.h" // Get autoconf configuration settings +#include "llvm/Support/raw_ostream.h" #if HAVE_STRING_H #include <string.h> @@ -39,7 +40,7 @@ std::string StrError(int errnum) { const int MaxErrStrLen = 2000; char buffer[MaxErrStrLen]; buffer[0] = '\0'; - char* str = buffer; + std::string str; #ifdef HAVE_STRERROR_R // strerror_r is thread-safe. 
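(Editorial aside, not part of the patch.) The DataExtractor fix above widens (byte & 0x7f) to uint64_t before shifting; without the cast the operand is promoted only to int, so on the usual 32-bit int the larger shifts overflow or are undefined and the high LEB128 groups are lost. A standalone ULEB128 decoder with the cast in place; decodeULEB128 here is my own sketch, not the LLVM API:

  #include <cstdint>
  #include <cstdio>

  // Each byte contributes its low 7 bits; the high bit says more bytes follow.
  uint64_t decodeULEB128(const uint8_t *p) {
    uint64_t result = 0;
    unsigned shift = 0;
    uint8_t byte;
    do {
      byte = *p++;
      result |= uint64_t(byte & 0x7f) << shift;  // widen BEFORE shifting
      shift += 7;
    } while (byte & 0x80);
    return result;
  }

  int main() {
    const uint8_t enc[] = { 0xe5, 0x8e, 0x26 };  // classic example: 624485
    std::printf("%llu\n", (unsigned long long)decodeULEB128(enc));
  }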
if (errnum) @@ -49,21 +50,25 @@ std::string StrError(int errnum) { str = strerror_r(errnum,buffer,MaxErrStrLen-1); # else strerror_r(errnum,buffer,MaxErrStrLen-1); + str = buffer; # endif #elif HAVE_DECL_STRERROR_S // "Windows Secure API" - if (errnum) + if (errnum) { strerror_s(buffer, MaxErrStrLen - 1, errnum); + str = buffer; + } #elif defined(HAVE_STRERROR) // Copy the thread un-safe result of strerror into // the buffer as fast as possible to minimize impact // of collision of strerror in multiple threads. if (errnum) - strncpy(buffer,strerror(errnum),MaxErrStrLen-1); - buffer[MaxErrStrLen-1] = '\0'; + str = strerror(errnum); #else // Strange that this system doesn't even have strerror // but, oh well, just use a generic message - sprintf(buffer, "Error #%d", errnum); + raw_string_ostream stream(str); + stream << "Error #" << errnum; + stream.flush(); #endif return str; } diff --git a/lib/Support/FoldingSet.cpp b/lib/Support/FoldingSet.cpp index c6282c6..4d489a8 100644 --- a/lib/Support/FoldingSet.cpp +++ b/lib/Support/FoldingSet.cpp @@ -38,6 +38,14 @@ bool FoldingSetNodeIDRef::operator==(FoldingSetNodeIDRef RHS) const { return memcmp(Data, RHS.Data, Size*sizeof(*Data)) == 0; } +/// Used to compare the "ordering" of two nodes as defined by the +/// profiled bits and their ordering defined by memcmp(). +bool FoldingSetNodeIDRef::operator<(FoldingSetNodeIDRef RHS) const { + if (Size != RHS.Size) + return Size < RHS.Size; + return memcmp(Data, RHS.Data, Size*sizeof(*Data)) < 0; +} + //===----------------------------------------------------------------------===// // FoldingSetNodeID Implementation @@ -152,6 +160,16 @@ bool FoldingSetNodeID::operator==(FoldingSetNodeIDRef RHS) const { return FoldingSetNodeIDRef(Bits.data(), Bits.size()) == RHS; } +/// Used to compare the "ordering" of two nodes as defined by the +/// profiled bits and their ordering defined by memcmp(). +bool FoldingSetNodeID::operator<(const FoldingSetNodeID &RHS)const{ + return *this < FoldingSetNodeIDRef(RHS.Bits.data(), RHS.Bits.size()); +} + +bool FoldingSetNodeID::operator<(FoldingSetNodeIDRef RHS) const { + return FoldingSetNodeIDRef(Bits.data(), Bits.size()) < RHS; +} + /// Intern - Copy this node's data to a memory region allocated from the /// given allocator and return a FoldingSetNodeIDRef describing the /// interned data. diff --git a/lib/Support/Host.cpp b/lib/Support/Host.cpp index 9a2c39d..34e32b8 100644 --- a/lib/Support/Host.cpp +++ b/lib/Support/Host.cpp @@ -234,6 +234,8 @@ std::string sys::getHostCPUName() { case 37: // Intel Core i7, laptop version. case 44: // Intel Core i7 processor and Intel Xeon processor. All // processors are manufactured using the 32 nm process. 
+ case 46: // Nehalem EX + case 47: // Westmere EX return "corei7"; // SandyBridge: @@ -303,6 +305,7 @@ std::string sys::getHostCPUName() { case 8: return "k6-2"; case 9: case 13: return "k6-3"; + case 10: return "geode"; default: return "pentium"; } case 6: @@ -500,6 +503,7 @@ std::string sys::getHostCPUName() { .Case("0xb76", "arm1176jz-s") .Case("0xc08", "cortex-a8") .Case("0xc09", "cortex-a9") + .Case("0xc0f", "cortex-a15") .Case("0xc20", "cortex-m0") .Case("0xc23", "cortex-m3") .Case("0xc24", "cortex-m4") diff --git a/lib/Support/LockFileManager.cpp b/lib/Support/LockFileManager.cpp index 64404a1..59bfcfc 100644 --- a/lib/Support/LockFileManager.cpp +++ b/lib/Support/LockFileManager.cpp @@ -49,7 +49,7 @@ LockFileManager::readLockFile(StringRef LockFileName) { } bool LockFileManager::processStillExecuting(StringRef Hostname, int PID) { -#if LLVM_ON_UNIX +#if LLVM_ON_UNIX && !defined(__ANDROID__) char MyHostname[256]; MyHostname[255] = 0; MyHostname[0] = 0; diff --git a/lib/Support/Makefile b/lib/Support/Makefile index d68e500..4a2185d 100644 --- a/lib/Support/Makefile +++ b/lib/Support/Makefile @@ -11,9 +11,6 @@ LEVEL = ../.. LIBRARYNAME = LLVMSupport BUILD_ARCHIVE = 1 -## FIXME: This only requires RTTI because tblgen uses it. Fix that. -REQUIRES_RTTI = 1 - EXTRA_DIST = Unix Win32 README.txt include $(LEVEL)/Makefile.common diff --git a/lib/Support/Memory.cpp b/lib/Support/Memory.cpp index 22f7494..12f0838 100644 --- a/lib/Support/Memory.cpp +++ b/lib/Support/Memory.cpp @@ -16,14 +16,6 @@ #include "llvm/Support/Valgrind.h" #include "llvm/Config/config.h" -#if defined(__mips__) -#include <sys/cachectl.h> -#endif - -namespace llvm { -using namespace sys; -} - // Include the platform-specific parts of this class. #ifdef LLVM_ON_UNIX #include "Unix/Memory.inc" @@ -31,51 +23,3 @@ using namespace sys; #ifdef LLVM_ON_WIN32 #include "Windows/Memory.inc" #endif - -extern "C" void sys_icache_invalidate(const void *Addr, size_t len); - -/// InvalidateInstructionCache - Before the JIT can run a block of code -/// that has been emitted it must invalidate the instruction cache on some -/// platforms. -void llvm::sys::Memory::InvalidateInstructionCache(const void *Addr, - size_t Len) { - -// icache invalidation for PPC and ARM. -#if defined(__APPLE__) - -# if (defined(__POWERPC__) || defined (__ppc__) || \ - defined(_POWER) || defined(_ARCH_PPC)) || defined(__arm__) - sys_icache_invalidate(const_cast<void *>(Addr), Len); -# endif - -#else - -# if (defined(__POWERPC__) || defined (__ppc__) || \ - defined(_POWER) || defined(_ARCH_PPC)) && defined(__GNUC__) - const size_t LineSize = 32; - - const intptr_t Mask = ~(LineSize - 1); - const intptr_t StartLine = ((intptr_t) Addr) & Mask; - const intptr_t EndLine = ((intptr_t) Addr + Len + LineSize - 1) & Mask; - - for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize) - asm volatile("dcbf 0, %0" : : "r"(Line)); - asm volatile("sync"); - - for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize) - asm volatile("icbi 0, %0" : : "r"(Line)); - asm volatile("isync"); -# elif defined(__arm__) && defined(__GNUC__) - // FIXME: Can we safely always call this for __GNUC__ everywhere? 
- const char *Start = static_cast<const char *>(Addr); - const char *End = Start + Len; - __clear_cache(const_cast<char *>(Start), const_cast<char *>(End)); -# elif defined(__mips__) - const char *Start = static_cast<const char *>(Addr); - cacheflush(const_cast<char *>(Start), Len, BCACHE); -# endif - -#endif // end apple - - ValgrindDiscardTranslations(Addr, Len); -} diff --git a/lib/Support/MemoryBuffer.cpp b/lib/Support/MemoryBuffer.cpp index 992f03c..ec373e7 100644 --- a/lib/Support/MemoryBuffer.cpp +++ b/lib/Support/MemoryBuffer.cpp @@ -33,6 +33,9 @@ #include <unistd.h> #else #include <io.h> +#ifndef S_ISFIFO +#define S_ISFIFO(x) (0) +#endif #endif #include <fcntl.h> using namespace llvm; @@ -81,12 +84,12 @@ public: init(InputData.begin(), InputData.end(), RequiresNullTerminator); } - virtual const char *getBufferIdentifier() const { + virtual const char *getBufferIdentifier() const LLVM_OVERRIDE { // The name is stored after the class itself. return reinterpret_cast<const char*>(this + 1); } - - virtual BufferKind getBufferKind() const { + + virtual BufferKind getBufferKind() const LLVM_OVERRIDE { return MemoryBuffer_Malloc; } }; @@ -194,13 +197,34 @@ public: sys::Path::UnMapFilePages(reinterpret_cast<const char*>(RealStart), RealSize); } - - virtual BufferKind getBufferKind() const { + + virtual BufferKind getBufferKind() const LLVM_OVERRIDE { return MemoryBuffer_MMap; } }; } +static error_code getMemoryBufferForStream(int FD, + StringRef BufferName, + OwningPtr<MemoryBuffer> &result) { + const ssize_t ChunkSize = 4096*4; + SmallString<ChunkSize> Buffer; + ssize_t ReadBytes; + // Read into Buffer until we hit EOF. + do { + Buffer.reserve(Buffer.size() + ChunkSize); + ReadBytes = read(FD, Buffer.end(), ChunkSize); + if (ReadBytes == -1) { + if (errno == EINTR) continue; + return error_code(errno, posix_category()); + } + Buffer.set_size(Buffer.size() + ReadBytes); + } while (ReadBytes != 0); + + result.reset(MemoryBuffer::getMemBufferCopy(Buffer, BufferName)); + return error_code::success(); +} + error_code MemoryBuffer::getFile(StringRef Filename, OwningPtr<MemoryBuffer> &result, int64_t FileSize, @@ -297,6 +321,13 @@ error_code MemoryBuffer::getOpenFile(int FD, const char *Filename, if (fstat(FD, &FileInfo) == -1) { return error_code(errno, posix_category()); } + + // If this is a named pipe, we can't trust the size. Create the memory + // buffer by copying off the stream. + if (S_ISFIFO(FileInfo.st_mode)) { + return getMemoryBufferForStream(FD, Filename, result); + } + FileSize = FileInfo.st_size; } MapSize = FileSize; @@ -370,20 +401,5 @@ error_code MemoryBuffer::getSTDIN(OwningPtr<MemoryBuffer> &result) { // fallback if it fails. sys::Program::ChangeStdinToBinary(); - const ssize_t ChunkSize = 4096*4; - SmallString<ChunkSize> Buffer; - ssize_t ReadBytes; - // Read into Buffer until we hit EOF. 
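(Editorial aside, not part of the patch.) getMemoryBufferForStream above exists because fstat cannot report a trustworthy size for a named pipe, so the buffer is built by reading fixed-size chunks until read() returns 0, retrying when a signal interrupts the call. The same loop as a bare POSIX sketch; readStream is my own name:

  #include <cerrno>
  #include <string>
  #include <unistd.h>

  // Read everything from fd until EOF, retrying interrupted reads.
  // Returns false on a real I/O error.
  bool readStream(int fd, std::string &out) {
    char chunk[4096 * 4];
    for (;;) {
      ssize_t n = read(fd, chunk, sizeof(chunk));
      if (n == 0) return true;         // EOF
      if (n < 0) {
        if (errno == EINTR) continue;  // interrupted, try again
        return false;
      }
      out.append(chunk, size_t(n));
    }
  }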
- do { - Buffer.reserve(Buffer.size() + ChunkSize); - ReadBytes = read(0, Buffer.end(), ChunkSize); - if (ReadBytes == -1) { - if (errno == EINTR) continue; - return error_code(errno, posix_category()); - } - Buffer.set_size(Buffer.size() + ReadBytes); - } while (ReadBytes != 0); - - result.reset(getMemBufferCopy(Buffer, "<stdin>")); - return error_code::success(); + return getMemoryBufferForStream(0, "<stdin>", result); } diff --git a/lib/Support/SmallVector.cpp b/lib/Support/SmallVector.cpp index a89f149..f9c0e78 100644 --- a/lib/Support/SmallVector.cpp +++ b/lib/Support/SmallVector.cpp @@ -16,14 +16,15 @@ using namespace llvm; /// grow_pod - This is an implementation of the grow() method which only works /// on POD-like datatypes and is out of line to reduce code duplication. -void SmallVectorBase::grow_pod(size_t MinSizeInBytes, size_t TSize) { +void SmallVectorBase::grow_pod(void *FirstEl, size_t MinSizeInBytes, + size_t TSize) { size_t CurSizeBytes = size_in_bytes(); size_t NewCapacityInBytes = 2 * capacity_in_bytes() + TSize; // Always grow. if (NewCapacityInBytes < MinSizeInBytes) NewCapacityInBytes = MinSizeInBytes; void *NewElts; - if (this->isSmall()) { + if (BeginX == FirstEl) { NewElts = malloc(NewCapacityInBytes); // Copy the elements over. No need to run dtors on PODs. @@ -37,4 +38,3 @@ void SmallVectorBase::grow_pod(size_t MinSizeInBytes, size_t TSize) { this->BeginX = NewElts; this->CapacityX = (char*)this->BeginX + NewCapacityInBytes; } - diff --git a/lib/Support/StreamableMemoryObject.cpp b/lib/Support/StreamableMemoryObject.cpp index fe3752a..59e27a2 100644 --- a/lib/Support/StreamableMemoryObject.cpp +++ b/lib/Support/StreamableMemoryObject.cpp @@ -8,6 +8,7 @@ //===----------------------------------------------------------------------===// #include "llvm/Support/StreamableMemoryObject.h" +#include "llvm/Support/Compiler.h" #include <cassert> #include <cstring> @@ -23,18 +24,23 @@ public: assert(LastChar >= FirstChar && "Invalid start/end range"); } - virtual uint64_t getBase() const { return 0; } - virtual uint64_t getExtent() const { return LastChar - FirstChar; } - virtual int readByte(uint64_t address, uint8_t* ptr) const; + virtual uint64_t getBase() const LLVM_OVERRIDE { return 0; } + virtual uint64_t getExtent() const LLVM_OVERRIDE { + return LastChar - FirstChar; + } + virtual int readByte(uint64_t address, uint8_t* ptr) const LLVM_OVERRIDE; virtual int readBytes(uint64_t address, uint64_t size, uint8_t* buf, - uint64_t* copied) const; - virtual const uint8_t *getPointer(uint64_t address, uint64_t size) const; - virtual bool isValidAddress(uint64_t address) const { + uint64_t* copied) const LLVM_OVERRIDE; + virtual const uint8_t *getPointer(uint64_t address, + uint64_t size) const LLVM_OVERRIDE; + virtual bool isValidAddress(uint64_t address) const LLVM_OVERRIDE { return validAddress(address); } - virtual bool isObjectEnd(uint64_t address) const {return objectEnd(address);} + virtual bool isObjectEnd(uint64_t address) const LLVM_OVERRIDE { + return objectEnd(address); + } private: const uint8_t* const FirstChar; @@ -49,8 +55,8 @@ private: return static_cast<ptrdiff_t>(address) == LastChar - FirstChar; } - RawMemoryObject(const RawMemoryObject&); // DO NOT IMPLEMENT - void operator=(const RawMemoryObject&); // DO NOT IMPLEMENT + RawMemoryObject(const RawMemoryObject&) LLVM_DELETED_FUNCTION; + void operator=(const RawMemoryObject&) LLVM_DELETED_FUNCTION; }; int RawMemoryObject::readByte(uint64_t address, uint8_t* ptr) const { diff --git a/lib/Support/StringMap.cpp 
b/lib/Support/StringMap.cpp index c2fc261..9ac1f86 100644 --- a/lib/Support/StringMap.cpp +++ b/lib/Support/StringMap.cpp @@ -13,6 +13,7 @@ #include "llvm/ADT/StringMap.h" #include "llvm/ADT/StringExtras.h" +#include "llvm/Support/Compiler.h" #include <cassert> using namespace llvm; @@ -69,7 +70,7 @@ unsigned StringMapImpl::LookupBucketFor(StringRef Name) { while (1) { StringMapEntryBase *BucketItem = TheTable[BucketNo]; // If we found an empty bucket, this key isn't in the table yet, return it. - if (BucketItem == 0) { + if (LLVM_LIKELY(BucketItem == 0)) { // If we found a tombstone, we want to reuse the tombstone instead of an // empty bucket. This reduces probing. if (FirstTombstone != -1) { @@ -84,7 +85,7 @@ unsigned StringMapImpl::LookupBucketFor(StringRef Name) { if (BucketItem == getTombstoneVal()) { // Skip over tombstones. However, remember the first one we see. if (FirstTombstone == -1) FirstTombstone = BucketNo; - } else if (HashTable[BucketNo] == FullHashValue) { + } else if (LLVM_LIKELY(HashTable[BucketNo] == FullHashValue)) { // If the full hash value matches, check deeply for a match. The common // case here is that we are only looking at the buckets (for item info // being non-null and for the full hash value) not at the items. This @@ -123,12 +124,12 @@ int StringMapImpl::FindKey(StringRef Key) const { while (1) { StringMapEntryBase *BucketItem = TheTable[BucketNo]; // If we found an empty bucket, this key isn't in the table yet, return. - if (BucketItem == 0) + if (LLVM_LIKELY(BucketItem == 0)) return -1; if (BucketItem == getTombstoneVal()) { // Ignore tombstones. - } else if (HashTable[BucketNo] == FullHashValue) { + } else if (LLVM_LIKELY(HashTable[BucketNo] == FullHashValue)) { // If the full hash value matches, check deeply for a match. The common // case here is that we are only looking at the buckets (for item info // being non-null and for the full hash value) not at the items. This diff --git a/lib/Support/StringRef.cpp b/lib/Support/StringRef.cpp index 8aab4b2..f8e9208 100644 --- a/lib/Support/StringRef.cpp +++ b/lib/Support/StringRef.cpp @@ -350,8 +350,8 @@ bool llvm::getAsUnsignedInteger(StringRef Str, unsigned Radix, unsigned long long PrevResult = Result; Result = Result*Radix+CharVal; - // Check for overflow. - if (Result < PrevResult) + // Check for overflow by shifting back and seeing if bits were lost. 
+ if (Result/Radix < PrevResult) return true; Str = Str.substr(1); diff --git a/lib/Support/Triple.cpp b/lib/Support/Triple.cpp index cca549d..c058c05 100644 --- a/lib/Support/Triple.cpp +++ b/lib/Support/Triple.cpp @@ -42,6 +42,8 @@ const char *Triple::getArchTypeName(ArchType Kind) { case nvptx64: return "nvptx64"; case le32: return "le32"; case amdil: return "amdil"; + case spir: return "spir"; + case spir64: return "spir64"; } llvm_unreachable("Invalid ArchType!"); @@ -83,6 +85,8 @@ const char *Triple::getArchTypePrefix(ArchType Kind) { case nvptx64: return "nvptx"; case le32: return "le32"; case amdil: return "amdil"; + case spir: return "spir"; + case spir64: return "spir"; } } @@ -95,6 +99,8 @@ const char *Triple::getVendorTypeName(VendorType Kind) { case SCEI: return "scei"; case BGP: return "bgp"; case BGQ: return "bgq"; + case Freescale: return "fsl"; + case IBM: return "ibm"; } llvm_unreachable("Invalid VendorType!"); @@ -125,6 +131,7 @@ const char *Triple::getOSTypeName(OSType Kind) { case NativeClient: return "nacl"; case CNK: return "cnk"; case Bitrig: return "bitrig"; + case AIX: return "aix"; } llvm_unreachable("Invalid OSType"); @@ -138,7 +145,8 @@ const char *Triple::getEnvironmentTypeName(EnvironmentType Kind) { case GNUEABI: return "gnueabi"; case EABI: return "eabi"; case MachO: return "macho"; - case ANDROIDEABI: return "androideabi"; + case Android: return "android"; + case ELF: return "elf"; } llvm_unreachable("Invalid EnvironmentType!"); @@ -170,40 +178,11 @@ Triple::ArchType Triple::getArchTypeForLLVMName(StringRef Name) { .Case("nvptx64", nvptx64) .Case("le32", le32) .Case("amdil", amdil) + .Case("spir", spir) + .Case("spir64", spir64) .Default(UnknownArch); } -Triple::ArchType Triple::getArchTypeForDarwinArchName(StringRef Str) { - // See arch(3) and llvm-gcc's driver-driver.c. We don't implement support for - // archs which Darwin doesn't use. - - // The matching this routine does is fairly pointless, since it is neither the - // complete architecture list, nor a reasonable subset. The problem is that - // historically the driver driver accepts this and also ties its -march= - // handling to the architecture name, so we need to be careful before removing - // support for it. - - // This code must be kept in sync with Clang's Darwin specific argument - // translation. - - return StringSwitch<ArchType>(Str) - .Cases("ppc", "ppc601", "ppc603", "ppc604", "ppc604e", Triple::ppc) - .Cases("ppc750", "ppc7400", "ppc7450", "ppc970", Triple::ppc) - .Case("ppc64", Triple::ppc64) - .Cases("i386", "i486", "i486SX", "i586", "i686", Triple::x86) - .Cases("pentium", "pentpro", "pentIIm3", "pentIIm5", "pentium4", - Triple::x86) - .Case("x86_64", Triple::x86_64) - // This is derived from the driver driver. - .Cases("arm", "armv4t", "armv5", "armv6", Triple::arm) - .Cases("armv7", "armv7f", "armv7k", "armv7s", "xscale", Triple::arm) - .Case("r600", Triple::r600) - .Case("nvptx", Triple::nvptx) - .Case("nvptx64", Triple::nvptx64) - .Case("amdil", Triple::amdil) - .Default(Triple::UnknownArch); -} - // Returns architecture name that is understood by the target assembler. 
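(Editorial aside, not part of the patch.) The getAsUnsignedInteger change a few hunks up replaces the wrap-around test (Result < PrevResult) with Result/Radix < PrevResult: if the multiply-and-add overflowed, dividing back by the radix can no longer reproduce the previous value, even when the wrapped result happens to land above it. A standalone base-10 parser with that check; parseUnsigned is my own name:

  #include <cstdint>
  #include <cstdio>

  // Parse a base-10 unsigned integer; returns false on bad digits or overflow.
  bool parseUnsigned(const char *s, uint64_t &out) {
    const uint64_t radix = 10;
    uint64_t result = 0;
    if (!*s) return false;
    for (; *s; ++s) {
      if (*s < '0' || *s > '9') return false;
      uint64_t prev = result;
      result = result * radix + uint64_t(*s - '0');
      if (result / radix < prev)   // overflow: shifting back lost bits
        return false;
    }
    out = result;
    return true;
  }

  int main() {
    uint64_t v;
    std::printf("%d\n", parseUnsigned("18446744073709551616", v));  // 2^64 overflows: prints 0
  }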
const char *Triple::getArchNameForAssembler() { if (!isOSDarwin() && getVendor() != Triple::Apple) @@ -225,6 +204,8 @@ const char *Triple::getArchNameForAssembler() { .Case("nvptx64", "nvptx64") .Case("le32", "le32") .Case("amdil", "amdil") + .Case("spir", "spir") + .Case("spir64", "spir64") .Default(NULL); } @@ -259,6 +240,8 @@ static Triple::ArchType parseArch(StringRef ArchName) { .Case("nvptx64", Triple::nvptx64) .Case("le32", Triple::le32) .Case("amdil", Triple::amdil) + .Case("spir", Triple::spir) + .Case("spir64", Triple::spir64) .Default(Triple::UnknownArch); } @@ -269,6 +252,8 @@ static Triple::VendorType parseVendor(StringRef VendorName) { .Case("scei", Triple::SCEI) .Case("bgp", Triple::BGP) .Case("bgq", Triple::BGQ) + .Case("fsl", Triple::Freescale) + .Case("ibm", Triple::IBM) .Default(Triple::UnknownVendor); } @@ -295,6 +280,7 @@ static Triple::OSType parseOS(StringRef OSName) { .StartsWith("nacl", Triple::NativeClient) .StartsWith("cnk", Triple::CNK) .StartsWith("bitrig", Triple::Bitrig) + .StartsWith("aix", Triple::AIX) .Default(Triple::UnknownOS); } @@ -305,7 +291,8 @@ static Triple::EnvironmentType parseEnvironment(StringRef EnvironmentName) { .StartsWith("gnueabi", Triple::GNUEABI) .StartsWith("gnu", Triple::GNU) .StartsWith("macho", Triple::MachO) - .StartsWith("androideabi", Triple::ANDROIDEABI) + .StartsWith("android", Triple::Android) + .StartsWith("elf", Triple::ELF) .Default(Triple::UnknownEnvironment); } @@ -690,6 +677,7 @@ static unsigned getArchPointerBitWidth(llvm::Triple::ArchType Arch) { case llvm::Triple::thumb: case llvm::Triple::x86: case llvm::Triple::xcore: + case llvm::Triple::spir: return 32; case llvm::Triple::mips64: @@ -698,6 +686,7 @@ static unsigned getArchPointerBitWidth(llvm::Triple::ArchType Arch) { case llvm::Triple::ppc64: case llvm::Triple::sparcv9: case llvm::Triple::x86_64: + case llvm::Triple::spir64: return 64; } llvm_unreachable("Invalid architecture value"); @@ -724,6 +713,7 @@ Triple Triple::get32BitArchVariant() const { break; case Triple::amdil: + case Triple::spir: case Triple::arm: case Triple::cellspu: case Triple::hexagon: @@ -748,6 +738,7 @@ Triple Triple::get32BitArchVariant() const { case Triple::ppc64: T.setArch(Triple::ppc); break; case Triple::sparcv9: T.setArch(Triple::sparc); break; case Triple::x86_64: T.setArch(Triple::x86); break; + case Triple::spir64: T.setArch(Triple::spir); break; } return T; } @@ -770,6 +761,7 @@ Triple Triple::get64BitArchVariant() const { T.setArch(UnknownArch); break; + case Triple::spir64: case Triple::mips64: case Triple::mips64el: case Triple::nvptx64: @@ -785,6 +777,7 @@ Triple Triple::get64BitArchVariant() const { case Triple::ppc: T.setArch(Triple::ppc64); break; case Triple::sparc: T.setArch(Triple::sparcv9); break; case Triple::x86: T.setArch(Triple::x86_64); break; + case Triple::spir: T.setArch(Triple::spir64); break; } return T; } diff --git a/lib/Support/Unix/Memory.inc b/lib/Support/Unix/Memory.inc index 5a57a28..9a8abd2 100644 --- a/lib/Support/Unix/Memory.inc +++ b/lib/Support/Unix/Memory.inc @@ -13,6 +13,7 @@ #include "Unix.h" #include "llvm/Support/DataTypes.h" +#include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Process.h" #ifdef HAVE_SYS_MMAN_H @@ -23,14 +24,146 @@ #include <mach/mach.h> #endif +#if defined(__mips__) +# if defined(__OpenBSD__) +# include <mips64/sysarch.h> +# else +# include <sys/cachectl.h> +# endif +#endif + +extern "C" void sys_icache_invalidate(const void *Addr, size_t len); + +namespace { + +int getPosixProtectionFlags(unsigned Flags) { + 
switch (Flags) { + case llvm::sys::Memory::MF_READ: + return PROT_READ; + case llvm::sys::Memory::MF_WRITE: + return PROT_WRITE; + case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_WRITE: + return PROT_READ | PROT_WRITE; + case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_EXEC: + return PROT_READ | PROT_EXEC; + case llvm::sys::Memory::MF_READ | + llvm::sys::Memory::MF_WRITE | + llvm::sys::Memory::MF_EXEC: + return PROT_READ | PROT_WRITE | PROT_EXEC; + case llvm::sys::Memory::MF_EXEC: + return PROT_EXEC; + default: + llvm_unreachable("Illegal memory protection flag specified!"); + } + // Provide a default return value as required by some compilers. + return PROT_NONE; +} + +} // namespace + +namespace llvm { +namespace sys { + +MemoryBlock +Memory::allocateMappedMemory(size_t NumBytes, + const MemoryBlock *const NearBlock, + unsigned PFlags, + error_code &EC) { + EC = error_code::success(); + if (NumBytes == 0) + return MemoryBlock(); + + static const size_t PageSize = Process::GetPageSize(); + const size_t NumPages = (NumBytes+PageSize-1)/PageSize; + + int fd = -1; +#ifdef NEED_DEV_ZERO_FOR_MMAP + static int zero_fd = open("/dev/zero", O_RDWR); + if (zero_fd == -1) { + EC = error_code(errno, system_category()); + return MemoryBlock(); + } + fd = zero_fd; +#endif + + int MMFlags = MAP_PRIVATE | +#ifdef HAVE_MMAP_ANONYMOUS + MAP_ANONYMOUS +#else + MAP_ANON +#endif + ; // Ends statement above + + int Protect = getPosixProtectionFlags(PFlags); + + // Use any near hint and the page size to set a page-aligned starting address + uintptr_t Start = NearBlock ? reinterpret_cast<uintptr_t>(NearBlock->base()) + + NearBlock->size() : 0; + if (Start && Start % PageSize) + Start += PageSize - Start % PageSize; + + void *Addr = ::mmap(reinterpret_cast<void*>(Start), PageSize*NumPages, + Protect, MMFlags, fd, 0); + if (Addr == MAP_FAILED) { + if (NearBlock) //Try again without a near hint + return allocateMappedMemory(NumBytes, 0, PFlags, EC); + + EC = error_code(errno, system_category()); + return MemoryBlock(); + } + + MemoryBlock Result; + Result.Address = Addr; + Result.Size = NumPages*PageSize; + + if (PFlags & MF_EXEC) + Memory::InvalidateInstructionCache(Result.Address, Result.Size); + + return Result; +} + +error_code +Memory::releaseMappedMemory(MemoryBlock &M) { + if (M.Address == 0 || M.Size == 0) + return error_code::success(); + + if (0 != ::munmap(M.Address, M.Size)) + return error_code(errno, system_category()); + + M.Address = 0; + M.Size = 0; + + return error_code::success(); +} + +error_code +Memory::protectMappedMemory(const MemoryBlock &M, unsigned Flags) { + if (M.Address == 0 || M.Size == 0) + return error_code::success(); + + if (!Flags) + return error_code(EINVAL, generic_category()); + + int Protect = getPosixProtectionFlags(Flags); + + int Result = ::mprotect(M.Address, M.Size, Protect); + if (Result != 0) + return error_code(errno, system_category()); + + if (Flags & MF_EXEC) + Memory::InvalidateInstructionCache(M.Address, M.Size); + + return error_code::success(); +} + /// AllocateRWX - Allocate a slab of memory with read/write/execute /// permissions. This is typically used for JIT applications where we want /// to emit code to the memory then jump to it. Getting this type of memory /// is very OS specific. 
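(Editorial aside, not part of the patch.) allocateMappedMemory and protectMappedMemory above translate the sys::Memory::MF_* flags into PROT_* bits for mmap and mprotect. A bare POSIX sketch of the pattern they enable, mapping pages writable, filling them, then dropping the write permission; it assumes MAP_ANONYMOUS is available (the patch itself falls back to MAP_ANON where it is not):

  #include <cstdio>
  #include <cstring>
  #include <sys/mman.h>

  int main() {
    const size_t size = 4096;  // one page, for the sake of the sketch
    // Map anonymous memory read+write, the moral equivalent of MF_READ|MF_WRITE.
    void *p = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) { perror("mmap"); return 1; }

    std::memcpy(p, "hello", 6);  // fill while writable

    // Drop the write bit, much as protectMappedMemory(M, MF_READ) would.
    if (mprotect(p, size, PROT_READ) != 0) { perror("mprotect"); return 1; }

    std::printf("%s\n", static_cast<const char *>(p));  // still readable
    munmap(p, size);
    return 0;
  }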
/// -llvm::sys::MemoryBlock -llvm::sys::Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock, - std::string *ErrMsg) { +MemoryBlock +Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock, + std::string *ErrMsg) { if (NumBytes == 0) return MemoryBlock(); size_t pageSize = Process::GetPageSize(); @@ -78,7 +211,7 @@ llvm::sys::Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock, VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY); if (KERN_SUCCESS != kr) { MakeErrMsg(ErrMsg, "vm_protect max RX failed"); - return sys::MemoryBlock(); + return MemoryBlock(); } kr = vm_protect(mach_task_self(), (vm_address_t)pa, @@ -86,7 +219,7 @@ llvm::sys::Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock, VM_PROT_READ | VM_PROT_WRITE); if (KERN_SUCCESS != kr) { MakeErrMsg(ErrMsg, "vm_protect RW failed"); - return sys::MemoryBlock(); + return MemoryBlock(); } #endif @@ -97,17 +230,17 @@ llvm::sys::Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock, return result; } -bool llvm::sys::Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) { +bool Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) { if (M.Address == 0 || M.Size == 0) return false; if (0 != ::munmap(M.Address, M.Size)) return MakeErrMsg(ErrMsg, "Can't release RWX Memory"); return false; } -bool llvm::sys::Memory::setWritable (MemoryBlock &M, std::string *ErrMsg) { +bool Memory::setWritable (MemoryBlock &M, std::string *ErrMsg) { #if defined(__APPLE__) && defined(__arm__) if (M.Address == 0 || M.Size == 0) return false; - sys::Memory::InvalidateInstructionCache(M.Address, M.Size); + Memory::InvalidateInstructionCache(M.Address, M.Size); kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address, (vm_size_t)M.Size, 0, VM_PROT_READ | VM_PROT_WRITE); return KERN_SUCCESS == kr; @@ -116,10 +249,10 @@ bool llvm::sys::Memory::setWritable (MemoryBlock &M, std::string *ErrMsg) { #endif } -bool llvm::sys::Memory::setExecutable (MemoryBlock &M, std::string *ErrMsg) { +bool Memory::setExecutable (MemoryBlock &M, std::string *ErrMsg) { #if defined(__APPLE__) && defined(__arm__) if (M.Address == 0 || M.Size == 0) return false; - sys::Memory::InvalidateInstructionCache(M.Address, M.Size); + Memory::InvalidateInstructionCache(M.Address, M.Size); kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address, (vm_size_t)M.Size, 0, VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY); return KERN_SUCCESS == kr; @@ -128,7 +261,7 @@ bool llvm::sys::Memory::setExecutable (MemoryBlock &M, std::string *ErrMsg) { #endif } -bool llvm::sys::Memory::setRangeWritable(const void *Addr, size_t Size) { +bool Memory::setRangeWritable(const void *Addr, size_t Size) { #if defined(__APPLE__) && defined(__arm__) kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr, (vm_size_t)Size, 0, @@ -139,7 +272,7 @@ bool llvm::sys::Memory::setRangeWritable(const void *Addr, size_t Size) { #endif } -bool llvm::sys::Memory::setRangeExecutable(const void *Addr, size_t Size) { +bool Memory::setRangeExecutable(const void *Addr, size_t Size) { #if defined(__APPLE__) && defined(__arm__) kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr, (vm_size_t)Size, 0, @@ -149,3 +282,52 @@ bool llvm::sys::Memory::setRangeExecutable(const void *Addr, size_t Size) { return true; #endif } + +/// InvalidateInstructionCache - Before the JIT can run a block of code +/// that has been emitted it must invalidate the instruction cache on some +/// platforms. 
+void Memory::InvalidateInstructionCache(const void *Addr, + size_t Len) { + +// icache invalidation for PPC and ARM. +#if defined(__APPLE__) + +# if (defined(__POWERPC__) || defined (__ppc__) || \ + defined(_POWER) || defined(_ARCH_PPC)) || defined(__arm__) + sys_icache_invalidate(const_cast<void *>(Addr), Len); +# endif + +#else + +# if (defined(__POWERPC__) || defined (__ppc__) || \ + defined(_POWER) || defined(_ARCH_PPC)) && defined(__GNUC__) + const size_t LineSize = 32; + + const intptr_t Mask = ~(LineSize - 1); + const intptr_t StartLine = ((intptr_t) Addr) & Mask; + const intptr_t EndLine = ((intptr_t) Addr + Len + LineSize - 1) & Mask; + + for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize) + asm volatile("dcbf 0, %0" : : "r"(Line)); + asm volatile("sync"); + + for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize) + asm volatile("icbi 0, %0" : : "r"(Line)); + asm volatile("isync"); +# elif defined(__arm__) && defined(__GNUC__) + // FIXME: Can we safely always call this for __GNUC__ everywhere? + const char *Start = static_cast<const char *>(Addr); + const char *End = Start + Len; + __clear_cache(const_cast<char *>(Start), const_cast<char *>(End)); +# elif defined(__mips__) + const char *Start = static_cast<const char *>(Addr); + cacheflush(const_cast<char *>(Start), Len, BCACHE); +# endif + +#endif // end apple + + ValgrindDiscardTranslations(Addr, Len); +} + +} // namespace sys +} // namespace llvm diff --git a/lib/Support/Unix/Path.inc b/lib/Support/Unix/Path.inc index 6bddbdf..6a5ebb8 100644 --- a/lib/Support/Unix/Path.inc +++ b/lib/Support/Unix/Path.inc @@ -261,7 +261,8 @@ Path::GetCurrentDirectory() { } #if defined(__FreeBSD__) || defined (__NetBSD__) || defined(__Bitrig__) || \ - defined(__OpenBSD__) || defined(__minix) || defined(__FreeBSD_kernel__) + defined(__OpenBSD__) || defined(__minix) || defined(__FreeBSD_kernel__) || \ + defined(__linux__) || defined(__CYGWIN__) static int test_dir(char buf[PATH_MAX], char ret[PATH_MAX], const char *dir, const char *bin) @@ -337,9 +338,17 @@ Path Path::GetMainExecutable(const char *argv0, void *MainAddr) { return Path(exe_path); #elif defined(__linux__) || defined(__CYGWIN__) char exe_path[MAXPATHLEN]; - ssize_t len = readlink("/proc/self/exe", exe_path, sizeof(exe_path)); - if (len >= 0) - return Path(StringRef(exe_path, len)); + StringRef aPath("/proc/self/exe"); + if (sys::fs::exists(aPath)) { + // /proc is not always mounted under Linux (chroot for example). + ssize_t len = readlink(aPath.str().c_str(), exe_path, sizeof(exe_path)); + if (len >= 0) + return Path(StringRef(exe_path, len)); + } else { + // Fall back to the classical detection. + if (getprogpath(exe_path, argv0) != NULL) + return Path(exe_path); + } #elif defined(HAVE_DLFCN_H) // Use dladdr to get executable path if available. Dl_info DLInfo; diff --git a/lib/Support/Unix/Signals.inc b/lib/Support/Unix/Signals.inc index 5195116..9e94068 100644 --- a/lib/Support/Unix/Signals.inc +++ b/lib/Support/Unix/Signals.inc @@ -121,17 +121,29 @@ static void UnregisterHandlers() { /// NB: This must be an async signal safe function. It cannot allocate or free /// memory, even in debug builds. static void RemoveFilesToRemove() { - // Note: avoid iterators in case of debug iterators that allocate or release + // We avoid iterators in case of debug iterators that allocate or release // memory. for (unsigned i = 0, e = FilesToRemove.size(); i != e; ++i) { - // Note that we don't want to use any external code here, and we don't care - // about errors. 
We're going to try as hard as we can as often as we need - // to to make these files go away. If these aren't files, too bad. - // - // We do however rely on a std::string implementation for which repeated - // calls to 'c_str()' don't allocate memory. We pre-call 'c_str()' on all - // of these strings to try to ensure this is safe. - unlink(FilesToRemove[i].c_str()); + // We rely on a std::string implementation for which repeated calls to + // 'c_str()' don't allocate memory. We pre-call 'c_str()' on all of these + // strings to try to ensure this is safe. + const char *path = FilesToRemove[i].c_str(); + + // Get the status so we can determine if it's a file or directory. If we + // can't stat the file, ignore it. + struct stat buf; + if (stat(path, &buf) != 0) + continue; + + // If this is not a regular file, ignore it. We want to prevent removal of + // special files like /dev/null, even if the compiler is being run with the + // super-user permissions. + if (!S_ISREG(buf.st_mode)) + continue; + + // Otherwise, remove the file. We ignore any errors here as there is nothing + // else we can do. + unlink(path); } } @@ -243,7 +255,7 @@ void llvm::sys::AddSignalHandler(void (*FnPtr)(void *), void *Cookie) { // On glibc systems we have the 'backtrace' function, which works nicely, but // doesn't demangle symbols. static void PrintStackTrace(void *) { -#ifdef HAVE_BACKTRACE +#if defined(HAVE_BACKTRACE) && defined(ENABLE_BACKTRACES) static void* StackTrace[256]; // Use backtrace() to output a backtrace on Linux systems with glibc. int depth = backtrace(StackTrace, @@ -293,7 +305,7 @@ static void PrintStackTrace(void *) { #endif } -/// PrintStackTraceOnErrorSignal - When an error signal (such as SIBABRT or +/// PrintStackTraceOnErrorSignal - When an error signal (such as SIGABRT or /// SIGSEGV) is delivered to the process, print a stack trace and then exit. 
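(Editorial aside, not part of the patch.) The PrintStackTrace path above is now compiled only when both HAVE_BACKTRACE and ENABLE_BACKTRACES are defined. Where glibc's backtrace facilities exist, the minimal usage looks like the sketch below; backtrace_symbols_fd writes straight to a file descriptor and avoids the allocation that backtrace_symbols would need. The function name is mine:

  #include <execinfo.h>
  #include <unistd.h>

  // Dump the current call stack to stderr using glibc's backtrace support.
  void printStackTraceSketch() {
    void *frames[256];
    int depth = backtrace(frames, 256);
    backtrace_symbols_fd(frames, depth, STDERR_FILENO);
  }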
void llvm::sys::PrintStackTraceOnErrorSignal() { AddSignalHandler(PrintStackTrace, 0); @@ -305,10 +317,10 @@ void llvm::sys::PrintStackTraceOnErrorSignal() { exception_mask_t mask = EXC_MASK_CRASH; - kern_return_t ret = task_set_exception_ports(self, + kern_return_t ret = task_set_exception_ports(self, mask, MACH_PORT_NULL, - EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES, + EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES, THREAD_STATE_NONE); (void)ret; } diff --git a/lib/Support/Windows/Memory.inc b/lib/Support/Windows/Memory.inc index fcc7283..cb80f28 100644 --- a/lib/Support/Windows/Memory.inc +++ b/lib/Support/Windows/Memory.inc @@ -12,51 +12,163 @@ // //===----------------------------------------------------------------------===// -#include "Windows.h" #include "llvm/Support/DataTypes.h" +#include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Process.h" +#include "Windows.h" + +namespace { + +DWORD getWindowsProtectionFlags(unsigned Flags) { + switch (Flags) { + // Contrary to what you might expect, the Windows page protection flags + // are not a bitwise combination of RWX values + case llvm::sys::Memory::MF_READ: + return PAGE_READONLY; + case llvm::sys::Memory::MF_WRITE: + // Note: PAGE_WRITE is not supported by VirtualProtect + return PAGE_READWRITE; + case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_WRITE: + return PAGE_READWRITE; + case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_EXEC: + return PAGE_EXECUTE_READ; + case llvm::sys::Memory::MF_READ | + llvm::sys::Memory::MF_WRITE | + llvm::sys::Memory::MF_EXEC: + return PAGE_EXECUTE_READWRITE; + case llvm::sys::Memory::MF_EXEC: + return PAGE_EXECUTE; + default: + llvm_unreachable("Illegal memory protection flag specified!"); + } + // Provide a default return value as required by some compilers. + return PAGE_NOACCESS; +} + +size_t getAllocationGranularity() { + SYSTEM_INFO Info; + ::GetSystemInfo(&Info); + if (Info.dwPageSize > Info.dwAllocationGranularity) + return Info.dwPageSize; + else + return Info.dwAllocationGranularity; +} + +} // namespace namespace llvm { -using namespace sys; +namespace sys { //===----------------------------------------------------------------------===// //=== WARNING: Implementation here must contain only Win32 specific code //=== and must not be UNIX code //===----------------------------------------------------------------------===// -MemoryBlock Memory::AllocateRWX(size_t NumBytes, - const MemoryBlock *NearBlock, - std::string *ErrMsg) { - if (NumBytes == 0) return MemoryBlock(); +MemoryBlock Memory::allocateMappedMemory(size_t NumBytes, + const MemoryBlock *const NearBlock, + unsigned Flags, + error_code &EC) { + EC = error_code::success(); + if (NumBytes == 0) + return MemoryBlock(); + + // While we'd be happy to allocate single pages, the Windows allocation + // granularity may be larger than a single page (in practice, it is 64K) + // so mapping less than that will create an unreachable fragment of memory. + static const size_t Granularity = getAllocationGranularity(); + const size_t NumBlocks = (NumBytes+Granularity-1)/Granularity; - static const size_t pageSize = Process::GetPageSize(); - size_t NumPages = (NumBytes+pageSize-1)/pageSize; + uintptr_t Start = NearBlock ? reinterpret_cast<uintptr_t>(NearBlock->base()) + + NearBlock->size() + : NULL; - PVOID start = NearBlock ? static_cast<unsigned char *>(NearBlock->base()) + - NearBlock->size() : NULL; + // If the requested address is not aligned to the allocation granularity, + // round up to get beyond NearBlock. 
VirtualAlloc would have rounded down. + if (Start && Start % Granularity != 0) + Start += Granularity - Start % Granularity; - void *pa = VirtualAlloc(start, NumPages*pageSize, MEM_RESERVE | MEM_COMMIT, - PAGE_EXECUTE_READWRITE); - if (pa == NULL) { + DWORD Protect = getWindowsProtectionFlags(Flags); + + void *PA = ::VirtualAlloc(reinterpret_cast<void*>(Start), + NumBlocks*Granularity, + MEM_RESERVE | MEM_COMMIT, Protect); + if (PA == NULL) { if (NearBlock) { // Try again without the NearBlock hint - return AllocateRWX(NumBytes, NULL, ErrMsg); + return allocateMappedMemory(NumBytes, NULL, Flags, EC); } - MakeErrMsg(ErrMsg, "Can't allocate RWX Memory: "); + EC = error_code(::GetLastError(), system_category()); return MemoryBlock(); } - MemoryBlock result; - result.Address = pa; - result.Size = NumPages*pageSize; - return result; + MemoryBlock Result; + Result.Address = PA; + Result.Size = NumBlocks*Granularity; + ; + if (Flags & MF_EXEC) + Memory::InvalidateInstructionCache(Result.Address, Result.Size); + + return Result; } -bool Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) { - if (M.Address == 0 || M.Size == 0) return false; +error_code Memory::releaseMappedMemory(MemoryBlock &M) { + if (M.Address == 0 || M.Size == 0) + return error_code::success(); + if (!VirtualFree(M.Address, 0, MEM_RELEASE)) - return MakeErrMsg(ErrMsg, "Can't release RWX Memory: "); - return false; + return error_code(::GetLastError(), system_category()); + + M.Address = 0; + M.Size = 0; + + return error_code::success(); +} + +error_code Memory::protectMappedMemory(const MemoryBlock &M, + unsigned Flags) { + if (M.Address == 0 || M.Size == 0) + return error_code::success(); + + DWORD Protect = getWindowsProtectionFlags(Flags); + + DWORD OldFlags; + if (!VirtualProtect(M.Address, M.Size, Protect, &OldFlags)) + return error_code(::GetLastError(), system_category()); + + if (Flags & MF_EXEC) + Memory::InvalidateInstructionCache(M.Address, M.Size); + + return error_code::success(); +} + +/// InvalidateInstructionCache - Before the JIT can run a block of code +/// that has been emitted it must invalidate the instruction cache on some +/// platforms. 
+void Memory::InvalidateInstructionCache( + const void *Addr, size_t Len) { + FlushInstructionCache(GetCurrentProcess(), Addr, Len); +} + + +MemoryBlock Memory::AllocateRWX(size_t NumBytes, + const MemoryBlock *NearBlock, + std::string *ErrMsg) { + MemoryBlock MB; + error_code EC; + MB = allocateMappedMemory(NumBytes, NearBlock, + MF_READ|MF_WRITE|MF_EXEC, EC); + if (EC != error_code::success() && ErrMsg) { + MakeErrMsg(ErrMsg, EC.message()); + } + return MB; +} + +bool Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) { + error_code EC = releaseMappedMemory(M); + if (EC == error_code::success()) + return false; + MakeErrMsg(ErrMsg, EC.message()); + return true; } static DWORD getProtection(const void *addr) { @@ -93,7 +205,7 @@ bool Memory::setRangeWritable(const void *Addr, size_t Size) { } DWORD oldProt; - sys::Memory::InvalidateInstructionCache(Addr, Size); + Memory::InvalidateInstructionCache(Addr, Size); return ::VirtualProtect(const_cast<LPVOID>(Addr), Size, prot, &oldProt) == TRUE; } @@ -112,9 +224,10 @@ bool Memory::setRangeExecutable(const void *Addr, size_t Size) { } DWORD oldProt; - sys::Memory::InvalidateInstructionCache(Addr, Size); + Memory::InvalidateInstructionCache(Addr, Size); return ::VirtualProtect(const_cast<LPVOID>(Addr), Size, prot, &oldProt) == TRUE; } -} +} // namespace sys +} // namespace llvm diff --git a/lib/Support/Windows/PathV2.inc b/lib/Support/Windows/PathV2.inc index 696768b..3dfac66 100644 --- a/lib/Support/Windows/PathV2.inc +++ b/lib/Support/Windows/PathV2.inc @@ -794,7 +794,7 @@ mapped_file_region::mapped_file_region(const Twine &path, SmallVector<wchar_t, 128> path_utf16; // Convert path to UTF-16. - if (ec = UTF8ToUTF16(path.toStringRef(path_storage), path_utf16)) + if ((ec = UTF8ToUTF16(path.toStringRef(path_storage), path_utf16))) return; // Get file handle for creating a file mapping. diff --git a/lib/Support/YAMLParser.cpp b/lib/Support/YAMLParser.cpp index 7c353c8..34df636 100644 --- a/lib/Support/YAMLParser.cpp +++ b/lib/Support/YAMLParser.cpp @@ -903,6 +903,7 @@ bool Scanner::consume(uint32_t Expected) { void Scanner::skip(uint32_t Distance) { Current += Distance; Column += Distance; + assert(Current <= End && "Skipped past the end"); } bool Scanner::isBlankOrBreak(StringRef::iterator Position) { @@ -1239,6 +1240,12 @@ bool Scanner::scanFlowScalar(bool IsDoubleQuoted) { } } } + + if (Current == End) { + setError("Expected quote at end of scalar", Current); + return false; + } + skip(1); // Skip ending quote. Token T; T.Kind = Token::TK_Scalar; diff --git a/lib/Support/raw_ostream.cpp b/lib/Support/raw_ostream.cpp index fa69c2d..7cd5364 100644 --- a/lib/Support/raw_ostream.cpp +++ b/lib/Support/raw_ostream.cpp @@ -266,8 +266,8 @@ void raw_ostream::flush_nonempty() { raw_ostream &raw_ostream::write(unsigned char C) { // Group exceptional cases into a single branch. - if (BUILTIN_EXPECT(OutBufCur >= OutBufEnd, false)) { - if (BUILTIN_EXPECT(!OutBufStart, false)) { + if (LLVM_UNLIKELY(OutBufCur >= OutBufEnd)) { + if (LLVM_UNLIKELY(!OutBufStart)) { if (BufferMode == Unbuffered) { write_impl(reinterpret_cast<char*>(&C), 1); return *this; @@ -286,8 +286,8 @@ raw_ostream &raw_ostream::write(unsigned char C) { raw_ostream &raw_ostream::write(const char *Ptr, size_t Size) { // Group exceptional cases into a single branch. 
- if (BUILTIN_EXPECT(size_t(OutBufEnd - OutBufCur) < Size, false)) { - if (BUILTIN_EXPECT(!OutBufStart, false)) { + if (LLVM_UNLIKELY(size_t(OutBufEnd - OutBufCur) < Size)) { + if (LLVM_UNLIKELY(!OutBufStart)) { if (BufferMode == Unbuffered) { write_impl(Ptr, Size); return *this; @@ -302,7 +302,7 @@ raw_ostream &raw_ostream::write(const char *Ptr, size_t Size) { // If the buffer is empty at this point we have a string that is larger // than the buffer. Directly write the chunk that is a multiple of the // preferred buffer size and put the remainder in the buffer. - if (BUILTIN_EXPECT(OutBufCur == OutBufStart, false)) { + if (LLVM_UNLIKELY(OutBufCur == OutBufStart)) { size_t BytesToWrite = Size - (Size % NumBytes); write_impl(Ptr, BytesToWrite); copy_to_buffer(Ptr + BytesToWrite, Size - BytesToWrite); @@ -523,7 +523,7 @@ void raw_fd_ostream::write_impl(const char *Ptr, size_t Size) { ssize_t ret; // Check whether we should attempt to use atomic writes. - if (BUILTIN_EXPECT(!UseAtomicWrites, true)) { + if (LLVM_LIKELY(!UseAtomicWrites)) { ret = ::write(FD, Ptr, Size); } else { // Use ::writev() where available. diff --git a/lib/Support/regexec.c b/lib/Support/regexec.c index 0078616..bd5e72d 100644 --- a/lib/Support/regexec.c +++ b/lib/Support/regexec.c @@ -69,7 +69,7 @@ #define SETUP(v) ((v) = 0) #define onestate long #define INIT(o, n) ((o) = (unsigned long)1 << (n)) -#define INC(o) ((o) <<= 1) +#define INC(o) ((o) = (unsigned long)(o) << 1) #define ISSTATEIN(v, o) (((v) & (o)) != 0) /* some abbreviations; note that some of these know variable names! */ /* do "if I'm here, I can also be there" etc without branches */ diff --git a/lib/Support/system_error.cpp b/lib/Support/system_error.cpp index 56898de..2df223c 100644 --- a/lib/Support/system_error.cpp +++ b/lib/Support/system_error.cpp @@ -48,8 +48,8 @@ _do_message::message(int ev) const { class _generic_error_category : public _do_message { public: - virtual const char* name() const; - virtual std::string message(int ev) const; + virtual const char* name() const LLVM_OVERRIDE; + virtual std::string message(int ev) const LLVM_OVERRIDE; }; const char* @@ -74,9 +74,9 @@ generic_category() { class _system_error_category : public _do_message { public: - virtual const char* name() const; - virtual std::string message(int ev) const; - virtual error_condition default_error_condition(int ev) const; + virtual const char* name() const LLVM_OVERRIDE; + virtual std::string message(int ev) const LLVM_OVERRIDE; + virtual error_condition default_error_condition(int ev) const LLVM_OVERRIDE; }; const char* |
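Taken together, the Unix and Windows Memory.inc changes above replace the RWX-only AllocateRWX/ReleaseRWX entry points with a flag-driven mapping API (allocateMappedMemory, protectMappedMemory, releaseMappedMemory) that reports failures through error_code and invalidates the instruction cache whenever MF_EXEC is requested. Below is a minimal sketch of how a JIT-style caller might drive that API; it is based only on the signatures visible in this diff, and the header names, the stub function, and the copied payload are illustrative assumptions rather than part of the patch.

// Sketch only: exercises the allocate/protect/release entry points shown in
// this diff. Header names and the emitted payload are assumptions.
#include "llvm/Support/Memory.h"
#include "llvm/Support/system_error.h"
#include <cstring>

using namespace llvm;

static bool emitStub(const char *Code, size_t Len) {
  error_code EC;

  // Ask for readable/writable memory first. On Windows the request is rounded
  // up to the allocation granularity (typically 64K) rather than the page size.
  sys::MemoryBlock MB = sys::Memory::allocateMappedMemory(
      Len, /*NearBlock=*/0, sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC);
  if (EC != error_code::success())
    return false;

  std::memcpy(MB.base(), Code, Len);

  // Drop write access and make the block executable; per this patch, passing
  // MF_EXEC also invalidates the instruction cache for the range.
  EC = sys::Memory::protectMappedMemory(
      MB, sys::Memory::MF_READ | sys::Memory::MF_EXEC);
  if (EC != error_code::success())
    return false;

  // ... transfer control to MB.base() here ...

  return sys::Memory::releaseMappedMemory(MB) == error_code::success();
}

The legacy AllocateRWX/ReleaseRWX wrappers kept in the Windows diff are implemented in exactly these terms (MF_READ|MF_WRITE|MF_EXEC plus an error message), so existing callers keep working while new clients can request tighter protections.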