author     dim <dim@FreeBSD.org>    2016-01-06 20:19:13 +0000
committer  dim <dim@FreeBSD.org>    2016-01-06 20:19:13 +0000
commit     e06c171d67ab436f270b15f7e364a8d8f77c01f2 (patch)
tree       b7c03c042b220d85a294b0e2e89936b631d3e6ad /contrib/llvm/lib/Analysis
parent     db873d7452584205dd063528dc8addbf28aa396b (diff)
parent     ff2ba393a56d9d99dcb76ceada542233db28af9a (diff)
download   FreeBSD-src-e06c171d67ab436f270b15f7e364a8d8f77c01f2.zip
           FreeBSD-src-e06c171d67ab436f270b15f7e364a8d8f77c01f2.tar.gz
Update llvm to trunk r256945.
Diffstat (limited to 'contrib/llvm/lib/Analysis')
-rw-r--r--  contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp       | 25
-rw-r--r--  contrib/llvm/lib/Analysis/GlobalsModRef.cpp            | 17
-rw-r--r--  contrib/llvm/lib/Analysis/MemoryBuiltins.cpp           |  7
-rw-r--r--  contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp | 22
-rw-r--r--  contrib/llvm/lib/Analysis/TargetLibraryInfo.cpp        | 13
-rw-r--r--  contrib/llvm/lib/Analysis/ValueTracking.cpp            | 25
6 files changed, 39 insertions, 70 deletions
diff --git a/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp
index 00f346e..85404d8 100644
--- a/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -543,7 +543,6 @@ static bool isMemsetPattern16(const Function *MS,
         isa<IntegerType>(MemsetType->getParamType(2)))
       return true;
   }
-
   return false;
 }
 
@@ -583,9 +582,6 @@ FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
   if (F->onlyAccessesArgMemory())
     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
 
-  if (isMemsetPattern16(F, TLI))
-    Min = FMRB_OnlyAccessesArgumentPointees;
-
   // Otherwise be conservative.
   return FunctionModRefBehavior(AAResultBase::getModRefBehavior(F) & Min);
 }
@@ -599,22 +595,21 @@ ModRefInfo BasicAAResult::getArgModRefInfo(ImmutableCallSite CS,
   case Intrinsic::memset:
   case Intrinsic::memcpy:
   case Intrinsic::memmove:
-    assert((ArgIdx == 0 || ArgIdx == 1) &&
-           "Invalid argument index for memory intrinsic");
-    return ArgIdx ? MRI_Ref : MRI_Mod;
+    // We don't currently have a writeonly attribute. All other properties
+    // of these intrinsics are nicely described via attributes in
+    // Intrinsics.td and handled generically below.
+    if (ArgIdx == 0)
+      return MRI_Mod;
   }
 
   // We can bound the aliasing properties of memset_pattern16 just as we can
   // for memcpy/memset. This is particularly important because the
   // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
-  // whenever possible.
-  if (CS.getCalledFunction() &&
-      isMemsetPattern16(CS.getCalledFunction(), TLI)) {
-    assert((ArgIdx == 0 || ArgIdx == 1) &&
-           "Invalid argument index for memset_pattern16");
-    return ArgIdx ? MRI_Ref : MRI_Mod;
-  }
-  // FIXME: Handle memset_pattern4 and memset_pattern8 also.
+  // whenever possible. Note that all but the missing writeonly attribute are
+  // handled via InferFunctionAttr.
+  if (CS.getCalledFunction() && isMemsetPattern16(CS.getCalledFunction(), TLI))
+    if (ArgIdx == 0)
+      return MRI_Mod;
 
   if (CS.paramHasAttr(ArgIdx + 1, Attribute::ReadOnly))
     return MRI_Ref;
diff --git a/contrib/llvm/lib/Analysis/GlobalsModRef.cpp b/contrib/llvm/lib/Analysis/GlobalsModRef.cpp
index ab2263a..249f395 100644
--- a/contrib/llvm/lib/Analysis/GlobalsModRef.cpp
+++ b/contrib/llvm/lib/Analysis/GlobalsModRef.cpp
@@ -376,15 +376,6 @@ bool GlobalsAAResult::AnalyzeUsesOfPointer(Value *V,
       } else {
         return true; // Argument of an unknown call.
       }
-      // If the Callee is not ReadNone, it may read the global,
-      // and if it is not ReadOnly, it may also write to it.
-      Function *CalleeF = CS.getCalledFunction();
-      if (!CalleeF->doesNotAccessMemory()) {
-        if (Readers)
-          Readers->insert(CalleeF);
-        if (Writers && !CalleeF->onlyReadsMemory())
-          Writers->insert(CalleeF);
-      }
     }
   } else if (ICmpInst *ICI = dyn_cast<ICmpInst>(I)) {
     if (!isa<ConstantPointerNull>(ICI->getOperand(1)))
@@ -516,7 +507,7 @@ void GlobalsAAResult::AnalyzeCallGraph(CallGraph &CG, Module &M) {
 
       if (F->isDeclaration()) {
         // Try to get mod/ref behaviour from function attributes.
-        if (F->doesNotAccessMemory() || F->onlyAccessesInaccessibleMemory()) {
+        if (F->doesNotAccessMemory()) {
           // Can't do better than that!
         } else if (F->onlyReadsMemory()) {
           FI.addModRefInfo(MRI_Ref);
@@ -524,12 +515,6 @@ void GlobalsAAResult::AnalyzeCallGraph(CallGraph &CG, Module &M) {
             // This function might call back into the module and read a global -
            // consider every global as possibly being read by this function.
            FI.setMayReadAnyGlobal();
-        } else if (F->onlyAccessesArgMemory() ||
-                   F->onlyAccessesInaccessibleMemOrArgMem()) {
-          // This function may only access (read/write) memory pointed to by its
-          // arguments. If this pointer is to a global, this escaping use of the
-          // pointer is captured in AnalyzeUsesOfPointer().
-          FI.addModRefInfo(MRI_ModRef);
         } else {
           FI.addModRefInfo(MRI_ModRef);
           // Can't say anything useful unless it's an intrinsic - they don't
diff --git a/contrib/llvm/lib/Analysis/MemoryBuiltins.cpp b/contrib/llvm/lib/Analysis/MemoryBuiltins.cpp
index b19ecad..9e896ae 100644
--- a/contrib/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/contrib/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -187,13 +187,6 @@ bool llvm::isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
   return getAllocationData(V, AllocLike, TLI, LookThroughBitCast);
 }
 
-/// \brief Tests if a value is a call or invoke to a library function that
-/// allocates memory and never returns null (such as operator new).
-bool llvm::isOperatorNewLikeFn(const Value *V, const TargetLibraryInfo *TLI,
-                               bool LookThroughBitCast) {
-  return getAllocationData(V, OpNewLike, TLI, LookThroughBitCast);
-}
-
 /// extractMallocCall - Returns the corresponding CallInst if the instruction
 /// is a malloc call. Since CallInst::CreateMalloc() only creates calls, we
 /// ignore InvokeInst here.
diff --git a/contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp b/contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
index 3e80bfe..6918360 100644
--- a/contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -477,7 +477,7 @@ MemDepResult MemoryDependenceAnalysis::getSimplePointerDependencyFrom(
     // being 42. A key property of this program however is that if either
     // 1 or 4 were missing, there would be a race between the store of 42
    // either the store of 0 or the load (making the whole progam racy).
-    // The paper mentionned above shows that the same property is respected
+    // The paper mentioned above shows that the same property is respected
    // by every program that can detect any optimisation of that kind: either
    // it is racy (undefined) or there is a release followed by an acquire
    // between the pair of accesses under consideration.
@@ -685,13 +685,13 @@ MemDepResult MemoryDependenceAnalysis::getSimplePointerDependencyFrom(
         return MemDepResult::getDef(Inst);
       if (isInvariantLoad)
         continue;
-      // Be conservative if the accessed pointer may alias the allocation.
-      if (AA->alias(Inst, AccessPtr) != NoAlias)
-        return MemDepResult::getClobber(Inst);
-      // If the allocation is not aliased and does not read memory (like
-      // strdup), it is safe to ignore.
-      if (isa<AllocaInst>(Inst) ||
-          isMallocLikeFn(Inst, TLI) || isCallocLikeFn(Inst, TLI))
+      // Be conservative if the accessed pointer may alias the allocation -
+      // fallback to the generic handling below.
+      if ((AA->alias(Inst, AccessPtr) == NoAlias) &&
+          // If the allocation is not aliased and does not read memory (like
+          // strdup), it is safe to ignore.
+          (isa<AllocaInst>(Inst) || isMallocLikeFn(Inst, TLI) ||
+           isCallocLikeFn(Inst, TLI)))
         continue;
     }
 
@@ -792,10 +792,8 @@ MemDepResult MemoryDependenceAnalysis::getDependency(Instruction *QueryInst) {
 static void AssertSorted(MemoryDependenceAnalysis::NonLocalDepInfo &Cache,
                          int Count = -1) {
   if (Count == -1) Count = Cache.size();
-  if (Count == 0) return;
-
-  for (unsigned i = 1; i != unsigned(Count); ++i)
-    assert(!(Cache[i] < Cache[i-1]) && "Cache isn't sorted!");
+  assert(std::is_sorted(Cache.begin(), Cache.begin() + Count) &&
+         "Cache isn't sorted!");
 }
 #endif
diff --git a/contrib/llvm/lib/Analysis/TargetLibraryInfo.cpp b/contrib/llvm/lib/Analysis/TargetLibraryInfo.cpp
index e00f4ae..ce38819 100644
--- a/contrib/llvm/lib/Analysis/TargetLibraryInfo.cpp
+++ b/contrib/llvm/lib/Analysis/TargetLibraryInfo.cpp
@@ -52,14 +52,13 @@ static bool hasSinCosPiStret(const Triple &T) {
 /// specified target triple. This should be carefully written so that a missing
 /// target triple gets a sane set of defaults.
 static void initialize(TargetLibraryInfoImpl &TLI, const Triple &T,
-                       const char *const *StandardNames) {
-#ifndef NDEBUG
+                       ArrayRef<const char *> StandardNames) {
   // Verify that the StandardNames array is in alphabetical order.
-  for (unsigned F = 1; F < LibFunc::NumLibFuncs; ++F) {
-    if (strcmp(StandardNames[F-1], StandardNames[F]) >= 0)
-      llvm_unreachable("TargetLibraryInfoImpl function names must be sorted");
-  }
-#endif // !NDEBUG
+  assert(std::is_sorted(StandardNames.begin(), StandardNames.end(),
+                        [](const char *LHS, const char *RHS) {
+                          return strcmp(LHS, RHS) < 0;
+                        }) &&
+         "TargetLibraryInfoImpl function names must be sorted");
 
   if (T.getArch() == Triple::r600 ||
       T.getArch() == Triple::amdgcn) {
diff --git a/contrib/llvm/lib/Analysis/ValueTracking.cpp b/contrib/llvm/lib/Analysis/ValueTracking.cpp
index 314ec9c..abc57ed 100644
--- a/contrib/llvm/lib/Analysis/ValueTracking.cpp
+++ b/contrib/llvm/lib/Analysis/ValueTracking.cpp
@@ -1743,9 +1743,10 @@ bool isKnownToBeAPowerOfTwo(Value *V, bool OrZero, unsigned Depth,
     return false;
 
   Value *X = nullptr, *Y = nullptr;
-  // A shift of a power of two is a power of two or zero.
+  // A shift left or a logical shift right of a power of two is a power of two
+  // or zero.
   if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
-                 match(V, m_Shr(m_Value(X), m_Value()))))
+                 match(V, m_LShr(m_Value(X), m_Value()))))
     return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q, DL);
 
   if (ZExtInst *ZI = dyn_cast<ZExtInst>(V))
@@ -2829,7 +2830,12 @@ Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
                                               const DataLayout &DL) {
   unsigned BitWidth = DL.getPointerTypeSizeInBits(Ptr->getType());
   APInt ByteOffset(BitWidth, 0);
-  while (1) {
+
+  // We walk up the defs but use a visited set to handle unreachable code. In
+  // that case, we stop after accumulating the cycle once (not that it
+  // matters).
+  SmallPtrSet<Value *, 16> Visited;
+  while (Visited.insert(Ptr).second) {
     if (Ptr->getType()->isVectorTy())
       break;
 
@@ -3268,12 +3274,9 @@ static bool isDereferenceableAndAlignedPointer(
   }
 
   // For gc.relocate, look through relocations
-  if (const IntrinsicInst *I = dyn_cast<IntrinsicInst>(V))
-    if (I->getIntrinsicID() == Intrinsic::experimental_gc_relocate) {
-      GCRelocateOperands RelocateInst(I);
-      return isDereferenceableAndAlignedPointer(
-          RelocateInst.getDerivedPtr(), Align, DL, CtxI, DT, TLI, Visited);
-    }
+  if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
+    return isDereferenceableAndAlignedPointer(
+        RelocateInst->getDerivedPtr(), Align, DL, CtxI, DT, TLI, Visited);
 
   if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V))
     return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Align, DL,
@@ -3474,10 +3477,6 @@ bool llvm::isKnownNonNull(const Value *V, const TargetLibraryInfo *TLI) {
     if (CS.isReturnNonNull())
       return true;
 
-  // operator new never returns null.
-  if (isOperatorNewLikeFn(V, TLI, /*LookThroughBitCast=*/true))
-    return true;
-
   return false;
 }