Diffstat (limited to 'contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp')
-rw-r--r-- | contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp | 149 |
1 file changed, 119 insertions, 30 deletions
diff --git a/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp
index 43d5c3c..c8d0579 100644
--- a/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/contrib/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -63,6 +63,21 @@ const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;
 // depth otherwise the algorithm in aliasGEP will assert.
 static const unsigned MaxLookupSearchDepth = 6;
 
+bool BasicAAResult::invalidate(Function &F, const PreservedAnalyses &PA,
+                               FunctionAnalysisManager::Invalidator &Inv) {
+  // We don't care if this analysis itself is preserved, it has no state. But
+  // we need to check that the analyses it depends on have been. Note that we
+  // may be created without handles to some analyses and in that case don't
+  // depend on them.
+  if (Inv.invalidate<AssumptionAnalysis>(F, PA) ||
+      (DT && Inv.invalidate<DominatorTreeAnalysis>(F, PA)) ||
+      (LI && Inv.invalidate<LoopAnalysis>(F, PA)))
+    return true;
+
+  // Otherwise this analysis result remains valid.
+  return false;
+}
+
 //===----------------------------------------------------------------------===//
 // Useful predicates
 //===----------------------------------------------------------------------===//
@@ -227,7 +242,7 @@ static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
         Offset = 0;
         return V;
       }
-      // FALL THROUGH.
+      LLVM_FALLTHROUGH;
     case Instruction::Add:
       V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                               SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
@@ -275,7 +290,7 @@ static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
         GetLinearExpression(CastOp, Scale, Offset, ZExtBits, SExtBits, DL,
                             Depth + 1, AC, DT, NSW, NUW);
 
-    // zext(zext(%x)) == zext(%x), and similiarly for sext; we'll handle this
+    // zext(zext(%x)) == zext(%x), and similarly for sext; we'll handle this
     // by just incrementing the number of bits we've extended by.
     unsigned ExtendedBy = NewWidth - SmallWidth;
 
@@ -409,11 +424,13 @@ bool BasicAAResult::DecomposeGEPExpression(const Value *V,
     // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
     gep_type_iterator GTI = gep_type_begin(GEPOp);
     unsigned PointerSize = DL.getPointerSizeInBits(AS);
+    // Assume all GEP operands are constants until proven otherwise.
+    bool GepHasConstantOffset = true;
     for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
-         I != E; ++I) {
+         I != E; ++I, ++GTI) {
       const Value *Index = *I;
       // Compute the (potentially symbolic) offset in bytes for this index.
-      if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
+      if (StructType *STy = GTI.getStructTypeOrNull()) {
         // For a struct, add the member offset.
         unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
         if (FieldNo == 0)
@@ -429,11 +446,13 @@ bool BasicAAResult::DecomposeGEPExpression(const Value *V,
         if (CIdx->isZero())
           continue;
         Decomposed.OtherOffset +=
-          DL.getTypeAllocSize(*GTI) * CIdx->getSExtValue();
+          DL.getTypeAllocSize(GTI.getIndexedType()) * CIdx->getSExtValue();
         continue;
       }
 
-      uint64_t Scale = DL.getTypeAllocSize(*GTI);
+      GepHasConstantOffset = false;
+
+      uint64_t Scale = DL.getTypeAllocSize(GTI.getIndexedType());
       unsigned ZExtBits = 0, SExtBits = 0;
 
       // If the integer type is smaller than the pointer size, it is implicitly
@@ -458,7 +477,7 @@ bool BasicAAResult::DecomposeGEPExpression(const Value *V,
       //   A[x][x] -> x*16 + x*4 -> x*20
       // This also ensures that 'x' only appears in the index list once.
       for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
-        if (Decomposed.VarIndices[i].V == Index &&
+        if (Decomposed.VarIndices[i].V == Index &&
             Decomposed.VarIndices[i].ZExtBits == ZExtBits &&
             Decomposed.VarIndices[i].SExtBits == SExtBits) {
           Scale += Decomposed.VarIndices[i].Scale;
@@ -479,10 +498,12 @@ bool BasicAAResult::DecomposeGEPExpression(const Value *V,
     }
 
     // Take care of wrap-arounds
-    Decomposed.StructOffset =
-      adjustToPointerSize(Decomposed.StructOffset, PointerSize);
-    Decomposed.OtherOffset =
-      adjustToPointerSize(Decomposed.OtherOffset, PointerSize);
+    if (GepHasConstantOffset) {
+      Decomposed.StructOffset =
+        adjustToPointerSize(Decomposed.StructOffset, PointerSize);
+      Decomposed.OtherOffset =
+        adjustToPointerSize(Decomposed.OtherOffset, PointerSize);
+    }
 
     // Analyze the base pointer next.
     V = GEPOp->getOperand(0);
@@ -603,6 +624,10 @@ FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
 
   if (F->onlyAccessesArgMemory())
     Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
+  else if (F->onlyAccessesInaccessibleMemory())
+    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
+  else if (F->onlyAccessesInaccessibleMemOrArgMem())
+    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);
 
   return Min;
 }
@@ -732,7 +757,8 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
     // pointer were passed to arguments that were neither of these, then it
     // couldn't be no-capture.
     if (!(*CI)->getType()->isPointerTy() ||
-        (!CS.doesNotCapture(OperandNo) && !CS.isByValArgument(OperandNo)))
+        (!CS.doesNotCapture(OperandNo) &&
+         OperandNo < CS.getNumArgOperands() && !CS.isByValArgument(OperandNo)))
       continue;
 
     // If this is a no-capture pointer argument, see if we can tell that it
@@ -765,6 +791,31 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
       return MRI_NoModRef;
   }
 
+  // The semantics of memcpy intrinsics forbid overlap between their respective
+  // operands, i.e., source and destination of any given memcpy must no-alias.
+  // If Loc must-aliases either one of these two locations, then it necessarily
+  // no-aliases the other.
+  if (auto *Inst = dyn_cast<MemCpyInst>(CS.getInstruction())) {
+    AliasResult SrcAA, DestAA;
+
+    if ((SrcAA = getBestAAResults().alias(MemoryLocation::getForSource(Inst),
+                                          Loc)) == MustAlias)
+      // Loc is exactly the memcpy source thus disjoint from memcpy dest.
+      return MRI_Ref;
+    if ((DestAA = getBestAAResults().alias(MemoryLocation::getForDest(Inst),
+                                           Loc)) == MustAlias)
+      // The converse case.
+      return MRI_Mod;
+
+    // It's also possible for Loc to alias both src and dest, or neither.
+    ModRefInfo rv = MRI_NoModRef;
+    if (SrcAA != NoAlias)
+      rv = static_cast<ModRefInfo>(rv | MRI_Ref);
+    if (DestAA != NoAlias)
+      rv = static_cast<ModRefInfo>(rv | MRI_Mod);
+    return rv;
+  }
+
   // While the assume intrinsic is marked as arbitrarily writing so that
   // proper control dependencies will be maintained, it never aliases any
   // particular memory location.
@@ -781,6 +832,32 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
   if (isIntrinsicCall(CS, Intrinsic::experimental_guard))
     return MRI_Ref;
 
+  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
+  // writing so that proper control dependencies are maintained but they never
+  // mod any particular memory location visible to the IR.
+  // *Unlike* assumes (which are now modeled as NoModRef), invariant.start
+  // intrinsic is now modeled as reading memory. This prevents hoisting the
+  // invariant.start intrinsic over stores. Consider:
+  // *ptr = 40;
+  // *ptr = 50;
+  // invariant_start(ptr)
+  // int val = *ptr;
+  // print(val);
+  //
+  // This cannot be transformed to:
+  //
+  // *ptr = 40;
+  // invariant_start(ptr)
+  // *ptr = 50;
+  // int val = *ptr;
+  // print(val);
+  //
+  // The transformation will cause the second store to be ignored (based on
+  // rules of invariant.start) and print 40, while the first program always
+  // prints 50.
+  if (isIntrinsicCall(CS, Intrinsic::invariant_start))
+    return MRI_Ref;
+
   // The AAResultBase base class has some smarts, lets use them.
   return AAResultBase::getModRefInfo(CS, Loc);
 }
@@ -1114,13 +1191,14 @@ AliasResult BasicAAResult::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
       return MayAlias;
 
     AliasResult R = aliasCheck(UnderlyingV1, MemoryLocation::UnknownSize,
-                               AAMDNodes(), V2, V2Size, V2AAInfo);
+                               AAMDNodes(), V2, MemoryLocation::UnknownSize,
+                               V2AAInfo, nullptr, UnderlyingV2);
     if (R != MustAlias)
       // If V2 may alias GEP base pointer, conservatively returns MayAlias.
      // If V2 is known not to alias GEP base pointer, then the two values
-      // cannot alias per GEP semantics: "A pointer value formed from a
-      // getelementptr instruction is associated with the addresses associated
-      // with the first operand of the getelementptr".
+      // cannot alias per GEP semantics: "Any memory access must be done through
+      // a pointer value associated with an address range of the memory access,
+      // otherwise the behavior is undefined.".
      return R;
 
     // If the max search depth is reached the result is undefined
@@ -1251,7 +1329,8 @@ static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
 AliasResult BasicAAResult::aliasSelect(const SelectInst *SI, uint64_t SISize,
                                        const AAMDNodes &SIAAInfo,
                                        const Value *V2, uint64_t V2Size,
-                                       const AAMDNodes &V2AAInfo) {
+                                       const AAMDNodes &V2AAInfo,
+                                       const Value *UnderV2) {
   // If the values are Selects with the same condition, we can do a more precise
   // check: just check for aliases between the values on corresponding arms.
   if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
@@ -1269,12 +1348,14 @@ AliasResult BasicAAResult::aliasSelect(const SelectInst *SI, uint64_t SISize,
   // If both arms of the Select node NoAlias or MustAlias V2, then returns
   // NoAlias / MustAlias. Otherwise, returns MayAlias.
   AliasResult Alias =
-      aliasCheck(V2, V2Size, V2AAInfo, SI->getTrueValue(), SISize, SIAAInfo);
+      aliasCheck(V2, V2Size, V2AAInfo, SI->getTrueValue(),
+                 SISize, SIAAInfo, UnderV2);
   if (Alias == MayAlias)
     return MayAlias;
 
   AliasResult ThisAlias =
-      aliasCheck(V2, V2Size, V2AAInfo, SI->getFalseValue(), SISize, SIAAInfo);
+      aliasCheck(V2, V2Size, V2AAInfo, SI->getFalseValue(), SISize, SIAAInfo,
+                 UnderV2);
   return MergeAliasResults(ThisAlias, Alias);
 }
 
@@ -1282,8 +1363,8 @@ AliasResult BasicAAResult::aliasSelect(const SelectInst *SI, uint64_t SISize,
 /// another.
 AliasResult BasicAAResult::aliasPHI(const PHINode *PN, uint64_t PNSize,
                                     const AAMDNodes &PNAAInfo, const Value *V2,
-                                    uint64_t V2Size,
-                                    const AAMDNodes &V2AAInfo) {
+                                    uint64_t V2Size, const AAMDNodes &V2AAInfo,
+                                    const Value *UnderV2) {
   // Track phi nodes we have visited. We use this information when we determine
   // value equivalence.
   VisitedPhiBBs.insert(PN->getParent());
@@ -1362,7 +1443,8 @@ AliasResult BasicAAResult::aliasPHI(const PHINode *PN, uint64_t PNSize,
     PNSize = MemoryLocation::UnknownSize;
 
   AliasResult Alias =
-      aliasCheck(V2, V2Size, V2AAInfo, V1Srcs[0], PNSize, PNAAInfo);
+      aliasCheck(V2, V2Size, V2AAInfo, V1Srcs[0],
+                 PNSize, PNAAInfo, UnderV2);
 
   // Early exit if the check of the first PHI source against V2 is MayAlias.
   // Other results are not possible.
@@ -1375,7 +1457,7 @@ AliasResult BasicAAResult::aliasPHI(const PHINode *PN, uint64_t PNSize,
     Value *V = V1Srcs[i];
 
     AliasResult ThisAlias =
-        aliasCheck(V2, V2Size, V2AAInfo, V, PNSize, PNAAInfo);
+        aliasCheck(V2, V2Size, V2AAInfo, V, PNSize, PNAAInfo, UnderV2);
     Alias = MergeAliasResults(ThisAlias, Alias);
     if (Alias == MayAlias)
       break;
@@ -1388,7 +1470,8 @@ AliasResult BasicAAResult::aliasPHI(const PHINode *PN, uint64_t PNSize,
 /// array references.
 AliasResult BasicAAResult::aliasCheck(const Value *V1, uint64_t V1Size,
                                       AAMDNodes V1AAInfo, const Value *V2,
-                                      uint64_t V2Size, AAMDNodes V2AAInfo) {
+                                      uint64_t V2Size, AAMDNodes V2AAInfo,
+                                      const Value *O1, const Value *O2) {
   // If either of the memory references is empty, it doesn't matter what the
   // pointer values are.
   if (V1Size == 0 || V2Size == 0)
@@ -1416,8 +1499,11 @@ AliasResult BasicAAResult::aliasCheck(const Value *V1, uint64_t V1Size,
     return NoAlias; // Scalars cannot alias each other
 
   // Figure out what objects these things are pointing to if we can.
-  const Value *O1 = GetUnderlyingObject(V1, DL, MaxLookupSearchDepth);
-  const Value *O2 = GetUnderlyingObject(V2, DL, MaxLookupSearchDepth);
+  if (O1 == nullptr)
+    O1 = GetUnderlyingObject(V1, DL, MaxLookupSearchDepth);
+
+  if (O2 == nullptr)
+    O2 = GetUnderlyingObject(V2, DL, MaxLookupSearchDepth);
 
   // Null values in the default address space don't point to any object, so they
   // don't alias any other pointer.
@@ -1500,23 +1586,26 @@ AliasResult BasicAAResult::aliasCheck(const Value *V1, uint64_t V1Size,
 
   if (isa<PHINode>(V2) && !isa<PHINode>(V1)) {
     std::swap(V1, V2);
+    std::swap(O1, O2);
     std::swap(V1Size, V2Size);
     std::swap(V1AAInfo, V2AAInfo);
   }
   if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
-    AliasResult Result = aliasPHI(PN, V1Size, V1AAInfo, V2, V2Size, V2AAInfo);
+    AliasResult Result = aliasPHI(PN, V1Size, V1AAInfo,
+                                  V2, V2Size, V2AAInfo, O2);
     if (Result != MayAlias)
       return AliasCache[Locs] = Result;
   }
 
   if (isa<SelectInst>(V2) && !isa<SelectInst>(V1)) {
     std::swap(V1, V2);
+    std::swap(O1, O2);
     std::swap(V1Size, V2Size);
     std::swap(V1AAInfo, V2AAInfo);
   }
   if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
     AliasResult Result =
-        aliasSelect(S1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo);
+        aliasSelect(S1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O2);
     if (Result != MayAlias)
       return AliasCache[Locs] = Result;
   }
@@ -1667,9 +1756,9 @@ bool BasicAAResult::constantOffsetHeuristic(
 // BasicAliasAnalysis Pass
 //===----------------------------------------------------------------------===//
 
-char BasicAA::PassID;
+AnalysisKey BasicAA::Key;
 
-BasicAAResult BasicAA::run(Function &F, AnalysisManager<Function> &AM) {
+BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
   return BasicAAResult(F.getParent()->getDataLayout(),
                        AM.getResult<TargetLibraryAnalysis>(F),
                        AM.getResult<AssumptionAnalysis>(F),