Diffstat (limited to 'contrib/llvm/lib/Analysis/CGSCCPassManager.cpp')
-rw-r--r-- | contrib/llvm/lib/Analysis/CGSCCPassManager.cpp | 268
1 file changed, 204 insertions, 64 deletions
diff --git a/contrib/llvm/lib/Analysis/CGSCCPassManager.cpp b/contrib/llvm/lib/Analysis/CGSCCPassManager.cpp
index 054bdc4..74b5d79 100644
--- a/contrib/llvm/lib/Analysis/CGSCCPassManager.cpp
+++ b/contrib/llvm/lib/Analysis/CGSCCPassManager.cpp
@@ -117,6 +117,7 @@ bool CGSCCAnalysisManagerModuleProxy::Result::invalidate(
       PA.allAnalysesInSetPreserved<AllAnalysesOn<LazyCallGraph::SCC>>();
 
   // Ok, we have a graph, so we can propagate the invalidation down into it.
+  G->buildRefSCCs();
   for (auto &RC : G->postorder_ref_sccs())
     for (auto &C : RC) {
       Optional<PreservedAnalyses> InnerPA;
@@ -195,18 +196,117 @@ FunctionAnalysisManagerCGSCCProxy::run(LazyCallGraph::SCC &C,
 bool FunctionAnalysisManagerCGSCCProxy::Result::invalidate(
     LazyCallGraph::SCC &C, const PreservedAnalyses &PA,
     CGSCCAnalysisManager::Invalidator &Inv) {
-  for (LazyCallGraph::Node &N : C)
-    FAM->invalidate(N.getFunction(), PA);
+  // If literally everything is preserved, we're done.
+  if (PA.areAllPreserved())
+    return false; // This is still a valid proxy.
+
+  // If this proxy isn't marked as preserved, then even if the result remains
+  // valid, the key itself may no longer be valid, so we clear everything.
+  //
+  // Note that in order to preserve this proxy, a module pass must ensure that
+  // the FAM has been completely updated to handle the deletion of functions.
+  // Specifically, any FAM-cached results for those functions need to have been
+  // forcibly cleared. When preserved, this proxy will only invalidate results
+  // cached on functions *still in the module* at the end of the module pass.
+  auto PAC = PA.getChecker<FunctionAnalysisManagerCGSCCProxy>();
+  if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<LazyCallGraph::SCC>>()) {
+    for (LazyCallGraph::Node &N : C)
+      FAM->clear(N.getFunction());
 
-  // This proxy doesn't need to handle invalidation itself. Instead, the
-  // module-level CGSCC proxy handles it above by ensuring that if the
-  // module-level FAM proxy becomes invalid the entire SCC layer, which
-  // includes this proxy, is cleared.
+    return true;
+  }
+
+  // Directly check if the relevant set is preserved.
+  bool AreFunctionAnalysesPreserved =
+      PA.allAnalysesInSetPreserved<AllAnalysesOn<Function>>();
+
+  // Now walk all the functions to see if any inner analysis invalidation is
+  // necessary.
+  for (LazyCallGraph::Node &N : C) {
+    Function &F = N.getFunction();
+    Optional<PreservedAnalyses> FunctionPA;
+
+    // Check to see whether the preserved set needs to be pruned based on
+    // SCC-level analysis invalidation that triggers deferred invalidation
+    // registered with the outer analysis manager proxy for this function.
+    if (auto *OuterProxy =
+            FAM->getCachedResult<CGSCCAnalysisManagerFunctionProxy>(F))
+      for (const auto &OuterInvalidationPair :
+           OuterProxy->getOuterInvalidations()) {
+        AnalysisKey *OuterAnalysisID = OuterInvalidationPair.first;
+        const auto &InnerAnalysisIDs = OuterInvalidationPair.second;
+        if (Inv.invalidate(OuterAnalysisID, C, PA)) {
+          if (!FunctionPA)
+            FunctionPA = PA;
+          for (AnalysisKey *InnerAnalysisID : InnerAnalysisIDs)
+            FunctionPA->abandon(InnerAnalysisID);
+        }
+      }
+
+    // Check if we needed a custom PA set, and if so we'll need to run the
+    // inner invalidation.
+    if (FunctionPA) {
+      FAM->invalidate(F, *FunctionPA);
+      continue;
+    }
+
+    // Otherwise we only need to do invalidation if the original PA set didn't
+    // preserve all function analyses.
+    if (!AreFunctionAnalysesPreserved)
+      FAM->invalidate(F, PA);
+  }
+
+  // Return false to indicate that this result is still a valid proxy.
   return false;
 }
 
 } // End llvm namespace
 
+/// When a new SCC is created for the graph and there might be function
+/// analysis results cached for the functions now in that SCC, two forms of
+/// updates are required.
+///
+/// First, a proxy from the SCC to the FunctionAnalysisManager needs to be
+/// created so that any subsequent invalidation events to the SCC are
+/// propagated to the function analysis results cached for functions within it.
+///
+/// Second, if any of the functions within the SCC have analysis results with
+/// outer analysis dependencies, then those dependencies would point to the
+/// *wrong* SCC's analysis result. We forcibly invalidate the necessary
+/// function analyses so that they don't retain stale handles.
+static void updateNewSCCFunctionAnalyses(LazyCallGraph::SCC &C,
+                                         LazyCallGraph &G,
+                                         CGSCCAnalysisManager &AM) {
+  // Get the relevant function analysis manager.
+  auto &FAM =
+      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, G).getManager();
+
+  // Now walk the functions in this SCC and invalidate any function analysis
+  // results that might have outer dependencies on an SCC analysis.
+  for (LazyCallGraph::Node &N : C) {
+    Function &F = N.getFunction();
+
+    auto *OuterProxy =
+        FAM.getCachedResult<CGSCCAnalysisManagerFunctionProxy>(F);
+    if (!OuterProxy)
+      // No outer analyses were queried, nothing to do.
+      continue;
+
+    // Forcibly abandon all the inner analyses with dependencies, but
+    // invalidate nothing else.
+    auto PA = PreservedAnalyses::all();
+    for (const auto &OuterInvalidationPair :
+         OuterProxy->getOuterInvalidations()) {
+      const auto &InnerAnalysisIDs = OuterInvalidationPair.second;
+      for (AnalysisKey *InnerAnalysisID : InnerAnalysisIDs)
+        PA.abandon(InnerAnalysisID);
+    }
+
+    // Now invalidate anything we found.
+    FAM.invalidate(F, PA);
+  }
+}
+
 namespace {
 /// Helper function to update both the \c CGSCCAnalysisManager \p AM and the \c
 /// CGSCCPassManager's \c CGSCCUpdateResult \p UR based on a range of newly
@@ -235,7 +335,6 @@ incorporateNewSCCRange(const SCCRangeT &NewSCCRange, LazyCallGraph &G,
       dbgs() << "Enqueuing the existing SCC in the worklist:" << *C << "\n";
 
   SCC *OldC = C;
-  (void)OldC;
 
   // Update the current SCC. Note that if we have new SCCs, this must actually
   // change the SCC.
@@ -244,6 +343,26 @@ incorporateNewSCCRange(const SCCRangeT &NewSCCRange, LazyCallGraph &G,
   C = &*NewSCCRange.begin();
   assert(G.lookupSCC(N) == C && "Failed to update current SCC!");
 
+  // If we had a cached FAM proxy originally, we will want to create more of
+  // them for each SCC that was split off.
+  bool NeedFAMProxy =
+      AM.getCachedResult<FunctionAnalysisManagerCGSCCProxy>(*OldC) != nullptr;
+
+  // We need to propagate an invalidation call to all but the newly current SCC
+  // because the outer pass manager won't do that for us after splitting them.
+  // FIXME: We should accept a PreservedAnalysis from the CG updater so that if
+  // there are preserved analyses we can avoid invalidating them here for
+  // split-off SCCs.
+  // We know however that this will preserve any FAM proxy so go ahead and mark
+  // that.
+  PreservedAnalyses PA;
+  PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
+  AM.invalidate(*OldC, PA);
+
+  // Ensure the now-current SCC's function analyses are updated.
+  if (NeedFAMProxy)
+    updateNewSCCFunctionAnalyses(*C, G, AM);
+
   for (SCC &NewC : reverse(make_range(std::next(NewSCCRange.begin()),
                                       NewSCCRange.end()))) {
     assert(C != &NewC && "No need to re-visit the current SCC!");
@@ -251,6 +370,14 @@ incorporateNewSCCRange(const SCCRangeT &NewSCCRange, LazyCallGraph &G,
     UR.CWorklist.insert(&NewC);
     if (DebugLogging)
       dbgs() << "Enqueuing a newly formed SCC:" << NewC << "\n";
+
+    // Ensure new SCCs' function analyses are updated.
+    if (NeedFAMProxy)
+      updateNewSCCFunctionAnalyses(NewC, G, AM);
+
+    // Also propagate a normal invalidation to the new SCC as only the current
+    // will get one from the pass manager infrastructure.
+    AM.invalidate(NewC, PA);
   }
   return C;
 }
@@ -273,9 +400,9 @@ LazyCallGraph::SCC &llvm::updateCGAndAnalysisManagerForFunctionPass(
   // demoted edges.
   SmallVector<Constant *, 16> Worklist;
   SmallPtrSet<Constant *, 16> Visited;
-  SmallPtrSet<Function *, 16> RetainedEdges;
-  SmallSetVector<Function *, 4> PromotedRefTargets;
-  SmallSetVector<Function *, 4> DemotedCallTargets;
+  SmallPtrSet<Node *, 16> RetainedEdges;
+  SmallSetVector<Node *, 4> PromotedRefTargets;
+  SmallSetVector<Node *, 4> DemotedCallTargets;
 
   // First walk the function and handle all called functions. We do this first
   // because if there is a single call edge, whether there are ref edges is
@@ -284,7 +411,8 @@ LazyCallGraph::SCC &llvm::updateCGAndAnalysisManagerForFunctionPass(
     if (auto CS = CallSite(&I))
       if (Function *Callee = CS.getCalledFunction())
        if (Visited.insert(Callee).second && !Callee->isDeclaration()) {
-          const Edge *E = N.lookup(*Callee);
+          Node &CalleeN = *G.lookup(*Callee);
+          Edge *E = N->lookup(CalleeN);
           // FIXME: We should really handle adding new calls. While it will
           // make downstream usage more complex, there is no fundamental
           // limitation and it will allow passes within the CGSCC to be a bit
@@ -293,9 +421,9 @@ LazyCallGraph::SCC &llvm::updateCGAndAnalysisManagerForFunctionPass(
           assert(E && "No function transformations should introduce *new* "
                       "call edges! Any new calls should be modeled as "
                       "promoted existing ref edges!");
-          RetainedEdges.insert(Callee);
+          RetainedEdges.insert(&CalleeN);
           if (!E->isCall())
-            PromotedRefTargets.insert(Callee);
+            PromotedRefTargets.insert(&CalleeN);
         }
 
   // Now walk all references.
@@ -305,25 +433,31 @@ LazyCallGraph::SCC &llvm::updateCGAndAnalysisManagerForFunctionPass(
         if (Visited.insert(C).second)
           Worklist.push_back(C);
 
-  LazyCallGraph::visitReferences(Worklist, Visited, [&](Function &Referee) {
-    const Edge *E = N.lookup(Referee);
+  auto VisitRef = [&](Function &Referee) {
+    Node &RefereeN = *G.lookup(Referee);
+    Edge *E = N->lookup(RefereeN);
     // FIXME: Similarly to new calls, we also currently preclude
     // introducing new references. See above for details.
     assert(E && "No function transformations should introduce *new* ref "
                 "edges! Any new ref edges would require IPO which "
                 "function passes aren't allowed to do!");
-    RetainedEdges.insert(&Referee);
+    RetainedEdges.insert(&RefereeN);
    if (E->isCall())
-      DemotedCallTargets.insert(&Referee);
-  });
+      DemotedCallTargets.insert(&RefereeN);
+  };
+  LazyCallGraph::visitReferences(Worklist, Visited, VisitRef);
+
+  // Include synthetic reference edges to known, defined lib functions.
+  for (auto *F : G.getLibFunctions())
+    VisitRef(*F);
 
   // First remove all of the edges that are no longer present in this function.
   // We have to build a list of dead targets first and then remove them as the
   // data structures will all be invalidated by removing them.
   SmallVector<PointerIntPair<Node *, 1, Edge::Kind>, 4> DeadTargets;
-  for (Edge &E : N)
-    if (!RetainedEdges.count(&E.getFunction()))
-      DeadTargets.push_back({E.getNode(), E.getKind()});
+  for (Edge &E : *N)
+    if (!RetainedEdges.count(&E.getNode()))
+      DeadTargets.push_back({&E.getNode(), E.getKind()});
   for (auto DeadTarget : DeadTargets) {
    Node &TargetN = *DeadTarget.getPointer();
    bool IsCall = DeadTarget.getInt() == Edge::Call;
@@ -346,14 +480,6 @@ LazyCallGraph::SCC &llvm::updateCGAndAnalysisManagerForFunctionPass(
       // For separate SCCs this is trivial.
       RC->switchTrivialInternalEdgeToRef(N, TargetN);
     } else {
-      // Otherwise we may end up re-structuring the call graph. First,
-      // invalidate any SCC analyses. We have to do this before we split
-      // functions into new SCCs and lose track of where their analyses are
-      // cached.
-      // FIXME: We should accept a more precise preserved set here. For
-      // example, it might be possible to preserve some function analyses
-      // even as the SCC structure is changed.
-      AM.invalidate(*C, PreservedAnalyses::none());
       // Now update the call graph.
       C = incorporateNewSCCRange(RC->switchInternalEdgeToRef(N, TargetN), G,
                                  N, C, AM, UR, DebugLogging);
@@ -397,9 +523,8 @@ LazyCallGraph::SCC &llvm::updateCGAndAnalysisManagerForFunctionPass(
   // Next demote all the call edges that are now ref edges. This helps make
   // the SCCs small which should minimize the work below as we don't want to
   // form cycles that this would break.
-  for (Function *RefTarget : DemotedCallTargets) {
-    Node &TargetN = *G.lookup(*RefTarget);
-    SCC &TargetC = *G.lookupSCC(TargetN);
+  for (Node *RefTarget : DemotedCallTargets) {
+    SCC &TargetC = *G.lookupSCC(*RefTarget);
     RefSCC &TargetRC = TargetC.getOuterRefSCC();
 
     // The easy case is when the target RefSCC is not this RefSCC. This is
@@ -407,10 +532,10 @@ LazyCallGraph::SCC &llvm::updateCGAndAnalysisManagerForFunctionPass(
     if (&TargetRC != RC) {
       assert(RC->isAncestorOf(TargetRC) &&
             "Cannot potentially form RefSCC cycles here!");
-      RC->switchOutgoingEdgeToRef(N, TargetN);
+      RC->switchOutgoingEdgeToRef(N, *RefTarget);
       if (DebugLogging)
         dbgs() << "Switch outgoing call edge to a ref edge from '" << N
-               << "' to '" << TargetN << "'\n";
+               << "' to '" << *RefTarget << "'\n";
       continue;
     }
 
@@ -418,26 +543,18 @@ LazyCallGraph::SCC &llvm::updateCGAndAnalysisManagerForFunctionPass(
     // some SCCs.
     if (C != &TargetC) {
       // For separate SCCs this is trivial.
-      RC->switchTrivialInternalEdgeToRef(N, TargetN);
+      RC->switchTrivialInternalEdgeToRef(N, *RefTarget);
       continue;
     }
 
-    // Otherwise we may end up re-structuring the call graph. First, invalidate
-    // any SCC analyses. We have to do this before we split functions into new
-    // SCCs and lose track of where their analyses are cached.
-    // FIXME: We should accept a more precise preserved set here. For example,
-    // it might be possible to preserve some function analyses even as the SCC
-    // structure is changed.
-    AM.invalidate(*C, PreservedAnalyses::none());
     // Now update the call graph.
-    C = incorporateNewSCCRange(RC->switchInternalEdgeToRef(N, TargetN), G,
-                               N, C, AM, UR, DebugLogging);
+    C = incorporateNewSCCRange(RC->switchInternalEdgeToRef(N, *RefTarget), G, N,
+                               C, AM, UR, DebugLogging);
   }
 
   // Now promote ref edges into call edges.
-  for (Function *CallTarget : PromotedRefTargets) {
-    Node &TargetN = *G.lookup(*CallTarget);
-    SCC &TargetC = *G.lookupSCC(TargetN);
+  for (Node *CallTarget : PromotedRefTargets) {
+    SCC &TargetC = *G.lookupSCC(*CallTarget);
     RefSCC &TargetRC = TargetC.getOuterRefSCC();
 
     // The easy case is when the target RefSCC is not this RefSCC. This is
@@ -445,38 +562,61 @@ LazyCallGraph::SCC &llvm::updateCGAndAnalysisManagerForFunctionPass(
     if (&TargetRC != RC) {
       assert(RC->isAncestorOf(TargetRC) &&
            "Cannot potentially form RefSCC cycles here!");
-      RC->switchOutgoingEdgeToCall(N, TargetN);
+      RC->switchOutgoingEdgeToCall(N, *CallTarget);
      if (DebugLogging)
        dbgs() << "Switch outgoing ref edge to a call edge from '" << N
-               << "' to '" << TargetN << "'\n";
+               << "' to '" << *CallTarget << "'\n";
      continue;
    }
 
    if (DebugLogging)
      dbgs() << "Switch an internal ref edge to a call edge from '" << N
-             << "' to '" << TargetN << "'\n";
+             << "' to '" << *CallTarget << "'\n";
 
    // Otherwise we are switching an internal ref edge to a call edge. This
    // may merge away some SCCs, and we add those to the UpdateResult. We also
    // need to make sure to update the worklist in the event SCCs have moved
-    // before the current one in the post-order sequence.
+    // before the current one in the post-order sequence
+    bool HasFunctionAnalysisProxy = false;
    auto InitialSCCIndex = RC->find(*C) - RC->begin();
-    auto InvalidatedSCCs = RC->switchInternalEdgeToCall(N, TargetN);
-    if (!InvalidatedSCCs.empty()) {
+    bool FormedCycle = RC->switchInternalEdgeToCall(
+        N, *CallTarget, [&](ArrayRef<SCC *> MergedSCCs) {
+          for (SCC *MergedC : MergedSCCs) {
+            assert(MergedC != &TargetC && "Cannot merge away the target SCC!");
+
+            HasFunctionAnalysisProxy |=
+                AM.getCachedResult<FunctionAnalysisManagerCGSCCProxy>(
+                    *MergedC) != nullptr;
+
+            // Mark that this SCC will no longer be valid.
+            UR.InvalidatedSCCs.insert(MergedC);
+
+            // FIXME: We should really do a 'clear' here to forcibly release
+            // memory, but we don't have a good way of doing that and
+            // preserving the function analyses.
+            auto PA = PreservedAnalyses::allInSet<AllAnalysesOn<Function>>();
+            PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
+            AM.invalidate(*MergedC, PA);
+          }
+        });
+
+    // If we formed a cycle by creating this call, we need to update more data
+    // structures.
+    if (FormedCycle) {
      C = &TargetC;
      assert(G.lookupSCC(N) == C && "Failed to update current SCC!");
-      // Any analyses cached for this SCC are no longer precise as the shape
-      // has changed by introducing this cycle.
-      AM.invalidate(*C, PreservedAnalyses::none());
-
-      for (SCC *InvalidatedC : InvalidatedSCCs) {
-        assert(InvalidatedC != C && "Cannot invalidate the current SCC!");
-        UR.InvalidatedSCCs.insert(InvalidatedC);
+      // If one of the invalidated SCCs had a cached proxy to a function
+      // analysis manager, we need to create a proxy in the new current SCC as
+      // the invalidated SCCs had their functions moved.
+      if (HasFunctionAnalysisProxy)
+        AM.getResult<FunctionAnalysisManagerCGSCCProxy>(*C, G);
 
-        // Also clear any cached analyses for the SCCs that are dead. This
-        // isn't really necessary for correctness but can release memory.
-        AM.clear(*InvalidatedC);
-      }
+      // Any analyses cached for this SCC are no longer precise as the shape
+      // has changed by introducing this cycle. However, we have taken care to
+      // update the proxies so it remains valid.
+      auto PA = PreservedAnalyses::allInSet<AllAnalysesOn<Function>>();
+      PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
+      AM.invalidate(*C, PA);
    }
    auto NewSCCIndex = RC->find(*C) - RC->begin();
    if (InitialSCCIndex < NewSCCIndex) {
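
The recurring pattern in this change, whenever the SCC structure is reshaped but the IR of the functions themselves is untouched, is to invalidate the SCC with a PreservedAnalyses set that keeps the whole function-analysis set and the FunctionAnalysisManagerCGSCCProxy alive. Below is a minimal sketch of that pattern using only the public new-pass-manager API; the helper name invalidateSCCKeepingFunctionAnalyses is hypothetical, not something defined by LLVM.

#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/IR/PassManager.h"

using namespace llvm;

// Drop the SCC-level analysis results cached for C while declaring every
// function-level result (and the proxy that owns them) preserved, so the
// proxy's invalidate() above leaves the per-function cache alone.
static void invalidateSCCKeepingFunctionAnalyses(CGSCCAnalysisManager &AM,
                                                 LazyCallGraph::SCC &C) {
  auto PA = PreservedAnalyses::allInSet<AllAnalysesOn<Function>>();
  PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
  AM.invalidate(C, PA);
}

This mirrors the construction used for merged-away SCCs in the switchInternalEdgeToCall callback and for the newly formed cycle; the split-off case in incorporateNewSCCRange preserves only the proxy itself.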
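The deferred-invalidation loop over getOuterInvalidations() above only ever finds work if some function analysis has registered a dependency on an SCC-level analysis through the CGSCCAnalysisManagerFunctionProxy. Here is a hedged sketch of what that registration looks like from the function-analysis side; MySCCSummary and MyFuncInfo are hypothetical illustrations, not analyses that exist in LLVM.

#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/PassManager.h"
#include <iterator>

using namespace llvm;

// Hypothetical SCC-level analysis that a function analysis wants to consume.
struct MySCCSummary : AnalysisInfoMixin<MySCCSummary> {
  struct Result {
    unsigned NumFunctions = 0;
  };
  Result run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &, LazyCallGraph &) {
    Result R;
    R.NumFunctions = static_cast<unsigned>(std::distance(C.begin(), C.end()));
    return R;
  }
  static AnalysisKey Key;
};
AnalysisKey MySCCSummary::Key;

// Hypothetical function analysis whose cached result is derived from
// SCC-level information and so must go away whenever MySCCSummary does.
struct MyFuncInfo : AnalysisInfoMixin<MyFuncInfo> {
  struct Result {
    // A real analysis would cache SCC-derived data here.
    bool Valid = true;
  };
  Result run(Function &F, FunctionAnalysisManager &AM) {
    // Register the outer dependency: if MySCCSummary is invalidated for the
    // enclosing SCC, MyFuncInfo must be abandoned for this function too. This
    // registration is what populates getOuterInvalidations(), which both
    // FunctionAnalysisManagerCGSCCProxy::Result::invalidate and
    // updateNewSCCFunctionAnalyses walk in the patch above.
    auto &OuterProxy = AM.getResult<CGSCCAnalysisManagerFunctionProxy>(F);
    OuterProxy.registerOuterAnalysisInvalidation<MySCCSummary, MyFuncInfo>();
    Result R;
    R.Valid = !F.isDeclaration();
    return R;
  }
  static AnalysisKey Key;
};
AnalysisKey MyFuncInfo::Key;

When SCCs are split or merged, functions move to a different SCC analysis result, which is exactly the situation in which updateNewSCCFunctionAnalyses above abandons results registered this way.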