path: root/contrib/llvm/lib/Transforms/ObjCARC
Diffstat (limited to 'contrib/llvm/lib/Transforms/ObjCARC')
-rw-r--r--  contrib/llvm/lib/Transforms/ObjCARC/ARCRuntimeEntryPoints.h          |  184
-rw-r--r--  contrib/llvm/lib/Transforms/ObjCARC/BlotMapVector.h                  |  108
-rw-r--r--  contrib/llvm/lib/Transforms/ObjCARC/DependencyAnalysis.cpp           |  278
-rw-r--r--  contrib/llvm/lib/Transforms/ObjCARC/DependencyAnalysis.h             |   88
-rw-r--r--  contrib/llvm/lib/Transforms/ObjCARC/ObjCARC.cpp                      |   42
-rw-r--r--  contrib/llvm/lib/Transforms/ObjCARC/ObjCARC.h                        |   76
-rw-r--r--  contrib/llvm/lib/Transforms/ObjCARC/ObjCARCAPElim.cpp                |  173
-rw-r--r--  contrib/llvm/lib/Transforms/ObjCARC/ObjCARCContract.cpp              |  665
-rw-r--r--  contrib/llvm/lib/Transforms/ObjCARC/ObjCARCExpand.cpp                |  128
-rw-r--r--  contrib/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp                  | 2246
-rw-r--r--  contrib/llvm/lib/Transforms/ObjCARC/ProvenanceAnalysis.cpp           |  177
-rw-r--r--  contrib/llvm/lib/Transforms/ObjCARC/ProvenanceAnalysis.h             |   81
-rw-r--r--  contrib/llvm/lib/Transforms/ObjCARC/ProvenanceAnalysisEvaluator.cpp  |   94
-rw-r--r--  contrib/llvm/lib/Transforms/ObjCARC/PtrState.cpp                     |  404
-rw-r--r--  contrib/llvm/lib/Transforms/ObjCARC/PtrState.h                       |  210
15 files changed, 4954 insertions(+), 0 deletions(-)
diff --git a/contrib/llvm/lib/Transforms/ObjCARC/ARCRuntimeEntryPoints.h b/contrib/llvm/lib/Transforms/ObjCARC/ARCRuntimeEntryPoints.h
new file mode 100644
index 0000000..d4fef10
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/ObjCARC/ARCRuntimeEntryPoints.h
@@ -0,0 +1,184 @@
+//===- ARCRuntimeEntryPoints.h - ObjC ARC Optimization --*- C++ -*---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file contains a class ARCRuntimeEntryPoints for use in
+/// creating/managing references to entry points to the ARC Objective-C runtime.
+///
+/// WARNING: This file knows about certain library functions. It recognizes them
+/// by name, and hardwires knowledge of their semantics.
+///
+/// WARNING: This file knows about how certain Objective-C library functions are
+/// used. Naive LLVM IR transformations which would otherwise be
+/// behavior-preserving may break these assumptions.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TRANSFORMS_OBJCARC_ARCRUNTIMEENTRYPOINTS_H
+#define LLVM_LIB_TRANSFORMS_OBJCARC_ARCRUNTIMEENTRYPOINTS_H
+
+#include "ObjCARC.h"
+
+namespace llvm {
+namespace objcarc {
+
+enum class ARCRuntimeEntryPointKind {
+ AutoreleaseRV,
+ Release,
+ Retain,
+ RetainBlock,
+ Autorelease,
+ StoreStrong,
+ RetainRV,
+ RetainAutorelease,
+ RetainAutoreleaseRV,
+};
+
+/// Declarations for ObjC runtime functions and constants. These are initialized
+/// lazily to avoid cluttering up the Module with unused declarations.
+class ARCRuntimeEntryPoints {
+public:
+ ARCRuntimeEntryPoints() : TheModule(nullptr),
+ AutoreleaseRV(nullptr),
+ Release(nullptr),
+ Retain(nullptr),
+ RetainBlock(nullptr),
+ Autorelease(nullptr),
+ StoreStrong(nullptr),
+ RetainRV(nullptr),
+ RetainAutorelease(nullptr),
+ RetainAutoreleaseRV(nullptr) { }
+
+ void init(Module *M) {
+ TheModule = M;
+ AutoreleaseRV = nullptr;
+ Release = nullptr;
+ Retain = nullptr;
+ RetainBlock = nullptr;
+ Autorelease = nullptr;
+ StoreStrong = nullptr;
+ RetainRV = nullptr;
+ RetainAutorelease = nullptr;
+ RetainAutoreleaseRV = nullptr;
+ }
+
+ Constant *get(ARCRuntimeEntryPointKind kind) {
+ assert(TheModule != nullptr && "Not initialized.");
+
+ switch (kind) {
+ case ARCRuntimeEntryPointKind::AutoreleaseRV:
+ return getI8XRetI8XEntryPoint(AutoreleaseRV,
+ "objc_autoreleaseReturnValue", true);
+ case ARCRuntimeEntryPointKind::Release:
+ return getVoidRetI8XEntryPoint(Release, "objc_release");
+ case ARCRuntimeEntryPointKind::Retain:
+ return getI8XRetI8XEntryPoint(Retain, "objc_retain", true);
+ case ARCRuntimeEntryPointKind::RetainBlock:
+ return getI8XRetI8XEntryPoint(RetainBlock, "objc_retainBlock", false);
+ case ARCRuntimeEntryPointKind::Autorelease:
+ return getI8XRetI8XEntryPoint(Autorelease, "objc_autorelease", true);
+ case ARCRuntimeEntryPointKind::StoreStrong:
+ return getI8XRetI8XXI8XEntryPoint(StoreStrong, "objc_storeStrong");
+ case ARCRuntimeEntryPointKind::RetainRV:
+ return getI8XRetI8XEntryPoint(RetainRV,
+ "objc_retainAutoreleasedReturnValue", true);
+ case ARCRuntimeEntryPointKind::RetainAutorelease:
+ return getI8XRetI8XEntryPoint(RetainAutorelease, "objc_retainAutorelease",
+ true);
+ case ARCRuntimeEntryPointKind::RetainAutoreleaseRV:
+ return getI8XRetI8XEntryPoint(RetainAutoreleaseRV,
+ "objc_retainAutoreleaseReturnValue", true);
+ }
+
+ llvm_unreachable("Switch should be a covered switch.");
+ }
+
+private:
+ /// Cached reference to the module which we will insert declarations into.
+ Module *TheModule;
+
+ /// Declaration for ObjC runtime function objc_autoreleaseReturnValue.
+ Constant *AutoreleaseRV;
+ /// Declaration for ObjC runtime function objc_release.
+ Constant *Release;
+ /// Declaration for ObjC runtime function objc_retain.
+ Constant *Retain;
+ /// Declaration for ObjC runtime function objc_retainBlock.
+ Constant *RetainBlock;
+ /// Declaration for ObjC runtime function objc_autorelease.
+ Constant *Autorelease;
+ /// Declaration for objc_storeStrong().
+ Constant *StoreStrong;
+ /// Declaration for objc_retainAutoreleasedReturnValue().
+ Constant *RetainRV;
+ /// Declaration for objc_retainAutorelease().
+ Constant *RetainAutorelease;
+ /// Declaration for objc_retainAutoreleaseReturnValue().
+ Constant *RetainAutoreleaseRV;
+
+ Constant *getVoidRetI8XEntryPoint(Constant *&Decl,
+ const char *Name) {
+ if (Decl)
+ return Decl;
+
+ LLVMContext &C = TheModule->getContext();
+ Type *Params[] = { PointerType::getUnqual(Type::getInt8Ty(C)) };
+ AttributeSet Attr =
+ AttributeSet().addAttribute(C, AttributeSet::FunctionIndex,
+ Attribute::NoUnwind);
+ FunctionType *Fty = FunctionType::get(Type::getVoidTy(C), Params,
+ /*isVarArg=*/false);
+ return Decl = TheModule->getOrInsertFunction(Name, Fty, Attr);
+ }
+
+ Constant *getI8XRetI8XEntryPoint(Constant *& Decl,
+ const char *Name,
+ bool NoUnwind = false) {
+ if (Decl)
+ return Decl;
+
+ LLVMContext &C = TheModule->getContext();
+ Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
+ Type *Params[] = { I8X };
+ FunctionType *Fty = FunctionType::get(I8X, Params, /*isVarArg=*/false);
+ AttributeSet Attr = AttributeSet();
+
+ if (NoUnwind)
+ Attr = Attr.addAttribute(C, AttributeSet::FunctionIndex,
+ Attribute::NoUnwind);
+
+ return Decl = TheModule->getOrInsertFunction(Name, Fty, Attr);
+ }
+
+ Constant *getI8XRetI8XXI8XEntryPoint(Constant *&Decl,
+ const char *Name) {
+ if (Decl)
+ return Decl;
+
+ LLVMContext &C = TheModule->getContext();
+ Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
+ Type *I8XX = PointerType::getUnqual(I8X);
+ Type *Params[] = { I8XX, I8X };
+
+ AttributeSet Attr =
+ AttributeSet().addAttribute(C, AttributeSet::FunctionIndex,
+ Attribute::NoUnwind);
+ Attr = Attr.addAttribute(C, 1, Attribute::NoCapture);
+
+ FunctionType *Fty = FunctionType::get(Type::getVoidTy(C), Params,
+ /*isVarArg=*/false);
+
+ return Decl = TheModule->getOrInsertFunction(Name, Fty, Attr);
+ }
+
+}; // class ARCRuntimeEntryPoints
+
+} // namespace objcarc
+} // namespace llvm
+
+#endif
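
For orientation (not part of the patch), the following is a minimal C++ sketch of how the class declared above is meant to be used; it mirrors the pattern that appears later in ObjCARCContract::doInitialization and optimizeRetainCall. The names M and RetainCall are placeholders for the caller's state.

    // Sketch: lazily materialize an ObjC runtime declaration on demand.
    ARCRuntimeEntryPoints EP;
    EP.init(&M);                       // M is the Module being transformed
    // The first get() inserts the declaration into M; later calls hit the cache.
    Constant *RetainRVDecl = EP.get(ARCRuntimeEntryPointKind::RetainRV);
    RetainCall->setCalledFunction(RetainRVDecl);  // RetainCall: hypothetical CallInst*
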
diff --git a/contrib/llvm/lib/Transforms/ObjCARC/BlotMapVector.h b/contrib/llvm/lib/Transforms/ObjCARC/BlotMapVector.h
new file mode 100644
index 0000000..d6439b6
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/ObjCARC/BlotMapVector.h
@@ -0,0 +1,108 @@
+//===- BlotMapVector.h - A MapVector with the blot operation -*- C++ -*----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/DenseMap.h"
+#include <vector>
+#include <algorithm>
+
+namespace llvm {
+/// \brief An associative container with fast insertion-order (deterministic)
+/// iteration over its elements. Plus the special blot operation.
+template <class KeyT, class ValueT> class BlotMapVector {
+ /// Map keys to indices in Vector.
+ typedef DenseMap<KeyT, size_t> MapTy;
+ MapTy Map;
+
+ typedef std::vector<std::pair<KeyT, ValueT>> VectorTy;
+ /// Keys and values.
+ VectorTy Vector;
+
+public:
+ typedef typename VectorTy::iterator iterator;
+ typedef typename VectorTy::const_iterator const_iterator;
+ iterator begin() { return Vector.begin(); }
+ iterator end() { return Vector.end(); }
+ const_iterator begin() const { return Vector.begin(); }
+ const_iterator end() const { return Vector.end(); }
+
+#ifdef XDEBUG
+ ~BlotMapVector() {
+ assert(Vector.size() >= Map.size()); // May differ due to blotting.
+ for (typename MapTy::const_iterator I = Map.begin(), E = Map.end(); I != E;
+ ++I) {
+ assert(I->second < Vector.size());
+ assert(Vector[I->second].first == I->first);
+ }
+ for (typename VectorTy::const_iterator I = Vector.begin(), E = Vector.end();
+ I != E; ++I)
+ assert(!I->first || (Map.count(I->first) &&
+ Map[I->first] == size_t(I - Vector.begin())));
+ }
+#endif
+
+ ValueT &operator[](const KeyT &Arg) {
+ std::pair<typename MapTy::iterator, bool> Pair =
+ Map.insert(std::make_pair(Arg, size_t(0)));
+ if (Pair.second) {
+ size_t Num = Vector.size();
+ Pair.first->second = Num;
+ Vector.push_back(std::make_pair(Arg, ValueT()));
+ return Vector[Num].second;
+ }
+ return Vector[Pair.first->second].second;
+ }
+
+ std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &InsertPair) {
+ std::pair<typename MapTy::iterator, bool> Pair =
+ Map.insert(std::make_pair(InsertPair.first, size_t(0)));
+ if (Pair.second) {
+ size_t Num = Vector.size();
+ Pair.first->second = Num;
+ Vector.push_back(InsertPair);
+ return std::make_pair(Vector.begin() + Num, true);
+ }
+ return std::make_pair(Vector.begin() + Pair.first->second, false);
+ }
+
+ iterator find(const KeyT &Key) {
+ typename MapTy::iterator It = Map.find(Key);
+ if (It == Map.end())
+ return Vector.end();
+ return Vector.begin() + It->second;
+ }
+
+ const_iterator find(const KeyT &Key) const {
+ typename MapTy::const_iterator It = Map.find(Key);
+ if (It == Map.end())
+ return Vector.end();
+ return Vector.begin() + It->second;
+ }
+
+ /// This is similar to erase, but instead of removing the element from the
+ /// vector, it just zeros out the key in the vector. This leaves iterators
+ /// intact, but clients must be prepared for zeroed-out keys when iterating.
+ void blot(const KeyT &Key) {
+ typename MapTy::iterator It = Map.find(Key);
+ if (It == Map.end())
+ return;
+ Vector[It->second].first = KeyT();
+ Map.erase(It);
+ }
+
+ void clear() {
+ Map.clear();
+ Vector.clear();
+ }
+
+ bool empty() const {
+ assert(Map.empty() == Vector.empty());
+ return Map.empty();
+ }
+};
+} // end namespace llvm
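
As a reading aid (not part of the patch), here is a small C++ sketch of the blot semantics defined above. A and B stand in for two distinct key pointers, and use() is a hypothetical consumer.

    // Sketch of blot semantics, assuming placeholder keys A and B.
    BlotMapVector<const Value *, unsigned> MV;
    MV[A] = 1;                  // insertion order is recorded in Vector
    MV[B] = 2;
    MV.blot(A);                 // A's slot stays in Vector with a null key;
                                // A is erased from Map, so MV.find(A) == MV.end()
    for (auto &Pair : MV)
      if (Pair.first)           // clients must skip blotted (null-key) slots
        use(Pair.second);       // hypothetical helper
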
diff --git a/contrib/llvm/lib/Transforms/ObjCARC/DependencyAnalysis.cpp b/contrib/llvm/lib/Transforms/ObjCARC/DependencyAnalysis.cpp
new file mode 100644
index 0000000..9d78e5a
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/ObjCARC/DependencyAnalysis.cpp
@@ -0,0 +1,278 @@
+//===- DependencyAnalysis.cpp - ObjC ARC Optimization ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines special dependency analysis routines used in Objective C
+/// ARC Optimizations.
+///
+/// WARNING: This file knows about certain library functions. It recognizes them
+/// by name, and hardwires knowledge of their semantics.
+///
+/// WARNING: This file knows about how certain Objective-C library functions are
+/// used. Naive LLVM IR transformations which would otherwise be
+/// behavior-preserving may break these assumptions.
+///
+//===----------------------------------------------------------------------===//
+
+#include "ObjCARC.h"
+#include "DependencyAnalysis.h"
+#include "ProvenanceAnalysis.h"
+#include "llvm/IR/CFG.h"
+
+using namespace llvm;
+using namespace llvm::objcarc;
+
+#define DEBUG_TYPE "objc-arc-dependency"
+
+/// Test whether the given instruction can result in a reference count
+/// modification (positive or negative) for the pointer's object.
+bool llvm::objcarc::CanAlterRefCount(const Instruction *Inst, const Value *Ptr,
+ ProvenanceAnalysis &PA,
+ ARCInstKind Class) {
+ switch (Class) {
+ case ARCInstKind::Autorelease:
+ case ARCInstKind::AutoreleaseRV:
+ case ARCInstKind::IntrinsicUser:
+ case ARCInstKind::User:
+ // These operations never directly modify a reference count.
+ return false;
+ default: break;
+ }
+
+ ImmutableCallSite CS(Inst);
+ assert(CS && "Only calls can alter reference counts!");
+
+ // See if AliasAnalysis can help us with the call.
+ FunctionModRefBehavior MRB = PA.getAA()->getModRefBehavior(CS);
+ if (AliasAnalysis::onlyReadsMemory(MRB))
+ return false;
+ if (AliasAnalysis::onlyAccessesArgPointees(MRB)) {
+ const DataLayout &DL = Inst->getModule()->getDataLayout();
+ for (ImmutableCallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
+ I != E; ++I) {
+ const Value *Op = *I;
+ if (IsPotentialRetainableObjPtr(Op, *PA.getAA()) &&
+ PA.related(Ptr, Op, DL))
+ return true;
+ }
+ return false;
+ }
+
+ // Assume the worst.
+ return true;
+}
+
+bool llvm::objcarc::CanDecrementRefCount(const Instruction *Inst,
+ const Value *Ptr,
+ ProvenanceAnalysis &PA,
+ ARCInstKind Class) {
+ // First perform a quick check if Class can not touch ref counts.
+ if (!CanDecrementRefCount(Class))
+ return false;
+
+ // Otherwise, just use CanAlterRefCount for now.
+ return CanAlterRefCount(Inst, Ptr, PA, Class);
+}
+
+/// Test whether the given instruction can "use" the given pointer's object in a
+/// way that requires the reference count to be positive.
+bool llvm::objcarc::CanUse(const Instruction *Inst, const Value *Ptr,
+ ProvenanceAnalysis &PA, ARCInstKind Class) {
+ // ARCInstKind::Call operations (as opposed to
+ // ARCInstKind::CallOrUser) never "use" objc pointers.
+ if (Class == ARCInstKind::Call)
+ return false;
+
+ const DataLayout &DL = Inst->getModule()->getDataLayout();
+
+ // Consider various instructions which may have pointer arguments which are
+ // not "uses".
+ if (const ICmpInst *ICI = dyn_cast<ICmpInst>(Inst)) {
+ // Comparing a pointer with null, or any other constant, isn't really a use,
+ // because we don't care what the pointer points to, or about the values
+ // of any other dynamic reference-counted pointers.
+ if (!IsPotentialRetainableObjPtr(ICI->getOperand(1), *PA.getAA()))
+ return false;
+ } else if (auto CS = ImmutableCallSite(Inst)) {
+ // For calls, just check the arguments (and not the callee operand).
+ for (ImmutableCallSite::arg_iterator OI = CS.arg_begin(),
+ OE = CS.arg_end(); OI != OE; ++OI) {
+ const Value *Op = *OI;
+ if (IsPotentialRetainableObjPtr(Op, *PA.getAA()) &&
+ PA.related(Ptr, Op, DL))
+ return true;
+ }
+ return false;
+ } else if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
+ // Special-case stores, because we don't care about the stored value, just
+ // the store address.
+ const Value *Op = GetUnderlyingObjCPtr(SI->getPointerOperand(), DL);
+ // If we can't tell what the underlying object was, assume there is a
+ // dependence.
+ return IsPotentialRetainableObjPtr(Op, *PA.getAA()) &&
+ PA.related(Op, Ptr, DL);
+ }
+
+ // Check each operand for a match.
+ for (User::const_op_iterator OI = Inst->op_begin(), OE = Inst->op_end();
+ OI != OE; ++OI) {
+ const Value *Op = *OI;
+ if (IsPotentialRetainableObjPtr(Op, *PA.getAA()) && PA.related(Ptr, Op, DL))
+ return true;
+ }
+ return false;
+}
+
+/// Test if there can be dependencies on Inst through Arg. This function only
+/// tests dependencies relevant for removing pairs of calls.
+bool
+llvm::objcarc::Depends(DependenceKind Flavor, Instruction *Inst,
+ const Value *Arg, ProvenanceAnalysis &PA) {
+ // If we've reached the definition of Arg, stop.
+ if (Inst == Arg)
+ return true;
+
+ switch (Flavor) {
+ case NeedsPositiveRetainCount: {
+ ARCInstKind Class = GetARCInstKind(Inst);
+ switch (Class) {
+ case ARCInstKind::AutoreleasepoolPop:
+ case ARCInstKind::AutoreleasepoolPush:
+ case ARCInstKind::None:
+ return false;
+ default:
+ return CanUse(Inst, Arg, PA, Class);
+ }
+ }
+
+ case AutoreleasePoolBoundary: {
+ ARCInstKind Class = GetARCInstKind(Inst);
+ switch (Class) {
+ case ARCInstKind::AutoreleasepoolPop:
+ case ARCInstKind::AutoreleasepoolPush:
+ // These mark the end and begin of an autorelease pool scope.
+ return true;
+ default:
+ // Nothing else does this.
+ return false;
+ }
+ }
+
+ case CanChangeRetainCount: {
+ ARCInstKind Class = GetARCInstKind(Inst);
+ switch (Class) {
+ case ARCInstKind::AutoreleasepoolPop:
+ // Conservatively assume this can decrement any count.
+ return true;
+ case ARCInstKind::AutoreleasepoolPush:
+ case ARCInstKind::None:
+ return false;
+ default:
+ return CanAlterRefCount(Inst, Arg, PA, Class);
+ }
+ }
+
+ case RetainAutoreleaseDep:
+ switch (GetBasicARCInstKind(Inst)) {
+ case ARCInstKind::AutoreleasepoolPop:
+ case ARCInstKind::AutoreleasepoolPush:
+ // Don't merge an objc_autorelease with an objc_retain inside a different
+ // autoreleasepool scope.
+ return true;
+ case ARCInstKind::Retain:
+ case ARCInstKind::RetainRV:
+ // Check for a retain of the same pointer for merging.
+ return GetArgRCIdentityRoot(Inst) == Arg;
+ default:
+ // Nothing else matters for objc_retainAutorelease formation.
+ return false;
+ }
+
+ case RetainAutoreleaseRVDep: {
+ ARCInstKind Class = GetBasicARCInstKind(Inst);
+ switch (Class) {
+ case ARCInstKind::Retain:
+ case ARCInstKind::RetainRV:
+ // Check for a retain of the same pointer for merging.
+ return GetArgRCIdentityRoot(Inst) == Arg;
+ default:
+ // Anything that can autorelease interrupts
+ // retainAutoreleaseReturnValue formation.
+ return CanInterruptRV(Class);
+ }
+ }
+
+ case RetainRVDep:
+ return CanInterruptRV(GetBasicARCInstKind(Inst));
+ }
+
+ llvm_unreachable("Invalid dependence flavor");
+}
+
+/// Walk up the CFG from StartPos (which is in StartBB) and find local and
+/// non-local dependencies on Arg.
+///
+/// TODO: Cache results?
+void
+llvm::objcarc::FindDependencies(DependenceKind Flavor,
+ const Value *Arg,
+ BasicBlock *StartBB, Instruction *StartInst,
+ SmallPtrSetImpl<Instruction *> &DependingInsts,
+ SmallPtrSetImpl<const BasicBlock *> &Visited,
+ ProvenanceAnalysis &PA) {
+ BasicBlock::iterator StartPos = StartInst->getIterator();
+
+ SmallVector<std::pair<BasicBlock *, BasicBlock::iterator>, 4> Worklist;
+ Worklist.push_back(std::make_pair(StartBB, StartPos));
+ do {
+ std::pair<BasicBlock *, BasicBlock::iterator> Pair =
+ Worklist.pop_back_val();
+ BasicBlock *LocalStartBB = Pair.first;
+ BasicBlock::iterator LocalStartPos = Pair.second;
+ BasicBlock::iterator StartBBBegin = LocalStartBB->begin();
+ for (;;) {
+ if (LocalStartPos == StartBBBegin) {
+ pred_iterator PI(LocalStartBB), PE(LocalStartBB, false);
+ if (PI == PE)
+ // If we've reached the function entry, produce a null dependence.
+ DependingInsts.insert(nullptr);
+ else
+ // Add the predecessors to the worklist.
+ do {
+ BasicBlock *PredBB = *PI;
+ if (Visited.insert(PredBB).second)
+ Worklist.push_back(std::make_pair(PredBB, PredBB->end()));
+ } while (++PI != PE);
+ break;
+ }
+
+ Instruction *Inst = &*--LocalStartPos;
+ if (Depends(Flavor, Inst, Arg, PA)) {
+ DependingInsts.insert(Inst);
+ break;
+ }
+ }
+ } while (!Worklist.empty());
+
+ // Determine whether the original StartBB post-dominates all of the blocks we
+ // visited. If not, insert a sentinel indicating that most optimizations are
+ // not safe.
+ for (const BasicBlock *BB : Visited) {
+ if (BB == StartBB)
+ continue;
+ const TerminatorInst *TI = cast<TerminatorInst>(&BB->back());
+ for (succ_const_iterator SI(TI), SE(TI, false); SI != SE; ++SI) {
+ const BasicBlock *Succ = *SI;
+ if (Succ != StartBB && !Visited.count(Succ)) {
+ DependingInsts.insert(reinterpret_cast<Instruction *>(-1));
+ return;
+ }
+ }
+ }
+}
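
A hedged sketch (not part of the patch) of how a client typically consumes FindDependencies, mirroring the pattern used by ObjCARCContract::contractAutorelease later in this commit; Autorelease, Arg, and PA are placeholders for the caller's state.

    // Sketch: find the unique instruction that Arg depends on before Autorelease.
    SmallPtrSet<Instruction *, 4> DependingInsts;
    SmallPtrSet<const BasicBlock *, 4> Visited;
    FindDependencies(RetainAutoreleaseDep, Arg, Autorelease->getParent(),
                     Autorelease, DependingInsts, Visited, PA);
    // A nullptr entry (function entry reached) or the (Instruction*)-1 sentinel
    // (StartBB does not post-dominate the visited blocks) makes the result unusable.
    if (DependingInsts.size() != 1)
      return false;
    auto *Dep = dyn_cast_or_null<CallInst>(*DependingInsts.begin());
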
diff --git a/contrib/llvm/lib/Transforms/ObjCARC/DependencyAnalysis.h b/contrib/llvm/lib/Transforms/ObjCARC/DependencyAnalysis.h
new file mode 100644
index 0000000..8e042d4
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/ObjCARC/DependencyAnalysis.h
@@ -0,0 +1,88 @@
+//===- DependencyAnalysis.h - ObjC ARC Optimization ---*- C++ -*-----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file declares special dependency analysis routines used in Objective C
+/// ARC Optimizations.
+///
+/// WARNING: This file knows about certain library functions. It recognizes them
+/// by name, and hardwires knowledge of their semantics.
+///
+/// WARNING: This file knows about how certain Objective-C library functions are
+/// used. Naive LLVM IR transformations which would otherwise be
+/// behavior-preserving may break these assumptions.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TRANSFORMS_OBJCARC_DEPENDENCYANALYSIS_H
+#define LLVM_LIB_TRANSFORMS_OBJCARC_DEPENDENCYANALYSIS_H
+
+#include "llvm/ADT/SmallPtrSet.h"
+
+namespace llvm {
+ class BasicBlock;
+ class Instruction;
+ class Value;
+}
+
+namespace llvm {
+namespace objcarc {
+
+class ProvenanceAnalysis;
+
+/// \enum DependenceKind
+/// \brief Defines different dependence kinds among various ARC constructs.
+///
+/// There are several kinds of dependence-like concepts in use here.
+///
+enum DependenceKind {
+ NeedsPositiveRetainCount,
+ AutoreleasePoolBoundary,
+ CanChangeRetainCount,
+ RetainAutoreleaseDep, ///< Blocks objc_retainAutorelease.
+ RetainAutoreleaseRVDep, ///< Blocks objc_retainAutoreleaseReturnValue.
+ RetainRVDep ///< Blocks objc_retainAutoreleasedReturnValue.
+};
+
+void FindDependencies(DependenceKind Flavor,
+ const Value *Arg,
+ BasicBlock *StartBB, Instruction *StartInst,
+ SmallPtrSetImpl<Instruction *> &DependingInstructions,
+ SmallPtrSetImpl<const BasicBlock *> &Visited,
+ ProvenanceAnalysis &PA);
+
+bool
+Depends(DependenceKind Flavor, Instruction *Inst, const Value *Arg,
+ ProvenanceAnalysis &PA);
+
+/// Test whether the given instruction can "use" the given pointer's object in a
+/// way that requires the reference count to be positive.
+bool CanUse(const Instruction *Inst, const Value *Ptr, ProvenanceAnalysis &PA,
+ ARCInstKind Class);
+
+/// Test whether the given instruction can result in a reference count
+/// modification (positive or negative) for the pointer's object.
+bool CanAlterRefCount(const Instruction *Inst, const Value *Ptr,
+ ProvenanceAnalysis &PA, ARCInstKind Class);
+
+/// Returns true if we can not conservatively prove that Inst can not decrement
+/// the reference count of Ptr. Returns false if we can.
+bool CanDecrementRefCount(const Instruction *Inst, const Value *Ptr,
+ ProvenanceAnalysis &PA, ARCInstKind Class);
+
+static inline bool CanDecrementRefCount(const Instruction *Inst,
+ const Value *Ptr,
+ ProvenanceAnalysis &PA) {
+ return CanDecrementRefCount(Inst, Ptr, PA, GetARCInstKind(Inst));
+}
+
+} // namespace objcarc
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Transforms/ObjCARC/ObjCARC.cpp b/contrib/llvm/lib/Transforms/ObjCARC/ObjCARC.cpp
new file mode 100644
index 0000000..d860723
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/ObjCARC/ObjCARC.cpp
@@ -0,0 +1,42 @@
+//===-- ObjCARC.cpp -------------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements common infrastructure for libLLVMObjCARCOpts.a, which
+// implements several scalar transformations over the LLVM intermediate
+// representation, including the C bindings for that library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ObjCARC.h"
+#include "llvm-c/Core.h"
+#include "llvm-c/Initialization.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/Support/CommandLine.h"
+
+namespace llvm {
+ class PassRegistry;
+}
+
+using namespace llvm;
+using namespace llvm::objcarc;
+
+/// initializeObjCARCOpts - Initialize all passes linked into the
+/// ObjCARCOpts library.
+void llvm::initializeObjCARCOpts(PassRegistry &Registry) {
+ initializeObjCARCAAWrapperPassPass(Registry);
+ initializeObjCARCAPElimPass(Registry);
+ initializeObjCARCExpandPass(Registry);
+ initializeObjCARCContractPass(Registry);
+ initializeObjCARCOptPass(Registry);
+ initializePAEvalPass(Registry);
+}
+
+void LLVMInitializeObjCARCOpts(LLVMPassRegistryRef R) {
+ initializeObjCARCOpts(*unwrap(R));
+}
diff --git a/contrib/llvm/lib/Transforms/ObjCARC/ObjCARC.h b/contrib/llvm/lib/Transforms/ObjCARC/ObjCARC.h
new file mode 100644
index 0000000..5fd45b0
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/ObjCARC/ObjCARC.h
@@ -0,0 +1,76 @@
+//===- ObjCARC.h - ObjC ARC Optimization --------------*- C++ -*-----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file defines common definitions/declarations used by the ObjC ARC
+/// Optimizer. ARC stands for Automatic Reference Counting and is a system for
+/// managing reference counts for objects in Objective C.
+///
+/// WARNING: This file knows about certain library functions. It recognizes them
+/// by name, and hardwires knowledge of their semantics.
+///
+/// WARNING: This file knows about how certain Objective-C library functions are
+/// used. Naive LLVM IR transformations which would otherwise be
+/// behavior-preserving may break these assumptions.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TRANSFORMS_OBJCARC_OBJCARC_H
+#define LLVM_LIB_TRANSFORMS_OBJCARC_OBJCARC_H
+
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/ObjCARCAnalysisUtils.h"
+#include "llvm/Analysis/ObjCARCInstKind.h"
+#include "llvm/Analysis/Passes.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Pass.h"
+#include "llvm/Transforms/ObjCARC.h"
+#include "llvm/Transforms/Utils/Local.h"
+
+namespace llvm {
+class raw_ostream;
+}
+
+namespace llvm {
+namespace objcarc {
+
+/// \brief Erase the given instruction.
+///
+/// Many ObjC calls return their argument verbatim,
+/// so if it's such a call and the return value has users, replace them with the
+/// argument value.
+///
+static inline void EraseInstruction(Instruction *CI) {
+ Value *OldArg = cast<CallInst>(CI)->getArgOperand(0);
+
+ bool Unused = CI->use_empty();
+
+ if (!Unused) {
+ // Replace the return value with the argument.
+ assert((IsForwarding(GetBasicARCInstKind(CI)) ||
+ (IsNoopOnNull(GetBasicARCInstKind(CI)) &&
+ isa<ConstantPointerNull>(OldArg))) &&
+ "Can't delete non-forwarding instruction with users!");
+ CI->replaceAllUsesWith(OldArg);
+ }
+
+ CI->eraseFromParent();
+
+ if (Unused)
+ RecursivelyDeleteTriviallyDeadInstructions(OldArg);
+}
+
+} // end namespace objcarc
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCAPElim.cpp b/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCAPElim.cpp
new file mode 100644
index 0000000..969e77c
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCAPElim.cpp
@@ -0,0 +1,173 @@
+//===- ObjCARCAPElim.cpp - ObjC ARC Optimization --------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines ObjC ARC optimizations. ARC stands for Automatic
+/// Reference Counting and is a system for managing reference counts for objects
+/// in Objective C.
+///
+/// This specific file implements optimizations which remove extraneous
+/// autorelease pools.
+///
+/// WARNING: This file knows about certain library functions. It recognizes them
+/// by name, and hardwires knowledge of their semantics.
+///
+/// WARNING: This file knows about how certain Objective-C library functions are
+/// used. Naive LLVM IR transformations which would otherwise be
+/// behavior-preserving may break these assumptions.
+///
+//===----------------------------------------------------------------------===//
+
+#include "ObjCARC.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+using namespace llvm::objcarc;
+
+#define DEBUG_TYPE "objc-arc-ap-elim"
+
+namespace {
+ /// \brief Autorelease pool elimination.
+ class ObjCARCAPElim : public ModulePass {
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+ bool runOnModule(Module &M) override;
+
+ static bool MayAutorelease(ImmutableCallSite CS, unsigned Depth = 0);
+ static bool OptimizeBB(BasicBlock *BB);
+
+ public:
+ static char ID;
+ ObjCARCAPElim() : ModulePass(ID) {
+ initializeObjCARCAPElimPass(*PassRegistry::getPassRegistry());
+ }
+ };
+}
+
+char ObjCARCAPElim::ID = 0;
+INITIALIZE_PASS(ObjCARCAPElim,
+ "objc-arc-apelim",
+ "ObjC ARC autorelease pool elimination",
+ false, false)
+
+Pass *llvm::createObjCARCAPElimPass() {
+ return new ObjCARCAPElim();
+}
+
+void ObjCARCAPElim::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesCFG();
+}
+
+/// Interprocedurally determine if calls made by the given call site can
+/// possibly produce autoreleases.
+bool ObjCARCAPElim::MayAutorelease(ImmutableCallSite CS, unsigned Depth) {
+ if (const Function *Callee = CS.getCalledFunction()) {
+ if (Callee->isDeclaration() || Callee->mayBeOverridden())
+ return true;
+ for (const BasicBlock &BB : *Callee) {
+ for (const Instruction &I : BB)
+ if (ImmutableCallSite JCS = ImmutableCallSite(&I))
+ // This recursion depth limit is arbitrary. It's just great
+ // enough to cover known interesting testcases.
+ if (Depth < 3 &&
+ !JCS.onlyReadsMemory() &&
+ MayAutorelease(JCS, Depth + 1))
+ return true;
+ }
+ return false;
+ }
+
+ return true;
+}
+
+bool ObjCARCAPElim::OptimizeBB(BasicBlock *BB) {
+ bool Changed = false;
+
+ Instruction *Push = nullptr;
+ for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ) {
+ Instruction *Inst = &*I++;
+ switch (GetBasicARCInstKind(Inst)) {
+ case ARCInstKind::AutoreleasepoolPush:
+ Push = Inst;
+ break;
+ case ARCInstKind::AutoreleasepoolPop:
+ // If this pop matches a push and nothing in between can autorelease,
+ // zap the pair.
+ if (Push && cast<CallInst>(Inst)->getArgOperand(0) == Push) {
+ Changed = true;
+ DEBUG(dbgs() << "ObjCARCAPElim::OptimizeBB: Zapping push pop "
+ "autorelease pair:\n"
+ " Pop: " << *Inst << "\n"
+ << " Push: " << *Push << "\n");
+ Inst->eraseFromParent();
+ Push->eraseFromParent();
+ }
+ Push = nullptr;
+ break;
+ case ARCInstKind::CallOrUser:
+ if (MayAutorelease(ImmutableCallSite(Inst)))
+ Push = nullptr;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return Changed;
+}
+
+bool ObjCARCAPElim::runOnModule(Module &M) {
+ if (!EnableARCOpts)
+ return false;
+
+ // If nothing in the Module uses ARC, don't do anything.
+ if (!ModuleHasARC(M))
+ return false;
+
+ // Find the llvm.global_ctors variable, as the first step in
+ // identifying the global constructors. In theory, unnecessary autorelease
+ // pools could occur anywhere, but in practice it's pretty rare. Global
+ // ctors are a place where autorelease pools get inserted automatically,
+ // so it's pretty common for them to be unnecessary, and it's pretty
+ // profitable to eliminate them.
+ GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors");
+ if (!GV)
+ return false;
+
+ assert(GV->hasDefinitiveInitializer() &&
+ "llvm.global_ctors is uncooperative!");
+
+ bool Changed = false;
+
+ // Dig the constructor functions out of GV's initializer.
+ ConstantArray *Init = cast<ConstantArray>(GV->getInitializer());
+ for (User::op_iterator OI = Init->op_begin(), OE = Init->op_end();
+ OI != OE; ++OI) {
+ Value *Op = *OI;
+ // llvm.global_ctors is an array of three-field structs where the second
+ // members are constructor functions.
+ Function *F = dyn_cast<Function>(cast<ConstantStruct>(Op)->getOperand(1));
+ // If the user used a constructor function with the wrong signature and
+ // it got bitcasted or whatever, look the other way.
+ if (!F)
+ continue;
+ // Only look at function definitions.
+ if (F->isDeclaration())
+ continue;
+ // Only look at functions with one basic block.
+ if (std::next(F->begin()) != F->end())
+ continue;
+ // Ok, a single-block constructor function definition. Try to optimize it.
+ Changed |= OptimizeBB(&F->front());
+ }
+
+ return Changed;
+}
diff --git a/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCContract.cpp b/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCContract.cpp
new file mode 100644
index 0000000..1cdf568
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCContract.cpp
@@ -0,0 +1,665 @@
+//===- ObjCARCContract.cpp - ObjC ARC Optimization ------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file defines late ObjC ARC optimizations. ARC stands for Automatic
+/// Reference Counting and is a system for managing reference counts for objects
+/// in Objective C.
+///
+/// This specific file mainly deals with ``contracting'' multiple lower level
+/// operations into singular higher level operations through pattern matching.
+///
+/// WARNING: This file knows about certain library functions. It recognizes them
+/// by name, and hardwires knowledge of their semantics.
+///
+/// WARNING: This file knows about how certain Objective-C library functions are
+/// used. Naive LLVM IR transformations which would otherwise be
+/// behavior-preserving may break these assumptions.
+///
+//===----------------------------------------------------------------------===//
+
+// TODO: ObjCARCContract could insert PHI nodes when uses aren't
+// dominated by single calls.
+
+#include "ObjCARC.h"
+#include "ARCRuntimeEntryPoints.h"
+#include "DependencyAnalysis.h"
+#include "ProvenanceAnalysis.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+using namespace llvm::objcarc;
+
+#define DEBUG_TYPE "objc-arc-contract"
+
+STATISTIC(NumPeeps, "Number of calls peephole-optimized");
+STATISTIC(NumStoreStrongs, "Number of objc_storeStrong calls formed");
+
+//===----------------------------------------------------------------------===//
+// Declarations
+//===----------------------------------------------------------------------===//
+
+namespace {
+ /// \brief Late ARC optimizations
+ ///
+ /// These change the IR in a way that makes it difficult to be analyzed by
+ /// ObjCARCOpt, so it's run late.
+ class ObjCARCContract : public FunctionPass {
+ bool Changed;
+ AliasAnalysis *AA;
+ DominatorTree *DT;
+ ProvenanceAnalysis PA;
+ ARCRuntimeEntryPoints EP;
+
+ /// A flag indicating whether this optimization pass should run.
+ bool Run;
+
+ /// The inline asm string to insert between calls and RetainRV calls to make
+ /// the optimization work on targets which need it.
+ const MDString *RetainRVMarker;
+
+ /// The set of inserted objc_storeStrong calls. If at the end of walking the
+ /// function we have found no alloca instructions, these calls can be marked
+ /// "tail".
+ SmallPtrSet<CallInst *, 8> StoreStrongCalls;
+
+ /// Returns true if we eliminated Inst.
+ bool tryToPeepholeInstruction(Function &F, Instruction *Inst,
+ inst_iterator &Iter,
+ SmallPtrSetImpl<Instruction *> &DepInsts,
+ SmallPtrSetImpl<const BasicBlock *> &Visited,
+ bool &TailOkForStoreStrong);
+
+ bool optimizeRetainCall(Function &F, Instruction *Retain);
+
+ bool
+ contractAutorelease(Function &F, Instruction *Autorelease,
+ ARCInstKind Class,
+ SmallPtrSetImpl<Instruction *> &DependingInstructions,
+ SmallPtrSetImpl<const BasicBlock *> &Visited);
+
+ void tryToContractReleaseIntoStoreStrong(Instruction *Release,
+ inst_iterator &Iter);
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+ bool doInitialization(Module &M) override;
+ bool runOnFunction(Function &F) override;
+
+ public:
+ static char ID;
+ ObjCARCContract() : FunctionPass(ID) {
+ initializeObjCARCContractPass(*PassRegistry::getPassRegistry());
+ }
+ };
+}
+
+//===----------------------------------------------------------------------===//
+// Implementation
+//===----------------------------------------------------------------------===//
+
+/// Turn objc_retain into objc_retainAutoreleasedReturnValue if the operand is a
+/// return value. We do this late so we do not disrupt the dataflow analysis in
+/// ObjCARCOpt.
+bool ObjCARCContract::optimizeRetainCall(Function &F, Instruction *Retain) {
+ ImmutableCallSite CS(GetArgRCIdentityRoot(Retain));
+ const Instruction *Call = CS.getInstruction();
+ if (!Call)
+ return false;
+ if (Call->getParent() != Retain->getParent())
+ return false;
+
+ // Check that the call is next to the retain.
+ BasicBlock::const_iterator I = ++Call->getIterator();
+ while (IsNoopInstruction(&*I))
+ ++I;
+ if (&*I != Retain)
+ return false;
+
+ // Turn it to an objc_retainAutoreleasedReturnValue.
+ Changed = true;
+ ++NumPeeps;
+
+ DEBUG(dbgs() << "Transforming objc_retain => "
+ "objc_retainAutoreleasedReturnValue since the operand is a "
+ "return value.\nOld: "<< *Retain << "\n");
+
+ // We do not have to worry about the tail-call or does-not-throw attributes,
+ // since retain and retainRV have the same properties.
+ Constant *Decl = EP.get(ARCRuntimeEntryPointKind::RetainRV);
+ cast<CallInst>(Retain)->setCalledFunction(Decl);
+
+ DEBUG(dbgs() << "New: " << *Retain << "\n");
+ return true;
+}
+
+/// Merge an autorelease with a retain into a fused call.
+bool ObjCARCContract::contractAutorelease(
+ Function &F, Instruction *Autorelease, ARCInstKind Class,
+ SmallPtrSetImpl<Instruction *> &DependingInstructions,
+ SmallPtrSetImpl<const BasicBlock *> &Visited) {
+ const Value *Arg = GetArgRCIdentityRoot(Autorelease);
+
+ // Check that there are no instructions between the retain and the autorelease
+ // (such as an autorelease_pop) which may change the count.
+ CallInst *Retain = nullptr;
+ if (Class == ARCInstKind::AutoreleaseRV)
+ FindDependencies(RetainAutoreleaseRVDep, Arg,
+ Autorelease->getParent(), Autorelease,
+ DependingInstructions, Visited, PA);
+ else
+ FindDependencies(RetainAutoreleaseDep, Arg,
+ Autorelease->getParent(), Autorelease,
+ DependingInstructions, Visited, PA);
+
+ Visited.clear();
+ if (DependingInstructions.size() != 1) {
+ DependingInstructions.clear();
+ return false;
+ }
+
+ Retain = dyn_cast_or_null<CallInst>(*DependingInstructions.begin());
+ DependingInstructions.clear();
+
+ if (!Retain || GetBasicARCInstKind(Retain) != ARCInstKind::Retain ||
+ GetArgRCIdentityRoot(Retain) != Arg)
+ return false;
+
+ Changed = true;
+ ++NumPeeps;
+
+ DEBUG(dbgs() << " Fusing retain/autorelease!\n"
+ " Autorelease:" << *Autorelease << "\n"
+ " Retain: " << *Retain << "\n");
+
+ Constant *Decl = EP.get(Class == ARCInstKind::AutoreleaseRV
+ ? ARCRuntimeEntryPointKind::RetainAutoreleaseRV
+ : ARCRuntimeEntryPointKind::RetainAutorelease);
+ Retain->setCalledFunction(Decl);
+
+ DEBUG(dbgs() << " New RetainAutorelease: " << *Retain << "\n");
+
+ EraseInstruction(Autorelease);
+ return true;
+}
+
+static StoreInst *findSafeStoreForStoreStrongContraction(LoadInst *Load,
+ Instruction *Release,
+ ProvenanceAnalysis &PA,
+ AliasAnalysis *AA) {
+ StoreInst *Store = nullptr;
+ bool SawRelease = false;
+
+ // Get the location associated with Load.
+ MemoryLocation Loc = MemoryLocation::get(Load);
+
+ // Walk down to find the store and the release, which may be in either order.
+ for (auto I = std::next(BasicBlock::iterator(Load)),
+ E = Load->getParent()->end();
+ I != E; ++I) {
+ // If we found the store we were looking for and saw the release,
+ // break. There is no more work to be done.
+ if (Store && SawRelease)
+ break;
+
+ // Now we know that we have not seen either the store or the release. If I
+ // is the release, mark that we saw the release and continue.
+ Instruction *Inst = &*I;
+ if (Inst == Release) {
+ SawRelease = true;
+ continue;
+ }
+
+ // Otherwise, we check if Inst is a "good" store. Grab the instruction class
+ // of Inst.
+ ARCInstKind Class = GetBasicARCInstKind(Inst);
+
+ // If Inst is an unrelated retain, we don't care about it.
+ //
+ // TODO: This is one area where the optimization could be made more
+ // aggressive.
+ if (IsRetain(Class))
+ continue;
+
+ // If we have seen the store, but not the release...
+ if (Store) {
+ // We need to make sure that it is safe to move the release from its
+ // current position to the store. This implies proving that any
+ // instruction in between Store and the Release conservatively can not use
+ // the RCIdentityRoot of Release. If we can prove that, we can ignore Inst and
+ // continue...
+ if (!CanUse(Inst, Load, PA, Class)) {
+ continue;
+ }
+
+ // Otherwise, be conservative and return nullptr.
+ return nullptr;
+ }
+
+ // Ok, now we know we have not seen a store yet. See if Inst can write to
+ // our load location; if it can not, just ignore the instruction.
+ if (!(AA->getModRefInfo(Inst, Loc) & MRI_Mod))
+ continue;
+
+ Store = dyn_cast<StoreInst>(Inst);
+
+ // If Inst can, then check if Inst is a simple store. If Inst is not a
+ // store, or is a store that is not simple, then something we do not
+ // understand is writing to this memory, implying we can not move the load
+ // over the write to any subsequent store that we may find.
+ if (!Store || !Store->isSimple())
+ return nullptr;
+
+ // Then make sure that the pointer we are storing to is Ptr. If so, we
+ // found our Store!
+ if (Store->getPointerOperand() == Loc.Ptr)
+ continue;
+
+ // Otherwise, we have an unknown store to some other ptr that clobbers
+ // Loc.Ptr. Bail!
+ return nullptr;
+ }
+
+ // If we did not find the store or did not see the release, fail.
+ if (!Store || !SawRelease)
+ return nullptr;
+
+ // We succeeded!
+ return Store;
+}
+
+static Instruction *
+findRetainForStoreStrongContraction(Value *New, StoreInst *Store,
+ Instruction *Release,
+ ProvenanceAnalysis &PA) {
+ // Walk up from the Store to find the retain.
+ BasicBlock::iterator I = Store->getIterator();
+ BasicBlock::iterator Begin = Store->getParent()->begin();
+ while (I != Begin && GetBasicARCInstKind(&*I) != ARCInstKind::Retain) {
+ Instruction *Inst = &*I;
+
+ // It is only safe to move the retain to the store if we can prove
+ // conservatively that nothing besides the release can decrement reference
+ // counts in between the retain and the store.
+ if (CanDecrementRefCount(Inst, New, PA) && Inst != Release)
+ return nullptr;
+ --I;
+ }
+ Instruction *Retain = &*I;
+ if (GetBasicARCInstKind(Retain) != ARCInstKind::Retain)
+ return nullptr;
+ if (GetArgRCIdentityRoot(Retain) != New)
+ return nullptr;
+ return Retain;
+}
+
+/// Attempt to merge an objc_release with a store, load, and objc_retain to form
+/// an objc_storeStrong. An objc_storeStrong:
+///
+/// objc_storeStrong(i8** %old_ptr, i8* new_value)
+///
+/// is equivalent to the following IR sequence:
+///
+/// ; Load old value.
+/// %old_value = load i8** %old_ptr (1)
+///
+/// ; Increment the new value and then release the old value. This must occur
+/// ; in order in case old_value releases new_value in its destructor causing
+/// ; us to potentially have a dangling ptr.
+/// tail call i8* @objc_retain(i8* %new_value) (2)
+/// tail call void @objc_release(i8* %old_value) (3)
+///
+/// ; Store the new_value into old_ptr
+/// store i8* %new_value, i8** %old_ptr (4)
+///
+/// The safety of this optimization is based around the following
+/// considerations:
+///
+/// 1. We are forming the store strong at the store. Thus to perform this
+/// optimization it must be safe to move the retain, load, and release to
+/// (4).
+/// 2. We need to make sure that any re-orderings of (1), (2), (3), (4) are
+/// safe.
+void ObjCARCContract::tryToContractReleaseIntoStoreStrong(Instruction *Release,
+ inst_iterator &Iter) {
+ // See if we are releasing something that we just loaded.
+ auto *Load = dyn_cast<LoadInst>(GetArgRCIdentityRoot(Release));
+ if (!Load || !Load->isSimple())
+ return;
+
+ // For now, require everything to be in one basic block.
+ BasicBlock *BB = Release->getParent();
+ if (Load->getParent() != BB)
+ return;
+
+ // First scan down the BB from Load, looking for a store to the same pointer
+ // that Load loads from.
+ StoreInst *Store =
+ findSafeStoreForStoreStrongContraction(Load, Release, PA, AA);
+ // If we fail, bail.
+ if (!Store)
+ return;
+
+ // Then find what new_value's RCIdentity Root is.
+ Value *New = GetRCIdentityRoot(Store->getValueOperand());
+
+ // Then walk up the BB and look for a retain on New without any intervening
+ // instructions which conservatively might decrement ref counts.
+ Instruction *Retain =
+ findRetainForStoreStrongContraction(New, Store, Release, PA);
+
+ // If we fail, bail.
+ if (!Retain)
+ return;
+
+ Changed = true;
+ ++NumStoreStrongs;
+
+ DEBUG(
+ llvm::dbgs() << " Contracting retain, release into objc_storeStrong.\n"
+ << " Old:\n"
+ << " Store: " << *Store << "\n"
+ << " Release: " << *Release << "\n"
+ << " Retain: " << *Retain << "\n"
+ << " Load: " << *Load << "\n");
+
+ LLVMContext &C = Release->getContext();
+ Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
+ Type *I8XX = PointerType::getUnqual(I8X);
+
+ Value *Args[] = { Load->getPointerOperand(), New };
+ if (Args[0]->getType() != I8XX)
+ Args[0] = new BitCastInst(Args[0], I8XX, "", Store);
+ if (Args[1]->getType() != I8X)
+ Args[1] = new BitCastInst(Args[1], I8X, "", Store);
+ Constant *Decl = EP.get(ARCRuntimeEntryPointKind::StoreStrong);
+ CallInst *StoreStrong = CallInst::Create(Decl, Args, "", Store);
+ StoreStrong->setDoesNotThrow();
+ StoreStrong->setDebugLoc(Store->getDebugLoc());
+
+ // We can't set the tail flag yet, because we haven't yet determined
+ // whether there are any escaping allocas. Remember this call, so that
+ // we can set the tail flag once we know it's safe.
+ StoreStrongCalls.insert(StoreStrong);
+
+ DEBUG(llvm::dbgs() << " New Store Strong: " << *StoreStrong << "\n");
+
+ if (&*Iter == Store) ++Iter;
+ Store->eraseFromParent();
+ Release->eraseFromParent();
+ EraseInstruction(Retain);
+ if (Load->use_empty())
+ Load->eraseFromParent();
+}
+
+bool ObjCARCContract::tryToPeepholeInstruction(
+ Function &F, Instruction *Inst, inst_iterator &Iter,
+ SmallPtrSetImpl<Instruction *> &DependingInsts,
+ SmallPtrSetImpl<const BasicBlock *> &Visited,
+ bool &TailOkForStoreStrongs) {
+ // Only these library routines return their argument. In particular,
+ // objc_retainBlock does not necessarily return its argument.
+ ARCInstKind Class = GetBasicARCInstKind(Inst);
+ switch (Class) {
+ case ARCInstKind::FusedRetainAutorelease:
+ case ARCInstKind::FusedRetainAutoreleaseRV:
+ return false;
+ case ARCInstKind::Autorelease:
+ case ARCInstKind::AutoreleaseRV:
+ return contractAutorelease(F, Inst, Class, DependingInsts, Visited);
+ case ARCInstKind::Retain:
+ // Attempt to convert retains to retainrvs if they are next to function
+ // calls.
+ if (!optimizeRetainCall(F, Inst))
+ return false;
+ // If we succeed in our optimization, fall through.
+ // FALLTHROUGH
+ case ARCInstKind::RetainRV: {
+ // If we're compiling for a target which needs a special inline-asm
+ // marker to do the retainAutoreleasedReturnValue optimization,
+ // insert it now.
+ if (!RetainRVMarker)
+ return false;
+ BasicBlock::iterator BBI = Inst->getIterator();
+ BasicBlock *InstParent = Inst->getParent();
+
+ // Step up to see if the call immediately precedes the RetainRV call.
+ // If it's an invoke, we have to cross a block boundary. And we have
+ // to carefully dodge no-op instructions.
+ do {
+ if (&*BBI == InstParent->begin()) {
+ BasicBlock *Pred = InstParent->getSinglePredecessor();
+ if (!Pred)
+ goto decline_rv_optimization;
+ BBI = Pred->getTerminator()->getIterator();
+ break;
+ }
+ --BBI;
+ } while (IsNoopInstruction(&*BBI));
+
+ if (&*BBI == GetArgRCIdentityRoot(Inst)) {
+ DEBUG(dbgs() << "Adding inline asm marker for "
+ "retainAutoreleasedReturnValue optimization.\n");
+ Changed = true;
+ InlineAsm *IA =
+ InlineAsm::get(FunctionType::get(Type::getVoidTy(Inst->getContext()),
+ /*isVarArg=*/false),
+ RetainRVMarker->getString(),
+ /*Constraints=*/"", /*hasSideEffects=*/true);
+ CallInst::Create(IA, "", Inst);
+ }
+ decline_rv_optimization:
+ return false;
+ }
+ case ARCInstKind::InitWeak: {
+ // objc_initWeak(p, null) => *p = null
+ CallInst *CI = cast<CallInst>(Inst);
+ if (IsNullOrUndef(CI->getArgOperand(1))) {
+ Value *Null =
+ ConstantPointerNull::get(cast<PointerType>(CI->getType()));
+ Changed = true;
+ new StoreInst(Null, CI->getArgOperand(0), CI);
+
+ DEBUG(dbgs() << "OBJCARCContract: Old = " << *CI << "\n"
+ << " New = " << *Null << "\n");
+
+ CI->replaceAllUsesWith(Null);
+ CI->eraseFromParent();
+ }
+ return true;
+ }
+ case ARCInstKind::Release:
+ // Try to form an objc store strong from our release. If we fail, there is
+ // nothing further to do below, so continue.
+ tryToContractReleaseIntoStoreStrong(Inst, Iter);
+ return true;
+ case ARCInstKind::User:
+ // Be conservative if the function has any alloca instructions.
+ // Technically we only care about escaping alloca instructions,
+ // but this is sufficient to handle some interesting cases.
+ if (isa<AllocaInst>(Inst))
+ TailOkForStoreStrongs = false;
+ return true;
+ case ARCInstKind::IntrinsicUser:
+ // Remove calls to @clang.arc.use(...).
+ Inst->eraseFromParent();
+ return true;
+ default:
+ return true;
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Top Level Driver
+//===----------------------------------------------------------------------===//
+
+bool ObjCARCContract::runOnFunction(Function &F) {
+ if (!EnableARCOpts)
+ return false;
+
+ // If nothing in the Module uses ARC, don't do anything.
+ if (!Run)
+ return false;
+
+ Changed = false;
+ AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
+ DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+
+ PA.setAA(&getAnalysis<AAResultsWrapperPass>().getAAResults());
+
+ DEBUG(llvm::dbgs() << "**** ObjCARC Contract ****\n");
+
+ // Track whether it's ok to mark objc_storeStrong calls with the "tail"
+ // keyword. Be conservative if the function has variadic arguments.
+ // It seems that functions which "return twice" are also unsafe for the
+ // "tail" argument, because they are setjmp, which could need to
+ // return to an earlier stack state.
+ bool TailOkForStoreStrongs =
+ !F.isVarArg() && !F.callsFunctionThatReturnsTwice();
+
+ // For ObjC library calls which return their argument, replace uses of the
+ // argument with uses of the call return value, if it dominates the use. This
+ // reduces register pressure.
+ SmallPtrSet<Instruction *, 4> DependingInstructions;
+ SmallPtrSet<const BasicBlock *, 4> Visited;
+ for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E;) {
+ Instruction *Inst = &*I++;
+
+ DEBUG(dbgs() << "Visiting: " << *Inst << "\n");
+
+ // First try to peephole Inst. If there is nothing further we can do in
+ // terms of undoing objc-arc-expand, process the next inst.
+ if (tryToPeepholeInstruction(F, Inst, I, DependingInstructions, Visited,
+ TailOkForStoreStrongs))
+ continue;
+
+ // Otherwise, try to undo objc-arc-expand.
+
+ // Don't use GetArgRCIdentityRoot because we don't want to look through bitcasts
+ // and such; to do the replacement, the argument must have type i8*.
+ Value *Arg = cast<CallInst>(Inst)->getArgOperand(0);
+
+ // TODO: Change this to a do-while.
+ for (;;) {
+ // If we're compiling bugpointed code, don't get in trouble.
+ if (!isa<Instruction>(Arg) && !isa<Argument>(Arg))
+ break;
+ // Look through the uses of the pointer.
+ for (Value::use_iterator UI = Arg->use_begin(), UE = Arg->use_end();
+ UI != UE; ) {
+ // Increment UI now, because we may unlink its element.
+ Use &U = *UI++;
+ unsigned OperandNo = U.getOperandNo();
+
+ // If the call's return value dominates a use of the call's argument
+ // value, rewrite the use to use the return value. We check for
+ // reachability here because an unreachable call is considered to
+ // trivially dominate itself, which would lead us to rewriting its
+ // argument in terms of its return value, which would lead to
+ // infinite loops in GetArgRCIdentityRoot.
+ if (DT->isReachableFromEntry(U) && DT->dominates(Inst, U)) {
+ Changed = true;
+ Instruction *Replacement = Inst;
+ Type *UseTy = U.get()->getType();
+ if (PHINode *PHI = dyn_cast<PHINode>(U.getUser())) {
+ // For PHI nodes, insert the bitcast in the predecessor block.
+ unsigned ValNo = PHINode::getIncomingValueNumForOperand(OperandNo);
+ BasicBlock *BB = PHI->getIncomingBlock(ValNo);
+ if (Replacement->getType() != UseTy)
+ Replacement = new BitCastInst(Replacement, UseTy, "",
+ &BB->back());
+ // While we're here, rewrite all edges for this PHI, rather
+ // than just one use at a time, to minimize the number of
+ // bitcasts we emit.
+ for (unsigned i = 0, e = PHI->getNumIncomingValues(); i != e; ++i)
+ if (PHI->getIncomingBlock(i) == BB) {
+ // Keep the UI iterator valid.
+ if (UI != UE &&
+ &PHI->getOperandUse(
+ PHINode::getOperandNumForIncomingValue(i)) == &*UI)
+ ++UI;
+ PHI->setIncomingValue(i, Replacement);
+ }
+ } else {
+ if (Replacement->getType() != UseTy)
+ Replacement = new BitCastInst(Replacement, UseTy, "",
+ cast<Instruction>(U.getUser()));
+ U.set(Replacement);
+ }
+ }
+ }
+
+ // If Arg is a no-op casted pointer, strip one level of casts and iterate.
+ if (const BitCastInst *BI = dyn_cast<BitCastInst>(Arg))
+ Arg = BI->getOperand(0);
+ else if (isa<GEPOperator>(Arg) &&
+ cast<GEPOperator>(Arg)->hasAllZeroIndices())
+ Arg = cast<GEPOperator>(Arg)->getPointerOperand();
+ else if (isa<GlobalAlias>(Arg) &&
+ !cast<GlobalAlias>(Arg)->mayBeOverridden())
+ Arg = cast<GlobalAlias>(Arg)->getAliasee();
+ else
+ break;
+ }
+ }
+
+ // If this function has no escaping allocas or suspicious vararg usage,
+ // objc_storeStrong calls can be marked with the "tail" keyword.
+ if (TailOkForStoreStrongs)
+ for (CallInst *CI : StoreStrongCalls)
+ CI->setTailCall();
+ StoreStrongCalls.clear();
+
+ return Changed;
+}
+
+//===----------------------------------------------------------------------===//
+// Misc Pass Manager
+//===----------------------------------------------------------------------===//
+
+char ObjCARCContract::ID = 0;
+INITIALIZE_PASS_BEGIN(ObjCARCContract, "objc-arc-contract",
+ "ObjC ARC contraction", false, false)
+INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
+INITIALIZE_PASS_END(ObjCARCContract, "objc-arc-contract",
+ "ObjC ARC contraction", false, false)
+
+void ObjCARCContract::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequired<AAResultsWrapperPass>();
+ AU.addRequired<DominatorTreeWrapperPass>();
+ AU.setPreservesCFG();
+}
+
+Pass *llvm::createObjCARCContractPass() { return new ObjCARCContract(); }
+
+bool ObjCARCContract::doInitialization(Module &M) {
+ // If nothing in the Module uses ARC, don't do anything.
+ Run = ModuleHasARC(M);
+ if (!Run)
+ return false;
+
+ EP.init(&M);
+
+ // Initialize RetainRVMarker.
+ RetainRVMarker = nullptr;
+ if (NamedMDNode *NMD =
+ M.getNamedMetadata("clang.arc.retainAutoreleasedReturnValueMarker"))
+ if (NMD->getNumOperands() == 1) {
+ const MDNode *N = NMD->getOperand(0);
+ if (N->getNumOperands() == 1)
+ if (const MDString *S = dyn_cast<MDString>(N->getOperand(0)))
+ RetainRVMarker = S;
+ }
+
+ return false;
+}
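A usage note (not part of the imported sources): a minimal, illustrative sketch of how a client might schedule this pass through the legacy pass manager, and of the module-level marker metadata that doInitialization() looks up. The marker string below is a placeholder, not a real target marker.

  #include "llvm/IR/LegacyPassManager.h"
  #include "llvm/IR/Metadata.h"
  #include "llvm/IR/Module.h"
  #include "llvm/Transforms/ObjCARC.h"
  using namespace llvm;

  static void runARCContract(Module &M) {
    // Optionally provide the marker that ObjCARCContract::doInitialization
    // reads; the string "marker" here is purely hypothetical.
    NamedMDNode *NMD = M.getOrInsertNamedMetadata(
        "clang.arc.retainAutoreleasedReturnValueMarker");
    if (NMD->getNumOperands() == 0)
      NMD->addOperand(MDNode::get(
          M.getContext(), MDString::get(M.getContext(), "marker")));

    // Required analyses (AA, DominatorTree) are scheduled automatically.
    legacy::PassManager PM;
    PM.add(createObjCARCContractPass());
    PM.run(M);
  }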
diff --git a/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCExpand.cpp b/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCExpand.cpp
new file mode 100644
index 0000000..53c19c3
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCExpand.cpp
@@ -0,0 +1,128 @@
+//===- ObjCARCExpand.cpp - ObjC ARC Optimization --------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file defines ObjC ARC optimizations. ARC stands for Automatic
+/// Reference Counting and is a system for managing reference counts for objects
+/// in Objective C.
+///
+/// This specific file deals with early optimizations which perform certain
+/// cleanup operations.
+///
+/// WARNING: This file knows about certain library functions. It recognizes them
+/// by name, and hardwires knowledge of their semantics.
+///
+/// WARNING: This file knows about how certain Objective-C library functions are
+/// used. Naive LLVM IR transformations which would otherwise be
+/// behavior-preserving may break these assumptions.
+///
+//===----------------------------------------------------------------------===//
+
+#include "ObjCARC.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Pass.h"
+#include "llvm/PassAnalysisSupport.h"
+#include "llvm/PassRegistry.h"
+#include "llvm/PassSupport.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+#define DEBUG_TYPE "objc-arc-expand"
+
+namespace llvm {
+ class Module;
+}
+
+using namespace llvm;
+using namespace llvm::objcarc;
+
+namespace {
+ /// \brief Early ARC transformations.
+ class ObjCARCExpand : public FunctionPass {
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+ bool doInitialization(Module &M) override;
+ bool runOnFunction(Function &F) override;
+
+ /// A flag indicating whether this optimization pass should run.
+ bool Run;
+
+ public:
+ static char ID;
+ ObjCARCExpand() : FunctionPass(ID) {
+ initializeObjCARCExpandPass(*PassRegistry::getPassRegistry());
+ }
+ };
+}
+
+char ObjCARCExpand::ID = 0;
+INITIALIZE_PASS(ObjCARCExpand,
+ "objc-arc-expand", "ObjC ARC expansion", false, false)
+
+Pass *llvm::createObjCARCExpandPass() {
+ return new ObjCARCExpand();
+}
+
+void ObjCARCExpand::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesCFG();
+}
+
+bool ObjCARCExpand::doInitialization(Module &M) {
+ Run = ModuleHasARC(M);
+ return false;
+}
+
+bool ObjCARCExpand::runOnFunction(Function &F) {
+ if (!EnableARCOpts)
+ return false;
+
+ // If nothing in the Module uses ARC, don't do anything.
+ if (!Run)
+ return false;
+
+ bool Changed = false;
+
+ DEBUG(dbgs() << "ObjCARCExpand: Visiting Function: " << F.getName() << "\n");
+
+ for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ++I) {
+ Instruction *Inst = &*I;
+
+ DEBUG(dbgs() << "ObjCARCExpand: Visiting: " << *Inst << "\n");
+
+ switch (GetBasicARCInstKind(Inst)) {
+ case ARCInstKind::Retain:
+ case ARCInstKind::RetainRV:
+ case ARCInstKind::Autorelease:
+ case ARCInstKind::AutoreleaseRV:
+ case ARCInstKind::FusedRetainAutorelease:
+ case ARCInstKind::FusedRetainAutoreleaseRV: {
+ // These calls return their argument verbatim, as a low-level
+ // optimization. However, this makes high-level optimizations
+ // harder. Undo any uses of this optimization that the front-end
+ // emitted here. We'll redo them in the contract pass.
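+        //
+        // For example (illustrative): given %y = objc_retain(%x), a store of
+        // %y is rewritten below so that it stores %x directly; the contract
+        // pass can later rewrite such uses back to the call's return value.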
+ Changed = true;
+ Value *Value = cast<CallInst>(Inst)->getArgOperand(0);
+ DEBUG(dbgs() << "ObjCARCExpand: Old = " << *Inst << "\n"
+ " New = " << *Value << "\n");
+ Inst->replaceAllUsesWith(Value);
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+ DEBUG(dbgs() << "ObjCARCExpand: Finished List.\n\n");
+
+ return Changed;
+}
diff --git a/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp b/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
new file mode 100644
index 0000000..f0ee6e2
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/ObjCARC/ObjCARCOpts.cpp
@@ -0,0 +1,2246 @@
+//===- ObjCARCOpts.cpp - ObjC ARC Optimization ----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file defines ObjC ARC optimizations. ARC stands for Automatic
+/// Reference Counting and is a system for managing reference counts for objects
+/// in Objective C.
+///
+/// The optimizations performed include elimination of redundant, partially
+/// redundant, and inconsequential reference count operations, elimination of
+/// redundant weak pointer operations, and numerous minor simplifications.
+///
+/// WARNING: This file knows about certain library functions. It recognizes them
+/// by name, and hardwires knowledge of their semantics.
+///
+/// WARNING: This file knows about how certain Objective-C library functions are
+/// used. Naive LLVM IR transformations which would otherwise be
+/// behavior-preserving may break these assumptions.
+///
+//===----------------------------------------------------------------------===//
+
+#include "ObjCARC.h"
+#include "ARCRuntimeEntryPoints.h"
+#include "BlotMapVector.h"
+#include "DependencyAnalysis.h"
+#include "ProvenanceAnalysis.h"
+#include "PtrState.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/ObjCARCAliasAnalysis.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+using namespace llvm::objcarc;
+
+#define DEBUG_TYPE "objc-arc-opts"
+
+/// \defgroup ARCUtilities Utility declarations/definitions specific to ARC.
+/// @{
+
+/// \brief This is similar to GetRCIdentityRoot but it stops as soon
+/// as it finds a value with multiple uses.
+static const Value *FindSingleUseIdentifiedObject(const Value *Arg) {
+ if (Arg->hasOneUse()) {
+ if (const BitCastInst *BC = dyn_cast<BitCastInst>(Arg))
+ return FindSingleUseIdentifiedObject(BC->getOperand(0));
+ if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Arg))
+ if (GEP->hasAllZeroIndices())
+ return FindSingleUseIdentifiedObject(GEP->getPointerOperand());
+ if (IsForwarding(GetBasicARCInstKind(Arg)))
+ return FindSingleUseIdentifiedObject(
+ cast<CallInst>(Arg)->getArgOperand(0));
+ if (!IsObjCIdentifiedObject(Arg))
+ return nullptr;
+ return Arg;
+ }
+
+  // If we found an identifiable object that has multiple uses, but those uses
+  // are all trivial, we can still consider this to be a single-use value.
+ if (IsObjCIdentifiedObject(Arg)) {
+ for (const User *U : Arg->users())
+ if (!U->use_empty() || GetRCIdentityRoot(U) != Arg)
+ return nullptr;
+
+ return Arg;
+ }
+
+ return nullptr;
+}
+
+/// This is a wrapper around getUnderlyingObjCPtr along the lines of
+/// GetUnderlyingObjects except that it returns early when it sees the first
+/// alloca.
+static inline bool AreAnyUnderlyingObjectsAnAlloca(const Value *V,
+ const DataLayout &DL) {
+ SmallPtrSet<const Value *, 4> Visited;
+ SmallVector<const Value *, 4> Worklist;
+ Worklist.push_back(V);
+ do {
+ const Value *P = Worklist.pop_back_val();
+ P = GetUnderlyingObjCPtr(P, DL);
+
+ if (isa<AllocaInst>(P))
+ return true;
+
+ if (!Visited.insert(P).second)
+ continue;
+
+ if (const SelectInst *SI = dyn_cast<const SelectInst>(P)) {
+ Worklist.push_back(SI->getTrueValue());
+ Worklist.push_back(SI->getFalseValue());
+ continue;
+ }
+
+ if (const PHINode *PN = dyn_cast<const PHINode>(P)) {
+ for (Value *IncValue : PN->incoming_values())
+ Worklist.push_back(IncValue);
+ continue;
+ }
+ } while (!Worklist.empty());
+
+ return false;
+}
+
+
+/// @}
+///
+/// \defgroup ARCOpt ARC Optimization.
+/// @{
+
+// TODO: On code like this:
+//
+// objc_retain(%x)
+// stuff_that_cannot_release()
+// objc_autorelease(%x)
+// stuff_that_cannot_release()
+// objc_retain(%x)
+// stuff_that_cannot_release()
+// objc_autorelease(%x)
+//
+// The second retain and autorelease can be deleted.
+
+// TODO: It should be possible to delete
+// objc_autoreleasePoolPush and objc_autoreleasePoolPop
+// pairs if nothing is actually autoreleased between them. Also, autorelease
+// calls followed by objc_autoreleasePoolPop calls (perhaps in ObjC++ code
+// after inlining) can be turned into plain release calls.
+
+// TODO: Critical-edge splitting. If the optimal insertion point is
+// a critical edge, the current algorithm has to fail, because it doesn't
+// know how to split edges. It should be possible to make the optimizer
+// think in terms of edges, rather than blocks, and then split critical
+// edges on demand.
+
+// TODO: OptimizeSequences could be generalized to be interprocedural.
+
+// TODO: Recognize that a bunch of other objc runtime calls have
+// non-escaping arguments and non-releasing arguments, and may be
+// non-autoreleasing.
+
+// TODO: Sink autorelease calls as far as possible. Unfortunately we
+// usually can't sink them past other calls, which would be the main
+// case where it would be useful.
+
+// TODO: The pointer returned from objc_loadWeakRetained is retained.
+
+// TODO: Delete release+retain pairs (rare).
+
+STATISTIC(NumNoops, "Number of no-op objc calls eliminated");
+STATISTIC(NumPartialNoops, "Number of partially no-op objc calls eliminated");
+STATISTIC(NumAutoreleases, "Number of autoreleases converted to releases");
+STATISTIC(NumRets, "Number of return value forwarding "
+ "retain+autoreleases eliminated");
+STATISTIC(NumRRs, "Number of retain+release paths eliminated");
+STATISTIC(NumPeeps, "Number of calls peephole-optimized");
+#ifndef NDEBUG
+STATISTIC(NumRetainsBeforeOpt,
+ "Number of retains before optimization");
+STATISTIC(NumReleasesBeforeOpt,
+ "Number of releases before optimization");
+STATISTIC(NumRetainsAfterOpt,
+ "Number of retains after optimization");
+STATISTIC(NumReleasesAfterOpt,
+ "Number of releases after optimization");
+#endif
+
+namespace {
+ /// \brief Per-BasicBlock state.
+ class BBState {
+ /// The number of unique control paths from the entry which can reach this
+ /// block.
+ unsigned TopDownPathCount;
+
+ /// The number of unique control paths to exits from this block.
+ unsigned BottomUpPathCount;
+
+ /// The top-down traversal uses this to record information known about a
+ /// pointer at the bottom of each block.
+ BlotMapVector<const Value *, TopDownPtrState> PerPtrTopDown;
+
+ /// The bottom-up traversal uses this to record information known about a
+ /// pointer at the top of each block.
+ BlotMapVector<const Value *, BottomUpPtrState> PerPtrBottomUp;
+
+ /// Effective predecessors of the current block ignoring ignorable edges and
+ /// ignored backedges.
+ SmallVector<BasicBlock *, 2> Preds;
+
+ /// Effective successors of the current block ignoring ignorable edges and
+ /// ignored backedges.
+ SmallVector<BasicBlock *, 2> Succs;
+
+ public:
+ static const unsigned OverflowOccurredValue;
+
+ BBState() : TopDownPathCount(0), BottomUpPathCount(0) { }
+
+ typedef decltype(PerPtrTopDown)::iterator top_down_ptr_iterator;
+ typedef decltype(PerPtrTopDown)::const_iterator const_top_down_ptr_iterator;
+
+ top_down_ptr_iterator top_down_ptr_begin() { return PerPtrTopDown.begin(); }
+ top_down_ptr_iterator top_down_ptr_end() { return PerPtrTopDown.end(); }
+ const_top_down_ptr_iterator top_down_ptr_begin() const {
+ return PerPtrTopDown.begin();
+ }
+ const_top_down_ptr_iterator top_down_ptr_end() const {
+ return PerPtrTopDown.end();
+ }
+ bool hasTopDownPtrs() const {
+ return !PerPtrTopDown.empty();
+ }
+
+ typedef decltype(PerPtrBottomUp)::iterator bottom_up_ptr_iterator;
+ typedef decltype(
+ PerPtrBottomUp)::const_iterator const_bottom_up_ptr_iterator;
+
+ bottom_up_ptr_iterator bottom_up_ptr_begin() {
+ return PerPtrBottomUp.begin();
+ }
+ bottom_up_ptr_iterator bottom_up_ptr_end() { return PerPtrBottomUp.end(); }
+ const_bottom_up_ptr_iterator bottom_up_ptr_begin() const {
+ return PerPtrBottomUp.begin();
+ }
+ const_bottom_up_ptr_iterator bottom_up_ptr_end() const {
+ return PerPtrBottomUp.end();
+ }
+ bool hasBottomUpPtrs() const {
+ return !PerPtrBottomUp.empty();
+ }
+
+ /// Mark this block as being an entry block, which has one path from the
+ /// entry by definition.
+ void SetAsEntry() { TopDownPathCount = 1; }
+
+ /// Mark this block as being an exit block, which has one path to an exit by
+ /// definition.
+ void SetAsExit() { BottomUpPathCount = 1; }
+
+ /// Attempt to find the PtrState object describing the top down state for
+ /// pointer Arg. Return a new initialized PtrState describing the top down
+ /// state for Arg if we do not find one.
+ TopDownPtrState &getPtrTopDownState(const Value *Arg) {
+ return PerPtrTopDown[Arg];
+ }
+
+ /// Attempt to find the PtrState object describing the bottom up state for
+ /// pointer Arg. Return a new initialized PtrState describing the bottom up
+ /// state for Arg if we do not find one.
+ BottomUpPtrState &getPtrBottomUpState(const Value *Arg) {
+ return PerPtrBottomUp[Arg];
+ }
+
+ /// Attempt to find the PtrState object describing the bottom up state for
+ /// pointer Arg.
+ bottom_up_ptr_iterator findPtrBottomUpState(const Value *Arg) {
+ return PerPtrBottomUp.find(Arg);
+ }
+
+ void clearBottomUpPointers() {
+ PerPtrBottomUp.clear();
+ }
+
+ void clearTopDownPointers() {
+ PerPtrTopDown.clear();
+ }
+
+ void InitFromPred(const BBState &Other);
+ void InitFromSucc(const BBState &Other);
+ void MergePred(const BBState &Other);
+ void MergeSucc(const BBState &Other);
+
+ /// Compute the number of possible unique paths from an entry to an exit
+ /// which pass through this block. This is only valid after both the
+ /// top-down and bottom-up traversals are complete.
+ ///
+ /// Returns true if overflow occurred. Returns false if overflow did not
+ /// occur.
+ bool GetAllPathCountWithOverflow(unsigned &PathCount) const {
+ if (TopDownPathCount == OverflowOccurredValue ||
+ BottomUpPathCount == OverflowOccurredValue)
+ return true;
+ unsigned long long Product =
+ (unsigned long long)TopDownPathCount*BottomUpPathCount;
+      // Overflow occurred if any of the upper 32 bits of Product are set, or
+      // if the lower 32 bits of Product are all set (i.e. PathCount would
+      // equal OverflowOccurredValue).
+ return (Product >> 32) ||
+ ((PathCount = Product) == OverflowOccurredValue);
+ }
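+
+    // Worked example (illustrative): TopDownPathCount = 0x10000 and
+    // BottomUpPathCount = 0x10000 give Product = 0x100000000, whose upper
+    // 32 bits are non-zero, so overflow is reported. TopDownPathCount =
+    // 0x10001 and BottomUpPathCount = 0xffff give Product = 0xffffffff,
+    // which equals OverflowOccurredValue and is conservatively treated as
+    // overflow even though the product fits in 32 bits.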
+
+ // Specialized CFG utilities.
+ typedef SmallVectorImpl<BasicBlock *>::const_iterator edge_iterator;
+ edge_iterator pred_begin() const { return Preds.begin(); }
+ edge_iterator pred_end() const { return Preds.end(); }
+ edge_iterator succ_begin() const { return Succs.begin(); }
+ edge_iterator succ_end() const { return Succs.end(); }
+
+ void addSucc(BasicBlock *Succ) { Succs.push_back(Succ); }
+ void addPred(BasicBlock *Pred) { Preds.push_back(Pred); }
+
+ bool isExit() const { return Succs.empty(); }
+ };
+
+ const unsigned BBState::OverflowOccurredValue = 0xffffffff;
+}
+
+namespace llvm {
+raw_ostream &operator<<(raw_ostream &OS,
+ BBState &BBState) LLVM_ATTRIBUTE_UNUSED;
+}
+
+void BBState::InitFromPred(const BBState &Other) {
+ PerPtrTopDown = Other.PerPtrTopDown;
+ TopDownPathCount = Other.TopDownPathCount;
+}
+
+void BBState::InitFromSucc(const BBState &Other) {
+ PerPtrBottomUp = Other.PerPtrBottomUp;
+ BottomUpPathCount = Other.BottomUpPathCount;
+}
+
+/// The top-down traversal uses this to merge information about predecessors to
+/// form the initial state for a new block.
+void BBState::MergePred(const BBState &Other) {
+ if (TopDownPathCount == OverflowOccurredValue)
+ return;
+
+ // Other.TopDownPathCount can be 0, in which case it is either dead or a
+ // loop backedge. Loop backedges are special.
+ TopDownPathCount += Other.TopDownPathCount;
+
+  // In order to be consistent, we clear the top down pointers when
+  // TopDownPathCount becomes OverflowOccurredValue as a result of the
+  // addition, even though "true" overflow has not occurred.
+ if (TopDownPathCount == OverflowOccurredValue) {
+ clearTopDownPointers();
+ return;
+ }
+
+ // Check for overflow. If we have overflow, fall back to conservative
+ // behavior.
+ if (TopDownPathCount < Other.TopDownPathCount) {
+ TopDownPathCount = OverflowOccurredValue;
+ clearTopDownPointers();
+ return;
+ }
+
+ // For each entry in the other set, if our set has an entry with the same key,
+ // merge the entries. Otherwise, copy the entry and merge it with an empty
+ // entry.
+ for (auto MI = Other.top_down_ptr_begin(), ME = Other.top_down_ptr_end();
+ MI != ME; ++MI) {
+ auto Pair = PerPtrTopDown.insert(*MI);
+ Pair.first->second.Merge(Pair.second ? TopDownPtrState() : MI->second,
+ /*TopDown=*/true);
+ }
+
+ // For each entry in our set, if the other set doesn't have an entry with the
+ // same key, force it to merge with an empty entry.
+ for (auto MI = top_down_ptr_begin(), ME = top_down_ptr_end(); MI != ME; ++MI)
+ if (Other.PerPtrTopDown.find(MI->first) == Other.PerPtrTopDown.end())
+ MI->second.Merge(TopDownPtrState(), /*TopDown=*/true);
+}
+
+/// The bottom-up traversal uses this to merge information about successors to
+/// form the initial state for a new block.
+void BBState::MergeSucc(const BBState &Other) {
+ if (BottomUpPathCount == OverflowOccurredValue)
+ return;
+
+ // Other.BottomUpPathCount can be 0, in which case it is either dead or a
+ // loop backedge. Loop backedges are special.
+ BottomUpPathCount += Other.BottomUpPathCount;
+
+  // In order to be consistent, we clear the bottom up pointers when
+  // BottomUpPathCount becomes OverflowOccurredValue as a result of the
+  // addition, even though "true" overflow has not occurred.
+ if (BottomUpPathCount == OverflowOccurredValue) {
+ clearBottomUpPointers();
+ return;
+ }
+
+ // Check for overflow. If we have overflow, fall back to conservative
+ // behavior.
+ if (BottomUpPathCount < Other.BottomUpPathCount) {
+ BottomUpPathCount = OverflowOccurredValue;
+ clearBottomUpPointers();
+ return;
+ }
+
+ // For each entry in the other set, if our set has an entry with the
+ // same key, merge the entries. Otherwise, copy the entry and merge
+ // it with an empty entry.
+ for (auto MI = Other.bottom_up_ptr_begin(), ME = Other.bottom_up_ptr_end();
+ MI != ME; ++MI) {
+ auto Pair = PerPtrBottomUp.insert(*MI);
+ Pair.first->second.Merge(Pair.second ? BottomUpPtrState() : MI->second,
+ /*TopDown=*/false);
+ }
+
+ // For each entry in our set, if the other set doesn't have an entry
+ // with the same key, force it to merge with an empty entry.
+ for (auto MI = bottom_up_ptr_begin(), ME = bottom_up_ptr_end(); MI != ME;
+ ++MI)
+ if (Other.PerPtrBottomUp.find(MI->first) == Other.PerPtrBottomUp.end())
+ MI->second.Merge(BottomUpPtrState(), /*TopDown=*/false);
+}
+
+raw_ostream &llvm::operator<<(raw_ostream &OS, BBState &BBInfo) {
+ // Dump the pointers we are tracking.
+ OS << " TopDown State:\n";
+ if (!BBInfo.hasTopDownPtrs()) {
+ DEBUG(llvm::dbgs() << " NONE!\n");
+ } else {
+ for (auto I = BBInfo.top_down_ptr_begin(), E = BBInfo.top_down_ptr_end();
+ I != E; ++I) {
+ const PtrState &P = I->second;
+ OS << " Ptr: " << *I->first
+ << "\n KnownSafe: " << (P.IsKnownSafe()?"true":"false")
+ << "\n ImpreciseRelease: "
+ << (P.IsTrackingImpreciseReleases()?"true":"false") << "\n"
+ << " HasCFGHazards: "
+ << (P.IsCFGHazardAfflicted()?"true":"false") << "\n"
+ << " KnownPositive: "
+ << (P.HasKnownPositiveRefCount()?"true":"false") << "\n"
+ << " Seq: "
+ << P.GetSeq() << "\n";
+ }
+ }
+
+ OS << " BottomUp State:\n";
+ if (!BBInfo.hasBottomUpPtrs()) {
+ DEBUG(llvm::dbgs() << " NONE!\n");
+ } else {
+ for (auto I = BBInfo.bottom_up_ptr_begin(), E = BBInfo.bottom_up_ptr_end();
+ I != E; ++I) {
+ const PtrState &P = I->second;
+ OS << " Ptr: " << *I->first
+ << "\n KnownSafe: " << (P.IsKnownSafe()?"true":"false")
+ << "\n ImpreciseRelease: "
+ << (P.IsTrackingImpreciseReleases()?"true":"false") << "\n"
+ << " HasCFGHazards: "
+ << (P.IsCFGHazardAfflicted()?"true":"false") << "\n"
+ << " KnownPositive: "
+ << (P.HasKnownPositiveRefCount()?"true":"false") << "\n"
+ << " Seq: "
+ << P.GetSeq() << "\n";
+ }
+ }
+
+ return OS;
+}
+
+namespace {
+
+ /// \brief The main ARC optimization pass.
+ class ObjCARCOpt : public FunctionPass {
+ bool Changed;
+ ProvenanceAnalysis PA;
+
+ /// A cache of references to runtime entry point constants.
+ ARCRuntimeEntryPoints EP;
+
+ /// A cache of MDKinds that can be passed into other functions to propagate
+ /// MDKind identifiers.
+ ARCMDKindCache MDKindCache;
+
+ // This is used to track if a pointer is stored into an alloca.
+ DenseSet<const Value *> MultiOwnersSet;
+
+ /// A flag indicating whether this optimization pass should run.
+ bool Run;
+
+ /// Flags which determine whether each of the interesting runtime functions
+ /// is in fact used in the current function.
+ unsigned UsedInThisFunction;
+
+ bool OptimizeRetainRVCall(Function &F, Instruction *RetainRV);
+ void OptimizeAutoreleaseRVCall(Function &F, Instruction *AutoreleaseRV,
+ ARCInstKind &Class);
+ void OptimizeIndividualCalls(Function &F);
+
+ void CheckForCFGHazards(const BasicBlock *BB,
+ DenseMap<const BasicBlock *, BBState> &BBStates,
+ BBState &MyStates) const;
+ bool VisitInstructionBottomUp(Instruction *Inst, BasicBlock *BB,
+ BlotMapVector<Value *, RRInfo> &Retains,
+ BBState &MyStates);
+ bool VisitBottomUp(BasicBlock *BB,
+ DenseMap<const BasicBlock *, BBState> &BBStates,
+ BlotMapVector<Value *, RRInfo> &Retains);
+ bool VisitInstructionTopDown(Instruction *Inst,
+ DenseMap<Value *, RRInfo> &Releases,
+ BBState &MyStates);
+ bool VisitTopDown(BasicBlock *BB,
+ DenseMap<const BasicBlock *, BBState> &BBStates,
+ DenseMap<Value *, RRInfo> &Releases);
+ bool Visit(Function &F, DenseMap<const BasicBlock *, BBState> &BBStates,
+ BlotMapVector<Value *, RRInfo> &Retains,
+ DenseMap<Value *, RRInfo> &Releases);
+
+ void MoveCalls(Value *Arg, RRInfo &RetainsToMove, RRInfo &ReleasesToMove,
+ BlotMapVector<Value *, RRInfo> &Retains,
+ DenseMap<Value *, RRInfo> &Releases,
+ SmallVectorImpl<Instruction *> &DeadInsts, Module *M);
+
+ bool
+ PairUpRetainsAndReleases(DenseMap<const BasicBlock *, BBState> &BBStates,
+ BlotMapVector<Value *, RRInfo> &Retains,
+ DenseMap<Value *, RRInfo> &Releases, Module *M,
+ SmallVectorImpl<Instruction *> &NewRetains,
+ SmallVectorImpl<Instruction *> &NewReleases,
+ SmallVectorImpl<Instruction *> &DeadInsts,
+ RRInfo &RetainsToMove, RRInfo &ReleasesToMove,
+ Value *Arg, bool KnownSafe,
+ bool &AnyPairsCompletelyEliminated);
+
+ bool PerformCodePlacement(DenseMap<const BasicBlock *, BBState> &BBStates,
+ BlotMapVector<Value *, RRInfo> &Retains,
+ DenseMap<Value *, RRInfo> &Releases, Module *M);
+
+ void OptimizeWeakCalls(Function &F);
+
+ bool OptimizeSequences(Function &F);
+
+ void OptimizeReturns(Function &F);
+
+#ifndef NDEBUG
+ void GatherStatistics(Function &F, bool AfterOptimization = false);
+#endif
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+ bool doInitialization(Module &M) override;
+ bool runOnFunction(Function &F) override;
+ void releaseMemory() override;
+
+ public:
+ static char ID;
+ ObjCARCOpt() : FunctionPass(ID) {
+ initializeObjCARCOptPass(*PassRegistry::getPassRegistry());
+ }
+ };
+}
+
+char ObjCARCOpt::ID = 0;
+INITIALIZE_PASS_BEGIN(ObjCARCOpt,
+ "objc-arc", "ObjC ARC optimization", false, false)
+INITIALIZE_PASS_DEPENDENCY(ObjCARCAAWrapperPass)
+INITIALIZE_PASS_END(ObjCARCOpt,
+ "objc-arc", "ObjC ARC optimization", false, false)
+
+Pass *llvm::createObjCARCOptPass() {
+ return new ObjCARCOpt();
+}
+
+void ObjCARCOpt::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequired<ObjCARCAAWrapperPass>();
+ AU.addRequired<AAResultsWrapperPass>();
+ // ARC optimization doesn't currently split critical edges.
+ AU.setPreservesCFG();
+}
+
+/// Turn objc_retainAutoreleasedReturnValue into objc_retain if the operand is
+/// not a return value. Or, if it can be paired with an
+/// objc_autoreleaseReturnValue, delete the pair and return true.
+bool
+ObjCARCOpt::OptimizeRetainRVCall(Function &F, Instruction *RetainRV) {
+ // Check for the argument being from an immediately preceding call or invoke.
+ const Value *Arg = GetArgRCIdentityRoot(RetainRV);
+ ImmutableCallSite CS(Arg);
+ if (const Instruction *Call = CS.getInstruction()) {
+ if (Call->getParent() == RetainRV->getParent()) {
+ BasicBlock::const_iterator I(Call);
+ ++I;
+ while (IsNoopInstruction(&*I))
+ ++I;
+ if (&*I == RetainRV)
+ return false;
+ } else if (const InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
+ BasicBlock *RetainRVParent = RetainRV->getParent();
+ if (II->getNormalDest() == RetainRVParent) {
+ BasicBlock::const_iterator I = RetainRVParent->begin();
+ while (IsNoopInstruction(&*I))
+ ++I;
+ if (&*I == RetainRV)
+ return false;
+ }
+ }
+ }
+
+ // Check for being preceded by an objc_autoreleaseReturnValue on the same
+ // pointer. In this case, we can delete the pair.
+ BasicBlock::iterator I = RetainRV->getIterator(),
+ Begin = RetainRV->getParent()->begin();
+ if (I != Begin) {
+ do
+ --I;
+ while (I != Begin && IsNoopInstruction(&*I));
+ if (GetBasicARCInstKind(&*I) == ARCInstKind::AutoreleaseRV &&
+ GetArgRCIdentityRoot(&*I) == Arg) {
+ Changed = true;
+ ++NumPeeps;
+
+ DEBUG(dbgs() << "Erasing autoreleaseRV,retainRV pair: " << *I << "\n"
+ << "Erasing " << *RetainRV << "\n");
+
+ EraseInstruction(&*I);
+ EraseInstruction(RetainRV);
+ return true;
+ }
+ }
+
+  // Turn it into a plain objc_retain.
+ Changed = true;
+ ++NumPeeps;
+
+ DEBUG(dbgs() << "Transforming objc_retainAutoreleasedReturnValue => "
+ "objc_retain since the operand is not a return value.\n"
+ "Old = " << *RetainRV << "\n");
+
+ Constant *NewDecl = EP.get(ARCRuntimeEntryPointKind::Retain);
+ cast<CallInst>(RetainRV)->setCalledFunction(NewDecl);
+
+ DEBUG(dbgs() << "New = " << *RetainRV << "\n");
+
+ return false;
+}
+
+/// Turn objc_autoreleaseReturnValue into objc_autorelease if the result is not
+/// used as a return value.
+void ObjCARCOpt::OptimizeAutoreleaseRVCall(Function &F,
+ Instruction *AutoreleaseRV,
+ ARCInstKind &Class) {
+ // Check for a return of the pointer value.
+ const Value *Ptr = GetArgRCIdentityRoot(AutoreleaseRV);
+ SmallVector<const Value *, 2> Users;
+ Users.push_back(Ptr);
+ do {
+ Ptr = Users.pop_back_val();
+ for (const User *U : Ptr->users()) {
+ if (isa<ReturnInst>(U) || GetBasicARCInstKind(U) == ARCInstKind::RetainRV)
+ return;
+ if (isa<BitCastInst>(U))
+ Users.push_back(U);
+ }
+ } while (!Users.empty());
+
+ Changed = true;
+ ++NumPeeps;
+
+ DEBUG(dbgs() << "Transforming objc_autoreleaseReturnValue => "
+ "objc_autorelease since its operand is not used as a return "
+ "value.\n"
+ "Old = " << *AutoreleaseRV << "\n");
+
+ CallInst *AutoreleaseRVCI = cast<CallInst>(AutoreleaseRV);
+ Constant *NewDecl = EP.get(ARCRuntimeEntryPointKind::Autorelease);
+ AutoreleaseRVCI->setCalledFunction(NewDecl);
+ AutoreleaseRVCI->setTailCall(false); // Never tail call objc_autorelease.
+ Class = ARCInstKind::Autorelease;
+
+ DEBUG(dbgs() << "New: " << *AutoreleaseRV << "\n");
+
+}
+
+/// Visit each call, one at a time, and make simplifications without doing any
+/// additional analysis.
+void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
+ DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeIndividualCalls ==\n");
+ // Reset all the flags in preparation for recomputing them.
+ UsedInThisFunction = 0;
+
+ // Visit all objc_* calls in F.
+ for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
+ Instruction *Inst = &*I++;
+
+ ARCInstKind Class = GetBasicARCInstKind(Inst);
+
+ DEBUG(dbgs() << "Visiting: Class: " << Class << "; " << *Inst << "\n");
+
+ switch (Class) {
+ default: break;
+
+ // Delete no-op casts. These function calls have special semantics, but
+ // the semantics are entirely implemented via lowering in the front-end,
+ // so by the time they reach the optimizer, they are just no-op calls
+ // which return their argument.
+ //
+ // There are gray areas here, as the ability to cast reference-counted
+    // pointers to raw void* and back allows code to break ARC assumptions;
+    // however, these are currently considered to be unimportant.
+ case ARCInstKind::NoopCast:
+ Changed = true;
+ ++NumNoops;
+ DEBUG(dbgs() << "Erasing no-op cast: " << *Inst << "\n");
+ EraseInstruction(Inst);
+ continue;
+
+ // If the pointer-to-weak-pointer is null, it's undefined behavior.
+ case ARCInstKind::StoreWeak:
+ case ARCInstKind::LoadWeak:
+ case ARCInstKind::LoadWeakRetained:
+ case ARCInstKind::InitWeak:
+ case ARCInstKind::DestroyWeak: {
+ CallInst *CI = cast<CallInst>(Inst);
+ if (IsNullOrUndef(CI->getArgOperand(0))) {
+ Changed = true;
+ Type *Ty = CI->getArgOperand(0)->getType();
+ new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
+ Constant::getNullValue(Ty),
+ CI);
+ llvm::Value *NewValue = UndefValue::get(CI->getType());
+ DEBUG(dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
+ "\nOld = " << *CI << "\nNew = " << *NewValue << "\n");
+ CI->replaceAllUsesWith(NewValue);
+ CI->eraseFromParent();
+ continue;
+ }
+ break;
+ }
+ case ARCInstKind::CopyWeak:
+ case ARCInstKind::MoveWeak: {
+ CallInst *CI = cast<CallInst>(Inst);
+ if (IsNullOrUndef(CI->getArgOperand(0)) ||
+ IsNullOrUndef(CI->getArgOperand(1))) {
+ Changed = true;
+ Type *Ty = CI->getArgOperand(0)->getType();
+ new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
+ Constant::getNullValue(Ty),
+ CI);
+
+ llvm::Value *NewValue = UndefValue::get(CI->getType());
+ DEBUG(dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
+ "\nOld = " << *CI << "\nNew = " << *NewValue << "\n");
+
+ CI->replaceAllUsesWith(NewValue);
+ CI->eraseFromParent();
+ continue;
+ }
+ break;
+ }
+ case ARCInstKind::RetainRV:
+ if (OptimizeRetainRVCall(F, Inst))
+ continue;
+ break;
+ case ARCInstKind::AutoreleaseRV:
+ OptimizeAutoreleaseRVCall(F, Inst, Class);
+ break;
+ }
+
+ // objc_autorelease(x) -> objc_release(x) if x is otherwise unused.
+ if (IsAutorelease(Class) && Inst->use_empty()) {
+ CallInst *Call = cast<CallInst>(Inst);
+ const Value *Arg = Call->getArgOperand(0);
+ Arg = FindSingleUseIdentifiedObject(Arg);
+ if (Arg) {
+ Changed = true;
+ ++NumAutoreleases;
+
+ // Create the declaration lazily.
+ LLVMContext &C = Inst->getContext();
+
+ Constant *Decl = EP.get(ARCRuntimeEntryPointKind::Release);
+ CallInst *NewCall = CallInst::Create(Decl, Call->getArgOperand(0), "",
+ Call);
+ NewCall->setMetadata(MDKindCache.get(ARCMDKindID::ImpreciseRelease),
+ MDNode::get(C, None));
+
+ DEBUG(dbgs() << "Replacing autorelease{,RV}(x) with objc_release(x) "
+ "since x is otherwise unused.\nOld: " << *Call << "\nNew: "
+ << *NewCall << "\n");
+
+ EraseInstruction(Call);
+ Inst = NewCall;
+ Class = ARCInstKind::Release;
+ }
+ }
+
+ // For functions which can never be passed stack arguments, add
+ // a tail keyword.
+ if (IsAlwaysTail(Class)) {
+ Changed = true;
+ DEBUG(dbgs() << "Adding tail keyword to function since it can never be "
+ "passed stack args: " << *Inst << "\n");
+ cast<CallInst>(Inst)->setTailCall();
+ }
+
+ // Ensure that functions that can never have a "tail" keyword due to the
+ // semantics of ARC truly do not do so.
+ if (IsNeverTail(Class)) {
+ Changed = true;
+ DEBUG(dbgs() << "Removing tail keyword from function: " << *Inst <<
+ "\n");
+ cast<CallInst>(Inst)->setTailCall(false);
+ }
+
+ // Set nounwind as needed.
+ if (IsNoThrow(Class)) {
+ Changed = true;
+ DEBUG(dbgs() << "Found no throw class. Setting nounwind on: " << *Inst
+ << "\n");
+ cast<CallInst>(Inst)->setDoesNotThrow();
+ }
+
+ if (!IsNoopOnNull(Class)) {
+ UsedInThisFunction |= 1 << unsigned(Class);
+ continue;
+ }
+
+ const Value *Arg = GetArgRCIdentityRoot(Inst);
+
+ // ARC calls with null are no-ops. Delete them.
+ if (IsNullOrUndef(Arg)) {
+ Changed = true;
+ ++NumNoops;
+ DEBUG(dbgs() << "ARC calls with null are no-ops. Erasing: " << *Inst
+ << "\n");
+ EraseInstruction(Inst);
+ continue;
+ }
+
+ // Keep track of which of retain, release, autorelease, and retain_block
+ // are actually present in this function.
+ UsedInThisFunction |= 1 << unsigned(Class);
+
+ // If Arg is a PHI, and one or more incoming values to the
+ // PHI are null, and the call is control-equivalent to the PHI, and there
+ // are no relevant side effects between the PHI and the call, the call
+ // could be pushed up to just those paths with non-null incoming values.
+ // For now, don't bother splitting critical edges for this.
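+    //
+    // For example (illustrative): with %p = phi i8* [ null, %bb1 ], [ %x, %bb2 ],
+    // an objc_release(%p) whose only relevant dependency is the PHI can be
+    // cloned into %bb2 as objc_release(%x) and the original call erased, since
+    // the call is a no-op along the null path.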
+ SmallVector<std::pair<Instruction *, const Value *>, 4> Worklist;
+ Worklist.push_back(std::make_pair(Inst, Arg));
+ do {
+ std::pair<Instruction *, const Value *> Pair = Worklist.pop_back_val();
+ Inst = Pair.first;
+ Arg = Pair.second;
+
+ const PHINode *PN = dyn_cast<PHINode>(Arg);
+ if (!PN) continue;
+
+ // Determine if the PHI has any null operands, or any incoming
+ // critical edges.
+ bool HasNull = false;
+ bool HasCriticalEdges = false;
+ for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
+ Value *Incoming =
+ GetRCIdentityRoot(PN->getIncomingValue(i));
+ if (IsNullOrUndef(Incoming))
+ HasNull = true;
+ else if (cast<TerminatorInst>(PN->getIncomingBlock(i)->back())
+ .getNumSuccessors() != 1) {
+ HasCriticalEdges = true;
+ break;
+ }
+ }
+ // If we have null operands and no critical edges, optimize.
+ if (!HasCriticalEdges && HasNull) {
+ SmallPtrSet<Instruction *, 4> DependingInstructions;
+ SmallPtrSet<const BasicBlock *, 4> Visited;
+
+ // Check that there is nothing that cares about the reference
+ // count between the call and the phi.
+ switch (Class) {
+ case ARCInstKind::Retain:
+ case ARCInstKind::RetainBlock:
+ // These can always be moved up.
+ break;
+ case ARCInstKind::Release:
+ // These can't be moved across things that care about the retain
+ // count.
+ FindDependencies(NeedsPositiveRetainCount, Arg,
+ Inst->getParent(), Inst,
+ DependingInstructions, Visited, PA);
+ break;
+ case ARCInstKind::Autorelease:
+ // These can't be moved across autorelease pool scope boundaries.
+ FindDependencies(AutoreleasePoolBoundary, Arg,
+ Inst->getParent(), Inst,
+ DependingInstructions, Visited, PA);
+ break;
+ case ARCInstKind::RetainRV:
+ case ARCInstKind::AutoreleaseRV:
+ // Don't move these; the RV optimization depends on the autoreleaseRV
+ // being tail called, and the retainRV being immediately after a call
+ // (which might still happen if we get lucky with codegen layout, but
+ // it's not worth taking the chance).
+ continue;
+ default:
+ llvm_unreachable("Invalid dependence flavor");
+ }
+
+ if (DependingInstructions.size() == 1 &&
+ *DependingInstructions.begin() == PN) {
+ Changed = true;
+ ++NumPartialNoops;
+ // Clone the call into each predecessor that has a non-null value.
+ CallInst *CInst = cast<CallInst>(Inst);
+ Type *ParamTy = CInst->getArgOperand(0)->getType();
+ for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
+ Value *Incoming =
+ GetRCIdentityRoot(PN->getIncomingValue(i));
+ if (!IsNullOrUndef(Incoming)) {
+ CallInst *Clone = cast<CallInst>(CInst->clone());
+ Value *Op = PN->getIncomingValue(i);
+ Instruction *InsertPos = &PN->getIncomingBlock(i)->back();
+ if (Op->getType() != ParamTy)
+ Op = new BitCastInst(Op, ParamTy, "", InsertPos);
+ Clone->setArgOperand(0, Op);
+ Clone->insertBefore(InsertPos);
+
+ DEBUG(dbgs() << "Cloning "
+ << *CInst << "\n"
+ "And inserting clone at " << *InsertPos << "\n");
+ Worklist.push_back(std::make_pair(Clone, Incoming));
+ }
+ }
+ // Erase the original call.
+ DEBUG(dbgs() << "Erasing: " << *CInst << "\n");
+ EraseInstruction(CInst);
+ continue;
+ }
+ }
+ } while (!Worklist.empty());
+ }
+}
+
+/// If we have a top down pointer in the S_Use state, make sure that there are
+/// no CFG hazards by checking the states of various bottom up pointers.
+static void CheckForUseCFGHazard(const Sequence SuccSSeq,
+ const bool SuccSRRIKnownSafe,
+ TopDownPtrState &S,
+ bool &SomeSuccHasSame,
+ bool &AllSuccsHaveSame,
+ bool &NotAllSeqEqualButKnownSafe,
+ bool &ShouldContinue) {
+ switch (SuccSSeq) {
+ case S_CanRelease: {
+ if (!S.IsKnownSafe() && !SuccSRRIKnownSafe) {
+ S.ClearSequenceProgress();
+ break;
+ }
+ S.SetCFGHazardAfflicted(true);
+ ShouldContinue = true;
+ break;
+ }
+ case S_Use:
+ SomeSuccHasSame = true;
+ break;
+ case S_Stop:
+ case S_Release:
+ case S_MovableRelease:
+ if (!S.IsKnownSafe() && !SuccSRRIKnownSafe)
+ AllSuccsHaveSame = false;
+ else
+ NotAllSeqEqualButKnownSafe = true;
+ break;
+ case S_Retain:
+ llvm_unreachable("bottom-up pointer in retain state!");
+ case S_None:
+ llvm_unreachable("This should have been handled earlier.");
+ }
+}
+
+/// If we have a Top Down pointer in the S_CanRelease state, make sure that
+/// there are no CFG hazards by checking the states of various bottom up
+/// pointers.
+static void CheckForCanReleaseCFGHazard(const Sequence SuccSSeq,
+ const bool SuccSRRIKnownSafe,
+ TopDownPtrState &S,
+ bool &SomeSuccHasSame,
+ bool &AllSuccsHaveSame,
+ bool &NotAllSeqEqualButKnownSafe) {
+ switch (SuccSSeq) {
+ case S_CanRelease:
+ SomeSuccHasSame = true;
+ break;
+ case S_Stop:
+ case S_Release:
+ case S_MovableRelease:
+ case S_Use:
+ if (!S.IsKnownSafe() && !SuccSRRIKnownSafe)
+ AllSuccsHaveSame = false;
+ else
+ NotAllSeqEqualButKnownSafe = true;
+ break;
+ case S_Retain:
+ llvm_unreachable("bottom-up pointer in retain state!");
+ case S_None:
+ llvm_unreachable("This should have been handled earlier.");
+ }
+}
+
+/// Check for critical edges, loop boundaries, irreducible control flow, or
+/// other CFG structures where moving code across the edge would result in it
+/// being executed more often.
+void
+ObjCARCOpt::CheckForCFGHazards(const BasicBlock *BB,
+ DenseMap<const BasicBlock *, BBState> &BBStates,
+ BBState &MyStates) const {
+  // If any top-down local use (S_Use) or possible decrement (S_CanRelease) has
+  // a successor which is earlier in the sequence, forget it.
+ for (auto I = MyStates.top_down_ptr_begin(), E = MyStates.top_down_ptr_end();
+ I != E; ++I) {
+ TopDownPtrState &S = I->second;
+ const Sequence Seq = I->second.GetSeq();
+
+ // We only care about S_Retain, S_CanRelease, and S_Use.
+ if (Seq == S_None)
+ continue;
+
+    // Make sure that if extra top down states are added in the future, this
+    // code is updated to handle them.
+ assert((Seq == S_Retain || Seq == S_CanRelease || Seq == S_Use) &&
+ "Unknown top down sequence state.");
+
+ const Value *Arg = I->first;
+ const TerminatorInst *TI = cast<TerminatorInst>(&BB->back());
+ bool SomeSuccHasSame = false;
+ bool AllSuccsHaveSame = true;
+ bool NotAllSeqEqualButKnownSafe = false;
+
+ succ_const_iterator SI(TI), SE(TI, false);
+
+ for (; SI != SE; ++SI) {
+ // If VisitBottomUp has pointer information for this successor, take
+ // what we know about it.
+ const DenseMap<const BasicBlock *, BBState>::iterator BBI =
+ BBStates.find(*SI);
+ assert(BBI != BBStates.end());
+ const BottomUpPtrState &SuccS = BBI->second.getPtrBottomUpState(Arg);
+ const Sequence SuccSSeq = SuccS.GetSeq();
+
+      // If, bottom up, the pointer is in an S_None state, clear the sequence
+      // progress, since the sequence in the bottom up state finished,
+      // suggesting a mismatch between retains and releases. This is true for
+ // all three cases that we are handling here: S_Retain, S_Use, and
+ // S_CanRelease.
+ if (SuccSSeq == S_None) {
+ S.ClearSequenceProgress();
+ continue;
+ }
+
+      // If we have S_Use or S_CanRelease, perform our CFG hazard checks.
+ const bool SuccSRRIKnownSafe = SuccS.IsKnownSafe();
+
+ // *NOTE* We do not use Seq from above here since we are allowing for
+ // S.GetSeq() to change while we are visiting basic blocks.
+ switch(S.GetSeq()) {
+ case S_Use: {
+ bool ShouldContinue = false;
+ CheckForUseCFGHazard(SuccSSeq, SuccSRRIKnownSafe, S, SomeSuccHasSame,
+ AllSuccsHaveSame, NotAllSeqEqualButKnownSafe,
+ ShouldContinue);
+ if (ShouldContinue)
+ continue;
+ break;
+ }
+ case S_CanRelease: {
+ CheckForCanReleaseCFGHazard(SuccSSeq, SuccSRRIKnownSafe, S,
+ SomeSuccHasSame, AllSuccsHaveSame,
+ NotAllSeqEqualButKnownSafe);
+ break;
+ }
+ case S_Retain:
+ case S_None:
+ case S_Stop:
+ case S_Release:
+ case S_MovableRelease:
+ break;
+ }
+ }
+
+ // If the state at the other end of any of the successor edges
+ // matches the current state, require all edges to match. This
+ // guards against loops in the middle of a sequence.
+ if (SomeSuccHasSame && !AllSuccsHaveSame) {
+ S.ClearSequenceProgress();
+ } else if (NotAllSeqEqualButKnownSafe) {
+      // If we would have cleared the state were it not for the fact that we
+      // are known safe, stop code motion. This is because whether or not it
+      // is safe to remove RR pairs via KnownSafe is an orthogonal concept to
+      // whether we are allowed to perform code motion.
+ S.SetCFGHazardAfflicted(true);
+ }
+ }
+}
+
+bool ObjCARCOpt::VisitInstructionBottomUp(
+ Instruction *Inst, BasicBlock *BB, BlotMapVector<Value *, RRInfo> &Retains,
+ BBState &MyStates) {
+ bool NestingDetected = false;
+ ARCInstKind Class = GetARCInstKind(Inst);
+ const Value *Arg = nullptr;
+
+ DEBUG(dbgs() << " Class: " << Class << "\n");
+
+ switch (Class) {
+ case ARCInstKind::Release: {
+ Arg = GetArgRCIdentityRoot(Inst);
+
+ BottomUpPtrState &S = MyStates.getPtrBottomUpState(Arg);
+ NestingDetected |= S.InitBottomUp(MDKindCache, Inst);
+ break;
+ }
+ case ARCInstKind::RetainBlock:
+ // In OptimizeIndividualCalls, we have strength reduced all optimizable
+ // objc_retainBlocks to objc_retains. Thus at this point any
+ // objc_retainBlocks that we see are not optimizable.
+ break;
+ case ARCInstKind::Retain:
+ case ARCInstKind::RetainRV: {
+ Arg = GetArgRCIdentityRoot(Inst);
+ BottomUpPtrState &S = MyStates.getPtrBottomUpState(Arg);
+ if (S.MatchWithRetain()) {
+ // Don't do retain+release tracking for ARCInstKind::RetainRV, because
+ // it's better to let it remain as the first instruction after a call.
+ if (Class != ARCInstKind::RetainRV) {
+ DEBUG(llvm::dbgs() << " Matching with: " << *Inst << "\n");
+ Retains[Inst] = S.GetRRInfo();
+ }
+ S.ClearSequenceProgress();
+ }
+ // A retain moving bottom up can be a use.
+ break;
+ }
+ case ARCInstKind::AutoreleasepoolPop:
+ // Conservatively, clear MyStates for all known pointers.
+ MyStates.clearBottomUpPointers();
+ return NestingDetected;
+ case ARCInstKind::AutoreleasepoolPush:
+ case ARCInstKind::None:
+ // These are irrelevant.
+ return NestingDetected;
+ case ARCInstKind::User:
+ // If we have a store into an alloca of a pointer we are tracking, the
+ // pointer has multiple owners implying that we must be more conservative.
+ //
+ // This comes up in the context of a pointer being ``KnownSafe''. In the
+ // presence of a block being initialized, the frontend will emit the
+ // objc_retain on the original pointer and the release on the pointer loaded
+    // from the alloca. Through the provenance analysis, the optimizer will
+    // realize that the two are related, but since we only require KnownSafe in
+    // one direction, it will match the inner retain on the original pointer
+    // with the guard release on the original pointer. This is fixed by ensuring that
+ // in the presence of allocas we only unconditionally remove pointers if
+ // both our retain and our release are KnownSafe.
+ if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
+ const DataLayout &DL = BB->getModule()->getDataLayout();
+ if (AreAnyUnderlyingObjectsAnAlloca(SI->getPointerOperand(), DL)) {
+ auto I = MyStates.findPtrBottomUpState(
+ GetRCIdentityRoot(SI->getValueOperand()));
+ if (I != MyStates.bottom_up_ptr_end())
+ MultiOwnersSet.insert(I->first);
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ // Consider any other possible effects of this instruction on each
+ // pointer being tracked.
+ for (auto MI = MyStates.bottom_up_ptr_begin(),
+ ME = MyStates.bottom_up_ptr_end();
+ MI != ME; ++MI) {
+ const Value *Ptr = MI->first;
+ if (Ptr == Arg)
+ continue; // Handled above.
+ BottomUpPtrState &S = MI->second;
+
+ if (S.HandlePotentialAlterRefCount(Inst, Ptr, PA, Class))
+ continue;
+
+ S.HandlePotentialUse(BB, Inst, Ptr, PA, Class);
+ }
+
+ return NestingDetected;
+}
+
+bool ObjCARCOpt::VisitBottomUp(BasicBlock *BB,
+ DenseMap<const BasicBlock *, BBState> &BBStates,
+ BlotMapVector<Value *, RRInfo> &Retains) {
+
+ DEBUG(dbgs() << "\n== ObjCARCOpt::VisitBottomUp ==\n");
+
+ bool NestingDetected = false;
+ BBState &MyStates = BBStates[BB];
+
+ // Merge the states from each successor to compute the initial state
+ // for the current block.
+ BBState::edge_iterator SI(MyStates.succ_begin()),
+ SE(MyStates.succ_end());
+ if (SI != SE) {
+ const BasicBlock *Succ = *SI;
+ DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Succ);
+ assert(I != BBStates.end());
+ MyStates.InitFromSucc(I->second);
+ ++SI;
+ for (; SI != SE; ++SI) {
+ Succ = *SI;
+ I = BBStates.find(Succ);
+ assert(I != BBStates.end());
+ MyStates.MergeSucc(I->second);
+ }
+ }
+
+ DEBUG(llvm::dbgs() << "Before:\n" << BBStates[BB] << "\n"
+ << "Performing Dataflow:\n");
+
+ // Visit all the instructions, bottom-up.
+ for (BasicBlock::iterator I = BB->end(), E = BB->begin(); I != E; --I) {
+ Instruction *Inst = &*std::prev(I);
+
+ // Invoke instructions are visited as part of their successors (below).
+ if (isa<InvokeInst>(Inst))
+ continue;
+
+ DEBUG(dbgs() << " Visiting " << *Inst << "\n");
+
+ NestingDetected |= VisitInstructionBottomUp(Inst, BB, Retains, MyStates);
+ }
+
+ // If there's a predecessor with an invoke, visit the invoke as if it were
+ // part of this block, since we can't insert code after an invoke in its own
+ // block, and we don't want to split critical edges.
+ for (BBState::edge_iterator PI(MyStates.pred_begin()),
+ PE(MyStates.pred_end()); PI != PE; ++PI) {
+ BasicBlock *Pred = *PI;
+ if (InvokeInst *II = dyn_cast<InvokeInst>(&Pred->back()))
+ NestingDetected |= VisitInstructionBottomUp(II, BB, Retains, MyStates);
+ }
+
+ DEBUG(llvm::dbgs() << "\nFinal State:\n" << BBStates[BB] << "\n");
+
+ return NestingDetected;
+}
+
+bool
+ObjCARCOpt::VisitInstructionTopDown(Instruction *Inst,
+ DenseMap<Value *, RRInfo> &Releases,
+ BBState &MyStates) {
+ bool NestingDetected = false;
+ ARCInstKind Class = GetARCInstKind(Inst);
+ const Value *Arg = nullptr;
+
+ DEBUG(llvm::dbgs() << " Class: " << Class << "\n");
+
+ switch (Class) {
+ case ARCInstKind::RetainBlock:
+ // In OptimizeIndividualCalls, we have strength reduced all optimizable
+ // objc_retainBlocks to objc_retains. Thus at this point any
+ // objc_retainBlocks that we see are not optimizable. We need to break since
+ // a retain can be a potential use.
+ break;
+ case ARCInstKind::Retain:
+ case ARCInstKind::RetainRV: {
+ Arg = GetArgRCIdentityRoot(Inst);
+ TopDownPtrState &S = MyStates.getPtrTopDownState(Arg);
+ NestingDetected |= S.InitTopDown(Class, Inst);
+ // A retain can be a potential use; proceed to the generic checking
+ // code below.
+ break;
+ }
+ case ARCInstKind::Release: {
+ Arg = GetArgRCIdentityRoot(Inst);
+ TopDownPtrState &S = MyStates.getPtrTopDownState(Arg);
+    // Try to form a tentative pair between this release instruction and the
+ // top down pointers that we are tracking.
+ if (S.MatchWithRelease(MDKindCache, Inst)) {
+ // If we succeed, copy S's RRInfo into the Release -> {Retain Set
+ // Map}. Then we clear S.
+ DEBUG(llvm::dbgs() << " Matching with: " << *Inst << "\n");
+ Releases[Inst] = S.GetRRInfo();
+ S.ClearSequenceProgress();
+ }
+ break;
+ }
+ case ARCInstKind::AutoreleasepoolPop:
+ // Conservatively, clear MyStates for all known pointers.
+ MyStates.clearTopDownPointers();
+ return false;
+ case ARCInstKind::AutoreleasepoolPush:
+ case ARCInstKind::None:
+    // These cannot be uses of the pointers we are tracking.
+ return false;
+ default:
+ break;
+ }
+
+ // Consider any other possible effects of this instruction on each
+ // pointer being tracked.
+ for (auto MI = MyStates.top_down_ptr_begin(),
+ ME = MyStates.top_down_ptr_end();
+ MI != ME; ++MI) {
+ const Value *Ptr = MI->first;
+ if (Ptr == Arg)
+ continue; // Handled above.
+ TopDownPtrState &S = MI->second;
+ if (S.HandlePotentialAlterRefCount(Inst, Ptr, PA, Class))
+ continue;
+
+ S.HandlePotentialUse(Inst, Ptr, PA, Class);
+ }
+
+ return NestingDetected;
+}
+
+bool
+ObjCARCOpt::VisitTopDown(BasicBlock *BB,
+ DenseMap<const BasicBlock *, BBState> &BBStates,
+ DenseMap<Value *, RRInfo> &Releases) {
+ DEBUG(dbgs() << "\n== ObjCARCOpt::VisitTopDown ==\n");
+ bool NestingDetected = false;
+ BBState &MyStates = BBStates[BB];
+
+ // Merge the states from each predecessor to compute the initial state
+ // for the current block.
+ BBState::edge_iterator PI(MyStates.pred_begin()),
+ PE(MyStates.pred_end());
+ if (PI != PE) {
+ const BasicBlock *Pred = *PI;
+ DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Pred);
+ assert(I != BBStates.end());
+ MyStates.InitFromPred(I->second);
+ ++PI;
+ for (; PI != PE; ++PI) {
+ Pred = *PI;
+ I = BBStates.find(Pred);
+ assert(I != BBStates.end());
+ MyStates.MergePred(I->second);
+ }
+ }
+
+ DEBUG(llvm::dbgs() << "Before:\n" << BBStates[BB] << "\n"
+ << "Performing Dataflow:\n");
+
+ // Visit all the instructions, top-down.
+ for (Instruction &Inst : *BB) {
+ DEBUG(dbgs() << " Visiting " << Inst << "\n");
+
+ NestingDetected |= VisitInstructionTopDown(&Inst, Releases, MyStates);
+ }
+
+ DEBUG(llvm::dbgs() << "\nState Before Checking for CFG Hazards:\n"
+ << BBStates[BB] << "\n\n");
+ CheckForCFGHazards(BB, BBStates, MyStates);
+ DEBUG(llvm::dbgs() << "Final State:\n" << BBStates[BB] << "\n");
+ return NestingDetected;
+}
+
+static void
+ComputePostOrders(Function &F,
+ SmallVectorImpl<BasicBlock *> &PostOrder,
+ SmallVectorImpl<BasicBlock *> &ReverseCFGPostOrder,
+ unsigned NoObjCARCExceptionsMDKind,
+ DenseMap<const BasicBlock *, BBState> &BBStates) {
+ /// The visited set, for doing DFS walks.
+ SmallPtrSet<BasicBlock *, 16> Visited;
+
+ // Do DFS, computing the PostOrder.
+ SmallPtrSet<BasicBlock *, 16> OnStack;
+ SmallVector<std::pair<BasicBlock *, succ_iterator>, 16> SuccStack;
+
+ // Functions always have exactly one entry block, and we don't have
+ // any other block that we treat like an entry block.
+ BasicBlock *EntryBB = &F.getEntryBlock();
+ BBState &MyStates = BBStates[EntryBB];
+ MyStates.SetAsEntry();
+ TerminatorInst *EntryTI = cast<TerminatorInst>(&EntryBB->back());
+ SuccStack.push_back(std::make_pair(EntryBB, succ_iterator(EntryTI)));
+ Visited.insert(EntryBB);
+ OnStack.insert(EntryBB);
+ do {
+ dfs_next_succ:
+ BasicBlock *CurrBB = SuccStack.back().first;
+ TerminatorInst *TI = cast<TerminatorInst>(&CurrBB->back());
+ succ_iterator SE(TI, false);
+
+ while (SuccStack.back().second != SE) {
+ BasicBlock *SuccBB = *SuccStack.back().second++;
+ if (Visited.insert(SuccBB).second) {
+ TerminatorInst *TI = cast<TerminatorInst>(&SuccBB->back());
+ SuccStack.push_back(std::make_pair(SuccBB, succ_iterator(TI)));
+ BBStates[CurrBB].addSucc(SuccBB);
+ BBState &SuccStates = BBStates[SuccBB];
+ SuccStates.addPred(CurrBB);
+ OnStack.insert(SuccBB);
+ goto dfs_next_succ;
+ }
+
+ if (!OnStack.count(SuccBB)) {
+ BBStates[CurrBB].addSucc(SuccBB);
+ BBStates[SuccBB].addPred(CurrBB);
+ }
+ }
+ OnStack.erase(CurrBB);
+ PostOrder.push_back(CurrBB);
+ SuccStack.pop_back();
+ } while (!SuccStack.empty());
+
+ Visited.clear();
+
+ // Do reverse-CFG DFS, computing the reverse-CFG PostOrder.
+  // Functions may have many exits, and there are also blocks which we treat
+ // as exits due to ignored edges.
+ SmallVector<std::pair<BasicBlock *, BBState::edge_iterator>, 16> PredStack;
+ for (BasicBlock &ExitBB : F) {
+ BBState &MyStates = BBStates[&ExitBB];
+ if (!MyStates.isExit())
+ continue;
+
+ MyStates.SetAsExit();
+
+ PredStack.push_back(std::make_pair(&ExitBB, MyStates.pred_begin()));
+ Visited.insert(&ExitBB);
+ while (!PredStack.empty()) {
+ reverse_dfs_next_succ:
+ BBState::edge_iterator PE = BBStates[PredStack.back().first].pred_end();
+ while (PredStack.back().second != PE) {
+ BasicBlock *BB = *PredStack.back().second++;
+ if (Visited.insert(BB).second) {
+ PredStack.push_back(std::make_pair(BB, BBStates[BB].pred_begin()));
+ goto reverse_dfs_next_succ;
+ }
+ }
+ ReverseCFGPostOrder.push_back(PredStack.pop_back_val().first);
+ }
+ }
+}
+
+// Visit the function both top-down and bottom-up.
+bool ObjCARCOpt::Visit(Function &F,
+ DenseMap<const BasicBlock *, BBState> &BBStates,
+ BlotMapVector<Value *, RRInfo> &Retains,
+ DenseMap<Value *, RRInfo> &Releases) {
+
+ // Use reverse-postorder traversals, because we magically know that loops
+ // will be well behaved, i.e. they won't repeatedly call retain on a single
+ // pointer without doing a release. We can't use the ReversePostOrderTraversal
+ // class here because we want the reverse-CFG postorder to consider each
+ // function exit point, and we want to ignore selected cycle edges.
+ SmallVector<BasicBlock *, 16> PostOrder;
+ SmallVector<BasicBlock *, 16> ReverseCFGPostOrder;
+ ComputePostOrders(F, PostOrder, ReverseCFGPostOrder,
+ MDKindCache.get(ARCMDKindID::NoObjCARCExceptions),
+ BBStates);
+
+ // Use reverse-postorder on the reverse CFG for bottom-up.
+ bool BottomUpNestingDetected = false;
+ for (SmallVectorImpl<BasicBlock *>::const_reverse_iterator I =
+ ReverseCFGPostOrder.rbegin(), E = ReverseCFGPostOrder.rend();
+ I != E; ++I)
+ BottomUpNestingDetected |= VisitBottomUp(*I, BBStates, Retains);
+
+ // Use reverse-postorder for top-down.
+ bool TopDownNestingDetected = false;
+ for (SmallVectorImpl<BasicBlock *>::const_reverse_iterator I =
+ PostOrder.rbegin(), E = PostOrder.rend();
+ I != E; ++I)
+ TopDownNestingDetected |= VisitTopDown(*I, BBStates, Releases);
+
+ return TopDownNestingDetected && BottomUpNestingDetected;
+}
+
+/// Move the calls in RetainsToMove and ReleasesToMove.
+void ObjCARCOpt::MoveCalls(Value *Arg, RRInfo &RetainsToMove,
+ RRInfo &ReleasesToMove,
+ BlotMapVector<Value *, RRInfo> &Retains,
+ DenseMap<Value *, RRInfo> &Releases,
+ SmallVectorImpl<Instruction *> &DeadInsts,
+ Module *M) {
+ Type *ArgTy = Arg->getType();
+ Type *ParamTy = PointerType::getUnqual(Type::getInt8Ty(ArgTy->getContext()));
+
+ DEBUG(dbgs() << "== ObjCARCOpt::MoveCalls ==\n");
+
+ // Insert the new retain and release calls.
+ for (Instruction *InsertPt : ReleasesToMove.ReverseInsertPts) {
+ Value *MyArg = ArgTy == ParamTy ? Arg :
+ new BitCastInst(Arg, ParamTy, "", InsertPt);
+ Constant *Decl = EP.get(ARCRuntimeEntryPointKind::Retain);
+ CallInst *Call = CallInst::Create(Decl, MyArg, "", InsertPt);
+ Call->setDoesNotThrow();
+ Call->setTailCall();
+
+ DEBUG(dbgs() << "Inserting new Retain: " << *Call << "\n"
+ "At insertion point: " << *InsertPt << "\n");
+ }
+ for (Instruction *InsertPt : RetainsToMove.ReverseInsertPts) {
+ Value *MyArg = ArgTy == ParamTy ? Arg :
+ new BitCastInst(Arg, ParamTy, "", InsertPt);
+ Constant *Decl = EP.get(ARCRuntimeEntryPointKind::Release);
+ CallInst *Call = CallInst::Create(Decl, MyArg, "", InsertPt);
+ // Attach a clang.imprecise_release metadata tag, if appropriate.
+ if (MDNode *M = ReleasesToMove.ReleaseMetadata)
+ Call->setMetadata(MDKindCache.get(ARCMDKindID::ImpreciseRelease), M);
+ Call->setDoesNotThrow();
+ if (ReleasesToMove.IsTailCallRelease)
+ Call->setTailCall();
+
+ DEBUG(dbgs() << "Inserting new Release: " << *Call << "\n"
+ "At insertion point: " << *InsertPt << "\n");
+ }
+
+ // Delete the original retain and release calls.
+ for (Instruction *OrigRetain : RetainsToMove.Calls) {
+ Retains.blot(OrigRetain);
+ DeadInsts.push_back(OrigRetain);
+ DEBUG(dbgs() << "Deleting retain: " << *OrigRetain << "\n");
+ }
+ for (Instruction *OrigRelease : ReleasesToMove.Calls) {
+ Releases.erase(OrigRelease);
+ DeadInsts.push_back(OrigRelease);
+ DEBUG(dbgs() << "Deleting release: " << *OrigRelease << "\n");
+ }
+
+}
+
+bool ObjCARCOpt::PairUpRetainsAndReleases(
+ DenseMap<const BasicBlock *, BBState> &BBStates,
+ BlotMapVector<Value *, RRInfo> &Retains,
+ DenseMap<Value *, RRInfo> &Releases, Module *M,
+ SmallVectorImpl<Instruction *> &NewRetains,
+ SmallVectorImpl<Instruction *> &NewReleases,
+ SmallVectorImpl<Instruction *> &DeadInsts, RRInfo &RetainsToMove,
+ RRInfo &ReleasesToMove, Value *Arg, bool KnownSafe,
+ bool &AnyPairsCompletelyEliminated) {
+ // If a pair happens in a region where it is known that the reference count
+ // is already incremented, we can similarly ignore possible decrements unless
+ // we are dealing with a retainable object with multiple provenance sources.
+ bool KnownSafeTD = true, KnownSafeBU = true;
+ bool MultipleOwners = false;
+ bool CFGHazardAfflicted = false;
+
+ // Connect the dots between the top-down-collected RetainsToMove and
+ // bottom-up-collected ReleasesToMove to form sets of related calls.
+ // This is an iterative process so that we connect multiple releases
+ // to multiple retains if needed.
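+ // OldDelta and NewDelta track the path-count balance between retains and
+ // releases: OldDelta for the original call sites and NewDelta for the
+ // proposed insertion points. If either ends up nonzero, the transformation
+ // would unbalance retains and releases along some path, so it is rejected
+ // below.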
+ unsigned OldDelta = 0;
+ unsigned NewDelta = 0;
+ unsigned OldCount = 0;
+ unsigned NewCount = 0;
+ bool FirstRelease = true;
+ for (;;) {
+ for (SmallVectorImpl<Instruction *>::const_iterator
+ NI = NewRetains.begin(), NE = NewRetains.end(); NI != NE; ++NI) {
+ Instruction *NewRetain = *NI;
+ auto It = Retains.find(NewRetain);
+ assert(It != Retains.end());
+ const RRInfo &NewRetainRRI = It->second;
+ KnownSafeTD &= NewRetainRRI.KnownSafe;
+ MultipleOwners =
+ MultipleOwners || MultiOwnersSet.count(GetArgRCIdentityRoot(NewRetain));
+ for (Instruction *NewRetainRelease : NewRetainRRI.Calls) {
+ auto Jt = Releases.find(NewRetainRelease);
+ if (Jt == Releases.end())
+ return false;
+ const RRInfo &NewRetainReleaseRRI = Jt->second;
+
+ // If the release does not have a reference to the retain as well,
+ // something happened which is unaccounted for. Do not do anything.
+ //
+ // This can happen if we catch an additive overflow during path count
+ // merging.
+ if (!NewRetainReleaseRRI.Calls.count(NewRetain))
+ return false;
+
+ if (ReleasesToMove.Calls.insert(NewRetainRelease).second) {
+
+ // If we overflow when we compute the path count, don't remove/move
+ // anything.
+ const BBState &NRRBBState = BBStates[NewRetainRelease->getParent()];
+ unsigned PathCount = BBState::OverflowOccurredValue;
+ if (NRRBBState.GetAllPathCountWithOverflow(PathCount))
+ return false;
+ assert(PathCount != BBState::OverflowOccurredValue &&
+ "PathCount at this point can not be "
+ "OverflowOccurredValue.");
+ OldDelta -= PathCount;
+
+ // Merge the ReleaseMetadata and IsTailCallRelease values.
+ if (FirstRelease) {
+ ReleasesToMove.ReleaseMetadata =
+ NewRetainReleaseRRI.ReleaseMetadata;
+ ReleasesToMove.IsTailCallRelease =
+ NewRetainReleaseRRI.IsTailCallRelease;
+ FirstRelease = false;
+ } else {
+ if (ReleasesToMove.ReleaseMetadata !=
+ NewRetainReleaseRRI.ReleaseMetadata)
+ ReleasesToMove.ReleaseMetadata = nullptr;
+ if (ReleasesToMove.IsTailCallRelease !=
+ NewRetainReleaseRRI.IsTailCallRelease)
+ ReleasesToMove.IsTailCallRelease = false;
+ }
+
+ // Collect the optimal insertion points.
+ if (!KnownSafe)
+ for (Instruction *RIP : NewRetainReleaseRRI.ReverseInsertPts) {
+ if (ReleasesToMove.ReverseInsertPts.insert(RIP).second) {
+ // If we overflow when we compute the path count, don't
+ // remove/move anything.
+ const BBState &RIPBBState = BBStates[RIP->getParent()];
+ PathCount = BBState::OverflowOccurredValue;
+ if (RIPBBState.GetAllPathCountWithOverflow(PathCount))
+ return false;
+ assert(PathCount != BBState::OverflowOccurredValue &&
+ "PathCount at this point can not be "
+ "OverflowOccurredValue.");
+ NewDelta -= PathCount;
+ }
+ }
+ NewReleases.push_back(NewRetainRelease);
+ }
+ }
+ }
+ NewRetains.clear();
+ if (NewReleases.empty()) break;
+
+ // Back the other way.
+ for (SmallVectorImpl<Instruction *>::const_iterator
+ NI = NewReleases.begin(), NE = NewReleases.end(); NI != NE; ++NI) {
+ Instruction *NewRelease = *NI;
+ auto It = Releases.find(NewRelease);
+ assert(It != Releases.end());
+ const RRInfo &NewReleaseRRI = It->second;
+ KnownSafeBU &= NewReleaseRRI.KnownSafe;
+ CFGHazardAfflicted |= NewReleaseRRI.CFGHazardAfflicted;
+ for (Instruction *NewReleaseRetain : NewReleaseRRI.Calls) {
+ auto Jt = Retains.find(NewReleaseRetain);
+ if (Jt == Retains.end())
+ return false;
+ const RRInfo &NewReleaseRetainRRI = Jt->second;
+
+ // If the retain does not have a reference to the release as well,
+ // something happened which is unaccounted for. Do not do anything.
+ //
+ // This can happen if we catch an additive overflow during path count
+ // merging.
+ if (!NewReleaseRetainRRI.Calls.count(NewRelease))
+ return false;
+
+ if (RetainsToMove.Calls.insert(NewReleaseRetain).second) {
+ // If we overflow when we compute the path count, don't remove/move
+ // anything.
+ const BBState &NRRBBState = BBStates[NewReleaseRetain->getParent()];
+ unsigned PathCount = BBState::OverflowOccurredValue;
+ if (NRRBBState.GetAllPathCountWithOverflow(PathCount))
+ return false;
+ assert(PathCount != BBState::OverflowOccurredValue &&
+ "PathCount at this point can not be "
+ "OverflowOccurredValue.");
+ OldDelta += PathCount;
+ OldCount += PathCount;
+
+ // Collect the optimal insertion points.
+ if (!KnownSafe)
+ for (Instruction *RIP : NewReleaseRetainRRI.ReverseInsertPts) {
+ if (RetainsToMove.ReverseInsertPts.insert(RIP).second) {
+ // If we overflow when we compute the path count, don't
+ // remove/move anything.
+ const BBState &RIPBBState = BBStates[RIP->getParent()];
+
+ PathCount = BBState::OverflowOccurredValue;
+ if (RIPBBState.GetAllPathCountWithOverflow(PathCount))
+ return false;
+ assert(PathCount != BBState::OverflowOccurredValue &&
+ "PathCount at this point can not be "
+ "OverflowOccurredValue.");
+ NewDelta += PathCount;
+ NewCount += PathCount;
+ }
+ }
+ NewRetains.push_back(NewReleaseRetain);
+ }
+ }
+ }
+ NewReleases.clear();
+ if (NewRetains.empty()) break;
+ }
+
+ // We can only remove pointers if we are known safe in both directions.
+ bool UnconditionallySafe = KnownSafeTD && KnownSafeBU;
+ if (UnconditionallySafe) {
+ RetainsToMove.ReverseInsertPts.clear();
+ ReleasesToMove.ReverseInsertPts.clear();
+ NewCount = 0;
+ } else {
+ // Determine whether the new insertion points we computed preserve the
+ // balance of retain and release calls through the program.
+ // TODO: If the fully aggressive solution isn't valid, try to find a
+ // less aggressive solution which is.
+ if (NewDelta != 0)
+ return false;
+
+ // At this point, we are not going to remove any RR pairs, but we still are
+ // able to move RR pairs. If one of our pointers is afflicted with
+ // CFGHazards, we cannot perform such code motion so exit early.
+ const bool WillPerformCodeMotion = RetainsToMove.ReverseInsertPts.size() ||
+ ReleasesToMove.ReverseInsertPts.size();
+ if (CFGHazardAfflicted && WillPerformCodeMotion)
+ return false;
+ }
+
+ // Determine whether the original call points are balanced in the retain and
+ // release calls through the program. If not, conservatively don't touch
+ // them.
+ // TODO: It's theoretically possible to do code motion in this case, as
+ // long as the existing imbalances are maintained.
+ if (OldDelta != 0)
+ return false;
+
+ Changed = true;
+ assert(OldCount != 0 && "Unreachable code?");
+ NumRRs += OldCount - NewCount;
+ // Set to true if we completely removed any RR pairs.
+ AnyPairsCompletelyEliminated = NewCount == 0;
+
+ // We can move calls!
+ return true;
+}
+
+/// Identify pairings between the retains and releases, and delete and/or move
+/// them.
+bool ObjCARCOpt::PerformCodePlacement(
+ DenseMap<const BasicBlock *, BBState> &BBStates,
+ BlotMapVector<Value *, RRInfo> &Retains,
+ DenseMap<Value *, RRInfo> &Releases, Module *M) {
+ DEBUG(dbgs() << "\n== ObjCARCOpt::PerformCodePlacement ==\n");
+
+ bool AnyPairsCompletelyEliminated = false;
+ RRInfo RetainsToMove;
+ RRInfo ReleasesToMove;
+ SmallVector<Instruction *, 4> NewRetains;
+ SmallVector<Instruction *, 4> NewReleases;
+ SmallVector<Instruction *, 8> DeadInsts;
+
+ // Visit each retain.
+ for (BlotMapVector<Value *, RRInfo>::const_iterator I = Retains.begin(),
+ E = Retains.end();
+ I != E; ++I) {
+ Value *V = I->first;
+ if (!V) continue; // blotted
+
+ Instruction *Retain = cast<Instruction>(V);
+
+ DEBUG(dbgs() << "Visiting: " << *Retain << "\n");
+
+ Value *Arg = GetArgRCIdentityRoot(Retain);
+
+ // If the object being released is in static or stack storage, we know it's
+ // not being managed by ObjC reference counting, so we can delete pairs
+ // regardless of what possible decrements or uses lie between them.
+ bool KnownSafe = isa<Constant>(Arg) || isa<AllocaInst>(Arg);
+
+ // A constant pointer can't be pointing to an object on the heap. It may
+ // be reference-counted, but it won't be deleted.
+ if (const LoadInst *LI = dyn_cast<LoadInst>(Arg))
+ if (const GlobalVariable *GV =
+ dyn_cast<GlobalVariable>(
+ GetRCIdentityRoot(LI->getPointerOperand())))
+ if (GV->isConstant())
+ KnownSafe = true;
+
+ // Connect the dots between the top-down-collected RetainsToMove and
+ // bottom-up-collected ReleasesToMove to form sets of related calls.
+ NewRetains.push_back(Retain);
+ bool PerformMoveCalls = PairUpRetainsAndReleases(
+ BBStates, Retains, Releases, M, NewRetains, NewReleases, DeadInsts,
+ RetainsToMove, ReleasesToMove, Arg, KnownSafe,
+ AnyPairsCompletelyEliminated);
+
+ if (PerformMoveCalls) {
+ // Ok, everything checks out and we're all set. Let's move/delete some
+ // code!
+ MoveCalls(Arg, RetainsToMove, ReleasesToMove,
+ Retains, Releases, DeadInsts, M);
+ }
+
+ // Clean up state for next retain.
+ NewReleases.clear();
+ NewRetains.clear();
+ RetainsToMove.clear();
+ ReleasesToMove.clear();
+ }
+
+ // Now that we're done moving everything, we can delete the newly dead
+ // instructions, as we no longer need them as insert points.
+ while (!DeadInsts.empty())
+ EraseInstruction(DeadInsts.pop_back_val());
+
+ return AnyPairsCompletelyEliminated;
+}
+
+/// Weak pointer optimizations.
+void ObjCARCOpt::OptimizeWeakCalls(Function &F) {
+ DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeWeakCalls ==\n");
+
+ // First, do memdep-style RLE and S2L optimizations. We can't use memdep
+ // itself because it uses AliasAnalysis and we need to do provenance
+ // queries instead.
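+ // (RLE here is redundant load elimination: an objc_loadWeak of a pointer
+ // already loaded earlier in the block can reuse the earlier value. S2L is
+ // store-to-load forwarding: a load following an objc_storeWeak or
+ // objc_initWeak of the same pointer can take the stored value directly.)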
+ for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
+ Instruction *Inst = &*I++;
+
+ DEBUG(dbgs() << "Visiting: " << *Inst << "\n");
+
+ ARCInstKind Class = GetBasicARCInstKind(Inst);
+ if (Class != ARCInstKind::LoadWeak &&
+ Class != ARCInstKind::LoadWeakRetained)
+ continue;
+
+ // Delete objc_loadWeak calls with no users.
+ if (Class == ARCInstKind::LoadWeak && Inst->use_empty()) {
+ Inst->eraseFromParent();
+ continue;
+ }
+
+ // TODO: For now, just look for an earlier available version of this value
+ // within the same block. Theoretically, we could do memdep-style non-local
+ // analysis too, but that would want caching. A better approach would be to
+ // use the technique that EarlyCSE uses.
+ inst_iterator Current = std::prev(I);
+ BasicBlock *CurrentBB = &*Current.getBasicBlockIterator();
+ for (BasicBlock::iterator B = CurrentBB->begin(),
+ J = Current.getInstructionIterator();
+ J != B; --J) {
+ Instruction *EarlierInst = &*std::prev(J);
+ ARCInstKind EarlierClass = GetARCInstKind(EarlierInst);
+ switch (EarlierClass) {
+ case ARCInstKind::LoadWeak:
+ case ARCInstKind::LoadWeakRetained: {
+ // If this is loading from the same pointer, replace this load's value
+ // with that one.
+ CallInst *Call = cast<CallInst>(Inst);
+ CallInst *EarlierCall = cast<CallInst>(EarlierInst);
+ Value *Arg = Call->getArgOperand(0);
+ Value *EarlierArg = EarlierCall->getArgOperand(0);
+ switch (PA.getAA()->alias(Arg, EarlierArg)) {
+ case MustAlias:
+ Changed = true;
+ // If the load has a builtin retain, insert a plain retain for it.
+ if (Class == ARCInstKind::LoadWeakRetained) {
+ Constant *Decl = EP.get(ARCRuntimeEntryPointKind::Retain);
+ CallInst *CI = CallInst::Create(Decl, EarlierCall, "", Call);
+ CI->setTailCall();
+ }
+ // Zap the fully redundant load.
+ Call->replaceAllUsesWith(EarlierCall);
+ Call->eraseFromParent();
+ goto clobbered;
+ case MayAlias:
+ case PartialAlias:
+ goto clobbered;
+ case NoAlias:
+ break;
+ }
+ break;
+ }
+ case ARCInstKind::StoreWeak:
+ case ARCInstKind::InitWeak: {
+ // If this is storing to the same pointer and has the same size etc.,
+ // replace this load's value with the stored value.
+ CallInst *Call = cast<CallInst>(Inst);
+ CallInst *EarlierCall = cast<CallInst>(EarlierInst);
+ Value *Arg = Call->getArgOperand(0);
+ Value *EarlierArg = EarlierCall->getArgOperand(0);
+ switch (PA.getAA()->alias(Arg, EarlierArg)) {
+ case MustAlias:
+ Changed = true;
+ // If the load has a builtin retain, insert a plain retain for it.
+ if (Class == ARCInstKind::LoadWeakRetained) {
+ Constant *Decl = EP.get(ARCRuntimeEntryPointKind::Retain);
+ CallInst *CI = CallInst::Create(Decl, EarlierCall, "", Call);
+ CI->setTailCall();
+ }
+ // Zap the fully redundant load.
+ Call->replaceAllUsesWith(EarlierCall->getArgOperand(1));
+ Call->eraseFromParent();
+ goto clobbered;
+ case MayAlias:
+ case PartialAlias:
+ goto clobbered;
+ case NoAlias:
+ break;
+ }
+ break;
+ }
+ case ARCInstKind::MoveWeak:
+ case ARCInstKind::CopyWeak:
+ // TODO: Grab the copied value.
+ goto clobbered;
+ case ARCInstKind::AutoreleasepoolPush:
+ case ARCInstKind::None:
+ case ARCInstKind::IntrinsicUser:
+ case ARCInstKind::User:
+ // Weak pointers are only modified through the weak entry points
+ // (and arbitrary calls, which could call the weak entry points).
+ break;
+ default:
+ // Anything else could modify the weak pointer.
+ goto clobbered;
+ }
+ }
+ clobbered:;
+ }
+
+ // Then, for each destroyWeak with an alloca operand, check to see if
+ // the alloca and all its users can be zapped.
+ for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
+ Instruction *Inst = &*I++;
+ ARCInstKind Class = GetBasicARCInstKind(Inst);
+ if (Class != ARCInstKind::DestroyWeak)
+ continue;
+
+ CallInst *Call = cast<CallInst>(Inst);
+ Value *Arg = Call->getArgOperand(0);
+ if (AllocaInst *Alloca = dyn_cast<AllocaInst>(Arg)) {
+ for (User *U : Alloca->users()) {
+ const Instruction *UserInst = cast<Instruction>(U);
+ switch (GetBasicARCInstKind(UserInst)) {
+ case ARCInstKind::InitWeak:
+ case ARCInstKind::StoreWeak:
+ case ARCInstKind::DestroyWeak:
+ continue;
+ default:
+ goto done;
+ }
+ }
+ Changed = true;
+ for (auto UI = Alloca->user_begin(), UE = Alloca->user_end(); UI != UE;) {
+ CallInst *UserInst = cast<CallInst>(*UI++);
+ switch (GetBasicARCInstKind(UserInst)) {
+ case ARCInstKind::InitWeak:
+ case ARCInstKind::StoreWeak:
+ // These functions return their second argument.
+ UserInst->replaceAllUsesWith(UserInst->getArgOperand(1));
+ break;
+ case ARCInstKind::DestroyWeak:
+ // No return value.
+ break;
+ default:
+ llvm_unreachable("alloca really is used!");
+ }
+ UserInst->eraseFromParent();
+ }
+ Alloca->eraseFromParent();
+ done:;
+ }
+ }
+}
+
+/// Identify program paths that execute sequences of retains and releases which
+/// can be eliminated.
+bool ObjCARCOpt::OptimizeSequences(Function &F) {
+ // Releases, Retains - These are used to store the results of the main flow
+ // analysis. These use Value* as the key instead of Instruction* so that the
+ // map stays valid when we get around to rewriting code and calls get
+ // replaced by arguments.
+ DenseMap<Value *, RRInfo> Releases;
+ BlotMapVector<Value *, RRInfo> Retains;
+
+ // This is used during the traversal of the function to track the
+ // states for each identified object at each block.
+ DenseMap<const BasicBlock *, BBState> BBStates;
+
+ // Analyze the CFG of the function, and all instructions.
+ bool NestingDetected = Visit(F, BBStates, Retains, Releases);
+
+ // Transform.
+ bool AnyPairsCompletelyEliminated = PerformCodePlacement(BBStates, Retains,
+ Releases,
+ F.getParent());
+
+ // Cleanup.
+ MultiOwnersSet.clear();
+
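+ // Report success only when pairs were completely eliminated and nesting was
+ // detected; the caller re-runs the whole analysis in that case, since
+ // removing an inner pair can expose an enclosing one.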
+ return AnyPairsCompletelyEliminated && NestingDetected;
+}
+
+/// Check whether Arg is the return value of an earlier call, with nothing in
+/// between that call and the Retain that can affect the reference count of
+/// their shared pointer argument. Note that Retain need not be in BB.
+static bool
+HasSafePathToPredecessorCall(const Value *Arg, Instruction *Retain,
+ SmallPtrSetImpl<Instruction *> &DepInsts,
+ SmallPtrSetImpl<const BasicBlock *> &Visited,
+ ProvenanceAnalysis &PA) {
+ FindDependencies(CanChangeRetainCount, Arg, Retain->getParent(), Retain,
+ DepInsts, Visited, PA);
+ if (DepInsts.size() != 1)
+ return false;
+
+ auto *Call = dyn_cast_or_null<CallInst>(*DepInsts.begin());
+
+ // Check that the pointer is the return value of the call.
+ if (!Call || Arg != Call)
+ return false;
+
+ // Check that the call is a regular call.
+ ARCInstKind Class = GetBasicARCInstKind(Call);
+ return Class == ARCInstKind::CallOrUser || Class == ARCInstKind::Call;
+}
+
+/// Find a dependent retain that precedes the given autorelease for which there
+/// is nothing in between the two instructions that can affect the ref count of
+/// Arg.
+static CallInst *
+FindPredecessorRetainWithSafePath(const Value *Arg, BasicBlock *BB,
+ Instruction *Autorelease,
+ SmallPtrSetImpl<Instruction *> &DepInsts,
+ SmallPtrSetImpl<const BasicBlock *> &Visited,
+ ProvenanceAnalysis &PA) {
+ FindDependencies(CanChangeRetainCount, Arg,
+ BB, Autorelease, DepInsts, Visited, PA);
+ if (DepInsts.size() != 1)
+ return nullptr;
+
+ auto *Retain = dyn_cast_or_null<CallInst>(*DepInsts.begin());
+
+ // Check that we found a retain with the same argument.
+ if (!Retain || !IsRetain(GetBasicARCInstKind(Retain)) ||
+ GetArgRCIdentityRoot(Retain) != Arg) {
+ return nullptr;
+ }
+
+ return Retain;
+}
+
+/// Look for an ``autorelease'' instruction dependent on Arg such that there are
+/// no instructions dependent on Arg that need a positive ref count in between
+/// the autorelease and the ret.
+static CallInst *
+FindPredecessorAutoreleaseWithSafePath(const Value *Arg, BasicBlock *BB,
+ ReturnInst *Ret,
+ SmallPtrSetImpl<Instruction *> &DepInsts,
+ SmallPtrSetImpl<const BasicBlock *> &V,
+ ProvenanceAnalysis &PA) {
+ FindDependencies(NeedsPositiveRetainCount, Arg,
+ BB, Ret, DepInsts, V, PA);
+ if (DepInsts.size() != 1)
+ return nullptr;
+
+ auto *Autorelease = dyn_cast_or_null<CallInst>(*DepInsts.begin());
+ if (!Autorelease)
+ return nullptr;
+ ARCInstKind AutoreleaseClass = GetBasicARCInstKind(Autorelease);
+ if (!IsAutorelease(AutoreleaseClass))
+ return nullptr;
+ if (GetArgRCIdentityRoot(Autorelease) != Arg)
+ return nullptr;
+
+ return Autorelease;
+}
+
+/// Look for this pattern:
+/// \code
+/// %call = call i8* @something(...)
+/// %2 = call i8* @objc_retain(i8* %call)
+/// %3 = call i8* @objc_autorelease(i8* %2)
+/// ret i8* %3
+/// \endcode
+/// And delete the retain and autorelease.
+void ObjCARCOpt::OptimizeReturns(Function &F) {
+ if (!F.getReturnType()->isPointerTy())
+ return;
+
+ DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeReturns ==\n");
+
+ SmallPtrSet<Instruction *, 4> DependingInstructions;
+ SmallPtrSet<const BasicBlock *, 4> Visited;
+ for (BasicBlock &BB: F) {
+ ReturnInst *Ret = dyn_cast<ReturnInst>(&BB.back());
+ if (!Ret)
+ continue;
+
+ DEBUG(dbgs() << "Visiting: " << *Ret << "\n");
+
+ const Value *Arg = GetRCIdentityRoot(Ret->getOperand(0));
+
+ // Look for an ``autorelease'' instruction that is a predecessor of Ret and
+ // dependent on Arg such that there are no instructions dependent on Arg
+ // that need a positive ref count in between the autorelease and Ret.
+ CallInst *Autorelease = FindPredecessorAutoreleaseWithSafePath(
+ Arg, &BB, Ret, DependingInstructions, Visited, PA);
+ DependingInstructions.clear();
+ Visited.clear();
+
+ if (!Autorelease)
+ continue;
+
+ CallInst *Retain = FindPredecessorRetainWithSafePath(
+ Arg, &BB, Autorelease, DependingInstructions, Visited, PA);
+ DependingInstructions.clear();
+ Visited.clear();
+
+ if (!Retain)
+ continue;
+
+ // Check that there is nothing that can affect the reference count
+ // between the retain and the call. Note that Retain need not be in BB.
+ bool HasSafePathToCall = HasSafePathToPredecessorCall(Arg, Retain,
+ DependingInstructions,
+ Visited, PA);
+ DependingInstructions.clear();
+ Visited.clear();
+
+ if (!HasSafePathToCall)
+ continue;
+
+ // If so, we can zap the retain and autorelease.
+ Changed = true;
+ ++NumRets;
+ DEBUG(dbgs() << "Erasing: " << *Retain << "\nErasing: "
+ << *Autorelease << "\n");
+ EraseInstruction(Retain);
+ EraseInstruction(Autorelease);
+ }
+}
+
+#ifndef NDEBUG
+void
+ObjCARCOpt::GatherStatistics(Function &F, bool AfterOptimization) {
+ llvm::Statistic &NumRetains =
+ AfterOptimization? NumRetainsAfterOpt : NumRetainsBeforeOpt;
+ llvm::Statistic &NumReleases =
+ AfterOptimization? NumReleasesAfterOpt : NumReleasesBeforeOpt;
+
+ for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
+ Instruction *Inst = &*I++;
+ switch (GetBasicARCInstKind(Inst)) {
+ default:
+ break;
+ case ARCInstKind::Retain:
+ ++NumRetains;
+ break;
+ case ARCInstKind::Release:
+ ++NumReleases;
+ break;
+ }
+ }
+}
+#endif
+
+bool ObjCARCOpt::doInitialization(Module &M) {
+ if (!EnableARCOpts)
+ return false;
+
+ // If nothing in the Module uses ARC, don't do anything.
+ Run = ModuleHasARC(M);
+ if (!Run)
+ return false;
+
+ // Intuitively, objc_retain and others are nocapture; in practice, however,
+ // they are not, because they return their argument value. And objc_release
+ // calls finalizers which can have arbitrary side effects.
+ MDKindCache.init(&M);
+
+ // Initialize our runtime entry point cache.
+ EP.init(&M);
+
+ return false;
+}
+
+bool ObjCARCOpt::runOnFunction(Function &F) {
+ if (!EnableARCOpts)
+ return false;
+
+ // If nothing in the Module uses ARC, don't do anything.
+ if (!Run)
+ return false;
+
+ Changed = false;
+
+ DEBUG(dbgs() << "<<< ObjCARCOpt: Visiting Function: " << F.getName() << " >>>"
+ "\n");
+
+ PA.setAA(&getAnalysis<AAResultsWrapperPass>().getAAResults());
+
+#ifndef NDEBUG
+ if (AreStatisticsEnabled()) {
+ GatherStatistics(F, false);
+ }
+#endif
+
+ // This pass performs several distinct transformations. As a compile-time aid
+ // when compiling code that isn't ObjC, skip these if the relevant ObjC
+ // library functions aren't declared.
+
+ // Preliminary optimizations. This also computes UsedInThisFunction.
+ OptimizeIndividualCalls(F);
+
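+ // UsedInThisFunction is a bitmask with one bit per ARCInstKind value, filled
+ // in by OptimizeIndividualCalls, so each check below cheaply skips an entire
+ // phase when the corresponding runtime calls never appear in this function.
+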
+ // Optimizations for weak pointers.
+ if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::LoadWeak)) |
+ (1 << unsigned(ARCInstKind::LoadWeakRetained)) |
+ (1 << unsigned(ARCInstKind::StoreWeak)) |
+ (1 << unsigned(ARCInstKind::InitWeak)) |
+ (1 << unsigned(ARCInstKind::CopyWeak)) |
+ (1 << unsigned(ARCInstKind::MoveWeak)) |
+ (1 << unsigned(ARCInstKind::DestroyWeak))))
+ OptimizeWeakCalls(F);
+
+ // Optimizations for retain+release pairs.
+ if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::Retain)) |
+ (1 << unsigned(ARCInstKind::RetainRV)) |
+ (1 << unsigned(ARCInstKind::RetainBlock))))
+ if (UsedInThisFunction & (1 << unsigned(ARCInstKind::Release)))
+ // Run OptimizeSequences until it either stops making changes or
+ // no retain+release pair nesting is detected.
+ while (OptimizeSequences(F)) {}
+
+ // Optimizations if objc_autorelease is used.
+ if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::Autorelease)) |
+ (1 << unsigned(ARCInstKind::AutoreleaseRV))))
+ OptimizeReturns(F);
+
+ // Gather statistics after optimization.
+#ifndef NDEBUG
+ if (AreStatisticsEnabled()) {
+ GatherStatistics(F, true);
+ }
+#endif
+
+ DEBUG(dbgs() << "\n");
+
+ return Changed;
+}
+
+void ObjCARCOpt::releaseMemory() {
+ PA.clear();
+}
+
+/// @}
+///
diff --git a/contrib/llvm/lib/Transforms/ObjCARC/ProvenanceAnalysis.cpp b/contrib/llvm/lib/Transforms/ObjCARC/ProvenanceAnalysis.cpp
new file mode 100644
index 0000000..9ffdfb4
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/ObjCARC/ProvenanceAnalysis.cpp
@@ -0,0 +1,177 @@
+//===- ProvenanceAnalysis.cpp - ObjC ARC Optimization ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines a special form of Alias Analysis called ``Provenance
+/// Analysis''. The word ``provenance'' refers to the history of the ownership
+/// of an object. Thus ``Provenance Analysis'' is an analysis which attempts to
+/// use various techniques to determine whether two pointers could share a
+/// provenance source, and thus whether they could be related.
+///
+/// WARNING: This file knows about certain library functions. It recognizes them
+/// by name, and hardwires knowledge of their semantics.
+///
+/// WARNING: This file knows about how certain Objective-C library functions are
+/// used. Naive LLVM IR transformations which would otherwise be
+/// behavior-preserving may break these assumptions.
+///
+//===----------------------------------------------------------------------===//
+
+#include "ObjCARC.h"
+#include "ProvenanceAnalysis.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
+
+using namespace llvm;
+using namespace llvm::objcarc;
+
+bool ProvenanceAnalysis::relatedSelect(const SelectInst *A,
+ const Value *B) {
+ const DataLayout &DL = A->getModule()->getDataLayout();
+ // If the values are Selects with the same condition, we can do a more precise
+ // check: just check for relations between the values on corresponding arms.
+ if (const SelectInst *SB = dyn_cast<SelectInst>(B))
+ if (A->getCondition() == SB->getCondition())
+ return related(A->getTrueValue(), SB->getTrueValue(), DL) ||
+ related(A->getFalseValue(), SB->getFalseValue(), DL);
+
+ // Check both arms of the Select node individually.
+ return related(A->getTrueValue(), B, DL) ||
+ related(A->getFalseValue(), B, DL);
+}
+
+bool ProvenanceAnalysis::relatedPHI(const PHINode *A,
+ const Value *B) {
+ const DataLayout &DL = A->getModule()->getDataLayout();
+ // If the values are PHIs in the same block, we can do a more precise as well
+ // as efficient check: just check for relations between the values on
+ // corresponding edges.
+ if (const PHINode *PNB = dyn_cast<PHINode>(B))
+ if (PNB->getParent() == A->getParent()) {
+ for (unsigned i = 0, e = A->getNumIncomingValues(); i != e; ++i)
+ if (related(A->getIncomingValue(i),
+ PNB->getIncomingValueForBlock(A->getIncomingBlock(i)), DL))
+ return true;
+ return false;
+ }
+
+ // Check each unique source of the PHI node against B.
+ SmallPtrSet<const Value *, 4> UniqueSrc;
+ for (Value *PV1 : A->incoming_values()) {
+ if (UniqueSrc.insert(PV1).second && related(PV1, B, DL))
+ return true;
+ }
+
+ // All of the arms checked out.
+ return false;
+}
+
+/// Test if the value of P, or any value covered by its provenance, is ever
+/// stored within the function (not counting callees).
+static bool IsStoredObjCPointer(const Value *P) {
+ SmallPtrSet<const Value *, 8> Visited;
+ SmallVector<const Value *, 8> Worklist;
+ Worklist.push_back(P);
+ Visited.insert(P);
+ do {
+ P = Worklist.pop_back_val();
+ for (const Use &U : P->uses()) {
+ const User *Ur = U.getUser();
+ if (isa<StoreInst>(Ur)) {
+ if (U.getOperandNo() == 0)
+ // The pointer is stored.
+ return true;
+ // The pointer is stored through, not stored itself.
+ continue;
+ }
+ if (isa<CallInst>(Ur))
+ // The pointer is passed as an argument, ignore this.
+ continue;
+ if (isa<PtrToIntInst>(P))
+ // Assume the worst.
+ return true;
+ if (Visited.insert(Ur).second)
+ Worklist.push_back(Ur);
+ }
+ } while (!Worklist.empty());
+
+ // Everything checked out.
+ return false;
+}
+
+bool ProvenanceAnalysis::relatedCheck(const Value *A, const Value *B,
+ const DataLayout &DL) {
+ // Skip past provenance pass-throughs.
+ A = GetUnderlyingObjCPtr(A, DL);
+ B = GetUnderlyingObjCPtr(B, DL);
+
+ // Quick check.
+ if (A == B)
+ return true;
+
+ // Ask regular AliasAnalysis, for a first approximation.
+ switch (AA->alias(A, B)) {
+ case NoAlias:
+ return false;
+ case MustAlias:
+ case PartialAlias:
+ return true;
+ case MayAlias:
+ break;
+ }
+
+ bool AIsIdentified = IsObjCIdentifiedObject(A);
+ bool BIsIdentified = IsObjCIdentifiedObject(B);
+
+ // An ObjC-Identified object can't alias a load if it is never locally stored.
+ if (AIsIdentified) {
+ // Check for an obvious escape.
+ if (isa<LoadInst>(B))
+ return IsStoredObjCPointer(A);
+ if (BIsIdentified) {
+ // Check for an obvious escape.
+ if (isa<LoadInst>(A))
+ return IsStoredObjCPointer(B);
+ // Both pointers are identified and escapes aren't an evident problem.
+ return false;
+ }
+ } else if (BIsIdentified) {
+ // Check for an obvious escape.
+ if (isa<LoadInst>(A))
+ return IsStoredObjCPointer(B);
+ }
+
+ // Special handling for PHI and Select.
+ if (const PHINode *PN = dyn_cast<PHINode>(A))
+ return relatedPHI(PN, B);
+ if (const PHINode *PN = dyn_cast<PHINode>(B))
+ return relatedPHI(PN, A);
+ if (const SelectInst *S = dyn_cast<SelectInst>(A))
+ return relatedSelect(S, B);
+ if (const SelectInst *S = dyn_cast<SelectInst>(B))
+ return relatedSelect(S, A);
+
+ // Conservative.
+ return true;
+}
+
+bool ProvenanceAnalysis::related(const Value *A, const Value *B,
+ const DataLayout &DL) {
+ // Begin by inserting a conservative value into the map. If the insertion
+ // fails, we have the answer already. If it succeeds, leave it there until we
+ // compute the real answer to guard against recursive queries.
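+ // Order the pair by pointer value so that (A, B) and (B, A) queries share a
+ // single cache entry.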
+ if (A > B) std::swap(A, B);
+ std::pair<CachedResultsTy::iterator, bool> Pair =
+ CachedResults.insert(std::make_pair(ValuePairTy(A, B), true));
+ if (!Pair.second)
+ return Pair.first->second;
+
+ bool Result = relatedCheck(A, B, DL);
+ CachedResults[ValuePairTy(A, B)] = Result;
+ return Result;
+}
diff --git a/contrib/llvm/lib/Transforms/ObjCARC/ProvenanceAnalysis.h b/contrib/llvm/lib/Transforms/ObjCARC/ProvenanceAnalysis.h
new file mode 100644
index 0000000..1a12b659
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/ObjCARC/ProvenanceAnalysis.h
@@ -0,0 +1,81 @@
+//===- ProvenanceAnalysis.h - ObjC ARC Optimization ---*- C++ -*-----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file declares a special form of Alias Analysis called ``Provenance
+/// Analysis''. The word ``provenance'' refers to the history of the ownership
+/// of an object. Thus ``Provenance Analysis'' is an analysis which attempts to
+/// use various techniques to determine whether two pointers could share a
+/// provenance source, and thus whether they could be related.
+///
+/// WARNING: This file knows about certain library functions. It recognizes them
+/// by name, and hardwires knowledge of their semantics.
+///
+/// WARNING: This file knows about how certain Objective-C library functions are
+/// used. Naive LLVM IR transformations which would otherwise be
+/// behavior-preserving may break these assumptions.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TRANSFORMS_OBJCARC_PROVENANCEANALYSIS_H
+#define LLVM_LIB_TRANSFORMS_OBJCARC_PROVENANCEANALYSIS_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+
+namespace llvm {
+ class Value;
+ class DataLayout;
+ class PHINode;
+ class SelectInst;
+}
+
+namespace llvm {
+namespace objcarc {
+
+/// \brief This is similar to BasicAliasAnalysis, and it uses many of the same
+/// techniques, except it uses special ObjC-specific reasoning about pointer
+/// relationships.
+///
+/// In this context ``Provenance'' is defined as the history of an object's
+/// ownership. Thus ``Provenance Analysis'' is defined by using the notion of
+/// an ``independent provenance source'' of a pointer to determine whether or
+/// not two pointers have the same provenance source and thus could
+/// potentially be related.
+class ProvenanceAnalysis {
+ AliasAnalysis *AA;
+
+ typedef std::pair<const Value *, const Value *> ValuePairTy;
+ typedef DenseMap<ValuePairTy, bool> CachedResultsTy;
+ CachedResultsTy CachedResults;
+
+ bool relatedCheck(const Value *A, const Value *B, const DataLayout &DL);
+ bool relatedSelect(const SelectInst *A, const Value *B);
+ bool relatedPHI(const PHINode *A, const Value *B);
+
+ void operator=(const ProvenanceAnalysis &) = delete;
+ ProvenanceAnalysis(const ProvenanceAnalysis &) = delete;
+
+public:
+ ProvenanceAnalysis() {}
+
+ void setAA(AliasAnalysis *aa) { AA = aa; }
+
+ AliasAnalysis *getAA() const { return AA; }
+
+ bool related(const Value *A, const Value *B, const DataLayout &DL);
+
+ void clear() {
+ CachedResults.clear();
+ }
+};
+
+} // end namespace objcarc
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Transforms/ObjCARC/ProvenanceAnalysisEvaluator.cpp b/contrib/llvm/lib/Transforms/ObjCARC/ProvenanceAnalysisEvaluator.cpp
new file mode 100644
index 0000000..c274e81
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/ObjCARC/ProvenanceAnalysisEvaluator.cpp
@@ -0,0 +1,94 @@
+//===- ProvenanceAnalysisEvaluator.cpp - ObjC ARC Optimization ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ProvenanceAnalysis.h"
+#include "llvm/Pass.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/Passes.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+using namespace llvm::objcarc;
+
+namespace {
+class PAEval : public FunctionPass {
+
+public:
+ static char ID;
+ PAEval();
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+ bool runOnFunction(Function &F) override;
+};
+}
+
+char PAEval::ID = 0;
+PAEval::PAEval() : FunctionPass(ID) {}
+
+void PAEval::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequired<AAResultsWrapperPass>();
+}
+
+static StringRef getName(Value *V) {
+ StringRef Name = V->getName();
+ if (Name.startswith("\1"))
+ return Name.substr(1);
+ return Name;
+}
+
+static void insertIfNamed(SetVector<Value *> &Values, Value *V) {
+ if (!V->hasName())
+ return;
+ Values.insert(V);
+}
+
+bool PAEval::runOnFunction(Function &F) {
+ SetVector<Value *> Values;
+
+ for (auto &Arg : F.args())
+ insertIfNamed(Values, &Arg);
+
+ for (auto I = inst_begin(F), E = inst_end(F); I != E; ++I) {
+ insertIfNamed(Values, &*I);
+
+ for (auto &Op : I->operands())
+ insertIfNamed(Values, Op);
+ }
+
+ ProvenanceAnalysis PA;
+ PA.setAA(&getAnalysis<AAResultsWrapperPass>().getAAResults());
+ const DataLayout &DL = F.getParent()->getDataLayout();
+
+ for (Value *V1 : Values) {
+ StringRef NameV1 = getName(V1);
+ for (Value *V2 : Values) {
+ StringRef NameV2 = getName(V2);
+ if (NameV1 >= NameV2)
+ continue;
+ errs() << NameV1 << " and " << NameV2;
+ if (PA.related(V1, V2, DL))
+ errs() << " are related.\n";
+ else
+ errs() << " are not related.\n";
+ }
+ }
+
+ return false;
+}
+
+FunctionPass *llvm::createPAEvalPass() { return new PAEval(); }
+
+INITIALIZE_PASS_BEGIN(PAEval, "pa-eval",
+ "Evaluate ProvenanceAnalysis on all pairs", false, true)
+INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
+INITIALIZE_PASS_END(PAEval, "pa-eval",
+ "Evaluate ProvenanceAnalysis on all pairs", false, true)
diff --git a/contrib/llvm/lib/Transforms/ObjCARC/PtrState.cpp b/contrib/llvm/lib/Transforms/ObjCARC/PtrState.cpp
new file mode 100644
index 0000000..df64fa3
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/ObjCARC/PtrState.cpp
@@ -0,0 +1,404 @@
+//===--- PtrState.cpp -----------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "PtrState.h"
+#include "DependencyAnalysis.h"
+#include "ObjCARC.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+using namespace llvm::objcarc;
+
+#define DEBUG_TYPE "objc-arc-ptr-state"
+
+//===----------------------------------------------------------------------===//
+// Utility
+//===----------------------------------------------------------------------===//
+
+raw_ostream &llvm::objcarc::operator<<(raw_ostream &OS, const Sequence S) {
+ switch (S) {
+ case S_None:
+ return OS << "S_None";
+ case S_Retain:
+ return OS << "S_Retain";
+ case S_CanRelease:
+ return OS << "S_CanRelease";
+ case S_Use:
+ return OS << "S_Use";
+ case S_Release:
+ return OS << "S_Release";
+ case S_MovableRelease:
+ return OS << "S_MovableRelease";
+ case S_Stop:
+ return OS << "S_Stop";
+ }
+ llvm_unreachable("Unknown sequence type.");
+}
+
+//===----------------------------------------------------------------------===//
+// Sequence
+//===----------------------------------------------------------------------===//
+
+static Sequence MergeSeqs(Sequence A, Sequence B, bool TopDown) {
+ // The easy cases.
+ if (A == B)
+ return A;
+ if (A == S_None || B == S_None)
+ return S_None;
+
+ if (A > B)
+ std::swap(A, B);
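+ // The swap above relies on the declaration order of the Sequence enum, so A
+ // is always the numerically smaller state in the comparisons below.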
+ if (TopDown) {
+ // Choose the side which is further along in the sequence.
+ if ((A == S_Retain || A == S_CanRelease) &&
+ (B == S_CanRelease || B == S_Use))
+ return B;
+ } else {
+ // Choose the side which is further along in the sequence.
+ if ((A == S_Use || A == S_CanRelease) &&
+ (B == S_Use || B == S_Release || B == S_Stop || B == S_MovableRelease))
+ return A;
+ // If both sides are releases, choose the more conservative one.
+ if (A == S_Stop && (B == S_Release || B == S_MovableRelease))
+ return A;
+ if (A == S_Release && B == S_MovableRelease)
+ return A;
+ }
+
+ return S_None;
+}
+
+//===----------------------------------------------------------------------===//
+// RRInfo
+//===----------------------------------------------------------------------===//
+
+void RRInfo::clear() {
+ KnownSafe = false;
+ IsTailCallRelease = false;
+ ReleaseMetadata = nullptr;
+ Calls.clear();
+ ReverseInsertPts.clear();
+ CFGHazardAfflicted = false;
+}
+
+bool RRInfo::Merge(const RRInfo &Other) {
+ // Conservatively merge the ReleaseMetadata information.
+ if (ReleaseMetadata != Other.ReleaseMetadata)
+ ReleaseMetadata = nullptr;
+
+ // Conservatively merge the boolean state.
+ KnownSafe &= Other.KnownSafe;
+ IsTailCallRelease &= Other.IsTailCallRelease;
+ CFGHazardAfflicted |= Other.CFGHazardAfflicted;
+
+ // Merge the call sets.
+ Calls.insert(Other.Calls.begin(), Other.Calls.end());
+
+ // Merge the insert point sets. If there are any differences,
+ // that makes this a partial merge.
+ bool Partial = ReverseInsertPts.size() != Other.ReverseInsertPts.size();
+ for (Instruction *Inst : Other.ReverseInsertPts)
+ Partial |= ReverseInsertPts.insert(Inst).second;
+ return Partial;
+}
+
+//===----------------------------------------------------------------------===//
+// PtrState
+//===----------------------------------------------------------------------===//
+
+void PtrState::SetKnownPositiveRefCount() {
+ DEBUG(dbgs() << " Setting Known Positive.\n");
+ KnownPositiveRefCount = true;
+}
+
+void PtrState::ClearKnownPositiveRefCount() {
+ DEBUG(dbgs() << " Clearing Known Positive.\n");
+ KnownPositiveRefCount = false;
+}
+
+void PtrState::SetSeq(Sequence NewSeq) {
+ DEBUG(dbgs() << " Old: " << GetSeq() << "; New: " << NewSeq << "\n");
+ Seq = NewSeq;
+}
+
+void PtrState::ResetSequenceProgress(Sequence NewSeq) {
+ DEBUG(dbgs() << " Resetting sequence progress.\n");
+ SetSeq(NewSeq);
+ Partial = false;
+ RRI.clear();
+}
+
+void PtrState::Merge(const PtrState &Other, bool TopDown) {
+ Seq = MergeSeqs(GetSeq(), Other.GetSeq(), TopDown);
+ KnownPositiveRefCount &= Other.KnownPositiveRefCount;
+
+ // If we're not in a sequence (anymore), drop all associated state.
+ if (Seq == S_None) {
+ Partial = false;
+ RRI.clear();
+ } else if (Partial || Other.Partial) {
+ // If we're doing a merge on a path that's previously seen a partial
+ // merge, conservatively drop the sequence, to avoid doing partial
+ // RR elimination. If the branch predicates for the two merge differ,
+ // mixing them is unsafe.
+ ClearSequenceProgress();
+ } else {
+ // Otherwise merge the other PtrState's RRInfo into our RRInfo. At this
+ // point, we know that currently we are not partial. Stash whether or not
+ // the merge operation caused us to undergo a partial merging of reverse
+ // insertion points.
+ Partial = RRI.Merge(Other.RRI);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// BottomUpPtrState
+//===----------------------------------------------------------------------===//
+
+bool BottomUpPtrState::InitBottomUp(ARCMDKindCache &Cache, Instruction *I) {
+ // Check whether we see two releases in a row on the same pointer. If so,
+ // make a note, and we'll circle back to revisit it after we've hopefully
+ // eliminated the second release, which may allow us to eliminate the first
+ // release too.
+ // Theoretically we could implement removal of nested retain+release
+ // pairs by making PtrState hold a stack of states, but this is
+ // simple and avoids adding overhead for the non-nested case.
+ bool NestingDetected = false;
+ if (GetSeq() == S_Release || GetSeq() == S_MovableRelease) {
+ DEBUG(dbgs() << " Found nested releases (i.e. a release pair)\n");
+ NestingDetected = true;
+ }
+
+ MDNode *ReleaseMetadata =
+ I->getMetadata(Cache.get(ARCMDKindID::ImpreciseRelease));
+ Sequence NewSeq = ReleaseMetadata ? S_MovableRelease : S_Release;
+ ResetSequenceProgress(NewSeq);
+ SetReleaseMetadata(ReleaseMetadata);
+ SetKnownSafe(HasKnownPositiveRefCount());
+ SetTailCallRelease(cast<CallInst>(I)->isTailCall());
+ InsertCall(I);
+ SetKnownPositiveRefCount();
+ return NestingDetected;
+}
+
+bool BottomUpPtrState::MatchWithRetain() {
+ SetKnownPositiveRefCount();
+
+ Sequence OldSeq = GetSeq();
+ switch (OldSeq) {
+ case S_Stop:
+ case S_Release:
+ case S_MovableRelease:
+ case S_Use:
+ // If OldSeq is not S_Use or OldSeq is S_Use and we are tracking an
+ // imprecise release, clear our reverse insertion points.
+ if (OldSeq != S_Use || IsTrackingImpreciseReleases())
+ ClearReverseInsertPts();
+ // FALL THROUGH
+ case S_CanRelease:
+ return true;
+ case S_None:
+ return false;
+ case S_Retain:
+ llvm_unreachable("bottom-up pointer in retain state!");
+ }
+ llvm_unreachable("Sequence unknown enum value");
+}
+
+bool BottomUpPtrState::HandlePotentialAlterRefCount(Instruction *Inst,
+ const Value *Ptr,
+ ProvenanceAnalysis &PA,
+ ARCInstKind Class) {
+ Sequence S = GetSeq();
+
+ // Check for possible releases.
+ if (!CanAlterRefCount(Inst, Ptr, PA, Class))
+ return false;
+
+ DEBUG(dbgs() << " CanAlterRefCount: Seq: " << S << "; " << *Ptr
+ << "\n");
+ switch (S) {
+ case S_Use:
+ SetSeq(S_CanRelease);
+ return true;
+ case S_CanRelease:
+ case S_Release:
+ case S_MovableRelease:
+ case S_Stop:
+ case S_None:
+ return false;
+ case S_Retain:
+ llvm_unreachable("bottom-up pointer in retain state!");
+ }
+ llvm_unreachable("Sequence unknown enum value");
+}
+
+void BottomUpPtrState::HandlePotentialUse(BasicBlock *BB, Instruction *Inst,
+ const Value *Ptr,
+ ProvenanceAnalysis &PA,
+ ARCInstKind Class) {
+ // Check for possible direct uses.
+ switch (GetSeq()) {
+ case S_Release:
+ case S_MovableRelease:
+ if (CanUse(Inst, Ptr, PA, Class)) {
+ DEBUG(dbgs() << " CanUse: Seq: " << GetSeq() << "; " << *Ptr
+ << "\n");
+ assert(!HasReverseInsertPts());
+ // If this is an invoke instruction, we're scanning it as part of
+ // one of its successor blocks, since we can't insert code after it
+ // in its own block, and we don't want to split critical edges.
+ if (isa<InvokeInst>(Inst))
+ InsertReverseInsertPt(&*BB->getFirstInsertionPt());
+ else
+ InsertReverseInsertPt(&*++Inst->getIterator());
+ SetSeq(S_Use);
+ } else if (Seq == S_Release && IsUser(Class)) {
+ DEBUG(dbgs() << " PreciseReleaseUse: Seq: " << GetSeq() << "; "
+ << *Ptr << "\n");
+ // Non-movable releases depend on any possible objc pointer use.
+ SetSeq(S_Stop);
+ assert(!HasReverseInsertPts());
+ // As above; handle invoke specially.
+ if (isa<InvokeInst>(Inst))
+ InsertReverseInsertPt(&*BB->getFirstInsertionPt());
+ else
+ InsertReverseInsertPt(&*++Inst->getIterator());
+ }
+ break;
+ case S_Stop:
+ if (CanUse(Inst, Ptr, PA, Class)) {
+ DEBUG(dbgs() << " PreciseStopUse: Seq: " << GetSeq() << "; "
+ << *Ptr << "\n");
+ SetSeq(S_Use);
+ }
+ break;
+ case S_CanRelease:
+ case S_Use:
+ case S_None:
+ break;
+ case S_Retain:
+ llvm_unreachable("bottom-up pointer in retain state!");
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// TopDownPtrState
+//===----------------------------------------------------------------------===//
+
+bool TopDownPtrState::InitTopDown(ARCInstKind Kind, Instruction *I) {
+ bool NestingDetected = false;
+ // Don't do retain+release tracking for ARCInstKind::RetainRV, because it's
+ // better to let it remain as the first instruction after a call.
+ if (Kind != ARCInstKind::RetainRV) {
+ // Check whether we see two retains in a row on the same pointer. If so,
+ // make a note, and we'll circle back to revisit it after we've hopefully
+ // eliminated the second retain, which may allow us to eliminate the first
+ // retain too.
+ // Theoretically we could implement removal of nested retain+release
+ // pairs by making PtrState hold a stack of states, but this is
+ // simple and avoids adding overhead for the non-nested case.
+ if (GetSeq() == S_Retain)
+ NestingDetected = true;
+
+ ResetSequenceProgress(S_Retain);
+ SetKnownSafe(HasKnownPositiveRefCount());
+ InsertCall(I);
+ }
+
+ SetKnownPositiveRefCount();
+ return NestingDetected;
+}
+
+bool TopDownPtrState::MatchWithRelease(ARCMDKindCache &Cache,
+ Instruction *Release) {
+ ClearKnownPositiveRefCount();
+
+ Sequence OldSeq = GetSeq();
+
+ MDNode *ReleaseMetadata =
+ Release->getMetadata(Cache.get(ARCMDKindID::ImpreciseRelease));
+
+ switch (OldSeq) {
+ case S_Retain:
+ case S_CanRelease:
+ if (OldSeq == S_Retain || ReleaseMetadata != nullptr)
+ ClearReverseInsertPts();
+ // FALL THROUGH
+ case S_Use:
+ SetReleaseMetadata(ReleaseMetadata);
+ SetTailCallRelease(cast<CallInst>(Release)->isTailCall());
+ return true;
+ case S_None:
+ return false;
+ case S_Stop:
+ case S_Release:
+ case S_MovableRelease:
+ llvm_unreachable("top-down pointer in bottom up state!");
+ }
+ llvm_unreachable("Sequence unknown enum value");
+}
+
+bool TopDownPtrState::HandlePotentialAlterRefCount(Instruction *Inst,
+ const Value *Ptr,
+ ProvenanceAnalysis &PA,
+ ARCInstKind Class) {
+ // Check for possible releases.
+ if (!CanAlterRefCount(Inst, Ptr, PA, Class))
+ return false;
+
+ DEBUG(dbgs() << " CanAlterRefCount: Seq: " << GetSeq() << "; " << *Ptr
+ << "\n");
+ ClearKnownPositiveRefCount();
+ switch (GetSeq()) {
+ case S_Retain:
+ SetSeq(S_CanRelease);
+ assert(!HasReverseInsertPts());
+ InsertReverseInsertPt(Inst);
+
+ // One call can't cause a transition from S_Retain to S_CanRelease
+ // and S_CanRelease to S_Use. If we've made the first transition,
+ // we're done.
+ return true;
+ case S_Use:
+ case S_CanRelease:
+ case S_None:
+ return false;
+ case S_Stop:
+ case S_Release:
+ case S_MovableRelease:
+ llvm_unreachable("top-down pointer in release state!");
+ }
+ llvm_unreachable("covered switch is not covered!?");
+}
+
+void TopDownPtrState::HandlePotentialUse(Instruction *Inst, const Value *Ptr,
+ ProvenanceAnalysis &PA,
+ ARCInstKind Class) {
+ // Check for possible direct uses.
+ switch (GetSeq()) {
+ case S_CanRelease:
+ if (!CanUse(Inst, Ptr, PA, Class))
+ return;
+ DEBUG(dbgs() << " CanUse: Seq: " << GetSeq() << "; " << *Ptr
+ << "\n");
+ SetSeq(S_Use);
+ return;
+ case S_Retain:
+ case S_Use:
+ case S_None:
+ return;
+ case S_Stop:
+ case S_Release:
+ case S_MovableRelease:
+ llvm_unreachable("top-down pointer in release state!");
+ }
+}
diff --git a/contrib/llvm/lib/Transforms/ObjCARC/PtrState.h b/contrib/llvm/lib/Transforms/ObjCARC/PtrState.h
new file mode 100644
index 0000000..9749e44
--- /dev/null
+++ b/contrib/llvm/lib/Transforms/ObjCARC/PtrState.h
@@ -0,0 +1,210 @@
+//===--- PtrState.h - ARC State for a Ptr -------------------*- C++ -*-----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains declarations for the ARC state associated with a ptr. It
+// is only used by the ARC Sequence Dataflow computation. By separating this
+// from the actual dataflow, it is easier to consider the mechanics of the ARC
+// optimization separate from the actual predicates being used.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TRANSFORMS_OBJCARC_PTRSTATE_H
+#define LLVM_LIB_TRANSFORMS_OBJCARC_PTRSTATE_H
+
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Analysis/ObjCARCInstKind.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Debug.h"
+
+namespace llvm {
+namespace objcarc {
+
+class ARCMDKindCache;
+class ProvenanceAnalysis;
+
+/// \enum Sequence
+///
+/// \brief A sequence of states that a pointer may go through in which an
+/// objc_retain and objc_release are actually needed.
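+///
+/// Roughly, the bottom-up analysis walks a pointer from a release back toward
+/// a matching retain (S_Release/S_MovableRelease -> S_Use -> S_CanRelease),
+/// while the top-down analysis walks from a retain forward toward a matching
+/// release (S_Retain -> S_CanRelease -> S_Use). S_Stop is a bottom-up state
+/// for a precise (non-movable) release that has been blocked by a direct use.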
+enum Sequence {
+ S_None,
+ S_Retain, ///< objc_retain(x).
+ S_CanRelease, ///< foo(x) -- x could possibly see a ref count decrement.
+ S_Use, ///< any use of x.
+ S_Stop, ///< like S_Release, but code motion is stopped.
+ S_Release, ///< objc_release(x).
+ S_MovableRelease ///< objc_release(x), !clang.imprecise_release.
+};
+
+raw_ostream &operator<<(raw_ostream &OS,
+ const Sequence S) LLVM_ATTRIBUTE_UNUSED;
+
+/// \brief Unidirectional information about either a
+/// retain-decrement-use-release sequence or release-use-decrement-retain
+/// reverse sequence.
+struct RRInfo {
+ /// After an objc_retain, the reference count of the referenced
+ /// object is known to be positive. Similarly, before an objc_release, the
+ /// reference count of the referenced object is known to be positive. If
+ /// there are retain-release pairs in code regions where the retain count
+ /// is known to be positive, they can be eliminated, regardless of any side
+ /// effects between them.
+ ///
+ /// Also, a retain+release pair nested within another retain+release
+ /// pair all on the known same pointer value can be eliminated, regardless
+ /// of any intervening side effects.
+ ///
+ /// KnownSafe is true when either of these conditions is satisfied.
+ bool KnownSafe;
+
+ /// True if the objc_release calls are all marked with the "tail" keyword.
+ bool IsTailCallRelease;
+
+ /// If the Calls are objc_release calls and they all have a
+ /// clang.imprecise_release tag, this is the metadata tag.
+ MDNode *ReleaseMetadata;
+
+ /// For a top-down sequence, the set of objc_retains or
+ /// objc_retainBlocks. For bottom-up, the set of objc_releases.
+ SmallPtrSet<Instruction *, 2> Calls;
+
+ /// The set of optimal insert positions for moving calls in the opposite
+ /// sequence.
+ SmallPtrSet<Instruction *, 2> ReverseInsertPts;
+
+ /// If this is true, we cannot perform code motion but can still remove
+ /// retain/release pairs.
+ bool CFGHazardAfflicted;
+
+ RRInfo()
+ : KnownSafe(false), IsTailCallRelease(false), ReleaseMetadata(nullptr),
+ CFGHazardAfflicted(false) {}
+
+ void clear();
+
+ /// Conservatively merge the two RRInfo. Returns true if a partial merge has
+ /// occurred, false otherwise.
+ bool Merge(const RRInfo &Other);
+};
+
+/// \brief This class summarizes several per-pointer runtime properties which
+/// are propagated through the flow graph.
+class PtrState {
+protected:
+ /// True if the reference count is known to be incremented.
+ bool KnownPositiveRefCount;
+
+ /// True if we've seen an opportunity for partial RR elimination, such as
+ /// pushing calls into a CFG triangle or into one side of a CFG diamond.
+ bool Partial;
+
+ /// The current position in the sequence.
+ unsigned char Seq : 8;
+
+ /// Unidirectional information about the current sequence.
+ RRInfo RRI;
+
+ PtrState() : KnownPositiveRefCount(false), Partial(false), Seq(S_None) {}
+
+public:
+ bool IsKnownSafe() const { return RRI.KnownSafe; }
+
+ void SetKnownSafe(const bool NewValue) { RRI.KnownSafe = NewValue; }
+
+ bool IsTailCallRelease() const { return RRI.IsTailCallRelease; }
+
+ void SetTailCallRelease(const bool NewValue) {
+ RRI.IsTailCallRelease = NewValue;
+ }
+
+ bool IsTrackingImpreciseReleases() const {
+ return RRI.ReleaseMetadata != nullptr;
+ }
+
+ const MDNode *GetReleaseMetadata() const { return RRI.ReleaseMetadata; }
+
+ void SetReleaseMetadata(MDNode *NewValue) { RRI.ReleaseMetadata = NewValue; }
+
+ bool IsCFGHazardAfflicted() const { return RRI.CFGHazardAfflicted; }
+
+ void SetCFGHazardAfflicted(const bool NewValue) {
+ RRI.CFGHazardAfflicted = NewValue;
+ }
+
+ void SetKnownPositiveRefCount();
+ void ClearKnownPositiveRefCount();
+
+ bool HasKnownPositiveRefCount() const { return KnownPositiveRefCount; }
+
+ void SetSeq(Sequence NewSeq);
+
+ Sequence GetSeq() const { return static_cast<Sequence>(Seq); }
+
+ void ClearSequenceProgress() { ResetSequenceProgress(S_None); }
+
+ void ResetSequenceProgress(Sequence NewSeq);
+ void Merge(const PtrState &Other, bool TopDown);
+
+ void InsertCall(Instruction *I) { RRI.Calls.insert(I); }
+
+ void InsertReverseInsertPt(Instruction *I) { RRI.ReverseInsertPts.insert(I); }
+
+ void ClearReverseInsertPts() { RRI.ReverseInsertPts.clear(); }
+
+ bool HasReverseInsertPts() const { return !RRI.ReverseInsertPts.empty(); }
+
+ const RRInfo &GetRRInfo() const { return RRI; }
+};
+
+struct BottomUpPtrState : PtrState {
+ BottomUpPtrState() : PtrState() {}
+
+ /// (Re-)Initialize this bottom up pointer returning true if we detected a
+ /// pointer with nested releases.
+ bool InitBottomUp(ARCMDKindCache &Cache, Instruction *I);
+
+ /// Return true if this set of releases can be paired with a retain. Modifies
+ /// state appropriately to reflect that the matching occurred if it is
+ /// successful.
+ ///
+ /// It is assumed that one has already checked that the RCIdentity of the
+ /// retain and the RCIdentity of this ptr state are the same.
+ bool MatchWithRetain();
+
+ void HandlePotentialUse(BasicBlock *BB, Instruction *Inst, const Value *Ptr,
+ ProvenanceAnalysis &PA, ARCInstKind Class);
+ bool HandlePotentialAlterRefCount(Instruction *Inst, const Value *Ptr,
+ ProvenanceAnalysis &PA, ARCInstKind Class);
+};
+
+struct TopDownPtrState : PtrState {
+ TopDownPtrState() : PtrState() {}
+
+ /// (Re-)Initialize this top down pointer returning true if we detected a
+ /// pointer with nested retains.
+ bool InitTopDown(ARCInstKind Kind, Instruction *I);
+
+ /// Return true if this set of retains can be paired with the given
+ /// release. Modifies state appropriately to reflect that the matching
+ /// occurred.
+ bool MatchWithRelease(ARCMDKindCache &Cache, Instruction *Release);
+
+ void HandlePotentialUse(Instruction *Inst, const Value *Ptr,
+ ProvenanceAnalysis &PA, ARCInstKind Class);
+
+ bool HandlePotentialAlterRefCount(Instruction *Inst, const Value *Ptr,
+ ProvenanceAnalysis &PA, ARCInstKind Class);
+};
+
+} // end namespace objcarc
+} // end namespace llvm
+
+#endif